author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2017-03-22 06:37:01 +0300
committer  Michael Ellerman <mpe@ellerman.id.au>  2017-04-01 13:12:28 +0300
commit     fbfef9027c2a7ad9277755509fdb849dbccfe8c1 (patch)
tree       f44258e7f2972d130c9fa4f80cdb969d0495f4f3 /arch/powerpc
parent     82228e362f9b7f4b876d0fbb1036c235797c6b1d (diff)
download   linux-fbfef9027c2a7ad9277755509fdb849dbccfe8c1.tar.xz
powerpc/mm: Switch some TASK_SIZE checks to use mm_context addr_limit
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
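The pattern throughout the patch is uniform: every bounds check that used the fixed, architecture-wide TASK_SIZE ceiling now reads the per-mm limit from mm->context.addr_limit, so the effective ceiling can differ from one process to another. Below is a minimal user-space sketch of that check; mm_context, range_ok, and MMAP_MIN_ADDR here are illustrative stand-ins, not kernel code.

#include <stdio.h>

#define MMAP_MIN_ADDR 0x10000UL  /* stand-in for the kernel's mmap_min_addr */

struct mm_context {
	unsigned long addr_limit;  /* per-process address ceiling */
};

/*
 * Returns 0 if [addr, addr + len) fits between the floor and the per-mm
 * ceiling. This mirrors the "limit - len >= addr" form used in the patch,
 * which avoids overflow in addr + len.
 */
static int range_ok(const struct mm_context *ctx, unsigned long addr,
		    unsigned long len)
{
	if (len > ctx->addr_limit - MMAP_MIN_ADDR)
		return -1;  /* -ENOMEM in the kernel */
	if (addr < MMAP_MIN_ADDR || ctx->addr_limit - len < addr)
		return -1;
	return 0;
}

int main(void)
{
	struct mm_context ctx = { .addr_limit = 1UL << 47 };  /* e.g. 128TB */

	printf("%d\n", range_ok(&ctx, 0x7fff00000000UL, 0x10000UL));      /* 0  */
	printf("%d\n", range_ok(&ctx, (1UL << 47) - 0x1000UL, 0x2000UL)); /* -1 */
	return 0;
}

The point of the indirection is that a per-mm addr_limit can vary at runtime for a given process, whereas TASK_SIZE is fixed for the architecture.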
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/mm/hugetlbpage-radix.c  |  4 ++--
-rw-r--r--  arch/powerpc/mm/mmap.c               | 12 ++++++------
-rw-r--r--  arch/powerpc/mm/slice.c              |  6 +++---
-rw-r--r--  arch/powerpc/mm/subpage-prot.c       |  3 ++-
4 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 35254a678456..95207c117b82 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -52,7 +52,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (len & ~huge_page_mask(h))
return -EINVAL;
- if (len > TASK_SIZE)
+ if (len > mm->context.addr_limit)
return -ENOMEM;
if (flags & MAP_FIXED) {
@@ -64,7 +64,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
+ if (mm->context.addr_limit - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index a5d9ef59debe..6e9fead92969 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -97,7 +97,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
- if (len > TASK_SIZE - mmap_min_addr)
+ if (len > mm->context.addr_limit - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
@@ -106,7 +106,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ if (mm->context.addr_limit - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
@@ -114,7 +114,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
- info.high_limit = TASK_SIZE;
+ info.high_limit = mm->context.addr_limit;
info.align_mask = 0;
return vm_unmapped_area(&info);
}
@@ -132,7 +132,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
struct vm_unmapped_area_info info;
/* requested length too big for entire address space */
- if (len > TASK_SIZE - mmap_min_addr)
+ if (len > mm->context.addr_limit - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
@@ -142,7 +142,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ if (mm->context.addr_limit - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
@@ -164,7 +164,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
- info.high_limit = TASK_SIZE;
+ info.high_limit = mm->context.addr_limit;
addr = vm_unmapped_area(&info);
}
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index ded96edac817..19d8788820e1 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -277,7 +277,7 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
info.align_offset = 0;
addr = TASK_UNMAPPED_BASE;
- while (addr < TASK_SIZE) {
+ while (addr < mm->context.addr_limit) {
info.low_limit = addr;
if (!slice_scan_available(addr, available, 1, &addr))
continue;
@@ -289,8 +289,8 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
* Check if we need to reduce the range, or if we can
* extend it to cover the next available slice.
*/
- if (addr >= TASK_SIZE)
- addr = TASK_SIZE;
+ if (addr >= mm->context.addr_limit)
+ addr = mm->context.addr_limit;
else if (slice_scan_available(addr, available, 1, &next_end)) {
addr = next_end;
goto next_slice;
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index 94210940112f..a409f78d206b 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -197,7 +197,8 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
/* Check parameters */
if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
- addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
+ addr >= mm->context.addr_limit || len >= mm->context.addr_limit ||
+ addr + len > mm->context.addr_limit)
return -EINVAL;
if (is_hugepage_only_range(mm, addr, len))