path: root/arch/powerpc/mm/hugetlbpage.c
author:    Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>  2019-05-28 08:36:25 +0300
committer: Michael Ellerman <mpe@ellerman.id.au>  2019-07-04 17:48:00 +0300
commit:    5d49275a27310233964fc3edc8dd097a094ce338 (patch)
tree:      4b792dec2affcd9e58e2bf9b63afbe0f7209accd /arch/powerpc/mm/hugetlbpage.c
parent:    2230ebf6e6dd0b7751e2921b40f6cfe34f09bb16 (diff)
download:  linux-5d49275a27310233964fc3edc8dd097a094ce338.tar.xz
powerpc/mm/hugetlb: Fix kernel crash if we fail to allocate page table caches
We only add this check for hugetlb allocations because, with hugetlb, the page table caches are registered conditionally; the PGD/PUD/PMD level caches are always registered in pgtable_cache_init(), so they cannot be missing here.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
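For illustration only, here is a minimal userspace C sketch of the pattern this patch hardens; the names pgtable_cache[], register_supported_caches() and alloc_hugepte() are hypothetical stand-ins, not the kernel code. The point it shows: when caches are created only for configured sizes, the allocation path must check for a missing cache instead of handing a NULL cache pointer to the allocator.

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-in for the per-order page table cache array. */
	#define MAX_ORDER 16
	static void *pgtable_cache[MAX_ORDER];

	/* Conditional registration: only the orders the platform supports
	 * get a cache, mirroring the hugetlb case described above. */
	static void register_supported_caches(const int *orders, int n)
	{
		for (int i = 0; i < n; i++)
			pgtable_cache[orders[i]] = malloc(1); /* placeholder "cache" */
	}

	/* Allocation path: must tolerate a cache that was never created. */
	static int alloc_hugepte(int order)
	{
		void *cachep = pgtable_cache[order];

		if (!cachep) {
			fprintf(stderr, "no page table cache for order %d\n", order);
			return -1; /* the kernel patch returns -ENOMEM here */
		}
		/* ... would allocate the huge page table from cachep here ... */
		return 0;
	}

	int main(void)
	{
		int supported[] = { 4, 9 }; /* arbitrary example orders */

		register_supported_caches(supported, 2);
		alloc_hugepte(9); /* succeeds: cache exists */
		alloc_hugepte(7); /* fails gracefully instead of crashing */
		return 0;
	}

Without the guard, the unsupported order would reach the allocation call with a NULL cache pointer, which is the crash the patch prevents.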
Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 1de0f43a68e5..f55dc110f2ad 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -61,12 +61,17 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 		num_hugepd = 1;
 	}
 
+	if (!cachep) {
+		WARN_ONCE(1, "No page table cache created for hugetlb tables");
+		return -ENOMEM;
+	}
+
 	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
 
 	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
 	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
 
-	if (! new)
+	if (!new)
 		return -ENOMEM;
 
 	/*