From d1b6d2e1fe1d9d3ff2c7cb7ce84d463560aa5e33 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Sat, 17 Oct 2020 16:15:17 -0700
Subject: zsmalloc: switch from alloc_vm_area to get_vm_area

Just manually pre-fault the PTEs using apply_to_page_range.

Co-developed-by: Minchan Kim
Signed-off-by: Christoph Hellwig
Signed-off-by: Andrew Morton
Cc: Boris Ostrovsky
Cc: Chris Wilson
Cc: Jani Nikula
Cc: Joonas Lahtinen
Cc: Juergen Gross
Cc: Matthew Auld
Cc: "Matthew Wilcox (Oracle)"
Cc: Nitin Gupta
Cc: Peter Zijlstra
Cc: Rodrigo Vivi
Cc: Stefano Stabellini
Cc: Tvrtko Ursulin
Cc: Uladzislau Rezki (Sony)
Link: https://lkml.kernel.org/r/20201002122204.1534411-6-hch@lst.de
Signed-off-by: Linus Torvalds
---
 mm/zsmalloc.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c36fdff9a371..918c7b019b3d 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1122,10 +1122,16 @@ static inline int __zs_cpu_up(struct mapping_area *area)
 	 */
 	if (area->vm)
 		return 0;
-	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
+	area->vm = get_vm_area(PAGE_SIZE * 2, 0);
 	if (!area->vm)
 		return -ENOMEM;
-	return 0;
+
+	/*
+	 * Populate ptes in advance to avoid pte allocation with GFP_KERNEL
+	 * in non-preemtible context of zs_map_object.
+	 */
+	return apply_to_page_range(&init_mm, (unsigned long)area->vm->addr,
+			PAGE_SIZE * 2, NULL, NULL);
 }
 
 static inline void __zs_cpu_down(struct mapping_area *area)
--
cgit v1.2.3
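
For context, the change works in two steps: get_vm_area() only reserves a two-page window of kernel virtual address space (no pages are mapped), and apply_to_page_range() with a NULL callback walks that window once and allocates the intermediate page-table pages with GFP_KERNEL. Because the page tables then already exist, zs_map_object() can later install PTEs for object pages from its non-preemptible per-CPU context without sleeping in an allocation. The sketch below shows the same pattern in isolation; it assumes a kernel where apply_to_page_range() accepts a NULL callback (added earlier in this series), and the names prefault_demo_area and prefault_demo_init are invented for illustration, not part of the patch.

#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct vm_struct *prefault_demo_area;

static int prefault_demo_init(void)
{
	int ret;

	/* Reserve two pages of kernel virtual address space; nothing is mapped yet. */
	prefault_demo_area = get_vm_area(PAGE_SIZE * 2, 0);
	if (!prefault_demo_area)
		return -ENOMEM;

	/*
	 * Walk the range once with a NULL callback: this allocates the
	 * page-table levels covering the window (may sleep, GFP_KERNEL)
	 * but installs no PTE entries.
	 */
	ret = apply_to_page_range(&init_mm,
				  (unsigned long)prefault_demo_area->addr,
				  PAGE_SIZE * 2, NULL, NULL);
	if (ret)
		free_vm_area(prefault_demo_area);
	return ret;
}

The old alloc_vm_area() helper bundled both steps (and could also hand back the pre-faulted PTE pointers, which zsmalloc did not use); open-coding them with get_vm_area() plus apply_to_page_range() is what lets the rest of the series remove that helper.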