-rw-r--r-- | mm/page_alloc.c | 29
1 file changed, 18 insertions, 11 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5703f7fca832..a42fa09ee91f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3510,35 +3510,42 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 */
 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+		wake_all_kswapds(order, ac);
+
+	/*
+	 * The adjusted alloc_flags might result in immediate success, so try
+	 * that first
+	 */
+	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
+	if (page)
+		goto got_pg;
+
+retry:
+	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
 	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
 		wake_all_kswapds(order, ac);
 
+	if (gfp_pfmemalloc_allowed(gfp_mask))
+		alloc_flags = ALLOC_NO_WATERMARKS;
+
 	/*
 	 * Reset the zonelist iterators if memory policies can be ignored.
 	 * These allocations are high priority and system rather than user
 	 * orientated.
 	 */
-	if (!(alloc_flags & ALLOC_CPUSET) || gfp_pfmemalloc_allowed(gfp_mask)) {
+	if (!(alloc_flags & ALLOC_CPUSET) || (alloc_flags & ALLOC_NO_WATERMARKS)) {
 		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
 					ac->high_zoneidx, ac->nodemask);
 	}
 
-	/* This is the last chance, in general, before the goto nopage. */
+	/* Attempt with potentially adjusted zonelist and alloc_flags */
 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
 	if (page)
 		goto got_pg;
 
-	/* Allocate without watermarks if the context allows */
-	if (gfp_pfmemalloc_allowed(gfp_mask)) {
-
-		page = get_page_from_freelist(gfp_mask, order,
-						ALLOC_NO_WATERMARKS, ac);
-		if (page)
-			goto got_pg;
-	}
-
 	/* Caller is not willing to reclaim, we can't balance anything */
 	if (!can_direct_reclaim) {
 		/*
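For readers skimming the hunk above, the sketch below is a stand-alone user-space approximation (not kernel code) of the control flow this patch establishes in __alloc_pages_slowpath(): kswapd is woken and one allocation attempt is made with the freshly computed alloc_flags before the new retry: label, and access to memory reserves is folded into alloc_flags as ALLOC_NO_WATERMARKS rather than being a separate get_page_from_freelist() call. The helper stubs, flag values, and the pass counter are hypothetical, and the zonelist-iterator reset and the reclaim/compaction steps of the real function are omitted.

#include <stdbool.h>
#include <stdio.h>

#define ALLOC_CPUSET         0x1u	/* arbitrary demo values, not the kernel's */
#define ALLOC_NO_WATERMARKS  0x2u
#define __GFP_KSWAPD_RECLAIM 0x4u

struct page { int order; };

/* Hypothetical stand-ins for the real mm/page_alloc.c helpers. */
static void wake_all_kswapds(void)               { puts("wake kswapd"); }
static bool gfp_pfmemalloc_allowed(unsigned gfp) { (void)gfp; return false; }
static struct page *get_page_from_freelist(unsigned gfp, unsigned flags)
{
	(void)gfp; (void)flags;
	return NULL;		/* pretend every freelist attempt fails */
}

static struct page *slowpath_sketch(unsigned gfp_mask, unsigned alloc_flags)
{
	struct page *page;
	int passes = 0;

	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
		wake_all_kswapds();

	/* New: one attempt with the adjusted alloc_flags before looping. */
	page = get_page_from_freelist(gfp_mask, alloc_flags);
	if (page)
		return page;

retry:
	/* Keep kswapd awake for as long as we loop. */
	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
		wake_all_kswapds();

	/* Reserve access is folded into alloc_flags instead of a second call. */
	if (gfp_pfmemalloc_allowed(gfp_mask))
		alloc_flags = ALLOC_NO_WATERMARKS;

	/* (The zonelist-iterator reset done by the real code is omitted.) */
	page = get_page_from_freelist(gfp_mask, alloc_flags);
	if (page)
		return page;

	/* The real loop retries via reclaim/compaction; cap it for the demo. */
	if (++passes < 2)
		goto retry;
	return NULL;
}

int main(void)
{
	struct page *page = slowpath_sketch(__GFP_KSWAPD_RECLAIM, ALLOC_CPUSET);
	printf("allocation %s\n", page ? "succeeded" : "failed");
	return 0;
}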