author    Andrew Morton <akpm@linux-foundation.org>  2015-02-12 02:25:04 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-02-12 04:06:01 +0300
commit    91fbdc0f89807bb97792ea6893717a8d3154b871
tree      189e5604fa8be6662b9d691465d81037cbc4136f
parent    6de226191d12fce30331ebf024ca3ed24834f0ee
mm/page_alloc.c:__alloc_pages_nodemask(): don't alter arg gfp_mask
__alloc_pages_nodemask() strips __GFP_IO when retrying the page allocation.
But it does this by altering the function-wide variable gfp_mask.  This
will cause subsequent allocation attempts to inadvertently use the
modified gfp_mask.

Also, pass the correct mask (the mask we actually used) into
trace_mm_page_alloc().

Cc: Ming Lei <ming.lei@canonical.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
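To see the pitfall outside the kernel tree, here is a minimal user-space
sketch; strip_io(), allocate_buggy(), allocate_fixed() and the flag values
are illustrative stand-ins for the kernel's memalloc_noio_flags() and GFP
machinery, not the real definitions:

	#include <stdio.h>

	typedef unsigned int gfp_t;
	#define __GFP_IO 0x40u
	#define __GFP_FS 0x80u

	/* Stand-in for memalloc_noio_flags(): strip the I/O flags. */
	static gfp_t strip_io(gfp_t flags)
	{
		return flags & ~(__GFP_IO | __GFP_FS);
	}

	/* Buggy pattern: reusing the parameter means every later
	 * reference to gfp_mask (retries, tracing) silently sees the
	 * stripped value. */
	static void allocate_buggy(gfp_t gfp_mask)
	{
		gfp_mask = strip_io(gfp_mask);	/* clobbers the original */
		printf("buggy retry/trace mask: %#x\n", gfp_mask);
	}

	/* Fixed pattern mirroring the patch: the stripped mask lives in
	 * a local, and tracing reports the mask actually used while the
	 * caller's mask stays intact for any retry. */
	static void allocate_fixed(gfp_t gfp_mask)
	{
		gfp_t alloc_mask = strip_io(gfp_mask);
		printf("fixed trace mask: %#x, original mask: %#x\n",
		       alloc_mask, gfp_mask);
	}

	int main(void)
	{
		allocate_buggy(__GFP_IO | __GFP_FS);
		allocate_fixed(__GFP_IO | __GFP_FS);
		return 0;
	}

The fix is purely one of discipline: the incoming mask stays authoritative
for subsequent attempts, while the local alloc_mask records what each
attempt actually used.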
 mm/page_alloc.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
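For context on the helper the slow path feeds into alloc_mask: in kernels
of this vintage memalloc_noio_flags() is a small inline in
include/linux/sched.h that strips the I/O flags only when the current task
has entered a noio section (paraphrased from memory, so treat it as a
sketch rather than the exact source):

	static inline gfp_t memalloc_noio_flags(gfp_t flags)
	{
		if (unlikely(current->flags & PF_MEMALLOC_NOIO))
			flags &= ~(__GFP_IO | __GFP_FS);
		return flags;
	}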
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f121050e8530..1c7d90f7a84a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2865,6 +2865,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	unsigned int cpuset_mems_cookie;
 	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
 	int classzone_idx;
+	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 
 	gfp_mask &= gfp_allowed_mask;
 
@@ -2898,22 +2899,24 @@ retry_cpuset:
 	classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
 	/* First allocation attempt */
-	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
-			zonelist, high_zoneidx, alloc_flags,
-			preferred_zone, classzone_idx, migratetype);
+	alloc_mask = gfp_mask|__GFP_HARDWALL;
+	page = get_page_from_freelist(alloc_mask, nodemask, order, zonelist,
+			high_zoneidx, alloc_flags, preferred_zone,
+			classzone_idx, migratetype);
 	if (unlikely(!page)) {
 		/*
 		 * Runtime PM, block IO and its error handling path
 		 * can deadlock because I/O on the device might not
 		 * complete.
 		 */
-		gfp_mask = memalloc_noio_flags(gfp_mask);
-		page = __alloc_pages_slowpath(gfp_mask, order,
+		alloc_mask = memalloc_noio_flags(gfp_mask);
+
+		page = __alloc_pages_slowpath(alloc_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, classzone_idx, migratetype);
 	}
 
-	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
+	trace_mm_page_alloc(page, order, alloc_mask, migratetype);
 
 out:
 	/*