 include/linux/gfp.h | 25 +++++++++++++++++++++++++
 mm/kmemleak.c       | 12 ++++--------
 2 files changed, 29 insertions(+), 8 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 450c2cbcf04b..7f9691d375f0 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -157,6 +157,31 @@ static inline int gfp_zonelist(gfp_t flags)
}
/*
+ * gfp flag masking for nested internal allocations.
+ *
+ * For code that needs to do allocations inside the public allocation API
+ * (e.g. memory allocation tracking code), the nested allocations need to
+ * obey the caller's allocation context constraints so that allocation
+ * context mismatches (e.g. GFP_KERNEL allocations in GFP_NOFS contexts)
+ * cannot lead to deadlock.
+ *
+ * It is also assumed that these nested allocations are for internal kernel
+ * object storage purposes only and are not going to be used for DMA, etc. Hence
+ * we strip out all the zone information and leave just the context information
+ * intact.
+ *
+ * Further, internal allocations must fail before the higher level allocation
+ * can fail, so we must make them fail faster and fail silently. We also don't
+ * want them to deplete emergency reserves. Hence code making nested
+ * allocations must be prepared for them to fail.
+ */
+static inline gfp_t gfp_nested_mask(gfp_t flags)
+{
+	return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
+		(__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
+}
+
+/*
* We get the zone list from the current node and the gfp_mask.
* This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
* There are two zonelists per node, one for all zones with memory and
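
To make the masking concrete, here is a sketch of worked examples and a hypothetical caller. The track_record/track_alloc names are invented for illustration; only gfp_nested_mask() and the standard gfp flags come from the patch:

#include <linux/gfp.h>
#include <linux/slab.h>

/*
 * Worked examples of the masking. Zone modifiers such as __GFP_DMA and
 * __GFP_HIGHMEM are not in the kept set, so they are stripped; GFP_NOFS
 * (GFP_KERNEL without __GFP_FS) passes through intact, preserving the
 * caller's context:
 *
 *   gfp_nested_mask(GFP_KERNEL)
 *	== GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 *   gfp_nested_mask(GFP_NOFS)
 *	== GFP_NOFS   | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 *   gfp_nested_mask(GFP_KERNEL | __GFP_DMA)
 *	== GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 */

/* Hypothetical tracking subsystem allocating inside the allocator. */
struct track_record {
	unsigned long	ptr;
	size_t		size;
};

static struct track_record *track_alloc(unsigned long ptr, size_t size,
					gfp_t caller_gfp)
{
	struct track_record *rec;

	/* Inherit only the caller's reclaim context; fail fast and silently. */
	rec = kmalloc(sizeof(*rec), gfp_nested_mask(caller_gfp));
	if (!rec)
		return NULL;	/* callers must tolerate failure */

	rec->ptr = ptr;
	rec->size = size;
	return rec;
}

Because __GFP_NORETRY, __GFP_NOMEMALLOC and __GFP_NOWARN are ORed in unconditionally, the nested allocation gives up before the outer allocation would and is forbidden from touching the emergency reserves, which is why the NULL return must always be handled.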
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index fdcf01f62202..d5b6fba44fc9 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -114,12 +114,6 @@
#define BYTES_PER_POINTER sizeof(void *)
-/* GFP bitmask for kmemleak internal allocations */
-#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
-					   __GFP_NOLOCKDEP)) | \
-					 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-					 __GFP_NOWARN)
-
/* scanning area inside a memory block */
struct kmemleak_scan_area {
struct hlist_node node;
@@ -463,7 +457,8 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
/* try the slab allocator first */
if (object_cache) {
-		object = kmem_cache_alloc_noprof(object_cache, gfp_kmemleak_mask(gfp));
+		object = kmem_cache_alloc_noprof(object_cache,
+						 gfp_nested_mask(gfp));
if (object)
return object;
}
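
For context, this is roughly how a constrained caller's flags now flow into kmemleak's object allocation (call chain abbreviated):

/*
 * kmalloc(sz, GFP_NOFS)
 *   -> kmemleak_alloc()
 *     -> ... -> mem_pool_alloc(GFP_NOFS)
 *       -> kmem_cache_alloc_noprof(object_cache, gfp_nested_mask(GFP_NOFS));
 *
 * The tracking allocation keeps the caller's no-filesystem-recursion
 * context and fails fast instead of stalling the outer allocation; when
 * it does fail, mem_pool_alloc() falls back to kmemleak's preallocated
 * emergency object pool.
 */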
@@ -947,7 +942,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
if (scan_area_cache)
-		area = kmem_cache_alloc_noprof(scan_area_cache, gfp_kmemleak_mask(gfp));
+		area = kmem_cache_alloc_noprof(scan_area_cache,
+					       gfp_nested_mask(gfp));
raw_spin_lock_irqsave(&object->lock, flags);
if (!area) {
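
The hunk is cut off at the failure check, but add_scan_area() already degrades gracefully when the nested allocation fails: in current kernels it warns once and falls back to scanning the whole object, roughly as below, which is exactly the "prepared to fail" behaviour gfp_nested_mask() assumes:

	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}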