author     Johannes Weiner <hannes@cmpxchg.org>            2012-01-13 05:18:40 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-13 08:13:06 +0400
commit     00c54c0bac24bb02d2460c516da76651a7451286 (patch)
tree       d88356ab217fa4c119a6c11dd7f614413adc020a
parent     cfa449461e67b60df986170eecb089831fa9e49a (diff)
download   linux-00c54c0bac24bb02d2460c516da76651a7451286.tar.xz
mm: page_cgroup: check page_cgroup arrays in lookup_page_cgroup() only when necessary
lookup_page_cgroup() is usually used only against pages that are used in
userspace.  The exception is the CONFIG_DEBUG_VM-only memcg check from the
page allocator: it can run on pages without page_cgroup descriptors
allocated when the pages are fed into the page allocator for the first
time during boot or memory hotplug.

Include the array check only when CONFIG_DEBUG_VM is set and save the
unnecessary check in production kernels.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
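For reference, a sketch of what the flatmem variant of lookup_page_cgroup()
looks like with this change applied, reconstructed from the hunks below; the
declarations of pfn and offset above the hunk are assumed from context, since
the hunk uses them without showing them, and the sparsemem variant gets the
same treatment:

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);	/* assumed: declared above the hunk */
	unsigned long offset;			/* assumed: declared above the hunk */
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
#ifdef CONFIG_DEBUG_VM
	/*
	 * Only debug kernels pay for this check: the page allocator's
	 * sanity checks can get here before the page_cgroup arrays
	 * exist, during boot or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
#endif
	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}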
-rw-r--r--  mm/page_cgroup.c  18
1 file changed, 16 insertions, 2 deletions
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index f0559e049e00..e910524e5a08 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -28,9 +28,16 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
 	struct page_cgroup *base;
 
 	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * The sanity checks the page allocator does upon freeing a
+	 * page can reach here before the page_cgroup arrays are
+	 * allocated when feeding a range of pages to the allocator
+	 * for the first time during bootup or memory hotplug.
+	 */
 	if (unlikely(!base))
 		return NULL;
-
+#endif
 	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
 	return base + offset;
 }
@@ -85,9 +92,16 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
 {
 	unsigned long pfn = page_to_pfn(page);
 	struct mem_section *section = __pfn_to_section(pfn);
-
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * The sanity checks the page allocator does upon freeing a
+	 * page can reach here before the page_cgroup arrays are
+	 * allocated when feeding a range of pages to the allocator
+	 * for the first time during bootup or memory hotplug.
+	 */
 	if (!section->page_cgroup)
 		return NULL;
+#endif
 	return section->page_cgroup + pfn;
 }
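
For context (not part of this patch), the CONFIG_DEBUG_VM-only memcg check
mentioned in the changelog is the debug hook the page allocator runs when a
page is freed.  In kernels of this vintage it lives in mm/memcontrol.c and
looks roughly like the sketch below; the helper and function names are
recalled from that era and should be treated as approximate rather than
quoted from this patch:

#ifdef CONFIG_DEBUG_VM
static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);

	/*
	 * Can be NULL while feeding pages into the page allocator for
	 * the first time, i.e. during boot or memory hotplug.
	 */
	if (likely(pc) && PageCgroupUsed(pc))
		return pc;
	return NULL;
}

bool mem_cgroup_bad_page_check(struct page *page)
{
	if (mem_cgroup_disabled())
		return false;

	return lookup_page_cgroup_used(page) != NULL;
}
#endif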