author	Balbir Singh <balbir@linux.vnet.ibm.com>	2008-02-07 11:13:59 +0300
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-07 19:42:19 +0300
commit	8697d33194faae6fdd6b2e799f6308aa00cfdf67 (patch)
tree	edf6b3e4698b80aac6f1d1f2b9e5698ce8dfa6e5 /include/linux
parent	c7ba5c9e8176704bfac0729875fa62798037584d (diff)
download	linux-8697d33194faae6fdd6b2e799f6308aa00cfdf67.tar.xz
Memory controller: add switch to control what type of pages to limit
Choose whether cached pages should be accounted or not.  By default, both mapped and cached pages are accounted for.  A new tunable is added:

echo -n 1 > mem_control_type

switches the accounting to account for only mapped pages;

echo -n 3 > mem_control_type

switches the behaviour back.

[bunk@kernel.org: mm/memcontrol.c: cleanups]
[akpm@linux-foundation.org: fix sparc32 build]
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
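For illustration, a minimal user-space sketch of flipping the accounting mode is shown below. Only the file name mem_control_type comes from the commit message; the cgroup mount point used in the path is an assumption and must be adjusted to however the memory controller hierarchy is mounted on the running system.

/*
 * Hedged sketch (not part of this commit): switch the memory controller
 * to account only mapped pages by writing "1" to the mem_control_type
 * file of a cgroup.  The path below assumes a particular mount point.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/cgroups/memory/mem_control_type";	/* assumed mount point */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1", f);	/* no trailing newline, mirroring "echo -n 1" */
	return fclose(f) ? 1 : 0;
}

Writing "1" restricts accounting to mapped (RSS) pages; writing "3" restores the default of accounting both mapped and cached pages.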
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/memcontrol.h	9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 9bbbf524ba8f..bb6f5105401b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -22,6 +22,8 @@
 
 struct mem_cgroup;
 struct page_cgroup;
+struct page;
+struct mm_struct;
 
 #ifdef CONFIG_CGROUP_MEM_CONT
 
@@ -40,6 +42,7 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 						struct mem_cgroup *mem_cont,
 						int active);
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
+extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm);
 
 static inline void mem_cgroup_uncharge_page(struct page *page)
 {
@@ -84,6 +87,12 @@ static inline void mem_cgroup_move_lists(struct page_cgroup *pc,
 {
 }
 
+static inline int mem_cgroup_cache_charge(struct page *page,
+						struct mm_struct *mm)
+{
+	return 0;
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
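The static inline stub added for the !CONFIG_CGROUP_MEM_CONT case simply returns 0, so code that charges page-cache pages can call mem_cgroup_cache_charge() unconditionally, without an #ifdef of its own. A hedged sketch of such a caller follows; the function name and the surrounding page-cache logic are invented for illustration and are not taken from this commit.

#include <linux/memcontrol.h>

/* Hypothetical caller, for illustration only: charge a page-cache page
 * to the mm's memory cgroup before inserting it into the page cache. */
static int example_add_to_page_cache(struct page *page, struct mm_struct *mm)
{
	int err;

	/* No-op returning 0 when the memory controller is compiled out. */
	err = mem_cgroup_cache_charge(page, mm);
	if (err)
		return err;

	/* ... actual page-cache insertion would go here ... */
	return 0;
}

Whether such a charge actually hits the cgroup's counter then depends on the mem_control_type setting described in the commit message.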