author     Balbir Singh <balbir@linux.vnet.ibm.com>             2008-02-07 11:13:59 +0300
committer  Linus Torvalds <torvalds@woody.linux-foundation.org> 2008-02-07 19:42:19 +0300
commit     8697d33194faae6fdd6b2e799f6308aa00cfdf67
tree       edf6b3e4698b80aac6f1d1f2b9e5698ce8dfa6e5 /mm
parent     c7ba5c9e8176704bfac0729875fa62798037584d
download   linux-8697d33194faae6fdd6b2e799f6308aa00cfdf67.tar.xz
Memory controller: add switch to control what type of pages to limit
Choose whether we want cached pages to be accounted for or not. By default,
both mapped and cached pages are accounted for. A new tunable is added:

	echo -n 1 > mem_control_type

switches the accounting to account for only mapped pages;

	echo -n 3 > mem_control_type

switches the behaviour back to accounting for both mapped and cached pages.
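For reference, the accepted values map onto the MEM_CGROUP_TYPE_* enum this
patch introduces in mm/memcontrol.c (see the diff below); a condensed sketch
of that mapping, with the behaviour per value noted in comments:

	/* Values accepted by mem_control_type, per the enum added below. */
	enum {
		MEM_CGROUP_TYPE_UNSPEC = 0,	/* 0: rejected by mem_control_type_write() */
		MEM_CGROUP_TYPE_MAPPED,		/* 1: account only mapped (RSS) pages */
		MEM_CGROUP_TYPE_CACHED,		/* 2: accepted, but nothing branches on it yet */
		MEM_CGROUP_TYPE_ALL,		/* 3: account mapped + page cache (the default) */
		MEM_CGROUP_TYPE_MAX,		/* 4 and above: rejected */
	};

Only MEM_CGROUP_TYPE_ALL makes mem_cgroup_cache_charge() actually charge
page-cache pages; with any other accepted value the cache charge is skipped
and only mapped pages are accounted.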
[bunk@kernel.org: mm/memcontrol.c: cleanups]
[akpm@linux-foundation.org: fix sparc32 build]
Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c    |  2
-rw-r--r--  mm/memcontrol.c | 98
-rw-r--r--  mm/swap_state.c |  2
3 files changed, 97 insertions, 5 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index b7a01e927953..8ae171cc2811 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -464,7 +464,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 
 	if (error == 0) {
-		error = mem_cgroup_charge(page, current->mm);
+		error = mem_cgroup_cache_charge(page, current->mm);
 		if (error)
 			goto out;
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5260658c90aa..10833d969e3f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -29,6 +29,8 @@
 #include <linux/spinlock.h>
 #include <linux/fs.h>
 
+#include <asm/uaccess.h>
+
 struct cgroup_subsys mem_cgroup_subsys;
 static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
 
@@ -60,6 +62,7 @@ struct mem_cgroup {
 	 * spin_lock to protect the per cgroup LRU
 	 */
 	spinlock_t lru_lock;
+	unsigned long control_type;	/* control RSS or RSS+Pagecache */
 };
 
 /*
@@ -82,6 +85,15 @@ struct page_cgroup {
 					/* mapped and cached states     */
 };
 
+enum {
+	MEM_CGROUP_TYPE_UNSPEC = 0,
+	MEM_CGROUP_TYPE_MAPPED,
+	MEM_CGROUP_TYPE_CACHED,
+	MEM_CGROUP_TYPE_ALL,
+	MEM_CGROUP_TYPE_MAX,
+};
+
+static struct mem_cgroup init_mem_cgroup;
 
 static inline struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
@@ -139,18 +151,18 @@ struct page_cgroup *page_get_page_cgroup(struct page *page)
 			(page->page_cgroup & ~PAGE_CGROUP_LOCK);
 }
 
-void __always_inline lock_page_cgroup(struct page *page)
+static void __always_inline lock_page_cgroup(struct page *page)
 {
 	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 	VM_BUG_ON(!page_cgroup_locked(page));
 }
 
-void __always_inline unlock_page_cgroup(struct page *page)
+static void __always_inline unlock_page_cgroup(struct page *page)
 {
 	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
+static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 {
 	if (active)
 		list_move(&pc->lru, &pc->mem_cgroup->active_list);
@@ -366,6 +378,22 @@ err:
 }
 
 /*
+ * See if the cached pages should be charged at all?
+ */
+int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm)
+{
+	struct mem_cgroup *mem;
+	if (!mm)
+		mm = &init_mm;
+
+	mem = rcu_dereference(mm->mem_cgroup);
+	if (mem->control_type == MEM_CGROUP_TYPE_ALL)
+		return mem_cgroup_charge(page, mm);
+	else
+		return 0;
+}
+
+/*
  * Uncharging is always a welcome operation, we never complain, simply
  * uncharge.
  */
@@ -375,6 +403,10 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
 	struct page *page;
 	unsigned long flags;
 
+	/*
+	 * This can handle cases when a page is not charged at all and we
+	 * are switching between handling the control_type.
+	 */
 	if (!pc)
 		return;
 
@@ -425,6 +457,60 @@ static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
 				mem_cgroup_write_strategy);
 }
 
+static ssize_t mem_control_type_write(struct cgroup *cont,
+			struct cftype *cft, struct file *file,
+			const char __user *userbuf,
+			size_t nbytes, loff_t *pos)
+{
+	int ret;
+	char *buf, *end;
+	unsigned long tmp;
+	struct mem_cgroup *mem;
+
+	mem = mem_cgroup_from_cont(cont);
+	buf = kmalloc(nbytes + 1, GFP_KERNEL);
+	ret = -ENOMEM;
+	if (buf == NULL)
+		goto out;
+
+	buf[nbytes] = 0;
+	ret = -EFAULT;
+	if (copy_from_user(buf, userbuf, nbytes))
+		goto out_free;
+
+	ret = -EINVAL;
+	tmp = simple_strtoul(buf, &end, 10);
+	if (*end != '\0')
+		goto out_free;
+
+	if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
+		goto out_free;
+
+	mem->control_type = tmp;
+	ret = nbytes;
+out_free:
+	kfree(buf);
+out:
+	return ret;
+}
+
+static ssize_t mem_control_type_read(struct cgroup *cont,
+				struct cftype *cft,
+				struct file *file, char __user *userbuf,
+				size_t nbytes, loff_t *ppos)
+{
+	unsigned long val;
+	char buf[64], *s;
+	struct mem_cgroup *mem;
+
+	mem = mem_cgroup_from_cont(cont);
+	s = buf;
+	val = mem->control_type;
+	s += sprintf(s, "%lu\n", val);
+	return simple_read_from_buffer((void __user *)userbuf, nbytes,
+			ppos, buf, s - buf);
+}
+
 static struct cftype mem_cgroup_files[] = {
 	{
 		.name = "usage_in_bytes",
@@ -442,6 +528,11 @@ static struct cftype mem_cgroup_files[] = {
 		.private = RES_FAILCNT,
 		.read = mem_cgroup_read,
 	},
+	{
+		.name = "control_type",
+		.write = mem_control_type_write,
+		.read = mem_control_type_read,
+	},
 };
 
 static struct mem_cgroup init_mem_cgroup;
@@ -464,6 +555,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 	INIT_LIST_HEAD(&mem->active_list);
 	INIT_LIST_HEAD(&mem->inactive_list);
 	spin_lock_init(&mem->lru_lock);
+	mem->control_type = MEM_CGROUP_TYPE_ALL;
 	return &mem->css;
 }
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f96e3ff1e791..88258869c8e7 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -78,7 +78,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 
 	error = radix_tree_preload(gfp_mask);
 	if (!error) {
-		error = mem_cgroup_charge(page, current->mm);
+		error = mem_cgroup_cache_charge(page, current->mm);
 		if (error)
 			goto out;
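
As a usage illustration only (not part of the patch): the commit message
drives the new file with echo -n, and the write handler above is why the -n
matters, since simple_strtoul() must consume the whole buffer and a trailing
newline makes it return -EINVAL. A minimal userspace sketch in C, assuming
the file shows up at /cgroups/mem_control_type (a hypothetical path; the
real location depends on where and how the cgroup hierarchy is mounted):

	/*
	 * Hypothetical usage sketch, not part of the patch: flip a memory
	 * cgroup to mapped-only accounting and back.  The path below is an
	 * assumption for illustration; substitute wherever the hierarchy
	 * is actually mounted.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int set_control_type(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);
		ssize_t n;

		if (fd < 0) {
			perror("open");
			return -1;
		}
		/* Like `echo -n`: no trailing newline, or the write fails. */
		n = write(fd, val, strlen(val));
		if (n < 0)
			perror("write");
		close(fd);
		return n < 0 ? -1 : 0;
	}

	int main(void)
	{
		const char *path = "/cgroups/mem_control_type"; /* hypothetical */

		if (set_control_type(path, "1"))	/* MEM_CGROUP_TYPE_MAPPED */
			return 1;
		if (set_control_type(path, "3"))	/* MEM_CGROUP_TYPE_ALL (default) */
			return 1;
		return 0;
	}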