path: root/include/linux
author     Vladimir Davydov <vdavydov@virtuozzo.com>  2016-01-21 02:03:10 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-01-21 04:09:18 +0300
commit     5ccc5abaaf6f9242cc63342c5286990233f392fa (patch)
tree       14406eb21280bafa4a447d4b45ea5bd6caa354af /include/linux
parent     d8b38438a0bcb362c396f49d8279ef7b505917f4 (diff)
download   linux-5ccc5abaaf6f9242cc63342c5286990233f392fa.tar.xz
mm: free swap cache aggressively if memcg swap is full
Swap cache pages are freed aggressively if swap is nearly full (more than 50% used, currently), because otherwise we are likely to stop scanning anonymous pages when we near the swap limit even if there are plenty of freeable swap cache pages. We should follow the same trend in the case of a memory cgroup, which has its own swap limit.

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
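To make the "more than 50% used" heuristic concrete, here is a minimal userspace sketch (not kernel code); the function name swap_nearly_full() and the integer-only comparison are assumptions made purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Swap is treated as nearly full once used pages exceed half of the total.
 * Multiplying by two avoids integer-division rounding. */
static bool swap_nearly_full(unsigned long used_pages, unsigned long total_pages)
{
        return used_pages * 2 > total_pages;
}

int main(void)
{
        printf("%d\n", swap_nearly_full(600, 1000));   /* 1: 60% used */
        printf("%d\n", swap_nearly_full(400, 1000));   /* 0: 40% used */
        return 0;
}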
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/swap.h  6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a587050204f9..d18b65c53dbb 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -552,6 +552,7 @@ extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
+extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
@@ -571,6 +572,11 @@ static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
return get_nr_swap_pages();
}
+
+static inline bool mem_cgroup_swap_full(struct page *page)
+{
+ return vm_swap_full();
+}
#endif
#endif /* __KERNEL__*/
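A hedged usage sketch of the new helper, with struct page and the helpers stubbed out so it compiles in userspace; the caller name should_drop_swap_cache() is hypothetical and not part of this patch. With memcg swap accounting disabled, mem_cgroup_swap_full() simply falls back to vm_swap_full(), as the #else branch in the diff above shows.

#include <stdbool.h>
#include <stdio.h>

struct page { int dummy; };             /* stand-in for the kernel's struct page */

/* Stubs: the real helpers live in the kernel. */
static bool vm_swap_full(void) { return false; }
static bool mem_cgroup_swap_full(struct page *page)
{
        (void)page;
        return vm_swap_full();          /* fallback when memcg swap is not configured */
}

/* Hypothetical caller: a reclaim-style path that drops a swap cache page
 * only when its cgroup's swap (or global swap) is getting full. */
static bool should_drop_swap_cache(struct page *page)
{
        return mem_cgroup_swap_full(page);
}

int main(void)
{
        struct page p;
        printf("drop swap cache page: %s\n",
               should_drop_swap_cache(&p) ? "yes" : "no");
        return 0;
}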