| author | Mel Gorman <mgorman@techsingularity.net> | 2016-07-29 01:46:05 +0300 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-29 02:07:41 +0300 |
| commit | ef8f2327996b5c20f11420f64e439e87c7a01604 (patch) | |
| tree | 0ea9bf78d88e1207005fc5310fe812d1edb0efc2 /include | |
| parent | a9dd0a83104c01269ea36a9b4ec42b51edf85427 (diff) | |
| download | linux-ef8f2327996b5c20f11420f64e439e87c7a01604.tar.xz | |
mm, memcg: move memcg limit enforcement from zones to nodes
Memcg needs adjustment after moving LRUs to the node. Limits are
tracked per memcg but the soft-limit excess is tracked per zone. As
global page reclaim is based on the node, it is easy to imagine a
situation where a zone soft limit is exceeded even though the memcg
limit is fine.
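To make the mismatch concrete, here is a small userspace model, illustrative only and not kernel code: plain counters stand in for the old per-zone soft-limit trees, and the output shows how a single zone's view can disagree with the node-wide picture that node-based reclaim actually acts on.

```c
#include <stdio.h>

#define MAX_NR_ZONES 3	/* illustrative; real value is per-arch */

int main(void)
{
	/* Old layout: one soft-limit record (here, a counter) per zone. */
	unsigned long zone_excess[MAX_NR_ZONES] = { 0, 128, 0 };

	/*
	 * Reclaim that consults a single zone's record can see zero
	 * excess (zone 2) or a large excess (zone 1) for the same memcg
	 * on the same node, even though the memcg-wide limit is one
	 * number. The per-zone view and the node view disagree.
	 */
	printf("zone 1 view: %lu, zone 2 view: %lu\n",
	       zone_excess[1], zone_excess[2]);

	/* New layout: a single per-node record matches per-node reclaim. */
	unsigned long node_excess = 0;
	for (int z = 0; z < MAX_NR_ZONES; z++)
		node_excess += zone_excess[z];
	printf("node view: %lu\n", node_excess);
	return 0;
}
```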
This patch moves the soft limit tree to the node. Technically, all the
variable names should also change, but people are already familiar with the
meaning of "mz" even if "mn" would be a more appropriate name now.
Link: http://lkml.kernel.org/r/1467970510-21195-15-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/memcontrol.h | 38 |
|---|---|---|
| -rw-r--r-- | include/linux/swap.h | 2 |
2 files changed, 16 insertions, 24 deletions
```diff
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f4963ee4fdbc..b759827b2f1e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -60,7 +60,7 @@ enum mem_cgroup_stat_index {
 };
 
 struct mem_cgroup_reclaim_cookie {
-	struct zone *zone;
+	pg_data_t *pgdat;
 	int priority;
 	unsigned int generation;
 };
@@ -118,7 +118,7 @@ struct mem_cgroup_reclaim_iter {
 /*
  * per-zone information in memory controller.
  */
-struct mem_cgroup_per_zone {
+struct mem_cgroup_per_node {
 	struct lruvec lruvec;
 	unsigned long lru_size[NR_LRU_LISTS];
 
@@ -132,10 +132,6 @@ struct mem_cgroup_per_zone {
 						/* use container_of */
 };
 
-struct mem_cgroup_per_node {
-	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
-};
-
 struct mem_cgroup_threshold {
 	struct eventfd_ctx *eventfd;
 	unsigned long threshold;
@@ -314,19 +310,15 @@ void mem_cgroup_uncharge_list(struct list_head *page_list);
 
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
 
-static inline struct mem_cgroup_per_zone *
-mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
+static struct mem_cgroup_per_node *
+mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
 {
-	int nid = zone_to_nid(zone);
-	int zid = zone_idx(zone);
-
-	return &memcg->nodeinfo[nid]->zoneinfo[zid];
+	return memcg->nodeinfo[nid];
 }
 
 /**
  * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone
  * @node: node of the wanted lruvec
- * @zone: zone of the wanted lruvec
  * @memcg: memcg of the wanted lruvec
  *
  * Returns the lru list vector holding pages for a given @node or a given
@@ -334,9 +326,9 @@ mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
  * is disabled.
  */
 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
-				struct zone *zone, struct mem_cgroup *memcg)
+				struct mem_cgroup *memcg)
 {
-	struct mem_cgroup_per_zone *mz;
+	struct mem_cgroup_per_node *mz;
 	struct lruvec *lruvec;
 
 	if (mem_cgroup_disabled()) {
@@ -344,7 +336,7 @@ static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
 		goto out;
 	}
 
-	mz = mem_cgroup_zone_zoneinfo(memcg, zone);
+	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
 	lruvec = &mz->lruvec;
 out:
 	/*
@@ -352,8 +344,8 @@ out:
 	 * we have to be prepared to initialize lruvec->pgdat here;
 	 * and if offlined then reonlined, we need to reinitialize it.
 	 */
-	if (unlikely(lruvec->pgdat != zone->zone_pgdat))
-		lruvec->pgdat = zone->zone_pgdat;
+	if (unlikely(lruvec->pgdat != pgdat))
+		lruvec->pgdat = pgdat;
 	return lruvec;
 }
 
@@ -446,9 +438,9 @@ unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 static inline
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
-	struct mem_cgroup_per_zone *mz;
+	struct mem_cgroup_per_node *mz;
 
-	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 	return mz->lru_size[lru];
 }
 
@@ -519,7 +511,7 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 	mem_cgroup_update_page_stat(page, idx, -1);
 }
 
-unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 						gfp_t gfp_mask,
 						unsigned long *total_scanned);
 
@@ -611,7 +603,7 @@ static inline void mem_cgroup_migrate(struct page *old, struct page *new)
 }
 
 static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
-				struct zone *zone, struct mem_cgroup *memcg)
+				struct mem_cgroup *memcg)
 {
 	return node_lruvec(pgdat);
 }
@@ -723,7 +715,7 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 }
 
 static inline
-unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 					    gfp_t gfp_mask,
 					    unsigned long *total_scanned)
 {
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 0ad616d7c381..2a23ddc96edd 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -318,7 +318,7 @@ extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 						  bool may_swap);
 extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
 						gfp_t gfp_mask, bool noswap,
-						struct zone *zone,
+						pg_data_t *pgdat,
 						unsigned long *nr_scanned);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
```