Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  48
1 file changed, 35 insertions(+), 13 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b4624580d18a..17d38c7f630f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3157,10 +3157,12 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
return false;
}
-static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
+ bool allow_uncharge)
{
struct memcg_stock_pcp *stock;
unsigned long flags;
+ unsigned int nr_pages = 0;
local_irq_save(flags);
@@ -3169,14 +3171,21 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
drain_obj_stock(stock);
obj_cgroup_get(objcg);
stock->cached_objcg = objcg;
- stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
+ stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
+ ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
+ allow_uncharge = true; /* Allow uncharge when objcg changes */
}
stock->nr_bytes += nr_bytes;
- if (stock->nr_bytes > PAGE_SIZE)
- drain_obj_stock(stock);
+ if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
+ nr_pages = stock->nr_bytes >> PAGE_SHIFT;
+ stock->nr_bytes &= (PAGE_SIZE - 1);
+ }
local_irq_restore(flags);
+
+ if (nr_pages)
+ obj_cgroup_uncharge_pages(objcg, nr_pages);
}
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
@@ -3188,14 +3197,27 @@ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
return 0;
/*
- * In theory, memcg->nr_charged_bytes can have enough
+ * In theory, objcg->nr_charged_bytes can have enough
* pre-charged bytes to satisfy the allocation. However,
- * flushing memcg->nr_charged_bytes requires two atomic
- * operations, and memcg->nr_charged_bytes can't be big,
- * so it's better to ignore it and try grab some new pages.
- * memcg->nr_charged_bytes will be flushed in
- * refill_obj_stock(), called from this function or
- * independently later.
+ * flushing objcg->nr_charged_bytes requires two atomic
+ * operations, and objcg->nr_charged_bytes can't be big.
+ * The shared objcg->nr_charged_bytes can also become a
+ * performance bottleneck if all tasks of the same memcg are
+ * trying to update it. So it's better to ignore it and try to
+ * grab some new pages. The stock's nr_bytes will be flushed to
+ * objcg->nr_charged_bytes later on when objcg changes.
+ *
+ * The stock's nr_bytes may contain enough pre-charged bytes
+ * to allow one less page to be charged, but we can't rely
+ * on the pre-charged bytes not being changed outside of
+ * consume_obj_stock() or refill_obj_stock(). So ignore those
+ * pre-charged bytes as well when charging pages. To avoid a
+ * page uncharge right after a page charge, we set the
+ * allow_uncharge flag to false when calling refill_obj_stock()
+ * to temporarily allow the pre-charged bytes to exceed the page
+ * size limit. The maximum reachable value of the pre-charged
+ * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
+ * race.
*/
nr_pages = size >> PAGE_SHIFT;
nr_bytes = size & (PAGE_SIZE - 1);
@@ -3205,14 +3227,14 @@ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
if (!ret && nr_bytes)
- refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);
+ refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
return ret;
}
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
{
- refill_obj_stock(objcg, size);
+ refill_obj_stock(objcg, size, true);
}
#endif /* CONFIG_MEMCG_KMEM */
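
For illustration, below is a minimal userspace sketch of the byte/page bookkeeping this patch introduces, assuming a 4 KiB page. The obj_stock_sim type, the charge_sim()/refill_sim() helpers and the pages_charged counter are hypothetical stand-ins; the per-CPU stock, irq disabling, the consume_obj_stock() fast path and the real obj_cgroup_charge_pages()/obj_cgroup_uncharge_pages() calls are intentionally omitted.

/*
 * Userspace sketch only: not kernel code. It mimics the arithmetic of
 * refill_obj_stock()/obj_cgroup_charge() after this patch.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SIM_PAGE_SHIFT	12
#define SIM_PAGE_SIZE	(1UL << SIM_PAGE_SHIFT)

struct obj_stock_sim {
	size_t nr_bytes;		/* pre-charged bytes parked in the stock */
};

static long pages_charged;		/* stands in for pages charged to the memcg */

/*
 * Mirrors refill_obj_stock(): accumulate bytes in the stock and hand whole
 * pages back only when the caller allows an uncharge.
 */
static void refill_sim(struct obj_stock_sim *stock, size_t nr_bytes,
		       bool allow_uncharge)
{
	size_t nr_pages = 0;

	stock->nr_bytes += nr_bytes;
	if (allow_uncharge && stock->nr_bytes > SIM_PAGE_SIZE) {
		nr_pages = stock->nr_bytes >> SIM_PAGE_SHIFT;
		stock->nr_bytes &= SIM_PAGE_SIZE - 1;
	}
	pages_charged -= nr_pages;	/* "uncharge" the excess whole pages */
}

/*
 * Mirrors obj_cgroup_charge(): round the request up to whole pages and park
 * the unused tail of the last page in the stock with allow_uncharge=false,
 * so the page just charged is not immediately uncharged again.
 */
static void charge_sim(struct obj_stock_sim *stock, size_t size)
{
	size_t nr_pages = size >> SIM_PAGE_SHIFT;
	size_t nr_bytes = size & (SIM_PAGE_SIZE - 1);

	if (nr_bytes)
		nr_pages++;
	pages_charged += nr_pages;	/* "charge" whole pages */
	if (nr_bytes)
		refill_sim(stock, SIM_PAGE_SIZE - nr_bytes, false);
}

int main(void)
{
	struct obj_stock_sim stock = { 0 };

	/* 100-byte object: one page is charged, 3996 bytes stay in the stock. */
	charge_sim(&stock, 100);
	printf("after charge:    pages=%ld stock=%zu\n", pages_charged, stock.nr_bytes);

	/* Freeing the object refills with allow_uncharge=true; the stock now
	 * holds exactly one page worth of bytes, which the strictly-greater
	 * check leaves cached. */
	refill_sim(&stock, 100, true);
	printf("after 1st free:  pages=%ld stock=%zu\n", pages_charged, stock.nr_bytes);

	/* A further small free pushes the stock above SIM_PAGE_SIZE, so one
	 * whole page is handed back outside the (simulated) charge path. */
	refill_sim(&stock, 64, true);
	printf("after 2nd free:  pages=%ld stock=%zu\n", pages_charged, stock.nr_bytes);
	return 0;
}

One way to read the (sizeof(object) + PAGE_SIZE - 2) bound quoted in the comment: when consume_obj_stock() fails, the stock can hold at most sizeof(object) - 1 pre-charged bytes, and obj_cgroup_charge() then adds at most PAGE_SIZE - 1 more with allow_uncharge set to false, so absent races the cached pre-charge cannot exceed that sum.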