author		Shakeel Butt <shakeel.butt@linux.dev>	2024-12-10 07:06:57 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2024-12-19 20:13:01 +0300
commit		825bccd94343a59eaeacacfa1c8bb3ae7d625fb1 (patch)
tree		07911cee18615691d07ac6dcb119546760a89b3d /mm/slub.c
parent		6e564f2ae92414f506a7e92fca529b8f1c18211c (diff)
download	linux-825bccd94343a59eaeacacfa1c8bb3ae7d625fb1.tar.xz
memcg: slub: fix SUnreclaim for post charged objects
commit b7ffecbe198e2dfc44abf92ceb90f46150f7527a upstream.

Large kmalloc directly allocates from the page allocator and then uses
lruvec_stat_mod_folio() to increment the unreclaimable slab stats for
global and memcg. However, when post memcg charging of slab objects was
added in commit 9028cdeb38e1 ("memcg: add charging of already allocated
slab objects"), it missed correctly handling the unreclaimable slab
stats for memcg.

One user-visible effect of that bug is that the node-level unreclaimable
slab stat works correctly, but the memcg-level stat can underflow: the
kernel correctly handles the free path, while the charge path misses
incrementing the memcg-level unreclaimable slab stat.

Let's fix this by handling the stats correctly in the post charge code
path.

Fixes: 9028cdeb38e1 ("memcg: add charging of already allocated slab objects")
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
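For context, the allocation-side accounting the message refers to looks
roughly like the following. This is a simplified sketch based on the
description above, not the exact mm/slub.c code; the function name is
illustrative:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmstat.h>

/*
 * Sketch of the large-kmalloc allocation path: the allocation goes
 * straight to the page allocator, and the unreclaimable slab stat is
 * then bumped via lruvec_stat_mod_folio().
 *
 * If the folio has no memcg attached yet (the charge may happen later
 * through memcg_slab_post_charge()), lruvec_stat_mod_folio() here
 * effectively updates only the node-level counter. That asymmetry is
 * what the fix below compensates for.
 */
static void *large_kmalloc_sketch(size_t size, gfp_t flags, int node)
{
	unsigned int order = get_order(size);
	struct page *page = alloc_pages_node(node, flags, order);
	struct folio *folio;

	if (!page)
		return NULL;

	folio = page_folio(page);
	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
			      PAGE_SIZE << order);
	return folio_address(folio);
}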
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	21
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 15ba89fef89a..b9447a955f61 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2199,9 +2199,24 @@ bool memcg_slab_post_charge(void *p, gfp_t flags)
 
 	folio = virt_to_folio(p);
 	if (!folio_test_slab(folio)) {
-		return folio_memcg_kmem(folio) ||
-			(__memcg_kmem_charge_page(folio_page(folio, 0), flags,
-						  folio_order(folio)) == 0);
+		int size;
+
+		if (folio_memcg_kmem(folio))
+			return true;
+
+		if (__memcg_kmem_charge_page(folio_page(folio, 0), flags,
+					     folio_order(folio)))
+			return false;
+
+		/*
+		 * This folio has already been accounted in the global stats but
+		 * not in the memcg stats. So, subtract from the global and use
+		 * the interface which adds to both global and memcg stats.
+		 */
+		size = folio_size(folio);
+		node_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, -size);
+		lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, size);
+		return true;
 	}
 
 	slab = folio_slab(folio);
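Why the memcg-level stat can underflow without this fix: the free path
for large kmalloc decrements NR_SLAB_UNRECLAIMABLE_B through the
combined interface. A rough sketch of that path, based on the commit
message (illustrative, not copied from the tree):

/*
 * Sketch of the large-kmalloc free path. lruvec_stat_mod_folio()
 * decrements both the node-level and the memcg-level counters once
 * the folio is charged to a memcg.
 */
static void free_large_kmalloc_sketch(struct folio *folio)
{
	unsigned int order = folio_order(folio);

	/*
	 * If the post-charge path only ever incremented the node-level
	 * counter (the bug fixed above), the memcg-level counter goes
	 * negative here.
	 */
	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
			      -(PAGE_SIZE << order));
	folio_put(folio);
}

With the fix, the post-charge path subtracts the node-only amount
accounted at allocation time and re-adds it through
lruvec_stat_mod_folio(), so both counters end up consistent and the
decrement on free balances out.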