author     Jakub Kicinski <kuba@kernel.org>    2025-08-20 05:21:01 +0300
committer  Jakub Kicinski <kuba@kernel.org>    2025-08-20 05:21:01 +0300
commit     f9ca2820f51867aba6a55c64fcd264d1efd69449 (patch)
tree       301d2429a3fb600e781c1127c1c2d75759ab0f7a /include/linux
parent     5c69e0b395c1ffb37fd6fbdbd428353fc0894005 (diff)
parent     bf64002c94fc330b996bc438f3d1b6bd3d781659 (diff)
download   linux-f9ca2820f51867aba6a55c64fcd264d1efd69449.tar.xz
Merge branch 'net-memcg-gather-memcg-code-under-config_memcg'
Kuniyuki Iwashima says:

====================
net-memcg: Gather memcg code under CONFIG_MEMCG.

This series converts most sk->sk_memcg access to helper functions
under CONFIG_MEMCG and finally defines sk_memcg under CONFIG_MEMCG.

This is v5 of the series linked below but without core changes that
decoupled memcg and global socket memory accounting. I will defer the
changes to a follow-up series that will use BPF to store a flag in
sk->sk_memcg.

Overview of the series:

  patch 1 is a trivial fix for MPTCP
  patch 2 ~ 9 move sk->sk_memcg accesses to a single place
  patch 10 moves sk_memcg under CONFIG_MEMCG

v4: https://lore.kernel.org/20250814200912.1040628-1-kuniyu@google.com
v3: https://lore.kernel.org/20250812175848.512446-1-kuniyu@google.com
v2: https://lore.kernel.org/20250811173116.2829786-1-kuniyu@google.com
v1: https://lore.kernel.org/20250721203624.3807041-1-kuniyu@google.com
====================

Link: https://patch.msgid.link/20250815201712.1745332-1-kuniyu@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
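
For orientation, a minimal sketch of how a call site changes once the charge
helper takes the socket instead of a raw memcg pointer. The surrounding
function name and the GFP_KERNEL flag are assumptions made for the example;
only mem_cgroup_sockets_enabled, the old mem_cgroup_charge_skmem() signature
and the new mem_cgroup_sk_charge() signature come from the diff below.

/*
 * Illustrative sketch only, not part of this patch.
 * sk_reserve_pages_sketch() and GFP_KERNEL are assumptions; the helper
 * signatures are taken from the memcontrol.h hunk below.
 */
#include <linux/memcontrol.h>
#include <net/sock.h>

static bool sk_reserve_pages_sketch(struct sock *sk, unsigned int nr_pages)
{
	/* Old shape: the caller dereferenced sk->sk_memcg itself, e.g.
	 *   mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages, GFP_KERNEL);
	 */

	/* New shape: pass the socket; sk->sk_memcg is only touched inside
	 * memcontrol code, which is what lets it move under CONFIG_MEMCG.
	 * With CONFIG_MEMCG=n, mem_cgroup_sockets_enabled is 0 and the
	 * stub below is never reached.
	 */
	if (!mem_cgroup_sockets_enabled)
		return true;

	return mem_cgroup_sk_charge(sk, nr_pages, GFP_KERNEL);
}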
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/memcontrol.h  45
1 file changed, 26 insertions(+), 19 deletions(-)
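
Along the same lines, a minimal sketch of how a socket-clone path might use
the new mem_cgroup_sk_inherit() helper. The clone function name here is an
assumption; the helper itself is declared in the hunk below and compiles to
an empty stub when CONFIG_MEMCG is not set.

/*
 * Illustrative sketch only: sk_clone_memcg_sketch() is an assumed name;
 * mem_cgroup_sk_inherit() is declared in the memcontrol.h hunk below.
 */
static void sk_clone_memcg_sketch(const struct sock *parent, struct sock *child)
{
	/* Instead of touching parent->sk_memcg and child->sk_memcg by hand,
	 * the caller hands both sockets to the helper and stays free of
	 * #ifdef CONFIG_MEMCG.
	 */
	mem_cgroup_sk_inherit(parent, child);
}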
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 785173aa0739..fb27e3d2fdac 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1596,14 +1596,16 @@ static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
- gfp_t gfp_mask);
-void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
+
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
+void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk);
+bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
+ gfp_t gfp_mask);
+void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages);
#if BITS_PER_LONG < 64
static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
@@ -1640,32 +1642,37 @@ static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
}
#endif
-static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
-{
-#ifdef CONFIG_MEMCG_V1
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
- return !!memcg->tcpmem_pressure;
-#endif /* CONFIG_MEMCG_V1 */
- do {
- if (time_before64(get_jiffies_64(), mem_cgroup_get_socket_pressure(memcg)))
- return true;
- } while ((memcg = parent_mem_cgroup(memcg)));
- return false;
-}
-
int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
-static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
-static inline void mem_cgroup_sk_free(struct sock *sk) { };
-static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
+
+static inline void mem_cgroup_sk_alloc(struct sock *sk)
+{
+}
+
+static inline void mem_cgroup_sk_free(struct sock *sk)
+{
+}
+
+static inline void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
+{
+}
+
+static inline bool mem_cgroup_sk_charge(const struct sock *sk,
+ unsigned int nr_pages,
+ gfp_t gfp_mask)
{
return false;
}
+static inline void mem_cgroup_sk_uncharge(const struct sock *sk,
+ unsigned int nr_pages)
+{
+}
+
static inline void set_shrinker_bit(struct mem_cgroup *memcg,
int nid, int shrinker_id)
{