author     Chengming Zhou <zhouchengming@bytedance.com>   2024-02-19 16:33:51 +0300
committer  Andrew Morton <akpm@linux-foundation.org>      2024-02-24 04:48:30 +0300
commit     568b567f78acbe64d2e91b7e58a85feb6846434c (patch)
tree       26c0b065007de9f5619214dda6483a316e31facd /mm/zsmalloc.c
parent     75c40c2509e797830dd90d92568262ba69a89c9c (diff)
download   linux-568b567f78acbe64d2e91b7e58a85feb6846434c.tar.xz
mm/zsmalloc: fix migrate_write_lock() when !CONFIG_COMPACTION
Patch series "mm/zsmalloc: fix and optimize objects/page migration".
This series fixes and optimizes zsmalloc object/page migration.
This patch (of 3):
migrate_write_lock() is an empty function when !CONFIG_COMPACTION, yet even in
that case zs_compact() can still be triggered from the shrinker reclaim
context. (Maybe it's better to rename it to zs_shrink()?)
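For context, a condensed sketch of the reclaim path that reaches zs_compact()
even without CONFIG_COMPACTION (simplified, not the verbatim kernel code; the
exact shrinker registration plumbing varies across kernel versions):

#include <linux/shrinker.h>

/*
 * Condensed sketch: under memory pressure the shrinker's scan
 * callback runs and compacts the pool, so zs_compact() executes
 * regardless of CONFIG_COMPACTION.
 */
static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
				      struct shrink_control *sc)
{
	struct zs_pool *pool = shrinker->private_data;
	unsigned long pages_freed = zs_compact(pool);

	/* zs_compact() moves objects, hence needs migrate_write_lock(). */
	return pages_freed ? pages_freed : SHRINK_STOP;
}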
Meanwhile, zspage object map users rely on migrate_read_lock() to guarantee
that a mapped object won't be migrated elsewhere.
Fix it by always implementing the migrate_write_lock() related functions,
regardless of CONFIG_COMPACTION.
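To make the failure mode concrete, here is a small self-contained userspace
model (hypothetical names; a pthread rwlock stands in for the kernel's
rwlock_t) of the mapper/migration exclusion the patch restores:

#include <pthread.h>
#include <stdio.h>

/*
 * Userspace model of the zspage migrate lock (hypothetical; the
 * kernel embeds an rwlock_t in struct zspage). Mappers take the
 * read lock; migration/compaction must take the write lock. If the
 * write-lock functions compile to empty stubs (the bug when
 * !CONFIG_COMPACTION), migration can move an object while a mapper
 * still holds it mapped.
 */
struct zspage_model {
	pthread_rwlock_t lock;
};

static void migrate_read_lock(struct zspage_model *z)
{
	pthread_rwlock_rdlock(&z->lock);
}

static void migrate_read_unlock(struct zspage_model *z)
{
	pthread_rwlock_unlock(&z->lock);
}

/* The fix: these must be real locks even when compaction is off. */
static void migrate_write_lock(struct zspage_model *z)
{
	pthread_rwlock_wrlock(&z->lock);
}

static void migrate_write_unlock(struct zspage_model *z)
{
	pthread_rwlock_unlock(&z->lock);
}

int main(void)
{
	struct zspage_model z;

	pthread_rwlock_init(&z.lock, NULL);

	migrate_read_lock(&z);		/* mapper pins the object */
	/* migrate_write_lock(&z) would block here, as it must */
	migrate_read_unlock(&z);

	migrate_write_lock(&z);		/* compaction may now move objects */
	migrate_write_unlock(&z);

	pthread_rwlock_destroy(&z.lock);
	puts("exclusion model ok");
	return 0;
}

With the pre-fix empty stubs, the write-lock call in this model would return
immediately and "migration" could proceed while the read lock was still held.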
Link: https://lkml.kernel.org/r/20240219-b4-szmalloc-migrate-v1-0-34cd49c6545b@bytedance.com
Link: https://lkml.kernel.org/r/20240219-b4-szmalloc-migrate-v1-1-34cd49c6545b@bytedance.com
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/zsmalloc.c')
-rw-r--r--   mm/zsmalloc.c   9
1 file changed, 3 insertions, 6 deletions
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c937635e0ad1..64d5533fa5d8 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -278,18 +278,15 @@ static bool ZsHugePage(struct zspage *zspage)
 static void migrate_lock_init(struct zspage *zspage);
 static void migrate_read_lock(struct zspage *zspage);
 static void migrate_read_unlock(struct zspage *zspage);
-
-#ifdef CONFIG_COMPACTION
 static void migrate_write_lock(struct zspage *zspage);
 static void migrate_write_lock_nested(struct zspage *zspage);
 static void migrate_write_unlock(struct zspage *zspage);
+
+#ifdef CONFIG_COMPACTION
 static void kick_deferred_free(struct zs_pool *pool);
 static void init_deferred_free(struct zs_pool *pool);
 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
 #else
-static void migrate_write_lock(struct zspage *zspage) {}
-static void migrate_write_lock_nested(struct zspage *zspage) {}
-static void migrate_write_unlock(struct zspage *zspage) {}
 static void kick_deferred_free(struct zs_pool *pool) {}
 static void init_deferred_free(struct zs_pool *pool) {}
 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
@@ -1725,7 +1722,6 @@ static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
 	read_unlock(&zspage->lock);
 }
 
-#ifdef CONFIG_COMPACTION
 static void migrate_write_lock(struct zspage *zspage)
 {
 	write_lock(&zspage->lock);
@@ -1741,6 +1737,7 @@ static void migrate_write_unlock(struct zspage *zspage)
 	write_unlock(&zspage->lock);
 }
 
+#ifdef CONFIG_COMPACTION
 /* Number of isolated subpage for *page migration* in this zspage */
 static void inc_zspage_isolation(struct zspage *zspage)
 {