author	Chengming Zhou <zhouchengming@bytedance.com>	2024-02-20 09:53:00 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2024-02-24 04:48:32 +0300
commit	a6a8cdfdde43b6231dd1ed61b8a7a6ee55956376 (patch)
tree	d2ad77f2839fbe2178dd49c145d80875fd2df50c
parent	f6f3f27597864d38bce5b4c346494970ecc19544 (diff)
mm/zsmalloc: remove set_zspage_mapping()
Patch series "mm/zsmalloc: some cleanup for get/set_zspage_mapping()".

The discussion[1] with Sergey shows there is some cleanup work to do in
get/set_zspage_mapping():

- the fullness returned from get_zspage_mapping() is not stable outside
  pool->lock; this usage pattern is confusing, but should be OK in the
  free_zspage path.

- the class_idx returned from get_zspage_mapping() is seldom used; only
  the free_zspage path uses it to look up the zspage's class.

- set_zspage_mapping() always sets zspage->class, but the class is never
  changed after the zspage is allocated.

[1] https://lore.kernel.org/all/a6c22e30-cf10-4122-91bc-ceb9fb57a5d6@bytedance.com/

This patch (of 3):

We only need to update zspage->fullness in insert_zspage(), since
zspage->class is never changed after the zspage is allocated.

Link: https://lkml.kernel.org/r/20240220-b4-zsmalloc-cleanup-v1-0-5c5ee4ccdd87@bytedance.com
Link: https://lkml.kernel.org/r/20240220-b4-zsmalloc-cleanup-v1-1-5c5ee4ccdd87@bytedance.com
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
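To make the invariant behind this cleanup explicit, here is a minimal, stand-alone C sketch (a stripped-down zspage struct and hypothetical *_sketch helpers, not the kernel code itself): zspage->class is written exactly once when the zspage is allocated, while zspage->fullness is rewritten each time the zspage is inserted into a fullness list, so insert_zspage() is the only place that needs to update it and set_zspage_mapping() becomes redundant.

/* Illustrative sketch only: stripped-down struct and hypothetical helpers
 * mirroring the post-patch bookkeeping; list and stat handling is omitted. */
struct zspage_sketch {
	unsigned int class;	/* written once, at allocation time */
	int fullness;		/* rewritten on every (re)insertion */
};

/* Mirrors what alloc_zspage() does after this patch: record the class once. */
static void alloc_zspage_sketch(struct zspage_sketch *zspage, unsigned int class_idx)
{
	zspage->class = class_idx;
}

/* Mirrors what insert_zspage() does after this patch: the fullness update
 * happens here, so callers no longer need set_zspage_mapping(). */
static void insert_zspage_sketch(struct zspage_sketch *zspage, int fullness)
{
	zspage->fullness = fullness;
}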
-rw-r--r--	mm/zsmalloc.c	13
1 file changed, 2 insertions, 11 deletions
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index a48f4651d143..a6653915bf17 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -486,14 +486,6 @@ static struct size_class *zspage_class(struct zs_pool *pool,
 	return pool->size_class[zspage->class];
 }
 
-static void set_zspage_mapping(struct zspage *zspage,
-			       unsigned int class_idx,
-			       int fullness)
-{
-	zspage->class = class_idx;
-	zspage->fullness = fullness;
-}
-
 /*
  * zsmalloc divides the pool into various size classes where each
  * class maintains a list of zspages where each zspage is divided
@@ -688,6 +680,7 @@ static void insert_zspage(struct size_class *class,
 {
 	class_stat_inc(class, fullness, 1);
 	list_add(&zspage->list, &class->fullness_list[fullness]);
+	zspage->fullness = fullness;
 }
 
 /*
@@ -725,7 +718,6 @@ static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
 
 	remove_zspage(class, zspage, currfg);
 	insert_zspage(class, zspage, newfg);
-	set_zspage_mapping(zspage, class_idx, newfg);
 out:
 	return newfg;
 }
@@ -1005,6 +997,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 	create_page_chain(class, zspage, pages);
 	init_zspage(class, zspage);
 	zspage->pool = pool;
+	zspage->class = class->index;
 
 	return zspage;
 }
@@ -1397,7 +1390,6 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	obj = obj_malloc(pool, zspage, handle);
 	newfg = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, newfg);
-	set_zspage_mapping(zspage, class->index, newfg);
 	record_obj(handle, obj);
 	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
 	class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
@@ -1655,7 +1647,6 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
 
 	fullness = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, fullness);
-	set_zspage_mapping(zspage, class->index, fullness);
 
 	return fullness;
 }