author		Minchan Kim <minchan@kernel.org>	2023-06-24 08:12:16 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2023-08-18 20:12:08 +0300
commit		ada5caed79b313b3046839c9de9bf9048561e4bb (patch)
tree		f2e6ff2e1bd79ffd34a5f8b281490b063d04716a /mm
parent		4ce36584da19ff6a5d171a317c6c34c567d4628e (diff)
download	linux-ada5caed79b313b3046839c9de9bf9048561e4bb.tar.xz
zsmalloc: remove zs_compact_control
__zs_compact() always puts src_zspage back into the class list after migrate_zspage(), so we don't need to keep the last position of src_zspage any more. Let's remove it.

Link: https://lkml.kernel.org/r/20230624053120.643409-4-senozhatsky@chromium.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Acked-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Alexey Romanov <avromanov@sberdevices.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/zsmalloc.c	37
1 file changed, 9 insertions(+), 28 deletions(-)
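For context, here is a condensed before/after view of the call site in __zs_compact(), assembled from the hunks below; this is a sketch with locking calls and unrelated lines omitted, not the full code:

/* Before: the caller carried per-iteration state in zs_compact_control. */
struct zs_compact_control cc;

cc.d_page  = get_first_page(dst_zspage);
cc.obj_idx = 0;
cc.s_page  = get_first_page(src_zspage);
migrate_zspage(pool, class, &cc);

/* After: migrate_zspage() takes the two zspages directly, looks up the
 * size class via pool->size_class[src_zspage->class], and always starts
 * scanning the source at object index 0, since src_zspage is put back
 * into the class list after every call and never resumed part-way. */
migrate_zspage(pool, src_zspage, dst_zspage);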
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 4a84f7877669..84beadc088b8 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1590,25 +1590,14 @@ static unsigned long find_alloced_obj(struct size_class *class,
return find_tagged_obj(class, page, obj_idx, OBJ_ALLOCATED_TAG);
}
-struct zs_compact_control {
- /* Source spage for migration which could be a subpage of zspage */
- struct page *s_page;
- /* Destination page for migration which should be a first page
- * of zspage. */
- struct page *d_page;
- /* Starting object index within @s_page which used for live object
- * in the subpage. */
- int obj_idx;
-};
-
-static void migrate_zspage(struct zs_pool *pool, struct size_class *class,
- struct zs_compact_control *cc)
+static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
+ struct zspage *dst_zspage)
{
unsigned long used_obj, free_obj;
unsigned long handle;
- struct page *s_page = cc->s_page;
- struct page *d_page = cc->d_page;
- int obj_idx = cc->obj_idx;
+ int obj_idx = 0;
+ struct page *s_page = get_first_page(src_zspage);
+ struct size_class *class = pool->size_class[src_zspage->class];
while (1) {
handle = find_alloced_obj(class, s_page, &obj_idx);
@@ -1621,24 +1610,20 @@ static void migrate_zspage(struct zs_pool *pool, struct size_class *class,
}
used_obj = handle_to_obj(handle);
- free_obj = obj_malloc(pool, get_zspage(d_page), handle);
+ free_obj = obj_malloc(pool, dst_zspage, handle);
zs_object_copy(class, free_obj, used_obj);
obj_idx++;
record_obj(handle, free_obj);
obj_free(class->size, used_obj);
/* Stop if there is no more space */
- if (zspage_full(class, get_zspage(d_page)))
+ if (zspage_full(class, dst_zspage))
break;
/* Stop if there are no more objects to migrate */
- if (zspage_empty(get_zspage(s_page)))
+ if (zspage_empty(src_zspage))
break;
}
-
- /* Remember last position in this iteration */
- cc->s_page = s_page;
- cc->obj_idx = obj_idx;
}
static struct zspage *isolate_src_zspage(struct size_class *class)
@@ -2013,7 +1998,6 @@ static unsigned long zs_can_compact(struct size_class *class)
static unsigned long __zs_compact(struct zs_pool *pool,
struct size_class *class)
{
- struct zs_compact_control cc;
struct zspage *src_zspage = NULL;
struct zspage *dst_zspage = NULL;
unsigned long pages_freed = 0;
@@ -2031,7 +2015,6 @@ static unsigned long __zs_compact(struct zs_pool *pool,
if (!dst_zspage)
break;
migrate_write_lock(dst_zspage);
- cc.d_page = get_first_page(dst_zspage);
}
src_zspage = isolate_src_zspage(class);
@@ -2040,9 +2023,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
migrate_write_lock_nested(src_zspage);
- cc.obj_idx = 0;
- cc.s_page = get_first_page(src_zspage);
- migrate_zspage(pool, class, &cc);
+ migrate_zspage(pool, src_zspage, dst_zspage);
fg = putback_zspage(class, src_zspage);
migrate_write_unlock(src_zspage);