author     Yang Shi <shy828301@gmail.com>                  2020-12-15 06:13:13 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-12-15 23:13:45 +0300
commit     236c32eb109696590b7428957eda50cc05e22af8
tree       a5585ded73d7e9a1d5ce66732f5911a47302f727 /mm
parent     c77c5cbafe549eb330e8909861a3e16cbda2c848
mm: migrate: clean up migrate_prep{_local}
The migrate_prep{_local}() functions never fail, so it is pointless for
them to return a value or for the callers to check it.  Make both return
void and drop the error handling at the call sites.
Link: https://lkml.kernel.org/r/20201113205359.556831-5-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Song Liu <songliubraving@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
 mm/mempolicy.c | 8 ++------
 mm/migrate.c   | 8 ++------
 2 files changed, 4 insertions(+), 12 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3ca4898f3f24..8cf96bd21341 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1114,9 +1114,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 	int err;
 	nodemask_t tmp;
 
-	err = migrate_prep();
-	if (err)
-		return err;
+	migrate_prep();
 
 	mmap_read_lock(mm);
 
@@ -1315,9 +1313,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 
-		err = migrate_prep();
-		if (err)
-			goto mpol_out;
+		migrate_prep();
 	}
 	{
 		NODEMASK_SCRATCH(scratch);
diff --git a/mm/migrate.c b/mm/migrate.c
index a7ccda55ca82..76e2a6f3749f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -62,7 +62,7 @@
  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
  * undesirable, use migrate_prep_local()
  */
-int migrate_prep(void)
+void migrate_prep(void)
 {
 	/*
 	 * Clear the LRU lists so pages can be isolated.
@@ -71,16 +71,12 @@ int migrate_prep(void)
 	 * pages that may be busy.
 	 */
 	lru_add_drain_all();
-
-	return 0;
 }
 
 /* Do the necessary work of migrate_prep but not if it involves other CPUs */
-int migrate_prep_local(void)
+void migrate_prep_local(void)
 {
 	lru_add_drain();
-
-	return 0;
 }
 
 int isolate_movable_page(struct page *page, isolate_mode_t mode)
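For readers outside mm/, the minimal standalone sketch below illustrates the
pattern this patch applies. It is plain userspace C, not kernel code, and the
names drain_caches() and move_items() are hypothetical stand-ins for
migrate_prep() and its callers: once a helper provably cannot fail, declaring
it void lets every caller drop its dead error-handling branch.

/*
 * Standalone illustration (NOT kernel code): a helper that cannot fail
 * is declared void, so its callers carry no dead error path.  The names
 * drain_caches() and move_items() are hypothetical stand-ins.
 */
#include <stdio.h>

/* Before this kind of cleanup: "int drain_caches(void)" always returned 0. */
static void drain_caches(void)
{
	/* Flush per-CPU caches; nothing in here can fail. */
	puts("caches drained");
}

static int move_items(void)
{
	/*
	 * Old caller pattern:
	 *	err = drain_caches();
	 *	if (err)
	 *		return err;
	 * New caller pattern: just call it.
	 */
	drain_caches();

	/* The work that can still fail keeps its own error reporting. */
	return 0;
}

int main(void)
{
	return move_items();
}

The same reasoning drives the diff above: migrate_prep() and
migrate_prep_local() only drain the LRU caches via lru_add_drain_all() and
lru_add_drain(), so they have no failure mode to report.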