| author | Usama Arif <usama.arif@bytedance.com> | 2023-10-11 17:45:57 +0300 |
| --- | --- | --- |
| committer | Andrew Morton <akpm@linux-foundation.org> | 2023-10-26 02:47:08 +0300 |
| commit | c5ad3233ead566d74918bd76ba2dbdbe83ba1d9d (patch) | |
| tree | a1d72ceb058241463978c0f7956b363609c8ad9d /mm/hugetlb.c | |
| parent | c24f188b22892908a2a3bb2de0ce7d121dd72989 (diff) | |
| download | linux-c5ad3233ead566d74918bd76ba2dbdbe83ba1d9d.tar.xz | |
hugetlb_vmemmap: use folio argument for hugetlb_vmemmap_* functions
Most function calls in hugetlb.c are made with folio arguments. This
brings the hugetlb_vmemmap calls in line with them by passing a folio
instead of the head struct page. The head struct page is still needed
within these functions.
The set/clear/test functions for hugepages are also changed to folio
versions.
Link: https://lkml.kernel.org/r/20231011144557.1720481-2-usama.arif@bytedance.com
Signed-off-by: Usama Arif <usama.arif@bytedance.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Fam Zheng <fam.zheng@bytedance.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 14 |
1 files changed, 7 insertions, 7 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cf834bb7f820..dd8065e36038 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1747,10 +1747,10 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
 
 	/*
 	 * If folio is not vmemmap optimized (!clear_dtor), then the folio
-	 * is no longer identified as a hugetlb page. hugetlb_vmemmap_restore
+	 * is no longer identified as a hugetlb page. hugetlb_vmemmap_restore_folio
 	 * can only be passed hugetlb pages and will BUG otherwise.
 	 */
-	if (clear_dtor && hugetlb_vmemmap_restore(h, &folio->page)) {
+	if (clear_dtor && hugetlb_vmemmap_restore_folio(h, folio)) {
 		spin_lock_irq(&hugetlb_lock);
 		/*
 		 * If we cannot allocate vmemmap pages, just refuse to free the
@@ -1893,7 +1893,7 @@ static void bulk_vmemmap_restore_error(struct hstate *h,
 	 * quit processing the list to retry the bulk operation.
 	 */
 	list_for_each_entry_safe(folio, t_folio, folio_list, lru)
-		if (hugetlb_vmemmap_restore(h, &folio->page)) {
+		if (hugetlb_vmemmap_restore_folio(h, folio)) {
 			list_del(&folio->lru);
 			spin_lock_irq(&hugetlb_lock);
 			add_hugetlb_folio(h, folio, true);
@@ -2051,7 +2051,7 @@ static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
 {
 	init_new_hugetlb_folio(h, folio);
-	hugetlb_vmemmap_optimize(h, &folio->page);
+	hugetlb_vmemmap_optimize_folio(h, folio);
 }
 
 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
@@ -2462,7 +2462,7 @@ retry:
 	 * non-vmemmap optimized hugetlb folios.
 	 */
 	if (folio_test_hugetlb(folio)) {
-		rc = hugetlb_vmemmap_restore(h, &folio->page);
+		rc = hugetlb_vmemmap_restore_folio(h, folio);
 		if (rc) {
 			spin_lock_irq(&hugetlb_lock);
 			add_hugetlb_folio(h, folio, false);
@@ -3886,11 +3886,11 @@ static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
 	/*
 	 * If vmemmap already existed for folio, the remove routine above would
 	 * have cleared the hugetlb folio flag. Hence the folio is technically
-	 * no longer a hugetlb folio. hugetlb_vmemmap_restore can only be
+	 * no longer a hugetlb folio. hugetlb_vmemmap_restore_folio can only be
 	 * passed hugetlb folios and will BUG otherwise.
 	 */
 	if (folio_test_hugetlb(folio)) {
-		rc = hugetlb_vmemmap_restore(h, &folio->page);
+		rc = hugetlb_vmemmap_restore_folio(h, folio);
 		if (rc) {
 			/* Allocation of vmemmmap failed, we can not demote folio */
 			spin_lock_irq(&hugetlb_lock);
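
The hunks above show only the hugetlb.c call sites; the companion changes to the helpers themselves live in mm/hugetlb_vmemmap.{c,h} and are outside this diffstat. As a rough illustration of what the folio-taking variants imply (a sketch only, not the patch's actual implementation, which reworks the existing helpers rather than necessarily wrapping them), the head struct page can be recovered from the folio inside the helper:

```c
/*
 * Illustrative sketch only -- not taken from the patch. It assumes the
 * pre-patch page-based helpers declared in mm/hugetlb_vmemmap.h:
 *   int  hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
 *   void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
 */
#include "hugetlb_vmemmap.h"

static inline int hugetlb_vmemmap_restore_folio(const struct hstate *h,
						struct folio *folio)
{
	/* The head struct page is still what the vmemmap code operates on. */
	return hugetlb_vmemmap_restore(h, &folio->page);
}

static inline void hugetlb_vmemmap_optimize_folio(const struct hstate *h,
						   struct folio *folio)
{
	hugetlb_vmemmap_optimize(h, &folio->page);
}
```

On the caller side, as in the hunks above, `hugetlb_vmemmap_restore(h, &folio->page)` then simply becomes `hugetlb_vmemmap_restore_folio(h, folio)`.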