author | Mike Kravetz <mike.kravetz@oracle.com> | 2023-10-19 05:31:06 +0300
committer | Andrew Morton <akpm@linux-foundation.org> | 2023-10-26 02:47:07 +0300
commit | cfb8c75099dbf152db1b075eead5029c5a30ce51 (patch)
tree | dcce91e1071ed741f26166bbf0c1aebc1acb2910 /mm/hugetlb.c
parent | 79359d6d24df2f304abbf6f137b01d2b76175ce3 (diff)
download | linux-cfb8c75099dbf152db1b075eead5029c5a30ce51.tar.xz
hugetlb: perform vmemmap restoration on a list of pages
The routine update_and_free_pages_bulk already performs vmemmap
restoration on the list of hugetlb pages in a separate step. In
preparation for more functionality to be added in this step, create a new
routine hugetlb_vmemmap_restore_folios() that will restore vmemmap for a
list of folios.
This new routine must provide sufficient feedback about errors and actual
restoration performed so that update_and_free_pages_bulk can perform
optimally.
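For reference, a rough sketch of the interface this patch relies on; the routine itself lives in mm/hugetlb_vmemmap.c, which the diffstat below excludes, so the body here is only an illustration inferred from the call site visible in the diff:

/*
 * Illustrative sketch (not the upstream body): walk @folio_list, restore
 * vmemmap where needed, and move every folio that no longer needs
 * restoration onto @non_hvo_folios.  Return the number of folios whose
 * vmemmap was restored, or a negative errno on the first failure.
 */
long hugetlb_vmemmap_restore_folios(const struct hstate *h,
                                    struct list_head *folio_list,
                                    struct list_head *non_hvo_folios)
{
        struct folio *folio, *t_folio;
        long restored = 0;

        list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
                if (folio_test_hugetlb_vmemmap_optimized(folio)) {
                        int ret = hugetlb_vmemmap_restore(h, &folio->page);

                        if (ret)
                                return ret;     /* caller handles the error path */
                        restored++;
                }
                /* restored or never optimized: freeable without vmemmap work */
                list_move(&folio->lru, non_hvo_folios);
        }
        return restored;
}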
Special care must be taken when encountering an error from
hugetlb_vmemmap_restore_folios. We want to continue making as much
forward progress as possible. A new routine bulk_vmemmap_restore_error
handles this specific situation.
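Condensed to its control flow, update_and_free_pages_bulk then works roughly like the fragment below (an orientation sketch of the code in the diff that follows, with locking and destructor handling elided). Each trip through the error path either frees at least one hugetlb page or turns one into a surplus page and drops it from the list, so the retry loop terminates:

        LIST_HEAD(non_hvo_folios);
        long ret;

retry:
        /* Move every folio that no longer needs vmemmap restoration
         * (just restored, or never optimized) onto non_hvo_folios. */
        ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
        if (ret < 0) {
                /* A restore failed part way through.  Free whatever is already
                 * freeable, or failing that handle folios one at a time, so the
                 * next attempt has more memory to work with. */
                bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
                goto retry;
        }
        /* Everything left is on non_hvo_folios and can now be freed. */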
Link: https://lkml.kernel.org/r/20231019023113.345257-5-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Konrad Dybcio <konradybcio@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Usama Arif <usama.arif@bytedance.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 99
1 file changed, 71 insertions, 28 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8b171f866d0a..cf834bb7f820 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1859,50 +1859,93 @@ static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
                 schedule_work(&free_hpage_work);
 }
 
-static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
+static void bulk_vmemmap_restore_error(struct hstate *h,
+                                        struct list_head *folio_list,
+                                        struct list_head *non_hvo_folios)
 {
         struct folio *folio, *t_folio;
-        bool clear_dtor = false;
 
-        /*
-         * First allocate required vmemmmap (if necessary) for all folios on
-         * list.  If vmemmap can not be allocated, we can not free folio to
-         * lower level allocator, so add back as hugetlb surplus page.
-         * add_hugetlb_folio() removes the page from THIS list.
-         * Use clear_dtor to note if vmemmap was successfully allocated for
-         * ANY page on the list.
-         */
-        list_for_each_entry_safe(folio, t_folio, list, lru) {
-                if (folio_test_hugetlb_vmemmap_optimized(folio)) {
+        if (!list_empty(non_hvo_folios)) {
+                /*
+                 * Free any restored hugetlb pages so that restore of the
+                 * entire list can be retried.
+                 * The idea is that in the common case of ENOMEM errors freeing
+                 * hugetlb pages with vmemmap we will free up memory so that we
+                 * can allocate vmemmap for more hugetlb pages.
+                 */
+                list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
+                        list_del(&folio->lru);
+                        spin_lock_irq(&hugetlb_lock);
+                        __clear_hugetlb_destructor(h, folio);
+                        spin_unlock_irq(&hugetlb_lock);
+                        update_and_free_hugetlb_folio(h, folio, false);
+                        cond_resched();
+                }
+        } else {
+                /*
+                 * In the case where there are no folios which can be
+                 * immediately freed, we loop through the list trying to restore
+                 * vmemmap individually in the hope that someone elsewhere may
+                 * have done something to cause success (such as freeing some
+                 * memory).  If unable to restore a hugetlb page, the hugetlb
+                 * page is made a surplus page and removed from the list.
+                 * If are able to restore vmemmap and free one hugetlb page, we
+                 * quit processing the list to retry the bulk operation.
+                 */
+                list_for_each_entry_safe(folio, t_folio, folio_list, lru)
                         if (hugetlb_vmemmap_restore(h, &folio->page)) {
+                                list_del(&folio->lru);
                                 spin_lock_irq(&hugetlb_lock);
                                 add_hugetlb_folio(h, folio, true);
                                 spin_unlock_irq(&hugetlb_lock);
-                        } else
-                                clear_dtor = true;
-                }
+                        } else {
+                                list_del(&folio->lru);
+                                spin_lock_irq(&hugetlb_lock);
+                                __clear_hugetlb_destructor(h, folio);
+                                spin_unlock_irq(&hugetlb_lock);
+                                update_and_free_hugetlb_folio(h, folio, false);
+                                cond_resched();
+                                break;
+                        }
         }
+}
+
+static void update_and_free_pages_bulk(struct hstate *h,
+                                                struct list_head *folio_list)
+{
+        long ret;
+        struct folio *folio, *t_folio;
+        LIST_HEAD(non_hvo_folios);
 
         /*
-         * If vmemmmap allocation was performed on any folio above, take lock
-         * to clear destructor of all folios on list.  This avoids the need to
-         * lock/unlock for each individual folio.
-         * The assumption is vmemmap allocation was performed on all or none
-         * of the folios on the list.  This is true expect in VERY rare cases.
+         * First allocate required vmemmmap (if necessary) for all folios.
+         * Carefully handle errors and free up any available hugetlb pages
+         * in an effort to make forward progress.
         */
-        if (clear_dtor) {
+retry:
+        ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
+        if (ret < 0) {
+                bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
+                goto retry;
+        }
+
+        /*
+         * At this point, list should be empty, ret should be >= 0 and there
+         * should only be pages on the non_hvo_folios list.
+         * Do note that the non_hvo_folios list could be empty.
+         * Without HVO enabled, ret will be 0 and there is no need to call
+         * __clear_hugetlb_destructor as this was done previously.
+         */
+        VM_WARN_ON(!list_empty(folio_list));
+        VM_WARN_ON(ret < 0);
+        if (!list_empty(&non_hvo_folios) && ret) {
                 spin_lock_irq(&hugetlb_lock);
-                list_for_each_entry(folio, list, lru)
+                list_for_each_entry(folio, &non_hvo_folios, lru)
                         __clear_hugetlb_destructor(h, folio);
                 spin_unlock_irq(&hugetlb_lock);
         }
 
-        /*
-         * Free folios back to low level allocators.  vmemmap and destructors
-         * were taken care of above, so update_and_free_hugetlb_folio will
-         * not need to take hugetlb lock.
-         */
-        list_for_each_entry_safe(folio, t_folio, list, lru) {
+        list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
                 update_and_free_hugetlb_folio(h, folio, false);
                 cond_resched();
         }