-rw-r--r--  include/linux/hugetlb.h    17
-rw-r--r--  mm/hugetlb.c               23
-rw-r--r--  mm/memory-failure.c        89
3 files changed, 116 insertions, 13 deletions
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6d0620edf0a6..3ec981a0d8b3 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -43,6 +43,9 @@ enum {
SUBPAGE_INDEX_CGROUP_RSVD, /* reuse page->private */
__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ SUBPAGE_INDEX_HWPOISON,
+#endif
__NR_USED_SUBPAGE,
};
@@ -551,7 +554,7 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
* Synchronization: Initially set after new page allocation with no
* locking. When examined and modified during migration processing
* (isolate, migrate, putback) the hugetlb_lock is held.
- * HPG_temporary - - Set on a page that is temporarily allocated from the buddy
+ * HPG_temporary - Set on a page that is temporarily allocated from the buddy
* allocator. Typically used for migration target pages when no pages
* are available in the pool. The hugetlb free page path will
* immediately free pages with this flag set to the buddy allocator.
@@ -561,6 +564,8 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
* HPG_freed - Set when page is on the free lists.
* Synchronization: hugetlb_lock held for examination and modification.
* HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
+ * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
+ * that is not tracked by the raw_hwp_page list.
*/
enum hugetlb_page_flags {
HPG_restore_reserve = 0,
@@ -568,6 +573,7 @@ enum hugetlb_page_flags {
HPG_temporary,
HPG_freed,
HPG_vmemmap_optimized,
+ HPG_raw_hwp_unreliable,
__NR_HPAGEFLAGS,
};
@@ -614,6 +620,7 @@ HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
+HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
#ifdef CONFIG_HUGETLB_PAGE
@@ -796,6 +803,14 @@ extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
+#ifdef CONFIG_MEMORY_FAILURE
+extern void hugetlb_clear_page_hwpoison(struct page *hpage);
+#else
+static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
+{
+}
+#endif
+
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
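
The HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable) line relies on the existing flag-generation macros in hugetlb.h, so the rest of the patch can use HPageRawHwpUnreliable()/SetHPageRawHwpUnreliable() without defining them by hand. Roughly, the generated helpers look like the following simplified sketch (not the literal macro expansion; hugetlb-specific page flags live in the head page's page->private):

static __always_inline bool HPageRawHwpUnreliable(struct page *page)
{
	return test_bit(HPG_raw_hwp_unreliable, &page->private);
}

static __always_inline void SetHPageRawHwpUnreliable(struct page *page)
{
	set_bit(HPG_raw_hwp_unreliable, &page->private);
}

static __always_inline void ClearHPageRawHwpUnreliable(struct page *page)
{
	clear_bit(HPG_raw_hwp_unreliable, &page->private);
}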
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9b79806be17b..0aee2f3ae15c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1535,6 +1535,13 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
return;
+ /*
+ * If we don't know which subpages are hwpoisoned, we can't free
+ * the hugepage, so it's leaked intentionally.
+ */
+ if (HPageRawHwpUnreliable(page))
+ return;
+
if (hugetlb_vmemmap_restore(h, page)) {
spin_lock_irq(&hugetlb_lock);
/*
@@ -1547,6 +1554,13 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
return;
}
+ /*
+ * Move PageHWPoison flag from head page to the raw error pages,
+ * which makes any healthy subpages reusable.
+ */
+ if (unlikely(PageHWPoison(page)))
+ hugetlb_clear_page_hwpoison(page);
+
for (i = 0; i < pages_per_huge_page(h);
i++, subpage = mem_map_next(subpage, page, i)) {
subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
@@ -2109,15 +2123,6 @@ retry:
*/
rc = hugetlb_vmemmap_restore(h, head);
if (!rc) {
- /*
- * Move PageHWPoison flag from head page to the raw
- * error page, which makes any subpages rather than
- * the error page reusable.
- */
- if (PageHWPoison(head) && page != head) {
- SetPageHWPoison(page);
- ClearPageHWPoison(head);
- }
update_and_free_page(h, head, false);
} else {
spin_lock_irq(&hugetlb_lock);
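
On the mm/hugetlb.c side, the head-to-subpage PageHWPoison transfer moves out of dissolve_free_huge_page() and into __update_and_free_page(), so it covers every free path and only runs after hugetlb_vmemmap_restore() has succeeded (with vmemmap optimization the tail struct pages are not safely writable before that). A condensed sketch of the resulting ordering, not the full function:

static void __update_and_free_page(struct hstate *h, struct page *page)
{
	/* Unknown raw error pages: intentionally leak instead of freeing. */
	if (HPageRawHwpUnreliable(page))
		return;

	/* Restore the vmemmap before touching tail struct pages. */
	if (hugetlb_vmemmap_restore(h, page))
		return;	/* restore failed: page goes back on the free list */

	/* Only now replay PageHWPoison onto the recorded raw error subpages. */
	if (unlikely(PageHWPoison(page)))
		hugetlb_clear_page_hwpoison(page);

	/* ...per-subpage flag clearing and the actual free follow... */
}
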
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9a7a228ad04a..61668ce20836 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1662,6 +1662,90 @@ unlock:
EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
#endif /* CONFIG_FS_DAX */
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * Struct raw_hwp_page represents information about a "raw error page",
+ * forming a singly linked list originating from the ->private field of
+ * the SUBPAGE_INDEX_HWPOISON-th tail page.
+ */
+struct raw_hwp_page {
+ struct llist_node node;
+ struct page *page;
+};
+
+static inline struct llist_head *raw_hwp_list_head(struct page *hpage)
+{
+ return (struct llist_head *)&page_private(hpage + SUBPAGE_INDEX_HWPOISON);
+}
+
+static void __free_raw_hwp_pages(struct page *hpage)
+{
+ struct llist_head *head;
+ struct llist_node *t, *tnode;
+
+ head = raw_hwp_list_head(hpage);
+ llist_for_each_safe(tnode, t, head->first) {
+ struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);
+
+ SetPageHWPoison(p->page);
+ kfree(p);
+ }
+ llist_del_all(head);
+}
+
+static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page)
+{
+ struct llist_head *head;
+ struct raw_hwp_page *raw_hwp;
+ struct llist_node *t, *tnode;
+ int ret = TestSetPageHWPoison(hpage) ? -EHWPOISON : 0;
+
+ /*
+ * Once the hwpoison hugepage has lost reliable raw error info,
+ * there is little point in keeping additional error info precisely,
+ * so skip adding any more raw error info.
+ */
+ if (HPageRawHwpUnreliable(hpage))
+ return -EHWPOISON;
+ head = raw_hwp_list_head(hpage);
+ llist_for_each_safe(tnode, t, head->first) {
+ struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);
+
+ if (p->page == page)
+ return -EHWPOISON;
+ }
+
+ raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
+ if (raw_hwp) {
+ raw_hwp->page = page;
+ llist_add(&raw_hwp->node, head);
+ /* the first error event will be counted in action_result(). */
+ if (ret)
+ num_poisoned_pages_inc();
+ } else {
+ /*
+ * Failed to save raw error info. We no longer track all
+ * hwpoisoned subpages, so we must refuse to free/dissolve
+ * this hwpoisoned hugepage.
+ */
+ SetHPageRawHwpUnreliable(hpage);
+ /*
+ * Once HPageRawHwpUnreliable is set, the raw_hwp_page list is
+ * no longer used, so free it.
+ */
+ __free_raw_hwp_pages(hpage);
+ }
+ return ret;
+}
+
+void hugetlb_clear_page_hwpoison(struct page *hpage)
+{
+ if (HPageRawHwpUnreliable(hpage))
+ return;
+ ClearPageHWPoison(hpage);
+ __free_raw_hwp_pages(hpage);
+}
+
/*
* Called from hugetlb code with hugetlb_lock held.
*
@@ -1696,7 +1780,7 @@ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
goto out;
}
- if (TestSetPageHWPoison(head)) {
+ if (hugetlb_set_page_hwpoison(head, page)) {
ret = -EHWPOISON;
goto out;
}
@@ -1708,7 +1792,6 @@ out:
return ret;
}
-#ifdef CONFIG_HUGETLB_PAGE
/*
* Taking refcount of hugetlb pages needs extra care about race conditions
* with basic operations like hugepage allocation/free/demotion.
@@ -1749,7 +1832,7 @@ retry:
lock_page(head);
if (hwpoison_filter(p)) {
- ClearPageHWPoison(head);
+ hugetlb_clear_page_hwpoison(head);
res = -EOPNOTSUPP;
goto out;
}
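
Taken together, the mm/memory-failure.c side keeps one small record per distinct poisoned subpage on a list hung off the hugepage, gives up (and later leaks the hugepage) if a record cannot be allocated, and replays the records onto the subpages when the hugepage is finally dissolved. The following self-contained userspace mock illustrates that bookkeeping; hugepage_mock, record_hwpoison and clear_hwpoison are invented names, and plain malloc/pointers stand in for kmalloc and the kernel's llist:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct raw_hwp_rec {
	struct raw_hwp_rec *next;
	unsigned long subpage;		/* index of the poisoned subpage */
};

struct hugepage_mock {
	bool hwpoison;			/* models PageHWPoison on the head page */
	bool raw_hwp_unreliable;	/* models HPageRawHwpUnreliable */
	struct raw_hwp_rec *raw_hwp_list;
};

/* Returns 0 only for the first error on the hugepage, -1 (for -EHWPOISON) otherwise. */
static int record_hwpoison(struct hugepage_mock *hp, unsigned long subpage)
{
	struct raw_hwp_rec *rec;
	int ret = hp->hwpoison ? -1 : 0;

	hp->hwpoison = true;
	if (hp->raw_hwp_unreliable)
		return -1;
	for (rec = hp->raw_hwp_list; rec; rec = rec->next)
		if (rec->subpage == subpage)
			return -1;	/* duplicate event on the same subpage */

	rec = malloc(sizeof(*rec));
	if (!rec) {
		/* Allocation failed: stop tracking, later free/dissolve is refused. */
		hp->raw_hwp_unreliable = true;
		return ret;
	}
	rec->subpage = subpage;
	rec->next = hp->raw_hwp_list;
	hp->raw_hwp_list = rec;
	return ret;
}

/* Models hugetlb_clear_page_hwpoison(): replay the records when freeing. */
static void clear_hwpoison(struct hugepage_mock *hp)
{
	if (hp->raw_hwp_unreliable)
		return;			/* the hugepage is leaked, keep the flag */
	hp->hwpoison = false;
	while (hp->raw_hwp_list) {
		struct raw_hwp_rec *rec = hp->raw_hwp_list;

		printf("subpage %lu keeps PageHWPoison; other subpages are reusable\n",
		       rec->subpage);
		hp->raw_hwp_list = rec->next;
		free(rec);
	}
}

int main(void)
{
	struct hugepage_mock hp = { 0 };

	record_hwpoison(&hp, 3);	/* first error on the hugepage: returns 0 */
	record_hwpoison(&hp, 3);	/* same subpage again: rejected, nothing recorded */
	record_hwpoison(&hp, 40);	/* second distinct error: recorded, returns -1 */
	clear_hwpoison(&hp);
	return 0;
}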