author      Alistair Popple <apopple@nvidia.com>              2021-07-01 04:54:22 +0300
committer   Linus Torvalds <torvalds@linux-foundation.org>    2021-07-01 21:06:03 +0300
commit      9a5cc85c407402ae66128d31f0422a3a7ffa5c5c (patch)
tree        801de2493d54996d46135d7824a16abb5b24f76e /mm/memory.c
parent      6b49bf6ddbb0d7992c816846acfa5fd1cf751c36 (diff)
download    linux-9a5cc85c407402ae66128d31f0422a3a7ffa5c5c.tar.xz
mm/memory.c: allow different return codes for copy_nonpresent_pte()
Currently, if copy_nonpresent_pte() returns a non-zero value it is assumed
to be a swap entry that requires further processing outside the loop in
copy_pte_range() after dropping locks. This prevents other values from
being returned to signal conditions such as failure, which a subsequent
change requires.
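For reference, the pre-patch flow in copy_pte_range() looks roughly like the
sketch below (a simplified rearrangement of the lines this patch removes; the
surrounding loop, locking and error handling are elided):

        if (unlikely(!pte_present(*src_pte))) {
                /* Any non-zero return value is taken to be a swap entry. */
                entry.val = copy_nonpresent_pte(dst_mm, src_mm, dst_pte, src_pte,
                                                dst_vma, src_vma, addr, rss);
                if (entry.val)
                        break;  /* handled below, after the ptl is dropped */
        }
        /* ... */
        pte_unmap_unlock(orig_dst_pte, dst_ptl);
        if (entry.val) {
                /* add_swap_count_continuation() may sleep, so it runs unlocked. */
                if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
                        return -ENOMEM; /* the real code sets ret and jumps to out */
                entry.val = 0;
        }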
Instead, make copy_nonpresent_pte() return an error code if further
processing is required, and read the value for the swap entry in the main
loop under the ptl.
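With this patch, that case is reported as -EIO instead, and the swap entry is
re-read from *src_pte while the ptl is still held; roughly (again a simplified
sketch of the hunks below, not the exact code):

        if (unlikely(!pte_present(*src_pte))) {
                ret = copy_nonpresent_pte(dst_mm, src_mm, dst_pte, src_pte,
                                          dst_vma, src_vma, addr, rss);
                if (ret == -EIO) {
                        /* Still under the ptl, so *src_pte cannot change here. */
                        entry = pte_to_swp_entry(*src_pte);
                        break;
                }
        }
        /* ... ptl dropped ... */
        if (ret == -EIO) {
                VM_WARN_ON_ONCE(!entry.val);
                if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
                        return -ENOMEM; /* the real code sets ret and jumps to out */
                entry.val = 0;
        }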
Link: https://lkml.kernel.org/r/20210616105937.23201-7-apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--   mm/memory.c   28
1 file changed, 17 insertions, 11 deletions
diff --git a/mm/memory.c b/mm/memory.c
index e30488e9202f..75feddbf0190 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -717,7 +717,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
         if (likely(!non_swap_entry(entry))) {
                 if (swap_duplicate(entry) < 0)
-                        return entry.val;
+                        return -EIO;
 
                 /* make sure dst_mm is on swapoff's mmlist. */
                 if (unlikely(list_empty(&dst_mm->mmlist))) {
@@ -973,12 +973,14 @@ again:
                         continue;
                 }
                 if (unlikely(!pte_present(*src_pte))) {
-                        entry.val = copy_nonpresent_pte(dst_mm, src_mm,
-                                                        dst_pte, src_pte,
-                                                        dst_vma, src_vma,
-                                                        addr, rss);
-                        if (entry.val)
+                        ret = copy_nonpresent_pte(dst_mm, src_mm,
+                                                  dst_pte, src_pte,
+                                                  dst_vma, src_vma,
+                                                  addr, rss);
+                        if (ret == -EIO) {
+                                entry = pte_to_swp_entry(*src_pte);
                                 break;
+                        }
                         progress += 8;
                         continue;
                 }
@@ -1011,20 +1013,24 @@ again:
         pte_unmap_unlock(orig_dst_pte, dst_ptl);
         cond_resched();
 
-        if (entry.val) {
+        if (ret == -EIO) {
+                VM_WARN_ON_ONCE(!entry.val);
                 if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
                         ret = -ENOMEM;
                         goto out;
                 }
                 entry.val = 0;
-        } else if (ret) {
-                WARN_ON_ONCE(ret != -EAGAIN);
+        } else if (ret == -EAGAIN) {
                 prealloc = page_copy_prealloc(src_mm, src_vma, addr);
                 if (!prealloc)
                         return -ENOMEM;
-                /* We've captured and resolved the error. Reset, try again. */
-                ret = 0;
+        } else if (ret) {
+                VM_WARN_ON_ONCE(1);
         }
+
+        /* We've captured and resolved the error. Reset, try again. */
+        ret = 0;
+
         if (addr != end)
                 goto again;
 out: