-rw-r--r--  arch/mips/mm/gup.c  |  9 ++++++++-
-rw-r--r--  arch/s390/mm/gup.c  |  9 ++++++++-
-rw-r--r--  arch/sh/mm/gup.c    |  9 ++++++++-
-rw-r--r--  arch/sparc/mm/gup.c |  9 ++++++++-
-rw-r--r--  arch/x86/mm/gup.c   |  9 ++++++++-
-rw-r--r--  mm/gup.c            | 44 ++++++++++++++++++++++++++++++++++++++------
-rw-r--r--  mm/huge_memory.c    |  7 +++----
7 files changed, 81 insertions, 15 deletions
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index d8c3c159289a..71a19d20bbb7 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -271,7 +271,14 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		/*
+		 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
+		 * because get_user_pages() may need to cause an early COW in
+		 * order to avoid confusing the normal COW routines. So only
+		 * targets that are already writable are safe to do by just
+		 * looking at the page tables.
+		 */
+		if (!gup_pud_range(pgd, addr, next, 1, pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 	local_irq_enable();
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index cf045f56581e..be1e2ed6405d 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -261,7 +261,14 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	might_sleep();
 	start &= PAGE_MASK;
-	nr = __get_user_pages_fast(start, nr_pages, write, pages);
+	/*
+	 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
+	 * because get_user_pages() may need to cause an early COW in
+	 * order to avoid confusing the normal COW routines. So only
+	 * targets that are already writable are safe to do by just
+	 * looking at the page tables.
+	 */
+	nr = __get_user_pages_fast(start, nr_pages, 1, pages);
 	if (nr == nr_pages)
 		return nr;
 
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index 063c298ba56c..7fec66e34af0 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -239,7 +239,14 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		/*
+		 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
+		 * because get_user_pages() may need to cause an early COW in
+		 * order to avoid confusing the normal COW routines. So only
+		 * targets that are already writable are safe to do by just
+		 * looking at the page tables.
+		 */
+		if (!gup_pud_range(pgd, addr, next, 1, pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 	local_irq_enable();
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index cd0e32bbcb1d..685679f87988 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -218,7 +218,14 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		/*
+		 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
+		 * because get_user_pages() may need to cause an early COW in
+		 * order to avoid confusing the normal COW routines. So only
+		 * targets that are already writable are safe to do by just
+		 * looking at the page tables.
+		 */
+		if (!gup_pud_range(pgd, addr, next, 1, pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 82f727fbbbd2..549f89fb3abc 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -454,7 +454,14 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		/*
+		 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
+		 * because get_user_pages() may need to cause an early COW in
+		 * order to avoid confusing the normal COW routines. So only
+		 * targets that are already writable are safe to do by just
+		 * looking at the page tables.
+		 */
+		if (!gup_pud_range(pgd, addr, next, 1, pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 	local_irq_enable();
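[Illustration, not part of the patch.] For a caller, the arch-side change means a read-only get_user_pages_fast() no longer trusts read-only page table entries: anything not already writable falls back to the slow path, which can break COW properly before the page is pinned. A minimal sketch of a typical caller follows; pin_user_buffer() is a hypothetical helper name, and the 4.x-era signature from the hunks above is assumed:

#include <linux/mm.h>

/*
 * Hypothetical driver helper, for illustration only: pin a user
 * buffer that a device will only read.
 */
static int pin_user_buffer(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	/*
	 * The caller still passes write == 0, but after this patch the
	 * fast path internally acts as if a write was requested: PTEs
	 * that are not already writable (e.g. unbroken COW pages in a
	 * MAP_PRIVATE mapping) are rejected and resolved via the slow
	 * path, so the pin lands on this process's own page copy.
	 */
	return get_user_pages_fast(uaddr, nr_pages, 0, pages);
}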
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -61,13 +61,22 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
 }
 
 /*
- * FOLL_FORCE can write to even unwritable pte's, but only
- * after we've gone through a COW cycle and they are dirty.
+ * FOLL_FORCE or a forced COW break can write even to unwritable pte's,
+ * but only after we've gone through a COW cycle and they are dirty.
  */
 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
 {
-	return pte_write(pte) ||
-		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+	return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
+}
+
+/*
+ * A (separate) COW fault might break the page the other way and
+ * get_user_pages() would return the page from what is now the wrong
+ * VM. So we need to force a COW break at GUP time even for reads.
+ */
+static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
+{
+	return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET);
 }
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -577,12 +586,18 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		if (!vma || check_vma_flags(vma, gup_flags))
 			return i ? : -EFAULT;
 		if (is_vm_hugetlb_page(vma)) {
+			if (should_force_cow_break(vma, foll_flags))
+				foll_flags |= FOLL_WRITE;
 			i = follow_hugetlb_page(mm, vma, pages, vmas,
 					&start, &nr_pages, i,
-					gup_flags);
+					foll_flags);
 			continue;
 		}
 	}
+
+	if (should_force_cow_break(vma, foll_flags))
+		foll_flags |= FOLL_WRITE;
+
 retry:
 	/*
 	 * If we have a pending SIGKILL, don't keep faulting pages and
@@ -1503,6 +1518,10 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
 /*
  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
  * the regular GUP. It will only return non-negative values.
+ *
+ * Careful, careful! COW breaking can go either way, so a non-write
+ * access can get ambiguous page results. If you call this function without
+ * 'write' set, you'd better be sure that you're ok with that ambiguity.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			  struct page **pages)
@@ -1532,6 +1551,12 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	 *
 	 * We do not adopt an rcu_read_lock(.) here as we also want to
 	 * block IPIs that come from THPs splitting.
+	 *
+	 * NOTE! We allow read-only gup_fast() here, but you'd better be
+	 * careful about possible COW pages. You'll get _a_ COW page, but
+	 * not necessarily the one you intended to get depending on what
+	 * COW event happens after this. COW may break the page copy in a
+	 * random direction.
 	 */
 	local_irq_save(flags);
@@ -1580,7 +1605,14 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	int nr, ret;
 
 	start &= PAGE_MASK;
-	nr = __get_user_pages_fast(start, nr_pages, write, pages);
+	/*
+	 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
+	 * because get_user_pages() may need to cause an early COW in
+	 * order to avoid confusing the normal COW routines. So only
+	 * targets that are already writable are safe to do by just
+	 * looking at the page tables.
+	 */
+	nr = __get_user_pages_fast(start, nr_pages, 1, pages);
 	ret = nr;
 
 	if (nr < nr_pages) {
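[Illustration, not part of the patch.] The mm/gup.c hunks implement a single policy: a lookup that takes a page reference (FOLL_GET) in a private COW mapping is promoted to a write lookup, and the resulting FOLL_COW state is now honoured for plain reads, not just FOLL_FORCE. A condensed restatement of that policy follows; adjust_gup_flags() is a hypothetical name, while is_cow_mapping() and the FOLL_* flags are the kernel's own definitions:

/*
 * Sketch only: mirrors should_force_cow_break() plus the FOLL_WRITE
 * promotion added to __get_user_pages() above.
 */
static unsigned int adjust_gup_flags(struct vm_area_struct *vma,
				     unsigned int foll_flags)
{
	/* break COW now, toward the GUP caller, not later in a
	 * random direction */
	if (is_cow_mapping(vma->vm_flags) && (foll_flags & FOLL_GET))
		foll_flags |= FOLL_WRITE;
	return foll_flags;
}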
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 91f33bb43f17..3f3a86cc62b6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1135,13 +1135,12 @@ out_unlock:
 }
 
 /*
- * FOLL_FORCE can write to even unwritable pmd's, but only
- * after we've gone through a COW cycle and they are dirty.
+ * FOLL_FORCE or a forced COW break can write even to unwritable pmd's,
+ * but only after we've gone through a COW cycle and they are dirty.
  */
 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
 {
-	return pmd_write(pmd) ||
-		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+	return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd));
 }
 
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
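[Illustration, not part of the patch.] To make the "COW breaking can go either way" comments concrete, here is a hedged userspace sketch of the ambiguity being defended against, assuming a read-only pin of the buffer (for example, an O_DIRECT read into it) is taken where indicated:

#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/*
 * Not a reliable reproducer, just the shape of the race. A read-only
 * pin takes a reference on the physical page backing a private
 * mapping. Once parent and child share that page after fork(), a COW
 * break can leave the pinned physical page on either side of the
 * fork, so whether the pin tracks later writes through buf depends
 * on timing.
 */
int main(void)
{
	char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 'A', 4096);	/* fault the page in */

	if (fork() == 0) {	/* child now shares the page COW */
		buf[0] = 'C';	/* a child write forces a COW break */
		_exit(0);
	}

	/*
	 * A read-only pin of buf taken here races with the child's
	 * write: pre-patch, the pinned page might or might not be the
	 * one this process keeps, so the pin could miss the write
	 * below.
	 */
	buf[0] = 'P';
	return 0;
}

With the patch applied, taking the pin itself breaks COW at GUP time, so the pinned page is unambiguously the pinning process's own copy.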