author    | Kees Cook <keescook@chromium.org> | 2016-07-25 23:50:36 +0300
committer | Kees Cook <keescook@chromium.org> | 2016-07-25 23:50:36 +0300
commit    | 74e630a7582e6b3cb39559d712a0049f08dea8a0 (patch)
tree      | 98a752412dcfc74d802024c1d9e8c541b93174f9 /mm/memory.c
parent    | 35da60941e44dbf57868e67686dd24cc1a33125a (diff)
parent    | 523d939ef98fd712632d93a5a2b588e477a7565e (diff)
download  | linux-74e630a7582e6b3cb39559d712a0049f08dea8a0.tar.xz
Merge tag 'v4.7' into for-linus/pstore
Linux 4.7
Diffstat (limited to 'mm/memory.c')
-rw-r--r-- | mm/memory.c | 34
1 file changed, 6 insertions, 28 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 15322b73636b..9e046819e619 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2399,8 +2399,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                  * Protected against the rmap code by
                                  * the page lock.
                                  */
-                                page_move_anon_rmap(compound_head(old_page),
-                                                    vma, address);
+                                page_move_anon_rmap(old_page, vma);
                         }
                         unlock_page(old_page);
                         return wp_page_reuse(mm, vma, address, page_table, ptl,
@@ -2877,7 +2876,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
  * vm_ops->map_pages.
  */
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-                struct page *page, pte_t *pte, bool write, bool anon, bool old)
+                struct page *page, pte_t *pte, bool write, bool anon)
 {
         pte_t entry;
 
@@ -2885,8 +2884,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
         entry = mk_pte(page, vma->vm_page_prot);
         if (write)
                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-        if (old)
-                entry = pte_mkold(entry);
         if (anon) {
                 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
                 page_add_new_anon_rmap(page, vma, address, false);
@@ -2900,16 +2897,8 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
         update_mmu_cache(vma, address, pte);
 }
 
-/*
- * If architecture emulates "accessed" or "young" bit without HW support,
- * there is no much gain with fault_around.
- */
 static unsigned long fault_around_bytes __read_mostly =
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-        PAGE_SIZE;
-#else
         rounddown_pow_of_two(65536);
-#endif
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
@@ -3032,20 +3021,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
          */
         if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
                 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-                if (!pte_same(*pte, orig_pte))
-                        goto unlock_out;
                 do_fault_around(vma, address, pte, pgoff, flags);
-                /* Check if the fault is handled by faultaround */
-                if (!pte_same(*pte, orig_pte)) {
-                        /*
-                         * Faultaround produce old pte, but the pte we've
-                         * handler fault for should be young.
-                         */
-                        pte_t entry = pte_mkyoung(*pte);
-                        if (ptep_set_access_flags(vma, address, pte, entry, 0))
-                                update_mmu_cache(vma, address, pte);
+                if (!pte_same(*pte, orig_pte))
                         goto unlock_out;
-                }
                 pte_unmap_unlock(pte, ptl);
         }
 
@@ -3060,7 +3038,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 put_page(fault_page);
                 return ret;
         }
-        do_set_pte(vma, address, fault_page, pte, false, false, false);
+        do_set_pte(vma, address, fault_page, pte, false, false);
         unlock_page(fault_page);
 unlock_out:
         pte_unmap_unlock(pte, ptl);
@@ -3111,7 +3089,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 }
                 goto uncharge_out;
         }
-        do_set_pte(vma, address, new_page, pte, true, true, false);
+        do_set_pte(vma, address, new_page, pte, true, true);
         mem_cgroup_commit_charge(new_page, memcg, false, false);
         lru_cache_add_active_or_unevictable(new_page, vma);
         pte_unmap_unlock(pte, ptl);
@@ -3164,7 +3142,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 put_page(fault_page);
                 return ret;
         }
-        do_set_pte(vma, address, fault_page, pte, true, false, false);
+        do_set_pte(vma, address, fault_page, pte, true, false);
         pte_unmap_unlock(pte, ptl);
 
         if (set_page_dirty(fault_page))
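Taken together, the mm/memory.c hunks in this merge do two things: do_wp_page() now calls the two-argument page_move_anon_rmap(old_page, vma), and the fault-around "old pte" changes are backed out, so do_set_pte() loses its bool old parameter, the pte_mkold() step disappears, and fault_around_bytes no longer depends on __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS. As a reading aid only, the sketch below shows how the touched region reads once the hunks are applied, i.e. as the file stands in v4.7; it is assembled from the context and "+" lines above, with everything outside the hunks elided, so it is an excerpt rather than complete, compilable code.

```c
/*
 * Excerpt, not complete kernel code: do_set_pte() after this merge.
 * The trailing "bool old" parameter and the pte_mkold() step are gone.
 */
void do_set_pte(struct vm_area_struct *vma, unsigned long address,
                struct page *page, pte_t *pte, bool write, bool anon)
{
        pte_t entry;

        /* ... */
        entry = mk_pte(page, vma->vm_page_prot);
        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
        if (anon) {
                inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
                page_add_new_anon_rmap(page, vma, address, false);
        }
        /* ... file-backed branch and PTE installation elided (outside the hunks) ... */
        update_mmu_cache(vma, address, pte);
}

/* Fault-around window: a fixed default, no longer gated on the arch access-flags hook. */
static unsigned long fault_around_bytes __read_mostly =
        rounddown_pow_of_two(65536);

/*
 * Call sites after the merge (from the "+" lines above):
 *   do_read_fault():   do_set_pte(vma, address, fault_page, pte, false, false);
 *   do_cow_fault():    do_set_pte(vma, address, new_page, pte, true, true);
 *   do_shared_fault(): do_set_pte(vma, address, fault_page, pte, true, false);
 */
```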