From 9f1c2674b328a69ab5a9b5a1c52405795ee4163f Mon Sep 17 00:00:00 2001 From: Eric Dumazet <edumazet@google.com> Date: Sun, 8 Oct 2017 21:44:51 -0700 Subject: net: memcontrol: defer call to mem_cgroup_sk_alloc() Instead of calling mem_cgroup_sk_alloc() from BH context, it is better to call it from inet_csk_accept() in process context. Not only does this remove code from mem_cgroup_sk_alloc(), it also fixes a bug: the listener might already have been dismantled, so css_get() could cause a use-after-free. Fixes: e994b2f0fb92 ("tcp: do not lock listener to process SYN packets") Signed-off-by: Eric Dumazet <edumazet@google.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Tejun Heo <tj@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net> --- mm/memcontrol.c | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d5f3a62887cf..661f046ad318 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5828,21 +5828,6 @@ void mem_cgroup_sk_alloc(struct sock *sk) if (!mem_cgroup_sockets_enabled) return; - /* - * Socket cloning can throw us here with sk_memcg already - * filled. It won't however, necessarily happen from - * process context. So the test for root memcg given - * the current task's memcg won't help us in this case. - * - * Respecting the original socket's memcg is a better - * decision in this case. - */ - if (sk->sk_memcg) { - BUG_ON(mem_cgroup_is_root(sk->sk_memcg)); - css_get(&sk->sk_memcg->css); - return; - } - rcu_read_lock(); memcg = mem_cgroup_from_task(current); if (memcg == root_mem_cgroup) -- cgit v1.2.3 From e20d103b6c37038ca27409f746f0b3351bcd0c44 Mon Sep 17 00:00:00 2001 From: Mark Hairgrove <mhairgrove@nvidia.com> Date: Fri, 13 Oct 2017 15:57:30 -0700 Subject: mm/migrate: fix indexing bug (off by one) and avoid out of bound access MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The index was incremented before its last use, so the second array could be dereferenced at an invalid address (not to mention that the entry we intended to clear was not properly cleared). Link: http://lkml.kernel.org/r/1506973525-16491-1-git-send-email-jglisse@redhat.com Fixes: 8315ada7f095bf ("mm/migrate: allow migrate_vma() to alloc new page on empty entry") Signed-off-by: Mark Hairgrove <mhairgrove@nvidia.com> Signed-off-by: Jérôme Glisse <jglisse@redhat.com> Cc: Reza Arbab <arbab@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> --- mm/migrate.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index 6954c1435833..e00814ca390e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2146,8 +2146,9 @@ static int migrate_vma_collect_hole(unsigned long start, unsigned long addr; for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { - migrate->src[migrate->npages++] = MIGRATE_PFN_MIGRATE; + migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; migrate->dst[migrate->npages] = 0; + migrate->npages++; migrate->cpages++; } -- cgit v1.2.3 From c02c30093254189a6ef55fed415a4ffb55a74cdf Mon Sep 17 00:00:00 2001 From: Yang Shi <yang.s@alibaba-inc.com> Date: Fri, 13 Oct 2017 15:57:37 -0700 Subject: mm/madvise.c: add description for MADV_WIPEONFORK and MADV_KEEPONFORK mm/madvise.c has a brief description of every MADV_ flag. Add descriptions for the newly added MADV_WIPEONFORK and MADV_KEEPONFORK. 
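To illustrate the semantics being documented, here is a minimal userspace sketch (not part of the patch; the MADV_WIPEONFORK fallback value below is the one from the uapi headers and is only defined in case older libc headers lack it):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/wait.h>

#ifndef MADV_WIPEONFORK
#define MADV_WIPEONFORK 18	/* assumed to match include/uapi/asm-generic/mman-common.h */
#endif

int main(void)
{
	size_t len = 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	strcpy(buf, "parent-only data");

	/* The child gets zero-filled memory for this range after fork();
	 * MADV_KEEPONFORK would undo this. */
	if (madvise(buf, len, MADV_WIPEONFORK))
		perror("madvise(MADV_WIPEONFORK)");

	if (fork() == 0) {
		printf("child : \"%s\"\n", buf);	/* prints an empty string */
		_exit(0);
	}
	wait(NULL);
	printf("parent: \"%s\"\n", buf);	/* parent still sees its data */
	return 0;
}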
Although the man page carries similar information, it is better to keep the in-code comments consistent across all of the flags. Link: http://lkml.kernel.org/r/1506117328-88228-1-git-send-email-yang.s@alibaba-inc.com Signed-off-by: Yang Shi <yang.s@alibaba-inc.com> Reviewed-by: Rik van Riel <riel@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> --- mm/madvise.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/madvise.c b/mm/madvise.c index 25bade36e9ca..fd70d6aabc3e 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -757,6 +757,9 @@ madvise_behavior_valid(int behavior) * MADV_DONTFORK - omit this area from child's address space when forking: * typically, to avoid COWing pages pinned by get_user_pages(). * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. + * MADV_WIPEONFORK - present the child process with zero-filled memory in this + * range after a fork. + * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK * MADV_HWPOISON - trigger memory error handler as if the given memory range * were corrupted by unrecoverable hardware memory failure. * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory. @@ -777,7 +780,9 @@ madvise_behavior_valid(int behavior) * zero - success * -EINVAL - start + len < 0, start is not page-aligned, * "behavior" is not a valid value, or application - * is attempting to release locked or shared pages. + * is attempting to release locked or shared pages, + * or the specified address range includes file, Huge TLB, + * MAP_SHARED or VMPFNMAP range. * -ENOMEM - addresses in the specified range are not currently * mapped, or are outside the AS of the process. * -EIO - an I/O error occurred while paging in data. -- cgit v1.2.3 From de55c8b251974247edda38e952da8e8dd71683ec Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin <aryabinin@virtuozzo.com> Date: Fri, 13 Oct 2017 15:57:43 -0700 Subject: mm/mempolicy: fix NUMA_INTERLEAVE_HIT counter Commit 3a321d2a3dde ("mm: change the call sites of numa statistics items") separated NUMA counters from zone counters, but the NUMA_INTERLEAVE_HIT call site wasn't updated to use the new interface. So alloc_page_interleave() actually increments NR_ZONE_INACTIVE_FILE instead of NUMA_INTERLEAVE_HIT. Fix this by using the __inc_numa_state() interface to increment NUMA_INTERLEAVE_HIT. 
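A quick way to see the counter move again is the following hedged userspace sketch (it assumes libnuma is installed, more than one NUMA node is online, and it is built with -lnuma; none of this is part of the patch):

#include <stdio.h>
#include <string.h>
#include <numa.h>

int main(void)
{
	size_t sz = 64UL << 20;	/* 64 MB interleaved across all nodes */
	char *p;

	if (numa_available() < 0) {
		fprintf(stderr, "no NUMA support on this system\n");
		return 1;
	}

	p = numa_alloc_interleaved(sz);
	if (!p)
		return 1;
	memset(p, 0, sz);	/* fault the pages in so interleave hits are counted */
	numa_free(p, sz);

	/* Compare the interleave_hit field reported by the numastat tool (or
	 * the per-node numastat files under /sys/devices/system/node) before
	 * and after the run: with the fix it increases, instead of
	 * nr_zone_inactive_file silently drifting. */
	return 0;
}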
Link: http://lkml.kernel.org/r/20171003191003.8573-1-aryabinin@virtuozzo.com Fixes: 3a321d2a3dde ("mm: change the call sites of numa statistics items") Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com> Acked-by: Mel Gorman <mgorman@techsingularity.net> Cc: Kemi Wang <kemi.wang@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> --- mm/mempolicy.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 006ba625c0b8..a2af6d58a68f 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1920,8 +1920,11 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, struct page *page; page = __alloc_pages(gfp, order, nid); - if (page && page_to_nid(page) == nid) - inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); + if (page && page_to_nid(page) == nid) { + preempt_disable(); + __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT); + preempt_enable(); + } return page; } -- cgit v1.2.3 From af0db981f35ea99b00a0b249bf0bedef8cf972e8 Mon Sep 17 00:00:00 2001 From: Zi Yan <zi.yan@cs.rutgers.edu> Date: Fri, 13 Oct 2017 15:57:47 -0700 Subject: mm: remove unnecessary WARN_ONCE in page_vma_mapped_walk(). A non present pmd entry can appear after pmd_lock is taken in page_vma_mapped_walk(), even if THP migration is not enabled. The WARN_ONCE is unnecessary. Link: http://lkml.kernel.org/r/20171003142606.12324-1-zi.yan@sent.com Fixes: 616b8371539a ("mm: thp: enable thp migration in generic path") Signed-off-by: Zi Yan <zi.yan@cs.rutgers.edu> Reported-by: Abdul Haleem <abdhalee@linux.vnet.ibm.com> Tested-by: Abdul Haleem <abdhalee@linux.vnet.ibm.com> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> --- mm/page_vma_mapped.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 6a03946469a9..eb462e7db0a9 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -167,8 +167,7 @@ restart: return not_found(pvmw); return true; } - } else - WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); + } return not_found(pvmw); } else { /* THP pmd was split under us: handle on pte level */ -- cgit v1.2.3 From ef4650144e76ae361fe4b8c9a0afcd53074cd520 Mon Sep 17 00:00:00 2001 From: Boris Brezillon <boris.brezillon@free-electrons.com> Date: Fri, 13 Oct 2017 15:58:01 -0700 Subject: mm/cma.c: take __GFP_NOWARN into account in cma_alloc() cma_alloc() unconditionally prints an INFO message when the CMA allocation fails. Make this message conditional on the non-presence of __GFP_NOWARN in gfp_mask. This patch aims at removing INFO messages that are displayed when the VC4 driver tries to allocate buffer objects. From the driver perspective an allocation failure is acceptable, and the driver can possibly do something to make following allocation succeed (like flushing the VC4 internal cache). 
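A hedged sketch of how a driver caller along those lines can use the new behaviour (the helper name, page count and alignment are illustrative and not taken from the VC4 driver; only the gfp_mask handling comes from this patch):

#include <linux/cma.h>
#include <linux/gfp.h>

/* First try quietly: a failure here is an expected, recoverable event. */
static struct page *bo_alloc_pages(struct cma *cma, size_t nr_pages,
				   unsigned int align)
{
	struct page *pages;

	pages = cma_alloc(cma, nr_pages, align, GFP_KERNEL | __GFP_NOWARN);
	if (!pages) {
		/* e.g. flush driver-internal caches here, then retry once,
		 * this time letting CMA log its diagnostics */
		pages = cma_alloc(cma, nr_pages, align, GFP_KERNEL);
	}
	return pages;
}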
Link: http://lkml.kernel.org/r/20171004125447.15195-1-boris.brezillon@free-electrons.com Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com> Acked-by: Laura Abbott <labbott@redhat.com> Cc: Jaewon Kim <jaewon31.kim@samsung.com> Cc: David Airlie <airlied@linux.ie> Cc: Daniel Vetter <daniel@ffwll.ch> Cc: Eric Anholt <eric@anholt.net> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> --- mm/cma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/cma.c b/mm/cma.c index c0da318c020e..022e52bd8370 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -460,7 +460,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, trace_cma_alloc(pfn, page, count, align); - if (ret) { + if (ret && !(gfp_mask & __GFP_NOWARN)) { pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n", __func__, count, ret); cma_debug_show_areas(cma); -- cgit v1.2.3 From b8c8a338f75e052d9fa2fed851259320af412e3f Mon Sep 17 00:00:00 2001 From: Johannes Weiner <hannes@cmpxchg.org> Date: Fri, 13 Oct 2017 15:58:05 -0700 Subject: Revert "vmalloc: back off when the current task is killed" This reverts commits 5d17a73a2ebe ("vmalloc: back off when the current task is killed") and 171012f56127 ("mm: don't warn when vmalloc() fails due to a fatal signal"). Commit 5d17a73a2ebe ("vmalloc: back off when the current task is killed") made all vmalloc allocations from a signal-killed task fail. We have seen crashes in the tty driver from this, where a killed task exiting tries to switch back to N_TTY, fails n_tty_open because of the vmalloc failing, and later crashes when dereferencing tty->disc_data. Arguably, relying on a vmalloc() call to succeed in order to properly exit a task is not the most robust way of doing things. There will be a follow-up patch to the tty code to fall back to the N_NULL ldisc. But the justification to make that vmalloc() call fail like this isn't convincing, either. The patch mentions an OOM victim exhausting the memory reserves and thus deadlocking the machine. But the OOM killer is only one, improbable source of fatal signals. It doesn't make sense to fail allocations preemptively with plenty of memory in most cases. The patch doesn't mention real-life instances where vmalloc sites would exhaust memory, which makes it sound more like a theoretical issue to begin with. But just in case, the OOM access to memory reserves has been restricted on the allocator side in cd04ae1e2dc8 ("mm, oom: do not rely on TIF_MEMDIE for memory reserves access"), which should take care of any theoretical concerns on that front. Revert this patch, and the follow-up that suppresses the allocation warnings when we fail the allocations due to a signal. 
Link: http://lkml.kernel.org/r/20171004185906.GB2136@cmpxchg.org Fixes: 171012f56127 ("mm: don't warn when vmalloc() fails due to a fatal signal") Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Vlastimil Babka <vbabka@suse.cz> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Alan Cox <alan@llwyncelyn.cymru> Cc: Christoph Hellwig <hch@lst.de> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> --- mm/vmalloc.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 8a43db6284eb..673942094328 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1695,11 +1695,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, for (i = 0; i < area->nr_pages; i++) { struct page *page; - if (fatal_signal_pending(current)) { - area->nr_pages = i; - goto fail_no_warn; - } - if (node == NUMA_NO_NODE) page = alloc_page(alloc_mask|highmem_mask); else @@ -1723,7 +1718,6 @@ fail: warn_alloc(gfp_mask, NULL, "vmalloc: allocation failure, allocated %ld of %ld bytes", (area->nr_pages*PAGE_SIZE), area->size); -fail_no_warn: vfree(area->addr); return NULL; } -- cgit v1.2.3 From a7b100953aa33a5bbdc3e5e7f2241b9c0704606e Mon Sep 17 00:00:00 2001 From: Will Deacon <will.deacon@arm.com> Date: Fri, 13 Oct 2017 15:58:25 -0700 Subject: mm: page_vma_mapped: ensure pmd is loaded with READ_ONCE outside of lock Loading the pmd without holding the pmd_lock exposes us to races with concurrent updaters of the page tables but, worse still, it also allows the compiler to cache the pmd value in a register and reuse it later on, even if we've performed a READ_ONCE in between and seen a more recent value. In the case of page_vma_mapped_walk, this leads to the following crash when the pmd loaded for the initial pmd_trans_huge check is all zeroes and a subsequent valid table entry is loaded by check_pmd. We then proceed into map_pte, but the compiler re-uses the zero entry inside pte_offset_map, resulting in a junk pointer being installed in pvmw->pte: PC is at check_pte+0x20/0x170 LR is at page_vma_mapped_walk+0x2e0/0x540 [...] Process doio (pid: 2463, stack limit = 0xffff00000f2e8000) Call trace: check_pte+0x20/0x170 page_vma_mapped_walk+0x2e0/0x540 page_mkclean_one+0xac/0x278 rmap_walk_file+0xf0/0x238 rmap_walk+0x64/0xa0 page_mkclean+0x90/0xa8 clear_page_dirty_for_io+0x84/0x2a8 mpage_submit_page+0x34/0x98 mpage_process_page_bufs+0x164/0x170 mpage_prepare_extent_to_map+0x134/0x2b8 ext4_writepages+0x484/0xe30 do_writepages+0x44/0xe8 __filemap_fdatawrite_range+0xbc/0x110 file_write_and_wait_range+0x48/0xd8 ext4_sync_file+0x80/0x4b8 vfs_fsync_range+0x64/0xc0 SyS_msync+0x194/0x1e8 This patch fixes the problem by ensuring that READ_ONCE is used before the initial checks on the pmd, and this value is subsequently used when checking whether or not the pmd is present. pmd_check is removed and the pmd_present check is inlined directly. Link: http://lkml.kernel.org/r/1507222630-5839-1-git-send-email-will.deacon@arm.com Fixes: f27176cfc363 ("mm: convert page_mkclean_one() to use page_vma_mapped_walk()") Signed-off-by: Will Deacon <will.deacon@arm.com> Tested-by: Yury Norov <ynorov@caviumnetworks.com> Tested-by: Richard Ruigrok <rruigrok@codeaurora.org> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: "Paul E. 
McKenney" <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> --- mm/page_vma_mapped.c | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index eb462e7db0a9..53afbb919a1c 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -6,17 +6,6 @@ #include "internal.h" -static inline bool check_pmd(struct page_vma_mapped_walk *pvmw) -{ - pmd_t pmde; - /* - * Make sure we don't re-load pmd between present and !trans_huge check. - * We need a consistent view. - */ - pmde = READ_ONCE(*pvmw->pmd); - return pmd_present(pmde) && !pmd_trans_huge(pmde); -} - static inline bool not_found(struct page_vma_mapped_walk *pvmw) { page_vma_mapped_walk_done(pvmw); @@ -116,6 +105,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) pgd_t *pgd; p4d_t *p4d; pud_t *pud; + pmd_t pmde; /* The only possible pmd mapping has been handled on last iteration */ if (pvmw->pmd && !pvmw->pte) @@ -148,7 +138,13 @@ restart: if (!pud_present(*pud)) return false; pvmw->pmd = pmd_offset(pud, pvmw->address); - if (pmd_trans_huge(*pvmw->pmd) || is_pmd_migration_entry(*pvmw->pmd)) { + /* + * Make sure the pmd value isn't cached in a register by the + * compiler and used as a stale value after we've observed a + * subsequent update. + */ + pmde = READ_ONCE(*pvmw->pmd); + if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) { pvmw->ptl = pmd_lock(mm, pvmw->pmd); if (likely(pmd_trans_huge(*pvmw->pmd))) { if (pvmw->flags & PVMW_MIGRATION) @@ -174,9 +170,8 @@ restart: spin_unlock(pvmw->ptl); pvmw->ptl = NULL; } - } else { - if (!check_pmd(pvmw)) - return false; + } else if (!pmd_present(pmde)) { + return false; } if (!map_pte(pvmw)) goto next_pte; -- cgit v1.2.3 From 61b639723be5a9fc4812d5d85cb769589afa5a38 Mon Sep 17 00:00:00 2001 From: Huang Ying <ying.huang@intel.com> Date: Fri, 13 Oct 2017 15:58:29 -0700 Subject: mm, swap: use page-cluster as max window of VMA based swap readahead When the VMA based swap readahead was introduced, a new knob /sys/kernel/mm/swap/vma_ra_max_order was added as the max window of VMA swap readahead. This is to make it possible to use different max window for VMA based readahead and original physical readahead. But Minchan Kim pointed out that this will cause a regression because setting page-cluster sysctl to zero cannot disable swap readahead with the change. To fix the regression, the page-cluster sysctl is used as the max window of both the VMA based swap readahead and original physical swap readahead. If more fine grained control is needed in the future, more knobs can be added as the subordinate knobs of the page-cluster sysctl. The vma_ra_max_order knob is deleted. Because the knob was introduced in v4.14-rc1, and this patch is targeting being merged before v4.14 releasing, there should be no existing users of this newly added ABI. 
Link: http://lkml.kernel.org/r/20171011070847.16003-1-ying.huang@intel.com Fixes: ec560175c0b6fce ("mm, swap: VMA based swap readahead") Signed-off-by: "Huang, Ying" <ying.huang@intel.com> Reported-by: Minchan Kim <minchan@kernel.org> Acked-by: Minchan Kim <minchan@kernel.org> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Rik van Riel <riel@redhat.com> Cc: Shaohua Li <shli@kernel.org> Cc: Hugh Dickins <hughd@google.com> Cc: Fengguang Wu <fengguang.wu@intel.com> Cc: Tim Chen <tim.c.chen@intel.com> Cc: Dave Hansen <dave.hansen@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> --- Documentation/ABI/testing/sysfs-kernel-mm-swap | 10 ------- mm/swap_state.c | 41 +++++--------------------- 2 files changed, 7 insertions(+), 44 deletions(-) (limited to 'mm') diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-swap b/Documentation/ABI/testing/sysfs-kernel-mm-swap index 587db52084c7..94672016c268 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-swap +++ b/Documentation/ABI/testing/sysfs-kernel-mm-swap @@ -14,13 +14,3 @@ Description: Enable/disable VMA based swap readahead. still used for tmpfs etc. other users. If set to false, the global swap readahead algorithm will be used for all swappable pages. - -What: /sys/kernel/mm/swap/vma_ra_max_order -Date: August 2017 -Contact: Linux memory management mailing list <linux-mm@kvack.org> -Description: The max readahead size in order for VMA based swap readahead - - VMA based swap readahead algorithm will readahead at - most 1 << max_order pages for each readahead. The - real readahead size for each readahead will be scaled - according to the estimation algorithm. diff --git a/mm/swap_state.c b/mm/swap_state.c index ed91091d1e68..05b6803f0cce 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -39,10 +39,6 @@ struct address_space *swapper_spaces[MAX_SWAPFILES]; static unsigned int nr_swapper_spaces[MAX_SWAPFILES]; bool swap_vma_readahead = true; -#define SWAP_RA_MAX_ORDER_DEFAULT 3 - -static int swap_ra_max_order = SWAP_RA_MAX_ORDER_DEFAULT; - #define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2) #define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1) #define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK @@ -664,6 +660,13 @@ struct page *swap_readahead_detect(struct vm_fault *vmf, pte_t *tpte; #endif + max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster), + SWAP_RA_ORDER_CEILING); + if (max_win == 1) { + swap_ra->win = 1; + return NULL; + } + faddr = vmf->address; entry = pte_to_swp_entry(vmf->orig_pte); if ((unlikely(non_swap_entry(entry)))) @@ -672,12 +675,6 @@ struct page *swap_readahead_detect(struct vm_fault *vmf, if (page) return page; - max_win = 1 << READ_ONCE(swap_ra_max_order); - if (max_win == 1) { - swap_ra->win = 1; - return NULL; - } - fpfn = PFN_DOWN(faddr); swap_ra_info = GET_SWAP_RA_VAL(vma); pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info)); @@ -786,32 +783,8 @@ static struct kobj_attribute vma_ra_enabled_attr = __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show, vma_ra_enabled_store); -static ssize_t vma_ra_max_order_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", swap_ra_max_order); -} -static ssize_t vma_ra_max_order_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) -{ - int err, v; - - err = kstrtoint(buf, 10, &v); - if (err || v > SWAP_RA_ORDER_CEILING || v <= 0) - return -EINVAL; - - swap_ra_max_order = v; - - return count; -} 
-static struct kobj_attribute vma_ra_max_order_attr = - __ATTR(vma_ra_max_order, 0644, vma_ra_max_order_show, - vma_ra_max_order_store); - static struct attribute *swap_attrs[] = { &vma_ra_enabled_attr.attr, - &vma_ra_max_order_attr.attr, NULL, }; -- cgit v1.2.3 From 0ea7eeec24be5f04ae80d68f5b1ea3a11f49de2f Mon Sep 17 00:00:00 2001 From: Daniel Borkmann <daniel@iogearbox.net> Date: Tue, 17 Oct 2017 16:55:52 +0200 Subject: mm, percpu: add support for __GFP_NOWARN flag Add an option for pcpu_alloc() to support the __GFP_NOWARN flag. Currently, we always throw a warning when size or alignment is unsupported (and also dump stack on failed allocation requests). The warning itself is harmless since we return NULL for any failed request, which callers are required to handle anyway. However, it becomes harmful when panic_on_warn is set. The rationale for the WARN() in pcpu_alloc() is that larger-than-supported allocation requests can be tracked, so that allocation limits can be tweaked if warranted. This makes sense for in-kernel users. However, there are users of the pcpu allocator where the allocation size is derived from user space requests, e.g. when creating BPF maps. In these cases, the requests should fail gracefully without throwing a splat. The current work-around was to check the allocation size against the PCPU_MIN_UNIT_SIZE upper limit at the call sites and bail out before calling pcpu_alloc(), just to avoid triggering the WARN(). This is bad in multiple ways: PCPU_MIN_UNIT_SIZE is an implementation detail, and having the checks at the call sites only complicates the code for no good reason. Thus, let's fix it generically by supporting the __GFP_NOWARN flag, which users can then pass via the __alloc_percpu_gfp() helper. Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Cc: Tejun Heo <tj@kernel.org> Cc: Mark Rutland <mark.rutland@arm.com> Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net> --- mm/percpu.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/percpu.c b/mm/percpu.c index aa121cef76de..a0e0c82c1e4c 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1329,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) * @gfp: allocation flags * * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't - * contain %GFP_KERNEL, the allocation is atomic. + * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN + * then no warning will be triggered on invalid or failed allocation + * requests. * * RETURNS: * Percpu pointer to the allocated area on success, NULL on failure. 
@@ -1337,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, gfp_t gfp) { + bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; + bool do_warn = !(gfp & __GFP_NOWARN); static int warn_limit = 10; struct pcpu_chunk *chunk; const char *err; - bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; int slot, off, cpu, ret; unsigned long flags; void __percpu *ptr; @@ -1361,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || !is_power_of_2(align))) { - WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n", + WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", size, align); return NULL; } @@ -1482,7 +1485,7 @@ fail_unlock: fail: trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); - if (!is_atomic && warn_limit) { + if (!is_atomic && do_warn && warn_limit) { pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", size, align, is_atomic, err); dump_stack(); @@ -1507,7 +1510,9 @@ fail: * * Allocate zero-filled percpu area of @size bytes aligned at @align. If * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can - * be called from any context but is a lot more likely to fail. + * be called from any context but is a lot more likely to fail. If @gfp + * has __GFP_NOWARN then no warning will be triggered on invalid or failed + * allocation requests. * * RETURNS: * Percpu pointer to the allocated area on success, NULL on failure. -- cgit v1.2.3
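For a caller of the kind the changelog describes (e.g. a BPF-style map whose element size comes from user space), the intended usage is roughly the following sketch; the wrapper name and the alignment of 8 are illustrative, and only __alloc_percpu_gfp() and the __GFP_NOWARN handling come from this patch:

#include <linux/percpu.h>
#include <linux/gfp.h>

/* value_size is user-controlled and may exceed what the percpu allocator
 * supports.  With __GFP_NOWARN an oversized or failed request now simply
 * returns NULL instead of triggering a WARN() splat, so the old
 * PCPU_MIN_UNIT_SIZE pre-check at the call site can be dropped. */
static void __percpu *alloc_user_sized_pcpu(size_t value_size)
{
	return __alloc_percpu_gfp(value_size, 8, GFP_KERNEL | __GFP_NOWARN);
}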