Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction.c    |  2
-rw-r--r--  mm/kasan/common.c  |  4
-rw-r--r--  mm/kasan/hw_tags.c | 32
-rw-r--r--  mm/kasan/sw_tags.c |  7
-rw-r--r--  mm/memfd.c         |  4
-rw-r--r--  mm/mempool.c       |  6
-rw-r--r--  mm/mlock.c         | 22
-rw-r--r--  mm/mmap.c          |  4
-rw-r--r--  mm/page_alloc.c    | 66
-rw-r--r--  mm/shmem.c         | 10
10 files changed, 108 insertions, 49 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 7d41b58fb17c..3a509fbf2bea 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1955,7 +1955,7 @@ static inline bool is_via_compact_memory(int order)
 
 static bool kswapd_is_running(pg_data_t *pgdat)
 {
-        return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING);
+        return pgdat->kswapd && task_is_running(pgdat->kswapd);
 }
 
 /*
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 267500896b1e..2baf121fb8c5 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -100,7 +100,7 @@ slab_flags_t __kasan_never_merge(void)
         return 0;
 }
 
-void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
 {
         u8 tag;
         unsigned long i;
@@ -114,7 +114,7 @@ void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
         kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
 }
 
-void __kasan_free_pages(struct page *page, unsigned int order, bool init)
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
 {
         if (likely(!PageHighMem(page)))
                 kasan_poison(page_address(page), PAGE_SIZE << order,
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index d867b22ddbb7..4ea8c368b5b8 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -216,6 +216,38 @@ void __init kasan_init_hw_tags(void)
         pr_info("KernelAddressSanitizer initialized\n");
 }
 
+void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
+{
+        /*
+         * This condition should match the one in post_alloc_hook() in
+         * page_alloc.c.
+         */
+        bool init = !want_init_on_free() && want_init_on_alloc(flags);
+
+        if (flags & __GFP_SKIP_KASAN_POISON)
+                SetPageSkipKASanPoison(page);
+
+        if (flags & __GFP_ZEROTAGS) {
+                int i;
+
+                for (i = 0; i != 1 << order; ++i)
+                        tag_clear_highpage(page + i);
+        } else {
+                kasan_unpoison_pages(page, order, init);
+        }
+}
+
+void kasan_free_pages(struct page *page, unsigned int order)
+{
+        /*
+         * This condition should match the one in free_pages_prepare() in
+         * page_alloc.c.
+         */
+        bool init = want_init_on_free();
+
+        kasan_poison_pages(page, order, init);
+}
+
 #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
 void kasan_set_tagging_report_once(bool state)
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index 675e67375fb5..bd3f540feb47 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -166,3 +166,10 @@ void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
         kasan_poison((void *)addr, size, tag, false);
 }
 EXPORT_SYMBOL(__hwasan_tag_memory);
+
+void kasan_tag_mismatch(unsigned long addr, unsigned long access_info,
+                        unsigned long ret_ip)
+{
+        kasan_report(addr, 1 << (access_info & 0xf), access_info & 0x10,
+                     ret_ip);
+}
diff --git a/mm/memfd.c b/mm/memfd.c
index 2647c898990c..081dd33e6a61 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -297,9 +297,9 @@ SYSCALL_DEFINE2(memfd_create,
         }
 
         if (flags & MFD_HUGETLB) {
-                struct user_struct *user = NULL;
+                struct ucounts *ucounts = NULL;
 
-                file = hugetlb_file_setup(name, 0, VM_NORESERVE, &user,
+                file = hugetlb_file_setup(name, 0, VM_NORESERVE, &ucounts,
                                         HUGETLB_ANONHUGE_INODE,
                                         (flags >> MFD_HUGE_SHIFT) &
                                         MFD_HUGE_MASK);
diff --git a/mm/mempool.c b/mm/mempool.c
index a258cf4de575..0b8afbec3e35 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -106,7 +106,8 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                 kasan_slab_free_mempool(element);
         else if (pool->alloc == mempool_alloc_pages)
-                kasan_free_pages(element, (unsigned long)pool->pool_data, false);
+                kasan_poison_pages(element, (unsigned long)pool->pool_data,
+                                   false);
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element)
@@ -114,7 +115,8 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
                 kasan_unpoison_range(element, __ksize(element));
         else if (pool->alloc == mempool_alloc_pages)
-                kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
+                kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
+                                     false);
 }
 
 static __always_inline void add_element(mempool_t *pool, void *element)
diff --git a/mm/mlock.c b/mm/mlock.c
index df590fda5688..e338ebc4ad29 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -817,9 +817,10 @@ SYSCALL_DEFINE0(munlockall)
  */
 static DEFINE_SPINLOCK(shmlock_user_lock);
 
-int user_shm_lock(size_t size, struct user_struct *user)
+int user_shm_lock(size_t size, struct ucounts *ucounts)
 {
         unsigned long lock_limit, locked;
+        long memlock;
         int allowed = 0;
 
         locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -828,21 +829,26 @@ int user_shm_lock(size_t size, struct user_struct *user)
                 allowed = 1;
         lock_limit >>= PAGE_SHIFT;
         spin_lock(&shmlock_user_lock);
-        if (!allowed &&
-            locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
+        memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
+
+        if (!allowed && (memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
+                dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
+                goto out;
+        }
+        if (!get_ucounts(ucounts)) {
+                dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
                 goto out;
-        get_uid(user);
-        user->locked_shm += locked;
+        }
         allowed = 1;
 out:
         spin_unlock(&shmlock_user_lock);
         return allowed;
 }
 
-void user_shm_unlock(size_t size, struct user_struct *user)
+void user_shm_unlock(size_t size, struct ucounts *ucounts)
 {
         spin_lock(&shmlock_user_lock);
-        user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+        dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
         spin_unlock(&shmlock_user_lock);
-        free_uid(user);
+        put_ucounts(ucounts);
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index d8c92ae50565..aa9de981b659 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1609,7 +1609,7 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
                         goto out_fput;
                 }
         } else if (flags & MAP_HUGETLB) {
-                struct user_struct *user = NULL;
+                struct ucounts *ucounts = NULL;
                 struct hstate *hs;
 
                 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
@@ -1625,7 +1625,7 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
                  */
                 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
                                 VM_NORESERVE,
-                                &user, HUGETLB_ANONHUGE_INODE,
+                                &ucounts, HUGETLB_ANONHUGE_INODE,
                                 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
                 if (IS_ERR(file))
                         return PTR_ERR(file);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index db00ee8d79d2..0817d88383d5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -387,7 +387,7 @@ int page_group_by_mobility_disabled __read_mostly;
 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
 
 /*
- * Calling kasan_free_pages() only after deferred memory initialization
+ * Calling kasan_poison_pages() only after deferred memory initialization
  * has completed. Poisoning pages during deferred memory init will greatly
  * lengthen the process and cause problem in large memory systems as the
  * deferred pages initialization is done with interrupt disabled.
@@ -399,15 +399,12 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
  * on-demand allocation and then freed again before the deferred pages
  * initialization is done, but this is not likely to happen.
  */
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-                                                bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
 {
-        if (static_branch_unlikely(&deferred_pages))
-                return;
-        if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-                        (fpi_flags & FPI_SKIP_KASAN_POISON))
-                return;
-        kasan_free_pages(page, order, init);
+        return static_branch_unlikely(&deferred_pages) ||
+               (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+                (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
+               PageSkipKASanPoison(page);
 }
 
 /* Returns true if the struct page for the pfn is uninitialised */
@@ -458,13 +455,11 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
         return false;
 }
 #else
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-                                                bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
 {
-        if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-                        (fpi_flags & FPI_SKIP_KASAN_POISON))
-                return;
-        kasan_free_pages(page, order, init);
+        return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+                (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
+               PageSkipKASanPoison(page);
 }
 
 static inline bool early_page_uninitialised(unsigned long pfn)
@@ -1282,10 +1277,16 @@ out:
         return ret;
 }
 
-static void kernel_init_free_pages(struct page *page, int numpages)
+static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags)
 {
         int i;
 
+        if (zero_tags) {
+                for (i = 0; i < numpages; i++)
+                        tag_clear_highpage(page + i);
+                return;
+        }
+
         /* s390's use of memset() could override KASAN redzones. */
         kasan_disable_current();
         for (i = 0; i < numpages; i++) {
@@ -1301,7 +1302,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
                         unsigned int order, bool check_free, fpi_t fpi_flags)
 {
         int bad = 0;
-        bool init;
+        bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
 
         VM_BUG_ON_PAGE(PageTail(page), page);
 
@@ -1370,10 +1371,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
          * With hardware tag-based KASAN, memory tags must be set before the
          * page becomes unavailable via debug_pagealloc or arch_free_page.
          */
-        init = want_init_on_free();
-        if (init && !kasan_has_integrated_init())
-                kernel_init_free_pages(page, 1 << order);
-        kasan_free_nondeferred_pages(page, order, init, fpi_flags);
+        if (kasan_has_integrated_init()) {
+                if (!skip_kasan_poison)
+                        kasan_free_pages(page, order);
+        } else {
+                bool init = want_init_on_free();
+
+                if (init)
+                        kernel_init_free_pages(page, 1 << order, false);
+                if (!skip_kasan_poison)
+                        kasan_poison_pages(page, order, init);
+        }
 
         /*
          * arch_free_page() can make the page's contents inaccessible. s390
@@ -2399,8 +2407,6 @@ static bool check_new_pages(struct page *page, unsigned int order)
 inline void post_alloc_hook(struct page *page, unsigned int order,
                                 gfp_t gfp_flags)
 {
-        bool init;
-
         set_page_private(page, 0);
         set_page_refcounted(page);
 
@@ -2419,10 +2425,16 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
          * kasan_alloc_pages and kernel_init_free_pages must be
          * kept together to avoid discrepancies in behavior.
          */
-        init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
-        kasan_alloc_pages(page, order, init);
-        if (init && !kasan_has_integrated_init())
-                kernel_init_free_pages(page, 1 << order);
+        if (kasan_has_integrated_init()) {
+                kasan_alloc_pages(page, order, gfp_flags);
+        } else {
+                bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
+
+                kasan_unpoison_pages(page, order, init);
+                if (init)
+                        kernel_init_free_pages(page, 1 << order,
+                                               gfp_flags & __GFP_ZEROTAGS);
+        }
 
         set_page_owner(page, order, gfp_flags);
 }
diff --git a/mm/shmem.c b/mm/shmem.c
index e72931b9246c..6268b9b4e41a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2239,7 +2239,7 @@ static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
 }
 #endif
 
-int shmem_lock(struct file *file, int lock, struct user_struct *user)
+int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
 {
         struct inode *inode = file_inode(file);
         struct shmem_inode_info *info = SHMEM_I(inode);
@@ -2251,13 +2251,13 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
          * no serialization needed when called from shm_destroy().
          */
         if (lock && !(info->flags & VM_LOCKED)) {
-                if (!user_shm_lock(inode->i_size, user))
+                if (!user_shm_lock(inode->i_size, ucounts))
                         goto out_nomem;
                 info->flags |= VM_LOCKED;
                 mapping_set_unevictable(file->f_mapping);
         }
-        if (!lock && (info->flags & VM_LOCKED) && user) {
-                user_shm_unlock(inode->i_size, user);
+        if (!lock && (info->flags & VM_LOCKED) && ucounts) {
+                user_shm_unlock(inode->i_size, ucounts);
                 info->flags &= ~VM_LOCKED;
                 mapping_clear_unevictable(file->f_mapping);
         }
@@ -4104,7 +4104,7 @@ int shmem_unuse(unsigned int type, bool frontswap,
         return 0;
 }
 
-int shmem_lock(struct file *file, int lock, struct user_struct *user)
+int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
 {
         return 0;
 }
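Note on the mm/mlock.c hunk: the new user_shm_lock() switches from check-then-account to charge-then-check. It optimistically adds the locked pages with inc_rlimit_ucounts(), treats a LONG_MAX return as overflow, compares the returned total against the RLIMIT_MEMLOCK limit, and rolls the charge back with dec_rlimit_ucounts() when the limit is exceeded (absent CAP_IPC_LOCK) or when get_ucounts() fails. The following is a minimal standalone C model of that charge/rollback pattern only; the counter type and the charge(), uncharge(), and try_lock_pages() helpers are invented for illustration and are not the kernel's ucounts API.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct counter { long charged; };

/* Returns the new total, or LONG_MAX if the addition would overflow. */
static long charge(struct counter *c, long pages)
{
        if (c->charged > LONG_MAX - pages)
                return LONG_MAX;
        c->charged += pages;
        return c->charged;
}

static void uncharge(struct counter *c, long pages)
{
        c->charged -= pages;
}

static bool try_lock_pages(struct counter *c, long pages, long limit,
                           bool capable_ipc_lock)
{
        /* Charge first, then check the returned total against the limit. */
        long total = charge(c, pages);

        if ((total == LONG_MAX || total > limit) && !capable_ipc_lock) {
                uncharge(c, pages);     /* roll back the failed charge */
                return false;
        }
        return true;
}

int main(void)
{
        struct counter c = { 0 };

        printf("%d\n", try_lock_pages(&c, 10, 16, false));  /* 1: fits           */
        printf("%d\n", try_lock_pages(&c, 10, 16, false));  /* 0: over the limit */
        printf("%d\n", try_lock_pages(&c, 10, 16, true));   /* 1: CAP_IPC_LOCK   */
        return 0;
}

The kernel version performs the same sequence under shmlock_user_lock and additionally takes a reference with get_ucounts(), so that user_shm_unlock() can later drop both the charge and the reference via dec_rlimit_ucounts() and put_ucounts().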