From 5bb1bb353cfe343fc3c84faf06f72ba309fde541 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Thu, 7 Jan 2021 13:46:11 -0800
Subject: mm: Don't build mm_dump_obj() on CONFIG_PRINTK=n kernels

The mem_dump_obj() functionality adds a few hundred bytes, which is a
small price to pay.  Except on kernels built with CONFIG_PRINTK=n, in
which mem_dump_obj() messages will be suppressed.  This commit therefore
makes mem_dump_obj() be a static inline empty function on kernels built
with CONFIG_PRINTK=n and excludes all of its support functions as well.
This avoids kernel bloat on systems that cannot use mem_dump_obj().

Cc: Christoph Lameter
Cc: Pekka Enberg
Cc: David Rientjes
Cc: Joonsoo Kim
Cc:
Suggested-by: Andrew Morton
Signed-off-by: Paul E. McKenney
---
 include/linux/vmalloc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux/vmalloc.h')

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index df92211cf771..3de7be6dd17c 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -241,7 +241,7 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 int register_vmap_purge_notifier(struct notifier_block *nb);
 int unregister_vmap_purge_notifier(struct notifier_block *nb);
 
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
 bool vmalloc_dump_obj(void *object);
 #else
 static inline bool vmalloc_dump_obj(void *object) { return false; }
-- cgit v1.2.3

From bbc180a5adb05ee8053fab7a0c0bd56c5964240e Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Thu, 29 Apr 2021 22:58:26 -0700
Subject: mm: HUGE_VMAP arch support cleanup

This changes the awkward approach where architectures provide init
functions to determine which levels they can provide large mappings
for, to one where the arch is queried for each call.

This removes code and indirection, and allows constant-folding of dead
code for unsupported levels.

This also adds a prot argument to the arch query.  This is unused
currently but could help with some architectures (e.g., some powerpc
processors can't map uncacheable memory with large pages).

Link: https://lkml.kernel.org/r/20210317062402.533919-7-npiggin@gmail.com
Signed-off-by: Nicholas Piggin
Reviewed-by: Ding Tianhong
Acked-by: Catalin Marinas [arm64]
Cc: Will Deacon
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Borislav Petkov
Cc: "H.
Peter Anvin" Cc: Christoph Hellwig Cc: Miaohe Lin Cc: Michael Ellerman Cc: Russell King Cc: Uladzislau Rezki (Sony) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/vmalloc.h | 8 +++ arch/arm64/mm/mmu.c | 10 ++-- arch/powerpc/include/asm/vmalloc.h | 8 +++ arch/powerpc/mm/book3s64/radix_pgtable.c | 8 +-- arch/x86/include/asm/vmalloc.h | 7 +++ arch/x86/mm/ioremap.c | 12 ++-- include/linux/io.h | 9 --- include/linux/vmalloc.h | 6 ++ init/main.c | 1 - mm/debug_vm_pgtable.c | 4 +- mm/ioremap.c | 94 ++++++++++++++------------------ 11 files changed, 87 insertions(+), 80 deletions(-) (limited to 'include/linux/vmalloc.h') diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h index 2ca708ab9b20..597b40405319 100644 --- a/arch/arm64/include/asm/vmalloc.h +++ b/arch/arm64/include/asm/vmalloc.h @@ -1,4 +1,12 @@ #ifndef _ASM_ARM64_VMALLOC_H #define _ASM_ARM64_VMALLOC_H +#include + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +bool arch_vmap_p4d_supported(pgprot_t prot); +bool arch_vmap_pud_supported(pgprot_t prot); +bool arch_vmap_pmd_supported(pgprot_t prot); +#endif + #endif /* _ASM_ARM64_VMALLOC_H */ diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index d563335ad43f..8436e0755361 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1339,12 +1339,12 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) return dt_virt; } -int __init arch_ioremap_p4d_supported(void) +bool arch_vmap_p4d_supported(pgprot_t prot) { - return 0; + return false; } -int __init arch_ioremap_pud_supported(void) +bool arch_vmap_pud_supported(pgprot_t prot) { /* * Only 4k granule supports level 1 block mappings. @@ -1354,9 +1354,9 @@ int __init arch_ioremap_pud_supported(void) !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); } -int __init arch_ioremap_pmd_supported(void) +bool arch_vmap_pmd_supported(pgprot_t prot) { - /* See arch_ioremap_pud_supported() */ + /* See arch_vmap_pud_supported() */ return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); } diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h index b992dfaaa161..105abb73f075 100644 --- a/arch/powerpc/include/asm/vmalloc.h +++ b/arch/powerpc/include/asm/vmalloc.h @@ -1,4 +1,12 @@ #ifndef _ASM_POWERPC_VMALLOC_H #define _ASM_POWERPC_VMALLOC_H +#include + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +bool arch_vmap_p4d_supported(pgprot_t prot); +bool arch_vmap_pud_supported(pgprot_t prot); +bool arch_vmap_pmd_supported(pgprot_t prot); +#endif + #endif /* _ASM_POWERPC_VMALLOC_H */ diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 98f0b243c1ab..743807fc210f 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -1082,13 +1082,13 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma, set_pte_at(mm, addr, ptep, pte); } -int __init arch_ioremap_pud_supported(void) +bool arch_vmap_pud_supported(pgprot_t prot) { /* HPT does not cope with large pages in the vmalloc area */ return radix_enabled(); } -int __init arch_ioremap_pmd_supported(void) +bool arch_vmap_pmd_supported(pgprot_t prot) { return radix_enabled(); } @@ -1182,7 +1182,7 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) return 1; } -int __init arch_ioremap_p4d_supported(void) +bool arch_vmap_p4d_supported(pgprot_t prot) { - return 0; + return false; } diff --git a/arch/x86/include/asm/vmalloc.h b/arch/x86/include/asm/vmalloc.h index 29837740b520..094ea2b565f3 100644 --- a/arch/x86/include/asm/vmalloc.h +++ 
b/arch/x86/include/asm/vmalloc.h @@ -1,6 +1,13 @@ #ifndef _ASM_X86_VMALLOC_H #define _ASM_X86_VMALLOC_H +#include #include +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +bool arch_vmap_p4d_supported(pgprot_t prot); +bool arch_vmap_pud_supported(pgprot_t prot); +bool arch_vmap_pmd_supported(pgprot_t prot); +#endif + #endif /* _ASM_X86_VMALLOC_H */ diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 9e5ccc56f8e0..fbaf0c447986 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -481,24 +481,26 @@ void iounmap(volatile void __iomem *addr) } EXPORT_SYMBOL(iounmap); -int __init arch_ioremap_p4d_supported(void) +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +bool arch_vmap_p4d_supported(pgprot_t prot) { - return 0; + return false; } -int __init arch_ioremap_pud_supported(void) +bool arch_vmap_pud_supported(pgprot_t prot) { #ifdef CONFIG_X86_64 return boot_cpu_has(X86_FEATURE_GBPAGES); #else - return 0; + return false; #endif } -int __init arch_ioremap_pmd_supported(void) +bool arch_vmap_pmd_supported(pgprot_t prot) { return boot_cpu_has(X86_FEATURE_PSE); } +#endif /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem diff --git a/include/linux/io.h b/include/linux/io.h index 61ff7d6278b6..9595151d800d 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -31,15 +31,6 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end, } #endif -#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP -void __init ioremap_huge_init(void); -int arch_ioremap_p4d_supported(void); -int arch_ioremap_pud_supported(void); -int arch_ioremap_pmd_supported(void); -#else -static inline void ioremap_huge_init(void) { } -#endif - /* * Managed iomap interface */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 3de7be6dd17c..358c51c702c0 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -78,6 +78,12 @@ struct vmap_area { }; }; +#ifndef CONFIG_HAVE_ARCH_HUGE_VMAP +static inline bool arch_vmap_p4d_supported(pgprot_t prot) { return false; } +static inline bool arch_vmap_pud_supported(pgprot_t prot) { return false; } +static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return false; } +#endif + /* * Highlevel APIs for driver use */ diff --git a/init/main.c b/init/main.c index f498aac26e8c..ae96c79ad2d3 100644 --- a/init/main.c +++ b/init/main.c @@ -837,7 +837,6 @@ static void __init mm_init(void) pgtable_init(); debug_objects_mem_init(); vmalloc_init(); - ioremap_huge_init(); /* Should be run before the first non-init thread is created */ init_espfix_bsp(); /* Should be run after espfix64 is set up. 
*/ diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c index a9bd6ce1ba02..05efe98a9ac2 100644 --- a/mm/debug_vm_pgtable.c +++ b/mm/debug_vm_pgtable.c @@ -247,7 +247,7 @@ static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { pmd_t pmd; - if (!arch_ioremap_pmd_supported()) + if (!arch_vmap_pmd_supported(prot)) return; pr_debug("Validating PMD huge\n"); @@ -385,7 +385,7 @@ static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { pud_t pud; - if (!arch_ioremap_pud_supported()) + if (!arch_vmap_pud_supported(prot)) return; pr_debug("Validating PUD huge\n"); diff --git a/mm/ioremap.c b/mm/ioremap.c index 3f4d36f9745a..3264d0203785 100644 --- a/mm/ioremap.c +++ b/mm/ioremap.c @@ -16,49 +16,16 @@ #include "pgalloc-track.h" #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP -static int __read_mostly ioremap_p4d_capable; -static int __read_mostly ioremap_pud_capable; -static int __read_mostly ioremap_pmd_capable; -static int __read_mostly ioremap_huge_disabled; +static bool __ro_after_init iomap_max_page_shift = PAGE_SHIFT; static int __init set_nohugeiomap(char *str) { - ioremap_huge_disabled = 1; + iomap_max_page_shift = P4D_SHIFT; return 0; } early_param("nohugeiomap", set_nohugeiomap); - -void __init ioremap_huge_init(void) -{ - if (!ioremap_huge_disabled) { - if (arch_ioremap_p4d_supported()) - ioremap_p4d_capable = 1; - if (arch_ioremap_pud_supported()) - ioremap_pud_capable = 1; - if (arch_ioremap_pmd_supported()) - ioremap_pmd_capable = 1; - } -} - -static inline int ioremap_p4d_enabled(void) -{ - return ioremap_p4d_capable; -} - -static inline int ioremap_pud_enabled(void) -{ - return ioremap_pud_capable; -} - -static inline int ioremap_pmd_enabled(void) -{ - return ioremap_pmd_capable; -} - -#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ -static inline int ioremap_p4d_enabled(void) { return 0; } -static inline int ioremap_pud_enabled(void) { return 0; } -static inline int ioremap_pmd_enabled(void) { return 0; } +#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */ +static const bool iomap_max_page_shift = PAGE_SHIFT; #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, @@ -82,9 +49,13 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, } static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot) + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) { - if (!ioremap_pmd_enabled()) + if (max_page_shift < PMD_SHIFT) + return 0; + + if (!arch_vmap_pmd_supported(prot)) return 0; if ((end - addr) != PMD_SIZE) @@ -104,7 +75,7 @@ static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, - pgtbl_mod_mask *mask) + unsigned int max_page_shift, pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; @@ -115,7 +86,8 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, do { next = pmd_addr_end(addr, end); - if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) { + if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, + max_page_shift)) { *mask |= PGTBL_PMD_MODIFIED; continue; } @@ -127,9 +99,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, } static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot) + phys_addr_t phys_addr, pgprot_t prot, + unsigned int 
max_page_shift) { - if (!ioremap_pud_enabled()) + if (max_page_shift < PUD_SHIFT) + return 0; + + if (!arch_vmap_pud_supported(prot)) return 0; if ((end - addr) != PUD_SIZE) @@ -149,7 +125,7 @@ static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, - pgtbl_mod_mask *mask) + unsigned int max_page_shift, pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; @@ -160,21 +136,27 @@ static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, do { next = pud_addr_end(addr, end); - if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot)) { + if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, + max_page_shift)) { *mask |= PGTBL_PUD_MODIFIED; continue; } - if (vmap_pmd_range(pud, addr, next, phys_addr, prot, mask)) + if (vmap_pmd_range(pud, addr, next, phys_addr, prot, + max_page_shift, mask)) return -ENOMEM; } while (pud++, phys_addr += (next - addr), addr = next, addr != end); return 0; } static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot) + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) { - if (!ioremap_p4d_enabled()) + if (max_page_shift < P4D_SHIFT) + return 0; + + if (!arch_vmap_p4d_supported(prot)) return 0; if ((end - addr) != P4D_SIZE) @@ -194,7 +176,7 @@ static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, - pgtbl_mod_mask *mask) + unsigned int max_page_shift, pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; @@ -205,19 +187,22 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, do { next = p4d_addr_end(addr, end); - if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) { + if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, + max_page_shift)) { *mask |= PGTBL_P4D_MODIFIED; continue; } - if (vmap_pud_range(p4d, addr, next, phys_addr, prot, mask)) + if (vmap_pud_range(p4d, addr, next, phys_addr, prot, + max_page_shift, mask)) return -ENOMEM; } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); return 0; } static int vmap_range(unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot) + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) { pgd_t *pgd; unsigned long start; @@ -232,7 +217,8 @@ static int vmap_range(unsigned long addr, unsigned long end, pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); - err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, &mask); + err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, + max_page_shift, &mask); if (err) break; } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); @@ -248,7 +234,7 @@ static int vmap_range(unsigned long addr, unsigned long end, int ioremap_page_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot) { - return vmap_range(addr, end, phys_addr, prot); + return vmap_range(addr, end, phys_addr, prot, iomap_max_page_shift); } #ifdef CONFIG_GENERIC_IOREMAP -- cgit v1.2.3 From 6f680e70b6ff58c9670769534196800233685d55 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 29 Apr 2021 22:58:39 -0700 Subject: mm/vmalloc: provide fallback arch huge vmap support functions If an architecture doesn't support a particular page table level as a huge vmap page size then allow it to skip defining the 
support query function. Link: https://lkml.kernel.org/r/20210317062402.533919-11-npiggin@gmail.com Signed-off-by: Nicholas Piggin Suggested-by: Christoph Hellwig Cc: Borislav Petkov Cc: Catalin Marinas Cc: Ding Tianhong Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Miaohe Lin Cc: Michael Ellerman Cc: Russell King Cc: Thomas Gleixner Cc: Uladzislau Rezki (Sony) Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/vmalloc.h | 7 +++---- arch/powerpc/include/asm/vmalloc.h | 7 +++---- arch/x86/include/asm/vmalloc.h | 13 +++++-------- include/linux/vmalloc.h | 24 ++++++++++++++++++++---- 4 files changed, 31 insertions(+), 20 deletions(-) (limited to 'include/linux/vmalloc.h') diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h index fc9a12d6cc1a..7a22aeea9bb5 100644 --- a/arch/arm64/include/asm/vmalloc.h +++ b/arch/arm64/include/asm/vmalloc.h @@ -4,11 +4,8 @@ #include #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP -static inline bool arch_vmap_p4d_supported(pgprot_t prot) -{ - return false; -} +#define arch_vmap_pud_supported arch_vmap_pud_supported static inline bool arch_vmap_pud_supported(pgprot_t prot) { /* @@ -19,11 +16,13 @@ static inline bool arch_vmap_pud_supported(pgprot_t prot) !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); } +#define arch_vmap_pmd_supported arch_vmap_pmd_supported static inline bool arch_vmap_pmd_supported(pgprot_t prot) { /* See arch_vmap_pud_supported() */ return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); } + #endif #endif /* _ASM_ARM64_VMALLOC_H */ diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h index 3f0c153befb0..4c69ece52a31 100644 --- a/arch/powerpc/include/asm/vmalloc.h +++ b/arch/powerpc/include/asm/vmalloc.h @@ -5,21 +5,20 @@ #include #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP -static inline bool arch_vmap_p4d_supported(pgprot_t prot) -{ - return false; -} +#define arch_vmap_pud_supported arch_vmap_pud_supported static inline bool arch_vmap_pud_supported(pgprot_t prot) { /* HPT does not cope with large pages in the vmalloc area */ return radix_enabled(); } +#define arch_vmap_pmd_supported arch_vmap_pmd_supported static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return radix_enabled(); } + #endif #endif /* _ASM_POWERPC_VMALLOC_H */ diff --git a/arch/x86/include/asm/vmalloc.h b/arch/x86/include/asm/vmalloc.h index e714b00fc0ca..49ce331f3ac6 100644 --- a/arch/x86/include/asm/vmalloc.h +++ b/arch/x86/include/asm/vmalloc.h @@ -6,24 +6,21 @@ #include #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP -static inline bool arch_vmap_p4d_supported(pgprot_t prot) -{ - return false; -} +#ifdef CONFIG_X86_64 +#define arch_vmap_pud_supported arch_vmap_pud_supported static inline bool arch_vmap_pud_supported(pgprot_t prot) { -#ifdef CONFIG_X86_64 return boot_cpu_has(X86_FEATURE_GBPAGES); -#else - return false; -#endif } +#endif +#define arch_vmap_pmd_supported arch_vmap_pmd_supported static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return boot_cpu_has(X86_FEATURE_PSE); } + #endif #endif /* _ASM_X86_VMALLOC_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 358c51c702c0..eb5630be6783 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -78,10 +78,26 @@ struct vmap_area { }; }; -#ifndef CONFIG_HAVE_ARCH_HUGE_VMAP -static inline bool arch_vmap_p4d_supported(pgprot_t prot) { return false; } -static inline bool arch_vmap_pud_supported(pgprot_t prot) { return false; } -static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return false; } +/* archs that select 
HAVE_ARCH_HUGE_VMAP should override one or more of these */ +#ifndef arch_vmap_p4d_supported +static inline bool arch_vmap_p4d_supported(pgprot_t prot) +{ + return false; +} +#endif + +#ifndef arch_vmap_pud_supported +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ + return false; +} +#endif + +#ifndef arch_vmap_pmd_supported +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ + return false; +} #endif /* -- cgit v1.2.3 From 5e9e3d777b99aabe2f91f793a52e870a02642160 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 29 Apr 2021 22:58:43 -0700 Subject: mm: move vmap_range from mm/ioremap.c to mm/vmalloc.c This is a generic kernel virtual memory mapper, not specific to ioremap. Code is unchanged other than making vmap_range non-static. Link: https://lkml.kernel.org/r/20210317062402.533919-12-npiggin@gmail.com Signed-off-by: Nicholas Piggin Reviewed-by: Christoph Hellwig Cc: Borislav Petkov Cc: Catalin Marinas Cc: Ding Tianhong Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Miaohe Lin Cc: Michael Ellerman Cc: Russell King Cc: Thomas Gleixner Cc: Uladzislau Rezki (Sony) Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmalloc.h | 3 + mm/ioremap.c | 203 ------------------------------------------------ mm/vmalloc.c | 202 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 205 insertions(+), 203 deletions(-) (limited to 'include/linux/vmalloc.h') diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index eb5630be6783..ae9eb07d30d4 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -189,6 +189,9 @@ extern struct vm_struct *remove_vm_area(const void *addr); extern struct vm_struct *find_vm_area(const void *addr); #ifdef CONFIG_MMU +int vmap_range(unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift); extern int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages); int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, diff --git a/mm/ioremap.c b/mm/ioremap.c index 3264d0203785..d1dcc7e744ac 100644 --- a/mm/ioremap.c +++ b/mm/ioremap.c @@ -28,209 +28,6 @@ early_param("nohugeiomap", set_nohugeiomap); static const bool iomap_max_page_shift = PAGE_SHIFT; #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ -static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - pgtbl_mod_mask *mask) -{ - pte_t *pte; - u64 pfn; - - pfn = phys_addr >> PAGE_SHIFT; - pte = pte_alloc_kernel_track(pmd, addr, mask); - if (!pte) - return -ENOMEM; - do { - BUG_ON(!pte_none(*pte)); - set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); - pfn++; - } while (pte++, addr += PAGE_SIZE, addr != end); - *mask |= PGTBL_PTE_MODIFIED; - return 0; -} - -static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift) -{ - if (max_page_shift < PMD_SHIFT) - return 0; - - if (!arch_vmap_pmd_supported(prot)) - return 0; - - if ((end - addr) != PMD_SIZE) - return 0; - - if (!IS_ALIGNED(addr, PMD_SIZE)) - return 0; - - if (!IS_ALIGNED(phys_addr, PMD_SIZE)) - return 0; - - if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) - return 0; - - return pmd_set_huge(pmd, phys_addr, prot); -} - -static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift, pgtbl_mod_mask *mask) -{ - pmd_t *pmd; - 
unsigned long next; - - pmd = pmd_alloc_track(&init_mm, pud, addr, mask); - if (!pmd) - return -ENOMEM; - do { - next = pmd_addr_end(addr, end); - - if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, - max_page_shift)) { - *mask |= PGTBL_PMD_MODIFIED; - continue; - } - - if (vmap_pte_range(pmd, addr, next, phys_addr, prot, mask)) - return -ENOMEM; - } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); - return 0; -} - -static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift) -{ - if (max_page_shift < PUD_SHIFT) - return 0; - - if (!arch_vmap_pud_supported(prot)) - return 0; - - if ((end - addr) != PUD_SIZE) - return 0; - - if (!IS_ALIGNED(addr, PUD_SIZE)) - return 0; - - if (!IS_ALIGNED(phys_addr, PUD_SIZE)) - return 0; - - if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) - return 0; - - return pud_set_huge(pud, phys_addr, prot); -} - -static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift, pgtbl_mod_mask *mask) -{ - pud_t *pud; - unsigned long next; - - pud = pud_alloc_track(&init_mm, p4d, addr, mask); - if (!pud) - return -ENOMEM; - do { - next = pud_addr_end(addr, end); - - if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, - max_page_shift)) { - *mask |= PGTBL_PUD_MODIFIED; - continue; - } - - if (vmap_pmd_range(pud, addr, next, phys_addr, prot, - max_page_shift, mask)) - return -ENOMEM; - } while (pud++, phys_addr += (next - addr), addr = next, addr != end); - return 0; -} - -static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift) -{ - if (max_page_shift < P4D_SHIFT) - return 0; - - if (!arch_vmap_p4d_supported(prot)) - return 0; - - if ((end - addr) != P4D_SIZE) - return 0; - - if (!IS_ALIGNED(addr, P4D_SIZE)) - return 0; - - if (!IS_ALIGNED(phys_addr, P4D_SIZE)) - return 0; - - if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) - return 0; - - return p4d_set_huge(p4d, phys_addr, prot); -} - -static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift, pgtbl_mod_mask *mask) -{ - p4d_t *p4d; - unsigned long next; - - p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); - if (!p4d) - return -ENOMEM; - do { - next = p4d_addr_end(addr, end); - - if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, - max_page_shift)) { - *mask |= PGTBL_P4D_MODIFIED; - continue; - } - - if (vmap_pud_range(p4d, addr, next, phys_addr, prot, - max_page_shift, mask)) - return -ENOMEM; - } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); - return 0; -} - -static int vmap_range(unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift) -{ - pgd_t *pgd; - unsigned long start; - unsigned long next; - int err; - pgtbl_mod_mask mask = 0; - - might_sleep(); - BUG_ON(addr >= end); - - start = addr; - pgd = pgd_offset_k(addr); - do { - next = pgd_addr_end(addr, end); - err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, - max_page_shift, &mask); - if (err) - break; - } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); - - flush_cache_vmap(start, end); - - if (mask & ARCH_PAGE_TABLE_SYNC_MASK) - arch_sync_kernel_mappings(start, end); - - return err; -} - int ioremap_page_range(unsigned long addr, unsigned long end, phys_addr_t 
phys_addr, pgprot_t prot) { diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ce4066b46955..5c81717f7e0e 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -68,6 +68,208 @@ static void free_work(struct work_struct *w) } /*** Page table manipulation functions ***/ +static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + pgtbl_mod_mask *mask) +{ + pte_t *pte; + u64 pfn; + + pfn = phys_addr >> PAGE_SHIFT; + pte = pte_alloc_kernel_track(pmd, addr, mask); + if (!pte) + return -ENOMEM; + do { + BUG_ON(!pte_none(*pte)); + set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); + pfn++; + } while (pte++, addr += PAGE_SIZE, addr != end); + *mask |= PGTBL_PTE_MODIFIED; + return 0; +} + +static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) +{ + if (max_page_shift < PMD_SHIFT) + return 0; + + if (!arch_vmap_pmd_supported(prot)) + return 0; + + if ((end - addr) != PMD_SIZE) + return 0; + + if (!IS_ALIGNED(addr, PMD_SIZE)) + return 0; + + if (!IS_ALIGNED(phys_addr, PMD_SIZE)) + return 0; + + if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) + return 0; + + return pmd_set_huge(pmd, phys_addr, prot); +} + +static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift, pgtbl_mod_mask *mask) +{ + pmd_t *pmd; + unsigned long next; + + pmd = pmd_alloc_track(&init_mm, pud, addr, mask); + if (!pmd) + return -ENOMEM; + do { + next = pmd_addr_end(addr, end); + + if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, + max_page_shift)) { + *mask |= PGTBL_PMD_MODIFIED; + continue; + } + + if (vmap_pte_range(pmd, addr, next, phys_addr, prot, mask)) + return -ENOMEM; + } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); + return 0; +} + +static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) +{ + if (max_page_shift < PUD_SHIFT) + return 0; + + if (!arch_vmap_pud_supported(prot)) + return 0; + + if ((end - addr) != PUD_SIZE) + return 0; + + if (!IS_ALIGNED(addr, PUD_SIZE)) + return 0; + + if (!IS_ALIGNED(phys_addr, PUD_SIZE)) + return 0; + + if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) + return 0; + + return pud_set_huge(pud, phys_addr, prot); +} + +static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift, pgtbl_mod_mask *mask) +{ + pud_t *pud; + unsigned long next; + + pud = pud_alloc_track(&init_mm, p4d, addr, mask); + if (!pud) + return -ENOMEM; + do { + next = pud_addr_end(addr, end); + + if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, + max_page_shift)) { + *mask |= PGTBL_PUD_MODIFIED; + continue; + } + + if (vmap_pmd_range(pud, addr, next, phys_addr, prot, + max_page_shift, mask)) + return -ENOMEM; + } while (pud++, phys_addr += (next - addr), addr = next, addr != end); + return 0; +} + +static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) +{ + if (max_page_shift < P4D_SHIFT) + return 0; + + if (!arch_vmap_p4d_supported(prot)) + return 0; + + if ((end - addr) != P4D_SIZE) + return 0; + + if (!IS_ALIGNED(addr, P4D_SIZE)) + return 0; + + if (!IS_ALIGNED(phys_addr, P4D_SIZE)) + return 0; + + if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) + return 0; + 
+ return p4d_set_huge(p4d, phys_addr, prot); +} + +static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift, pgtbl_mod_mask *mask) +{ + p4d_t *p4d; + unsigned long next; + + p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); + if (!p4d) + return -ENOMEM; + do { + next = p4d_addr_end(addr, end); + + if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, + max_page_shift)) { + *mask |= PGTBL_P4D_MODIFIED; + continue; + } + + if (vmap_pud_range(p4d, addr, next, phys_addr, prot, + max_page_shift, mask)) + return -ENOMEM; + } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); + return 0; +} + +int vmap_range(unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift) +{ + pgd_t *pgd; + unsigned long start; + unsigned long next; + int err; + pgtbl_mod_mask mask = 0; + + might_sleep(); + BUG_ON(addr >= end); + + start = addr; + pgd = pgd_offset_k(addr); + do { + next = pgd_addr_end(addr, end); + err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, + max_page_shift, &mask); + if (err) + break; + } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); + + flush_cache_vmap(start, end); + + if (mask & ARCH_PAGE_TABLE_SYNC_MASK) + arch_sync_kernel_mappings(start, end); + + return err; +} static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) -- cgit v1.2.3 From 121e6f3258fe393e22c36f61a319be8a4f2c05ae Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 29 Apr 2021 22:58:49 -0700 Subject: mm/vmalloc: hugepage vmalloc mappings Support huge page vmalloc mappings. Config option HAVE_ARCH_HUGE_VMALLOC enables support on architectures that define HAVE_ARCH_HUGE_VMAP and supports PMD sized vmap mappings. vmalloc will attempt to allocate PMD-sized pages if allocating PMD size or larger, and fall back to small pages if that was unsuccessful. Architectures must ensure that any arch specific vmalloc allocations that require PAGE_SIZE mappings (e.g., module allocations vs strict module rwx) use the VM_NOHUGE flag to inhibit larger mappings. This can result in more internal fragmentation and memory overhead for a given allocation, an option nohugevmalloc is added to disable at boot. [colin.king@canonical.com: fix read of uninitialized pointer area] Link: https://lkml.kernel.org/r/20210318155955.18220-1-colin.king@canonical.com Link: https://lkml.kernel.org/r/20210317062402.533919-14-npiggin@gmail.com Signed-off-by: Nicholas Piggin Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christoph Hellwig Cc: Ding Tianhong Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Miaohe Lin Cc: Michael Ellerman Cc: Russell King Cc: Thomas Gleixner Cc: Uladzislau Rezki (Sony) Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/Kconfig | 11 +++ include/linux/vmalloc.h | 21 +++++ mm/page_alloc.c | 5 +- mm/vmalloc.c | 220 +++++++++++++++++++++++++++++++++++++----------- 4 files changed, 209 insertions(+), 48 deletions(-) (limited to 'include/linux/vmalloc.h') diff --git a/arch/Kconfig b/arch/Kconfig index 5e8f6680d4bf..bf27159be4d9 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -829,6 +829,17 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD config HAVE_ARCH_HUGE_VMAP bool +# +# Archs that select this would be capable of PMD-sized vmaps (i.e., +# arch_vmap_pmd_supported() returns true), and they must make no assumptions +# that vmalloc memory is mapped with PAGE_SIZE ptes. 
The VM_NO_HUGE_VMAP flag +# can be used to prohibit arch-specific allocations from using hugepages to +# help with this (e.g., modules may require it). +# +config HAVE_ARCH_HUGE_VMALLOC + depends on HAVE_ARCH_HUGE_VMAP + bool + config ARCH_WANT_HUGE_PMD_SHARE bool diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index ae9eb07d30d4..b4c82f2d40dc 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -26,6 +26,7 @@ struct notifier_block; /* in notifier.h */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ #define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */ #define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */ +#define VM_NO_HUGE_VMAP 0x00000400 /* force PAGE_SIZE pte mapping */ /* * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC. @@ -54,6 +55,9 @@ struct vm_struct { unsigned long size; unsigned long flags; struct page **pages; +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC + unsigned int page_order; +#endif unsigned int nr_pages; phys_addr_t phys_addr; const void *caller; @@ -188,6 +192,22 @@ void free_vm_area(struct vm_struct *area); extern struct vm_struct *remove_vm_area(const void *addr); extern struct vm_struct *find_vm_area(const void *addr); +static inline bool is_vm_area_hugepages(const void *addr) +{ + /* + * This may not 100% tell if the area is mapped with > PAGE_SIZE + * page table entries, if for some reason the architecture indicates + * larger sizes are available but decides not to use them, nothing + * prevents that. This only indicates the size of the physical page + * allocated in the vmalloc layer. + */ +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC + return find_vm_area(addr)->page_order > 0; +#else + return false; +#endif +} + #ifdef CONFIG_MMU int vmap_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, @@ -205,6 +225,7 @@ static inline void set_vm_flush_reset_perms(void *addr) if (vm) vm->flags |= VM_FLUSH_RESET_PERMS; } + #else static inline int map_kernel_range_noflush(unsigned long start, unsigned long size, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 56a8103580d6..39ff5c604cef 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -72,6 +72,7 @@ #include #include #include +#include #include #include @@ -8222,6 +8223,7 @@ void *__init alloc_large_system_hash(const char *tablename, void *table = NULL; gfp_t gfp_flags; bool virt; + bool huge; /* allow the kernel cmdline to have a say */ if (!numentries) { @@ -8289,6 +8291,7 @@ void *__init alloc_large_system_hash(const char *tablename, } else if (get_order(size) >= MAX_ORDER || hashdist) { table = __vmalloc(size, gfp_flags); virt = true; + huge = is_vm_area_hugepages(table); } else { /* * If bucketsize is not a power-of-two, we may free @@ -8305,7 +8308,7 @@ void *__init alloc_large_system_hash(const char *tablename, pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, - virt ? "vmalloc" : "linear"); + virt ? (huge ? 
"vmalloc hugepage" : "vmalloc") : "linear"); if (_hash_shift) *_hash_shift = log2qty; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 41c1dbdd2677..59c815eb7e74 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -42,6 +42,19 @@ #include "internal.h" #include "pgalloc-track.h" +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC +static bool __ro_after_init vmap_allow_huge = true; + +static int __init set_nohugevmalloc(char *str) +{ + vmap_allow_huge = false; + return 0; +} +early_param("nohugevmalloc", set_nohugevmalloc); +#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ +static const bool vmap_allow_huge = false; +#endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ + bool is_vmalloc_addr(const void *x) { unsigned long addr = (unsigned long)x; @@ -483,31 +496,12 @@ static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr, return 0; } -/** - * map_kernel_range_noflush - map kernel VM area with the specified pages - * @addr: start of the VM area to map - * @size: size of the VM area to map - * @prot: page protection flags to use - * @pages: pages to map - * - * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size specify should - * have been allocated using get_vm_area() and its friends. - * - * NOTE: - * This function does NOT do any cache flushing. The caller is responsible for - * calling flush_cache_vmap() on to-be-mapped areas before calling this - * function. - * - * RETURNS: - * 0 on success, -errno on failure. - */ -int map_kernel_range_noflush(unsigned long addr, unsigned long size, - pgprot_t prot, struct page **pages) +static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages) { unsigned long start = addr; - unsigned long end = addr + size; - unsigned long next; pgd_t *pgd; + unsigned long next; int err = 0; int nr = 0; pgtbl_mod_mask mask = 0; @@ -529,6 +523,66 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size, return 0; } +static int vmap_pages_range_noflush(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages, unsigned int page_shift) +{ + unsigned int i, nr = (end - addr) >> PAGE_SHIFT; + + WARN_ON(page_shift < PAGE_SHIFT); + + if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || + page_shift == PAGE_SHIFT) + return vmap_small_pages_range_noflush(addr, end, prot, pages); + + for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) { + int err; + + err = vmap_range_noflush(addr, addr + (1UL << page_shift), + __pa(page_address(pages[i])), prot, + page_shift); + if (err) + return err; + + addr += 1UL << page_shift; + } + + return 0; +} + +static int vmap_pages_range(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages, unsigned int page_shift) +{ + int err; + + err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); + flush_cache_vmap(addr, end); + return err; +} + +/** + * map_kernel_range_noflush - map kernel VM area with the specified pages + * @addr: start of the VM area to map + * @size: size of the VM area to map + * @prot: page protection flags to use + * @pages: pages to map + * + * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size specify should + * have been allocated using get_vm_area() and its friends. + * + * NOTE: + * This function does NOT do any cache flushing. The caller is responsible for + * calling flush_cache_vmap() on to-be-mapped areas before calling this + * function. + * + * RETURNS: + * 0 on success, -errno on failure. 
+ */ +int map_kernel_range_noflush(unsigned long addr, unsigned long size, + pgprot_t prot, struct page **pages) +{ + return vmap_pages_range_noflush(addr, addr + size, prot, pages, PAGE_SHIFT); +} + int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages) { @@ -2112,6 +2166,24 @@ EXPORT_SYMBOL(vm_map_ram); static struct vm_struct *vmlist __initdata; +static inline unsigned int vm_area_page_order(struct vm_struct *vm) +{ +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC + return vm->page_order; +#else + return 0; +#endif +} + +static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) +{ +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC + vm->page_order = order; +#else + BUG_ON(order != 0); +#endif +} + /** * vm_area_add_early - add vmap area early during boot * @vm: vm_struct to add @@ -2422,6 +2494,7 @@ static inline void set_area_direct_map(const struct vm_struct *area, { int i; + /* HUGE_VMALLOC passes small pages to set_direct_map */ for (i = 0; i < area->nr_pages; i++) if (page_address(area->pages[i])) set_direct_map(area->pages[i]); @@ -2431,6 +2504,7 @@ static inline void set_area_direct_map(const struct vm_struct *area, static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) { unsigned long start = ULONG_MAX, end = 0; + unsigned int page_order = vm_area_page_order(area); int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; int flush_dmap = 0; int i; @@ -2455,11 +2529,14 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) * map. Find the start and end range of the direct mappings to make sure * the vm_unmap_aliases() flush includes the direct map. */ - for (i = 0; i < area->nr_pages; i++) { + for (i = 0; i < area->nr_pages; i += 1U << page_order) { unsigned long addr = (unsigned long)page_address(area->pages[i]); if (addr) { + unsigned long page_size; + + page_size = PAGE_SIZE << page_order; start = min(addr, start); - end = max(addr + PAGE_SIZE, end); + end = max(addr + page_size, end); flush_dmap = 1; } } @@ -2500,13 +2577,14 @@ static void __vunmap(const void *addr, int deallocate_pages) vm_remove_mappings(area, deallocate_pages); if (deallocate_pages) { + unsigned int page_order = vm_area_page_order(area); int i; - for (i = 0; i < area->nr_pages; i++) { + for (i = 0; i < area->nr_pages; i += 1U << page_order) { struct page *page = area->pages[i]; BUG_ON(!page); - __free_pages(page, 0); + __free_pages(page, page_order); } atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); @@ -2697,15 +2775,19 @@ EXPORT_SYMBOL_GPL(vmap_pfn); #endif /* CONFIG_VMAP_PFN */ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, - pgprot_t prot, int node) + pgprot_t prot, unsigned int page_shift, + int node) { const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; - unsigned int nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; + unsigned long addr = (unsigned long)area->addr; + unsigned long size = get_vm_area_size(area); unsigned long array_size; - unsigned int i; + unsigned int nr_small_pages = size >> PAGE_SHIFT; + unsigned int page_order; struct page **pages; + unsigned int i; - array_size = (unsigned long)nr_pages * sizeof(struct page *); + array_size = (unsigned long)nr_small_pages * sizeof(struct page *); gfp_mask |= __GFP_NOWARN; if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) gfp_mask |= __GFP_HIGHMEM; @@ -2724,30 +2806,38 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, } area->pages = pages; - area->nr_pages = nr_pages; + area->nr_pages = 
nr_small_pages; + set_vm_area_page_order(area, page_shift - PAGE_SHIFT); - for (i = 0; i < area->nr_pages; i++) { - struct page *page; + page_order = vm_area_page_order(area); - if (node == NUMA_NO_NODE) - page = alloc_page(gfp_mask); - else - page = alloc_pages_node(node, gfp_mask, 0); + /* + * Careful, we allocate and map page_order pages, but tracking is done + * per PAGE_SIZE page so as to keep the vm_struct APIs independent of + * the physical/mapped size. + */ + for (i = 0; i < area->nr_pages; i += 1U << page_order) { + struct page *page; + int p; + /* Compound pages required for remap_vmalloc_page */ + page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order); if (unlikely(!page)) { /* Successfully allocated i pages, free them in __vfree() */ area->nr_pages = i; atomic_long_add(area->nr_pages, &nr_vmalloc_pages); goto fail; } - area->pages[i] = page; + + for (p = 0; p < (1U << page_order); p++) + area->pages[i + p] = page + p; + if (gfpflags_allow_blocking(gfp_mask)) cond_resched(); } atomic_long_add(area->nr_pages, &nr_vmalloc_pages); - if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area), - prot, pages) < 0) + if (vmap_pages_range(addr, addr + size, prot, pages, page_shift) < 0) goto fail; return area->addr; @@ -2755,7 +2845,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, fail: warn_alloc(gfp_mask, NULL, "vmalloc: allocation failure, allocated %ld of %ld bytes", - (area->nr_pages*PAGE_SIZE), area->size); + (area->nr_pages*PAGE_SIZE), size); __vfree(area->addr); return NULL; } @@ -2786,19 +2876,45 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, struct vm_struct *area; void *addr; unsigned long real_size = size; + unsigned long real_align = align; + unsigned int shift = PAGE_SHIFT; - size = PAGE_ALIGN(size); - if (!size || (size >> PAGE_SHIFT) > totalram_pages()) + if (!size || (size >> PAGE_SHIFT) > totalram_pages()) { + area = NULL; goto fail; + } + + if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP) && + arch_vmap_pmd_supported(prot)) { + unsigned long size_per_node; - area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED | + /* + * Try huge pages. Only try for PAGE_KERNEL allocations, + * others like modules don't yet expect huge pages in + * their allocations due to apply_to_page_range not + * supporting them. 
+ */ + + size_per_node = size; + if (node == NUMA_NO_NODE) + size_per_node /= num_online_nodes(); + if (size_per_node >= PMD_SIZE) { + shift = PMD_SHIFT; + align = max(real_align, 1UL << shift); + size = ALIGN(real_size, 1UL << shift); + } + } + +again: + size = PAGE_ALIGN(size); + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | vm_flags, start, end, node, gfp_mask, caller); if (!area) goto fail; - addr = __vmalloc_area_node(area, gfp_mask, prot, node); + addr = __vmalloc_area_node(area, gfp_mask, prot, shift, node); if (!addr) - return NULL; + goto fail; /* * In this function, newly allocated vm_struct has VM_UNINITIALIZED @@ -2812,8 +2928,18 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align, return addr; fail: - warn_alloc(gfp_mask, NULL, + if (shift > PAGE_SHIFT) { + shift = PAGE_SHIFT; + align = real_align; + size = real_size; + goto again; + } + + if (!area) { + /* Warn for area allocation, page allocations already warn */ + warn_alloc(gfp_mask, NULL, "vmalloc: allocation failure: %lu bytes", real_size); + } return NULL; } -- cgit v1.2.3 From b67177ecd956333029dbc1a4971a857fee0ccbb1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 29 Apr 2021 22:58:53 -0700 Subject: mm/vmalloc: remove map_kernel_range MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "mm/vmalloc: cleanup after hugepage series", v2. Christoph pointed out some overdue cleanups required after the huge vmalloc series, and I had another failure error message improvement as well. This patch (of 5): This is a shim around vmap_pages_range, get rid of it. Move the main API comment from the _noflush variant to the normal variant, and make _noflush internal to mm/. Link: https://lkml.kernel.org/r/20210322021806.892164-1-npiggin@gmail.com Link: https://lkml.kernel.org/r/20210322021806.892164-2-npiggin@gmail.com Signed-off-by: Nicholas Piggin Reviewed-by: Christoph Hellwig Cc: Uladzislau Rezki Cc: Cédric Le Goater Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/core-api/cachetlb.rst | 2 +- include/linux/vmalloc.h | 11 ------- mm/internal.h | 6 ++++ mm/percpu-vm.c | 5 +-- mm/vmalloc.c | 65 ++++++++++++++++--------------------- 5 files changed, 38 insertions(+), 51 deletions(-) (limited to 'include/linux/vmalloc.h') diff --git a/Documentation/core-api/cachetlb.rst b/Documentation/core-api/cachetlb.rst index a1582cc79f0f..756f7bcf8191 100644 --- a/Documentation/core-api/cachetlb.rst +++ b/Documentation/core-api/cachetlb.rst @@ -213,7 +213,7 @@ Here are the routines, one by one: there will be no entries in the cache for the kernel address space for virtual addresses in the range 'start' to 'end-1'. - The first of these two routines is invoked after map_kernel_range() + The first of these two routines is invoked after vmap_range() has installed the page table entries. The second is invoked before unmap_kernel_range() deletes the page table entries. 
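[Sketch, not part of the patch: the conversion boils down to replacing the
size-based map_kernel_range() shim with an explicit-range call, as the vmap()
and vm_map_ram() hunks later in this patch do. The helper name
example_map_area() is hypothetical, and the pattern only applies inside
mm/vmalloc.c, where vmap_pages_range() is defined.]

	/* Sketch only: map a vm_struct's pages with the explicit-range API. */
	static int example_map_area(struct vm_struct *area, struct page **pages)
	{
		unsigned long addr = (unsigned long)area->addr;
		unsigned long size = get_vm_area_size(area);

		/* was: return map_kernel_range(addr, size, PAGE_KERNEL, pages); */
		return vmap_pages_range(addr, addr + size, PAGE_KERNEL,
					pages, PAGE_SHIFT);
	}
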
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index b4c82f2d40dc..fb3b9989a4c5 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -212,10 +212,6 @@ static inline bool is_vm_area_hugepages(const void *addr) int vmap_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift); -extern int map_kernel_range_noflush(unsigned long start, unsigned long size, - pgprot_t prot, struct page **pages); -int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, - struct page **pages); extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); extern void unmap_kernel_range(unsigned long addr, unsigned long size); static inline void set_vm_flush_reset_perms(void *addr) @@ -227,13 +223,6 @@ static inline void set_vm_flush_reset_perms(void *addr) } #else -static inline int -map_kernel_range_noflush(unsigned long start, unsigned long size, - pgprot_t prot, struct page **pages) -{ - return size >> PAGE_SHIFT; -} -#define map_kernel_range map_kernel_range_noflush static inline void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) { diff --git a/mm/internal.h b/mm/internal.h index bbe900f9f095..58c3757c52d9 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -637,4 +637,10 @@ struct migration_target_control { gfp_t gfp_mask; }; +/* + * mm/vmalloc.c + */ +int vmap_pages_range_noflush(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages, unsigned int page_shift); + #endif /* __MM_INTERNAL_H */ diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c index e46f7a6917f9..88a53eb68a94 100644 --- a/mm/percpu-vm.c +++ b/mm/percpu-vm.c @@ -8,6 +8,7 @@ * Chunks are mapped into vmalloc areas and populated page by page. * This is the default chunk allocator. */ +#include "internal.h" static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, unsigned int cpu, int page_idx) @@ -192,8 +193,8 @@ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, static int __pcpu_map_pages(unsigned long addr, struct page **pages, int nr_pages) { - return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT, - PAGE_KERNEL, pages); + return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT), + PAGE_KERNEL, pages, PAGE_SHIFT); } /** diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 59c815eb7e74..527781a3a0fe 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -523,7 +523,16 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, return 0; } -static int vmap_pages_range_noflush(unsigned long addr, unsigned long end, +/* + * vmap_pages_range_noflush is similar to vmap_pages_range, but does not + * flush caches. + * + * The caller is responsible for calling flush_cache_vmap() after this + * function returns successfully and before the addresses are accessed. + * + * This is an internal function only. Do not use outside mm/. 
+ */ +int vmap_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) { unsigned int i, nr = (end - addr) >> PAGE_SHIFT; @@ -549,48 +558,26 @@ static int vmap_pages_range_noflush(unsigned long addr, unsigned long end, return 0; } -static int vmap_pages_range(unsigned long addr, unsigned long end, - pgprot_t prot, struct page **pages, unsigned int page_shift) -{ - int err; - - err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); - flush_cache_vmap(addr, end); - return err; -} - /** - * map_kernel_range_noflush - map kernel VM area with the specified pages + * vmap_pages_range - map pages to a kernel virtual address * @addr: start of the VM area to map - * @size: size of the VM area to map + * @end: end of the VM area to map (non-inclusive) * @prot: page protection flags to use - * @pages: pages to map - * - * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size specify should - * have been allocated using get_vm_area() and its friends. - * - * NOTE: - * This function does NOT do any cache flushing. The caller is responsible for - * calling flush_cache_vmap() on to-be-mapped areas before calling this - * function. + * @pages: pages to map (always PAGE_SIZE pages) + * @page_shift: maximum shift that the pages may be mapped with, @pages must + * be aligned and contiguous up to at least this shift. * * RETURNS: * 0 on success, -errno on failure. */ -int map_kernel_range_noflush(unsigned long addr, unsigned long size, - pgprot_t prot, struct page **pages) -{ - return vmap_pages_range_noflush(addr, addr + size, prot, pages, PAGE_SHIFT); -} - -int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, - struct page **pages) +static int vmap_pages_range(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages, unsigned int page_shift) { - int ret; + int err; - ret = map_kernel_range_noflush(start, size, prot, pages); - flush_cache_vmap(start, start + size); - return ret; + err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); + flush_cache_vmap(addr, end); + return err; } int is_vmalloc_or_module_addr(const void *x) @@ -2156,10 +2143,12 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node) kasan_unpoison_vmalloc(mem, size); - if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) { + if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, + pages, PAGE_SHIFT) < 0) { vm_unmap_ram(mem, count); return NULL; } + return mem; } EXPORT_SYMBOL(vm_map_ram); @@ -2703,6 +2692,7 @@ void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) { struct vm_struct *area; + unsigned long addr; unsigned long size; /* In bytes */ might_sleep(); @@ -2715,8 +2705,9 @@ void *vmap(struct page **pages, unsigned int count, if (!area) return NULL; - if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot), - pages) < 0) { + addr = (unsigned long)area->addr; + if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), + pages, PAGE_SHIFT) < 0) { vunmap(area->addr); return NULL; } -- cgit v1.2.3 From 4ad0ae8c64ac8f81a3651bca11be7c3cb086df80 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 29 Apr 2021 22:59:01 -0700 Subject: mm/vmalloc: remove unmap_kernel_range MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a shim around vunmap_range, get rid of it. Move the main API comment from the _noflush variant to the normal variant, and make _noflush internal to mm/. 
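[Sketch, not part of the patch: for callers the change is mechanical: the old
size-based call becomes an explicit address range, as the arm64, powerpc and
PCI hunks below show. The wrapper name example_iounmap() is hypothetical.]

	/* Sketch only: tear down a kernel virtual mapping of 'size' bytes. */
	static void example_iounmap(unsigned long addr, unsigned long size)
	{
		/* was: unmap_kernel_range(addr, size); */
		vunmap_range(addr, addr + size);
	}
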
[npiggin@gmail.com: fix nommu builds and a comment bug per sfr] Link: https://lkml.kernel.org/r/1617292598.m6g0knx24s.astroid@bobo.none [akpm@linux-foundation.org: move vunmap_range_noflush() stub inside !CONFIG_MMU, not !CONFIG_NUMA] [npiggin@gmail.com: fix nommu builds] Link: https://lkml.kernel.org/r/1617292497.o1uhq5ipxp.astroid@bobo.none Link: https://lkml.kernel.org/r/20210322021806.892164-5-npiggin@gmail.com Signed-off-by: Nicholas Piggin Reviewed-by: Christoph Hellwig Cc: Cédric Le Goater Cc: Uladzislau Rezki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/core-api/cachetlb.rst | 2 +- arch/arm64/mm/init.c | 2 +- arch/powerpc/kernel/isa-bridge.c | 4 +-- arch/powerpc/kernel/pci_64.c | 2 +- arch/powerpc/mm/ioremap.c | 2 +- drivers/pci/pci.c | 2 +- include/linux/vmalloc.h | 8 +---- mm/internal.h | 15 +++++++++- mm/percpu-vm.c | 2 +- mm/vmalloc.c | 59 ++++++++++++++++++------------------- 10 files changed, 51 insertions(+), 47 deletions(-) (limited to 'include/linux/vmalloc.h') diff --git a/Documentation/core-api/cachetlb.rst b/Documentation/core-api/cachetlb.rst index 756f7bcf8191..fe4290e26729 100644 --- a/Documentation/core-api/cachetlb.rst +++ b/Documentation/core-api/cachetlb.rst @@ -215,7 +215,7 @@ Here are the routines, one by one: The first of these two routines is invoked after vmap_range() has installed the page table entries. The second is invoked - before unmap_kernel_range() deletes the page table entries. + before vunmap_range() deletes the page table entries. There exists another whole class of cpu cache issues which currently require a whole different set of interfaces to handle properly. diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 3685e12aba9b..470f92e6a542 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -521,7 +521,7 @@ void free_initmem(void) * prevents the region from being reused for kernel modules, which * is not supported by kallsyms. 
*/ - unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin)); + vunmap_range((u64)__init_begin, (u64)__init_end); } void dump_mem_limit(void) diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index 2257d24e6a26..39c625737c09 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -48,7 +48,7 @@ static void remap_isa_base(phys_addr_t pa, unsigned long size) if (slab_is_available()) { if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa, pgprot_noncached(PAGE_KERNEL))) - unmap_kernel_range(ISA_IO_BASE, size); + vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size); } else { early_ioremap_range(ISA_IO_BASE, pa, size, pgprot_noncached(PAGE_KERNEL)); @@ -311,7 +311,7 @@ static void isa_bridge_remove(void) isa_bridge_pcidev = NULL; /* Unmap the ISA area */ - unmap_kernel_range(ISA_IO_BASE, 0x10000); + vunmap_range(ISA_IO_BASE, ISA_IO_BASE + 0x10000); } /** diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 9312e6eda7ff..3fb7e572abed 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -140,7 +140,7 @@ void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size) addr = (unsigned long)area->addr; if (ioremap_page_range(addr, addr + size, paddr, pgprot_noncached(PAGE_KERNEL))) { - unmap_kernel_range(addr, size); + vunmap_range(addr, addr + size); return NULL; } diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c index b1a0aebe8c48..57342154d2b0 100644 --- a/arch/powerpc/mm/ioremap.c +++ b/arch/powerpc/mm/ioremap.c @@ -93,7 +93,7 @@ void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size, if (!ret) return (void __iomem *)area->addr + offset; - unmap_kernel_range(va, size); + vunmap_range(va, va + size); free_vm_area(area); return NULL; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e4d4e399004b..f4c26e6118ea 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4102,7 +4102,7 @@ void pci_unmap_iospace(struct resource *res) #if defined(PCI_IOBASE) && defined(CONFIG_MMU) unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; - unmap_kernel_range(vaddr, resource_size(res)); + vunmap_range(vaddr, vaddr + resource_size(res)); #endif } EXPORT_SYMBOL(pci_unmap_iospace); diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index fb3b9989a4c5..394d03cc0e92 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -212,8 +212,7 @@ static inline bool is_vm_area_hugepages(const void *addr) int vmap_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift); -extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); -extern void unmap_kernel_range(unsigned long addr, unsigned long size); +void vunmap_range(unsigned long addr, unsigned long end); static inline void set_vm_flush_reset_perms(void *addr) { struct vm_struct *vm = find_vm_area(addr); @@ -223,11 +222,6 @@ static inline void set_vm_flush_reset_perms(void *addr) } #else -static inline void -unmap_kernel_range_noflush(unsigned long addr, unsigned long size) -{ -} -#define unmap_kernel_range unmap_kernel_range_noflush static inline void set_vm_flush_reset_perms(void *addr) { } diff --git a/mm/internal.h b/mm/internal.h index 58c3757c52d9..42e30e71554a 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -446,7 +446,9 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf, static inline void clear_page_mlock(struct page *page) { } static inline void 
mlock_vma_page(struct page *page) { } static inline void mlock_migrate_page(struct page *new, struct page *old) { } - +static inline void vunmap_range_noflush(unsigned long start, unsigned long end) +{ +} #endif /* !CONFIG_MMU */ /* @@ -640,7 +642,18 @@ struct migration_target_control { /* * mm/vmalloc.c */ +#ifdef CONFIG_MMU int vmap_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift); +#else +static inline +int vmap_pages_range_noflush(unsigned long addr, unsigned long end, + pgprot_t prot, struct page **pages, unsigned int page_shift) +{ + return -EINVAL; +} +#endif + +void vunmap_range_noflush(unsigned long start, unsigned long end); #endif /* __MM_INTERNAL_H */ diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c index 88a53eb68a94..8d3844bc0c7c 100644 --- a/mm/percpu-vm.c +++ b/mm/percpu-vm.c @@ -134,7 +134,7 @@ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) { - unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT); + vunmap_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT)); } /** diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 527781a3a0fe..f7a53c19e84b 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -378,22 +378,20 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, } while (p4d++, addr = next, addr != end); } -/** - * unmap_kernel_range_noflush - unmap kernel VM area - * @start: start of the VM area to unmap - * @size: size of the VM area to unmap +/* + * vunmap_range_noflush is similar to vunmap_range, but does not + * flush caches or TLBs. * - * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size specify - * should have been allocated using get_vm_area() and its friends. + * The caller is responsible for calling flush_cache_vmap() before calling + * this function, and flush_tlb_kernel_range after it has returned + * successfully (and before the addresses are expected to cause a page fault + * or be re-mapped for something else, if TLB flushes are being delayed or + * coalesced). * - * NOTE: - * This function does NOT do any cache flushing. The caller is responsible - * for calling flush_cache_vunmap() on to-be-mapped areas before calling this - * function and flush_tlb_kernel_range() after. + * This is an internal function only. Do not use outside mm/. */ -void unmap_kernel_range_noflush(unsigned long start, unsigned long size) +void vunmap_range_noflush(unsigned long start, unsigned long end) { - unsigned long end = start + size; unsigned long next; pgd_t *pgd; unsigned long addr = start; @@ -414,6 +412,22 @@ void unmap_kernel_range_noflush(unsigned long start, unsigned long size) arch_sync_kernel_mappings(start, end); } +/** + * vunmap_range - unmap kernel virtual addresses + * @addr: start of the VM area to unmap + * @end: end of the VM area to unmap (non-inclusive) + * + * Clears any present PTEs in the virtual address range, flushes TLBs and + * caches. Any subsequent access to the address before it has been re-mapped + * is a kernel bug. 
+ */ +void vunmap_range(unsigned long addr, unsigned long end) +{ + flush_cache_vunmap(addr, end); + vunmap_range_noflush(addr, end); + flush_tlb_kernel_range(addr, end); +} + static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) @@ -1712,7 +1726,7 @@ static void free_vmap_area_noflush(struct vmap_area *va) static void free_unmap_vmap_area(struct vmap_area *va) { flush_cache_vunmap(va->va_start, va->va_end); - unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start); + vunmap_range_noflush(va->va_start, va->va_end); if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range(va->va_start, va->va_end); @@ -1990,7 +2004,7 @@ static void vb_free(unsigned long addr, unsigned long size) offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr)); - unmap_kernel_range_noflush(addr, size); + vunmap_range_noflush(addr, addr + size); if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range(addr, addr + size); @@ -2307,23 +2321,6 @@ void __init vmalloc_init(void) vmap_initialized = true; } -/** - * unmap_kernel_range - unmap kernel VM area and flush cache and TLB - * @addr: start of the VM area to unmap - * @size: size of the VM area to unmap - * - * Similar to unmap_kernel_range_noflush() but flushes vcache before - * the unmapping and tlb after. - */ -void unmap_kernel_range(unsigned long addr, unsigned long size) -{ - unsigned long end = addr + size; - - flush_cache_vunmap(addr, end); - unmap_kernel_range_noflush(addr, size); - flush_tlb_kernel_range(addr, end); -} - static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, struct vmap_area *va, unsigned long flags, const void *caller) { -- cgit v1.2.3 From bbcd53c960713507ae764bf81970651b5577b95a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 6 May 2021 18:05:55 -0700 Subject: drivers/char: remove /dev/kmem for good Patch series "drivers/char: remove /dev/kmem for good". Exploring /dev/kmem and /dev/mem in the context of memory hot(un)plug and memory ballooning, I started questioning the existence of /dev/kmem. Comparing it with the /proc/kcore implementation, it does not seem to be able to deal with things like a) Pages unmapped from the direct mapping (e.g., to be used by secretmem) -> kern_addr_valid(). virt_addr_valid() is not sufficient. b) Special cases like gart aperture memory that is not to be touched -> mem_pfn_is_ram() Unless I am missing something, it's at least broken in some cases and might fault/crash the machine. Looks like its existence has been questioned before in 2005 and 2010 [1], after ~11 additional years, it might make sense to revive the discussion. CONFIG_DEVKMEM is only enabled in a single defconfig (on purpose or by mistake?). All distributions disable it: in Ubuntu it has been disabled for more than 10 years, in Debian since 2.6.31, in Fedora at least starting with FC3, in RHEL starting with RHEL4, in SUSE starting from 15sp2, and OpenSUSE has it disabled as well. 1) /dev/kmem was popular for rootkits [2] before it got disabled basically everywhere. Ubuntu documents [3] "There is no modern user of /dev/kmem any more beyond attackers using it to load kernel rootkits.". 
RHEL documents in a BZ [5] "it served no practical purpose other than to serve as a potential security problem or to enable binary module drivers to access structures/functions they shouldn't be touching" 2) /proc/kcore is a decent interface to have a controlled way to read kernel memory for debugging purposes. (will need some extensions to deal with memory offlining/unplug, memory ballooning, and poisoned pages, though) 3) It might be useful for corner case debugging [1]. KDB/KGDB might be a better fit, especially for writing random memory; it is harder to shoot yourself in the foot. 4) "Kernel Memory Editor" [4] hasn't seen any updates since 2000 and seems to be incompatible with 64bit [1]. For educational purposes, /proc/kcore might be used to monitor value updates -- or older kernels can be used. 5) It's broken on arm64, and therefore, completely disabled there. Looks like it's essentially unused and has been replaced by better suited interfaces for individual tasks (/proc/kcore, KDB/KGDB). Let's just remove it. [1] https://lwn.net/Articles/147901/ [2] https://www.linuxjournal.com/article/10505 [3] https://wiki.ubuntu.com/Security/Features#A.2Fdev.2Fkmem_disabled [4] https://sourceforge.net/projects/kme/ [5] https://bugzilla.redhat.com/show_bug.cgi?id=154796 Link: https://lkml.kernel.org/r/20210324102351.6932-1-david@redhat.com Link: https://lkml.kernel.org/r/20210324102351.6932-2-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Michal Hocko Acked-by: Kees Cook Cc: Linus Torvalds Cc: Greg Kroah-Hartman Cc: "Alexander A. Klimov" Cc: Alexander Viro Cc: Alexandre Belloni Cc: Andrew Lunn Cc: Andrey Zhizhikin Cc: Arnd Bergmann Cc: Benjamin Herrenschmidt Cc: Brian Cain Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Chris Zankel Cc: Corentin Labbe Cc: "David S. Miller" Cc: "Eric W. Biederman" Cc: Geert Uytterhoeven Cc: Gerald Schaefer Cc: Greentime Hu Cc: Gregory Clement Cc: Heiko Carstens Cc: Helge Deller Cc: Hillf Danton Cc: huang ying Cc: Ingo Molnar Cc: Ivan Kokshaysky Cc: "James E.J.
Bottomley" Cc: James Troup Cc: Jiaxun Yang Cc: Jonas Bonn Cc: Jonathan Corbet Cc: Kairui Song Cc: Krzysztof Kozlowski Cc: Kuninori Morimoto Cc: Liviu Dudau Cc: Lorenzo Pieralisi Cc: Luc Van Oostenryck Cc: Luis Chamberlain Cc: Matthew Wilcox Cc: Matt Turner Cc: Max Filippov Cc: Michael Ellerman Cc: Mike Rapoport Cc: Mikulas Patocka Cc: Minchan Kim Cc: Niklas Schnelle Cc: Oleksiy Avramchenko Cc: openrisc@lists.librecores.org Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: "Pavel Machek (CIP)" Cc: Pavel Machek Cc: "Peter Zijlstra (Intel)" Cc: Pierre Morel Cc: Randy Dunlap Cc: Richard Henderson Cc: Rich Felker Cc: Robert Richter Cc: Rob Herring Cc: Russell King Cc: Sam Ravnborg Cc: Sebastian Andrzej Siewior Cc: Sebastian Hesselbarth Cc: sparclinux@vger.kernel.org Cc: Stafford Horne Cc: Stefan Kristiansson Cc: Steven Rostedt Cc: Sudeep Holla Cc: Theodore Dubois Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Viresh Kumar Cc: William Cohen Cc: Xiaoming Ni Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/admin-guide/devices.txt | 2 +- arch/arm/configs/dove_defconfig | 1 - arch/arm/configs/magician_defconfig | 1 - arch/arm/configs/moxart_defconfig | 1 - arch/arm/configs/mps2_defconfig | 1 - arch/arm/configs/mvebu_v5_defconfig | 1 - arch/arm/configs/xcep_defconfig | 1 - arch/hexagon/configs/comet_defconfig | 1 - arch/m68k/configs/amcore_defconfig | 1 - arch/openrisc/configs/or1ksim_defconfig | 1 - arch/sh/configs/edosk7705_defconfig | 1 - arch/sh/configs/se7206_defconfig | 1 - arch/sh/configs/sh2007_defconfig | 1 - arch/sh/configs/sh7724_generic_defconfig | 1 - arch/sh/configs/sh7770_generic_defconfig | 1 - arch/sh/configs/sh7785lcr_32bit_defconfig | 1 - arch/sparc/configs/sparc64_defconfig | 1 - arch/xtensa/configs/xip_kc705_defconfig | 1 - drivers/char/Kconfig | 10 -- drivers/char/mem.c | 231 ------------------------------ include/linux/fs.h | 2 +- include/linux/vmalloc.h | 2 +- kernel/configs/android-base.config | 1 - mm/ksm.c | 2 +- mm/vmalloc.c | 2 +- 25 files changed, 5 insertions(+), 264 deletions(-) (limited to 'include/linux/vmalloc.h') diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt index ef41f77cb979..9c2be821c225 100644 --- a/Documentation/admin-guide/devices.txt +++ b/Documentation/admin-guide/devices.txt @@ -4,7 +4,7 @@ 1 char Memory devices 1 = /dev/mem Physical memory access - 2 = /dev/kmem Kernel virtual memory access + 2 = /dev/kmem OBSOLETE - replaced by /proc/kcore 3 = /dev/null Null device 4 = /dev/port I/O port access 5 = /dev/zero Null byte source diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig index e70c997d5f4c..b935162a8bba 100644 --- a/arch/arm/configs/dove_defconfig +++ b/arch/arm/configs/dove_defconfig @@ -63,7 +63,6 @@ CONFIG_INPUT_EVDEV=y # CONFIG_MOUSE_PS2 is not set # CONFIG_SERIO is not set CONFIG_LEGACY_PTY_COUNT=16 -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_RUNTIME_UARTS=2 diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig index b4670d42f378..abde1fb23b20 100644 --- a/arch/arm/configs/magician_defconfig +++ b/arch/arm/configs/magician_defconfig @@ -72,7 +72,6 @@ CONFIG_INPUT_TOUCHSCREEN=y CONFIG_INPUT_MISC=y CONFIG_INPUT_UINPUT=m # CONFIG_SERIO is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_PXA=y # CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set diff --git a/arch/arm/configs/moxart_defconfig 
b/arch/arm/configs/moxart_defconfig index 6834e97af348..eacc089d86c5 100644 --- a/arch/arm/configs/moxart_defconfig +++ b/arch/arm/configs/moxart_defconfig @@ -79,7 +79,6 @@ CONFIG_INPUT_EVBUG=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=1 diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig index 1d923dbb9928..89f4a6ff30bd 100644 --- a/arch/arm/configs/mps2_defconfig +++ b/arch/arm/configs/mps2_defconfig @@ -69,7 +69,6 @@ CONFIG_SMSC911X=y # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_MPS2_UART_CONSOLE=y CONFIG_SERIAL_MPS2_UART=y # CONFIG_HW_RANDOM is not set diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig index 4f16716bfc32..d57ff30dabff 100644 --- a/arch/arm/configs/mvebu_v5_defconfig +++ b/arch/arm/configs/mvebu_v5_defconfig @@ -100,7 +100,6 @@ CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set CONFIG_LEGACY_PTY_COUNT=16 -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_RUNTIME_UARTS=2 diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig index f1fbdfc5c8c6..4d8e7f2eaef7 100644 --- a/arch/arm/configs/xcep_defconfig +++ b/arch/arm/configs/xcep_defconfig @@ -53,7 +53,6 @@ CONFIG_NET_ETHERNET=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_PXA=y CONFIG_SERIAL_PXA_CONSOLE=y # CONFIG_LEGACY_PTYS is not set diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig index f19ae2ab0aaa..c5a214716a38 100644 --- a/arch/hexagon/configs/comet_defconfig +++ b/arch/hexagon/configs/comet_defconfig @@ -34,7 +34,6 @@ CONFIG_NET_ETHERNET=y # CONFIG_SERIO is not set # CONFIG_CONSOLE_TRANSLATIONS is not set CONFIG_LEGACY_PTY_COUNT=64 -# CONFIG_DEVKMEM is not set # CONFIG_HW_RANDOM is not set CONFIG_SPI=y CONFIG_SPI_DEBUG=y diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig index 3a84f24d41c8..6d9ed2198170 100644 --- a/arch/m68k/configs/amcore_defconfig +++ b/arch/m68k/configs/amcore_defconfig @@ -60,7 +60,6 @@ CONFIG_DM9000=y # CONFIG_VT is not set # CONFIG_UNIX98_PTYS is not set # CONFIG_DEVMEM is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_MCF=y CONFIG_SERIAL_MCF_BAUDRATE=115200 CONFIG_SERIAL_MCF_CONSOLE=y diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig index 75f2da324d0e..6e1e004047c7 100644 --- a/arch/openrisc/configs/or1ksim_defconfig +++ b/arch/openrisc/configs/or1ksim_defconfig @@ -43,7 +43,6 @@ CONFIG_MICREL_PHY=y # CONFIG_SERIO is not set # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y diff --git a/arch/sh/configs/edosk7705_defconfig b/arch/sh/configs/edosk7705_defconfig index ef7cc31997b1..9ee35269bee2 100644 --- a/arch/sh/configs/edosk7705_defconfig +++ b/arch/sh/configs/edosk7705_defconfig @@ -23,7 +23,6 @@ CONFIG_SH_PCLK_FREQ=31250000 # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set # CONFIG_UNIX98_PTYS is not set # CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set diff --git a/arch/sh/configs/se7206_defconfig 
b/arch/sh/configs/se7206_defconfig index 315b04a8dd2f..601d062250d1 100644 --- a/arch/sh/configs/se7206_defconfig +++ b/arch/sh/configs/se7206_defconfig @@ -71,7 +71,6 @@ CONFIG_SMC91X=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=4 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig index 99975db461d8..79f02f1c0dc8 100644 --- a/arch/sh/configs/sh2007_defconfig +++ b/arch/sh/configs/sh2007_defconfig @@ -75,7 +75,6 @@ CONFIG_INPUT_FF_MEMLESS=y # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_LEGACY_PTYS is not set diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig index 2c46c0004780..cbc9389a89a8 100644 --- a/arch/sh/configs/sh7724_generic_defconfig +++ b/arch/sh/configs/sh7724_generic_defconfig @@ -18,7 +18,6 @@ CONFIG_CPU_IDLE=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=6 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig index 88193153e51b..ee2357deba0f 100644 --- a/arch/sh/configs/sh7770_generic_defconfig +++ b/arch/sh/configs/sh7770_generic_defconfig @@ -20,7 +20,6 @@ CONFIG_CPU_IDLE=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=6 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig index 9b885c14c400..5c725c75fcef 100644 --- a/arch/sh/configs/sh7785lcr_32bit_defconfig +++ b/arch/sh/configs/sh7785lcr_32bit_defconfig @@ -66,7 +66,6 @@ CONFIG_INPUT_FF_MEMLESS=m CONFIG_INPUT_EVDEV=y CONFIG_INPUT_EVBUG=m CONFIG_VT_HW_CONSOLE_BINDING=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=6 CONFIG_SERIAL_SH_SCI_CONSOLE=y diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 12a4fb0bd52a..18099099583e 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -122,7 +122,6 @@ CONFIG_INPUT_SPARCSPKR=y # CONFIG_SERIO_SERPORT is not set CONFIG_SERIO_PCIPS2=m CONFIG_SERIO_RAW=m -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_SUNSU=y CONFIG_SERIAL_SUNSU_CONSOLE=y CONFIG_SERIAL_SUNSAB=y diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig index 4f1ff9531f6a..062148e17135 100644 --- a/arch/xtensa/configs/xip_kc705_defconfig +++ b/arch/xtensa/configs/xip_kc705_defconfig @@ -72,7 +72,6 @@ CONFIG_MARVELL_PHY=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set -CONFIG_DEVKMEM=y CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index d229a2d0c017..b151e0fcdeb5 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -334,16 +334,6 @@ config DEVMEM memory. When in doubt, say "Y". 
-config DEVKMEM - bool "/dev/kmem virtual device support" - # On arm64, VMALLOC_START < PAGE_OFFSET, which confuses kmem read/write - depends on !ARM64 - help - Say Y here if you want to support the /dev/kmem device. The - /dev/kmem device is rarely used, but can be used for certain - kind of kernel debugging operations. - When in doubt, say "N". - config NVRAM tristate "/dev/nvram support" depends on X86 || HAVE_ARCH_NVRAM_OPS diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 869b9f5e8e03..15dc54fa1d47 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -403,221 +403,6 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma) return 0; } -static int mmap_kmem(struct file *file, struct vm_area_struct *vma) -{ - unsigned long pfn; - - /* Turn a kernel-virtual address into a physical page frame */ - pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT; - - /* - * RED-PEN: on some architectures there is more mapped memory than - * available in mem_map which pfn_valid checks for. Perhaps should add a - * new macro here. - * - * RED-PEN: vmalloc is not supported right now. - */ - if (!pfn_valid(pfn)) - return -EIO; - - vma->vm_pgoff = pfn; - return mmap_mem(file, vma); -} - -/* - * This function reads the *virtual* memory as seen by the kernel. - */ -static ssize_t read_kmem(struct file *file, char __user *buf, - size_t count, loff_t *ppos) -{ - unsigned long p = *ppos; - ssize_t low_count, read, sz; - char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ - int err = 0; - - read = 0; - if (p < (unsigned long) high_memory) { - low_count = count; - if (count > (unsigned long)high_memory - p) - low_count = (unsigned long)high_memory - p; - -#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED - /* we don't have page 0 mapped on sparc and m68k.. */ - if (p < PAGE_SIZE && low_count > 0) { - sz = size_inside_page(p, low_count); - if (clear_user(buf, sz)) - return -EFAULT; - buf += sz; - p += sz; - read += sz; - low_count -= sz; - count -= sz; - } -#endif - while (low_count > 0) { - sz = size_inside_page(p, low_count); - - /* - * On ia64 if a page has been mapped somewhere as - * uncached, then it must also be accessed uncached - * by the kernel or data corruption may occur - */ - kbuf = xlate_dev_kmem_ptr((void *)p); - if (!virt_addr_valid(kbuf)) - return -ENXIO; - - if (copy_to_user(buf, kbuf, sz)) - return -EFAULT; - buf += sz; - p += sz; - read += sz; - low_count -= sz; - count -= sz; - if (should_stop_iteration()) { - count = 0; - break; - } - } - } - - if (count > 0) { - kbuf = (char *)__get_free_page(GFP_KERNEL); - if (!kbuf) - return -ENOMEM; - while (count > 0) { - sz = size_inside_page(p, count); - if (!is_vmalloc_or_module_addr((void *)p)) { - err = -ENXIO; - break; - } - sz = vread(kbuf, (char *)p, sz); - if (!sz) - break; - if (copy_to_user(buf, kbuf, sz)) { - err = -EFAULT; - break; - } - count -= sz; - buf += sz; - read += sz; - p += sz; - if (should_stop_iteration()) - break; - } - free_page((unsigned long)kbuf); - } - *ppos = p; - return read ? read : err; -} - - -static ssize_t do_write_kmem(unsigned long p, const char __user *buf, - size_t count, loff_t *ppos) -{ - ssize_t written, sz; - unsigned long copied; - - written = 0; -#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED - /* we don't have page 0 mapped on sparc and m68k.. */ - if (p < PAGE_SIZE) { - sz = size_inside_page(p, count); - /* Hmm. Do something? 
*/ - buf += sz; - p += sz; - count -= sz; - written += sz; - } -#endif - - while (count > 0) { - void *ptr; - - sz = size_inside_page(p, count); - - /* - * On ia64 if a page has been mapped somewhere as uncached, then - * it must also be accessed uncached by the kernel or data - * corruption may occur. - */ - ptr = xlate_dev_kmem_ptr((void *)p); - if (!virt_addr_valid(ptr)) - return -ENXIO; - - copied = copy_from_user(ptr, buf, sz); - if (copied) { - written += sz - copied; - if (written) - break; - return -EFAULT; - } - buf += sz; - p += sz; - count -= sz; - written += sz; - if (should_stop_iteration()) - break; - } - - *ppos += written; - return written; -} - -/* - * This function writes to the *virtual* memory as seen by the kernel. - */ -static ssize_t write_kmem(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - unsigned long p = *ppos; - ssize_t wrote = 0; - ssize_t virtr = 0; - char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ - int err = 0; - - if (p < (unsigned long) high_memory) { - unsigned long to_write = min_t(unsigned long, count, - (unsigned long)high_memory - p); - wrote = do_write_kmem(p, buf, to_write, ppos); - if (wrote != to_write) - return wrote; - p += wrote; - buf += wrote; - count -= wrote; - } - - if (count > 0) { - kbuf = (char *)__get_free_page(GFP_KERNEL); - if (!kbuf) - return wrote ? wrote : -ENOMEM; - while (count > 0) { - unsigned long sz = size_inside_page(p, count); - unsigned long n; - - if (!is_vmalloc_or_module_addr((void *)p)) { - err = -ENXIO; - break; - } - n = copy_from_user(kbuf, buf, sz); - if (n) { - err = -EFAULT; - break; - } - vwrite(kbuf, (char *)p, sz); - count -= sz; - buf += sz; - virtr += sz; - p += sz; - if (should_stop_iteration()) - break; - } - free_page((unsigned long)kbuf); - } - - *ppos = p; - return virtr + wrote ? : err; -} - static ssize_t read_port(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -855,7 +640,6 @@ static int open_port(struct inode *inode, struct file *filp) #define write_zero write_null #define write_iter_zero write_iter_null #define open_mem open_port -#define open_kmem open_mem static const struct file_operations __maybe_unused mem_fops = { .llseek = memory_lseek, @@ -869,18 +653,6 @@ static const struct file_operations __maybe_unused mem_fops = { #endif }; -static const struct file_operations __maybe_unused kmem_fops = { - .llseek = memory_lseek, - .read = read_kmem, - .write = write_kmem, - .mmap = mmap_kmem, - .open = open_kmem, -#ifndef CONFIG_MMU - .get_unmapped_area = get_unmapped_area_mem, - .mmap_capabilities = memory_mmap_capabilities, -#endif -}; - static const struct file_operations null_fops = { .llseek = null_lseek, .read = read_null, @@ -924,9 +696,6 @@ static const struct memdev { } devlist[] = { #ifdef CONFIG_DEVMEM [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET }, -#endif -#ifdef CONFIG_DEVKMEM - [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET }, #endif [3] = { "null", 0666, &null_fops, 0 }, #ifdef CONFIG_DEVPORT diff --git a/include/linux/fs.h b/include/linux/fs.h index acef282b97c6..c3c88fdb9b2a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -145,7 +145,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* Expect random access pattern */ #define FMODE_RANDOM ((__force fmode_t)0x1000) -/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */ +/* File is huge (eg. 
/dev/mem): treat loff_t as unsigned */ #define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) /* File is opened with O_PATH; almost nothing can be done with it */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 394d03cc0e92..f31ba59fb1ef 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -227,7 +227,7 @@ static inline void set_vm_flush_reset_perms(void *addr) } #endif -/* for /dev/kmem */ +/* for /proc/kcore */ extern long vread(char *buf, char *addr, unsigned long count); extern long vwrite(char *buf, char *addr, unsigned long count); diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config index d3fd428f4b92..eb701b2ac72f 100644 --- a/kernel/configs/android-base.config +++ b/kernel/configs/android-base.config @@ -1,5 +1,4 @@ # KEEP ALPHABETICALLY SORTED -# CONFIG_DEVKMEM is not set # CONFIG_DEVMEM is not set # CONFIG_FHANDLE is not set # CONFIG_INET_LRO is not set diff --git a/mm/ksm.c b/mm/ksm.c index b321a67ebaa9..b7cbcc7d4977 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -459,7 +459,7 @@ static inline bool ksm_test_exit(struct mm_struct *mm) * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma, * in case the application has unmapped and remapped mm,addr meanwhile. * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP - * mmap of /dev/mem or /dev/kmem, where we would not want to touch it. + * mmap of /dev/mem, where we would not want to touch it. * * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context * of the process that owns 'vma'. We also do not want to enforce diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 9c539f0730a5..2868692c6807 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3219,7 +3219,7 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count) * Note: In usual ops, vread() is never necessary because the caller * should know vmalloc() area is valid and can use memcpy(). * This is for routines which have to access vmalloc area without - * any information, as /dev/kmem. + * any information, as /proc/kcore. * * Return: number of bytes for which addr and buf should be increased * (same number as @count) or %0 if [addr...addr+count) doesn't -- cgit v1.2.3 From f7c8ce44ebb113b83135ada6e496db33d8a535e3 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 6 May 2021 18:06:06 -0700 Subject: mm/vmalloc: remove vwrite() The last user (/dev/kmem) is gone. Let's drop it. Link: https://lkml.kernel.org/r/20210324102351.6932-4-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Michal Hocko Cc: Linus Torvalds Cc: Greg Kroah-Hartman Cc: Hillf Danton Cc: Matthew Wilcox Cc: Oleksiy Avramchenko Cc: Steven Rostedt Cc: Minchan Kim Cc: huang ying Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmalloc.h | 1 - mm/nommu.c | 10 ----- mm/vmalloc.c | 116 +----------------------------------------------- 3 files changed, 1 insertion(+), 126 deletions(-) (limited to 'include/linux/vmalloc.h') diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index f31ba59fb1ef..b6ff16393bf6 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -229,7 +229,6 @@ static inline void set_vm_flush_reset_perms(void *addr) /* for /proc/kcore */ extern long vread(char *buf, char *addr, unsigned long count); -extern long vwrite(char *buf, char *addr, unsigned long count); /* * Internals. Dont't use.. 
diff --git a/mm/nommu.c b/mm/nommu.c index 5c9ab799c0e6..85a3a68dffb6 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -210,16 +210,6 @@ long vread(char *buf, char *addr, unsigned long count) return count; } -long vwrite(char *buf, char *addr, unsigned long count) -{ - /* Don't allow overflow */ - if ((unsigned long) addr + count < count) - count = -(unsigned long) addr; - - memcpy(addr, buf, count); - return count; -} - /* * vmalloc - allocate virtually contiguous memory * diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 2868692c6807..a7f318c9e426 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3146,10 +3146,7 @@ static int aligned_vread(char *buf, char *addr, unsigned long count) * kmap() and get small overhead in this access function. */ if (p) { - /* - * we can expect USER0 is not used (see vread/vwrite's - * function description) - */ + /* We can expect USER0 is not used -- see vread() */ void *map = kmap_atomic(p); memcpy(buf, map + offset, length); kunmap_atomic(map); @@ -3164,43 +3161,6 @@ static int aligned_vread(char *buf, char *addr, unsigned long count) return copied; } -static int aligned_vwrite(char *buf, char *addr, unsigned long count) -{ - struct page *p; - int copied = 0; - - while (count) { - unsigned long offset, length; - - offset = offset_in_page(addr); - length = PAGE_SIZE - offset; - if (length > count) - length = count; - p = vmalloc_to_page(addr); - /* - * To do safe access to this _mapped_ area, we need - * lock. But adding lock here means that we need to add - * overhead of vmalloc()/vfree() calles for this _debug_ - * interface, rarely used. Instead of that, we'll use - * kmap() and get small overhead in this access function. - */ - if (p) { - /* - * we can expect USER0 is not used (see vread/vwrite's - * function description) - */ - void *map = kmap_atomic(p); - memcpy(map + offset, buf, length); - kunmap_atomic(map); - } - addr += length; - buf += length; - copied += length; - count -= length; - } - return copied; -} - /** * vread() - read vmalloc area in a safe way. * @buf: buffer for reading data @@ -3283,80 +3243,6 @@ finished: return buflen; } -/** - * vwrite() - write vmalloc area in a safe way. - * @buf: buffer for source data - * @addr: vm address. - * @count: number of bytes to be read. - * - * This function checks that addr is a valid vmalloc'ed area, and - * copy data from a buffer to the given addr. If specified range of - * [addr...addr+count) includes some valid address, data is copied from - * proper area of @buf. If there are memory holes, no copy to hole. - * IOREMAP area is treated as memory hole and no copy is done. - * - * If [addr...addr+count) doesn't includes any intersects with alive - * vm_struct area, returns 0. @buf should be kernel's buffer. - * - * Note: In usual ops, vwrite() is never necessary because the caller - * should know vmalloc() area is valid and can use memcpy(). - * This is for routines which have to access vmalloc area without - * any information, as /dev/kmem. 
- * - * Return: number of bytes for which addr and buf should be - * increased (same number as @count) or %0 if [addr...addr+count) - * doesn't include any intersection with valid vmalloc area - */ -long vwrite(char *buf, char *addr, unsigned long count) -{ - struct vmap_area *va; - struct vm_struct *vm; - char *vaddr; - unsigned long n, buflen; - int copied = 0; - - /* Don't allow overflow */ - if ((unsigned long) addr + count < count) - count = -(unsigned long) addr; - buflen = count; - - spin_lock(&vmap_area_lock); - list_for_each_entry(va, &vmap_area_list, list) { - if (!count) - break; - - if (!va->vm) - continue; - - vm = va->vm; - vaddr = (char *) vm->addr; - if (addr >= vaddr + get_vm_area_size(vm)) - continue; - while (addr < vaddr) { - if (count == 0) - goto finished; - buf++; - addr++; - count--; - } - n = vaddr + get_vm_area_size(vm) - addr; - if (n > count) - n = count; - if (!(vm->flags & VM_IOREMAP)) { - aligned_vwrite(buf, addr, n); - copied++; - } - buf += n; - addr += n; - count -= n; - } -finished: - spin_unlock(&vmap_area_lock); - if (!copied) - return 0; - return buflen; -} - /** * remap_vmalloc_range_partial - map vmalloc pages to userspace * @vma: vma to cover -- cgit v1.2.3 From f0953a1bbaca71e1ebbcb9864eb1b273156157ed Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 6 May 2021 18:06:47 -0700 Subject: mm: fix typos in comments Fix ~94 single-word typos in locking code comments, plus a few very obvious grammar mistakes. Link: https://lkml.kernel.org/r/20210322212624.GA1963421@gmail.com Link: https://lore.kernel.org/r/20210322205203.GB1959563@gmail.com Signed-off-by: Ingo Molnar Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Randy Dunlap Cc: Bhaskar Chowdhury Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 +- include/linux/vmalloc.h | 4 ++-- mm/balloon_compaction.c | 4 ++-- mm/compaction.c | 4 ++-- mm/filemap.c | 2 +- mm/gup.c | 2 +- mm/highmem.c | 2 +- mm/huge_memory.c | 6 +++--- mm/hugetlb.c | 6 +++--- mm/internal.h | 2 +- mm/kasan/kasan.h | 8 ++++---- mm/kasan/quarantine.c | 4 ++-- mm/kasan/shadow.c | 4 ++-- mm/kfence/report.c | 2 +- mm/khugepaged.c | 2 +- mm/ksm.c | 4 ++-- mm/madvise.c | 4 ++-- mm/memcontrol.c | 18 +++++++++--------- mm/memory-failure.c | 2 +- mm/memory.c | 10 +++++----- mm/mempolicy.c | 4 ++-- mm/migrate.c | 8 ++++---- mm/mmap.c | 4 ++-- mm/mprotect.c | 2 +- mm/mremap.c | 2 +- mm/oom_kill.c | 2 +- mm/page-writeback.c | 4 ++-- mm/page_alloc.c | 14 +++++++------- mm/page_owner.c | 2 +- mm/percpu-internal.h | 2 +- mm/percpu.c | 2 +- mm/pgalloc-track.h | 6 +++--- mm/slab.c | 6 +++--- mm/slub.c | 2 +- mm/swap_slots.c | 2 +- mm/vmalloc.c | 6 +++--- mm/vmstat.c | 2 +- mm/zpool.c | 2 +- mm/zsmalloc.c | 2 +- 39 files changed, 83 insertions(+), 83 deletions(-) (limited to 'include/linux/vmalloc.h') diff --git a/include/linux/mm.h b/include/linux/mm.h index 76e27ebb28a3..322ec61d0da7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -106,7 +106,7 @@ extern int mmap_rnd_compat_bits __read_mostly; * embedding these tags into addresses that point to these memory regions, and * checking that the memory and the pointer tags match on memory accesses) * redefine this macro to strip tags from pointers. - * It's defined as noop for arcitectures that don't support memory tagging. + * It's defined as noop for architectures that don't support memory tagging. 
*/ #ifndef untagged_addr #define untagged_addr(addr) (addr) diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index b6ff16393bf6..4d668abb6391 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -33,7 +33,7 @@ struct notifier_block; /* in notifier.h */ * * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after * shadow memory has been mapped. It's used to handle allocation errors so that - * we don't try to poision shadow on free if it was never allocated. + * we don't try to poison shadow on free if it was never allocated. * * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to * determine which allocations need the module shadow freed. @@ -43,7 +43,7 @@ struct notifier_block; /* in notifier.h */ /* * Maximum alignment for ioremap() regions. - * Can be overriden by arch-specific value. + * Can be overridden by arch-specific value. */ #ifndef IOREMAP_MAX_ORDER #define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */ diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 26de020aae7b..907fefde2572 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue); /** * balloon_page_list_dequeue() - removes pages from balloon's page list and * returns a list of the pages. - * @b_dev_info: balloon device decriptor where we will grab a page from. + * @b_dev_info: balloon device descriptor where we will grab a page from. * @pages: pointer to the list of pages that would be returned to the caller. * @n_req_pages: number of requested pages. * @@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue); /* * balloon_page_dequeue - removes a page from balloon's page list and returns * its address to allow the driver to release the page. - * @b_dev_info: balloon device decriptor where we will grab a page from. + * @b_dev_info: balloon device descriptor where we will grab a page from. * * Driver must call this function to properly dequeue a previously enqueued page * before definitively releasing it back to the guest system. diff --git a/mm/compaction.c b/mm/compaction.c index 3a6c6b821f80..84fde270ae74 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2012,8 +2012,8 @@ static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low) unsigned int wmark_low; /* - * Cap the low watermak to avoid excessive compaction - * activity in case a user sets the proactivess tunable + * Cap the low watermark to avoid excessive compaction + * activity in case a user sets the proactiveness tunable * close to 100 (maximum). */ wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); diff --git a/mm/filemap.c b/mm/filemap.c index 7fadf211643c..66f7e9fdfbc4 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2755,7 +2755,7 @@ unsigned int seek_page_size(struct xa_state *xas, struct page *page) * entirely memory-based such as tmpfs, and filesystems which support * unwritten extents. * - * Return: The requested offset on successs, or -ENXIO if @whence specifies + * Return: The requested offset on success, or -ENXIO if @whence specifies * SEEK_DATA and there is no data after @start. There is an implicit hole * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start * and @end contain data. 
diff --git a/mm/gup.c b/mm/gup.c index aa09535cf4d4..0697134b6a12 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1575,7 +1575,7 @@ finish_or_fault: * Returns NULL on any kind of failure - a hole must then be inserted into * the corefile, to preserve alignment with its headers; and also returns * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - - * allowing a hole to be left in the corefile to save diskspace. + * allowing a hole to be left in the corefile to save disk space. * * Called without mmap_lock (takes and releases the mmap_lock by itself). */ diff --git a/mm/highmem.c b/mm/highmem.c index e389337e00b4..4fb51d735aa6 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -519,7 +519,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot) /* * Disable migration so resulting virtual address is stable - * accross preemption. + * across preemption. */ migrate_disable(); preempt_disable(); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 98456017744d..63ed6b25deaa 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1792,8 +1792,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, /* * Returns * - 0 if PMD could not be locked - * - 1 if PMD was locked but protections unchange and TLB flush unnecessary - * - HPAGE_PMD_NR is protections changed and TLB flush necessary + * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary + * - HPAGE_PMD_NR if protections changed and TLB flush necessary */ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, unsigned long cp_flags) @@ -2469,7 +2469,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, xa_lock(&swap_cache->i_pages); } - /* lock lru list/PageCompound, ref freezed by page_ref_freeze */ + /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ lruvec = lock_page_lruvec(head); for (i = nr - 1; i >= 1; i--) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 629aa4c2259c..3db405dea3dc 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -466,7 +466,7 @@ static int allocate_file_region_entries(struct resv_map *resv, resv->region_cache_count; /* At this point, we should have enough entries in the cache - * for all the existings adds_in_progress. We should only be + * for all the existing adds_in_progress. We should only be * needing to allocate for regions_needed. */ VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress); @@ -5536,8 +5536,8 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); /* - * vma need span at least one aligned PUD size and the start,end range - * must at least partialy within it. + * vma needs to span at least one aligned PUD size, and the range + * must be at least partially within in. */ if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || (*end <= v_start) || (*start >= v_end)) diff --git a/mm/internal.h b/mm/internal.h index feeaaf06705d..54bd0dc2c23c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -334,7 +334,7 @@ static inline bool is_exec_mapping(vm_flags_t flags) } /* - * Stack area - atomatically grows in one direction + * Stack area - automatically grows in one direction * * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous: * do_mmap() forbids all other combinations. 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index 3820ca54743b..8f450bc28045 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -55,9 +55,9 @@ extern bool kasan_flag_async __ro_after_init; #define KASAN_TAG_MAX 0xFD /* maximum value for random tags */ #ifdef CONFIG_KASAN_HW_TAGS -#define KASAN_TAG_MIN 0xF0 /* mimimum value for random tags */ +#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */ #else -#define KASAN_TAG_MIN 0x00 /* mimimum value for random tags */ +#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */ #endif #ifdef CONFIG_KASAN_GENERIC @@ -403,7 +403,7 @@ static inline bool kasan_byte_accessible(const void *addr) #else /* CONFIG_KASAN_HW_TAGS */ /** - * kasan_poison - mark the memory range as unaccessible + * kasan_poison - mark the memory range as inaccessible * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE * @size - range size, must be aligned to KASAN_GRANULE_SIZE * @value - value that's written to metadata for the range @@ -434,7 +434,7 @@ bool kasan_byte_accessible(const void *addr); /** * kasan_poison_last_granule - mark the last granule of the memory range as - * unaccessible + * inaccessible * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE * @size - range size * diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index 728fb24c5683..d8ccff4c1275 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c @@ -27,7 +27,7 @@ /* Data structure and operations for quarantine queues. */ /* - * Each queue is a signle-linked list, which also stores the total size of + * Each queue is a single-linked list, which also stores the total size of * objects inside of it. */ struct qlist_head { @@ -138,7 +138,7 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache) local_irq_save(flags); /* - * As the object now gets freed from the quaratine, assume that its + * As the object now gets freed from the quarantine, assume that its * free track is no longer valid. */ *(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREE; diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 727ad4629173..082ee5b6d9a1 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -316,7 +316,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size) * // rest of vmalloc process * STORE p, a LOAD shadow(x+99) * - * If there is no barrier between the end of unpoisioning the shadow + * If there is no barrier between the end of unpoisoning the shadow * and the store of the result to p, the stores could be committed * in a different order by CPU#0, and CPU#1 could erroneously observe * poison in the shadow. @@ -384,7 +384,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr, * How does this work? * ------------------- * - * We have a region that is page aligned, labelled as A. + * We have a region that is page aligned, labeled as A. * That might not map onto the shadow in a way that is page-aligned: * * start end diff --git a/mm/kfence/report.c b/mm/kfence/report.c index e3f71451ad9e..2a319c21c939 100644 --- a/mm/kfence/report.c +++ b/mm/kfence/report.c @@ -263,6 +263,6 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r if (panic_on_warn) panic("panic_on_warn set ...\n"); - /* We encountered a memory unsafety error, taint the kernel! */ + /* We encountered a memory safety error, taint the kernel! 
*/ add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK); } diff --git a/mm/khugepaged.c b/mm/khugepaged.c index ea74da3232ab..6c0185fdd815 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -667,7 +667,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, * * The page table that maps the page has been already unlinked * from the page table tree and this process cannot get - * an additinal pin on the page. + * an additional pin on the page. * * New pins can come later if the page is shared across fork, * but not from this process. The other process cannot write to diff --git a/mm/ksm.c b/mm/ksm.c index b7cbcc7d4977..6bbe314c5260 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1065,7 +1065,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, /* * Ok this is tricky, when get_user_pages_fast() run it doesn't * take any lock, therefore the check that we are going to make - * with the pagecount against the mapcount is racey and + * with the pagecount against the mapcount is racy and * O_DIRECT can happen right after the check. * So we clear the pte and flush the tlb before the check * this assure us that no O_DIRECT can happen after the check @@ -1435,7 +1435,7 @@ static struct page *stable_node_dup(struct stable_node **_stable_node_dup, */ *_stable_node = found; /* - * Just for robustneess as stable_node is + * Just for robustness, as stable_node is * otherwise left as a stable pointer, the * compiler shall optimize it away at build * time. diff --git a/mm/madvise.c b/mm/madvise.c index 01fef79ac761..63e489e5bfdb 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -799,7 +799,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma, if (end > vma->vm_end) { /* * Don't fail if end > vma->vm_end. If the old - * vma was splitted while the mmap_lock was + * vma was split while the mmap_lock was * released the effect of the concurrent * operation may not cause madvise() to * have an undefined result. There may be an @@ -1039,7 +1039,7 @@ process_madvise_behavior_valid(int behavior) * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump. * MADV_COLD - the application is not expected to use this memory soon, * deactivate pages in this range so that they can be reclaimed - * easily if memory pressure hanppens. + * easily if memory pressure happens. * MADV_PAGEOUT - the application is not expected to use this memory soon, * page out the pages in this range immediately. * diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3004afb6d090..64ada9e650a5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -215,7 +215,7 @@ enum res_type { #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val)) #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff) #define MEMFILE_ATTR(val) ((val) & 0xffff) -/* Used for OOM nofiier */ +/* Used for OOM notifier */ #define OOM_CONTROL (0) /* @@ -786,7 +786,7 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) * __count_memcg_events - account VM events in a cgroup * @memcg: the memory cgroup * @idx: the event item - * @count: the number of events that occured + * @count: the number of events that occurred */ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, unsigned long count) @@ -904,7 +904,7 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) rcu_read_lock(); do { /* - * Page cache insertions can happen withou an + * Page cache insertions can happen without an * actual mm context, e.g. during disk probing * on boot, loopback IO, acct() writes etc. 
 */
@@ -1712,7 +1712,7 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
 struct mem_cgroup *iter;
 /*
- * Be careful about under_oom underflows becase a child memcg
+ * Be careful about under_oom underflows because a child memcg
 * could have been added after mem_cgroup_mark_under_oom.
 */
 spin_lock(&memcg_oom_lock);
@@ -1884,7 +1884,7 @@ bool mem_cgroup_oom_synchronize(bool handle)
 /*
 * There is no guarantee that an OOM-lock contender
 * sees the wakeups triggered by the OOM kill
- * uncharges. Wake any sleepers explicitely.
+ * uncharges. Wake any sleepers explicitly.
 */
 memcg_oom_recover(memcg);
 }
@@ -4364,7 +4364,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
 * Foreign dirty flushing
 *
 * There's an inherent mismatch between memcg and writeback. The former
- * trackes ownership per-page while the latter per-inode. This was a
+ * tracks ownership per-page while the latter per-inode. This was a
 * deliberate design decision because honoring per-page ownership in the
 * writeback path is complicated, may lead to higher CPU and IO overheads
 * and deemed unnecessary given that write-sharing an inode across
@@ -4379,9 +4379,9 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
 * triggering background writeback. A will be slowed down without a way to
 * make writeback of the dirty pages happen.
 *
- * Conditions like the above can lead to a cgroup getting repatedly and
+ * Conditions like the above can lead to a cgroup getting repeatedly and
 * severely throttled after making some progress after each
- * dirty_expire_interval while the underyling IO device is almost
+ * dirty_expire_interval while the underlying IO device is almost
 * completely idle.
 *
 * Solving this problem completely requires matching the ownership tracking
@@ -5774,7 +5774,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 return 0;
 /*
- * We are now commited to this value whatever it is. Changes in this
+ * We are now committed to this value whatever it is. Changes in this
 * tunable will only affect upcoming migrations, not the current one.
 * So we need to save it, and keep it going.
 */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index bd3945446d47..85ad98c00fd9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -75,7 +75,7 @@ static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, boo
 if (dissolve_free_huge_page(page) || !take_page_off_buddy(page))
 /*
 * We could fail to take off the target page from buddy
- * for example due to racy page allocaiton, but that's
+ * for example due to racy page allocation, but that's
 * acceptable because soft-offlined page is not broken
 * and if someone really want to use it, they should
 * take it.
diff --git a/mm/memory.c b/mm/memory.c
index 8c491f813687..730daa00952b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3727,7 +3727,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 return ret;
 /*
- * Archs like ppc64 need additonal space to store information
+ * Archs like ppc64 need additional space to store information
 * related to pte entry. Use the preallocated table for that.
 */
 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
@@ -4503,7 +4503,7 @@ retry_pud:
 }
 /**
- * mm_account_fault - Do page fault accountings
+ * mm_account_fault - Do page fault accounting
 *
 * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting
 * of perf event counters, but we'll still do the per-task accounting to
@@ -4512,9 +4512,9 @@ retry_pud:
 * @flags: the fault flags.
 * @ret: the fault retcode.
 *
- * This will take care of most of the page fault accountings. Meanwhile, it
+ * This will take care of most of the page fault accounting. Meanwhile, it
 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
- * updates. However note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
+ * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
 * still be in per-arch page fault handlers at the entry of page fault.
 */
 static inline void mm_account_fault(struct pt_regs *regs,
@@ -4848,7 +4848,7 @@ out:
 /**
 * generic_access_phys - generic implementation for iomem mmap access
 * @vma: the vma to access
- * @addr: userspace addres, not relative offset within @vma
+ * @addr: userspace address, not relative offset within @vma
 * @buf: buffer to read/write
 * @len: length of transfer
 * @write: set to FOLL_WRITE when writing, otherwise reading
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3ebe2cfc64af..5690513c5668 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1867,7 +1867,7 @@ static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
 *
 * policy->v.nodes is intersect with node_states[N_MEMORY].
- * so if the following test faile, it implies
+ * so if the following test fails, it implies
 * policy->v.nodes has movable memory only.
 */
 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
@@ -2098,7 +2098,7 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
 *
 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
 * policy. Otherwise, check for intersection between mask and the policy
- * nodemask for 'bind' or 'interleave' policy. For 'perferred' or 'local'
+ * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
 * policy, always return true since it may allocate elsewhere on fallback.
 *
 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
diff --git a/mm/migrate.c b/mm/migrate.c
index 6b37d00890ca..b234c3f3acb7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2779,11 +2779,11 @@ restore:
 *
 * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
 * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus
- * allowing the caller to allocate device memory for those unback virtual
- * address. For this the caller simply has to allocate device memory and
+ * allowing the caller to allocate device memory for those unbacked virtual
+ * addresses. For this the caller simply has to allocate device memory and
 * properly set the destination entry like for regular migration. Note that
- * this can still fails and thus inside the device driver must check if the
- * migration was successful for those entries after calling migrate_vma_pages()
+ * this can still fail, and thus inside the device driver you must check if the
+ * migration was successful for those entries after calling migrate_vma_pages(),
 * just like for regular migration.
 *
 * After that, the callers must call migrate_vma_pages() to go over each entry
diff --git a/mm/mmap.c b/mm/mmap.c
index c1b848fa7da6..0584e540246e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -612,7 +612,7 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm,
 unsigned long nr_pages = 0;
 struct vm_area_struct *vma;
- /* Find first overlaping mapping */
+ /* Find first overlapping mapping */
 vma = find_vma_intersection(mm, addr, end);
 if (!vma)
 return 0;
@@ -2875,7 +2875,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 if (unlikely(uf)) {
 /*
 * If userfaultfd_unmap_prep returns an error the vmas
- * will remain splitted, but userland will get a
+ * will remain split, but userland will get a
 * highly unexpected error anyway. This is no
 * different than the case where the first of the two
 * __split_vma fails, but we don't undo the first
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 94188df1ee55..e7a443157988 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -699,7 +699,7 @@ SYSCALL_DEFINE1(pkey_free, int, pkey)
 mmap_write_unlock(current->mm);
 /*
- * We could provie warnings or errors if any VMA still
+ * We could provide warnings or errors if any VMA still
 * has the pkey set here.
 */
 return ret;
diff --git a/mm/mremap.c b/mm/mremap.c
index d22629ff8f3c..47c255b60150 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -730,7 +730,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 * So, to avoid such scenario we can pre-compute if the whole
 * operation has high chances to success map-wise.
 * Worst-scenario case is when both vma's (new_addr and old_addr) get
- * split in 3 before unmaping it.
+ * split in 3 before unmapping it.
 * That means 2 more maps (1 for each) to the ones we already hold.
 * Check whether current map count plus 2 still leads us to 4 maps below
 * the threshold, otherwise return -ENOMEM here to be more safe.
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 3df2ac6b8686..eefd3f5fde46 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -74,7 +74,7 @@ static inline bool is_memcg_oom(struct oom_control *oc)
 #ifdef CONFIG_NUMA
 /**
- * oom_cpuset_eligible() - check task eligiblity for kill
+ * oom_cpuset_eligible() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @oc: pointer to struct oom_control
 *
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 5e761fb62800..0062d5c57d41 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1806,7 +1806,7 @@ pause:
 break;
 /*
- * In the case of an unresponding NFS server and the NFS dirty
+ * In the case of an unresponsive NFS server and the NFS dirty
 * pages exceeds dirty_thresh, give the other good wb's a pipe
 * to go through, so that tasks on them still remain responsive.
 *
@@ -2216,7 +2216,7 @@ int write_cache_pages(struct address_space *mapping,
 * Page truncated or invalidated. We can freely skip it
 * then, even for data integrity operations: the page
 * has disappeared concurrently, so there could be no
- * real expectation of this data interity operation
+ * real expectation of this data integrity operation
 * even if there is now a new, dirty page at the same
 * pagecache address.
 */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bcdc0c6f21f1..0582c85da08c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -893,7 +893,7 @@ compaction_capture(struct capture_control *capc, struct page *page,
 return false;
 /*
- * Do not let lower order allocations polluate a movable pageblock.
+ * Do not let lower order allocations pollute a movable pageblock.
 * This might let an unmovable request use a reclaimable pageblock
 * and vice-versa but no more than normal fallback logic which can
 * have trouble finding a high-order free page.
@@ -2776,7 +2776,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 /*
 * In page freeing path, migratetype change is racy so
 * we can counter several free pages in a pageblock
- * in this loop althoug we changed the pageblock type
+ * in this loop although we changed the pageblock type
 * from highatomic to ac->migratetype. So we should
 * adjust the count once.
 */
@@ -3080,7 +3080,7 @@ static void drain_local_pages_wq(struct work_struct *work)
 * drain_all_pages doesn't use proper cpu hotplug protection so
 * we can race with cpu offline when the WQ can move this from
 * a cpu pinned worker to an unbound one. We can operate on a different
- * cpu which is allright but we also have to make sure to not move to
+ * cpu which is alright but we also have to make sure to not move to
 * a different one.
 */
 preempt_disable();
@@ -5929,7 +5929,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
 static int __parse_numa_zonelist_order(char *s)
 {
 /*
- * We used to support different zonlists modes but they turned
+ * We used to support different zonelists modes but they turned
 * out to be just not useful. Let's keep the warning in place
 * if somebody still use the cmd line parameter so that we do
 * not fail it silently
@@ -7670,7 +7670,7 @@ static void check_for_memory(pg_data_t *pgdat, int nid)
 }
 /*
- * Some architecturs, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For
+ * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For
 * such cases we allow max_zone_pfn sorted in the descending order
 */
 bool __weak arch_has_descending_max_zone_pfns(void)
@@ -8728,7 +8728,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start: start PFN to allocate
 * @end: one-past-the-last PFN to allocate
- * @migratetype: migratetype of the underlaying pageblocks (either
+ * @migratetype: migratetype of the underlying pageblocks (either
 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
 * in range must have the same migratetype and it must
 * be either of the two.
@@ -8988,7 +8988,7 @@ EXPORT_SYMBOL(free_contig_range);
 /*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
- * page high values need to be recalulated.
+ * page high values need to be recalculated.
 */
 void __meminit zone_pcp_update(struct zone *zone)
 {
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 9661d5320a07..adfabb560eb9 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -233,7 +233,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 /*
 * We don't clear the bit on the oldpage as it's going to be freed
 * after migration. Until then, the info can be useful in case of
- * a bug, and the overal stats will be off a bit only temporarily.
+ * a bug, and the overall stats will be off a bit only temporarily.
 * Also, migrate_misplaced_transhuge_page() can still fail the
 * migration and then we want the oldpage to retain the info. But
 * in that case we also don't need to explicitly clear the info from
diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h
index 095d7eaa0db4..ae26b118e246 100644
--- a/mm/percpu-internal.h
+++ b/mm/percpu-internal.h
@@ -170,7 +170,7 @@ struct percpu_stats {
 u64 nr_max_alloc; /* max # of live allocations */
 u32 nr_chunks; /* current # of live chunks */
 u32 nr_max_chunks; /* max # of live chunks */
- size_t min_alloc_size; /* min allocaiton size */
+ size_t min_alloc_size; /* min allocation size */
 size_t max_alloc_size; /* max allocation size */
 };
diff --git a/mm/percpu.c b/mm/percpu.c
index 23308113a5ff..f99e9306b939 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1862,7 +1862,7 @@ fail:
 pr_info("limit reached, disable warning\n");
 }
 if (is_atomic) {
- /* see the flag handling in pcpu_blance_workfn() */
+ /* see the flag handling in pcpu_balance_workfn() */
 pcpu_atomic_alloc_failed = true;
 pcpu_schedule_balance_work();
 } else {
diff --git a/mm/pgalloc-track.h b/mm/pgalloc-track.h
index 1dcc865029a2..e9e879de8649 100644
--- a/mm/pgalloc-track.h
+++ b/mm/pgalloc-track.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PGALLLC_TRACK_H
-#define _LINUX_PGALLLC_TRACK_H
+#ifndef _LINUX_PGALLOC_TRACK_H
+#define _LINUX_PGALLOC_TRACK_H
 #if defined(CONFIG_MMU)
 static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd,
@@ -48,4 +48,4 @@ static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud,
 (__pte_alloc_kernel(pmd) || ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\
 NULL: pte_offset_kernel(pmd, address))
-#endif /* _LINUX_PGALLLC_TRACK_H */
+#endif /* _LINUX_PGALLOC_TRACK_H */
diff --git a/mm/slab.c b/mm/slab.c
index d56607a80fa6..d0f725637663 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -259,7 +259,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
 #define BATCHREFILL_LIMIT 16
 /*
- * Optimization question: fewer reaps means less probability for unnessary
+ * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
@@ -2381,8 +2381,8 @@ union freelist_init_state {
 };
 /*
- * Initialize the state based on the randomization methode available.
- * return true if the pre-computed list is available, false otherwize.
+ * Initialize the state based on the randomization method available.
+ * return true if the pre-computed list is available, false otherwise.
 */
 static bool freelist_state_initialize(union freelist_init_state *state,
 struct kmem_cache *cachep,
diff --git a/mm/slub.c b/mm/slub.c
index 68123b21e65f..feda53ae62ba 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3391,7 +3391,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 */
 /*
- * Mininum / Maximum order of slab pages. This influences locking overhead
+ * Minimum / Maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index be9de6d5b516..6248d1030a9b 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -16,7 +16,7 @@
 * to local caches without needing to acquire swap_info
 * lock. We do not reuse the returned slots directly but
 * move them back to the global pool in a batch. This
- * allows the slots to coaellesce and reduce fragmentation.
+ * allows the slots to coalesce and reduce fragmentation.
 *
 * The swap entry allocated is marked with SWAP_HAS_CACHE
 * flag in map_count that prevents it from being allocated
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a7f318c9e426..a13ac524f6ff 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1583,7 +1583,7 @@ static unsigned long lazy_max_pages(void)
 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
 /*
- * Serialize vmap purging. There is no actual criticial section protected
+ * Serialize vmap purging. There is no actual critical section protected
 * by this look, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
@@ -2628,7 +2628,7 @@ static void __vfree(const void *addr)
 * May sleep if called *not* from interrupt context.
 * Must not be called in NMI context (strictly speaking, it could be
 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
- * conventions for vfree() arch-depenedent would be a really bad idea).
+ * conventions for vfree() arch-dependent would be a really bad idea).
 */
 void vfree(const void *addr)
 {
@@ -3141,7 +3141,7 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
 /*
 * To do safe access to this _mapped_ area, we need
 * lock. But adding lock here means that we need to add
- * overhead of vmalloc()/vfree() calles for this _debug_
+ * overhead of vmalloc()/vfree() calls for this _debug_
 * interface, rarely used. Instead of that, we'll use
 * kmap() and get small overhead in this access function.
 */
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 5ba118521ded..cccee36b289c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -934,7 +934,7 @@ void cpu_vm_stats_fold(int cpu)
 /*
 * this is only called if !populated_zone(zone), which implies no other users of
- * pset->vm_stat_diff[] exsist.
+ * pset->vm_stat_diff[] exist.
 */
 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
 {
diff --git a/mm/zpool.c b/mm/zpool.c
index 5ed71207ced7..6d9ed48141e5 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -336,7 +336,7 @@ int zpool_shrink(struct zpool *zpool, unsigned int pages,
 * This may hold locks, disable interrupts, and/or preemption,
 * and the zpool_unmap_handle() must be called to undo those
 * actions. The code that uses the mapped handle should complete
- * its operatons on the mapped handle memory quickly and unmap
+ * its operations on the mapped handle memory quickly and unmap
 * as soon as possible. As the implementation may use per-cpu
 * data, multiple handles should not be mapped concurrently on
 * any cpu.
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 5004c176b045..19b563bc6c48 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1227,7 +1227,7 @@ EXPORT_SYMBOL_GPL(zs_get_total_pages);
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
- * @mm: maping mode to use
+ * @mm: mapping mode to use
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
-- cgit v1.2.3
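The final hunk above touches the kernel-doc for zs_map_object(), whose @mm argument selects the mapping mode. As a rough illustrative sketch of the usual pairing with zs_unmap_object(), not taken from the patch itself; `pool`, `handle`, `buf` and `len` are assumed placeholders for an existing zs_pool, a handle returned by zs_malloc(), and a source buffer:

	void *dst;

	/* Map the object write-only, copy into it, then unmap promptly. */
	dst = zs_map_object(pool, handle, ZS_MM_WO);
	memcpy(dst, buf, len);
	zs_unmap_object(pool, handle);

As the zpool mapping comment fixed just before it notes, the implementation may use per-cpu data, so callers should finish their work on the mapped memory quickly, unmap as soon as possible, and avoid mapping a second handle on the same cpu while one is still mapped.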