Diffstat (limited to 'arch')
65 files changed, 205 insertions, 117 deletions
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 8ebacf37a8cf..1f13995d00d7 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -490,3 +490,4 @@
 558	common	process_mrelease		sys_process_mrelease
 559	common	futex_waitv			sys_futex_waitv
 560	common	set_mempolicy_home_node		sys_ni_syscall
+561	common	cachestat			sys_cachestat
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index e4c2677cc1e9..2f6163f05e93 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -74,6 +74,9 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 		return 0;
 
 	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
+	if (unlikely(!pte))
+		return 0;
+
 	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
 	    !pte_write(*pte) || !pte_dirty(*pte))) {
 		pte_unmap_unlock(pte, ptl);
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 0e49154454a6..ca5302b0b7ee 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -117,8 +117,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	 * must use the nested version.  This also means we need to
 	 * open-code the spin-locking.
 	 */
-	ptl = pte_lockptr(vma->vm_mm, pmd);
 	pte = pte_offset_map(pmd, address);
+	if (!pte)
+		return 0;
+
+	ptl = pte_lockptr(vma->vm_mm, pmd);
 	do_pte_lock(ptl);
 
 	ret = do_adjust_pte(vma, address, pfn, pte);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 2418f1efabd8..83598649a094 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -85,6 +85,9 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
 			break;
 
 		pte = pte_offset_map(pmd, addr);
+		if (!pte)
+			break;
+
 		pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
 #ifndef CONFIG_ARM_LPAE
 		pr_cont(", *ppte=%08llx",
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index ac964612d8b0..8ebed8a13874 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -464,3 +464,4 @@
 448	common	process_mrelease		sys_process_mrelease
 449	common	futex_waitv			sys_futex_waitv
 450	common	set_mempolicy_home_node		sys_set_mempolicy_home_node
+451	common	cachestat			sys_cachestat
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a05652ac66ce..d91829189dd4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -120,6 +120,7 @@ config ARM64
 	select CRC32
 	select DCACHE_WORD_ACCESS
 	select DYNAMIC_FTRACE if FUNCTION_TRACER
+	select DMA_BOUNCE_UNALIGNED_KMALLOC
 	select DMA_DIRECT_REMAP
 	select EDAC_SUPPORT
 	select FRAME_POINTER
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index a51e6e8f3171..ceb368d33bf4 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -33,6 +33,7 @@
  * the CPU.
  */
 #define ARCH_DMA_MINALIGN	(128)
+#define ARCH_KMALLOC_MINALIGN	(8)
 
 #ifndef __ASSEMBLY__
 
@@ -90,6 +91,8 @@ static inline int cache_line_size_of_cpu(void)
 
 int cache_line_size(void);
 
+#define dma_get_cache_alignment	cache_line_size
+
 /*
  * Read the effective value of CTR_EL0.
  *
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 037feba03a51..64a514f90131 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -39,7 +39,7 @@
 #define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE + 5)
 #define __ARM_NR_COMPAT_END		(__ARM_NR_COMPAT_BASE + 0x800)
 
-#define __NR_compat_syscalls		451
+#define __NR_compat_syscalls		452
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 604a2053d006..d952a28463e0 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -907,6 +907,8 @@ __SYSCALL(__NR_process_mrelease, sys_process_mrelease)
 __SYSCALL(__NR_futex_waitv, sys_futex_waitv)
 #define __NR_set_mempolicy_home_node 450
 __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
+#define __NR_cachestat 451
+__SYSCALL(__NR_cachestat, sys_cachestat)
 
 /*
  * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 7e89968bd282..4c5ef9b20065 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -416,10 +416,9 @@ long get_mte_ctrl(struct task_struct *task)
 static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
 				struct iovec *kiov, unsigned int gup_flags)
 {
-	struct vm_area_struct *vma;
 	void __user *buf = kiov->iov_base;
 	size_t len = kiov->iov_len;
-	int ret;
+	int err = 0;
 	int write = gup_flags & FOLL_WRITE;
 
 	if (!access_ok(buf, len))
@@ -429,14 +428,16 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
 		return -EIO;
 
 	while (len) {
+		struct vm_area_struct *vma;
 		unsigned long tags, offset;
 		void *maddr;
-		struct page *page = NULL;
+		struct page *page = get_user_page_vma_remote(mm, addr,
+							     gup_flags, &vma);
 
-		ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
-					    &vma, NULL);
-		if (ret <= 0)
+		if (IS_ERR_OR_NULL(page)) {
+			err = page == NULL ? -EIO : PTR_ERR(page);
 			break;
+		}
 
 		/*
 		 * Only copy tags if the page has been mapped as PROT_MTE
@@ -446,7 +447,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
 		 * was never mapped with PROT_MTE.
 		 */
 		if (!(vma->vm_flags & VM_MTE)) {
-			ret = -EOPNOTSUPP;
+			err = -EOPNOTSUPP;
 			put_page(page);
 			break;
 		}
@@ -479,7 +480,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
 	kiov->iov_len = buf - kiov->iov_base;
 	if (!kiov->iov_len) {
 		/* check for error accessing the tracee's address space */
-		if (ret <= 0)
+		if (err)
 			return -EIO;
 		else
 			return -EFAULT;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 794a2dd3659a..8b70759cdbb9 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -1103,7 +1103,7 @@ static int kasan_handler(struct pt_regs *regs, unsigned long esr)
 	bool recover = esr & KASAN_ESR_RECOVER;
 	bool write = esr & KASAN_ESR_WRITE;
 	size_t size = KASAN_ESR_SIZE(esr);
-	u64 addr = regs->regs[0];
+	void *addr = (void *)regs->regs[0];
 	u64 pc = regs->pc;
 
 	kasan_report(addr, size, write, pc);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c85b6d70b222..c60100791bcc 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -188,6 +188,9 @@ static void show_pte(unsigned long addr)
 			break;
 
 		ptep = pte_offset_map(pmdp, addr);
+		if (!ptep)
+			break;
+
 		pte = READ_ONCE(*ptep);
 		pr_cont(", pte=%016llx", pte_val(pte));
 		pte_unmap(ptep);
@@ -328,7 +331,7 @@ static void report_tag_fault(unsigned long addr, unsigned long esr,
 	 * find out access size.
 	 */
 	bool is_write = !!(esr & ESR_ELx_WNR);
-	kasan_report(addr, 0, is_write, regs->pc);
+	kasan_report((void *)addr, 0, is_write, regs->pc);
 }
 #else
 /* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 95364e8bdc19..21716c940682 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -307,14 +307,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			return NULL;
 
 		WARN_ON(addr & (sz - 1));
-		/*
-		 * Note that if this code were ever ported to the
-		 * 32-bit arm platform then it will cause trouble in
-		 * the case where CONFIG_HIGHPTE is set, since there
-		 * will be no pte_unmap() to correspond with this
-		 * pte_alloc_map().
-		 */
-		ptep = pte_alloc_map(mm, pmdp, addr);
+		ptep = pte_alloc_huge(mm, pmdp, addr);
 	} else if (sz == PMD_SIZE) {
 		if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
 			ptep = huge_pmd_share(mm, vma, addr, pudp);
@@ -366,7 +359,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 		return (pte_t *)pmdp;
 
 	if (sz == CONT_PTE_SIZE)
-		return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));
+		return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK));
 
 	return NULL;
 }
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index c28c2c8483cc..d31c3a9290c5 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -466,7 +466,12 @@ void __init bootmem_init(void)
  */
 void __init mem_init(void)
 {
-	swiotlb_init(max_pfn > PFN_DOWN(arm64_dma_phys_limit), SWIOTLB_VERBOSE);
+	bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);
+
+	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
+		swiotlb = true;
+
+	swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
 
 	/* this will put all unused low memory onto the freelists */
 	memblock_free_all();
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index 72c929d9902b..f8c74ffeeefb 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -371,3 +371,4 @@
 448	common	process_mrelease		sys_process_mrelease
 449	common	futex_waitv			sys_futex_waitv
 450	common	set_mempolicy_home_node		sys_set_mempolicy_home_node
+451	common	cachestat			sys_cachestat
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 78a02e026164..adc49f2d22e8 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -41,7 +41,7 @@ huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, taddr);
 		if (pmd)
-			pte = pte_alloc_map(mm, pmd, taddr);
+			pte = pte_alloc_huge(mm, pmd, taddr);
 	}
 	return pte;
 }
@@ -64,7 +64,7 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
 		if (pud_present(*pud)) {
 			pmd = pmd_offset(pud, taddr);
 			if (pmd_present(*pmd))
-				pte = pte_offset_map(pmd, taddr);
+				pte = pte_offset_huge(pmd, taddr);
 		}
 	}
 }
diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h
index 8ed6ac14d99f..141bbdfad960 100644
--- a/arch/m68k/include/asm/mmu_context.h
+++ b/arch/m68k/include/asm/mmu_context.h
@@ -99,7 +99,7 @@ static inline void load_ksp_mmu(struct task_struct *task)
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte = NULL;
 	unsigned long mmuar;
 
 	local_irq_save(flags);
@@ -139,7 +139,7 @@ static inline void load_ksp_mmu(struct task_struct *task)
 
 	pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
 				     : pte_offset_map(pmd, mmuar);
-	if (pte_none(*pte) || !pte_present(*pte))
+	if (!pte || pte_none(*pte) || !pte_present(*pte))
 		goto bug;
 
 	set_pte(pte, pte_mkyoung(*pte));
@@ -161,6 +161,8 @@ static inline void load_ksp_mmu(struct task_struct *task)
 bug:
 	pr_info("ksp load failed: mm=0x%p ksp=0x08%lx\n", mm, mmuar);
 end:
+	if (pte && mmuar < PAGE_OFFSET)
+		pte_unmap(pte);
 	local_irq_restore(flags);
 }
 
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index bd0274c7592e..c586034d2a7a 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -488,6 +488,8 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
 		if (!pmd_present(*pmd))
 			goto bad_access;
 		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
+		if (!pte)
+			goto bad_access;
 		if (!pte_present(*pte) || !pte_dirty(*pte)
 		    || !pte_write(*pte)) {
 			pte_unmap_unlock(pte, ptl);
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index b1f3940bc298..4f504783371f 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -450,3 +450,4 @@
 448	common	process_mrelease	sys_process_mrelease
 449	common	futex_waitv		sys_futex_waitv
 450	common	set_mempolicy_home_node	sys_set_mempolicy_home_node
+451	common	cachestat		sys_cachestat
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 70aa0979e027..42f45abea37a 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -91,7 +91,8 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte = NULL;
+	int ret = -1;
 	int asid;
 
 	local_irq_save(flags);
@@ -100,47 +101,33 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 		regs->pc + (extension_word * sizeof(long));
 	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
 
-	if (!mm) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (!mm)
+		goto out;
 
 	pgd = pgd_offset(mm, mmuar);
-	if (pgd_none(*pgd)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (pgd_none(*pgd))
+		goto out;
 
 	p4d = p4d_offset(pgd, mmuar);
-	if (p4d_none(*p4d)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (p4d_none(*p4d))
+		goto out;
 
 	pud = pud_offset(p4d, mmuar);
-	if (pud_none(*pud)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (pud_none(*pud))
+		goto out;
 
 	pmd = pmd_offset(pud, mmuar);
-	if (pmd_none(*pmd)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (pmd_none(*pmd))
+		goto out;
 
 	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
 				: pte_offset_map(pmd, mmuar);
-	if (pte_none(*pte) || !pte_present(*pte)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (!pte || pte_none(*pte) || !pte_present(*pte))
+		goto out;
 
 	if (write) {
-		if (!pte_write(*pte)) {
-			local_irq_restore(flags);
-			return -1;
-		}
+		if (!pte_write(*pte))
+			goto out;
 		set_pte(pte, pte_mkdirty(*pte));
 	}
 
@@ -161,9 +148,12 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
 	else
 		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
-
+	ret = 0;
+out:
+	if (pte && !KMAPAREA(mmuar))
+		pte_unmap(pte);
 	local_irq_restore(flags);
-	return 0;
+	return ret;
 }
 
 void __init cf_bootmem_alloc(void)
diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
index a149b3e711ec..1903988b9e23 100644
--- a/arch/microblaze/include/asm/cache.h
+++ b/arch/microblaze/include/asm/cache.h
@@ -18,4 +18,9 @@
 
 #define SMP_CACHE_BYTES	L1_CACHE_BYTES
 
+/* MS be sure that SLAB allocates aligned objects */
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+
+#define ARCH_SLAB_MINALIGN	L1_CACHE_BYTES
+
 #endif /* _ASM_MICROBLAZE_CACHE_H */
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 7b9861bcd458..337f23eabc71 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -30,11 +30,6 @@
 
 #ifndef __ASSEMBLY__
 
-/* MS be sure that SLAB allocates aligned objects */
-#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
-
-#define ARCH_SLAB_MINALIGN	L1_CACHE_BYTES
-
 /*
  * PAGE_OFFSET -- the first address of the first page of memory. With MMU
  * it is set to the kernel start address (aligned on a page boundary).
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index c3aebec71c0c..c78a0ff48066 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -194,7 +194,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 
 	preempt_disable();
 	ptep = pte_offset_map(pmdp, address);
-	if (pte_present(*ptep)) {
+	if (ptep && pte_present(*ptep)) {
 		address = (unsigned long) page_address(pte_page(*ptep));
 		/* MS: I need add offset in page */
 		address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
@@ -203,7 +203,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 		invalidate_icache_range(address, address + 8);
 		flush_dcache_range(address, address + 8);
 	}
-	pte_unmap(ptep);
+	if (ptep)
+		pte_unmap(ptep);
 	preempt_enable();
 	if (err)
 		return -EFAULT;
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 820145e47350..858d22bf275c 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -456,3 +456,4 @@
 448	common	process_mrelease		sys_process_mrelease
 449	common	futex_waitv			sys_futex_waitv
 450	common	set_mempolicy_home_node		sys_set_mempolicy_home_node
+451	common	cachestat			sys_cachestat
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 253ff994ed2e..1976317d4e8b 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -389,3 +389,4 @@
 448	n32	process_mrelease		sys_process_mrelease
 449	n32	futex_waitv			sys_futex_waitv
 450	n32	set_mempolicy_home_node		sys_set_mempolicy_home_node
+451	n32	cachestat			sys_cachestat
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index 3f1886ad9d80..cfda2511badf 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -365,3 +365,4 @@
 448	n64	process_mrelease		sys_process_mrelease
 449	n64	futex_waitv			sys_futex_waitv
 450	common	set_mempolicy_home_node		sys_set_mempolicy_home_node
+451	n64	cachestat			sys_cachestat
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 8f243e35a7b2..7692234c3768 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -438,3 +438,4 @@
 448	o32	process_mrelease		sys_process_mrelease
 449	o32	futex_waitv			sys_futex_waitv
 450	o32	set_mempolicy_home_node		sys_set_mempolicy_home_node
+451	o32	cachestat			sys_cachestat
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 1b939abbe4ca..93c2d695588a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -297,7 +297,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
-	pte_t *ptep;
+	pte_t *ptep, *ptemap = NULL;
 	int idx, pid;
 
 	/*
@@ -344,7 +344,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	} else
 #endif
 	{
-		ptep = pte_offset_map(pmdp, address);
+		ptemap = ptep = pte_offset_map(pmdp, address);
+		/*
+		 * update_mmu_cache() is called between pte_offset_map_lock()
+		 * and pte_unmap_unlock(), so we can assume that ptep is not
+		 * NULL here: and what should be done below if it were NULL?
+		 */
 
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #ifdef CONFIG_XPA
@@ -373,6 +378,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	tlbw_use_hazard();
 	htw_start();
 	flush_micro_tlb_vm(vma);
+
+	if (ptemap)
+		pte_unmap(ptemap);
 	local_irq_restore(flags);
 }
 
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index ca4a302d4365..501160250bb7 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -426,10 +426,15 @@ void flush_dcache_page(struct page *page)
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		addr = mpnt->vm_start + offset;
 		if (parisc_requires_coherency()) {
+			bool needs_flush = false;
 			pte_t *ptep;
 
 			ptep = get_ptep(mpnt->vm_mm, addr);
-			if (ptep && pte_needs_flush(*ptep))
+			if (ptep) {
+				needs_flush = pte_needs_flush(*ptep);
+				pte_unmap(ptep);
+			}
+			if (needs_flush)
 				flush_user_cache_page(mpnt, addr);
 		} else {
 			/*
@@ -561,14 +566,20 @@ EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 static void flush_cache_page_if_present(struct vm_area_struct *vma,
 	unsigned long vmaddr, unsigned long pfn)
 {
-	pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);
+	bool needs_flush = false;
+	pte_t *ptep;
 
 	/*
 	 * The pte check is racy and sometimes the flush will trigger
 	 * a non-access TLB miss. Hopefully, the page has already been
 	 * flushed.
 	 */
-	if (ptep && pte_needs_flush(*ptep))
+	ptep = get_ptep(vma->vm_mm, vmaddr);
+	if (ptep) {
+		needs_flush = pte_needs_flush(*ptep);
+		pte_unmap(ptep);
+	}
+	if (needs_flush)
 		flush_cache_page(vma, vmaddr, pfn);
 }
 
@@ -635,17 +646,22 @@ static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, u
 	pte_t *ptep;
 
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		bool needs_flush = false;
 		/*
 		 * The vma can contain pages that aren't present. Although
 		 * the pte search is expensive, we need the pte to find the
 		 * page pfn and to check whether the page should be flushed.
 		 */
 		ptep = get_ptep(vma->vm_mm, addr);
-		if (ptep && pte_needs_flush(*ptep)) {
+		if (ptep) {
+			needs_flush = pte_needs_flush(*ptep);
+			pfn = pte_pfn(*ptep);
+			pte_unmap(ptep);
+		}
+		if (needs_flush) {
 			if (parisc_requires_coherency()) {
 				flush_user_cache_page(vma, addr);
 			} else {
-				pfn = pte_pfn(*ptep);
 				if (WARN_ON(!pfn_valid(pfn)))
 					return;
 				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 71ed5391f29d..415f12d5bab3 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -164,7 +164,7 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
 		pmd_clear(pmd);
 		return;
 	}
-	pte = pte_offset_map(pmd, vaddr);
+	pte = pte_offset_kernel(pmd, vaddr);
 	vaddr &= ~PMD_MASK;
 	end = vaddr + size;
 	if (end > PMD_SIZE)
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 0e42fceb2d5e..3c71fad78318 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -448,3 +448,4 @@
 448	common	process_mrelease		sys_process_mrelease
 449	common	futex_waitv			sys_futex_waitv
 450	common	set_mempolicy_home_node		sys_set_mempolicy_home_node
+451	common	cachestat			sys_cachestat
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index d1d3990b83f6..a8a1a7c1e16e 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -66,7 +66,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, addr);
 		if (pmd)
-			pte = pte_alloc_map(mm, pmd, addr);
+			pte = pte_alloc_huge(mm, pmd, addr);
 	}
 	return pte;
 }
@@ -90,7 +90,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 			if (!pud_none(*pud)) {
 				pmd = pmd_offset(pud, addr);
 				if (!pmd_none(*pmd))
-					pte = pte_offset_map(pmd, addr);
+					pte = pte_offset_huge(pmd, addr);
 			}
 		}
 	}
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index ae0a68a838e8..69232231d270 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -33,6 +33,10 @@
 
 #define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)
 
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+#endif
+
 #if !defined(__ASSEMBLY__)
 #ifdef CONFIG_PPC64
 
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index 56f217606327..b9ac9e3a771c 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -12,10 +12,6 @@
 
 #define VM_DATA_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS32
 
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
-#endif
-
 #if defined(CONFIG_PPC_256K_PAGES) || \
     (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
 #define PTE_SHIFT	(PAGE_SHIFT - PTE_T_LOG2 - 2)	/* 1/4 of a page */
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index a0be127475b1..8c0b08b7a80e 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -537,3 +537,4 @@
 448	common	process_mrelease		sys_process_mrelease
 449	common	futex_waitv			sys_futex_waitv
 450	nospu	set_mempolicy_home_node		sys_set_mempolicy_home_node
+451	common	cachestat			sys_cachestat
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 461307b89c3a..572707858d65 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -509,7 +509,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
 		} else {
 			pte_t *pte;
 
-			pte = pte_offset_map(p, 0);
+			pte = pte_offset_kernel(p, 0);
 			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
 			pmd_clear(p);
 		}
diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c
index a64ea0a7ef96..21fcad97ae80 100644
--- a/arch/powerpc/mm/book3s64/hash_tlb.c
+++ b/arch/powerpc/mm/book3s64/hash_tlb.c
@@ -239,12 +239,16 @@ void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long
 	local_irq_save(flags);
 	arch_enter_lazy_mmu_mode();
 	start_pte = pte_offset_map(pmd, addr);
+	if (!start_pte)
+		goto out;
 	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
 		unsigned long pteval = pte_val(*pte);
 		if (pteval & H_PAGE_HASHPTE)
 			hpte_need_flush(mm, addr, pte, pteval, 0);
 		addr += PAGE_SIZE;
 	}
+	pte_unmap(start_pte);
+out:
 	arch_leave_lazy_mmu_mode();
 	local_irq_restore(flags);
 }
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index 81d7185e2ae8..d19fb1f3007d 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -105,7 +105,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 
 		ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n,
 				     FOLL_WRITE | FOLL_LONGTERM,
-				     mem->hpages + entry, NULL);
+				     mem->hpages + entry);
 		if (ret == n) {
 			pinned += n;
 			continue;
diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index b75a9fb99599..0dc85556dec5 100644
--- a/arch/powerpc/mm/book3s64/subpage_prot.c
+++ b/arch/powerpc/mm/book3s64/subpage_prot.c
@@ -71,6 +71,8 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
 	if (pmd_none(*pmd))
 		return;
 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	if (!pte)
+		return;
 	arch_enter_lazy_mmu_mode();
 	for (; npages > 0; --npages) {
 		pte_update(mm, addr, pte, 0, 0, 0);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index b900933507da..f7c683b672c1 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -183,7 +183,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		return NULL;
 
 	if (IS_ENABLED(CONFIG_PPC_8xx) && pshift < PMD_SHIFT)
-		return pte_alloc_map(mm, (pmd_t *)hpdp, addr);
+		return pte_alloc_huge(mm, (pmd_t *)hpdp, addr);
 
 	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
 
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 70c4c59a1a8f..fae747cc57d2 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -3376,12 +3376,15 @@ static void show_pte(unsigned long addr)
 	printf("pmdp @ 0x%px = 0x%016lx\n", pmdp, pmd_val(*pmdp));
 
 	ptep = pte_offset_map(pmdp, addr);
-	if (pte_none(*ptep)) {
+	if (!ptep || pte_none(*ptep)) {
+		if (ptep)
+			pte_unmap(ptep);
 		printf("no valid PTE\n");
 		return;
 	}
 
 	format_pte(ptep, pte_val(*ptep));
+	pte_unmap(ptep);
 
 	sync();
 	__delay(200);
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index e0ef56dc57b9..542883b3b49b 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -67,7 +67,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 
 	for_each_napot_order(order) {
 		if (napot_cont_size(order) == sz) {
-			pte = pte_alloc_map(mm, pmd, addr & napot_cont_mask(order));
+			pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order));
 			break;
 		}
 	}
@@ -114,7 +114,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 
 	for_each_napot_order(order) {
 		if (napot_cont_size(order) == sz) {
-			pte = pte_offset_kernel(pmd, addr & napot_cont_mask(order));
+			pte = pte_offset_huge(pmd, addr & napot_cont_mask(order));
 			break;
 		}
 	}
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index b68f47541169..a6935af2235c 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -453,3 +453,4 @@
 448	common	process_mrelease	sys_process_mrelease		sys_process_mrelease
 449	common	futex_waitv		sys_futex_waitv			sys_futex_waitv
 450	common	set_mempolicy_home_node	sys_set_mempolicy_home_node	sys_set_mempolicy_home_node
+451	common	cachestat		sys_cachestat			sys_cachestat
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index cb2ee06df286..3c62d1b218b1 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -294,6 +294,8 @@ again:
 	rc = -ENXIO;
 	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
+	if (!ptep)
+		goto out;
 	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
 		page = pte_page(*ptep);
 		rc = -EAGAIN;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index da6dac36e959..9bd0a873f3b1 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2777,7 +2777,7 @@ static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
 
 	mmap_read_lock(kvm->mm);
 	get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE,
-			      &page, NULL, NULL);
+			      &page, NULL);
 	mmap_read_unlock(kvm->mm);
 	return page;
 }
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index dc90d1eb0d55..f4b6fc746fce 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -895,12 +895,12 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
 
 /**
  * gmap_pte_op_end - release the page table lock
- * @ptl: pointer to the spinlock pointer
+ * @ptep: pointer to the locked pte
+ * @ptl: pointer to the page table spinlock
  */
-static void gmap_pte_op_end(spinlock_t *ptl)
+static void gmap_pte_op_end(pte_t *ptep, spinlock_t *ptl)
 {
-	if (ptl)
-		spin_unlock(ptl);
+	pte_unmap_unlock(ptep, ptl);
 }
 
 /**
@@ -1011,7 +1011,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 {
 	int rc;
 	pte_t *ptep;
-	spinlock_t *ptl = NULL;
+	spinlock_t *ptl;
 	unsigned long pbits = 0;
 
 	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
@@ -1025,7 +1025,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
 	/* Protect and unlock. */
 	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
-	gmap_pte_op_end(ptl);
+	gmap_pte_op_end(ptep, ptl);
 
 	return rc;
 }
@@ -1154,7 +1154,7 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
 				/* Do *NOT* clear the _PAGE_INVALID bit! */
 				rc = 0;
 			}
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(ptep, ptl);
 		}
 		if (!rc)
 			break;
@@ -1248,7 +1248,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 			if (!rc)
 				gmap_insert_rmap(sg, vmaddr, rmap);
 			spin_unlock(&sg->guest_table_lock);
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(ptep, ptl);
 		}
 		radix_tree_preload_end();
 		if (rc) {
@@ -2156,7 +2156,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
 			if (!tptep) {
 				spin_unlock(&sg->guest_table_lock);
-				gmap_pte_op_end(ptl);
+				gmap_pte_op_end(sptep, ptl);
 				radix_tree_preload_end();
 				break;
 			}
@@ -2167,7 +2167,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 				rmap = NULL;
 				rc = 0;
 			}
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(sptep, ptl);
 			spin_unlock(&sg->guest_table_lock);
 		}
 		radix_tree_preload_end();
@@ -2495,7 +2495,7 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
 				continue;
 			if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
 				set_bit(i, bitmap);
-			spin_unlock(ptl);
+			pte_unmap_unlock(ptep, ptl);
 		}
 	}
 	gmap_pmd_op_end(gmap, pmdp);
@@ -2537,7 +2537,12 @@ static inline void thp_split_mm(struct mm_struct *mm)
  * Remove all empty zero pages from the mapping for lazy refaulting
  * - This must be called after mm->context.has_pgste is set, to avoid
  *   future creation of zero pages
- * - This must be called after THP was enabled
+ * - This must be called after THP was disabled.
+ *
+ * mm contracts with s390, that even if mm were to remove a page table,
+ * racing with the loop below and so causing pte_offset_map_lock() to fail,
+ * it will never insert a page table containing empty zero pages once
+ * mm_forbids_zeropage(mm) i.e. mm->context.has_pgste is set.
 */
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
			    unsigned long end, struct mm_walk *walk)
@@ -2549,6 +2554,8 @@ static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
 		spinlock_t *ptl;
 
 		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+		if (!ptep)
+			break;
 		if (is_zero_pfn(pte_pfn(*ptep)))
 			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
 		pte_unmap_unlock(ptep, ptl);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 6effb24de6d9..3bd2ab2a9a34 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -829,7 +829,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 	default:
 		return -EFAULT;
 	}
-
+again:
 	ptl = pmd_lock(mm, pmdp);
 	if (!pmd_present(*pmdp)) {
 		spin_unlock(ptl);
@@ -850,6 +850,8 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 	spin_unlock(ptl);
 
 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+	if (!ptep)
+		goto again;
 	new = old = pgste_get_lock(ptep);
 	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
 			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
@@ -938,7 +940,7 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 	default:
 		return -EFAULT;
 	}
-
+again:
 	ptl = pmd_lock(mm, pmdp);
 	if (!pmd_present(*pmdp)) {
 		spin_unlock(ptl);
@@ -955,6 +957,8 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 	spin_unlock(ptl);
 
 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+	if (!ptep)
+		goto again;
 	new = old = pgste_get_lock(ptep);
 	/* Reset guest reference bit only */
 	pgste_val(new) &= ~PGSTE_GR_BIT;
@@ -1000,7 +1004,7 @@ int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 	default:
 		return -EFAULT;
 	}
-
+again:
 	ptl = pmd_lock(mm, pmdp);
 	if (!pmd_present(*pmdp)) {
 		spin_unlock(ptl);
@@ -1017,6 +1021,8 @@ int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 	spin_unlock(ptl);
 
 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+	if (!ptep)
+		goto again;
 	pgste = pgste_get_lock(ptep);
 	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
 	paddr = pte_val(*ptep) & PAGE_MASK;
diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
index 32dfa6b82ec6..b38dbc975581 100644
--- a/arch/sh/include/asm/cache.h
+++ b/arch/sh/include/asm/cache.h
@@ -14,6 +14,12 @@
 
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
+/*
+ * Some drivers need to perform DMA into kmalloc'ed buffers
+ * and so we have to increase the kmalloc minalign for this.
+ */
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+
 #define __read_mostly __section(".data..read_mostly")
 
 #ifndef __ASSEMBLY__
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 09ac6c7faee0..62f4b9edcb98 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -174,10 +174,4 @@ typedef struct page *pgtable_t;
 
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
-/*
- * Some drivers need to perform DMA into kmalloc'ed buffers
- * and so we have to increase the kmalloc minalign for this.
- */
-#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
-
 #endif /* __ASM_SH_PAGE_H */
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index 2de85c977f54..97377e8c5025 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -453,3 +453,4 @@
 448	common	process_mrelease		sys_process_mrelease
 449	common	futex_waitv			sys_futex_waitv
 450	common	set_mempolicy_home_node		sys_set_mempolicy_home_node
+451	common	cachestat			sys_cachestat
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 999ab5916e69..6cb0ad73dbb9 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -38,7 +38,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (pud) {
 			pmd = pmd_alloc(mm, pud, addr);
 			if (pmd)
-				pte = pte_alloc_map(mm, pmd, addr);
+				pte = pte_alloc_huge(mm, pmd, addr);
 		}
 	}
 }
@@ -63,7 +63,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 		if (pud) {
 			pmd = pmd_offset(pud, addr);
 			if (pmd)
-				pte = pte_offset_map(pmd, addr);
+				pte = pte_offset_huge(pmd, addr);
 		}
 	}
 }
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index dad38960d1a8..ca450c7bc53f 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -328,6 +328,8 @@ static void flush_signal_insns(unsigned long address)
 		goto out_irqs_on;
 
 	ptep = pte_offset_map(pmdp, address);
+	if (!ptep)
+		goto out_irqs_on;
 	pte = *ptep;
 	if (!pte_present(pte))
 		goto out_unmap;
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index 4398cc6fb68d..faa835f3c54a 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -496,3 +496,4 @@
 448	common	process_mrelease		sys_process_mrelease
 449	common	futex_waitv			sys_futex_waitv
 450	common	set_mempolicy_home_node		sys_set_mempolicy_home_node
+451	common	cachestat			sys_cachestat
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index d91305de694c..d8a407fbe350 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -99,6 +99,7 @@ static unsigned int get_user_insn(unsigned long tpc)
 	local_irq_disable();
 
 	pmdp = pmd_offset(pudp, tpc);
+again:
 	if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
 		goto out_irq_enable;
 
@@ -115,6 +116,8 @@ static unsigned int get_user_insn(unsigned long tpc)
 #endif
 	{
 		ptep = pte_offset_map(pmdp, tpc);
+		if (!ptep)
+			goto again;
 		pte = *ptep;
 		if (pte_present(pte)) {
 			pa = (pte_pfn(pte) << PAGE_SHIFT);
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index d8e0e3c7038d..d7018823206c 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -298,7 +298,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		return NULL;
 	if (sz >= PMD_SIZE)
 		return (pte_t *)pmd;
-	return pte_alloc_map(mm, pmd, addr);
+	return pte_alloc_huge(mm, pmd, addr);
 }
 
 pte_t *huge_pte_offset(struct mm_struct *mm,
@@ -325,7 +325,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 		return NULL;
 	if (is_hugetlb_pmd(*pmd))
 		return (pte_t *)pmd;
-	return pte_offset_map(pmd, addr);
+	return pte_offset_huge(pmd, addr);
 }
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index bf3e6d2fe5d9..133dd42570d6 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -244,7 +244,7 @@ static void *iounit_alloc(struct device *dev, size_t len,
 			long i;
 
 			pmdp = pmd_off_k(addr);
-			ptep = pte_offset_map(pmdp, addr);
+			ptep = pte_offset_kernel(pmdp, addr);
 
 			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
 
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 9e3f6933ca13..3a6caef68348 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -358,7 +358,7 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
 			__flush_page_to_ram(page);
 
 			pmdp = pmd_off_k(addr);
-			ptep = pte_offset_map(pmdp, addr);
+			ptep = pte_offset_kernel(pmdp, addr);
 
 			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
 		}
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 9a725547578e..7ecf8556947a 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -149,6 +149,8 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 	pte_t *pte;
 
 	pte = pte_offset_map(&pmd, vaddr);
+	if (!pte)
+		return;
 	end = vaddr + HPAGE_SIZE;
 	while (vaddr < end) {
 		if (pte_val(*pte) & _PAGE_VALID) {
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 320480a8db4f..bc0a3c941b35 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -455,3 +455,4 @@
 448	i386	process_mrelease	sys_process_mrelease
 449	i386	futex_waitv		sys_futex_waitv
 450	i386	set_mempolicy_home_node	sys_set_mempolicy_home_node
+451	i386	cachestat		sys_cachestat
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index c84d12608cd2..227538b0ce80 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -372,6 +372,7 @@
 448	common	process_mrelease	sys_process_mrelease
 449	common	futex_waitv		sys_futex_waitv
 450	common	set_mempolicy_home_node	sys_set_mempolicy_home_node
+451	common	cachestat		sys_cachestat
 
 #
 # Due to a historical design error, certain syscalls are numbered differently
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index 21ca0a831b70..5d390df21440 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -214,7 +214,7 @@ static int __sgx_encl_add_page(struct sgx_encl *encl,
 	if (!(vma->vm_flags & VM_MAYEXEC))
 		return -EACCES;
 
-	ret = get_user_pages(src, 1, 0, &src_page, NULL);
+	ret = get_user_pages(src, 1, 0, &src_page);
 	if (ret < 1)
 		return -EFAULT;
 
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 525876e7b9f4..adc67f98819a 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -367,8 +367,10 @@ static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
 
 		va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
 		ptep = get_locked_pte(mm, va, &ptl);
-		pte_clear(mm, va, ptep);
-		pte_unmap_unlock(ptep, ptl);
+		if (!WARN_ON_ONCE(!ptep)) {
+			pte_clear(mm, va, ptep);
+			pte_unmap_unlock(ptep, ptl);
+		}
 	}
 
 	va = (unsigned long)ldt_slot_va(ldt->slot);
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index bfe22fd5a1d7..d73aeb16417f 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -188,7 +188,7 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
 	if (pmd_large(*pmd))
 		return;
 
-	pte = pte_offset_map(pmd, ppd->vaddr);
+	pte = pte_offset_kernel(pmd, ppd->vaddr);
 	if (pte_none(*pte))
 		set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
 }
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 52c94ab5c205..2b69c3c035b6 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -421,3 +421,4 @@
 448	common	process_mrelease		sys_process_mrelease
 449	common	futex_waitv			sys_futex_waitv
 450	common	set_mempolicy_home_node		sys_set_mempolicy_home_node
+451	common	cachestat			sys_cachestat
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 27a477dae232..0a11fc5f185b 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -179,6 +179,7 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+	unsigned int pteval;
 
 	if (!mm)
 		mm = task->active_mm;
@@ -197,7 +198,9 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
 	pte = pte_offset_map(pmd, vaddr);
 	if (!pte)
 		return 0;
-	return pte_val(*pte);
+	pteval = pte_val(*pte);
+	pte_unmap(pte);
+	return pteval;
 }
 
 enum {
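
The bulk of the arch changes above follow one pattern: pte_offset_map() and pte_offset_map_lock() can now fail and return NULL (the page table they would map may be removed or replaced underneath, e.g. by THP collapse), so each caller must check the result and balance every successful map with pte_unmap() or pte_unmap_unlock(). A minimal sketch of the new caller convention, outside any real subsystem; walk_one_pte() and do_something_with() are hypothetical names introduced here for illustration:

#include <linux/mm.h>

void do_something_with(pte_t *pte);	/* hypothetical helper */

/*
 * Sketch of the pattern applied throughout this diff:
 * pte_offset_map_lock() may return NULL when the pmd entry has
 * changed under us, so check it before dereferencing the pte.
 */
static int walk_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -EAGAIN;		/* pmd changed: caller may retry or bail out */

	if (pte_present(*pte))
		do_something_with(pte);

	pte_unmap_unlock(pte, ptl);	/* balance every successful map */
	return 0;
}

Whether a failed map means retry (sparc's get_user_insn, s390's storage-key helpers), bail out (most show_pte() debug paths), or goto a cleanup label (m68k's cf_tlb_miss) is a per-call-site decision, as the hunks above show.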
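
The arm64/kernel/mte.c hunks also switch from get_user_pages_remote(), whose vmas output parameter is removed elsewhere in this series, to get_user_page_vma_remote(), which returns a single page (or NULL/ERR_PTR() on failure) and hands the VMA back through an output pointer. A sketch of that calling convention, assuming the caller already holds the mmap lock; touch_remote_page() is a hypothetical wrapper, and the error mapping simply mirrors the mte.c hunk:

#include <linux/mm.h>

static int touch_remote_page(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	page = get_user_page_vma_remote(mm, addr, FOLL_WRITE, &vma);
	if (IS_ERR_OR_NULL(page))
		return page ? PTR_ERR(page) : -EIO;	/* as in __access_remote_tags() */

	/* ... operate on page; vma->vm_flags is available for checks ... */

	put_page(page);
	return 0;
}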
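
The syscall-table hunks wire up the new cachestat(2) system call as number 451 everywhere except alpha, where it is 561. Assuming a kernel and UAPI headers of this vintage, which provide struct cachestat_range and struct cachestat in <linux/mman.h>, a userspace probe might look like the following sketch (not an authoritative example; the len == 0 "whole file" behaviour is taken from the syscall's documentation):

/* cachestat-demo.c - build: cc -o cachestat-demo cachestat-demo.c */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mman.h>		/* struct cachestat_range, struct cachestat */

#ifndef __NR_cachestat
#define __NR_cachestat 451	/* the number added in the tables above */
#endif

int main(int argc, char **argv)
{
	struct cachestat_range range = { 0, 0 };	/* len == 0: to end of file */
	struct cachestat cs;
	int fd = open(argc > 1 ? argv[1] : "/etc/hosts", O_RDONLY);

	if (fd < 0 || syscall(__NR_cachestat, fd, &range, &cs, 0) != 0) {
		perror("cachestat");
		return 1;
	}
	printf("cached=%llu dirty=%llu writeback=%llu evicted=%llu recently_evicted=%llu\n",
	       (unsigned long long)cs.nr_cache, (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_writeback, (unsigned long long)cs.nr_evicted,
	       (unsigned long long)cs.nr_recently_evicted);
	close(fd);
	return 0;
}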
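
Finally, the arm64 Kconfig, cache.h and init.c hunks let ARCH_KMALLOC_MINALIGN drop to 8 while ARCH_DMA_MINALIGN stays at 128: small kmalloc() allocations are no longer guaranteed to be cacheline-aligned, and DMA_BOUNCE_UNALIGNED_KMALLOC (with SWIOTLB forced on in mem_init()) makes the DMA API bounce such buffers so that cache maintenance cannot corrupt neighbouring objects. A hedged sketch of the driver-side view; send_small_buffer() is hypothetical and the actual device transfer is elided:

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

static int send_small_buffer(struct device *dev, const void *data, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);	/* may be only 8-byte aligned now */
	dma_addr_t handle;

	if (!buf)
		return -ENOMEM;
	memcpy(buf, data, len);

	/*
	 * If buf is not aligned to dma_get_cache_alignment() and the device
	 * is not cache-coherent, the mapping is expected to bounce through
	 * SWIOTLB rather than DMA directly to the unaligned buffer.
	 */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... start the (elided) device transfer using "handle" ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}

Drivers that genuinely need cacheline-aligned allocations can still ask for them explicitly (e.g. via kmalloc of ARCH_DMA_MINALIGN-sized objects); the point of the series is that the common small-allocation case no longer pays that padding cost. The microblaze, powerpc and sh hunks are a related cleanup, moving ARCH_DMA_MINALIGN definitions out of asm/page.h into asm/cache.h.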