author		Mike Rapoport <rppt@linux.ibm.com>	2020-06-09 07:33:05 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-09 19:39:13 +0300
commit		e05c7b1f2bc4b7b28199b9a7572f73436d97317e
tree		e0aafcf941689ba227fe55e2af9e915925c812cd /arch/powerpc
parent		88107d330de4f175705a3ea03147feb0d7e68499
mm: pgtable: add shortcuts for accessing kernel PMD and PTE
The powerpc 32-bit implementation of pgtable has nice shortcuts for
accessing kernel PMD and PTE for a given virtual address. Make these
helpers available for all architectures.
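For reference, the generic replacements added by this series live in include/linux/pgtable.h. Reconstructed from the powerpc helpers removed below, plus the s/pmd_ptr/pmd_off/ rename noted in this commit, they look roughly like this (a sketch, not necessarily the verbatim upstream code):

static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}

static inline pmd_t *pmd_off_k(unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);	/* pgd -> p4d -> pud -> pmd walk for a kernel address */

	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}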
[rppt@linux.ibm.com: microblaze: fix page table traversal in setup_rt_frame()]
Link: http://lkml.kernel.org/r/20200518191511.GD1118872@kernel.org
[akpm@linux-foundation.org: s/pmd_ptr_k/pmd_off_k/ in various powerpc places]
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200514170327.31389-9-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/powerpc')
 arch/powerpc/include/asm/pgtable.h    | 19 -------------------
 arch/powerpc/mm/book3s32/mmu.c        |  2 +-
 arch/powerpc/mm/book3s32/tlb.c        |  4 ++--
 arch/powerpc/mm/kasan/8xx.c           |  4 ++--
 arch/powerpc/mm/kasan/book3s_32.c     |  2 +-
 arch/powerpc/mm/kasan/kasan_init_32.c |  8 ++++----
 arch/powerpc/mm/nohash/40x.c          |  4 ++--
 arch/powerpc/mm/nohash/8xx.c          |  2 +-
 arch/powerpc/mm/pgtable_32.c          |  4 ++--
 9 files changed, 15 insertions(+), 34 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index f74ef41920b7..3f50dc4c2d80 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -41,25 +41,6 @@ struct mm_struct;
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_PPC32
-static inline pmd_t *pmd_ptr(struct mm_struct *mm, unsigned long va)
-{
-	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
-}
-
-static inline pmd_t *pmd_ptr_k(unsigned long va)
-{
-	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
-}
-
-static inline pte_t *virt_to_kpte(unsigned long vaddr)
-{
-	pmd_t *pmd = pmd_ptr_k(vaddr);
-
-	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
-}
-#endif
-
 #include <asm/tlbflush.h>
 
 /* Keep these as a macros to avoid include dependency mess */
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index a6dcc708eee3..03b6ba54460e 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -320,7 +320,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea)
 
 	if (!Hash)
 		return;
-	pmd = pmd_ptr(mm, ea);
+	pmd = pmd_off(mm, ea);
 	if (!pmd_none(*pmd))
 		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
 }
diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c
index dc9039a170aa..157f98f6aea9 100644
--- a/arch/powerpc/mm/book3s32/tlb.c
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -90,7 +90,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
 	if (start >= end)
 		return;
 	end = (end - 1) | ~PAGE_MASK;
-	pmd = pmd_ptr(mm, start);
+	pmd = pmd_off(mm, start);
 	for (;;) {
 		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
 		if (pmd_end > end)
@@ -148,7 +148,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 		return;
 	}
 	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
-	pmd = pmd_ptr(mm, vmaddr);
+	pmd = pmd_off(mm, vmaddr);
 	if (!pmd_none(*pmd))
 		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
 }
diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c
index db4ef44af22f..569d98a41881 100644
--- a/arch/powerpc/mm/kasan/8xx.c
+++ b/arch/powerpc/mm/kasan/8xx.c
@@ -10,7 +10,7 @@
 static int __init
 kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
 {
-	pmd_t *pmd = pmd_ptr_k(k_start);
+	pmd_t *pmd = pmd_off_k(k_start);
 	unsigned long k_cur, k_next;
 
 	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
@@ -59,7 +59,7 @@ int __init kasan_init_region(void *start, size_t size)
 		return ret;
 
 	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
index 4bc491a4a1fd..a32b4640b9de 100644
--- a/arch/powerpc/mm/kasan/book3s_32.c
+++ b/arch/powerpc/mm/kasan/book3s_32.c
@@ -46,7 +46,7 @@ int __init kasan_init_region(void *start, size_t size)
 	kasan_update_early_region(k_start, k_cur, __pte(0));
 
 	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index c42085801c04..0760e1e754e4 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -33,7 +33,7 @@ int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_
 	pmd_t *pmd;
 	unsigned long k_cur, k_next;
 
-	pmd = pmd_ptr_k(k_start);
+	pmd = pmd_off_k(k_start);
 	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
 		pte_t *new;
 
@@ -69,7 +69,7 @@ int __init __weak kasan_init_region(void *start, size_t size)
 		return -ENOMEM;
 
 	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
@@ -86,7 +86,7 @@ kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte)
 	phys_addr_t pa = __pa(kasan_early_shadow_page);
 
 	for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		pte_t *ptep = pte_offset_kernel(pmd, k_cur);
 
 		if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
@@ -184,7 +184,7 @@ void __init kasan_early_init(void)
 	unsigned long addr = KASAN_SHADOW_START;
 	unsigned long end = KASAN_SHADOW_END;
 	unsigned long next;
-	pmd_t *pmd = pmd_ptr_k(addr);
+	pmd_t *pmd = pmd_off_k(addr);
 
 	BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);
diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c
index 7c1aedd4c867..13e74bc39ba5 100644
--- a/arch/powerpc/mm/nohash/40x.c
+++ b/arch/powerpc/mm/nohash/40x.c
@@ -103,7 +103,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_RW;
 
-		pmdp = pmd_ptr_k(v);
+		pmdp = pmd_off_k(v);
 		*pmdp++ = __pmd(val);
 		*pmdp++ = __pmd(val);
 		*pmdp++ = __pmd(val);
@@ -118,7 +118,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_RW;
 
-		pmdp = pmd_ptr_k(v);
+		pmdp = pmd_off_k(v);
 		*pmdp = __pmd(val);
 
 		v += LARGE_PAGE_SIZE_4M;
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 286441bbbe49..92e8929cbe3e 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -74,7 +74,7 @@ static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
 static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
 					     pgprot_t prot, int psize, bool new)
 {
-	pmd_t *pmdp = pmd_ptr_k(va);
+	pmd_t *pmdp = pmd_off_k(va);
 	pte_t *ptep;
 
 	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 9c0063b851ff..6eb4eab79385 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -40,7 +40,7 @@ notrace void __init early_ioremap_init(void)
 {
 	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
 	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
-	pmd_t *pmdp = pmd_ptr_k(addr);
+	pmd_t *pmdp = pmd_off_k(addr);
 
 	for (; (s32)(FIXADDR_TOP - addr) > 0;
 	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
@@ -78,7 +78,7 @@ int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 	int err = -ENOMEM;
 
 	/* Use upper 10 bits of VA to index the first level map */
-	pd = pmd_ptr_k(va);
+	pd = pmd_off_k(va);
 	/* Use middle 10 bits of VA to index the second-level map */
 	if (likely(slab_is_available()))
 		pg = pte_alloc_kernel(pd, va);
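Every hunk above is the same mechanical substitution. An architecture without the powerpc-only helper would previously open-code the full four-level walk; with the shared helper the same lookup collapses to one call (illustrative only, with addr standing in for any kernel virtual address):

/* Before: open-coded walk (or the powerpc-only pmd_ptr_k()) */
pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset_k(addr), addr), addr), addr);

/* After: generic helper available to all architectures */
pmd_t *pmd = pmd_off_k(addr);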