author		Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>	2018-10-17 12:39:21 +0300
committer	Michael Ellerman <mpe@ellerman.id.au>		2018-10-18 16:56:17 +0300
commit		b9fb4480a3af85552d88561a2fea9c4d5f54c917 (patch)
tree		ee1296843e5688479f98f103e26fec33e6f30103 /arch/powerpc/include/asm/book3s
parent		4ffe713b7587b14695c9bec26a000fc88ef54895 (diff)
download	linux-b9fb4480a3af85552d88561a2fea9c4d5f54c917.tar.xz
powerpc/mm: Make pte_pgprot return all pte bits
Other architectures already return all the pte bits from pte_pgprot(). Do the
same here: instead of adding back the required pte bits (which pte_pgprot()
masked out) in __ioremap_at(), make pte_pgprot() filter out only the pfn bits.
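
[Editorial sketch, not part of the patch: with this approach pte_pgprot() keeps every bit except the physical frame number. Assuming PTE_RPN_MASK names exactly the pfn bits, as it does in the _PAGE_CHG_MASK definition visible in the diff below, the filtering helper could look roughly like this.]

	static inline pgprot_t pte_pgprot(pte_t pte)
	{
		/* Drop only the pfn; keep all protection, cache and status bits. */
		return __pgprot(pte_val(pte) & ~PTE_RPN_MASK);
	}
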
Fixes: 26973fa5ac0e ("powerpc/mm: use pte helpers in generic code")
Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc/include/asm/book3s')
-rw-r--r--	arch/powerpc/include/asm/book3s/32/pgtable.h | 6 ------
-rw-r--r--	arch/powerpc/include/asm/book3s/64/pgtable.h | 8 --------
2 files changed, 0 insertions, 14 deletions
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 0fbd4c642b51..e61dd3ae5bc0 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -48,11 +48,6 @@ static inline bool pte_user(pte_t pte)
 #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL)
 
-/* Mask of bits returned by pte_pgprot() */
-#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
-			 _PAGE_WRITETHRU | _PAGE_USER | _PAGE_ACCESSED | \
-			 _PAGE_RW | _PAGE_DIRTY)
-
 /*
  * We define 2 sets of base prot bits, one for basic pages (ie,
  * cacheable kernel and user pages) and one for non cacheable
@@ -396,7 +391,6 @@ static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSE
 static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
 static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
 static inline bool pte_exec(pte_t pte)		{ return true; }
-static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
 
 static inline int pte_present(pte_t pte)
 {
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 9db2b8eba61d..d33421648d39 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -129,13 +129,6 @@
 #define H_PTE_PKEY (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
		    H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
 /*
- * Mask of bits returned by pte_pgprot()
- */
-#define PAGE_PROT_BITS (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \
-			H_PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \
-			_PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
-			_PAGE_SOFT_DIRTY | H_PTE_PKEY)
-/*
  * We define 2 sets of base prot bits, one for basic pages (ie,
  * cacheable kernel and user pages) and one for non cacheable
  * pages. We always set _PAGE_COHERENT when SMP is enabled or
@@ -496,7 +489,6 @@ static inline bool pte_exec(pte_t pte)
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_EXEC));
 }
 
-static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
 
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline bool pte_soft_dirty(pte_t pte)
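
[For illustration only, not from this patch, and using made-up bit positions: a stand-alone program contrasting the removed allow-list style of masking (PAGE_PROT_BITS) with a pfn filter. An allow-list silently drops any bit it does not enumerate, while filtering out only the pfn preserves it.]

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical bit layout for demonstration; not the real powerpc one. */
	#define DEMO_RPN_MASK	0x000ffffffffff000ULL	/* pfn bits */
	#define DEMO_PAGE_RW	0x0000000000000002ULL	/* listed in the allow-list */
	#define DEMO_PAGE_EXTRA	0x0000000000000400ULL	/* a bit the allow-list forgot */

	#define DEMO_PROT_BITS	DEMO_PAGE_RW		/* old-style allow-list mask */

	int main(void)
	{
		uint64_t pte = 0x00000000abcd1000ULL | DEMO_PAGE_RW | DEMO_PAGE_EXTRA;

		/* Old approach: keep only the bits the allow-list knows about. */
		uint64_t allow_list = pte & DEMO_PROT_BITS;	/* DEMO_PAGE_EXTRA is lost */

		/* New approach: drop only the pfn, keep everything else. */
		uint64_t pfn_filter = pte & ~DEMO_RPN_MASK;	/* DEMO_PAGE_EXTRA survives */

		printf("allow-list pgprot: %#llx\n", (unsigned long long)allow_list);
		printf("pfn-filter pgprot: %#llx\n", (unsigned long long)pfn_filter);
		return 0;
	}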