Diffstat (limited to 'arch/powerpc/include/asm/book3s/32/pgtable.h')
-rw-r--r-- | arch/powerpc/include/asm/book3s/32/pgtable.h | 46
1 file changed, 15 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index c21d33704633..49d76adb9bc5 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -10,9 +10,9 @@
 /* And here we include common definitions */
 
 #define _PAGE_KERNEL_RO		0
-#define _PAGE_KERNEL_ROX	0
+#define _PAGE_KERNEL_ROX	(_PAGE_EXEC)
 #define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
 
 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE
 
@@ -66,11 +66,11 @@ static inline bool pte_user(pte_t pte)
  */
 #define PAGE_NONE	__pgprot(_PAGE_BASE)
 #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
 #define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
 #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
 
 /* Permission masks used for kernel mappings */
 #define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
@@ -318,7 +318,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 					    int psize)
 {
 	unsigned long set = pte_val(entry) &
-		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
+		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 
 	pte_update(ptep, 0, set);
 
@@ -328,24 +328,10 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
 
-/*
- * Note that on Book E processors, the pmd contains the kernel virtual
- * (lowmem) address of the pte page.  The physical address is less useful
- * because everything runs with translation enabled (even the TLB miss
- * handler).  On everything else the pmd contains the physical address
- * of the pte page.  -- paulus
- */
-#ifndef CONFIG_BOOKE
 #define pmd_page_vaddr(pmd)	\
-	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 #define pmd_page(pmd)		\
 	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
-#else
-#define pmd_page_vaddr(pmd)	\
-	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
-#define pmd_page(pmd)		\
-	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
-#endif
 
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
@@ -360,7 +346,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 #define pte_offset_kernel(dir, addr)	\
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
 #define pte_offset_map(dir, addr)		\
-	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+	((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
+		   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
 #define pte_unmap(pte)		kunmap_atomic(pte)
 
 /*
@@ -384,7 +371,7 @@ static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY);
 static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
 static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
 static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
-static inline bool pte_exec(pte_t pte)		{ return true; }
+static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }
 
 static inline int pte_present(pte_t pte)
 {
@@ -451,7 +438,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
 
 static inline pte_t pte_exprotect(pte_t pte)
 {
-	return pte;
+	return __pte(pte_val(pte) & ~_PAGE_EXEC);
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
@@ -466,7 +453,7 @@ static inline pte_t pte_mkold(pte_t pte)
 
 static inline pte_t pte_mkexec(pte_t pte)
 {
-	return pte;
+	return __pte(pte_val(pte) | _PAGE_EXEC);
 }
 
 static inline pte_t pte_mkpte(pte_t pte)
@@ -524,7 +511,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, int percpu)
 {
-#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
 	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
 	 * helper pte_update() which does an atomic update. We need to do that
 	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
@@ -537,7 +524,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	else
 		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
 
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#elif defined(CONFIG_PTE_64BIT)
 	/* Second case is 32-bit with 64-bit PTE. In this case, we
 	 * can just store as long as we do the two halves in the right order
 	 * with a barrier in between. This is possible because we take care,
@@ -560,7 +547,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
 	: "r" (pte) : "memory");
 
-#elif defined(CONFIG_PPC_STD_MMU_32)
+#else
 	/* Third case is 32-bit hash table in UP mode, we need to preserve
 	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
 	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
@@ -568,9 +555,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 */
 	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
 		      | (pte_val(pte) & ~_PAGE_HASHPTE));
-
-#else
-#error "Not supported "
 #endif
 }
 
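For readers skimming the change, here is a minimal stand-alone sketch (plain user-space C, not kernel code; pte_t is modelled as a wrapped unsigned long and the _PAGE_EXEC bit value below is a placeholder, not the real book3s/32 PTE layout) of how the exec helpers behave after this diff: pte_mkexec() sets _PAGE_EXEC, pte_exprotect() clears it, and pte_exec() reports the bit instead of returning true unconditionally.

/* Simplified model of the new exec-bit helpers; not the kernel's types. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define _PAGE_EXEC	0x200UL	/* placeholder bit position for illustration */

typedef struct { unsigned long pte; } pte_t;

static unsigned long pte_val(pte_t pte) { return pte.pte; }
static pte_t __pte(unsigned long val) { return (pte_t){ .pte = val }; }

/* Same logic as the diff: set, clear and test the exec bit. */
static pte_t pte_mkexec(pte_t pte)    { return __pte(pte_val(pte) | _PAGE_EXEC); }
static pte_t pte_exprotect(pte_t pte) { return __pte(pte_val(pte) & ~_PAGE_EXEC); }
static bool pte_exec(pte_t pte)       { return pte_val(pte) & _PAGE_EXEC; }

int main(void)
{
	pte_t pte = __pte(0);

	assert(!pte_exec(pte));	/* no longer unconditionally true */
	pte = pte_mkexec(pte);
	assert(pte_exec(pte));
	pte = pte_exprotect(pte);
	assert(!pte_exec(pte));

	printf("exec bit round-trips as expected\n");
	return 0;
}

The same bit also flows into the protection masks above (_PAGE_KERNEL_ROX/RWX and PAGE_*_X) and into __ptep_set_access_flags(), which is what lets the diff distinguish executable from non-executable mappings on this platform.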