Diffstat (limited to 'arch/powerpc/include/asm/nohash/32/pgtable.h')
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/pgtable.h | 41
1 file changed, 14 insertions, 27 deletions
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 03bbd1149530..7c46a98cc7f4 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -133,7 +133,7 @@ extern int icache_44x_need_flush;
 #ifndef __ASSEMBLY__
 
 #define pte_clear(mm, addr, ptep) \
-	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
+	do { pte_update(ptep, ~0, 0); } while (0)
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
@@ -146,21 +146,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 }
 
 /*
- * When flushing the tlb entry for a page, we also need to flush the hash
- * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
- */
-extern int flush_hash_pages(unsigned context, unsigned long va,
-			    unsigned long pmdval, int count);
-
-/* Add an HPTE to the hash table */
-extern void add_hash_page(unsigned context, unsigned long va,
-			  unsigned long pmdval);
-
-/* Flush an entry from the TLB/hash table */
-extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
-			     unsigned long address);
-
-/*
  * PTE updates. This function is called whenever an existing
  * valid PTE is updated. This does -not- include set_pte_at()
  * which nowadays only sets a new PTE.
@@ -246,12 +231,6 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
 {
 	unsigned long old;
 	old = pte_update(ptep, _PAGE_ACCESSED, 0);
-#if _PAGE_HASHPTE != 0
-	if (old & _PAGE_HASHPTE) {
-		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
-		flush_hash_pages(context, addr, ptephys, 1);
-	}
-#endif
 	return (old & _PAGE_ACCESSED) != 0;
 }
 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
@@ -261,7 +240,7 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pte_t *ptep)
 {
-	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+	return __pte(pte_update(ptep, ~0, 0));
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -277,19 +256,27 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 	ptep_set_wrprotect(mm, addr, ptep);
 }
 
-static inline void __ptep_set_access_flags(struct mm_struct *mm,
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 					   pte_t *ptep, pte_t entry,
-					   unsigned long address)
+					   unsigned long address,
+					   int psize)
 {
 	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 	unsigned long clr = ~pte_val(entry) & (_PAGE_RO | _PAGE_NA);
 
 	pte_update(ptep, clr, set);
+
+	flush_tlb_page(vma, address);
+}
+
+static inline int pte_young(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_ACCESSED;
 }
 
 #define __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
+#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)
 
 /*
  * Note that on Book E processors, the pmd contains the kernel virtual
@@ -330,7 +317,7 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 /*
  * Encode and decode a swap entry.
  * Note that the bits we use in a PTE for representing a swap entry
- * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
+ * must not include the _PAGE_PRESENT bit.
  * -- paulus
  */
 #define __swp_type(entry)		((entry).val & 0x1f)
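For readers unfamiliar with the helper the diff keeps referring to: pte_update(ptep, clr, set) is the read-modify-write primitive that clears the bits in clr, sets the bits in set, and returns the previous PTE value. The change above simply widens the clear mask in pte_clear() and ptep_get_and_clear() from ~_PAGE_HASHPTE to ~0, since nohash CPUs have no hash-table bit to preserve across a clear. The user-space sketch below only models that semantic; the pte_basic_t type, the flag values, and the non-atomic update are simplified stand-ins for illustration, not the real kernel implementation (which performs the update atomically).

#include <assert.h>

/* Simplified stand-ins for the real PTE type and flag bits. */
typedef unsigned long pte_basic_t;
#define _PAGE_PRESENT	0x001UL
#define _PAGE_ACCESSED	0x020UL
#define _PAGE_DIRTY	0x080UL

/*
 * Model of pte_update(): clear the bits in 'clr', set the bits in 'set',
 * and return the old value so callers (e.g. __ptep_test_and_clear_young)
 * can inspect the bits that were present before the update.
 */
static pte_basic_t pte_update(pte_basic_t *ptep, unsigned long clr,
			      unsigned long set)
{
	pte_basic_t old = *ptep;

	*ptep = (old & ~clr) | set;
	return old;
}

int main(void)
{
	pte_basic_t pte = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;

	/*
	 * The old nohash/32 pte_clear() preserved _PAGE_HASHPTE
	 * (clr = ~_PAGE_HASHPTE); after this change it wipes every
	 * bit, i.e. clr = ~0.
	 */
	pte_basic_t old = pte_update(&pte, ~0UL, 0);

	assert(old & _PAGE_DIRTY);	/* old value still reports dirty */
	assert(pte == 0);		/* the PTE itself is fully cleared */
	return 0;
}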