-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-4k.h |  6
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu.h     |  6
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgalloc.h | 26
-rw-r--r--  arch/powerpc/mm/mmu_context_book3s64.c       | 10
-rw-r--r--  arch/powerpc/mm/pgtable-book3s64.c           | 11
5 files changed, 15 insertions, 44 deletions
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 4b5423030d4b..00c4db2a7682 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -38,8 +38,10 @@
 #define H_PAGE_4K_PFN	0x0
 #define H_PAGE_THP_HUGE 0x0
 #define H_PAGE_COMBO	0x0
-#define H_PTE_FRAG_NR	0
-#define H_PTE_FRAG_SIZE_SHIFT	0
+
+/* 8 bytes per each pte entry */
+#define H_PTE_FRAG_SIZE_SHIFT	(H_PTE_INDEX_SIZE + 3)
+#define H_PTE_FRAG_NR	(PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
 
 /* memory key bits, only 8 keys supported */
 #define H_PTE_PKEY_BIT0	0
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 5094696eecd6..fde7803a2261 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -134,10 +134,10 @@ typedef struct {
 #ifdef CONFIG_PPC_SUBPAGE_PROT
 	struct subpage_prot_table spt;
 #endif /* CONFIG_PPC_SUBPAGE_PROT */
-#ifdef CONFIG_PPC_64K_PAGES
-	/* for 4K PTE fragment support */
+	/*
+	 * pagetable fragment support
+	 */
 	void *pte_frag;
-#endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 	struct list_head iommu_group_mem_list;
 #endif
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 558a159600ad..826171568192 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -173,31 +173,6 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
 	return (pgtable_t)pmd_page_vaddr(pmd);
 }
 
-#ifdef CONFIG_PPC_4K_PAGES
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
-					  unsigned long address)
-{
-	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
-				      unsigned long address)
-{
-	struct page *page;
-	pte_t *pte;
-
-	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
-	if (!pte)
-		return NULL;
-	page = virt_to_page(pte);
-	if (!pgtable_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
-	return pte;
-}
-#else /* if CONFIG_PPC_64K_PAGES */
-
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
@@ -209,7 +184,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 {
 	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
 }
-#endif
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index b75194dff64c..87ee78973a35 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -159,9 +159,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 	mm->context.id = index;
 
-#ifdef CONFIG_PPC_64K_PAGES
 	mm->context.pte_frag = NULL;
-#endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 	mm_iommu_init(mm);
 #endif
@@ -192,7 +190,6 @@ static void destroy_contexts(mm_context_t *ctx)
 	spin_unlock(&mmu_context_lock);
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
 static void destroy_pagetable_page(struct mm_struct *mm)
 {
 	int count;
@@ -213,13 +210,6 @@ static void destroy_pagetable_page(struct mm_struct *mm)
 	}
 }
 
-#else
-static inline void destroy_pagetable_page(struct mm_struct *mm)
-{
-	return;
-}
-#endif
-
 void destroy_context(struct mm_struct *mm)
 {
 #ifdef CONFIG_SPAPR_TCE_IOMMU
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index e4e1b2d4ca27..fc42cccb96c7 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -225,7 +225,7 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
 	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 }
 EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
-#ifdef CONFIG_PPC_64K_PAGES
+
 static pte_t *get_pte_from_cache(struct mm_struct *mm)
 {
 	void *pte_frag, *ret;
@@ -264,7 +264,14 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 			return NULL;
 	}
 
 
+	ret = page_address(page);
+	/*
+	 * if we support only one fragment just return the
+	 * allocated page.
+	 */
+	if (PTE_FRAG_NR == 1)
+		return ret;
 	spin_lock(&mm->page_table_lock);
 	/*
 	 * If we find pgtable_page set, we return
@@ -291,8 +298,6 @@ pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel
 	return __alloc_for_ptecache(mm, kernel);
 }
 
-#endif /* CONFIG_PPC_64K_PAGES */
-
 void pte_fragment_free(unsigned long *table, int kernel)
 {
 	struct page *page = virt_to_page(table);
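
Note: a minimal user-space sketch of the fragment arithmetic introduced above, assuming the usual hash-4k values PAGE_SIZE = 4096 and H_PTE_INDEX_SIZE = 9 (taken from hash-4k.h; the standalone program below is illustrative only, not part of the patch). With 8-byte PTEs a fragment is 512 * 8 = 4096 bytes, so H_PTE_FRAG_NR evaluates to 1 and the PTE_FRAG_NR == 1 fast path added to __alloc_for_ptecache() can hand back the freshly allocated page without splitting it under mm->page_table_lock.

#include <stdio.h>

/* Assumed hash-4k values; see arch/powerpc/include/asm/book3s/64/hash-4k.h */
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define H_PTE_INDEX_SIZE	9	/* 2^9 = 512 PTEs per PTE page table */

/* 8 bytes per each pte entry */
#define H_PTE_FRAG_SIZE_SHIFT	(H_PTE_INDEX_SIZE + 3)
#define H_PTE_FRAG_NR		(PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)

int main(void)
{
	/* fragment size: 1 << 12 = 4096 bytes, i.e. a full 4K page */
	printf("fragment size : %lu bytes\n", 1UL << H_PTE_FRAG_SIZE_SHIFT);
	/* fragments per page: 4096 >> 12 = 1, hence the PTE_FRAG_NR == 1 fast path */
	printf("fragments/page: %lu\n", (unsigned long)H_PTE_FRAG_NR);
	return 0;
}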