author     LEROY Christophe <christophe.leroy@c-s.fr>   2015-01-20 12:57:34 +0300
committer  Scott Wood <scottwood@freescale.com>         2015-01-30 06:59:02 +0300
commit     ce67f5d0a00cce231e62334c3624737623c32d6a (patch)
tree       c723a98f0acfe0e685727ff602f901ea70ce984e
parent     5ddb75cee5afab3bdaf6eb4efefc8029923a9cc7 (diff)
download   linux-ce67f5d0a00cce231e62334c3624737623c32d6a.tar.xz
powerpc32: Use kmem_cache memory for PGDIR
When pages are not 4K, the PGDIR table is allocated with kmalloc(). In order
to optimise the TLB handlers, aligned memory is needed. kmalloc() doesn't
provide aligned memory blocks, so let's use a kmem_cache pool instead.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <scottwood@freescale.com>
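For context, a minimal sketch of the difference the patch relies on (kernel-style
C, not part of the patch; the local variable and cache pointer names are
illustrative). In this era of the kernel, kzalloc() did not guarantee that a
power-of-two sized block was naturally aligned, whereas kmem_cache_create()
takes an explicit alignment argument, so every object comes back PGDIR-aligned
and the TLB miss handlers can mask the base address directly:

	/* Before: the size is a power of two, but the block returned by
	 * kzalloc() is not guaranteed to be aligned on that size. */
	pgd_t *pgd = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);

	/* After: the third argument of kmem_cache_create() is the alignment,
	 * so objects from this cache are naturally aligned. */
	struct kmem_cache *cache = kmem_cache_create("PGDIR cache",
			1 << PGDIR_ORDER,	/* object size */
			1 << PGDIR_ORDER,	/* alignment   */
			0, NULL);
	pgd_t *aligned_pgd = kmem_cache_alloc(cache, GFP_KERNEL | __GFP_ZERO);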
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h |  4
-rw-r--r--  arch/powerpc/mm/pgtable_32.c             | 16
2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 9cde3c1522e3..26ce0ab0a9e4 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -347,10 +347,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
+#ifndef CONFIG_PPC_4K_PAGES
+void pgtable_cache_init(void);
+#else
/*
* No page table caches to initialise
*/
#define pgtable_cache_init() do { } while (0)
+#endif
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
pmd_t **pmdp);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 833139620431..03b1a3b0fbd5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -72,13 +72,25 @@ extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
#define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT)
+#ifndef CONFIG_PPC_4K_PAGES
+static struct kmem_cache *pgtable_cache;
+
+void pgtable_cache_init(void)
+{
+ pgtable_cache = kmem_cache_create("PGDIR cache", 1 << PGDIR_ORDER,
+ 1 << PGDIR_ORDER, 0, NULL);
+ if (pgtable_cache == NULL)
+ panic("Couldn't allocate pgtable caches");
+}
+#endif
+
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret;
/* pgdir take page or two with 4K pages and a page fraction otherwise */
#ifndef CONFIG_PPC_4K_PAGES
- ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+ ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO);
#else
ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
PGDIR_ORDER - PAGE_SHIFT);
@@ -89,7 +101,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifndef CONFIG_PPC_4K_PAGES
- kfree((void *)pgd);
+ kmem_cache_free(pgtable_cache, (void *)pgd);
#else
free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
#endif
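Note that the patch itself never calls pgtable_cache_init(); it relies on the
generic boot code doing so. Roughly, and as an assumption about this era's init
path rather than a quote from it, mm_init() calls pgtable_init(), which in turn
calls pgtable_cache_init(); the no-op #define kept in the header keeps the
CONFIG_PPC_4K_PAGES configuration unchanged:

	/* include/linux/mm.h (approximate sketch, for illustration only) */
	static inline void pgtable_init(void)
	{
		ptlock_cache_init();
		pgtable_cache_init();	/* the new function when !CONFIG_PPC_4K_PAGES */
	}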