author     Peter Zijlstra <peterz@infradead.org>        2020-01-31 15:45:37 +0300
committer  Geert Uytterhoeven <geert@linux-m68k.org>    2020-02-10 12:57:48 +0300
commit     ef9285f69f0efbc75d01cbb09fe65882effd0a25
tree       79be6657cea036325005b2be529dac70299383e8
parent     ef22d8abd876e805b604e8f655127de2beee2869
m68k: mm: Improve kernel_page_table()
With the PTE-tables now only being 256 bytes, allocating a full page
for them is a giant waste. Start by improving the boot time allocator
such that init_mm initialization will at least have optimal memory
density.
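
To make the allocation scheme concrete, below is a minimal user-space
sketch of the same bump allocator (an illustration only, not kernel
code): a fresh page is grabbed only when the cursor sits on a page
boundary, and 256-byte pte-tables are handed out from it back to back.
aligned_alloc() and memset() stand in for memblock_alloc_low(),
clear_page() and mmu_page_ctor(); PTE_TABLE_SIZE and PAGE_SIZE here are
illustrative constants.

  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  #define PAGE_SIZE      4096UL
  #define PAGE_MASK      (~(PAGE_SIZE - 1))
  #define PTE_TABLE_SIZE 256UL              /* one 256-byte pte-table */

  static char *last_pte_table;              /* bump cursor, NULL at boot */

  /* Hand out the next 256-byte pte-table; allocate a fresh page only
   * when the cursor sits on a page boundary (previous page used up). */
  static void *kernel_page_table(void)
  {
          char *pte_table = last_pte_table;

          if (((uintptr_t)last_pte_table & ~PAGE_MASK) == 0) {
                  pte_table = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
                  if (!pte_table) {
                          fprintf(stderr, "out of memory\n");
                          exit(1);
                  }
                  memset(pte_table, 0, PAGE_SIZE);
                  last_pte_table = pte_table;
          }

          last_pte_table += PTE_TABLE_SIZE;

          return pte_table;
  }

  int main(void)
  {
          /* 16 consecutive pte-tables fit in a single page, so the
           * 17th call below triggers a second page allocation. */
          for (int i = 0; i < 20; i++)
                  printf("pte table %2d at %p\n", i, kernel_page_table());
          return 0;
  }

The kernel version in the diff below is the same shape, except that the
cursor is a pte_t * and is bumped by PTRS_PER_PTE entries rather than
by a byte count.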
Many thanks to Will Deacon for help with debugging and ferreting out
lost information on these dusty MMUs.
Notes:
- _TABLE_MASK is reduced to account for the shorter (256 byte)
  alignment of pte-tables. Per the manual, table entries should only
  ever have state in the low 4 bits (Used, WrProt, Desc1, Desc0), so
  the mask still clears more low bits than strictly required; see the
  sketch after these notes. (Thanks Will!!!)
- Also use kernel_page_table() for the 020/030 zero_pgtable case and
consequently remove the zero_pgtable init hack (will fix up later).
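
To illustrate the first note (the sketch referenced there), here is a
small stand-alone example of how a table descriptor splits into a
256-byte-aligned table address plus the low state bits. The bit names
follow the comment added by the patch; the descriptor value and the
_DESC_* helper macros are made up for illustration (the kernel uses the
_PAGE_* definitions in motorola_pgtable.h).

  #include <stdint.h>
  #include <stdio.h>

  #define _TABLE_MASK    0xffffff00u /* keep the 256-byte-aligned address */
  #define _DESC_USED     0x008u      /* bit 3: Used */
  #define _DESC_WRPROT   0x004u      /* bit 2: Write Protected */
  #define _DESCTYPE_MASK 0x003u      /* bits 0,1: Descriptor Type */

  int main(void)
  {
          /* A made-up table descriptor: table at 0x00345600, marked
           * Used, descriptor type 2 (short-format table descriptor). */
          uint32_t desc = 0x00345600u | _DESC_USED | 0x002u;

          printf("table address : 0x%08x\n", desc & _TABLE_MASK);
          printf("used          : %d\n", !!(desc & _DESC_USED));
          printf("write protect : %d\n", !!(desc & _DESC_WRPROT));
          printf("desc type     : %u\n", desc & _DESCTYPE_MASK);

          /* Only the low 4 bits carry state, so masking off 8 bits
           * (256-byte alignment) is more than strictly required. */
          return 0;
  }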
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Tested-by: Michael Schmitz <schmitzmic@gmail.com>
Tested-by: Greg Ungerer <gerg@linux-m68k.org>
Link: https://lore.kernel.org/r/20200131125403.768263973@infradead.org
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
-rw-r--r--  arch/m68k/include/asm/motorola_pgtable.h  | 13
-rw-r--r--  arch/m68k/mm/init.c                       |  5
-rw-r--r--  arch/m68k/mm/motorola.c                   | 51
3 files changed, 41 insertions, 28 deletions
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 4d94e462bb2b..2ad0f4166841 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -23,7 +23,18 @@
 #define _DESCTYPE_MASK	0x003
 
 #define _CACHEMASK040	(~0x060)
-#define _TABLE_MASK	(0xfffffe00)
+
+/*
+ * Currently set to the minimum alignment of table pointers (256 bytes).
+ * The hardware only uses the low 4 bits for state:
+ *
+ *      3 - Used
+ *      2 - Write Protected
+ *    0,1 - Descriptor Type
+ *
+ * and has the rest of the bits reserved.
+ */
+#define _TABLE_MASK	(0xffffff00)
 
 #define _PAGE_TABLE	(_PAGE_SHORT)
 #define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 27c453f4fffe..a9b6d26cff8a 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -42,7 +42,6 @@ EXPORT_SYMBOL(empty_zero_page);
 
 #if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
 extern void init_pointer_table(unsigned long ptable);
-extern pmd_t *zero_pgtable;
 #endif
 
 #ifdef CONFIG_MMU
@@ -135,10 +134,6 @@ static inline void init_pointer_tables(void)
 		if (pud_present(*pud))
 			init_pointer_table(pgd_page_vaddr(kernel_pg_dir[i]));
 	}
-
-	/* insert also pointer table that we used to unmap the zero page */
-	if (zero_pgtable)
-		init_pointer_table((unsigned long)zero_pgtable);
 #endif
 }
 
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index c888ef46da3e..600f9c1d96f8 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -174,27 +174,35 @@ extern __initdata unsigned long m68k_init_mapped_size;
 
 extern unsigned long availmem;
 
+static pte_t *last_pte_table __initdata = NULL;
+
 static pte_t * __init kernel_page_table(void)
 {
-	pte_t *ptablep;
+	pte_t *pte_table = last_pte_table;
 
-	ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!ptablep)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
+	if (((unsigned long)last_pte_table & ~PAGE_MASK) == 0) {
+		pte_table = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+		if (!pte_table) {
+			panic("%s: Failed to allocate %lu bytes align=%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
+		}
 
-	clear_page(ptablep);
-	mmu_page_ctor(ptablep);
+		clear_page(pte_table);
+		mmu_page_ctor(pte_table);
 
-	return ptablep;
+		last_pte_table = pte_table;
+	}
+
+	last_pte_table += PTRS_PER_PTE;
+
+	return pte_table;
 }
 
-static pmd_t *last_pgtable __initdata = NULL;
-pmd_t *zero_pgtable __initdata = NULL;
+static pmd_t *last_pmd_table __initdata = NULL;
 
 static pmd_t * __init kernel_ptr_table(void)
 {
-	if (!last_pgtable) {
+	if (!last_pmd_table) {
 		unsigned long pmd, last;
 		int i;
 
@@ -213,25 +221,25 @@ static pmd_t * __init kernel_ptr_table(void)
 			last = pmd;
 		}
 
-		last_pgtable = (pmd_t *)last;
+		last_pmd_table = (pmd_t *)last;
 #ifdef DEBUG
-		printk("kernel_ptr_init: %p\n", last_pgtable);
+		printk("kernel_ptr_init: %p\n", last_pmd_table);
 #endif
 	}
 
-	last_pgtable += PTRS_PER_PMD;
-	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
-		last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
+	last_pmd_table += PTRS_PER_PMD;
+	if (((unsigned long)last_pmd_table & ~PAGE_MASK) == 0) {
+		last_pmd_table = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
 							   PAGE_SIZE);
-		if (!last_pgtable)
+		if (!last_pmd_table)
 			panic("%s: Failed to allocate %lu bytes align=%lx\n",
 			      __func__, PAGE_SIZE, PAGE_SIZE);
 
-		clear_page(last_pgtable);
-		mmu_page_ctor(last_pgtable);
+		clear_page(last_pmd_table);
+		mmu_page_ctor(last_pmd_table);
 	}
 
-	return last_pgtable;
+	return last_pmd_table;
 }
 
 static void __init map_node(int node)
@@ -294,8 +302,7 @@ static void __init map_node(int node)
 #ifdef DEBUG
 				printk ("[zero map]");
 #endif
-				zero_pgtable = kernel_ptr_table();
-				pte_dir = (pte_t *)zero_pgtable;
+				pte_dir = kernel_page_table();
 
 				pmd_set(pmd_dir, pte_dir);
 				pte_val(*pte_dir++) = 0;