| field | value | date |
|---|---|---|
| author | Peter Zijlstra <peterz@infradead.org> | 2020-01-31 15:45:36 +0300 |
| committer | Geert Uytterhoeven <geert@linux-m68k.org> | 2020-02-10 12:57:48 +0300 |
| commit | ef22d8abd876e805b604e8f655127de2beee2869 (patch) | |
| tree | d5a4ba746cf6d03a129d13183934f0a20dd1ee95 /arch/m68k | |
| parent | 5ad272abee9fe0a781d49b03f334c0a3b3e418c1 (diff) | |
| download | linux-ef22d8abd876e805b604e8f655127de2beee2869.tar.xz | |
m68k: mm: Restructure Motorola MMU page-table layout
The Motorola 68xxx MMUs on the 040 (and later) have a fixed 7,7,{5,6}
page-table setup, where the last number depends on the selected page size
(8k vs 4k respectively), and head.S selects 4K pages. For the 030 (and
earlier) we explicitly program a 7,7,6 layout and 4K pages in %tc.
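
For orientation, with 4K pages the 7,7,6 split consumes a 32-bit virtual
address as 7 root-table index bits, 7 pointer-table index bits, 6 page-table
index bits and a 12-bit page offset. A minimal sketch of that arithmetic in C
(the SKETCH_* names are illustrative, not from the patch; the resulting shifts
and table sizes match the PMD_SHIFT and PTRS_PER_* values the patch installs):

    /* 7,7,6 split with 4K pages; SKETCH_* names are illustrative only. */
    #define SKETCH_PAGE_SHIFT   12                          /* 4K page offset */
    #define SKETCH_PMD_SHIFT    (SKETCH_PAGE_SHIFT + 6)     /* 18: 64 ptes per page table */
    #define SKETCH_PGDIR_SHIFT  (SKETCH_PMD_SHIFT + 7)      /* 25: 128 pointer tables per root table */
    /* 7 + 7 + 6 + 12 = 32 address bits; 128 pgd entries, 128 pmd entries, 64 ptes. */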
However, the current code implements this in a mightily weird way: to avoid
wasting space, it groups 16 of those (6 bit) pte tables into a single 4k page.
The downside is that this forces pmd_t to be a 16-tuple pointing at 16
consecutive pte tables.
This breaks the generic code, which assumes that READ_ONCE(*pmd) is a
word-sized access.
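
The conflict is visible from the type definitions alone; the before/after pair
below is lifted from the page.h hunk further down:

    /* Old: one pmd entry is 16 words (64 bytes), so no single word-sized
     * load can read it, which is what READ_ONCE(*pmd) would need to be. */
    typedef struct { unsigned long pmd[16]; } pmd_t;

    /* New: one word per pmd entry; READ_ONCE(*pmd) is a plain 32-bit load. */
    typedef struct { unsigned long pmd; } pmd_t;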
Therefore implement a straightforward 7,7,6 three-level page-table setup,
with the addition (for 020/030) of (partial) large-page support. For
now this increases the memory footprint of pte tables 15-fold.
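
The 15-fold number is straightforward arithmetic (my working, not part of the
original message): a 6-bit pte table holds 64 entries of 4 bytes, i.e. 256
bytes, so the old scheme fit 16 tables into one 4K page, whereas this patch
spends a full 4K page on every table:

    /* one pte table:  64 entries * 4 bytes            =  256 bytes           */
    /* old layout:     16 tables packed per 4096B page =  256 bytes per table */
    /* this patch:     one 4096B page per table        = 4096 bytes per table */
    /* 4096 / 256 = 16x the space, i.e. 15 times more than before.            */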
Tested with ARAnyM/68040 emulation.
Suggested-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Tested-by: Michael Schmitz <schmitzmic@gmail.com>
Tested-by: Greg Ungerer <gerg@linux-m68k.org>
Link: https://lore.kernel.org/r/20200131125403.711478295@infradead.org
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Diffstat (limited to 'arch/m68k')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/m68k/include/asm/motorola_pgtable.h | 15 |
| -rw-r--r-- | arch/m68k/include/asm/page.h | 6 |
| -rw-r--r-- | arch/m68k/include/asm/pgtable_mm.h | 10 |
| -rw-r--r-- | arch/m68k/mm/kmap.c | 36 |
| -rw-r--r-- | arch/m68k/mm/motorola.c | 28 |
5 files changed, 39 insertions, 56 deletions
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 62bedc61f110..4d94e462bb2b 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -108,13 +108,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
-        unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
-        unsigned long *ptr = pmdp->pmd;
-        short i = 16;
-        while (--i >= 0) {
-                *ptr++ = ptbl;
-                ptbl += (sizeof(pte_t)*PTRS_PER_PTE/16);
-        }
+        pmd_val(*pmdp) = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
 }
 
 static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
@@ -138,12 +132,7 @@ static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
 #define pmd_none(pmd)           (!pmd_val(pmd))
 #define pmd_bad(pmd)            ((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
 #define pmd_present(pmd)        (pmd_val(pmd) & _PAGE_TABLE)
-#define pmd_clear(pmdp) ({                      \
-        unsigned long *__ptr = pmdp->pmd;       \
-        short __i = 16;                         \
-        while (--__i >= 0)                      \
-                *__ptr++ = 0;                   \
-})
+#define pmd_clear(pmdp)         ({ pmd_val(*pmdp) = 0; })
 #define pmd_page(pmd)           virt_to_page(__va(pmd_val(pmd)))
 
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index 05e1e1e77a9a..c02326b56ae2 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -22,9 +22,9 @@
  * These are used to make use of C type-checking..
  */
 #if !defined(CONFIG_MMU) || CONFIG_PGTABLE_LEVELS == 3
-typedef struct { unsigned long pmd[16]; } pmd_t;
-#define pmd_val(x)      ((&x)->pmd[0])
-#define __pmd(x)        ((pmd_t) { { (x) }, })
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x)      ((&x)->pmd)
+#define __pmd(x)        ((pmd_t) { (x) } )
 #endif
 
 typedef struct { unsigned long pte; } pte_t;
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 2bf5c3501e78..f0e5167de834 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -36,7 +36,7 @@
 
 /* PMD_SHIFT determines the size of the area a second-level page table can map */
 #if CONFIG_PGTABLE_LEVELS == 3
-#define PMD_SHIFT       22
+#define PMD_SHIFT       18
 #endif
 #define PMD_SIZE        (1UL << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE-1))
@@ -67,8 +67,8 @@
 #define PTRS_PER_PMD    1
 #define PTRS_PER_PGD    1024
 #else
-#define PTRS_PER_PTE    1024
-#define PTRS_PER_PMD    8
+#define PTRS_PER_PTE    64
+#define PTRS_PER_PMD    128
 #define PTRS_PER_PGD    128
 #endif
 #define USER_PTRS_PER_PGD       (TASK_SIZE/PGDIR_SIZE)
@@ -76,8 +76,8 @@
 
 /* Virtual address region for use by kernel_map() */
 #ifdef CONFIG_SUN3
-#define KMAP_START      0x0DC00000
-#define KMAP_END        0x0E000000
+#define KMAP_START      0x0dc00000
+#define KMAP_END        0x0e000000
 #elif defined(CONFIG_COLDFIRE)
 #define KMAP_START      0xe0000000
 #define KMAP_END        0xf0000000
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 120030ad8dc4..14d31d216cef 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -24,8 +24,6 @@
 
 #undef DEBUG
 
-#define PTRTREESIZE (256*1024)
-
 /*
  * For 040/060 we can use the virtual memory area like other architectures,
  * but for 020/030 we want to use early termination page descriptors and we
@@ -50,7 +48,7 @@ static inline void free_io_area(void *addr)
 
 #else
 
-#define IO_SIZE         (256*1024)
+#define IO_SIZE         PMD_SIZE
 
 static struct vm_struct *iolist;
 
@@ -81,14 +79,13 @@ static void __free_io_area(void *addr, unsigned long size)
 
 #if CONFIG_PGTABLE_LEVELS == 3
                 if (CPU_IS_020_OR_030) {
-                        int pmd_off = (virtaddr/PTRTREESIZE) & 15;
-                        int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
+                        int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;
 
                         if (pmd_type == _PAGE_PRESENT) {
-                                pmd_dir->pmd[pmd_off] = 0;
-                                virtaddr += PTRTREESIZE;
-                                size -= PTRTREESIZE;
-                                continue;
+                                pmd_clear(pmd_dir);
+                                virtaddr += PMD_SIZE;
+                                size -= PMD_SIZE;
+
                         } else if (pmd_type == 0)
                                 continue;
                 }
@@ -249,7 +246,7 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
 
         while ((long)size > 0) {
 #ifdef DEBUG
-                if (!(virtaddr & (PTRTREESIZE-1)))
+                if (!(virtaddr & (PMD_SIZE-1)))
                         printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
 #endif
                 pgd_dir = pgd_offset_k(virtaddr);
@@ -263,10 +260,10 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
 
 #if CONFIG_PGTABLE_LEVELS == 3
                 if (CPU_IS_020_OR_030) {
-                        pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-                        physaddr += PTRTREESIZE;
-                        virtaddr += PTRTREESIZE;
-                        size -= PTRTREESIZE;
+                        pmd_val(*pmd_dir) = physaddr;
+                        physaddr += PMD_SIZE;
+                        virtaddr += PMD_SIZE;
+                        size -= PMD_SIZE;
                 } else
 #endif
                 {
@@ -367,13 +364,12 @@ void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
 
 #if CONFIG_PGTABLE_LEVELS == 3
                 if (CPU_IS_020_OR_030) {
-                        int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+                        unsigned long pmd = pmd_val(*pmd_dir);
 
-                        if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
-                                pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
-                                                         _CACHEMASK040) | cmode;
-                                virtaddr += PTRTREESIZE;
-                                size -= PTRTREESIZE;
+                        if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
+                                *pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
+                                virtaddr += PMD_SIZE;
+                                size -= PMD_SIZE;
                                 continue;
                         }
                 }
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 2102f9397c94..c888ef46da3e 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -236,8 +236,6 @@ static pmd_t * __init kernel_ptr_table(void)
 
 static void __init map_node(int node)
 {
-#define PTRTREESIZE (256*1024)
-#define ROOTTREESIZE (32*1024*1024)
         unsigned long physaddr, virtaddr, size;
         pgd_t *pgd_dir;
         p4d_t *p4d_dir;
@@ -255,21 +253,21 @@ static void __init map_node(int node)
 
         while (size > 0) {
 #ifdef DEBUG
-                if (!(virtaddr & (PTRTREESIZE-1)))
+                if (!(virtaddr & (PMD_SIZE-1)))
                         printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
                                 virtaddr);
 #endif
                 pgd_dir = pgd_offset_k(virtaddr);
                 if (virtaddr && CPU_IS_020_OR_030) {
-                        if (!(virtaddr & (ROOTTREESIZE-1)) &&
-                            size >= ROOTTREESIZE) {
+                        if (!(virtaddr & (PGDIR_SIZE-1)) &&
+                            size >= PGDIR_SIZE) {
 #ifdef DEBUG
                                 printk ("[very early term]");
 #endif
                                 pgd_val(*pgd_dir) = physaddr;
-                                size -= ROOTTREESIZE;
-                                virtaddr += ROOTTREESIZE;
-                                physaddr += ROOTTREESIZE;
+                                size -= PGDIR_SIZE;
+                                virtaddr += PGDIR_SIZE;
+                                physaddr += PGDIR_SIZE;
                                 continue;
                         }
                 }
@@ -289,8 +287,8 @@ static void __init map_node(int node)
 #ifdef DEBUG
                                 printk ("[early term]");
 #endif
-                                pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-                                physaddr += PTRTREESIZE;
+                                pmd_val(*pmd_dir) = physaddr;
+                                physaddr += PMD_SIZE;
                         } else {
                                 int i;
 #ifdef DEBUG
@@ -298,15 +296,15 @@ static void __init map_node(int node)
 #endif
                                 zero_pgtable = kernel_ptr_table();
                                 pte_dir = (pte_t *)zero_pgtable;
-                                pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
-                                                  _PAGE_TABLE | _PAGE_ACCESSED;
+                                pmd_set(pmd_dir, pte_dir);
+
                                 pte_val(*pte_dir++) = 0;
                                 physaddr += PAGE_SIZE;
-                                for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
+                                for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
                                         pte_val(*pte_dir++) = physaddr;
                         }
-                        size -= PTRTREESIZE;
-                        virtaddr += PTRTREESIZE;
+                        size -= PMD_SIZE;
+                        virtaddr += PMD_SIZE;
                 } else {
                         if (!pmd_present(*pmd_dir)) {
 #ifdef DEBUG
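
A usage note on the 020/030 "(partial) large-page support" mentioned above: an
early termination descriptor stores the physical address directly in the pmd
(or pgd) entry, so a single entry maps a whole PMD_SIZE (256K) or PGDIR_SIZE
(32M) region with no pte table behind it. A minimal sketch of the pattern the
patched __ioremap() and map_node() use (sketch_map_early_term() is a made-up
helper, assuming the kernel's pmd_t and pmd_val() from the headers above):

    /* Sketch only: on 020/030, map one PMD_SIZE (256K) chunk by writing the
     * physical address straight into the pmd entry, i.e. an early termination
     * descriptor, as the patched __ioremap()/map_node() do. */
    static inline void sketch_map_early_term(pmd_t *pmd_dir, unsigned long physaddr)
    {
            pmd_val(*pmd_dir) = physaddr;   /* no pte table for this range */
    }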