Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/cache.S    |  24
-rw-r--r--  arch/arm64/mm/ioremap.c  |  85
-rw-r--r--  arch/arm64/mm/mmu.c      |  44
-rw-r--r--  arch/arm64/mm/proc.S     |  25
4 files changed, 120 insertions, 58 deletions
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index c46f48b33c14..fda756875fa6 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -168,6 +168,14 @@ ENTRY(__flush_dcache_area)
ENDPROC(__flush_dcache_area)
/*
+ * __inval_cache_range(start, end)
+ * - start - start address of region
+ * - end - end address of region
+ */
+ENTRY(__inval_cache_range)
+ /* FALLTHROUGH */
+
+/*
* __dma_inv_range(start, end)
* - start - virtual start address of region
* - end - virtual end address of region
@@ -175,14 +183,22 @@ ENDPROC(__flush_dcache_area)
__dma_inv_range:
dcache_line_size x2, x3
sub x3, x2, #1
- bic x0, x0, x3
+ tst x1, x3 // end cache line aligned?
bic x1, x1, x3
-1: dc ivac, x0 // invalidate D / U line
- add x0, x0, x2
+ b.eq 1f
+ dc civac, x1 // clean & invalidate D / U line
+1: tst x0, x3 // start cache line aligned?
+ bic x0, x0, x3
+ b.eq 2f
+ dc civac, x0 // clean & invalidate D / U line
+ b 3f
+2: dc ivac, x0 // invalidate D / U line
+3: add x0, x0, x2
cmp x0, x1
- b.lo 1b
+ b.lo 2b
dsb sy
ret
+ENDPROC(__inval_cache_range)
ENDPROC(__dma_inv_range)
/*
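Note on the cache.S hunk: __dma_inv_range still walks the range a cache line at a time, but a partial line at either end of [start, end) is now cleaned and invalidated (dc civac) rather than just invalidated (dc ivac), so data sharing those lines with the buffer is not thrown away; whole lines in between are still only invalidated. The routine is also exported under the new __inval_cache_range alias. The user-space C sketch below merely illustrates that alignment policy; clean_and_inval_line() and inval_line() are hypothetical stand-ins for the dc civac and dc ivac instructions.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the "dc civac" and "dc ivac" instructions. */
    static void clean_and_inval_line(uintptr_t p) { printf("dc civac %#lx\n", (unsigned long)p); }
    static void inval_line(uintptr_t p)           { printf("dc ivac  %#lx\n", (unsigned long)p); }

    static void inval_range_sketch(uintptr_t start, uintptr_t end, uintptr_t line)
    {
            uintptr_t mask = line - 1;

            if (end & mask)                         /* partial line at the end */
                    clean_and_inval_line(end & ~mask);
            end &= ~mask;

            if (start & mask) {                     /* partial line at the start */
                    clean_and_inval_line(start & ~mask);
                    start = (start & ~mask) + line;
            }
            for (; start < end; start += line)      /* whole lines in between */
                    inval_line(start);
    }

    int main(void)
    {
            /* e.g. a buffer spanning 0x1010..0x1074 with 64-byte cache lines */
            inval_range_sketch(0x1010, 0x1074, 64);
            return 0;
    }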
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 2bb1d586664c..7ec328392ae0 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -25,6 +25,10 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
pgprot_t prot, void *caller)
{
@@ -98,3 +102,84 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
+
+#ifndef CONFIG_ARM64_64K_PAGES
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#endif
+
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+
+ pgd = pgd_offset_k(addr);
+ BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
+
+ pud = pud_offset(pgd, addr);
+ BUG_ON(pud_none(*pud) || pud_bad(*pud));
+
+ return pmd_offset(pud, addr);
+}
+
+static inline pte_t * __init early_ioremap_pte(unsigned long addr)
+{
+ pmd_t *pmd = early_ioremap_pmd(addr);
+
+ BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
+
+ return pte_offset_kernel(pmd, addr);
+}
+
+void __init early_ioremap_init(void)
+{
+ pmd_t *pmd;
+
+ pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+#ifndef CONFIG_ARM64_64K_PAGES
+ /* need to populate pmd for 4k pagesize only */
+ pmd_populate_kernel(&init_mm, pmd, bm_pte);
+#endif
+ /*
+ * The boot-ioremap range spans multiple pmds, for which
+ * we are not prepared:
+ */
+ BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+ != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+
+ if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+ WARN_ON(1);
+ pr_warn("pmd %p != %p\n",
+ pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
+ pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+ fix_to_virt(FIX_BTMAP_BEGIN));
+ pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
+ fix_to_virt(FIX_BTMAP_END));
+
+ pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+ pr_warn("FIX_BTMAP_BEGIN: %d\n",
+ FIX_BTMAP_BEGIN);
+ }
+
+ early_ioremap_setup();
+}
+
+void __init __early_set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ unsigned long addr = __fix_to_virt(idx);
+ pte_t *pte;
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+
+ pte = early_ioremap_pte(addr);
+
+ if (pgprot_val(flags))
+ set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+ else {
+ pte_clear(&init_mm, addr, pte);
+ flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+ }
+}
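Note on the ioremap.c additions: early_ioremap_init() hooks arm64 into the generic early-ioremap framework. It populates the page table behind the FIX_BTMAP_* fixmap slots (with 4K pages a dedicated bm_pte table is plugged into the pmd) and sanity-checks that the whole boot-ioremap window fits under a single pmd, while __early_set_fixmap() installs or clears one PTE for a given fixmap slot. Callers normally reach this through the generic early_ioremap()/early_iounmap() helpers; the sketch below shows that usage, assuming those declarations are reachable from the headers named here, and the physical address and size are purely illustrative, not taken from this patch.

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/io.h>   /* assumed to make early_ioremap()/early_iounmap() visible */

    static int __init early_probe_sketch(void)
    {
            void __iomem *regs;

            /*
             * Map one page of MMIO before the normal ioremap() path is up;
             * this ends up installing a PTE in a FIX_BTMAP_* slot via
             * __early_set_fixmap().
             */
            regs = early_ioremap(0x09000000, 0x1000);       /* illustrative address/size */
            if (!regs)
                    return -ENOMEM;

            /* ... read an ID register, etc. ... */

            early_iounmap(regs, 0x1000);
            return 0;
    }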
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f8dc7e8fce6f..6b7e89569a3a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -125,7 +125,7 @@ early_param("cachepolicy", early_cachepolicy);
/*
* Adjust the PMD section entries according to the CPU in use.
*/
-static void __init init_mem_pgprot(void)
+void __init init_mem_pgprot(void)
{
pteval_t default_pgprot;
int i;
@@ -260,47 +260,6 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
} while (pgd++, addr = next, addr != end);
}
-#ifdef CONFIG_EARLY_PRINTK
-/*
- * Create an early I/O mapping using the pgd/pmd entries already populated
- * in head.S as this function is called too early to allocated any memory. The
- * mapping size is 2MB with 4KB pages or 64KB or 64KB pages.
- */
-void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
-{
- unsigned long size, mask;
- bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- /*
- * No early pte entries with !ARM64_64K_PAGES configuration, so using
- * sections (pmd).
- */
- size = page64k ? PAGE_SIZE : SECTION_SIZE;
- mask = ~(size - 1);
-
- pgd = pgd_offset_k(virt);
- pud = pud_offset(pgd, virt);
- if (pud_none(*pud))
- return NULL;
- pmd = pmd_offset(pud, virt);
-
- if (page64k) {
- if (pmd_none(*pmd))
- return NULL;
- pte = pte_offset_kernel(pmd, virt);
- set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
- } else {
- set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
- }
-
- return (void __iomem *)((virt & mask) + (phys & ~mask));
-}
-#endif
-
static void __init map_mem(void)
{
struct memblock_region *reg;
@@ -357,7 +316,6 @@ void __init paging_init(void)
{
void *zero_page;
- init_mem_pgprot();
map_mem();
/*
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index e085ee6ef4e2..9042aff5e9e3 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -28,14 +28,21 @@
#include "proc-macros.S"
-#ifndef CONFIG_SMP
-/* PTWs cacheable, inner/outer WBWA not shareable */
-#define TCR_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
+#ifdef CONFIG_ARM64_64K_PAGES
+#define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K
+#else
+#define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K
+#endif
+
+#ifdef CONFIG_SMP
+#define TCR_SMP_FLAGS TCR_SHARED
#else
-/* PTWs cacheable, inner/outer WBWA shareable */
-#define TCR_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA | TCR_SHARED
+#define TCR_SMP_FLAGS 0
#endif
+/* PTWs cacheable, inner/outer WBWA */
+#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
+
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
/*
@@ -209,18 +216,14 @@ ENTRY(__cpu_setup)
* Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
* both user and kernel.
*/
- ldr x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | \
- TCR_ASID16 | TCR_TBI0 | (1 << 31)
+ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+ TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
/*
* Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
* TCR_EL1.
*/
mrs x9, ID_AA64MMFR0_EL1
bfi x10, x9, #32, #3
-#ifdef CONFIG_ARM64_64K_PAGES
- orr x10, x10, TCR_TG0_64K
- orr x10, x10, TCR_TG1_64K
-#endif
msr tcr_el1, x10
ret // return to head.S
ENDPROC(__cpu_setup)
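Note on the proc.S hunk: the TCR_EL1 setup is consolidated into build-time macros. TCR_TG_FLAGS selects the 4K or 64K granule for both TTBR0 and TTBR1 (replacing the run-time orr of TCR_TG0_64K/TCR_TG1_64K), TCR_SMP_FLAGS adds TCR_SHARED only on SMP, TCR_CACHE_FLAGS keeps the WBWA walk attributes, and the stray (1 << 31) literal is dropped from the value loaded into x10. The only run-time adjustment left is the bfi that copies the low bits of ID_AA64MMFR0_EL1 (PARange) into TCR_EL1.IPS, bits [34:32]; the small C sketch below just restates what that bfi does, with illustrative register values.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * What "bfi x10, x9, #32, #3" does: insert the low 3 bits of x9
     * (ID_AA64MMFR0_EL1.PARange) into bits [34:32] of x10 (TCR_EL1.IPS).
     */
    static uint64_t set_tcr_ips(uint64_t tcr, uint64_t mmfr0)
    {
            tcr &= ~(UINT64_C(7) << 32);            /* clear IPS */
            tcr |= (mmfr0 & 7) << 32;               /* insert PARange[2:0] */
            return tcr;
    }

    int main(void)
    {
            uint64_t tcr = 0;                       /* illustrative starting value */
            uint64_t mmfr0 = 2;                     /* PARange = 2 -> 40-bit PA */

            printf("TCR_EL1 = %#llx\n", (unsigned long long)set_tcr_ips(tcr, mmfr0));
            return 0;
    }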