Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--	arch/arm64/mm/cache.S		|  52
-rw-r--r--	arch/arm64/mm/context.c		|  38
-rw-r--r--	arch/arm64/mm/pageattr.c	|   2
-rw-r--r--	arch/arm64/mm/proc.S		| 114
4 files changed, 111 insertions, 95 deletions
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index db767b072601..2d881f34dd9d 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -24,7 +24,7 @@
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-ENTRY(__flush_icache_range)
+SYM_FUNC_START(__flush_icache_range)
 	/* FALLTHROUGH */
 
 /*
@@ -37,7 +37,7 @@ ENTRY(__flush_icache_range)
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-ENTRY(__flush_cache_user_range)
+SYM_FUNC_START(__flush_cache_user_range)
 	uaccess_ttbr0_enable x2, x3, x4
 alternative_if ARM64_HAS_CACHE_IDC
 	dsb	ishst
@@ -66,8 +66,8 @@ alternative_else_nop_endif
 9:
 	mov	x0, #-EFAULT
 	b	1b
-ENDPROC(__flush_icache_range)
-ENDPROC(__flush_cache_user_range)
+SYM_FUNC_END(__flush_icache_range)
+SYM_FUNC_END(__flush_cache_user_range)
 
 /*
  *	invalidate_icache_range(start,end)
@@ -77,7 +77,7 @@ ENDPROC(__flush_cache_user_range)
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-ENTRY(invalidate_icache_range)
+SYM_FUNC_START(invalidate_icache_range)
 alternative_if ARM64_HAS_CACHE_DIC
 	mov	x0, xzr
 	isb
@@ -94,7 +94,7 @@ alternative_else_nop_endif
 2:
 	mov	x0, #-EFAULT
 	b	1b
-ENDPROC(invalidate_icache_range)
+SYM_FUNC_END(invalidate_icache_range)
 
 /*
  *	__flush_dcache_area(kaddr, size)
@@ -105,10 +105,10 @@ ENDPROC(invalidate_icache_range)
  *	- kaddr   - kernel address
  *	- size    - size in question
  */
-ENTRY(__flush_dcache_area)
+SYM_FUNC_START_PI(__flush_dcache_area)
 	dcache_by_line_op civac, sy, x0, x1, x2, x3
 	ret
-ENDPIPROC(__flush_dcache_area)
+SYM_FUNC_END_PI(__flush_dcache_area)
 
 /*
  *	__clean_dcache_area_pou(kaddr, size)
@@ -119,14 +119,14 @@ ENDPIPROC(__flush_dcache_area)
  *	- kaddr   - kernel address
  *	- size    - size in question
  */
-ENTRY(__clean_dcache_area_pou)
+SYM_FUNC_START(__clean_dcache_area_pou)
 alternative_if ARM64_HAS_CACHE_IDC
 	dsb	ishst
 	ret
 alternative_else_nop_endif
 	dcache_by_line_op cvau, ish, x0, x1, x2, x3
 	ret
-ENDPROC(__clean_dcache_area_pou)
+SYM_FUNC_END(__clean_dcache_area_pou)
 
 /*
  *	__inval_dcache_area(kaddr, size)
@@ -138,7 +138,8 @@ ENDPROC(__clean_dcache_area_pou)
  *	- kaddr   - kernel address
  *	- size    - size in question
  */
-ENTRY(__inval_dcache_area)
+SYM_FUNC_START_LOCAL(__dma_inv_area)
+SYM_FUNC_START_PI(__inval_dcache_area)
 	/* FALLTHROUGH */
 
 /*
@@ -146,7 +147,6 @@ ENTRY(__inval_dcache_area)
  *	- start   - virtual start address of region
  *	- size    - size in question
  */
-__dma_inv_area:
 	add	x1, x1, x0
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
@@ -165,8 +165,8 @@ __dma_inv_area:
 	b.lo	2b
 	dsb	sy
 	ret
-ENDPIPROC(__inval_dcache_area)
-ENDPROC(__dma_inv_area)
+SYM_FUNC_END_PI(__inval_dcache_area)
+SYM_FUNC_END(__dma_inv_area)
 
 /*
  *	__clean_dcache_area_poc(kaddr, size)
@@ -177,7 +177,8 @@ ENDPROC(__dma_inv_area)
  *	- kaddr   - kernel address
  *	- size    - size in question
  */
-ENTRY(__clean_dcache_area_poc)
+SYM_FUNC_START_LOCAL(__dma_clean_area)
+SYM_FUNC_START_PI(__clean_dcache_area_poc)
 	/* FALLTHROUGH */
 
 /*
@@ -185,11 +186,10 @@ ENTRY(__clean_dcache_area_poc)
  *	- start   - virtual start address of region
  *	- size    - size in question
  */
-__dma_clean_area:
 	dcache_by_line_op cvac, sy, x0, x1, x2, x3
 	ret
-ENDPIPROC(__clean_dcache_area_poc)
-ENDPROC(__dma_clean_area)
+SYM_FUNC_END_PI(__clean_dcache_area_poc)
+SYM_FUNC_END(__dma_clean_area)
 
 /*
  *	__clean_dcache_area_pop(kaddr, size)
@@ -200,13 +200,13 @@ ENDPROC(__dma_clean_area)
  *	- kaddr   - kernel address
  *	- size    - size in question
  */
-ENTRY(__clean_dcache_area_pop)
+SYM_FUNC_START_PI(__clean_dcache_area_pop)
 	alternative_if_not ARM64_HAS_DCPOP
 	b	__clean_dcache_area_poc
 	alternative_else_nop_endif
 	dcache_by_line_op cvap, sy, x0, x1, x2, x3
 	ret
-ENDPIPROC(__clean_dcache_area_pop)
+SYM_FUNC_END_PI(__clean_dcache_area_pop)
 
 /*
  *	__dma_flush_area(start, size)
@@ -216,10 +216,10 @@ ENDPIPROC(__clean_dcache_area_pop)
  *	- start   - virtual start address of region
  *	- size    - size in question
  */
-ENTRY(__dma_flush_area)
+SYM_FUNC_START_PI(__dma_flush_area)
 	dcache_by_line_op civac, sy, x0, x1, x2, x3
 	ret
-ENDPIPROC(__dma_flush_area)
+SYM_FUNC_END_PI(__dma_flush_area)
 
 /*
  *	__dma_map_area(start, size, dir)
@@ -227,11 +227,11 @@ ENDPIPROC(__dma_flush_area)
  *	- size	- size of region
  *	- dir	- DMA direction
  */
-ENTRY(__dma_map_area)
+SYM_FUNC_START_PI(__dma_map_area)
 	cmp	w2, #DMA_FROM_DEVICE
 	b.eq	__dma_inv_area
 	b	__dma_clean_area
-ENDPIPROC(__dma_map_area)
+SYM_FUNC_END_PI(__dma_map_area)
 
 /*
  *	__dma_unmap_area(start, size, dir)
@@ -239,8 +239,8 @@ ENDPIPROC(__dma_map_area)
  *	- size	- size of region
  *	- dir	- DMA direction
  */
-ENTRY(__dma_unmap_area)
+SYM_FUNC_START_PI(__dma_unmap_area)
 	cmp	w2, #DMA_TO_DEVICE
 	b.ne	__dma_inv_area
 	ret
-ENDPIPROC(__dma_unmap_area)
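
The cache.S hunks are a mechanical conversion from the old ENTRY()/ENDPROC()/ENDPIPROC() annotations to the newer SYM_FUNC_START*/SYM_FUNC_END* macros. The _PI ("position independent") variants additionally publish a __pi_-prefixed alias for the function, and the formerly bare __dma_inv_area/__dma_clean_area labels become SYM_FUNC_START_LOCAL entries placed ahead of the aliased entry point so the existing fallthrough layout survives. As a rough C-level sketch of what the _PI alias amounts to (illustrative only, not the kernel's macros, which are assembler directives):

    /*
     * Hypothetical C equivalent of SYM_FUNC_START_PI/SYM_FUNC_END_PI:
     * define the function and also publish a __pi_-prefixed alias that
     * position-independent early-boot code can call safely.
     */
    #include <stddef.h>

    void __flush_dcache_area(void *kaddr, size_t size)
    {
            /* clean + invalidate each cache line in [kaddr, kaddr + size) */
    }

    /* Second symbol, same code: */
    extern __typeof__(__flush_dcache_area) __pi___flush_dcache_area
            __attribute__((alias("__flush_dcache_area")));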
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b5e329fde2dd..8ef73e89d514 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -29,15 +29,9 @@ static cpumask_t tlb_flush_pending;
 #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
 #define ASID_FIRST_VERSION	(1UL << asid_bits)
 
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
-#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
-#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
-#else
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
+#define NUM_USER_ASIDS		ASID_FIRST_VERSION
 #define asid2idx(asid)		((asid) & ~ASID_MASK)
 #define idx2asid(idx)		asid2idx(idx)
-#endif
 
 /* Get the ASIDBits supported by the current CPU */
 static u32 get_cpu_asid_bits(void)
@@ -77,13 +71,33 @@ void verify_cpu_asid_bits(void)
 	}
 }
 
+static void set_kpti_asid_bits(void)
+{
+	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
+	/*
+	 * In case of KPTI kernel/user ASIDs are allocated in
+	 * pairs, the bottom bit distinguishes the two: if it
+	 * is set, then the ASID will map only userspace. Thus
+	 * mark even as reserved for kernel.
+	 */
+	memset(asid_map, 0xaa, len);
+}
+
+static void set_reserved_asid_bits(void)
+{
+	if (arm64_kernel_unmapped_at_el0())
+		set_kpti_asid_bits();
+	else
+		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+}
+
 static void flush_context(void)
 {
 	int i;
 	u64 asid;
 
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
-	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+	set_reserved_asid_bits();
 
 	for_each_possible_cpu(i) {
 		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
@@ -261,6 +275,14 @@ static int asids_init(void)
 		panic("Failed to allocate bitmap for %lu ASIDs\n",
 		      NUM_USER_ASIDS);
 
+	/*
+	 * We cannot call set_reserved_asid_bits() here because CPU
+	 * caps are not finalized yet, so it is safer to assume KPTI
+	 * and reserve kernel ASID's from beginning.
+	 */
+	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+		set_kpti_asid_bits();
+
 	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
 	return 0;
 }
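
The context.c change drops the compile-time halving of the ASID space under CONFIG_UNMAP_KERNEL_AT_EL0. The allocation bitmap is seeded at runtime instead: while KPTI must be assumed, odd-numbered bits are pre-marked busy so each allocation implicitly reserves the partner of its kernel/user pair; if the running system turns out not to need KPTI, flush_context() clears the whole bitmap and the full ASID space becomes usable. A standalone sketch of the bitmap trick (the sizes and helper names below are illustrative, not the kernel's):

    #include <stdint.h>
    #include <string.h>

    #define ASID_BITS      16
    #define NUM_USER_ASIDS (1UL << ASID_BITS)

    static unsigned long asid_map[NUM_USER_ASIDS / (8 * sizeof(unsigned long))];

    /*
     * 0xaa is 0b10101010: memset()ing the allocation bitmap with it marks
     * every odd-numbered bit busy, so a find-next-zero-bit allocator can
     * only ever hand out the even member of each even/odd ASID pair.
     */
    static void mark_kpti_pairs_reserved(void)
    {
            memset(asid_map, 0xaa, sizeof(asid_map));
    }

    /* The userspace flavour of a pair is the allocated ASID with bit 0 set. */
    static uint64_t to_user_asid(uint64_t asid)
    {
            return asid | 1;
    }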
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 9ce7bd9d4d9c..250c49008d73 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -54,7 +54,7 @@ static int change_memory_common(unsigned long addr, int numpages,
 				pgprot_t set_mask, pgprot_t clear_mask)
 {
 	unsigned long start = addr;
-	unsigned long size = PAGE_SIZE*numpages;
+	unsigned long size = PAGE_SIZE * numpages;
 	unsigned long end = start + size;
 	struct vm_struct *area;
 	int i;
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index a1e0592d1fbc..aafed6902411 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -42,7 +42,14 @@
 #define TCR_KASAN_FLAGS 0
 #endif
 
-#define MAIR(attr, mt)	((attr) << ((mt) * 8))
+/* Default MAIR_EL1 */
+#define MAIR_EL1_SET							\
+	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
+	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
+	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |		\
+	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
+	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
+	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))
 
 #ifdef CONFIG_CPU_PM
 /**
@@ -50,7 +57,7 @@
  *
  * x0: virtual address of context pointer
  */
-ENTRY(cpu_do_suspend)
+SYM_FUNC_START(cpu_do_suspend)
 	mrs	x2, tpidr_el0
 	mrs	x3, tpidrro_el0
 	mrs	x4, contextidr_el1
@@ -74,7 +81,7 @@ alternative_endif
 	stp	x10, x11, [x0, #64]
 	stp	x12, x13, [x0, #80]
 	ret
-ENDPROC(cpu_do_suspend)
+SYM_FUNC_END(cpu_do_suspend)
 
 /**
  * cpu_do_resume - restore CPU register context
@@ -82,7 +89,7 @@ ENDPROC(cpu_do_suspend)
  * x0: Address of context pointer
  */
 	.pushsection ".idmap.text", "awx"
-ENTRY(cpu_do_resume)
+SYM_FUNC_START(cpu_do_resume)
 	ldp	x2, x3, [x0]
 	ldp	x4, x5, [x0, #16]
 	ldp	x6, x8, [x0, #32]
@@ -131,7 +138,7 @@ alternative_else_nop_endif
 
 	isb
 	ret
-ENDPROC(cpu_do_resume)
+SYM_FUNC_END(cpu_do_resume)
 	.popsection
 #endif
 
@@ -142,7 +149,7 @@ ENDPROC(cpu_do_resume)
  *
  *	- pgd_phys - physical address of new TTB
  */
-ENTRY(cpu_do_switch_mm)
+SYM_FUNC_START(cpu_do_switch_mm)
 	mrs	x2, ttbr1_el1
 	mmid	x1, x1				// get mm->context.id
 	phys_to_ttbr x3, x0
@@ -161,7 +168,7 @@ alternative_else_nop_endif
 	msr	ttbr0_el1, x3			// now update TTBR0
 	isb
 	b	post_ttbr_update_workaround	// Back to C code...
-ENDPROC(cpu_do_switch_mm)
+SYM_FUNC_END(cpu_do_switch_mm)
 
 	.pushsection ".idmap.text", "awx"
 
@@ -182,7 +189,7 @@ ENDPROC(cpu_do_switch_mm)
  * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
  * called by anything else. It can only be executed from a TTBR0 mapping.
  */
-ENTRY(idmap_cpu_replace_ttbr1)
+SYM_FUNC_START(idmap_cpu_replace_ttbr1)
 	save_and_disable_daif flags=x2
 
 	__idmap_cpu_set_reserved_ttbr1 x1, x3
@@ -194,7 +201,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
 	restore_daif x2
 
 	ret
-ENDPROC(idmap_cpu_replace_ttbr1)
+SYM_FUNC_END(idmap_cpu_replace_ttbr1)
 	.popsection
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
@@ -222,7 +229,7 @@ ENDPROC(idmap_cpu_replace_ttbr1)
  */
 __idmap_kpti_flag:
 	.long	1
-ENTRY(idmap_kpti_install_ng_mappings)
+SYM_FUNC_START(idmap_kpti_install_ng_mappings)
 	cpu		.req	w0
 	num_cpus	.req	w1
 	swapper_pa	.req	x2
@@ -250,15 +257,15 @@ ENTRY(idmap_kpti_install_ng_mappings)
 	/* We're the boot CPU. Wait for the others to catch up */
 	sevl
 1:	wfe
-	ldaxr	w18, [flag_ptr]
-	eor	w18, w18, num_cpus
-	cbnz	w18, 1b
+	ldaxr	w17, [flag_ptr]
+	eor	w17, w17, num_cpus
+	cbnz	w17, 1b
 
 	/* We need to walk swapper, so turn off the MMU. */
 	pre_disable_mmu_workaround
-	mrs	x18, sctlr_el1
-	bic	x18, x18, #SCTLR_ELx_M
-	msr	sctlr_el1, x18
+	mrs	x17, sctlr_el1
+	bic	x17, x17, #SCTLR_ELx_M
+	msr	sctlr_el1, x17
 	isb
 
 	/* Everybody is enjoying the idmap, so we can rewrite swapper. */
@@ -281,9 +288,9 @@ skip_pgd:
 	isb
 
 	/* We're done: fire up the MMU again */
-	mrs	x18, sctlr_el1
-	orr	x18, x18, #SCTLR_ELx_M
-	msr	sctlr_el1, x18
+	mrs	x17, sctlr_el1
+	orr	x17, x17, #SCTLR_ELx_M
+	msr	sctlr_el1, x17
 	isb
 
 	/*
@@ -353,47 +360,48 @@ skip_pte:
 	b.ne	do_pte
 	b	next_pmd
 
+	.unreq	cpu
+	.unreq	num_cpus
+	.unreq	swapper_pa
+	.unreq	cur_pgdp
+	.unreq	end_pgdp
+	.unreq	pgd
+	.unreq	cur_pudp
+	.unreq	end_pudp
+	.unreq	pud
+	.unreq	cur_pmdp
+	.unreq	end_pmdp
+	.unreq	pmd
+	.unreq	cur_ptep
+	.unreq	end_ptep
+	.unreq	pte
+
 	/* Secondary CPUs end up here */
 __idmap_kpti_secondary:
 	/* Uninstall swapper before surgery begins */
-	__idmap_cpu_set_reserved_ttbr1 x18, x17
+	__idmap_cpu_set_reserved_ttbr1 x16, x17
 
 	/* Increment the flag to let the boot CPU we're ready */
-1:	ldxr	w18, [flag_ptr]
-	add	w18, w18, #1
-	stxr	w17, w18, [flag_ptr]
+1:	ldxr	w16, [flag_ptr]
+	add	w16, w16, #1
+	stxr	w17, w16, [flag_ptr]
 	cbnz	w17, 1b
 
 	/* Wait for the boot CPU to finish messing around with swapper */
 	sevl
 1:	wfe
-	ldxr	w18, [flag_ptr]
-	cbnz	w18, 1b
+	ldxr	w16, [flag_ptr]
+	cbnz	w16, 1b
 
 	/* All done, act like nothing happened */
-	offset_ttbr1 swapper_ttb, x18
+	offset_ttbr1 swapper_ttb, x16
 	msr	ttbr1_el1, swapper_ttb
 	isb
 	ret
 
-	.unreq	cpu
-	.unreq	num_cpus
-	.unreq	swapper_pa
 	.unreq	swapper_ttb
 	.unreq	flag_ptr
-	.unreq	cur_pgdp
-	.unreq	end_pgdp
-	.unreq	pgd
-	.unreq	cur_pudp
-	.unreq	end_pudp
-	.unreq	pud
-	.unreq	cur_pmdp
-	.unreq	end_pmdp
-	.unreq	pmd
-	.unreq	cur_ptep
-	.unreq	end_ptep
-	.unreq	pte
-ENDPROC(idmap_kpti_install_ng_mappings)
+SYM_FUNC_END(idmap_kpti_install_ng_mappings)
 	.popsection
 #endif
 
@@ -404,7 +412,7 @@ ENDPROC(idmap_kpti_install_ng_mappings)
  * value of the SCTLR_EL1 register.
 */
 	.pushsection ".idmap.text", "awx"
-ENTRY(__cpu_setup)
+SYM_FUNC_START(__cpu_setup)
 	tlbi	vmalle1				// Invalidate local TLB
 	dsb	nsh
 
@@ -416,23 +424,9 @@ ENTRY(__cpu_setup)
 	enable_dbg				// since this is per-cpu
 	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
 	/*
-	 * Memory region attributes for LPAE:
-	 *
-	 *   n = AttrIndx[2:0]
-	 *			n	MAIR
-	 *   DEVICE_nGnRnE	000	00000000
-	 *   DEVICE_nGnRE	001	00000100
-	 *   DEVICE_GRE		010	00001100
-	 *   NORMAL_NC		011	01000100
-	 *   NORMAL		100	11111111
-	 *   NORMAL_WT		101	10111011
+	 * Memory region attributes
 	 */
-	ldr	x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
-		     MAIR(0x04, MT_DEVICE_nGnRE) | \
-		     MAIR(0x0c, MT_DEVICE_GRE) | \
-		     MAIR(0x44, MT_NORMAL_NC) | \
-		     MAIR(0xff, MT_NORMAL) | \
-		     MAIR(0xbb, MT_NORMAL_WT)
+	mov_q	x5, MAIR_EL1_SET
 	msr	mair_el1, x5
 	/*
 	 * Prepare SCTLR
@@ -475,4 +469,4 @@ ENTRY(__cpu_setup)
 #endif	/* CONFIG_ARM64_HW_AFDBM */
 	msr	tcr_el1, x10
 	ret					// return to head.S
-ENDPROC(__cpu_setup)
+SYM_FUNC_END(__cpu_setup)
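
Two substantive changes ride along with the annotation conversion in proc.S. First, idmap_kpti_install_ng_mappings moves its scratch values from w18/x18 onto w16/w17 (x18 is the AArch64 platform register, which the kernel wants to keep free, e.g. for the shadow call stack), and the .unreq directives for the page-walk registers move ahead of the secondary-CPU path, which only needs swapper_ttb and flag_ptr. Second, the open-coded MAIR() byte table in __cpu_setup becomes a named MAIR_EL1_SET constant built from MAIR_ATTRIDX() and loaded with mov_q instead of a literal-pool ldr. The encoding simply packs one 8-bit attribute per memory-type index; a sketch using the byte values from the deleted comment table (the macro bodies below are reproduced for illustration, not taken verbatim from the kernel headers):

    #include <stdint.h>

    /* Place an 8-bit attribute in byte "idx" of the MAIR_EL1 value. */
    #define MAIR_ATTRIDX(attr, idx) ((uint64_t)(attr) << ((idx) * 8))

    /* AttrIndx assignments, per the deleted comment in __cpu_setup: */
    #define MT_DEVICE_nGnRnE 0  /* attribute 0x00 */
    #define MT_DEVICE_nGnRE  1  /* attribute 0x04 */
    #define MT_DEVICE_GRE    2  /* attribute 0x0c */
    #define MT_NORMAL_NC     3  /* attribute 0x44 */
    #define MT_NORMAL        4  /* attribute 0xff */
    #define MT_NORMAL_WT     5  /* attribute 0xbb */

    static const uint64_t mair_el1_set =
            MAIR_ATTRIDX(0x00, MT_DEVICE_nGnRnE) |
            MAIR_ATTRIDX(0x04, MT_DEVICE_nGnRE)  |
            MAIR_ATTRIDX(0x0c, MT_DEVICE_GRE)    |
            MAIR_ATTRIDX(0x44, MT_NORMAL_NC)     |
            MAIR_ATTRIDX(0xff, MT_NORMAL)        |
            MAIR_ATTRIDX(0xbb, MT_NORMAL_WT);
    /* == 0x0000bbff440c0400, the same value the old ldr expression built. */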