| field | value | date |
|---|---|---|
| author | Catalin Marinas <catalin.marinas@arm.com> | 2024-03-07 22:05:29 +0300 |
| committer | Catalin Marinas <catalin.marinas@arm.com> | 2024-03-07 22:05:29 +0300 |
| commit | 88f0912253ea47a2bde36e0820f0b9c025d389ad (patch) | |
| tree | fb477daef419d79cc960de26157e5de2a6fc9b69 /arch/arm64/include/asm/assembler.h | |
| parent | 0c5ade742e91d7bf3a508bf6223deb7410009b6d (diff) | |
| parent | 27f2b9fcddc76d542ac339febf2af55b67f610ca (diff) | |
| download | linux-88f0912253ea47a2bde36e0820f0b9c025d389ad.tar.xz | |
Merge branch 'for-next/stage1-lpa2' into for-next/core
* for-next/stage1-lpa2: (48 commits)
  : Add support for LPA2 and WXN and stage 1
  arm64/mm: Avoid ID mapping of kpti flag if it is no longer needed
  arm64/mm: Use generic __pud_free() helper in pud_free() implementation
  arm64: gitignore: ignore relacheck
  arm64: Use Signed/Unsigned enums for TGRAN{4,16,64} and VARange
  arm64: mm: Make PUD folding check in set_pud() a runtime check
  arm64: mm: add support for WXN memory translation attribute
  mm: add arch hook to validate mmap() prot flags
  arm64: defconfig: Enable LPA2 support
  arm64: Enable 52-bit virtual addressing for 4k and 16k granule configs
  arm64: kvm: avoid CONFIG_PGTABLE_LEVELS for runtime levels
  arm64: ptdump: Deal with translation levels folded at runtime
  arm64: ptdump: Disregard unaddressable VA space
  arm64: mm: Add support for folding PUDs at runtime
  arm64: kasan: Reduce minimum shadow alignment and enable 5 level paging
  arm64: mm: Add 5 level paging support to fixmap and swapper handling
  arm64: Enable LPA2 at boot if supported by the system
  arm64: mm: add LPA2 and 5 level paging support to G-to-nG conversion
  arm64: mm: Add definitions to support 5 levels of paging
  arm64: mm: Add LPA2 support to phys<->pte conversion routines
  arm64: mm: Wire up TCR.DS bit to PTE shareability fields
  ...
Diffstat (limited to 'arch/arm64/include/asm/assembler.h')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/arm64/include/asm/assembler.h | 55 |

1 file changed, 19 insertions, 36 deletions
```diff
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 96b18a707507..ab8b396428da 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -342,20 +342,6 @@ alternative_cb_end
 	.endm
 
 /*
- * idmap_get_t0sz - get the T0SZ value needed to cover the ID map
- *
- * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
- * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
- * this number conveniently equals the number of leading zeroes in
- * the physical address of _end.
- */
-	.macro	idmap_get_t0sz, reg
-	adrp	\reg, _end
-	orr	\reg, \reg, #(1 << VA_BITS_MIN) - 1
-	clz	\reg, \reg
-	.endm
-
-/*
  * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
  * ID_AA64MMFR0_EL1.PARange value
  *
@@ -586,18 +572,27 @@ alternative_endif
 	.endm
 
 /*
- * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
+ * If the kernel is built for 52-bit virtual addressing but the hardware only
+ * supports 48 bits, we cannot program the pgdir address into TTBR1 directly,
+ * but we have to add an offset so that the TTBR1 address corresponds with the
+ * pgdir entry that covers the lowest 48-bit addressable VA.
+ *
+ * Note that this trick is only used for LVA/64k pages - LPA2/4k pages uses an
+ * additional paging level, and on LPA2/16k pages, we would end up with a root
+ * level table with only 2 entries, which is suboptimal in terms of TLB
+ * utilization, so there we fall back to 47 bits of translation if LPA2 is not
+ * supported.
+ *
  * orr is used as it can cover the immediate value (and is idempotent).
- * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
  * 	ttbr: Value of ttbr to set, modified.
  */
 	.macro	offset_ttbr1, ttbr, tmp
-#ifdef CONFIG_ARM64_VA_BITS_52
-	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
-	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
-	cbnz	\tmp, .Lskipoffs_\@
-	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
-.Lskipoffs_\@ :
+#if defined(CONFIG_ARM64_VA_BITS_52) && !defined(CONFIG_ARM64_LPA2)
+	mrs	\tmp, tcr_el1
+	and	\tmp, \tmp, #TCR_T1SZ_MASK
+	cmp	\tmp, #TCR_T1SZ(VA_BITS_MIN)
+	orr	\tmp, \ttbr, #TTBR1_BADDR_4852_OFFSET
+	csel	\ttbr, \tmp, \ttbr, eq
 #endif
 	.endm
 
@@ -619,25 +614,13 @@ alternative_endif
 
 	.macro	phys_to_pte, pte, phys
 #ifdef CONFIG_ARM64_PA_BITS_52
-	/*
-	 * We assume \phys is 64K aligned and this is guaranteed by only
-	 * supporting this configuration with 64K pages.
-	 */
-	orr	\pte, \phys, \phys, lsr #36
-	and	\pte, \pte, #PTE_ADDR_MASK
+	orr	\pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
+	and	\pte, \pte, #PHYS_TO_PTE_ADDR_MASK
 #else
 	mov	\pte, \phys
 #endif
 	.endm
 
-	.macro	pte_to_phys, phys, pte
-	and	\phys, \pte, #PTE_ADDR_MASK
-#ifdef CONFIG_ARM64_PA_BITS_52
-	orr	\phys, \phys, \phys, lsl #PTE_ADDR_HIGH_SHIFT
-	and	\phys, \phys, GENMASK_ULL(PHYS_MASK_SHIFT - 1, PAGE_SHIFT)
-#endif
-	.endm
-
 /*
  * tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU.
  */
```
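The new comment in `offset_ttbr1` explains the TTBR1 offset trick for 52-bit kernel builds running on 48-bit-VA hardware. The sketch below is a rough C illustration of that logic, not kernel code: it assumes the LVA/64k configuration the comment describes (48-bit fallback VA, 64k pages, `PGDIR_SHIFT` of 42), and the function names and `main()` driver are hypothetical. The real offset is the kernel's `TTBR1_BADDR_4852_OFFSET` constant.

```c
/*
 * Illustrative sketch only -- not kernel code. Assumes 64k pages with
 * PGDIR_SHIFT == 42 and a 48-bit fallback VA size, as described in the
 * offset_ttbr1 comment above. Helper names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define PGDIR_SHIFT	42	/* assumption: 64k pages, 52-bit VA pgd level */
#define VA_BITS_MIN	48	/* fallback VA size when 52-bit VA is unsupported */

/*
 * Bytes between the start of the 52-bit pgd and the entry that covers the
 * lowest 48-bit addressable VA (8 bytes per pgd entry).
 */
static uint64_t ttbr1_4852_offset(void)
{
	uint64_t entries_52 = 1ULL << (52 - PGDIR_SHIFT);
	uint64_t entries_48 = 1ULL << (48 - PGDIR_SHIFT);

	return (entries_52 - entries_48) * 8;
}

/*
 * C equivalent of the new offset_ttbr1: apply the offset only when
 * TCR_EL1.T1SZ was programmed for the 48-bit fallback VA size.
 */
static uint64_t offset_ttbr1(uint64_t ttbr1, unsigned int t1sz)
{
	if (t1sz == 64 - VA_BITS_MIN)
		ttbr1 |= ttbr1_4852_offset();	/* orr works: baddr is aligned */
	return ttbr1;
}

int main(void)
{
	printf("offset      = %#llx\n",
	       (unsigned long long)ttbr1_4852_offset());
	printf("ttbr1 (48b) = %#llx\n",
	       (unsigned long long)offset_ttbr1(0x40000000, 16));
	printf("ttbr1 (52b) = %#llx\n",
	       (unsigned long long)offset_ttbr1(0x40000000, 12));
	return 0;
}
```

Note the design change visible in the hunk: the old macro consulted ID_AA64MMFR2_EL1.VARange to decide whether the offset was needed, while the new one checks the T1SZ value actually programmed into TCR_EL1, so the decision follows whatever VA size the boot code ended up using.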
