diff options
 arch/arm64/include/asm/mmu.h |  2 +
 arch/arm64/mm/init.c         |  9 ++++-
 arch/arm64/mm/mmu.c          | 45 +++++++++++++++--------
 3 files changed, 42 insertions(+), 14 deletions(-)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 137a173df1ff..472610433aae 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -112,5 +112,7 @@ void kpti_install_ng_mappings(void); static inline void kpti_install_ng_mappings(void) {} #endif +extern bool page_alloc_available; + #endif /* !__ASSEMBLER__ */ #endif diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 96711b8578fd..b9b248d24fd1 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -350,7 +350,6 @@ void __init arch_mm_preinit(void) } swiotlb_init(swiotlb, flags); - swiotlb_update_mem_attributes(); /* * Check boundaries twice: Some fundamental inconsistencies can be @@ -377,6 +376,14 @@ void __init arch_mm_preinit(void) } } +bool page_alloc_available __ro_after_init; + +void __init mem_init(void) +{ + page_alloc_available = true; + swiotlb_update_mem_attributes(); +} + void free_initmem(void) { void *lm_init_begin = lm_alias(__init_begin); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index a6a00accf4f9..223947487a22 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -768,30 +768,51 @@ static inline bool force_pte_mapping(void) } static DEFINE_MUTEX(pgtable_split_lock); +static bool linear_map_requires_bbml2; int split_kernel_leaf_mapping(unsigned long start, unsigned long end) { int ret; /* - * !BBML2_NOABORT systems should not be trying to change permissions on - * anything that is not pte-mapped in the first place. Just return early - * and let the permission change code raise a warning if not already - * pte-mapped. - */ - if (!system_supports_bbml2_noabort()) - return 0; - - /* * If the region is within a pte-mapped area, there is no need to try to * split. Additionally, CONFIG_DEBUG_PAGEALLOC and CONFIG_KFENCE may * change permissions from atomic context so for those cases (which are * always pte-mapped), we must not go any further because taking the - * mutex below may sleep. 
+ * mutex below may sleep. Do not call force_pte_mapping() here because + * it could return a confusing result if called from a secondary cpu + * prior to finalizing caps. Instead, linear_map_requires_bbml2 gives us + * what we need. */ - if (force_pte_mapping() || is_kfence_address((void *)start)) + if (!linear_map_requires_bbml2 || is_kfence_address((void *)start)) return 0; + if (!system_supports_bbml2_noabort()) { + /* + * !BBML2_NOABORT systems should not be trying to change + * permissions on anything that is not pte-mapped in the first + * place. Just return early and let the permission change code + * raise a warning if not already pte-mapped. + */ + if (system_capabilities_finalized()) + return 0; + + /* + * Boot-time: split_kernel_leaf_mapping_locked() allocates from + * page allocator. Can't split until it's available. + */ + if (WARN_ON(!page_alloc_available)) + return -EBUSY; + + /* + * Boot-time: Started secondary cpus but don't know if they + * support BBML2_NOABORT yet. Can't allow splitting in this + * window in case they don't. + */ + if (WARN_ON(num_online_cpus() > 1)) + return -EBUSY; + } + /* * Ensure start and end are at least page-aligned since this is the * finest granularity we can split to. @@ -891,8 +912,6 @@ static int range_split_to_ptes(unsigned long start, unsigned long end, gfp_t gfp return ret; } -static bool linear_map_requires_bbml2 __initdata; - u32 idmap_kpti_bbml2_flag; static void __init init_idmap_kpti_bbml2_flag(void) |
