author    Vasily Gorbik <gor@linux.ibm.com>  2025-03-13 12:55:43 +0300
committer Vasily Gorbik <gor@linux.ibm.com>  2025-03-18 19:13:05 +0300
commit    caa3cd5ccd016cb28294e2b7e21fe08831e9a877 (patch)
tree      83df583a37e7eb08064a2afdacd4b9f0ea162f09
parent    d93a855c31b72637e19659bca613766eaa89d496 (diff)
download  linux-caa3cd5ccd016cb28294e2b7e21fe08831e9a877.tar.xz
s390/kfence: Split kfence pool into 4k mappings in arch_kfence_init_pool()
Since commit d08d4e7cd6bf ("s390/mm: use full 4KB page for 2KB PTE"), there
is no longer any reason to avoid splitting the kfence pool into 4k mappings
in arch_kfence_init_pool(). Remove the architecture-specific
kfence_split_mapping().

Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
-rw-r--r--  arch/s390/include/asm/kfence.h  17
-rw-r--r--  arch/s390/mm/init.c              1
2 files changed, 3 insertions, 15 deletions
diff --git a/arch/s390/include/asm/kfence.h b/arch/s390/include/asm/kfence.h
index e47fd8cbe701..e95e35eb8a3f 100644
--- a/arch/s390/include/asm/kfence.h
+++ b/arch/s390/include/asm/kfence.h
@@ -12,27 +12,16 @@ void __kernel_map_pages(struct page *page, int numpages, int enable);
 
 static __always_inline bool arch_kfence_init_pool(void)
 {
-	return true;
-}
-
-#define arch_kfence_test_address(addr)	((addr) & PAGE_MASK)
-
-/*
- * Do not split kfence pool to 4k mapping with arch_kfence_init_pool(),
- * but earlier where page table allocations still happen with memblock.
- * Reason is that arch_kfence_init_pool() gets called when the system
- * is still in a limbo state - disabling and enabling bottom halves is
- * not yet allowed, but that is what our page_table_alloc() would do.
- */
-static __always_inline void kfence_split_mapping(void)
-{
 #ifdef CONFIG_KFENCE
 	unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;
 
 	set_memory_4k((unsigned long)__kfence_pool, pool_pages);
 #endif
+	return true;
 }
 
+#define arch_kfence_test_address(addr)	((addr) & PAGE_MASK)
+
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
 	__kernel_map_pages(virt_to_page((void *)addr), 1, !protect);
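For readability, here is arch_kfence_init_pool() as it reads once the hunk
above is applied. This is only the kept and added lines reassembled, with a
short comment added for context; no code beyond what the diff shows:

/* arch/s390/include/asm/kfence.h after this patch (reassembled from the hunk above) */
static __always_inline bool arch_kfence_init_pool(void)
{
#ifdef CONFIG_KFENCE
	unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;

	/* Split the kfence pool into 4k mappings here, which was previously
	 * done earlier in kfence_split_mapping(). */
	set_memory_4k((unsigned long)__kfence_pool, pool_pages);
#endif
	return true;
}

#define arch_kfence_test_address(addr)	((addr) & PAGE_MASK)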
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 615ee245cb93..f4ac69506608 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -175,7 +175,6 @@ void __init mem_init(void)
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
 	pv_init();
-	kfence_split_mapping();
 
 	/* this will put all low memory onto the freelists */
 	memblock_free_all();
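For context, a minimal sketch of how the generic KFENCE pool setup is
expected to invoke the arch hook that now does the split. The surrounding
helper below is illustrative only and hypothetical; it is not the actual
mm/kfence/core.c code. Only arch_kfence_init_pool() and __kfence_pool come
from the patch above:

/*
 * Illustrative sketch, not the real generic KFENCE implementation:
 * the architecture hook runs during pool initialization, after memblock
 * is gone, which is why the split can now live in the hook itself.
 */
static bool example_kfence_pool_setup(void)	/* hypothetical helper */
{
	/*
	 * Give the architecture a chance to prepare the pool mapping.
	 * On s390 this now splits __kfence_pool into 4k mappings (see the
	 * kfence.h hunk above); a false return is assumed to abort setup.
	 */
	if (!arch_kfence_init_pool())
		return false;

	/* ... generic per-page setup of __kfence_pool would follow ... */
	return true;
}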