author     Nicholas Miehlbradt <nicholas@linux.ibm.com>   2022-09-26 10:57:26 +0300
committer  Michael Ellerman <mpe@ellerman.id.au>          2022-09-28 12:22:10 +0300
commit     a5edf9815dd739fce660b4c8658f61b7d2517042 (patch)
tree       ee622d9e25b2b4a10d9447c726e7c678ba7871bc /arch
parent     d7902d31cbc3bf72722768831a684b0286ccd523 (diff)
powerpc/64s: Enable KFENCE on book3s64
KFENCE support was added for ppc32 in commit 90cbac0e995d ("powerpc:
Enable KFENCE for PPC32").

Enable KFENCE on ppc64 architecture with hash and radix MMUs. It uses
the same mechanism as debug pagealloc to protect/unprotect pages. All
KFENCE kunit tests pass on both MMUs.

KFENCE memory is initially allocated using memblock but is later
marked as SLAB allocated. This necessitates the change to __pud_free
to ensure that the KFENCE pages are freed appropriately.

Based on previous work by Christophe Leroy and Jordan Niethe.

Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
Reviewed-by: Russell Currey <ruscur@russell.cc>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220926075726.2846-4-nicholas@linux.ibm.com
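The hunks below repeatedly swap debug_pagealloc_enabled() for
debug_pagealloc_enabled_or_kfence(). That helper is not part of this diff; it
lives in arch/powerpc/mm/mmu_decl.h (which is why radix_pgtable.c gains an
include of mm/mmu_decl.h below) and reads approximately:

    static inline bool debug_pagealloc_enabled_or_kfence(void)
    {
            /* Compile-time KFENCE forces the same behaviour as runtime debug pagealloc. */
            return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
    }

The practical effect: with CONFIG_KFENCE=y the kernel linear mapping is broken
up into PAGE_SIZE mappings, exactly as if debug pagealloc had been enabled at
boot, so individual pages can be protected and unprotected.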
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/Kconfig                          |  2 +-
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgalloc.h  |  6 ++++--
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h  |  2 +-
-rw-r--r--  arch/powerpc/include/asm/kfence.h             | 15 +++++++++++++++
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c         | 10 +++++-----
-rw-r--r--  arch/powerpc/mm/book3s64/radix_pgtable.c      |  6 ++++--
6 files changed, 30 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 237c0e071e5e..81c9f895d690 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -195,7 +195,7 @@ config PPC
         select HAVE_ARCH_KASAN                  if PPC_RADIX_MMU
         select HAVE_ARCH_KASAN                  if PPC_BOOK3E_64
         select HAVE_ARCH_KASAN_VMALLOC          if HAVE_ARCH_KASAN
-        select HAVE_ARCH_KFENCE                 if PPC_BOOK3S_32 || PPC_8xx || 40x
+        select HAVE_ARCH_KFENCE                 if ARCH_SUPPORTS_DEBUG_PAGEALLOC
         select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
         select HAVE_ARCH_KGDB
         select HAVE_ARCH_MMAP_RND_BITS
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index e1af0b394ceb..dd2cff53a111 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -113,9 +113,11 @@ static inline void __pud_free(pud_t *pud)
         /*
          * Early pud pages allocated via memblock allocator
-         * can't be directly freed to slab
+         * can't be directly freed to slab. KFENCE pages have
+         * both reserved and slab flags set so need to be freed
+         * with kmem_cache_free.
          */
-        if (PageReserved(page))
+        if (PageReserved(page) && !PageSlab(page))
                 free_reserved_page(page);
         else
                 kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
 }
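Note: for context, the whole helper after this hunk reads roughly as below.
The virt_to_page() line sits above the hunk and is reconstructed here; it is
not part of the patch:

    static inline void __pud_free(pud_t *pud)
    {
            struct page *page = virt_to_page(pud); /* reconstructed: outside the hunk */

            /*
             * Early pud pages allocated via memblock allocator
             * can't be directly freed to slab. KFENCE pages have
             * both reserved and slab flags set so need to be freed
             * with kmem_cache_free.
             */
            if (PageReserved(page) && !PageSlab(page))
                    free_reserved_page(page);
            else
                    kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
    }

Without the !PageSlab() check, a pud page handed out from the KFENCE pool
(reserved by memblock at boot, later marked slab) would be returned through
free_reserved_page() even though it belongs to the slab cache.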
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index b5f8264cc980..c436d8422654 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1106,7 +1106,7 @@ static inline void vmemmap_remove_mapping(unsigned long start,
 }
 #endif

-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
         if (radix_enabled())
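Note: the page cuts the hunk off here. Based on the radix__kernel_map_pages()
and hash__kernel_map_pages() implementations touched later in this patch, the
dispatcher plausibly completes as:

    static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
    {
            /* Pick the MMU-specific implementation at runtime. */
            if (radix_enabled())
                    radix__kernel_map_pages(page, numpages, enable);
            else
                    hash__kernel_map_pages(page, numpages, enable);
    }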
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
index a9846b68c6b9..6fd2b4d486c5 100644
--- a/arch/powerpc/include/asm/kfence.h
+++ b/arch/powerpc/include/asm/kfence.h
@@ -11,11 +11,25 @@
 #include <linux/mm.h>
 #include <asm/pgtable.h>

+#ifdef CONFIG_PPC64_ELF_ABI_V1
+#define ARCH_FUNC_PREFIX "."
+#endif
+
 static inline bool arch_kfence_init_pool(void)
 {
         return true;
 }

+#ifdef CONFIG_PPC64
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+        struct page *page = virt_to_page(addr);
+
+        __kernel_map_pages(page, 1, !protect);
+
+        return true;
+}
+#else
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
         pte_t *kpte = virt_to_kpte(addr);
@@ -29,5 +43,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)

         return true;
 }
+#endif

 #endif /* __ASM_POWERPC_KFENCE_H */
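Note: kfence_protect_page() is the arch hook the KFENCE core uses to toggle
guard pages around its object pages; on ppc64 it simply maps or unmaps the
page in the linear mapping via __kernel_map_pages(). The core invokes it
along these lines (a sketch of mm/kfence/core.c, approximately):

    static bool kfence_protect(unsigned long addr)
    {
            /* Make the page inaccessible so a stray access faults. */
            return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
    }

    static bool kfence_unprotect(unsigned long addr)
    {
            /* Restore access, e.g. while handling a report. */
            return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
    }

The ARCH_FUNC_PREFIX "." define is unrelated to page protection: it lets the
KFENCE report code match the dot-prefixed function entry symbols used by the
ELF ABI v1.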
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 6d985acb1d39..df008edf7be0 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -424,7 +424,7 @@ repeat:
                         break;

                 cond_resched();
-                if (debug_pagealloc_enabled() &&
+                if (debug_pagealloc_enabled_or_kfence() &&
                     (paddr >> PAGE_SHIFT) < linear_map_hash_count)
                         linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
         }
@@ -773,7 +773,7 @@ static void __init htab_init_page_sizes(void)
         bool aligned = true;
         init_hpte_page_sizes();

-        if (!debug_pagealloc_enabled()) {
+        if (!debug_pagealloc_enabled_or_kfence()) {
                 /*
                  * Pick a size for the linear mapping. Currently, we only
                  * support 16M, 1M and 4K which is the default
@@ -1061,7 +1061,7 @@ static void __init htab_initialize(void)
         prot = pgprot_val(PAGE_KERNEL);

-        if (debug_pagealloc_enabled()) {
+        if (debug_pagealloc_enabled_or_kfence()) {
                 linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
                 linear_map_hash_slots = memblock_alloc_try_nid(
                                 linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
@@ -1980,7 +1980,7 @@ repeat:
         return slot;
 }

-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static DEFINE_SPINLOCK(linear_map_hash_lock);

 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -2053,7 +2053,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
         }
         local_irq_restore(flags);
 }
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */

 void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                       phys_addr_t first_memblock_size)
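Note: the hunk above only shows the tail of hash__kernel_map_pages(). For
context, the surrounding function (reconstructed approximately, not part of
this diff) walks each page of the range, translates it to a linear-map hash
index, and calls kernel_map_linear_page()/kernel_unmap_linear_page(), which
take linear_map_hash_lock to insert or remove the bolted HPTE:

    void hash__kernel_map_pages(struct page *page, int numpages, int enable)
    {
            unsigned long flags, vaddr, lmi;
            int i;

            local_irq_save(flags);
            for (i = 0; i < numpages; i++, page++) {
                    vaddr = (unsigned long)page_address(page);
                    lmi = __pa(vaddr) >> PAGE_SHIFT;
                    if (lmi >= linear_map_hash_count)
                            continue;
                    if (enable)
                            kernel_map_linear_page(vaddr, lmi);
                    else
                            kernel_unmap_linear_page(vaddr, lmi);
            }
            local_irq_restore(flags);
    }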
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 94a05c66be95..70f1580afc3a 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -34,6 +34,8 @@
 #include <trace/events/thp.h>

+#include <mm/mmu_decl.h>
+
 unsigned int mmu_base_pid;
 unsigned long radix_mem_block_size __ro_after_init;
@@ -276,7 +278,7 @@ static int __meminit create_physical_mapping(unsigned long start,
         int psize;
         unsigned long max_mapping_size = radix_mem_block_size;

-        if (debug_pagealloc_enabled())
+        if (debug_pagealloc_enabled_or_kfence())
                 max_mapping_size = PAGE_SIZE;

         start = ALIGN(start, PAGE_SIZE);
@@ -899,7 +901,7 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
 #endif
 #endif

-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 void radix__kernel_map_pages(struct page *page, int numpages, int enable)
 {
         unsigned long addr;