Diffstat (limited to 'mm/kfence/core.c')
-rw-r--r--	mm/kfence/core.c	16
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 102048821c22..727c20c94ac5 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -594,30 +594,30 @@ static void rcu_guarded_free(struct rcu_head *h)
  */
 static unsigned long kfence_init_pool(void)
 {
-	unsigned long addr;
-	struct page *pages;
+	unsigned long addr, start_pfn;
 	int i;
 
 	if (!arch_kfence_init_pool())
 		return (unsigned long)__kfence_pool;
 
 	addr = (unsigned long)__kfence_pool;
-	pages = virt_to_page(__kfence_pool);
+	start_pfn = PHYS_PFN(virt_to_phys(__kfence_pool));
 
 	/*
-	 * Set up object pages: they must have PG_slab set, to avoid freeing
-	 * these as real pages.
+	 * Set up object pages: they must have PGTY_slab set to avoid freeing
+	 * them as real pages.
 	 *
 	 * We also want to avoid inserting kfence_free() in the kfree()
 	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
 	 * enters __slab_free() slow-path.
 	 */
 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-		struct slab *slab = page_slab(nth_page(pages, i));
+		struct slab *slab;
 
 		if (!i || (i % 2))
 			continue;
 
+		slab = page_slab(pfn_to_page(start_pfn + i));
 		__folio_set_slab(slab_folio(slab));
 #ifdef CONFIG_MEMCG
 		slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
@@ -665,10 +665,12 @@ static unsigned long kfence_init_pool(void)
 
 reset_slab:
 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-		struct slab *slab = page_slab(nth_page(pages, i));
+		struct slab *slab;
 
 		if (!i || (i % 2))
 			continue;
+
+		slab = page_slab(pfn_to_page(start_pfn + i));
 #ifdef CONFIG_MEMCG
 		slab->obj_exts = 0;
 #endif
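
Note on the pattern this patch switches to: the old code took one struct page pointer for the pool (pages = virt_to_page(__kfence_pool)) and stepped through descriptors with nth_page(pages, i). The new code instead computes the pool's first page frame number once (start_pfn = PHYS_PFN(virt_to_phys(__kfence_pool))) and resolves each page individually with pfn_to_page(start_pfn + i). The KFENCE pool is physically contiguous, so start_pfn + i always names the right frame, and a per-PFN lookup stays correct even on configurations where the struct pages backing a contiguous physical range are not virtually contiguous, which is the situation nth_page() existed to paper over. Below is a minimal, self-contained userspace sketch of the same iteration pattern; every name in it (fake_page, lookup_pfn, POOL_PAGES) is invented for illustration and is not kernel API.

#include <stdio.h>

#define POOL_PAGES 16			/* stand-in for KFENCE_POOL_SIZE / PAGE_SIZE */

struct fake_page { int is_slab; };	/* stand-in for struct page */

static struct fake_page pool[POOL_PAGES];

/* Stand-in for pfn_to_page(): resolves one frame number to its
 * descriptor. Nothing here requires consecutive PFNs to map to
 * adjacent descriptors, which is the property the patch relies on. */
static struct fake_page *lookup_pfn(unsigned long pfn, unsigned long start_pfn)
{
	return &pool[pfn - start_pfn];
}

int main(void)
{
	unsigned long start_pfn = 0x1000;	/* arbitrary example frame */
	int i;

	for (i = 0; i < POOL_PAGES; i++) {
		/* Mirrors the diff: skip page 0 and all odd (guard) pages;
		 * only even pages >= 2 are marked as slab/object pages. */
		if (!i || (i % 2))
			continue;
		lookup_pfn(start_pfn + i, start_pfn)->is_slab = 1;
	}

	for (i = 0; i < POOL_PAGES; i++)
		printf("page %2d: %s\n", i, pool[i].is_slab ? "slab" : "guard");
	return 0;
}

The if (!i || (i % 2)) continue; filter in both loops mirrors the pool layout: page 0 and every odd-indexed page act as guard pages, so only even pages from index 2 onward are set up as slab-backed object pages.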