 arch/arm64/include/asm/memory.h |  2 ++
 arch/arm64/mm/contpte.c         |  6 ++++++
 arch/arm64/mm/init.c            | 12 ++++++++--
 arch/arm64/mm/trans_pgd.c       |  6 ++++--
 4 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 54fb014eba05..0480c61dbb4f 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -110,6 +110,8 @@
#define PAGE_END (_PAGE_END(VA_BITS_MIN))
#endif /* CONFIG_KASAN */
+#define PHYSMEM_END __pa(PAGE_END - 1)
+
#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
/*
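Note: PAGE_END is the exclusive end of the arm64 linear map, so __pa(PAGE_END - 1) yields the highest physical address the linear map can cover. A minimal sketch of how such a bound could be consumed follows; the helper name and check are hypothetical, not part of this patch:

#include <asm/memory.h>

/* Hypothetical helper: reject physical addresses the linear map cannot reach. */
static inline bool phys_in_linear_map_range(phys_addr_t paddr)
{
	return paddr <= PHYSMEM_END;
}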
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index a3edced29ac1..55107d27d3f8 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -421,6 +421,12 @@ int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
ptep = contpte_align_down(ptep);
start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
+ /*
+ * We do not advance entry because __ptep_set_access_flags()
+ * only consumes the access flags from entry. And since we have
+ * already checked the whole contpte block and would have
+ * returned early if nothing changed, pte_same() within
+ * __ptep_set_access_flags() is likely false.
+ */
for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
__ptep_set_access_flags(vma, addr, ptep, entry, 0);
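To make the new comment concrete: if __ptep_set_access_flags() folds only the access bits of entry into each existing PTE, the pfn bits of every PTE in the block are preserved, so reusing the same entry across all CONT_PTES iterations is safe. An illustrative sketch, assuming the relevant bits are PTE_AF, PTE_DBM and PTE_RDONLY; this is not the kernel's implementation:

/* Illustrative only: fold access bits from entry, keep old's pfn bits. */
static inline pte_t sketch_fold_access_flags(pte_t old, pte_t entry)
{
	pteval_t mask = PTE_AF | PTE_DBM | PTE_RDONLY;	/* assumed flag set */

	return __pte((pte_val(old) & ~mask) | (pte_val(entry) & mask));
}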
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9b5ab6818f7f..a0400b9aa814 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -414,8 +414,16 @@ void __init mem_init(void)
void free_initmem(void)
{
- free_reserved_area(lm_alias(__init_begin),
- lm_alias(__init_end),
+ void *lm_init_begin = lm_alias(__init_begin);
+ void *lm_init_end = lm_alias(__init_end);
+
+ WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE));
+ WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE));
+
+ /* Delete __init region from memblock.reserved. */
+ memblock_free(lm_init_begin, lm_init_end - lm_init_begin);
+
+ free_reserved_area(lm_init_begin, lm_init_end,
POISON_FREE_INITMEM, "unused kernel");
/*
* Unmap the __init region but leave the VM area in place. This
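arm64 keeps memblock alive after boot (ARCH_KEEP_MEMBLOCK), so without the added memblock_free() the freed __init pages would still appear reserved. The effect can be probed with a one-line check; the WARN_ON placement below is illustrative, not part of the patch:

/* Illustrative: after memblock_free(), the __init range is no longer reserved. */
WARN_ON(memblock_is_reserved(__pa_symbol(__init_begin)));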
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index 5139a28130c0..0f7b484cb2ff 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -42,14 +42,16 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
* the temporary mappings we use during restore.
*/
__set_pte(dst_ptep, pte_mkwrite_novma(pte));
- } else if ((debug_pagealloc_enabled() ||
- is_kfence_address((void *)addr)) && !pte_none(pte)) {
+ } else if (!pte_none(pte)) {
/*
* debug_pagealloc will have removed the PTE_VALID bit if
* the page isn't in use by the resume kernel. It may have
* been in use by the original kernel, in which case we need
* to put it back in our copy to do the restore.
*
+ * Other cases include kfence, vmalloc and memfd_secret, which
+ * may call set_direct_map_invalid_noflush().
+ *
* Before marking this entry valid, check whether the pfn
* should be mapped.
*/
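The case the extended comment describes can be sketched as follows: set_direct_map_invalid_noflush() leaves a PTE that is non-none but fails pte_valid(), and the restore copy should re-validate it only when the pfn is backed by memory. The guard below is an assumed shape of the surrounding code, not the patch itself:

pte_t pte = __ptep_get(src_ptep);

/* Non-none but invalid: validity was stripped from a live linear-map page. */
if (!pte_none(pte) && !pte_valid(pte) && pfn_valid(pte_pfn(pte)))
	__set_pte(dst_ptep, pte_mkpresent(pte));	/* restore validity in the copy */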