author     Pavel Tatashin <pasha.tatashin@soleen.com>  2019-12-04 18:59:23 +0300
committer  Will Deacon <will@kernel.org>               2020-01-08 19:55:12 +0300
commit     a2c2e67923ecb73c92028e9b128a9bd48d2e25e9 (patch)
tree       ed546064587dbefb4a00bb5b8db631a48d651346  /arch/arm64/kernel/hibernate.c
parent     7ea4088938b793d9a8d4e205b2fe54fba4cf9739 (diff)
download   linux-a2c2e67923ecb73c92028e9b128a9bd48d2e25e9.tar.xz
arm64: hibernate: add trans_pgd public functions
trans_pgd_create_copy() and trans_pgd_map_page() are going to be the
basis for new shared code that handles page tables for cases which are
between kernels: kexec, and hibernate.

Note: Eventually, get_safe_page() will be moved into a function pointer
passed via argument, but for now keep it as is.

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: James Morse <james.morse@arm.com>
[will: Keep these functions static until kexec needs them]
Signed-off-by: Will Deacon <will@kernel.org>
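For context, a minimal sketch of how the two helpers introduced below are intended to be used together, based only on the signatures and call sites visible in this diff. The caller name build_resume_tables() is hypothetical, the sketch assumes it lives in hibernate.c (both helpers remain static after this patch), and the allocator is still the hard-coded get_safe_page() mentioned in the note above.

/*
 * Hypothetical caller, for illustration only: duplicate the linear map,
 * then map one extra executable page into the same trans_pgd, exactly as
 * swsusp_arch_resume()/create_safe_exec_page() do in the diff below.
 */
static int build_resume_tables(void *code_page, unsigned long dst_addr,
                               pgd_t **out_pgd)
{
        pgd_t *trans_pgd;
        int rc;

        /* copy of the kernel linear map: PAGE_OFFSET..PAGE_END */
        rc = trans_pgd_create_copy(&trans_pgd, PAGE_OFFSET, PAGE_END);
        if (rc)
                return rc;

        /* map the relocation code at the address it will run from */
        rc = trans_pgd_map_page(trans_pgd, code_page, dst_addr,
                                PAGE_KERNEL_EXEC);
        if (rc)
                return rc;

        *out_pgd = trans_pgd;
        return 0;
}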
Diffstat (limited to 'arch/arm64/kernel/hibernate.c')
-rw-r--r--   arch/arm64/kernel/hibernate.c   93
1 file changed, 60 insertions(+), 33 deletions(-)
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index ce60bceed357..590963c9c609 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -182,39 +182,15 @@ int arch_hibernation_header_restore(void *addr)
 }
 EXPORT_SYMBOL(arch_hibernation_header_restore);
 
-/*
- * Copies length bytes, starting at src_start into an new page,
- * perform cache maintentance, then maps it at the specified address low
- * address as executable.
- *
- * This is used by hibernate to copy the code it needs to execute when
- * overwriting the kernel text. This function generates a new set of page
- * tables, which it loads into ttbr0.
- *
- * Length is provided as we probably only want 4K of data, even on a 64K
- * page system.
- */
-static int create_safe_exec_page(void *src_start, size_t length,
-                                 unsigned long dst_addr,
-                                 phys_addr_t *phys_dst_addr)
+static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
+                              unsigned long dst_addr,
+                              pgprot_t pgprot)
 {
-        void *page = (void *)get_safe_page(GFP_ATOMIC);
-        pgd_t *trans_pgd;
         pgd_t *pgdp;
         pud_t *pudp;
         pmd_t *pmdp;
         pte_t *ptep;
 
-        if (!page)
-                return -ENOMEM;
-
-        memcpy(page, src_start, length);
-        __flush_icache_range((unsigned long)page, (unsigned long)page + length);
-
-        trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
-        if (!trans_pgd)
-                return -ENOMEM;
-
         pgdp = pgd_offset_raw(trans_pgd, dst_addr);
         if (pgd_none(READ_ONCE(*pgdp))) {
                 pudp = (void *)get_safe_page(GFP_ATOMIC);
@@ -242,6 +218,44 @@ static int create_safe_exec_page(void *src_start, size_t length,
         ptep = pte_offset_kernel(pmdp, dst_addr);
         set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC));
 
+        return 0;
+}
+
+/*
+ * Copies length bytes, starting at src_start into an new page,
+ * perform cache maintenance, then maps it at the specified address low
+ * address as executable.
+ *
+ * This is used by hibernate to copy the code it needs to execute when
+ * overwriting the kernel text. This function generates a new set of page
+ * tables, which it loads into ttbr0.
+ *
+ * Length is provided as we probably only want 4K of data, even on a 64K
+ * page system.
+ */
+static int create_safe_exec_page(void *src_start, size_t length,
+                                 unsigned long dst_addr,
+                                 phys_addr_t *phys_dst_addr)
+{
+        void *page = (void *)get_safe_page(GFP_ATOMIC);
+        pgd_t *trans_pgd;
+        int rc;
+
+        if (!page)
+                return -ENOMEM;
+
+        memcpy(page, src_start, length);
+        __flush_icache_range((unsigned long)page, (unsigned long)page + length);
+
+        trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
+        if (!trans_pgd)
+                return -ENOMEM;
+
+        rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
+                                PAGE_KERNEL_EXEC);
+        if (rc)
+                return rc;
+
         /*
          * Load our new page tables. A strict BBM approach requires that we
          * ensure that TLBs are free of any entries that may overlap with the
@@ -462,6 +476,24 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
         return 0;
 }
 
+static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
+                                 unsigned long end)
+{
+        int rc;
+        pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
+
+        if (!trans_pgd) {
+                pr_err("Failed to allocate memory for temporary page tables.\n");
+                return -ENOMEM;
+        }
+
+        rc = copy_page_tables(trans_pgd, start, end);
+        if (!rc)
+                *dst_pgdp = trans_pgd;
+
+        return rc;
+}
+
 /*
  * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit().
  *
@@ -483,12 +515,7 @@ int swsusp_arch_resume(void)
          * Create a second copy of just the linear map, and use this when
          * restoring.
          */
-        tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-        if (!tmp_pg_dir) {
-                pr_err("Failed to allocate memory for temporary page tables.\n");
-                return -ENOMEM;
-        }
-        rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END);
+        rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
         if (rc)
                 return rc;
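The commit message's note that get_safe_page() will eventually be passed in as a function pointer suggests a follow-up roughly like the sketch below. The struct, field, and function names here are assumptions for illustration and are not defined by this patch; they only show how kexec could later supply its own page allocator while hibernate keeps today's behaviour.

/*
 * Hedged sketch of the anticipated allocator indirection; nothing here is
 * part of this patch.
 */
struct trans_pgd_info {
        void *(*trans_alloc_page)(void *arg);   /* returns a zeroed page or NULL */
        void *trans_alloc_arg;                  /* opaque argument for the allocator */
};

/* hibernate would wrap its existing safe-page allocator: */
static void *hibernate_page_alloc(void *arg)
{
        return (void *)get_safe_page((__force gfp_t)(unsigned long)arg);
}

/*
 * and the helpers above would gain an info argument, e.g.:
 *   int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
 *                          void *page, unsigned long dst_addr, pgprot_t pgprot);
 */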