path: root/arch/arm64/mm
author	Mark Rutland <mark.rutland@arm.com>	2016-01-25 14:45:05 +0300
committer	Catalin Marinas <catalin.marinas@arm.com>	2016-02-16 18:10:46 +0300
commit	316b39db06718d59d82736df9fc65cf05b467cc7 (patch)
tree	2c82f58465afb2c8427d802d7f78e4d6657e3b9e /arch/arm64/mm
parent	dca56dca7124709f3dfca81afe61b4d98eb9cacf (diff)
download	linux-316b39db06718d59d82736df9fc65cf05b467cc7.tar.xz
arm64: mm: avoid redundant __pa(__va(x))
When we "upgrade" to a section mapping, we free any table we made redundant by giving it back to memblock. To get the PA, we acquire the physical address and convert this to a VA, then subsequently convert this back to a PA. This works currently, but will not work if the tables are not accessed via linear map VAs (e.g. is we use fixmap slots). This patch uses {pmd,pud}_page_paddr to acquire the PA. This avoids the __pa(__va()) round trip, saving some work and avoiding reliance on the linear mapping. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> Tested-by: Jeremy Linton <jeremy.linton@arm.com> Cc: Laura Abbott <labbott@fedoraproject.org> Cc: Will Deacon <will.deacon@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--	arch/arm64/mm/mmu.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c3ea9df80712..8879aed4b1cb 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -171,7 +171,7 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
-					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
+					phys_addr_t table = pmd_page_paddr(old_pmd);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
@@ -232,7 +232,7 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
-					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+					phys_addr_t table = pud_page_paddr(old_pud);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
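To spell out the round trip the patch removes, here is a sketch of the effective computation (not the literal call chain; the table_old/table_new names are purely illustrative):

	/*
	 * old code, effectively: PA -> linear-map VA -> PA; this only works
	 * while the table is reachable through the linear mapping
	 */
	phys_addr_t table_old = __pa(__va(pmd_page_paddr(old_pmd)));

	/* new code: read the PA straight from the descriptor */
	phys_addr_t table_new = pmd_page_paddr(old_pmd);

The pud-level hunk is the same transformation, with pmd_offset replaced by pud_page_paddr.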