From 7ef939054139ef857cebbec07cbd12d7cf7beedd Mon Sep 17 00:00:00 2001
From: Jeff Dike
Date: Sat, 3 Sep 2005 15:57:52 -0700
Subject: [PATCH] uml: fix x86_64 page leak

We were leaking pmd pages when 3_LEVEL_PGTABLES was enabled.  This fixes
that.

Signed-off-by: Jeff Dike
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/um/kernel/skas/include/mmu-skas.h | 4 ++++
 arch/um/kernel/skas/mmu.c              | 9 ++++++++-
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/arch/um/kernel/skas/include/mmu-skas.h b/arch/um/kernel/skas/include/mmu-skas.h
index 278b72f1d9ad..09536f81ee42 100644
--- a/arch/um/kernel/skas/include/mmu-skas.h
+++ b/arch/um/kernel/skas/include/mmu-skas.h
@@ -6,11 +6,15 @@
 #ifndef __SKAS_MMU_H
 #define __SKAS_MMU_H
 
+#include "linux/config.h"
 #include "mm_id.h"
 
 struct mmu_context_skas {
 	struct mm_id id;
 	unsigned long last_page_table;
+#ifdef CONFIG_3_LEVEL_PGTABLES
+	unsigned long last_pmd;
+#endif
 };
 
 extern void switch_mm_skas(struct mm_id * mm_idp);
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index d837223e22af..240143b616a2 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -56,6 +56,9 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	 */
 	mm->context.skas.last_page_table = pmd_page_kernel(*pmd);
+#ifdef CONFIG_3_LEVEL_PGTABLES
+	mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
+#endif
 
 	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
 	*pte = pte_mkexec(*pte);
@@ -144,6 +147,10 @@ void destroy_context_skas(struct mm_struct *mm)
 
 	if(!proc_mm || !ptrace_faultinfo){
 		free_page(mmu->id.stack);
-		free_page(mmu->last_page_table);
+		pte_free_kernel((pte_t *) mmu->last_page_table);
+		dec_page_state(nr_page_table_pages);
+#ifdef CONFIG_3_LEVEL_PGTABLES
+		pmd_free((pmd_t *) mmu->last_pmd);
+#endif
 	}
 }
-- 
cgit v1.2.3
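
The bug pattern this patch closes is a simple alloc/free mismatch: when the
extra pmd level exists behind CONFIG_3_LEVEL_PGTABLES, the teardown path has
to remember and release that level too, not just the pte page.  The sketch
below is only a userspace analogy of that pairing, not kernel code; the names
ctx_alloc, ctx_free and HAVE_THIRD_LEVEL are hypothetical stand-ins for
init_stub_pte, destroy_context_skas and CONFIG_3_LEVEL_PGTABLES.

/* Userspace analogy of the leak fixed above: track the extra level and
 * free it in teardown, or it leaks on every context destruction. */
#include <stdio.h>
#include <stdlib.h>

#define HAVE_THIRD_LEVEL 1		/* stand-in for CONFIG_3_LEVEL_PGTABLES */
#define PAGE_SIZE 4096

struct ctx {
	void *last_page_table;		/* always allocated */
#if HAVE_THIRD_LEVEL
	void *last_pmd;			/* extra level: must be remembered... */
#endif
};

static void ctx_alloc(struct ctx *c)
{
	c->last_page_table = malloc(PAGE_SIZE);
#if HAVE_THIRD_LEVEL
	c->last_pmd = malloc(PAGE_SIZE);
#endif
}

static void ctx_free(struct ctx *c)
{
	free(c->last_page_table);
#if HAVE_THIRD_LEVEL
	free(c->last_pmd);		/* ...and freed here, or it leaks */
#endif
}

int main(void)
{
	struct ctx c;

	ctx_alloc(&c);
	ctx_free(&c);
	printf("both levels freed\n");
	return 0;
}

In the patch itself the same pairing is kept by recording last_pmd in
init_stub_pte() and calling pmd_free() on it in destroy_context_skas()
when CONFIG_3_LEVEL_PGTABLES is set.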