author    Jeremy Fitzhardinge <jeremy@goop.org>    2007-05-02 21:27:15 +0400
committer Andi Kleen <andi@basil.nowhere.org>      2007-05-02 21:27:15 +0400
commit    ce6234b5298902aaec831a67d5f8d9bd2ef5a488 (patch)
tree      939c22684e11a4f5f17abb89c4898f016e878e21 /include/asm-i386/pgtable.h
parent    a27fe809b82c5e18932fcceded28d0d1481ce7bb (diff)
download  linux-ce6234b5298902aaec831a67d5f8d9bd2ef5a488.tar.xz
[PATCH] i386: PARAVIRT: add kmap_atomic_pte for mapping highpte pages
Xen and VMI both have special requirements when mapping a highmem pte page into the kernel address space. These can be dealt with by adding a new kmap_atomic_pte() function for mapping highptes, and hooking it into the paravirt_ops infrastructure.

Xen specifically wants to map the pte page RO, so this patch exposes a helper function, kmap_atomic_prot, which maps the page with the specified page protections.

This also adds a kmap_flush_unused() function to clear out the cached kmap mappings. Xen needs this to clear out any potential stray RW mappings of pages which will become part of a pagetable.

[ Zach - vmi.c will need some attention after this patch. It wasn't immediately obvious to me what needs to be done. ]

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Zachary Amsden <zach@vmware.com>
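For orientation, here is a minimal C sketch of the shape of the hook described above. This is not the patch's code: the struct name pv_ops_fragment and the function names native_kmap_atomic_pte and xen_style_kmap_atomic_pte are illustrative assumptions; only kmap_atomic(), kmap_atomic_prot(), and kmap_flush_unused() come from the patch description.

/*
 * Sketch only, not code from this patch.  Illustrates how a
 * kmap_atomic_pte() hook can be routed through a paravirt ops table.
 * pv_ops_fragment, native_kmap_atomic_pte and xen_style_kmap_atomic_pte
 * are invented names for illustration.
 */
#include <linux/highmem.h>	/* kmap_atomic(), kmap_atomic_prot() */
#include <linux/mm.h>		/* struct page */

/* The hook lives in the paravirt ops table, so each backend decides
 * how a highmem pte page gets mapped into kernel address space. */
struct pv_ops_fragment {
	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
};

/* Native kernels need nothing special: an ordinary atomic kmap. */
static void *native_kmap_atomic_pte(struct page *page, enum km_type type)
{
	return kmap_atomic(page, type);
}

/* A Xen-style backend maps the pte page read-only via the new
 * kmap_atomic_prot() helper, since pagetable writes go through the
 * hypervisor rather than through this mapping. */
static void *xen_style_kmap_atomic_pte(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, PAGE_KERNEL_RO);
}

The point of the indirection is visible in the diff below: pte_offset_map() no longer hardcodes kmap_atomic(), so a hypervisor backend can substitute a protection-aware mapping.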
Diffstat (limited to 'include/asm-i386/pgtable.h')
-rw-r--r--  include/asm-i386/pgtable.h  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 6599f2aa91b7..befc697821e5 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -476,9 +476,9 @@ extern pte_t *lookup_address(unsigned long address);
 
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
 #define pte_offset_map_nested(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 #else
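Callers are unaffected by this change, since the indirection is hidden inside the macros. A hypothetical caller, shown only to illustrate the unchanged usage pattern (pte_is_present() is an invented name, not kernel code):

#include <linux/highmem.h>
#include <asm/pgtable.h>

/* Hypothetical example: demonstrates that users of pte_offset_map()
 * and pte_unmap() are untouched by the kmap_atomic_pte() indirection
 * introduced in this patch. */
static int pte_is_present(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = pte_offset_map(pmd, addr);	/* may go through kmap_atomic_pte() */
	int ret = pte_present(*pte);
	pte_unmap(pte);				/* drops the KM_PTE0 mapping */
	return ret;
}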