author		Jeff Dike <jdike@addtoit.com>	2008-02-05 09:31:07 +0300
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 20:44:29 +0300
commit		0b4e273fb83bce5dd8e166a4defb16ebdd215abf (patch)
tree		18f30fe9092dacf9a7e3474a9b0d692d91962242 /arch/um/kernel/tlb.c
parent		909e90d3c410b684e564729145f7c20dad887757 (diff)
download	linux-0b4e273fb83bce5dd8e166a4defb16ebdd215abf.tar.xz
uml: customize tlb.h
Customize the hooks in tlb.h to optimize TLB flushing some more.
Add start and end fields to tlb_gather_mmu, which are used to limit
the address space range scanned when a region is unmapped.
The interfaces which just free page tables, without actually changing
mappings, don't need to cause a TLB flush.
Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
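[Editor's note: the diffstat below is limited to tlb.c, so the tlb.h side described in the message is not shown here. As a rough sketch of what hooks of that shape would look like, reconstructed from the description above rather than from the patch itself; helper and field names such as need_flush, init_tlb_gather and __tlb_remove_tlb_entry follow the usual mmu_gather conventions and are assumptions:]

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush;	/* unmapped any ptes? */
	unsigned long		start;		/* bottom of the range to flush */
	unsigned long		end;		/* top of the range to flush */
	unsigned int		fullmm;		/* non-zero means flush the whole mm */
};

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	/* Start with an empty range; it grows as entries are removed. */
	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

/* Each pte removed widens [start, end), so only that span is flushed. */
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	tlb->need_flush = 1;
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

/*
 * The pte_free_tlb()/pmd_free_tlb() family frees page tables without
 * changing any mappings, so per the message those hooks need not force
 * a TLB flush.
 */

The gathered [start, end) range would then be handed to the new flush_tlb_mm_range() added in tlb.c below.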
Diffstat (limited to 'arch/um/kernel/tlb.c')
-rw-r--r--	arch/um/kernel/tlb.c	25
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 8127ca8d5957..0b6a77def311 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -193,18 +193,18 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 		if (!pte_young(*pte)) {
 			r = 0;
 			w = 0;
-		} else if (!pte_dirty(*pte)) {
+		} else if (!pte_dirty(*pte))
 			w = 0;
-		}
+
 		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
 			(x ? UM_PROT_EXEC : 0));
 		if (hvc->force || pte_newpage(*pte)) {
 			if (pte_present(*pte))
 				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
 					       PAGE_SIZE, prot, hvc);
-			else ret = add_munmap(addr, PAGE_SIZE, hvc);
-		}
-		else if (pte_newprot(*pte))
+			else
+				ret = add_munmap(addr, PAGE_SIZE, hvc);
+		} else if (pte_newprot(*pte))
 			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
 		*pte = pte_mkuptodate(*pte);
 	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
@@ -500,7 +500,8 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	else fix_range(vma->vm_mm, start, end, 0);
 }
 
-void flush_tlb_mm(struct mm_struct *mm)
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+			unsigned long end)
 {
 	/*
 	 * Don't bother flushing if this address space is about to be
@@ -509,7 +510,17 @@
 	if (atomic_read(&mm->mm_users) == 0)
 		return;
 
-	fix_range(mm, 0, TASK_SIZE, 0);
+	fix_range(mm, start, end, 0);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma = mm->mmap;
+
+	while (vma != NULL) {
+		fix_range(mm, vma->vm_start, vma->vm_end, 0);
+		vma = vma->vm_next;
+	}
 }
 
 void force_flush_all(void)
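[Editor's note: tying the two halves together, on flush the gather hook would hand its accumulated range to the new tlb.c entry point. Again a hedged sketch rather than the patch itself; tlb_flush_mmu is the conventional hook name, and its exact signature here is an assumption:]

static inline void tlb_flush_mmu(struct mmu_gather *tlb,
				 unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;

	/* Flush only the span the gather actually touched. */
	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);

	init_tlb_gather(tlb);
}

Note also the redefined flush_tlb_mm() in the diff: it now walks the mm's vma list and fixes each mapped region in turn instead of scanning all of 0..TASK_SIZE, so holes between mappings are no longer traversed.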