author    Russell King <rmk+kernel@arm.linux.org.uk>    2009-12-18 19:40:18 +0300
committer Russell King <rmk+kernel@arm.linux.org.uk>    2010-02-20 19:41:46 +0300
commit    4b3073e1c53a256275f1079c0fbfbe85883d9275 (patch)
tree      a0fa98cb75edbbc58c43bbe38ac4c6da0913ae6d
parent    ed42acaef1a9d51631a31b55e9ed52d400430492 (diff)
download  linux-4b3073e1c53a256275f1079c0fbfbe85883d9275.tar.xz
MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself
On VIVT ARM, when we have multiple shared mappings of the same file
in the same MM, we need to ensure that we have coherency across all
copies. We do this via make_coherent() by making the pages
uncacheable.
This used to work fine, until we allowed highmem with highpte - we
now have a page table which is mapped as required, and is not available
for modification via update_mmu_cache().
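To make the constraint concrete, here is a minimal, illustrative caller in the shape of the generic fault path (not code from this patch; example_touch_pte is a made-up name): with CONFIG_HIGHPTE the PTE page can live in highmem and is only reachable through the temporary mapping set up by pte_offset_map_lock(), so the arch hook needs a pointer into that mapping rather than a copied pte_t value.

    static void example_touch_pte(struct mm_struct *mm,
                                  struct vm_area_struct *vma,
                                  pmd_t *pmd, unsigned long address)
    {
        spinlock_t *ptl;
        /* maps (kmaps) the PTE page and takes the pte lock */
        pte_t *ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte_t entry = pte_mkyoung(*ptep);

        /* the hook receives a pointer into the mapped page table */
        if (ptep_set_access_flags(vma, address, ptep, entry, 0))
            update_mmu_cache(vma, address, ptep);

        /* drops the temporary mapping of the PTE page */
        pte_unmap_unlock(ptep, ptl);
    }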
Ralf Baechle suggested getting rid of the PTE value passed to
update_mmu_cache():
  On MIPS update_mmu_cache() calls __update_tlb() which walks pagetables
  to construct a pointer to the pte again. Passing a pte_t * is much
  more elegant. Maybe we might even replace the pte argument with the
  pte_t?
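On the arch side this means the hook simply dereferences the pointer once; the MIPS and Score hunks in the diff below take exactly this shape (shown here as a sketch):

    static inline void update_mmu_cache(struct vm_area_struct *vma,
                                        unsigned long address, pte_t *ptep)
    {
        pte_t pte = *ptep;  /* read the entry through the mapped page table */

        __update_tlb(vma, address, pte);
        __update_cache(vma, address, pte);
    }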
Ben Herrenschmidt would also like the pte pointer for PowerPC:
  Passing the ptep in there is exactly what I want. I want that
  -instead- of the PTE value, because I have issue on some ppc cases,
  for I$/D$ coherency, where set_pte_at() may decide to mask out the
  _PAGE_EXEC.
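In other words (an illustrative helper, not code from this patch; example_install_pte is a made-up name): set_pte_at() may store a filtered entry, so reading back through the pointer is the only way for the hook to see the PTE as it was actually installed.

    static void example_install_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                                    unsigned long address, pte_t *ptep, pte_t entry)
    {
        set_pte_at(mm, address, ptep, entry);  /* may mask bits, e.g. _PAGE_EXEC */
        update_mmu_cache(vma, address, ptep);  /* *ptep is the entry as installed */
    }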
So, pass the mapped page table pointer into update_mmu_cache(), remove
the PTE value argument, and update all implementations and call sites
to suit.
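The resulting prototype change, applied across every architecture in the diff below, is simply:

    /* before */
    void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte);

    /* after */
    void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

Callers now hand over the page-table slot they already hold (page_table, ptep) instead of the pte_t value they computed.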
Includes a fix from Stephen Rothwell:
sparc: fix fallout from update_mmu_cache API change
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
40 files changed, 69 insertions, 62 deletions
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index da42ab414c48..74a8b6fefa29 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -88,12 +88,12 @@ changes occur:
 This is used primarily during fault processing.
 5) void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
 At the end of every page fault, this routine is invoked to
 tell the architecture specific code that a translation
- described by "pte" now exists at virtual address "address"
- for address space "vma->vm_mm", in the software page tables.
+ now exists at virtual address "address" for address space
+ "vma->vm_mm", in the software page tables.
 A port may use this information in any way it so chooses.
 For example, it could use this event to pre-load TLB
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 3f0c59f6d8aa..71a243294142 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -329,7 +329,7 @@ extern pgd_t swapper_pg_dir[1024];
 * tables contain all the necessary information.
 */
 extern inline void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
 {
 }
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c2f1605de359..e085e2c545eb 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -529,7 +529,8 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 * cache entries for the kernels virtual memory range are written
 * back to the page.
 */
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
+extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep);
 #endif
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index ae88f2c3a6df..c45f9bb318ad 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -149,9 +149,10 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne
 *
 * Note that the pte lock will be held.
 */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
 {
- unsigned long pfn = pte_pfn(pte);
+ unsigned long pfn = pte_pfn(*ptep);
 struct address_space *mapping;
 struct page *page;
diff --git a/arch/avr32/include/asm/pgtable.h b/arch/avr32/include/asm/pgtable.h
index fecdda16f444..a9ae30c41e74 100644
--- a/arch/avr32/include/asm/pgtable.h
+++ b/arch/avr32/include/asm/pgtable.h
@@ -325,7 +325,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte);
+ unsigned long address, pte_t *ptep);
 /*
 * Encode and decode a swap entry
diff --git a/arch/avr32/mm/tlb.c b/arch/avr32/mm/tlb.c
index 06677be98ffb..0da23109f817 100644
--- a/arch/avr32/mm/tlb.c
+++ b/arch/avr32/mm/tlb.c
@@ -101,7 +101,7 @@ static void update_dtlb(unsigned long address, pte_t pte)
 }
 void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
 {
 unsigned long flags;
@@ -110,7 +110,7 @@ void update_mmu_cache(struct vm_area_struct *vma,
 return;
 local_irq_save(flags);
- update_dtlb(address, pte);
+ update_dtlb(address, *ptep);
 local_irq_restore(flags);
 }
diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h
index 1fcce00f01f4..99ea6cd1b143 100644
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -270,7 +270,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
 * Actually I am not sure on what this could be used for.
 */
 static inline void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
 {
 }
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index 22c60692b551..c18b0d32e636 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -505,7 +505,7 @@ static inline int pte_file(pte_t pte)
 /*
 * preload information about a newly instantiated PTE into the SCR0/SCR1 PGE cache
 */
-static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 struct mm_struct *mm;
 unsigned long ampr;
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 69bf13857a9f..c3286f42e501 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -462,7 +462,7 @@ pte_same (pte_t a, pte_t b)
 return pte_val(a) == pte_val(b);
 }
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init (void);
diff --git a/arch/m32r/include/asm/tlbflush.h b/arch/m32r/include/asm/tlbflush.h
index 0ef95307784e..92614b0ccf17 100644
--- a/arch/m32r/include/asm/tlbflush.h
+++ b/arch/m32r/include/asm/tlbflush.h
@@ -92,6 +92,6 @@ static __inline__ void __flush_tlb_all(void)
 );
 }
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 #endif /* _ASM_M32R_TLBFLUSH_H */
diff --git a/arch/m32r/mm/fault-nommu.c b/arch/m32r/mm/fault-nommu.c
index 88469178ea6b..888aab1157ed 100644
--- a/arch/m32r/mm/fault-nommu.c
+++ b/arch/m32r/mm/fault-nommu.c
@@ -95,7 +95,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 * update_mmu_cache()
 *======================================================================*/
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
- pte_t pte)
+ pte_t *ptep)
 {
 BUG();
 }
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 7274b47f4c22..28ee389e5f5a 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -336,7 +336,7 @@ vmalloc_fault:
 addr = (address & PAGE_MASK);
 set_thread_fault_code(error_code);
- update_mmu_cache(NULL, addr, *pte_k);
+ update_mmu_cache(NULL, addr, pte_k);
 set_thread_fault_code(0);
 return;
 }
@@ -349,7 +349,7 @@ vmalloc_fault:
 #define ITLB_END (unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
 #define DTLB_END (unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
- pte_t pte)
+ pte_t *ptep)
 {
 volatile unsigned long *entry1, *entry2;
 unsigned long pte_data, flags;
@@ -365,7 +365,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
 vaddr = (vaddr & PAGE_MASK) | get_asid();
- pte_data = pte_val(pte);
+ pte_data = pte_val(*ptep);
 #ifdef CONFIG_CHIP_OPSP
 entry1 = (unsigned long *)ITLB_BASE;
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index aca0e28581c7..87174c904d2b 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -115,7 +115,7 @@ extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);
 * they are updated on demand.
 */
 static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
 {
 }
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index eb31a0e8a772..10ec70cd8735 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -38,7 +38,7 @@ static inline void local_flush_tlb_range(struct vm_area_struct *vma,
 #define flush_tlb_kernel_range(start, end) do { } while (0)
-#define update_mmu_cache(vma, addr, pte) do { } while (0)
+#define update_mmu_cache(vma, addr, ptep) do { } while (0)
 #define flush_tlb_all local_flush_tlb_all
 #define flush_tlb_mm local_flush_tlb_mm
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 1854336e56a2..c56bf8afc099 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -362,8 +362,9 @@ extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
 pte_t pte);
 static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
 {
+ pte_t pte = *ptep;
 __update_tlb(vma, address, pte);
 __update_cache(vma, address, pte);
 }
diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h
index 6dc30fc827c4..16d88577f3e0 100644
--- a/arch/mn10300/include/asm/pgtable.h
+++ b/arch/mn10300/include/asm/pgtable.h
@@ -466,7 +466,7 @@ static inline int set_kernel_exec(unsigned long vaddr, int enable)
 * the kernel page tables containing the necessary information by tlb-mn10300.S
 */
 extern void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t pte);
+ unsigned long address, pte_t *ptep);
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
index 31c9d27a75ae..36ba02191d40 100644
--- a/arch/mn10300/mm/mmu-context.c
+++ b/arch/mn10300/mm/mmu-context.c
@@ -51,9 +51,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 /*
 * preemptively set a TLB entry
 */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 unsigned long pteu, ptel, cnx, flags;
+ pte_t pte = *ptep;
 addr &= PAGE_MASK;
 ptel = pte_val(pte) & ~(xPTEL_UNUSED1 | xPTEL_UNUSED2);
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index a27d2e200fb2..01c15035e783 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -410,7 +410,7 @@ extern void paging_init (void);
 #define PG_dcache_dirty PG_arch_1
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 /* Encode and de-code a swap entry */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index b6ed34de14e1..1054baa2fc69 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -68,9 +68,9 @@ flush_cache_all_local(void)
 EXPORT_SYMBOL(flush_cache_all_local);
 void
-update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
- struct page *page = pte_page(pte);
+ struct page *page = pte_page(*ptep);
 if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
 test_bit(PG_dcache_dirty, &page->flags)) {
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 21207e54825b..89f158731ce3 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -209,7 +209,7 @@ extern void paging_init(void);
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
 unsigned long end, int write, struct page **pages, int *nr);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b9b152558f9c..311224cdb7ad 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -494,13 +494,13 @@ EXPORT_SYMBOL(flush_icache_user_range);
 * This must always be called with the pte lock held.
 */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
- pte_t pte)
+ pte_t *ptep)
 {
 #ifdef CONFIG_PPC_STD_MMU
 unsigned long access = 0, trap;
 /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
- if (!pte_young(pte) || address >= TASK_SIZE)
+ if (!pte_young(*ptep) || address >= TASK_SIZE)
 return;
 /* We try to figure out if we are coming from an instruction
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e2fa79cf0614..9b5b9189c15e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -43,7 +43,7 @@ extern void vmem_map_init(void);
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
 /*
 * ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index 674934b40170..ccf38f06c57d 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -272,8 +272,9 @@ extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
 pte_t pte);
 static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
 {
+ pte_t pte = *ptep;
 __update_tlb(vma, address, pte);
 __update_cache(vma, address, pte);
 }
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index ba3046e4f06f..1ff93ac1aa44 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -165,8 +165,9 @@ extern void __update_tlb(struct vm_area_struct *vma,
 unsigned long address, pte_t pte);
 static inline void
-update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
+ pte_t pte = *ptep;
 __update_cache(vma, address, pte);
 __update_tlb(vma, address, pte);
 }
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 47530104e0ad..1677b5ee191d 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -371,7 +371,7 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
 local_flush_tlb_one(get_asid(), address & PAGE_MASK);
 #endif
- update_mmu_cache(NULL, address, entry);
+ update_mmu_cache(NULL, address, pte);
 return 0;
 }
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index e0cabe790ec1..77f906d8cc21 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -330,9 +330,9 @@ BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)
 #define FAULT_CODE_WRITE 0x2
 #define FAULT_CODE_USER 0x4
-BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t)
+BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t *)
-#define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte)
+#define update_mmu_cache(vma,addr,ptep) BTFIXUP_CALL(update_mmu_cache)(vma,addr,ptep)
 BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long, unsigned long, unsigned int)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index f3cb790fa2ae..f5b5fa76c02d 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -706,7 +706,7 @@ extern unsigned long find_ecache_flush_span(unsigned long size);
 #define mmu_unlockarea(vaddr, len) do { } while(0)
 struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 /* Encode and de-code a swap entry */
 #define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0xffUL)
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index b99f81c4906f..43e20efb2511 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -370,7 +370,7 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
 unsigned long address)
 {
 extern void sun4c_update_mmu_cache(struct vm_area_struct *,
- unsigned long,pte_t);
+ unsigned long,pte_t *);
 extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
 struct task_struct *tsk = current;
 struct mm_struct *mm = tsk->mm;
@@ -447,7 +447,7 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
 * on the CPU and doing a shrink_mmap() on this vma.
 */
 sun4c_update_mmu_cache (find_vma(current->mm, address), address,
- *ptep);
+ ptep);
 else
 do_sparc_fault(regs, text_fault, write, address);
 }
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 1886d37d411b..9245a822a2f1 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -289,12 +289,13 @@ static void flush_dcache(unsigned long pfn)
 }
 }
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 struct mm_struct *mm;
 struct tsb *tsb;
 unsigned long tag, flags;
 unsigned long tsb_index, tsb_hash_shift;
+ pte_t pte = *ptep;
 if (tlb_type != hypervisor) {
 unsigned long pfn = pte_pfn(pte);
diff --git a/arch/sparc/mm/nosun4c.c b/arch/sparc/mm/nosun4c.c
index 196263f895b7..4e62c27147c4 100644
--- a/arch/sparc/mm/nosun4c.c
+++ b/arch/sparc/mm/nosun4c.c
@@ -62,7 +62,7 @@ pte_t *sun4c_pte_offset_kernel(pmd_t *dir, unsigned long address)
 return NULL;
 }
-void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 }
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 367321a030dd..df49b200ca4c 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -694,7 +694,7 @@ extern void tsunami_setup_blockops(void);
 * The following code is a deadwood that may be necessary when
 * we start to make precise page flushes again. --zaitcev
 */
-static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
+static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
 {
 #if 0
 static unsigned long last;
@@ -703,10 +703,10 @@ static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long ad
 if (address == last) {
 val = srmmu_hwprobe(address);
- if (val != 0 && pte_val(pte) != val) {
+ if (val != 0 && pte_val(*ptep) != val) {
 printk("swift_update_mmu_cache: "
 "addr %lx put %08x probed %08x from %p\n",
- address, pte_val(pte), val,
+ address, pte_val(*ptep), val,
 __builtin_return_address(0));
 srmmu_flush_whole_tlb();
 }
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index a89baf0d875a..18652534b91a 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -1887,7 +1887,7 @@ static void sun4c_check_pgt_cache(int low, int high)
 /* An experiment, turn off by default for now... -DaveM */
 #define SUN4C_PRELOAD_PSEG
-void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 unsigned long flags;
 int pseg;
@@ -1929,7 +1929,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
 start += PAGE_SIZE;
 }
 #ifndef SUN4C_PRELOAD_PSEG
- sun4c_put_pte(address, pte_val(pte));
+ sun4c_put_pte(address, pte_val(*ptep));
 #endif
 local_irq_restore(flags);
 return;
@@ -1940,7 +1940,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p
 add_lru(entry);
 }
- sun4c_put_pte(address, pte_val(pte));
+ sun4c_put_pte(address, pte_val(*ptep));
 local_irq_restore(flags);
 }
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 9ce3f165111a..a9f7251b4a8d 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -345,7 +345,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 struct mm_struct;
 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
-#define update_mmu_cache(vma,address,pte) do ; while (0)
+#define update_mmu_cache(vma,address,ptep) do ; while (0)
 /* Encode and de-code a swap entry */
 #define __swp_type(x) (((x).val >> 4) & 0x3f)
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 01fd9461d323..a28668396508 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -80,7 +80,7 @@ do { \
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index c57a30117149..181be528c612 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -129,7 +129,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 #define pte_unmap(pte) /* NOP */
 #define pte_unmap_nested(pte) /* NOP */
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
 /* Encode and de-code a swap entry */
 #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index a138770c358e..76bf35554117 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -394,7 +394,7 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 #define kern_addr_valid(addr) (1)
 extern void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte);
+ unsigned long address, pte_t *ptep);
 /*
 * remap a physical page `pfn' of size `size' with page protection `prot'
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 3ba990c67676..85df4655d326 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -147,9 +147,9 @@ void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
 #endif
 void
-update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
+update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 {
- unsigned long pfn = pte_pfn(pte);
+ unsigned long pfn = pte_pfn(*ptep);
 struct page *page;
 if (!pfn_valid(pfn))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e91b81b63670..94cd94df56e3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2088,7 +2088,7 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, ptep);
 }
 }
@@ -2559,7 +2559,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 entry = pte_mkyoung(entry);
 if (huge_ptep_set_access_flags(vma, address, ptep, entry, flags & FAULT_FLAG_WRITE))
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, ptep);
 out_page_table_lock:
 spin_unlock(&mm->page_table_lock);
diff --git a/mm/memory.c b/mm/memory.c
index 09e4b1be7b67..72fb5f39bccc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1593,7 +1593,7 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 /* Ok, finally just insert the thing.. */
 entry = pte_mkspecial(pfn_pte(pfn, prot));
 set_pte_at(mm, addr, pte, entry);
- update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+ update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 retval = 0;
 out_unlock:
@@ -2116,7 +2116,7 @@ reuse:
 entry = pte_mkyoung(orig_pte);
 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 if (ptep_set_access_flags(vma, address, page_table, entry,1))
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, page_table);
 ret |= VM_FAULT_WRITE;
 goto unlock;
 }
@@ -2185,7 +2185,7 @@ gotten:
 * new page to be mapped directly into the secondary page table.
 */
 set_pte_at_notify(mm, address, page_table, entry);
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, page_table);
 if (old_page) {
 /*
 * Only after switching the pte to the new page may
@@ -2629,7 +2629,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 /* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, address, pte);
+ update_mmu_cache(vma, address, page_table);
 unlock:
 pte_unmap_unlock(page_table, ptl);
 out:
@@ -2694,7 +2694,7 @@ setpte:
 set_pte_at(mm, address, page_table, entry);
 /* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, page_table);
 unlock:
 pte_unmap_unlock(page_table, ptl);
 return 0;
@@ -2855,7 +2855,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 set_pte_at(mm, address, page_table, entry);
 /* no need to invalidate: a not-present page won't be cached */
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, page_table);
 } else {
 if (charged)
 mem_cgroup_uncharge_page(page);
@@ -2992,7 +2992,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 }
 entry = pte_mkyoung(entry);
 if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, pte);
 } else {
 /*
 * This is needed only for protection faults but the arch code
diff --git a/mm/migrate.c b/mm/migrate.c
index efddbf0926b2..e58e5da25b91 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -134,7 +134,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 page_add_file_rmap(new);
 /* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, addr, pte);
+ update_mmu_cache(vma, addr, ptep);
 unlock:
 pte_unmap_unlock(ptep, ptl);
 out: