Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--	arch/arm/include/asm/arch_timer.h	|  25
-rw-r--r--	arch/arm/include/asm/atomic.h		| 307
-rw-r--r--	arch/arm/include/asm/dma-mapping.h	|  16
-rw-r--r--	arch/arm/include/asm/io.h		|   1
-rw-r--r--	arch/arm/include/asm/irq_work.h		|  11
-rw-r--r--	arch/arm/include/asm/kvm_mmu.h		|  31
-rw-r--r--	arch/arm/include/asm/pgtable-2level.h	|   2
-rw-r--r--	arch/arm/include/asm/pgtable-3level.h	|  15
-rw-r--r--	arch/arm/include/asm/pgtable.h		|   8
-rw-r--r--	arch/arm/include/asm/tlb.h		|  38
10 files changed, 219 insertions, 235 deletions
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 0704e0cf5571..92793ba69c40 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -99,31 +99,6 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
 	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
 }
 
-static inline void arch_counter_set_user_access(void)
-{
-	u32 cntkctl = arch_timer_get_cntkctl();
-
-	/* Disable user access to both physical/virtual counters/timers */
-	/* Also disable virtual event stream */
-	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
-			| ARCH_TIMER_USR_VT_ACCESS_EN
-			| ARCH_TIMER_VIRT_EVT_EN
-			| ARCH_TIMER_USR_VCT_ACCESS_EN
-			| ARCH_TIMER_USR_PCT_ACCESS_EN);
-	arch_timer_set_cntkctl(cntkctl);
-}
-
-static inline void arch_timer_evtstrm_enable(int divider)
-{
-	u32 cntkctl = arch_timer_get_cntkctl();
-	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
-	/* Set the divider and enable virtual event stream */
-	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
-			| ARCH_TIMER_VIRT_EVT_EN;
-	arch_timer_set_cntkctl(cntkctl);
-	elf_hwcap |= HWCAP_EVTSTRM;
-}
-
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 3040359094d9..e22c11970b7b 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -27,7 +27,7 @@
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
@@ -37,84 +37,47 @@
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
-static inline void atomic_add(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	prefetchw(&v->counter);
-	__asm__ __volatile__("@ atomic_add\n"
-"1:	ldrex	%0, [%3]\n"
-"	add	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	smp_mb();
-	prefetchw(&v->counter);
-
-	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
-"	add	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-
-	smp_mb();
-
-	return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	prefetchw(&v->counter);
-	__asm__ __volatile__("@ atomic_sub\n"
-"1:	ldrex	%0, [%3]\n"
-"	sub	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long tmp;
-	int result;
-
-	smp_mb();
-	prefetchw(&v->counter);
-
-	__asm__ __volatile__("@ atomic_sub_return\n"
-"1:	ldrex	%0, [%3]\n"
-"	sub	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "Ir" (i)
-	: "cc");
-
-	smp_mb();
-
-	return result;
+#define ATOMIC_OP(op, c_op, asm_op)					\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	prefetchw(&v->counter);						\
+	__asm__ __volatile__("@ atomic_" #op "\n"			\
+"1:	ldrex	%0, [%3]\n"						\
+"	" #asm_op "	%0, %0, %4\n"					\
+"	strex	%1, %0, [%3]\n"						\
+"	teq	%1, #0\n"						\
+"	bne	1b"							\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
+	: "r" (&v->counter), "Ir" (i)					\
+	: "cc");							\
+}									\
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	smp_mb();							\
+	prefetchw(&v->counter);						\
+									\
+	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
+"1:	ldrex	%0, [%3]\n"						\
+"	" #asm_op "	%0, %0, %4\n"					\
+"	strex	%1, %0, [%3]\n"						\
+"	teq	%1, #0\n"						\
+"	bne	1b"							\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
+	: "r" (&v->counter), "Ir" (i)					\
+	: "cc");							\
+									\
+	smp_mb();							\
+									\
+	return result;							\
 }
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
@@ -174,33 +137,29 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int val;
-
-	raw_local_irq_save(flags);
-	val = v->counter;
-	v->counter = val += i;
-	raw_local_irq_restore(flags);
-
-	return val;
-}
-#define atomic_add(i, v)	(void) atomic_add_return(i, v)
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long flags;
-	int val;
-
-	raw_local_irq_save(flags);
-	val = v->counter;
-	v->counter = val -= i;
-	raw_local_irq_restore(flags);
-
-	return val;
+#define ATOMIC_OP(op, c_op, asm_op)					\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long flags;						\
+									\
+	raw_local_irq_save(flags);					\
+	v->counter c_op i;						\
+	raw_local_irq_restore(flags);					\
+}									\
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long flags;						\
+	int val;							\
+									\
+	raw_local_irq_save(flags);					\
+	v->counter c_op i;						\
+	val = v->counter;						\
+	raw_local_irq_restore(flags);					\
+									\
+	return val;							\
 }
-#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -228,6 +187,17 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #endif /* __LINUX_ARM_ARCH__ */
 
+#define ATOMIC_OPS(op, c_op, asm_op)					\
+	ATOMIC_OP(op, c_op, asm_op)					\
+	ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 #define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
 
 #define atomic_inc(v)		atomic_add(1, v)
@@ -300,89 +270,60 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 }
 #endif
 
-static inline void atomic64_add(long long i, atomic64_t *v)
-{
-	long long result;
-	unsigned long tmp;
-
-	prefetchw(&v->counter);
-	__asm__ __volatile__("@ atomic64_add\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%Q0, %Q0, %Q4\n"
-"	adc	%R0, %R0, %R4\n"
-"	strexd	%1, %0, %H0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
-
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
-{
-	long long result;
-	unsigned long tmp;
-
-	smp_mb();
-	prefetchw(&v->counter);
-
-	__asm__ __volatile__("@ atomic64_add_return\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%Q0, %Q0, %Q4\n"
-"	adc	%R0, %R0, %R4\n"
-"	strexd	%1, %0, %H0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-
-	smp_mb();
-
-	return result;
-}
-
-static inline void atomic64_sub(long long i, atomic64_t *v)
-{
-	long long result;
-	unsigned long tmp;
-
-	prefetchw(&v->counter);
-	__asm__ __volatile__("@ atomic64_sub\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%Q0, %Q0, %Q4\n"
-"	sbc	%R0, %R0, %R4\n"
-"	strexd	%1, %0, %H0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
+#define ATOMIC64_OP(op, op1, op2)					\
+static inline void atomic64_##op(long long i, atomic64_t *v)		\
+{									\
+	long long result;						\
+	unsigned long tmp;						\
+									\
+	prefetchw(&v->counter);						\
+	__asm__ __volatile__("@ atomic64_" #op "\n"			\
+"1:	ldrexd	%0, %H0, [%3]\n"					\
+"	" #op1 " %Q0, %Q0, %Q4\n"					\
+"	" #op2 " %R0, %R0, %R4\n"					\
+"	strexd	%1, %0, %H0, [%3]\n"					\
+"	teq	%1, #0\n"						\
+"	bne	1b"							\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
+	: "r" (&v->counter), "r" (i)					\
+	: "cc");							\
+}									\
+
+#define ATOMIC64_OP_RETURN(op, op1, op2)				\
+static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+{									\
+	long long result;						\
+	unsigned long tmp;						\
+									\
+	smp_mb();							\
+	prefetchw(&v->counter);						\
+									\
+	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
+"1:	ldrexd	%0, %H0, [%3]\n"					\
+"	" #op1 " %Q0, %Q0, %Q4\n"					\
+"	" #op2 " %R0, %R0, %R4\n"					\
+"	strexd	%1, %0, %H0, [%3]\n"					\
+"	teq	%1, #0\n"						\
+"	bne	1b"							\
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
+	: "r" (&v->counter), "r" (i)					\
+	: "cc");							\
+									\
+	smp_mb();							\
+									\
+	return result;							\
 }
 
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-	long long result;
-	unsigned long tmp;
-
-	smp_mb();
-	prefetchw(&v->counter);
-
-	__asm__ __volatile__("@ atomic64_sub_return\n"
-"1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%Q0, %Q0, %Q4\n"
-"	sbc	%R0, %R0, %R4\n"
-"	strexd	%1, %0, %H0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
+#define ATOMIC64_OPS(op, op1, op2)					\
+	ATOMIC64_OP(op, op1, op2)					\
+	ATOMIC64_OP_RETURN(op, op1, op2)
 
-	smp_mb();
+ATOMIC64_OPS(add, adds, adc)
+ATOMIC64_OPS(sub, subs, sbc)
 
-	return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 					 long long new)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c45b61a4b4a5..85738b200023 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -265,22 +265,6 @@ extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		      void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		      struct dma_attrs *attrs);
 
-static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
-{
-	DEFINE_DMA_ATTRS(attrs);
-	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
-}
-
-static inline void dma_free_writecombine(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
-{
-	DEFINE_DMA_ATTRS(attrs);
-	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
-}
-
 /*
  * This can be called during early boot to increase the size of the atomic
  * coherent DMA pool above the default value of 256KiB. It must be called
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 3d23418cbddd..180567408ee8 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -178,6 +178,7 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
 
 /* PCI fixed i/o mapping */
 #define PCI_IO_VIRT_BASE	0xfee00000
+#define PCI_IOBASE		((void __iomem *)PCI_IO_VIRT_BASE)
 
 #if defined(CONFIG_PCI)
 void pci_ioremap_set_mem_type(int mem_type);
diff --git a/arch/arm/include/asm/irq_work.h b/arch/arm/include/asm/irq_work.h
new file mode 100644
index 000000000000..712d03e5973a
--- /dev/null
+++ b/arch/arm/include/asm/irq_work.h
@@ -0,0 +1,11 @@
+#ifndef __ASM_ARM_IRQ_WORK_H
+#define __ASM_ARM_IRQ_WORK_H
+
+#include <asm/smp_plat.h>
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+	return is_smp();
+}
+
+#endif /* _ASM_ARM_IRQ_WORK_H */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 3f688b458143..acb0d5712716 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -37,6 +37,11 @@
  */
 #define TRAMPOLINE_VA	UL(CONFIG_VECTORS_BASE)
 
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
+ */
+#define KVM_MMU_CACHE_MIN_PAGES	2
+
 #ifndef __ASSEMBLY__
 
 #include <asm/cacheflush.h>
@@ -50,7 +55,7 @@ void free_hyp_pgds(void);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size);
+			  phys_addr_t pa, unsigned long size, bool writable);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
@@ -83,6 +88,11 @@ static inline void kvm_clean_pgd(pgd_t *pgd)
 	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
 }
 
+static inline void kvm_clean_pmd(pmd_t *pmd)
+{
+	clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
+}
+
 static inline void kvm_clean_pmd_entry(pmd_t *pmd)
 {
 	clean_pmd_entry(pmd);
@@ -123,10 +133,23 @@ static inline bool kvm_page_empty(void *ptr)
 	struct page *ptr_page = virt_to_page(ptr);
 
 	return page_count(ptr_page) == 1;
 }
 
-#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
-#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
-#define kvm_pud_table_empty(pudp) (0)
+#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
+#define kvm_pud_table_empty(kvm, pudp) (0)
+
+#define KVM_PREALLOC_LEVEL	0
+static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
+{
+	return 0;
+}
+
+static inline void kvm_free_hwpgd(struct kvm *kvm) { }
+
+static inline void *kvm_get_hwpgd(struct kvm *kvm)
+{
+	return kvm->arch.pgd;
+}
 
 struct kvm;
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 219ac88a9542..f0279411847d 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -182,6 +182,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 #define pmd_addr_end(addr,end) (end)
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+#define pte_special(pte)	(0)
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 /*
  * We don't have huge page support for short descriptors, for the moment
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 06e0bc0f8b00..a31ecdad4b59 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -213,10 +213,19 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 #define pmd_isclear(pmd, val)	(!(pmd_val(pmd) & (val)))
 
 #define pmd_young(pmd)		(pmd_isset((pmd), PMD_SECT_AF))
+#define pte_special(pte)	(pte_isset((pte), L_PTE_SPECIAL))
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+	pte_val(pte) |= L_PTE_SPECIAL;
+	return pte;
+}
+#define	__HAVE_ARCH_PTE_SPECIAL
 
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		(pmd_isclear((pmd), L_PMD_SECT_RDONLY))
 #define pmd_dirty(pmd)		(pmd_isset((pmd), L_PMD_SECT_DIRTY))
+#define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))
+#define pud_write(pud)		pmd_write(__pmd(pud_val(pud)))
 
 #define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
 #define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
@@ -224,6 +233,12 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !pmd_table(pmd))
 #define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING))
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			  pmd_t *pmdp);
+#endif
 #endif
 
 #define PMD_BIT_FUNC(fn,op) \
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 01baef07cd0c..3b30062975b2 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -100,7 +100,7 @@ extern pgprot_t		pgprot_s2_device;
 #define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP)
 #define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
 #define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDWR)
+#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)
 
 #define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
@@ -226,7 +226,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
 #define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
 #define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))
-#define pte_special(pte)	(0)
 
 #define pte_valid_user(pte)	\
 	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))
@@ -245,7 +244,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	unsigned long ext = 0;
 
 	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
-		__sync_icache_dcache(pteval);
+		if (!pte_special(pteval))
+			__sync_icache_dcache(pteval);
 		ext |= PTE_EXT_NG;
 	}
 
@@ -264,8 +264,6 @@ PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
 PTE_BIT_FUNC(mkexec,   &= ~L_PTE_XN);
 PTE_BIT_FUNC(mknexec,   |= L_PTE_XN);
 
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
-
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f1a0dace3efe..3cadb726ec88 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -35,12 +35,39 @@
 
 #define MMU_GATHER_BUNDLE	8
 
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+static inline void __tlb_remove_table(void *_table)
+{
+	free_page_and_swap_cache((struct page *)_table);
+}
+
+struct mmu_table_batch {
+	struct rcu_head		rcu;
+	unsigned int		nr;
+	void			*tables[0];
+};
+
+#define MAX_TABLE_BATCH		\
+	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
+#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
+#else
+#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
 /*
  * TLB handling.  This allows us to remove pages from the page
  * tables, and efficiently handle the TLB issues.
 */
 struct mmu_gather {
 	struct mm_struct	*mm;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	struct mmu_table_batch	*batch;
+	unsigned int		need_flush;
+#endif
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
 	unsigned long		start, end;
@@ -101,6 +128,9 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb_table_flush(tlb);
+#endif
 }
 
 static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
@@ -129,6 +159,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 	tlb->pages = tlb->local;
 	tlb->nr = 0;
 	__tlb_alloc_page(tlb);
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb->batch = NULL;
+#endif
 }
 
 static inline void
@@ -205,7 +239,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	tlb_add_flush(tlb, addr + SZ_1M);
 #endif
 
-	tlb_remove_page(tlb, pte);
+	tlb_remove_entry(tlb, pte);
 }
 
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
@@ -213,7 +247,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 {
 #ifdef CONFIG_ARM_LPAE
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pmdp));
+	tlb_remove_entry(tlb, virt_to_page(pmdp));
 #endif
}
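
The largest hunk above is the atomic.h fold: four hand-written functions become two op-generating macros, instantiated once per operation through ATOMIC_OPS(). Below is a minimal standalone sketch of that technique, using the pre-ARMv6 UP bodies because they are plain C. This is not the kernel's code: atomic_t is redeclared locally and the kernel's raw_local_irq_save()/raw_local_irq_restore() are stubbed out, so nothing here is actually atomic; the point is only to show what the macros expand to.

/*
 * Sketch of the ATOMIC_OP()/ATOMIC_OP_RETURN() fold, UP variant.
 * Assumption: the irq primitives are replaced with no-op stubs so
 * this compiles and runs in userspace.
 */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* stubs standing in for the kernel's irq-disable primitives */
#define raw_local_irq_save(flags)	((flags) = 0UL)
#define raw_local_irq_restore(flags)	((void)(flags))

#define ATOMIC_OP(op, c_op, asm_op)				\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	unsigned long flags;					\
								\
	raw_local_irq_save(flags);				\
	v->counter c_op i;	/* the whole body is one c_op */\
	raw_local_irq_restore(flags);				\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)			\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{								\
	unsigned long flags;					\
	int val;						\
								\
	raw_local_irq_save(flags);				\
	v->counter c_op i;					\
	val = v->counter;	/* reread inside the critical section */\
	raw_local_irq_restore(flags);				\
	return val;						\
}

#define ATOMIC_OPS(op, c_op, asm_op)	\
	ATOMIC_OP(op, c_op, asm_op)	\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

/* one line per op now generates both the void and the _return variant */
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

int main(void)
{
	atomic_t v = { .counter = 40 };

	atomic_add(2, &v);
	printf("%d\n", atomic_add_return(0, &v));	/* 42 */
	printf("%d\n", atomic_sub_return(2, &v));	/* 40 */
	return 0;
}

The ARMv6+ SMP variants in the diff are generated by the same pair of macros; there the asm_op parameter, unused in the UP bodies, is stringified with #asm_op straight into the ldrex/strex retry loop, and the atomic64 versions do the same with an op1/op2 pair (adds/adc, subs/sbc) for the two halves of the 64-bit value.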