author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2011-05-25 04:12:14 +0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-25 19:39:20 +0400
commit | 9547d01bfb9c351dc19067f8a4cea9d3955f4125 (patch)
tree | 3c32521dbbf380471e1eef3e11ae656b24164255 /include/asm-generic
parent | 88c22088bf235f50b09a10bd9f022b0472bcb6b5 (diff)
download | linux-9547d01bfb9c351dc19067f8a4cea9d3955f4125.tar.xz
mm: uninline large generic tlb.h functions
Some of these functions have grown beyond inline sanity; move them
out-of-line.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Requested-by: Andrew Morton <akpm@linux-foundation.org>
Requested-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/tlb.h | 135
1 file changed, 13 insertions(+), 122 deletions(-)
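The diff below is the header side of the move. The out-of-line bodies themselves land in a common C file (mm/memory.c in this series), which this diffstat does not cover since it is limited to include/asm-generic. A minimal sketch of that receiving side, under the assumption that only the location changes and the bodies move over unchanged:

```c
/*
 * Sketch of mm/memory.c after the move (bodies elided; they are
 * assumed to be copied verbatim from include/asm-generic/tlb.h).
 * The HAVE_GENERIC_MMU_GATHER define the header now carries marks
 * this generic implementation as present.
 */
#ifdef HAVE_GENERIC_MMU_GATHER

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
{
	/* body moved verbatim from include/asm-generic/tlb.h */
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	/* likewise */
}

#endif /* HAVE_GENERIC_MMU_GATHER */
```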
```diff
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 5a946a08ff9d..e58fa777fa09 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -96,134 +96,25 @@ struct mmu_gather {
 	struct page		*__pages[MMU_GATHER_BUNDLE];
 };
 
-/*
- * For UP we don't need to worry about TLB flush
- * and page free order so much..
- */
-#ifdef CONFIG_SMP
-  #define tlb_fast_mode(tlb) (tlb->fast_mode)
-#else
-  #define tlb_fast_mode(tlb) 1
-#endif
+#define HAVE_GENERIC_MMU_GATHER
 
-static inline int tlb_next_batch(struct mmu_gather *tlb)
+static inline int tlb_fast_mode(struct mmu_gather *tlb)
 {
-	struct mmu_gather_batch *batch;
-
-	batch = tlb->active;
-	if (batch->next) {
-		tlb->active = batch->next;
-		return 1;
-	}
-
-	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-	if (!batch)
-		return 0;
-
-	batch->next = NULL;
-	batch->nr   = 0;
-	batch->max  = MAX_GATHER_BATCH;
-
-	tlb->active->next = batch;
-	tlb->active = batch;
-
+#ifdef CONFIG_SMP
+	return tlb->fast_mode;
+#else
+	/*
+	 * For UP we don't need to worry about TLB flush
+	 * and page free order so much..
+	 */
 	return 1;
-}
-
-/* tlb_gather_mmu
- *	Called to initialize an (on-stack) mmu_gather structure for page-table
- *	tear-down from @mm. The @fullmm argument is used when @mm is without
- *	users and we're going to destroy the full address space (exit/execve).
- */
-static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
-{
-	tlb->mm = mm;
-
-	tlb->fullmm     = fullmm;
-	tlb->need_flush = 0;
-	tlb->fast_mode  = (num_possible_cpus() == 1);
-	tlb->local.next = NULL;
-	tlb->local.nr   = 0;
-	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
-	tlb->active     = &tlb->local;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb->batch = NULL;
 #endif
 }
 
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb)
-{
-	struct mmu_gather_batch *batch;
-
-	if (!tlb->need_flush)
-		return;
-	tlb->need_flush = 0;
-	tlb_flush(tlb);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb_table_flush(tlb);
-#endif
-
-	if (tlb_fast_mode(tlb))
-		return;
-
-	for (batch = &tlb->local; batch; batch = batch->next) {
-		free_pages_and_swap_cache(batch->pages, batch->nr);
-		batch->nr = 0;
-	}
-	tlb->active = &tlb->local;
-}
-
-/* tlb_finish_mmu
- *	Called at the end of the shootdown operation to free up any resources
- *	that were required.
- */
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-	struct mmu_gather_batch *batch, *next;
-
-	tlb_flush_mmu(tlb);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	for (batch = tlb->local.next; batch; batch = next) {
-		next = batch->next;
-		free_pages((unsigned long)batch, 0);
-	}
-	tlb->local.next = NULL;
-}
-
-/* __tlb_remove_page
- *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- *	handling the additional races in SMP caused by other CPUs caching valid
- *	mappings in their TLBs. Returns the number of free page slots left.
- *	When out of page slots we must call tlb_flush_mmu().
- */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	struct mmu_gather_batch *batch;
-
-	tlb->need_flush = 1;
-
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu() */
-	}
-
-	batch = tlb->active;
-	batch->pages[batch->nr++] = page;
-	VM_BUG_ON(batch->nr > batch->max);
-	if (batch->nr == batch->max) {
-		if (!tlb_next_batch(tlb))
-			return 0;
-	}
-
-	return batch->max - batch->nr;
-}
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_flush_mmu(struct mmu_gather *tlb);
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
+int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
 
 /* tlb_remove_page
  *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
```
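For orientation, a hedged sketch of how a page-table tear-down path drives the four functions that are now out of line. The pte-walking loop (for_each_unmapped_page) is a hypothetical placeholder, not a real kernel macro; the return-value contract is the one documented in the removed comment above: __tlb_remove_page() returns the number of free page slots left, and 0 when the batch is full, at which point tlb_flush_mmu() must be called.

```c
/*
 * Sketch of a caller (not from this patch).  for_each_unmapped_page()
 * stands in for the real pte-walking loop.
 */
void unmap_region_sketch(struct mm_struct *mm,
			 unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;
	struct page *page;

	tlb_gather_mmu(&tlb, mm, false);	/* not a full-mm teardown */

	for_each_unmapped_page(mm, start, end, page) {
		/* 0 means the batches are full: flush TLBs, free pages now */
		if (!__tlb_remove_page(&tlb, page))
			tlb_flush_mmu(&tlb);
	}

	tlb_finish_mmu(&tlb, start, end);	/* final flush, free batches */
}
```

Nothing about this calling sequence changes with the patch; only where the four functions are compiled does.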