 arch/x86/mm/tlb.c | 37 ++++++++++++++++++++++++++++++++-----
 1 file changed, 32 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index bf12371db6c4..07b6701a540a 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -788,11 +788,13 @@ done:
 			nr_invalidate);
 }
 
-static bool tlb_is_not_lazy(int cpu, void *data)
+static bool tlb_is_not_lazy(int cpu)
 {
 	return !per_cpu(cpu_tlbstate.is_lazy, cpu);
 }
 
+static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
+
 STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
 					 const struct flush_tlb_info *info)
 {
@@ -813,12 +815,37 @@ STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
 	 * up on the new contents of what used to be page tables, while
 	 * doing a speculative memory access.
 	 */
-	if (info->freed_tables)
+	if (info->freed_tables) {
 		smp_call_function_many(cpumask, flush_tlb_func,
 			       (void *)info, 1);
-	else
-		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
-				(void *)info, 1, cpumask);
+	} else {
+		/*
+		 * Although we could have used on_each_cpu_cond_mask(),
+		 * open-coding it has performance advantages, as it eliminates
+		 * the need for indirect calls or retpolines. In addition, it
+		 * allows to use a designated cpumask for evaluating the
+		 * condition, instead of allocating one.
+		 *
+		 * This code works under the assumption that there are no nested
+		 * TLB flushes, an assumption that is already made in
+		 * flush_tlb_mm_range().
+		 *
+		 * cond_cpumask is logically a stack-local variable, but it is
+		 * more efficient to have it off the stack and not to allocate
+		 * it on demand. Preemption is disabled and this code is
+		 * non-reentrant.
+		 */
+		struct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask);
+		int cpu;
+
+		cpumask_clear(cond_cpumask);
+
+		for_each_cpu(cpu, cpumask) {
+			if (tlb_is_not_lazy(cpu))
+				__cpumask_set_cpu(cpu, cond_cpumask);
+		}
+		smp_call_function_many(cond_cpumask, flush_tlb_func, (void *)info, 1);
+	}
 }
 
 void flush_tlb_others(const struct cpumask *cpumask,
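
Note: the comment added by this patch explains the trade-off being made: a
generic helper like on_each_cpu_cond_mask() reaches the condition through a
function pointer (an indirect call, hence a retpoline on mitigated kernels)
and may have to allocate a cpumask, while the open-coded loop calls the
predicate directly and gathers the result into preallocated per-CPU storage.
The following is a minimal user-space sketch of that shape, not kernel code:
is_lazy[], do_flush(), scratch_mask, and both cond_call_*() helpers are
hypothetical stand-ins for cpu_tlbstate.is_lazy, flush_tlb_func(), the
per-CPU flush_tlb_mask, and the two call paths, with plain bool arrays
standing in for struct cpumask.

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	#define NR_CPUS 8

	/* Stand-in for per-CPU cpu_tlbstate.is_lazy (made-up data). */
	static bool is_lazy[NR_CPUS] = { [1] = true, [5] = true };

	static bool tlb_is_not_lazy(int cpu)
	{
		return !is_lazy[cpu];
	}

	/* Stand-in for flush_tlb_func(); reports which "CPU" would flush. */
	static void do_flush(int cpu)
	{
		printf("flush on cpu %d\n", cpu);
	}

	/*
	 * Generic-helper shape, in the spirit of on_each_cpu_cond_mask():
	 * the condition is reached through a function pointer, i.e. one
	 * indirect call per candidate CPU.
	 */
	static void cond_call_indirect(bool (*cond)(int), void (*func)(int),
				       const bool *cpumask)
	{
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpumask[cpu] && cond(cpu))
				func(cpu);
	}

	/* Stand-in for the per-CPU flush_tlb_mask scratch cpumask. */
	static bool scratch_mask[NR_CPUS];

	/*
	 * Open-coded shape, as in the patch: a direct call to the
	 * predicate, results gathered into preallocated storage, then one
	 * batched pass. Reusing static storage is safe only because the
	 * code is non-reentrant; the kernel guarantees that here by
	 * running with preemption disabled.
	 */
	static void cond_call_open_coded(const bool *cpumask)
	{
		memset(scratch_mask, 0, sizeof(scratch_mask));

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpumask[cpu] && tlb_is_not_lazy(cpu))
				scratch_mask[cpu] = true;

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			if (scratch_mask[cpu])
				do_flush(cpu);
	}

	int main(void)
	{
		bool all[NR_CPUS];

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			all[cpu] = true;

		cond_call_indirect(tlb_is_not_lazy, do_flush, all);
		cond_call_open_coded(all);
		return 0;
	}

Both helpers visit the same set of "CPUs"; the difference is only in how the
condition is dispatched and where the intermediate mask lives, which is
exactly the distinction the patch's in-code comment draws.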