author		Nadav Amit <namit@vmware.com>	2022-07-11 02:28:37 +0300
committer	Dave Hansen <dave.hansen@linux.intel.com>	2022-07-19 19:04:52 +0300
commit		8f1d56f64f8d6b80dea2d1978d10071132a695c5 (patch)
tree		c2cdb5350b9b76f53f5e9ee653d47b16ca9dbac1 /arch
parent		54ee1844047c1df015ab2679a4f55564a3aa1fa1 (diff)
download	linux-8f1d56f64f8d6b80dea2d1978d10071132a695c5.tar.xz
x86/mm/tlb: Ignore f->new_tlb_gen when zero
Commit aa44284960d5 ("x86/mm/tlb: Avoid reading mm_tlb_gen when
possible") introduced an optimization to skip superfluous TLB
flushes based on the generation provided in flush_tlb_info.
However, arch_tlbbatch_flush() does not provide any generation in
flush_tlb_info and populates the flush_tlb_info generation with
0. This 0 causes the flush_tlb_info to be interpreted as a
superfluous, old flush. As a result, try_to_unmap_one() would
not perform any TLB flushes.
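To illustrate the failure mode, here is a minimal, self-contained C
sketch (not kernel code; the standalone struct, helper and main() are
illustrative assumptions, only the comparison mirrors the pre-fix check
in flush_tlb_func()). Since zero is not a valid generation, any CPU
with a valid local generation (>= 1) misreads a batched flush carrying
new_tlb_gen == 0 as already performed:

    #include <stdbool.h>
    #include <stdio.h>

    struct flush_tlb_info_sketch {
            unsigned long long new_tlb_gen; /* 0: caller had no generation */
    };

    /* Pre-fix check: is this flush already covered by what the CPU did? */
    static bool is_superfluous_flush(const struct flush_tlb_info_sketch *f,
                                     unsigned long long local_tlb_gen)
    {
            /* 0 <= local_tlb_gen always holds, so a batched flush with
             * new_tlb_gen == 0 is misread as an old, already performed
             * flush and skipped. */
            return f->new_tlb_gen <= local_tlb_gen;
    }

    int main(void)
    {
            struct flush_tlb_info_sketch batched = { .new_tlb_gen = 0 };

            /* Valid generations start at 1, so this always prints 1:
             * the needed flush is skipped. */
            printf("flush skipped: %d\n", is_superfluous_flush(&batched, 1));
            return 0;
    }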
Fix it by checking whether f->new_tlb_gen is nonzero. A zero value
is anyhow an invalid generation value. To avoid future
confusion, introduce TLB_GENERATION_INVALID constant and use it
properly. Add warnings to ensure no partial flushes are done with
TLB_GENERATION_INVALID or when f->mm is NULL, since this does not
make any sense.
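For comparison, a sketch of the check after the fix (the helper name is
hypothetical; the real change is inline in flush_tlb_func(), see the
diff below). A zero generation now means "no generation information",
so the flush is performed rather than skipped:

    #define TLB_GENERATION_INVALID 0

    /* Hypothetical helper mirroring the fixed inline check: only skip
     * when a *valid* generation proves the flush already happened. */
    static bool is_superfluous_flush_fixed(unsigned long long new_tlb_gen,
                                           unsigned long long local_tlb_gen)
    {
            return new_tlb_gen != TLB_GENERATION_INVALID &&
                   new_tlb_gen <= local_tlb_gen;
    }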
In addition, add the missing unlikely().
[ dhansen: change VM_BUG_ON() -> VM_WARN_ON(), clarify changelog ]
Fixes: aa44284960d5 ("x86/mm/tlb: Avoid reading mm_tlb_gen when possible")
Reported-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Nadav Amit <namit@vmware.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Tested-by: Hugh Dickins <hughd@google.com>
Link: https://lkml.kernel.org/r/20220710232837.3618-1-namit@vmware.com
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/tlbflush.h	 1
-rw-r--r--	arch/x86/mm/tlb.c	15
2 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 98fa0a114074..21bf105d54f2 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -16,6 +16,7 @@ void __flush_tlb_all(void);
 
 #define TLB_FLUSH_ALL			-1UL
+#define TLB_GENERATION_INVALID		0
 
 void cr4_update_irqsoff(unsigned long set, unsigned long clear);
 unsigned long cr4_read_shadow(void);
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index d9314cc8b81f..c1e31e9a85d7 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -771,7 +771,8 @@ static void flush_tlb_func(void *info)
 		return;
 	}
 
-	if (f->new_tlb_gen <= local_tlb_gen) {
+	if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
+		     f->new_tlb_gen <= local_tlb_gen)) {
 		/*
 		 * The TLB is already up to date in respect to f->new_tlb_gen.
 		 * While the core might be still behind mm_tlb_gen, checking
@@ -843,6 +844,12 @@ static void flush_tlb_func(void *info)
 		/* Partial flush */
 		unsigned long addr = f->start;
 
+		/* Partial flush cannot have invalid generations */
+		VM_WARN_ON(f->new_tlb_gen == TLB_GENERATION_INVALID);
+
+		/* Partial flush must have valid mm */
+		VM_WARN_ON(f->mm == NULL);
+
 		nr_invalidate = (f->end - f->start) >> f->stride_shift;
 
 		while (addr < f->end) {
@@ -1045,7 +1052,8 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		struct flush_tlb_info *info;
 
 		preempt_disable();
-		info = get_flush_tlb_info(NULL, start, end, 0, false, 0);
+		info = get_flush_tlb_info(NULL, start, end, 0, false,
+					  TLB_GENERATION_INVALID);
 
 		on_each_cpu(do_kernel_range_flush, info, 1);
 
@@ -1214,7 +1222,8 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 	int cpu = get_cpu();
 
-	info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false, 0);
+	info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false,
+				  TLB_GENERATION_INVALID);
 
 	/*
 	 * flush_tlb_multi() is not optimized for the common case in which only
 	 * a local TLB flush is needed. Optimize this use-case by calling