From e7b52ffd45a6d834473f43b349e7d86593d763c7 Mon Sep 17 00:00:00 2001
From: Alex Shi
Date: Thu, 28 Jun 2012 09:02:17 +0800
Subject: x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range

x86 has no flush_tlb_range support at the instruction level. Currently
flush_tlb_range is implemented by flushing the whole page table, which
is not the best solution for all scenarios. In fact, if we use 'invlpg'
to flush just a few lines of the TLB, we gain performance from the
remaining TLB lines on later accesses.

But the 'invlpg' instruction is costly; its execution time rivals a cr3
rewrite, and is even a bit higher on SNB CPUs. So, on a CPU with 512
4KB TLB entries, the balance point is at:

	(512 - X) * 100ns (assumed TLB refill cost) =
		X (TLB flush entries) * 100ns (assumed invlpg cost)

Here X is 256, that is, half of the 512 entries. But with the
mysterious CPU prefetcher and page-miss-handler unit, the actual TLB
refill cost is far lower than 100ns for sequential accesses, and two HT
siblings in one core make memory access even faster when they touch the
same memory. So this patch only takes the per-page path when the number
of target entries is less than 1/16 of the whole set of active TLB
entries. I have no data to support the '1/16' percentage, so any
suggestions are welcome.

As for hugetlb, presumably due to the smaller page tables and fewer
active TLB entries, I saw no benefit in my benchmark, so it is not
optimized for now.

My micro benchmark shows that in the ideal scenario the read
performance improves by 70 percent, and in the worst scenario the
read/write performance is similar to the unpatched 3.4-rc4 kernel.

Here is the read data on my 2P * 4cores * HT NHM EP machine, with THP
'always'.

Multi-thread testing, the '-t' parameter is the thread number:

				with patch	unpatched 3.4-rc4
./mprotect -t 1			14ns		24ns
./mprotect -t 2			13ns		22ns
./mprotect -t 4			12ns		19ns
./mprotect -t 8			14ns		16ns
./mprotect -t 16		28ns		26ns
./mprotect -t 32		54ns		51ns
./mprotect -t 128		200ns		199ns

Single process with sequential flushing and memory accessing:

				with patch	unpatched 3.4-rc4
./mprotect			7ns		11ns
./mprotect -p 4096 -l 8 -n 10240
				21ns		21ns

[ hpa: http://lkml.kernel.org/r/1B4B44D9196EFF41AE41FDA404FC0A100BFF94@SHSMSX101.ccr.corp.intel.com
  has additional performance numbers. ]

Signed-off-by: Alex Shi
Link: http://lkml.kernel.org/r/1340845344-27557-3-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin
---
 arch/x86/include/asm/tlbflush.h | 23 +++++++++--------------
 1 file changed, 9 insertions(+), 14 deletions(-)

(limited to 'arch/x86/include/asm/tlbflush.h')

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 36a1a2ab87d2..33608d96d68b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -73,14 +73,10 @@ static inline void __flush_tlb_one(unsigned long addr)
  *  - flush_tlb_page(vma, vmaddr) flushes one page
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
+ *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
- *
- * x86-64 can only flush individual pages or full VMs. For a range flush
- * we always do the full VM. Might be worth trying if for a small
- * range a few INVLPGs in a row are a win.
  */

 #ifndef CONFIG_SMP

@@ -111,7 +107,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,

 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
-					   unsigned long va)
+					   unsigned long start,
+					   unsigned long end)
 {
 }

@@ -129,17 +126,14 @@ extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
 extern void flush_tlb_mm(struct mm_struct *);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_range(struct vm_area_struct *vma,
+			    unsigned long start, unsigned long end);

 #define flush_tlb() flush_tlb_current_task()

-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	flush_tlb_mm(vma->vm_mm);
-}
-
 void native_flush_tlb_others(const struct cpumask *cpumask,
-			     struct mm_struct *mm, unsigned long va);
+			     struct mm_struct *mm,
+			     unsigned long start, unsigned long end);

 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2

@@ -159,7 +153,8 @@ static inline void reset_lazy_tlbstate(void)
 #endif	/* SMP */

 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
+#define flush_tlb_others(mask, mm, start, end)	\
+	native_flush_tlb_others(mask, mm, start, end)
 #endif

 static inline void flush_tlb_kernel_range(unsigned long start,
--
cgit v1.2.3
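
To make the balance-point arithmetic above concrete, here is a
stand-alone C sketch of the calculation. The 512-entry TLB and the two
100ns costs are the commit message's assumed values, not measured
constants, and the program is only an illustration:

	#include <stdio.h>

	#define TLB_ENTRIES	512
	#define REFILL_NS	100	/* assumed per-entry TLB refill cost */
	#define INVLPG_NS	100	/* assumed per-entry invlpg cost */

	int main(void)
	{
		unsigned int x;

		/* break-even X solves (512 - X) * refill = X * invlpg */
		for (x = 0; x <= TLB_ENTRIES; x++)
			if ((TLB_ENTRIES - x) * REFILL_NS <= x * INVLPG_NS)
				break;

		printf("break-even: %u of %u entries\n", x, TLB_ENTRIES);

		/* the conservative threshold the patch actually picks: 1/16 */
		printf("per-page invlpg when range < %u entries\n",
		       TLB_ENTRIES / 16);
		return 0;
	}

With equal costs the loop reports 256, i.e. half of the 512 entries;
the patch's 1/16 threshold (32 entries here) is deliberately far below
that break-even to account for the cheaper real-world refills.
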
From 611ae8e3f5204f7480b3b405993b3352cfa16662 Mon Sep 17 00:00:00 2001
From: Alex Shi
Date: Thu, 28 Jun 2012 09:02:22 +0800
Subject: x86/tlb: enable tlb flush range support for x86

Not every tlb_flush needs to evacuate all TLB entries; in munmap, for
example, a few 'invlpg' instructions are better for whole-process
performance, since they leave most of the TLB entries in place for
later accesses.

This patch also rewrites flush_tlb_range for two purposes:
1. Split it out to get a flush_tlb_mm_range function.
2. Clean up to reduce line breaking; thanks to Borislav for the input.

My micro benchmark 'mummap' http://lkml.org/lkml/2012/5/17/59 shows
that random memory access on another CPU speeds up by 0~50% on a
2P * 4cores * HT NHM EP machine while doing 'munmap'.

Thanks to Yongjie for testing this patch:
-------------
I used Linux 3.4-RC6 w/ and w/o his patches as the Xen dom0 and guest
kernel. After running two benchmarks in a Xen HVM guest, I found that
his patches brought about a 1%~3% performance gain in 'kernel build'
and 'netperf' testing, though the gain was not very stable in the
'kernel build' testing.

Some detailed testing results are below.

Testing Environment:
	Hardware: Romley-EP platform
	Xen version: latest upstream
	Linux kernel: 3.4-RC6
	Guest vCPU number: 8
	NIC: Intel 82599 (10GB bandwidth)

In 'kernel build' testing in the guest:

	Command line	| performance gain
	make -j 4	| 3.81%
	make -j 8	| 0.37%
	make -j 16	| -0.52%

In 'netperf' testing, we tested TCP_STREAM with the default socket
size of 16384 bytes as the large packet and 64 bytes as the small
packet. I used several clients to add networking pressure, and the
'netperf' server automatically generated several threads to respond to
them. I used both large-size and small-size packets in the testing.

	Packet size	| Thread number	| performance gain
	16384 bytes	| 4		| 0.02%
	16384 bytes	| 8		| 2.21%
	16384 bytes	| 16		| 2.04%
	64 bytes	| 4		| 1.07%
	64 bytes	| 8		| 3.31%
	64 bytes	| 16		| 0.71%

Signed-off-by: Alex Shi
Link: http://lkml.kernel.org/r/1340845344-27557-8-git-send-email-alex.shi@intel.com
Tested-by: Ren, Yongjie
Signed-off-by: H. Peter Anvin
---
 arch/x86/include/asm/tlb.h      |   9 +++-
 arch/x86/include/asm/tlbflush.h |  17 ++++--
 arch/x86/mm/tlb.c               | 112 +++++++++++++++++-----------------------
 3 files changed, 68 insertions(+), 70 deletions(-)

(limited to 'arch/x86/include/asm/tlbflush.h')

diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 829215fef9ee..4fef20773b8f 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -4,7 +4,14 @@
 #define tlb_start_vma(tlb, vma) do { } while (0)
 #define tlb_end_vma(tlb, vma) do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#define tlb_flush(tlb)							\
+{									\
+	if (tlb->fullmm == 0)						\
+		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL);	\
+	else								\
+		flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);	\
+}

 #include <asm-generic/tlb.h>

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 33608d96d68b..621b959e1dbf 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -105,6 +105,13 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 		__flush_tlb();
 }

+static inline void flush_tlb_mm_range(struct vm_area_struct *vma,
+	   unsigned long start, unsigned long end, unsigned long vmflag)
+{
+	if (vma->vm_mm == current->active_mm)
+		__flush_tlb();
+}
+
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
 					   unsigned long start,
@@ -122,12 +129,16 @@ static inline void reset_lazy_tlbstate(void)

 #define local_flush_tlb() __flush_tlb()

+#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+
+#define flush_tlb_range(vma, start, end)	\
+		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+
 extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-extern void flush_tlb_range(struct vm_area_struct *vma,
-			    unsigned long start, unsigned long end);
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+				unsigned long end, unsigned long vmflag);

 #define flush_tlb() flush_tlb_current_task()

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5911f61e300e..481737def84a 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -301,23 +301,10 @@ void flush_tlb_current_task(void)
 	preempt_enable();
 }

-void flush_tlb_mm(struct mm_struct *mm)
-{
-	preempt_disable();
-
-	if (current->active_mm == mm) {
-		if (current->mm)
-			local_flush_tlb();
-		else
-			leave_mm(smp_processor_id());
-	}
-	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
-
-	preempt_enable();
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * It can find out the THP large page, or
+ * HUGETLB page in tlb_flush when THP disabled
+ */
 static inline unsigned long has_large_page(struct mm_struct *mm,
 				 unsigned long start, unsigned long end)
 {
@@ -339,68 +326,61 @@ static inline unsigned long has_large_page(struct mm_struct *mm,
 	}
 	return 0;
 }
-#else
-static inline unsigned long has_large_page(struct mm_struct *mm,
-				 unsigned long start, unsigned long end)
-{
-	return 0;
-}
-#endif
-void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm;
-	if (vma->vm_flags & VM_HUGETLB || tlb_flushall_shift == -1) {
-flush_all:
-		flush_tlb_mm(vma->vm_mm);
-		return;
-	}
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+				unsigned long end, unsigned long vmflag)
+{
+	unsigned long addr;
+	unsigned act_entries, tlb_entries = 0;

 	preempt_disable();
-	mm = vma->vm_mm;
-	if (current->active_mm == mm) {
-		if (current->mm) {
-			unsigned long addr, vmflag = vma->vm_flags;
-			unsigned act_entries, tlb_entries = 0;
+	if (current->active_mm != mm)
+		goto flush_all;

-			if (vmflag & VM_EXEC)
-				tlb_entries = tlb_lli_4k[ENTRIES];
-			else
-				tlb_entries = tlb_lld_4k[ENTRIES];
-
-			act_entries = tlb_entries > mm->total_vm ?
-					mm->total_vm : tlb_entries;
+	if (!current->mm) {
+		leave_mm(smp_processor_id());
+		goto flush_all;
+	}

-			if ((end - start) >> PAGE_SHIFT >
-					act_entries >> tlb_flushall_shift)
-				local_flush_tlb();
-			else {
-				if (has_large_page(mm, start, end)) {
-					preempt_enable();
-					goto flush_all;
-				}
-				for (addr = start; addr < end;
-						addr += PAGE_SIZE)
-					__flush_tlb_single(addr);
+	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
+					|| vmflag == VM_HUGETLB) {
+		local_flush_tlb();
+		goto flush_all;
+	}

-				if (cpumask_any_but(mm_cpumask(mm),
-					smp_processor_id()) < nr_cpu_ids)
-					flush_tlb_others(mm_cpumask(mm), mm,
-						start, end);
-				preempt_enable();
-				return;
-			}
-		} else {
-			leave_mm(smp_processor_id());
+	/* In modern CPU, last level tlb used for both data/ins */
+	if (vmflag & VM_EXEC)
+		tlb_entries = tlb_lli_4k[ENTRIES];
+	else
+		tlb_entries = tlb_lld_4k[ENTRIES];
+	/* Assume all of TLB entries was occupied by this task */
+	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
+
+	/* tlb_flushall_shift is on balance point, details in commit log */
+	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+		local_flush_tlb();
+	else {
+		if (has_large_page(mm, start, end)) {
+			local_flush_tlb();
+			goto flush_all;
 		}
+		/* flush range by one by one 'invlpg' */
+		for (addr = start; addr < end; addr += PAGE_SIZE)
+			__flush_tlb_single(addr);
+
+		if (cpumask_any_but(mm_cpumask(mm),
+				smp_processor_id()) < nr_cpu_ids)
+			flush_tlb_others(mm_cpumask(mm), mm, start, end);
+		preempt_enable();
+		return;
 	}
+
+flush_all:
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
 	preempt_enable();
 }
-
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 {
 	struct mm_struct *mm = vma->vm_mm;
--
cgit v1.2.3
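
For readers tracing the new flush_tlb_mm_range() above, this
stand-alone model reproduces its flush decision in user space. The
function name want_full_flush and all sample inputs are invented for
illustration; the kernel reads the real values from tlb_lli_4k /
tlb_lld_4k and mm->total_vm:

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12

	/* true when a full flush beats per-page invlpg for this range */
	static bool want_full_flush(unsigned long start, unsigned long end,
				    unsigned long total_vm,
				    unsigned int tlb_entries,
				    int tlb_flushall_shift)
	{
		unsigned long act_entries;

		if (tlb_flushall_shift == -1)	/* heuristic disabled */
			return true;

		/* at most this task's own pages can occupy the TLB */
		act_entries = total_vm > tlb_entries ? tlb_entries : total_vm;

		return ((end - start) >> PAGE_SHIFT) >
		       (act_entries >> tlb_flushall_shift);
	}

	int main(void)
	{
		/* a 16-page munmap in a large task, 512-entry TLB, shift 4 */
		printf("%s\n",
		       want_full_flush(0x10000, 0x20000, 4096, 512, 4) ?
		       "full TLB flush" : "per-page invlpg");
		return 0;
	}

With these sample numbers the threshold is 512 >> 4 = 32 entries, so
the 16-page range takes the per-page invlpg path.
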
From effee4b9b3b0aa5770bcd98de5f672b05b27703c Mon Sep 17 00:00:00 2001
From: Alex Shi
Date: Thu, 28 Jun 2012 09:02:24 +0800
Subject: x86/tlb: do flush_tlb_kernel_range by 'invlpg'

This patch implements flush_tlb_kernel_range with 'invlpg'. The
performance cost and gain were analyzed in a previous patch
(x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range).

In this testing: http://lkml.org/lkml/2012/6/21/10 the cost is mostly
covered by the long kernel path, but the gain is still quite clear:
memory access in a user application can speed up by 30+% while the
kernel executes this function.

Signed-off-by: Alex Shi
Link: http://lkml.kernel.org/r/1340845344-27557-10-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin
---
 arch/x86/include/asm/tlbflush.h | 13 +++++++------
 arch/x86/mm/tlb.c               | 30 ++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+), 6 deletions(-)

(limited to 'arch/x86/include/asm/tlbflush.h')

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 621b959e1dbf..b5a27bd77669 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -123,6 +123,12 @@ static inline void reset_lazy_tlbstate(void)
 {
 }

+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	flush_tlb_all();
+}
+
 #else	/* SMP */

 #include <asm/smp.h>

@@ -139,6 +145,7 @@ extern void flush_tlb_current_task(void);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

 #define flush_tlb() flush_tlb_current_task()

@@ -168,10 +175,4 @@ static inline void reset_lazy_tlbstate(void)
 	native_flush_tlb_others(mask, mm, start, end)
 #endif

-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	flush_tlb_all();
-}
-
 #endif /* _ASM_X86_TLBFLUSH_H */

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 2b5f506a7655..613cd83e8c0c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -264,6 +264,36 @@ void flush_tlb_all(void)
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }

+static void do_kernel_range_flush(void *info)
+{
+	struct flush_tlb_info *f = info;
+	unsigned long addr;
+
+	/* flush range by one by one 'invlpg' */
+	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+		__flush_tlb_single(addr);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned act_entries;
+	struct flush_tlb_info info;
+
+	/* In modern CPU, last level tlb used for both data/ins */
+	act_entries = tlb_lld_4k[ENTRIES];
+
+	/* Balance as user space task's flush, a bit conservative */
+	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
+		(end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+
+		on_each_cpu(do_flush_tlb_all, NULL, 1);
+	else {
+		info.flush_start = start;
+		info.flush_end = end;
+		on_each_cpu(do_kernel_range_flush, &info, 1);
+	}
+}
+
 #ifdef CONFIG_DEBUG_TLBFLUSH
 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
 				  size_t count, loff_t *ppos)
--
cgit v1.2.3
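
The flush_tlb_kernel_range() above packages the range in a struct and
hands it to a callback that on_each_cpu() runs on every CPU. Here is a
minimal user-space analogue of that pattern; struct flush_range and
range_flush are invented stand-ins for the kernel's flush_tlb_info and
do_kernel_range_flush():

	#include <stdio.h>

	#define PAGE_SIZE	4096UL

	struct flush_range {
		unsigned long start;
		unsigned long end;
	};

	/* stand-in for the IPI callback: one "invlpg" per page */
	static void range_flush(void *info)
	{
		struct flush_range *f = info;
		unsigned long addr;

		for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
			printf("invlpg %#lx\n", addr);
	}

	int main(void)
	{
		struct flush_range r = {
			0x100000UL, 0x100000UL + 4 * PAGE_SIZE
		};

		/* the kernel would do: on_each_cpu(range_flush, &r, 1) */
		range_flush(&r);
		return 0;
	}

The void * argument is what lets one on_each_cpu() callback signature
carry arbitrary per-call data, here the start/end pair of the range.
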
From 7efa1c87963d23cc57ba40c07316d3e28cc75a3a Mon Sep 17 00:00:00 2001
From: Alex Shi
Date: Fri, 20 Jul 2012 09:18:23 +0800
Subject: x86/tlb: Fix build warning and crash when building for !SMP

The incompatible parameter of the !SMP flush_tlb_mm_range stub caused a
build warning; fix it by using the correct parameter type. Ingo Molnar
found that this could also cause a user-space crash.

Reported-by: Tetsuo Handa
Reported-by: Ingo Molnar
Signed-off-by: Alex Shi
Link: http://lkml.kernel.org/r/1342747103-19765-1-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin
---
 arch/x86/include/asm/tlbflush.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/x86/include/asm/tlbflush.h')

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index b5a27bd77669..74a44333545a 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -105,10 +105,10 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 		__flush_tlb();
 }

-static inline void flush_tlb_mm_range(struct vm_area_struct *vma,
+static inline void flush_tlb_mm_range(struct mm_struct *mm,
 	   unsigned long start, unsigned long end, unsigned long vmflag)
 {
-	if (vma->vm_mm == current->active_mm)
+	if (mm == current->active_mm)
 		__flush_tlb();
 }
--
cgit v1.2.3
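
To see the corrected !SMP stub in action, here is a compile-able
mock-up with simplified stand-in types; struct task, the current
macro, and the flush counter are all invented for this sketch, and only
the flush_tlb_mm_range signature mirrors the patch:

	#include <stdio.h>

	struct mm_struct { int users; };
	struct task { struct mm_struct *active_mm; };

	static struct task current_task;
	#define current (&current_task)

	static int flush_count;

	static void __flush_tlb(void)	/* would reload cr3 in the kernel */
	{
		flush_count++;
	}

	/* the corrected stub: takes the mm directly, as every caller expects */
	static inline void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned long vmflag)
	{
		if (mm == current->active_mm)
			__flush_tlb();
	}

	int main(void)
	{
		struct mm_struct mm = { 1 };

		current->active_mm = &mm;
		flush_tlb_mm_range(&mm, 0, 4096, 0);
		printf("flushes: %d\n", flush_count);	/* prints 1 */
		return 0;
	}

Before the fix the stub took a vm_area_struct * while its callers
passed an mm_struct *, so the comparison against current->active_mm was
made through the wrong type, hence the warning and the potential crash.
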