author    | Nitin Gupta <nitin.m.gupta@oracle.com>      | 2016-03-30 21:17:13 +0300
committer | David S. Miller <davem@davemloft.net>       | 2016-05-21 04:44:27 +0300
commit    | 24e49ee3d76b70853a96520e46b8837e5eae65b2 (patch)
tree      | 4893dd3011dc7a4db11082ce6953d1e1d2c13803 /arch/sparc/mm/init_64.c
parent    | b1ac6b7b4061f6c92bacf6938f94fb61b2fbf7f3 (diff)
download  | linux-24e49ee3d76b70853a96520e46b8837e5eae65b2.tar.xz
sparc64: Reduce TLB flushes during hugepte changes
During hugepage map/unmap, TSB and TLB flushes are currently
issued at every PAGE_SIZE'd boundary, which is unnecessary.
We now issue the flush at REAL_HPAGE_SIZE boundaries only
(a rough sketch of the idea follows the sign-offs below).
Without this patch, workloads that unmap a large hugepage-backed
VMA region get CPU lockups due to excessive TLB flush calls.
Orabug: 22365539, 22643230, 22995196
Signed-off-by: Nitin Gupta <nitin.m.gupta@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
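
The effect is easiest to see in isolation. The following is a minimal,
self-contained sketch of the idea, not code from the patch itself:
flush_one_tlb_entry() is a hypothetical stand-in for a single TLB/TSB
demap, and the constants mirror sparc64's 8K base pages and 4MB
REAL_HPAGE_SIZE (an 8MB HPAGE is backed by two real hugepage mappings).

	/* Illustrative sketch only -- not part of this patch. */
	#include <stdio.h>

	#define PAGE_SHIFT		13	/* sparc64: 8K base pages */
	#define PAGE_SIZE		(1UL << PAGE_SHIFT)
	#define REAL_HPAGE_SHIFT	22	/* 4MB real hugepages */
	#define REAL_HPAGE_SIZE		(1UL << REAL_HPAGE_SHIFT)
	#define REAL_HPAGE_MASK		(~(REAL_HPAGE_SIZE - 1))

	/* Hypothetical stand-in for one TLB/TSB demap operation. */
	static unsigned long nr_flushes;
	static void flush_one_tlb_entry(unsigned long vaddr)
	{
		(void)vaddr;
		nr_flushes++;
	}

	/* Old behaviour: flush at every PAGE_SIZE'd boundary. */
	static void flush_range_per_page(unsigned long start, unsigned long end)
	{
		for (unsigned long va = start; va < end; va += PAGE_SIZE)
			flush_one_tlb_entry(va);
	}

	/* New behaviour: flush at REAL_HPAGE_SIZE boundaries only. */
	static void flush_range_per_real_hpage(unsigned long start, unsigned long end)
	{
		for (unsigned long va = start & REAL_HPAGE_MASK; va < end;
		     va += REAL_HPAGE_SIZE)
			flush_one_tlb_entry(va);
	}

	int main(void)
	{
		unsigned long hpage = 8UL << 20;	/* one 8MB hugepage */

		nr_flushes = 0;
		flush_range_per_page(0, hpage);
		printf("per-page flushes:       %lu\n", nr_flushes);	/* 1024 */

		nr_flushes = 0;
		flush_range_per_real_hpage(0, hpage);
		printf("per-real-hpage flushes: %lu\n", nr_flushes);	/* 2 */
		return 0;
	}

For each 8MB hugepage this drops the flush count from 1024 (one per 8K
PTE) to 2 (one per 4MB real page), a 512x reduction, which is what stops
the unmap path from soft-locking the CPU on large hugepage-backed VMAs.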
Diffstat (limited to 'arch/sparc/mm/init_64.c')
-rw-r--r--  arch/sparc/mm/init_64.c | 12 ------------
1 file changed, 0 insertions(+), 12 deletions(-)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 09e838801e39..652683cb4b4b 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -324,18 +324,6 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	tsb_insert(tsb, tag, tte);
 }
 
-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline bool is_hugetlb_pte(pte_t pte)
-{
-	if ((tlb_type == hypervisor &&
-	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
-	    (tlb_type != hypervisor &&
-	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
-		return true;
-	return false;
-}
-#endif
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	struct mm_struct *mm;
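
A note on the removal above: this view is limited to init_64.c, so only
the deletion of is_hugetlb_pte() is visible; the predicate is presumably
relocated and consumed by the flush-path changes in the parts of the
commit not shown here. As a hedged, kernel-context fragment (not the
patch's actual call site; maybe_queue_flush() and tlb_batch_add_one()
are illustrative placeholders), a flush site could use such a predicate
to skip every address except REAL_HPAGE_SIZE-aligned ones:

	/* Illustrative fragment, assuming kernel context (pte_t, mm_struct). */
	static void maybe_queue_flush(struct mm_struct *mm, unsigned long vaddr,
				      pte_t orig)
	{
		if (is_hugetlb_pte(orig)) {
			/* Huge PTE: act only at a 4MB-aligned address ... */
			if (vaddr & ~REAL_HPAGE_MASK)
				return;
			/* ... so one flush covers the whole real hugepage. */
		}
		tlb_batch_add_one(mm, vaddr);	/* hypothetical batching helper */
	}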