author     Nitin Gupta <nitin.m.gupta@oracle.com>    2017-02-24 14:03:16 +0300
committer  David S. Miller <davem@davemloft.net>     2017-02-24 19:26:21 +0300
commit     ac65e2828d03ddf84e9fe1fb6d110d8de933dc22 (patch)
tree       3e593687634e118c46cbd1beb684277b79201faf /arch/sparc/mm
parent     cd429ce2d095041d249ec85feaed608bbf72154f (diff)
sparc64: Fix build error in flush_tsb_user_page
Patch "sparc64: Add 64K page size support" unconditionally used __flush_huge_tsb_one_entry() which is available only when hugetlb support is enabled. Another issue was incorrect TSB flushing for 64K pages in flush_tsb_user(). Signed-off-by: Nitin Gupta <nitin.m.gupta@oracle.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/mm')
 -rw-r--r--  arch/sparc/mm/hugetlbpage.c |  5
 -rw-r--r--  arch/sparc/mm/tsb.c         | 20
 2 files changed, 19 insertions(+), 6 deletions(-)
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 605bfceb7d54..e98a3f2e8f0f 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -309,7 +309,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
 	addr &= ~(size - 1);
 	orig = *ptep;
-	orig_shift = pte_none(orig) ? PAGE_SIZE : huge_tte_to_shift(orig);
+	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);
 
 	for (i = 0; i < nptes; i++)
 		ptep[i] = __pte(pte_val(entry) + (i << shift));
@@ -335,7 +335,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	else
 		nptes = size >> PAGE_SHIFT;
 
-	hugepage_shift = pte_none(entry) ? PAGE_SIZE : huge_tte_to_shift(entry);
+	hugepage_shift = pte_none(entry) ? PAGE_SHIFT :
+			 huge_tte_to_shift(entry);
 
 	if (pte_present(entry))
 		mm->context.hugetlb_pte_count -= nptes;
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index e39fc57ad850..23479c3d39f0 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -120,12 +120,18 @@ void flush_tsb_user(struct tlb_batch *tb)
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	if (tb->hugepage_shift == PAGE_SHIFT) {
+	if (tb->hugepage_shift < HPAGE_SHIFT) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+		if (tb->hugepage_shift == PAGE_SHIFT)
+			__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+		else
+			__flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
+					     tb->hugepage_shift);
+#endif
 	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
@@ -152,8 +158,14 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
 		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries,
-					   hugepage_shift);
+		if (hugepage_shift == PAGE_SHIFT)
+			__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+					      nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+		else
+			__flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+						   nentries, hugepage_shift);
+#endif
 	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {