Diffstat (limited to 'arch/sparc/mm')
-rw-r--r-- | arch/sparc/mm/fault_32.c    |   4
-rw-r--r-- | arch/sparc/mm/fault_64.c    |  12
-rw-r--r-- | arch/sparc/mm/hugetlbpage.c | 170
-rw-r--r-- | arch/sparc/mm/init_64.c     |  13
-rw-r--r-- | arch/sparc/mm/tlb.c         |   4
-rw-r--r-- | arch/sparc/mm/tsb.c         |  14
6 files changed, 139 insertions, 78 deletions
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index b6c559cbd64d..4714061d6cd3 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -241,7 +241,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -411,7 +411,7 @@ good_area:
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
-	switch (handle_mm_fault(mm, vma, address, flags)) {
+	switch (handle_mm_fault(vma, address, flags)) {
 	case VM_FAULT_SIGBUS:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index cb841a33da59..e16fdd28a931 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -111,8 +111,8 @@ static unsigned int get_user_insn(unsigned long tpc)
 	if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
 		goto out_irq_enable;
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (pmd_trans_huge(*pmdp)) {
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	if (is_hugetlb_pmd(*pmdp)) {
 		pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
 		pa += tpc & ~HPAGE_MASK;
 
@@ -436,7 +436,7 @@ good_area:
 			goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		goto exit_exception;
@@ -476,14 +476,14 @@ good_area:
 	up_read(&mm->mmap_sem);
 
 	mm_rss = get_mm_rss(mm);
-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
 #endif
 	if (unlikely(mm_rss >
 		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
 		tsb_grow(mm, MM_TSB_BASE, mm_rss);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	mm_rss = mm->context.huge_pte_count;
+	mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
 	if (unlikely(mm_rss >
 		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
 		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
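The fault_64.c hunks above split the old huge_pte_count into two counters. THP-backed memory is accounted in get_mm_rss() as base pages while hugetlbfs memory is not, so only thp_pte_count is subtracted when sizing the base-page TSB, and the huge TSB is sized from the sum of both counters. Below is a standalone sketch of that arithmetic, not kernel code; the constants and sample values are illustrative (sparc64 uses 8 KB base pages and 8 MB hugepages):

/* Standalone model of the TSB sizing decision above (not kernel code).
 * One 8 MB huge mapping spans 1024 8 KB base pages. */
#include <stdio.h>

#define PAGE_SHIFT	13UL
#define HPAGE_SHIFT	23UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)

struct ctx {				/* mirrors the split counters */
	unsigned long hugetlb_pte_count;
	unsigned long thp_pte_count;
};

int main(void)
{
	struct ctx c = { .hugetlb_pte_count = 4, .thp_pte_count = 2 };
	unsigned long mm_rss = 10000;	/* sample RSS in base pages */

	/* Base TSB: THP memory sits in RSS as base pages, so it is
	 * subtracted; hugetlbfs memory never entered RSS. */
	unsigned long base = mm_rss -
			     c.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);

	/* Huge TSB: one translation per huge mapping, either source. */
	unsigned long huge = c.hugetlb_pte_count + c.thp_pte_count;

	printf("size base TSB for %lu pages, huge TSB for %lu\n",
	       base, huge);
	return 0;
}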
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index ba52e6466a82..988acc8b1b80 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -12,6 +12,7 @@
 
 #include <asm/mman.h>
 #include <asm/pgalloc.h>
+#include <asm/pgtable.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -131,23 +132,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 {
 	pgd_t *pgd;
 	pud_t *pud;
-	pmd_t *pmd;
 	pte_t *pte = NULL;
 
-	/* We must align the address, because our caller will run
-	 * set_huge_pte_at() on whatever we return, which writes out
-	 * all of the sub-ptes for the hugepage range.  So we have
-	 * to give it the first such sub-pte.
-	 */
-	addr &= HPAGE_MASK;
-
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
-	if (pud) {
-		pmd = pmd_alloc(mm, pud, addr);
-		if (pmd)
-			pte = pte_alloc_map(mm, pmd, addr);
-	}
+	if (pud)
+		pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
 	return pte;
 }
 
@@ -155,19 +146,13 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
-	pmd_t *pmd;
 	pte_t *pte = NULL;
 
-	addr &= HPAGE_MASK;
-
 	pgd = pgd_offset(mm, addr);
 	if (!pgd_none(*pgd)) {
 		pud = pud_offset(pgd, addr);
-		if (!pud_none(*pud)) {
-			pmd = pmd_offset(pud, addr);
-			if (!pmd_none(*pmd))
-				pte = pte_offset_map(pmd, addr);
-		}
+		if (!pud_none(*pud))
+			pte = (pte_t *)pmd_offset(pud, addr);
 	}
 	return pte;
 }
@@ -175,70 +160,143 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
-	int i;
-	pte_t orig[2];
-	unsigned long nptes;
+	pte_t orig;
 
 	if (!pte_present(*ptep) && pte_present(entry))
-		mm->context.huge_pte_count++;
+		mm->context.hugetlb_pte_count++;
 
 	addr &= HPAGE_MASK;
-
-	nptes = 1 << HUGETLB_PAGE_ORDER;
-	orig[0] = *ptep;
-	orig[1] = *(ptep + nptes / 2);
-	for (i = 0; i < nptes; i++) {
-		*ptep = entry;
-		ptep++;
-		addr += PAGE_SIZE;
-		pte_val(entry) += PAGE_SIZE;
-	}
+	orig = *ptep;
+	*ptep = entry;
 
 	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-	addr -= REAL_HPAGE_SIZE;
-	ptep -= nptes / 2;
-	maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
-	addr -= REAL_HPAGE_SIZE;
-	ptep -= nptes / 2;
-	maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
+	maybe_tlb_batch_add(mm, addr, ptep, orig, 0);
+	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0);
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep)
 {
 	pte_t entry;
-	int i;
-	unsigned long nptes;
 
 	entry = *ptep;
 	if (pte_present(entry))
-		mm->context.huge_pte_count--;
+		mm->context.hugetlb_pte_count--;
 
 	addr &= HPAGE_MASK;
-	nptes = 1 << HUGETLB_PAGE_ORDER;
-	for (i = 0; i < nptes; i++) {
-		*ptep = __pte(0UL);
-		addr += PAGE_SIZE;
-		ptep++;
-	}
+	*ptep = __pte(0UL);
 
 	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-	addr -= REAL_HPAGE_SIZE;
-	ptep -= nptes / 2;
-	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
-	addr -= REAL_HPAGE_SIZE;
-	ptep -= nptes / 2;
 	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+	maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0);
 
 	return entry;
 }
 
 int pmd_huge(pmd_t pmd)
 {
-	return 0;
+	return !pmd_none(pmd) &&
+		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
 }
 
 int pud_huge(pud_t pud)
 {
 	return 0;
 }
+
+static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+			   unsigned long addr)
+{
+	pgtable_t token = pmd_pgtable(*pmd);
+
+	pmd_clear(pmd);
+	pte_free_tlb(tlb, token, addr);
+	atomic_long_dec(&tlb->mm->nr_ptes);
+}
+
+static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+				   unsigned long addr, unsigned long end,
+				   unsigned long floor, unsigned long ceiling)
+{
+	pmd_t *pmd;
+	unsigned long next;
+	unsigned long start;
+
+	start = addr;
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none(*pmd))
+			continue;
+		if (is_hugetlb_pmd(*pmd))
+			pmd_clear(pmd);
+		else
+			hugetlb_free_pte_range(tlb, pmd, addr);
+	} while (pmd++, addr = next, addr != end);
+
+	start &= PUD_MASK;
+	if (start < floor)
+		return;
+	if (ceiling) {
+		ceiling &= PUD_MASK;
+		if (!ceiling)
+			return;
+	}
+	if (end - 1 > ceiling - 1)
+		return;
+
+	pmd = pmd_offset(pud, start);
+	pud_clear(pud);
+	pmd_free_tlb(tlb, pmd, start);
+	mm_dec_nr_pmds(tlb->mm);
+}
+
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+				   unsigned long addr, unsigned long end,
+				   unsigned long floor, unsigned long ceiling)
+{
+	pud_t *pud;
+	unsigned long next;
+	unsigned long start;
+
+	start = addr;
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
+				       ceiling);
+	} while (pud++, addr = next, addr != end);
+
+	start &= PGDIR_MASK;
+	if (start < floor)
+		return;
+	if (ceiling) {
+		ceiling &= PGDIR_MASK;
+		if (!ceiling)
+			return;
+	}
+	if (end - 1 > ceiling - 1)
+		return;
+
+	pud = pud_offset(pgd, start);
+	pgd_clear(pgd);
+	pud_free_tlb(tlb, pud, start);
+}
+
+void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+			    unsigned long addr, unsigned long end,
+			    unsigned long floor, unsigned long ceiling)
+{
+	pgd_t *pgd;
+	unsigned long next;
+
+	pgd = pgd_offset(tlb->mm, addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+	} while (pgd++, addr = next, addr != end);
+}
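With an 8 MB hugepage now mapped by a single PMD-level entry, set_huge_pte_at() and huge_ptep_get_and_clear() write one entry instead of 1024 sub-PTEs, yet still issue two flushes: each 8 MB software hugepage is backed by two 4 MB hardware TLB entries. A minimal standalone sketch of the two flush targets follows; the constants are assumed and flush_half() is a hypothetical stand-in for maybe_tlb_batch_add():

/* Sketch (not kernel code) of the two-flush pattern above. */
#include <stdio.h>

#define REAL_HPAGE_SHIFT 22UL			/* 4 MB hardware TLB entry */
#define HPAGE_SHIFT	 23UL			/* 8 MB fabricated hugepage */
#define REAL_HPAGE_SIZE	 (1UL << REAL_HPAGE_SHIFT)
#define HPAGE_MASK	 (~((1UL << HPAGE_SHIFT) - 1))

/* Hypothetical stand-in for maybe_tlb_batch_add(). */
static void flush_half(unsigned long va)
{
	printf("flush 4 MB TLB entry covering %#lx\n", va);
}

int main(void)
{
	unsigned long addr = 0x100654321UL;	/* sample virtual address */

	addr &= HPAGE_MASK;			/* align to the 8 MB region */
	flush_half(addr);			/* lower 4 MB half */
	flush_half(addr + REAL_HPAGE_SIZE);	/* upper 4 MB half */
	return 0;
}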
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 14bb0d5ed3c6..65457c9f1365 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -346,10 +346,13 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
+	if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
+	    is_hugetlb_pte(pte)) {
+		/* We are fabricating 8MB pages using 4MB real hw pages.  */
+		pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
 		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
 					address, pte_val(pte));
-	else
+	} else
 #endif
 		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
 					address, pte_val(pte));
@@ -2704,8 +2707,7 @@ void __flush_tlb_all(void)
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 			    unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-				       __GFP_REPEAT | __GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	pte_t *pte = NULL;
 
 	if (page)
@@ -2717,8 +2719,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 pgtable_t pte_alloc_one(struct mm_struct *mm,
 			unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-				       __GFP_REPEAT | __GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	if (!page)
 		return NULL;
 	if (!pgtable_page_ctor(page)) {
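The update_mmu_cache() hunk above carries the other half of the fabrication scheme: the PTE describes an 8 MB region, but the hardware maps 4 MB at a time, so bit REAL_HPAGE_SHIFT of the faulting address is OR'd into the PTE value and the TSB entry then maps the correct 4 MB half. A small demonstration of that bit selection; the PTE and address values are made up:

/* Demonstrates the half-select OR from update_mmu_cache() (values
 * are invented for illustration, not real sparc64 PTE contents). */
#include <stdio.h>

#define REAL_HPAGE_SHIFT 22UL	/* bit that selects the 4 MB half */

int main(void)
{
	unsigned long pte   = 0x8000012000000UL; /* hypothetical PTE */
	unsigned long lower = 0x100000000UL;	 /* fault in lower half */
	unsigned long upper = 0x100400000UL;	 /* fault in upper half */

	/* Bit 22 of the faulting address picks the hardware page. */
	printf("lower-half pte: %#lx\n",
	       pte | (lower & (1UL << REAL_HPAGE_SHIFT)));
	printf("upper-half pte: %#lx\n",
	       pte | (upper & (1UL << REAL_HPAGE_SHIFT)));
	return 0;
}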
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index f81cd9736700..3659d37b4d81 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -175,9 +175,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 
 	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
 		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
-			mm->context.huge_pte_count++;
+			mm->context.thp_pte_count++;
 		else
-			mm->context.huge_pte_count--;
+			mm->context.thp_pte_count--;
 
 		/* Do not try to allocate the TSB hash table if we
 		 * don't have one already.  We have various locks held
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index a0604a493a36..6725ed45580e 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -470,7 +470,7 @@ retry_tsb_alloc:
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	unsigned long huge_pte_count;
+	unsigned long total_huge_pte_count;
 #endif
 	unsigned int i;
 
@@ -479,12 +479,14 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	mm->context.sparc64_ctx_val = 0UL;
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	/* We reset it to zero because the fork() page copying
+	/* We reset them to zero because the fork() page copying
 	 * will re-increment the counters as the parent PTEs are
 	 * copied into the child address space.
 	 */
-	huge_pte_count = mm->context.huge_pte_count;
-	mm->context.huge_pte_count = 0;
+	total_huge_pte_count = mm->context.hugetlb_pte_count +
+			mm->context.thp_pte_count;
+	mm->context.hugetlb_pte_count = 0;
+	mm->context.thp_pte_count = 0;
 #endif
 
 	/* copy_mm() copies over the parent's mm_struct before calling
@@ -500,8 +502,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (unlikely(huge_pte_count))
-		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+	if (unlikely(total_huge_pte_count))
+		tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
 #endif
 
 	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
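The init_new_context() change completes the counter split: fork() copies the parent's mm_struct wholesale, so the copied counts are summed to pre-size the child's huge TSB and then zeroed, letting the page-copy loop re-increment them as PTEs are duplicated. A small model of that flow; the struct and values are illustrative and tsb_grow_model() merely stands in for tsb_grow():

/* Model (not kernel code) of the fork()-time counter handling. */
#include <stdio.h>

struct ctx {				/* illustrative counter pair */
	unsigned long hugetlb_pte_count;
	unsigned long thp_pte_count;
};

/* Hypothetical stand-in for tsb_grow(): report the sizing decision. */
static void tsb_grow_model(const char *tsb, unsigned long entries)
{
	printf("pre-size %s TSB for %lu huge mappings\n", tsb, entries);
}

int main(void)
{
	/* fork() memcpy()s the parent mm, so the child starts with
	 * the parent's counter values. */
	struct ctx child = { .hugetlb_pte_count = 100, .thp_pte_count = 40 };
	unsigned long total = child.hugetlb_pte_count + child.thp_pte_count;

	/* Reset: page copying re-increments as PTEs are duplicated. */
	child.hugetlb_pte_count = 0;
	child.thp_pte_count = 0;

	if (total)
		tsb_grow_model("huge", total);
	return 0;
}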