Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/book3s32/hash_low.S       |  8
-rw-r--r--  arch/powerpc/mm/book3s64/hash_native.c    | 13
-rw-r--r--  arch/powerpc/mm/book3s64/hash_tlb.c       |  4
-rw-r--r--  arch/powerpc/mm/book3s64/iommu_api.c      |  2
-rw-r--r--  arch/powerpc/mm/book3s64/radix_pgtable.c  | 39
-rw-r--r--  arch/powerpc/mm/book3s64/radix_tlb.c      | 10
-rw-r--r--  arch/powerpc/mm/book3s64/subpage_prot.c   |  2
-rw-r--r--  arch/powerpc/mm/copro_fault.c             | 14
-rw-r--r--  arch/powerpc/mm/fault.c                   | 39
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c             |  2
-rw-r--r--  arch/powerpc/mm/init_64.c                 |  2
11 files changed, 61 insertions(+), 74 deletions(-)
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 6925ce998557..a5a21d444e72 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -199,12 +199,12 @@ _GLOBAL(add_hash_page)
lis r6, (mmu_hash_lock - PAGE_OFFSET)@ha
addi r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
10: lwarx r0,0,r6 /* take the mmu_hash_lock */
- cmpi 0,r0,0
+ cmpwi 0,r0,0
bne- 11f
stwcx. r8,0,r6
beq+ 12f
11: lwz r0,0(r6)
- cmpi 0,r0,0
+ cmpwi 0,r0,0
beq 10b
b 11b
12: isync
@@ -512,12 +512,12 @@ _GLOBAL(flush_hash_pages)
lwz r8, TASK_CPU(r8)
oris r8,r8,9
10: lwarx r0,0,r9
- cmpi 0,r0,0
+ cmpwi 0,r0,0
bne- 11f
stwcx. r8,0,r9
beq+ 12f
11: lwz r0,0(r9)
- cmpi 0,r0,0
+ cmpwi 0,r0,0
beq 10b
b 11b
12: isync
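
Both hunks make the same one-character fix: `cmpi` with no explicit length field is ambiguous, and newer binutils warn about or reject it, so the extended mnemonic `cmpwi` spells out a 32-bit word compare; the generated instruction is unchanged. For readers who don't speak PowerPC assembly, here is a minimal userspace C sketch (not kernel code; names are illustrative) of the lwarx/stwcx. lock loop these hunks touch:

    #include <stdatomic.h>

    /* Acquire loop equivalent to the 10:/11:/12: sequence above. */
    static void hash_lock_acquire(atomic_uint *lock, unsigned int token)
    {
            unsigned int expected;

            for (;;) {
                    expected = 0;
                    /* lwarx + stwcx. pair ~= one compare-and-swap attempt */
                    if (atomic_compare_exchange_weak_explicit(
                                    lock, &expected, token,
                                    memory_order_acquire,   /* 12: isync */
                                    memory_order_relaxed))
                            return;
                    /* 11: spin with plain loads (lwz/cmpwi/beq 10b) until free */
                    while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
                            ;
            }
    }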
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 9342e79870df..430d1d935a7c 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -328,10 +328,12 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
static long native_hpte_remove(unsigned long hpte_group)
{
+ unsigned long hpte_v, flags;
struct hash_pte *hptep;
int i;
int slot_offset;
- unsigned long hpte_v;
+
+ local_irq_save(flags);
DBG_LOW(" remove(group=%lx)\n", hpte_group);
@@ -356,13 +358,16 @@ static long native_hpte_remove(unsigned long hpte_group)
slot_offset &= 0x7;
}
- if (i == HPTES_PER_GROUP)
- return -1;
+ if (i == HPTES_PER_GROUP) {
+ i = -1;
+ goto out;
+ }
/* Invalidate the hpte. NOTE: this also unlocks it */
release_hpte_lock();
hptep->v = 0;
-
+out:
+ local_irq_restore(flags);
return i;
}
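
native_hpte_remove() holds the per-HPTE lock while it scans the group (released via release_hpte_lock()), so the hunk disables interrupts for the whole operation, presumably so an interrupt cannot come in and contend for that lock. That in turn forces the early return to become a goto, so local_irq_restore() runs on every exit path. A minimal sketch of the resulting shape; scan_for_slot() is a hypothetical stand-in for the search logic elided from the hunk:

    static long hpte_remove_sketch(unsigned long hpte_group)
    {
            unsigned long flags;
            long i;

            local_irq_save(flags);          /* single entry: irqs off */

            i = scan_for_slot(hpte_group);  /* hypothetical group search */
            if (i < 0) {
                    i = -1;
                    goto out;               /* was "return -1": skipped restore */
            }

            release_hpte_lock();            /* invalidate; also unlocks */
    out:
            local_irq_restore(flags);       /* single exit: irqs back on */
            return i;
    }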
diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c
index a64ea0a7ef96..21fcad97ae80 100644
--- a/arch/powerpc/mm/book3s64/hash_tlb.c
+++ b/arch/powerpc/mm/book3s64/hash_tlb.c
@@ -239,12 +239,16 @@ void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long
local_irq_save(flags);
arch_enter_lazy_mmu_mode();
start_pte = pte_offset_map(pmd, addr);
+ if (!start_pte)
+ goto out;
for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
unsigned long pteval = pte_val(*pte);
if (pteval & H_PAGE_HASHPTE)
hpte_need_flush(mm, addr, pte, pteval, 0);
addr += PAGE_SIZE;
}
+ pte_unmap(start_pte);
+out:
arch_leave_lazy_mmu_mode();
local_irq_restore(flags);
}
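
This hunk adopts the newer pte_offset_map() contract: the call can fail and return NULL (the PTE page may be freed concurrently once page tables can be reclaimed), and every successful map must be balanced by pte_unmap(). A minimal sketch of the walker pattern, not the kernel function, with the per-entry work elided:

    static void walk_one_pte_page(struct mm_struct *mm, pmd_t *pmd,
                                  unsigned long addr)
    {
            pte_t *start_pte, *pte;

            start_pte = pte_offset_map(pmd, addr);
            if (!start_pte)
                    return;                 /* PTE page vanished under us */

            for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
                    /* ... inspect pte_val(*pte) for this addr ... */
                    addr += PAGE_SIZE;
            }

            pte_unmap(start_pte);           /* balance the successful map */
    }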
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index 81d7185e2ae8..d19fb1f3007d 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -105,7 +105,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n,
FOLL_WRITE | FOLL_LONGTERM,
- mem->hpages + entry, NULL);
+ mem->hpages + entry);
if (ret == n) {
pinned += n;
continue;
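
This is a mechanical adaptation to a kernel-wide API change: pin_user_pages() lost its trailing struct vm_area_struct **vmas parameter (callers that did not need it, like this one, passed NULL), so the call site simply drops the argument. The updated prototype, for reference:

    long pin_user_pages(unsigned long start, unsigned long nr_pages,
                        unsigned int gup_flags, struct page **pages);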
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 2297aa764ecd..e7ea492ac510 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -745,9 +745,9 @@ static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
}
static void remove_pte_table(pte_t *pte_start, unsigned long addr,
- unsigned long end)
+ unsigned long end, bool direct)
{
- unsigned long next;
+ unsigned long next, pages = 0;
pte_t *pte;
pte = pte_start + pte_index(addr);
@@ -769,13 +769,16 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
}
pte_clear(&init_mm, addr, pte);
+ pages++;
}
+ if (direct)
+ update_page_count(mmu_virtual_psize, -pages);
}
static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
- unsigned long end)
+ unsigned long end, bool direct)
{
- unsigned long next;
+ unsigned long next, pages = 0;
pte_t *pte_base;
pmd_t *pmd;
@@ -793,19 +796,22 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
continue;
}
pte_clear(&init_mm, addr, (pte_t *)pmd);
+ pages++;
continue;
}
pte_base = (pte_t *)pmd_page_vaddr(*pmd);
- remove_pte_table(pte_base, addr, next);
+ remove_pte_table(pte_base, addr, next, direct);
free_pte_table(pte_base, pmd);
}
+ if (direct)
+ update_page_count(MMU_PAGE_2M, -pages);
}
static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
- unsigned long end)
+ unsigned long end, bool direct)
{
- unsigned long next;
+ unsigned long next, pages = 0;
pmd_t *pmd_base;
pud_t *pud;
@@ -823,16 +829,20 @@ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
continue;
}
pte_clear(&init_mm, addr, (pte_t *)pud);
+ pages++;
continue;
}
pmd_base = pud_pgtable(*pud);
- remove_pmd_table(pmd_base, addr, next);
+ remove_pmd_table(pmd_base, addr, next, direct);
free_pmd_table(pmd_base, pud);
}
+ if (direct)
+ update_page_count(MMU_PAGE_1G, -pages);
}
-static void __meminit remove_pagetable(unsigned long start, unsigned long end)
+static void __meminit remove_pagetable(unsigned long start, unsigned long end,
+ bool direct)
{
unsigned long addr, next;
pud_t *pud_base;
@@ -861,7 +871,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
}
pud_base = p4d_pgtable(*p4d);
- remove_pud_table(pud_base, addr, next);
+ remove_pud_table(pud_base, addr, next, direct);
free_pud_table(pud_base, p4d);
}
@@ -884,7 +894,7 @@ int __meminit radix__create_section_mapping(unsigned long start,
int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
- remove_pagetable(start, end);
+ remove_pagetable(start, end, true);
return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
@@ -902,7 +912,6 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
unsigned long phys)
{
/* Create a PTE encoding */
- unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
int ret;
@@ -911,7 +920,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
return -1;
}
- ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
+ ret = __map_kernel_page_nid(start, phys, PAGE_KERNEL, page_size, nid);
BUG_ON(ret);
return 0;
@@ -920,7 +929,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
- remove_pagetable(start, start + page_size);
+ remove_pagetable(start, start + page_size, false);
}
#endif
#endif
@@ -952,7 +961,7 @@ unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long add
assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif
- old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
+ old = radix__pte_update(mm, addr, pmdp_ptep(pmdp), clr, set, 1);
trace_hugepage_update(addr, old, clr, set);
return old;
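
The bulk of this file's changes thread a new bool direct parameter through the remove_p*_table() hierarchy so that tearing down the linear (direct) mapping decrements the per-size direct-map counters, while vmemmap teardown leaves them alone; the two call sites pass true from radix__remove_section_mapping() and false from radix__vmemmap_remove_mapping(). A sketch of the per-level accounting pattern, shown for the PMD (2M) level with the walk elided:

    static void __meminit remove_pmd_level_sketch(pmd_t *pmd_start,
                                                  unsigned long addr,
                                                  unsigned long end, bool direct)
    {
            unsigned long pages = 0;

            /* ... walk [addr, end), pte_clear() each leaf PMD, pages++ ... */

            if (direct)                     /* linear map only, not vmemmap */
                    update_page_count(MMU_PAGE_2M, -pages);
    }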
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index ce804b7bf84e..0bd4866d9824 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -795,12 +795,20 @@ void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush)
goto out;
if (current->active_mm == mm) {
+ unsigned long flags;
+
WARN_ON_ONCE(current->mm != NULL);
- /* Is a kernel thread and is using mm as the lazy tlb */
+ /*
+ * It is a kernel thread and is using mm as the lazy tlb, so
+ * switch it to init_mm. This is not always called from IPI
+ * (e.g., flush_type_needed), so must disable irqs.
+ */
+ local_irq_save(flags);
mmgrab_lazy_tlb(&init_mm);
current->active_mm = &init_mm;
switch_mm_irqs_off(mm, &init_mm, current);
mmdrop_lazy_tlb(mm);
+ local_irq_restore(flags);
}
/*
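
The added local_irq_save()/local_irq_restore() pair encodes the precondition the new comment spells out: switch_mm_irqs_off() must run with interrupts disabled, and exit_lazy_flush_tlb() is not always reached from an IPI handler (where they already are), e.g. via flush_type_needed(). The mmgrab_lazy_tlb()/mmdrop_lazy_tlb() calls keep the reference counts balanced across the handoff, annotated here for reference:

    unsigned long flags;

    local_irq_save(flags);                  /* not always in IPI context */
    mmgrab_lazy_tlb(&init_mm);              /* ref for the new lazy-TLB owner */
    current->active_mm = &init_mm;
    switch_mm_irqs_off(mm, &init_mm, current);  /* requires irqs off */
    mmdrop_lazy_tlb(mm);                    /* drop the old mm's lazy-TLB ref */
    local_irq_restore(flags);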
diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index b75a9fb99599..0dc85556dec5 100644
--- a/arch/powerpc/mm/book3s64/subpage_prot.c
+++ b/arch/powerpc/mm/book3s64/subpage_prot.c
@@ -71,6 +71,8 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
if (pmd_none(*pmd))
return;
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ if (!pte)
+ return;
arch_enter_lazy_mmu_mode();
for (; npages > 0; --npages) {
pte_update(mm, addr, pte, 0, 0, 0);
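
Same pte_offset_map() family contract as in hash_tlb.c above, but for the locked variant: pte_offset_map_lock() can now return NULL, and the success and failure cases differ in lock state. A short sketch of the caller-side rule:

    spinlock_t *ptl;
    pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

    if (!pte)
            return;                 /* nothing mapped and no lock taken */

    /* ... operate on the PTEs with *ptl held ... */

    pte_unmap_unlock(pte, ptl);     /* success path: unmap and unlock */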
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 7c507fb48182..f49fd873df8d 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -33,19 +33,11 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
if (mm->pgd == NULL)
return -EFAULT;
- mmap_read_lock(mm);
- ret = -EFAULT;
- vma = find_vma(mm, ea);
+ vma = lock_mm_and_find_vma(mm, ea, NULL);
if (!vma)
- goto out_unlock;
-
- if (ea < vma->vm_start) {
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto out_unlock;
- if (expand_stack(vma, ea))
- goto out_unlock;
- }
+ return -EFAULT;
+ ret = -EFAULT;
is_write = dsisr & DSISR_ISSTORE;
if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
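
The open-coded lock/find/expand sequence collapses into lock_mm_and_find_vma(), which takes the mmap read lock, finds the VMA covering the address, and centralizes the VM_GROWSDOWN stack expansion the old code did via expand_stack(). The caller contract, sketched:

    vma = lock_mm_and_find_vma(mm, ea, NULL);   /* NULL regs: no user context */
    if (!vma)
            return -EFAULT;         /* no usable VMA; lock already dropped */

    /* ... check vm_flags, handle_mm_fault(), then mmap_read_unlock(mm) ... */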
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 531177a4ee08..5bfdf6ecfa96 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -84,11 +84,6 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
return __bad_area_nosemaphore(regs, address, si_code);
}
-static noinline int bad_area(struct pt_regs *regs, unsigned long address)
-{
- return __bad_area(regs, address, SEGV_MAPERR);
-}
-
static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
struct vm_area_struct *vma)
{
@@ -515,40 +510,12 @@ lock_mmap:
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
- * exceptions table.
- *
- * As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibility of a deadlock.
- * Attempt to lock the address space, if we cannot we then validate the
- * source. If this is invalid we can skip the address space check,
- * thus avoiding the deadlock.
+ * exceptions table. lock_mm_and_find_vma() handles that logic.
*/
- if (unlikely(!mmap_read_trylock(mm))) {
- if (!is_user && !search_exception_tables(regs->nip))
- return bad_area_nosemaphore(regs, address);
-
retry:
- mmap_read_lock(mm);
- } else {
- /*
- * The above down_read_trylock() might have succeeded in
- * which case we'll have missed the might_sleep() from
- * down_read():
- */
- might_sleep();
- }
-
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (unlikely(!vma))
- return bad_area(regs, address);
-
- if (unlikely(vma->vm_start > address)) {
- if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
- return bad_area(regs, address);
-
- if (unlikely(expand_stack(vma, address)))
- return bad_area(regs, address);
- }
+ return bad_area_nosemaphore(regs, address);
if (unlikely(access_pkey_error(is_write, is_exec,
(error_code & DSISR_KEYFAULT), vma)))
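
Here regs is passed through, so the generic helper can reproduce the deadlock-avoidance logic the deleted comment described: on a trylock miss, a kernel-mode fault may only sleep on mmap_lock if the faulting instruction is covered by the exception tables. A condensed sketch of that part of the generic helper in mm/memory.c; the real code differs in details such as using the killable lock variant:

    if (!mmap_read_trylock(mm)) {
            if (regs && !user_mode(regs) &&
                !search_exception_tables(instruction_pointer(regs)))
                    return NULL;    /* possible deadlock: reject the fault */
            mmap_read_lock(mm);     /* otherwise sleep for the lock */
    }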
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index b900933507da..f7c683b672c1 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -183,7 +183,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
return NULL;
if (IS_ENABLED(CONFIG_PPC_8xx) && pshift < PMD_SHIFT)
- return pte_alloc_map(mm, (pmd_t *)hpdp, addr);
+ return pte_alloc_huge(mm, (pmd_t *)hpdp, addr);
BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
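
Hugetlb code gets its own allocator so it stays off the map/unmap bookkeeping that the generic pte_alloc_map() now implies; pte_alloc_huge() pairs with pte_offset_huge() under hugetlb's own lifetime rules. Its prototype, for reference:

    pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd, unsigned long addr);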
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 05b0d584e50b..fe1b83020e0d 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -189,7 +189,7 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star
unsigned long nr_pfn = page_size / sizeof(struct page);
unsigned long start_pfn = page_to_pfn((struct page *)start);
- if ((start_pfn + nr_pfn) > altmap->end_pfn)
+ if ((start_pfn + nr_pfn - 1) > altmap->end_pfn)
return true;
if (start_pfn < altmap->base_pfn)
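
The init_64.c change is a classic off-by-one: a range of nr_pfn pages starting at start_pfn covers PFNs start_pfn through start_pfn + nr_pfn - 1, so (assuming end_pfn names the last PFN inside the altmap, as the fix implies) a range ending exactly at the boundary was previously misreported as crossing it. Worked numbers:

    /* start_pfn = 100, nr_pfn = 16 -> covers PFNs 100..115; end_pfn = 115 */
    /* old: (100 + 16)     > 115  -> true,  exact fit flagged as crossing  */
    /* new: (100 + 16 - 1) > 115  -> false, exact fit accepted             */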