author     John David Anglin <dave.anglin@bell.net>   2019-04-27 21:15:38 +0300
committer  Helge Deller <deller@gmx.de>               2019-05-04 00:47:41 +0300
commit     11c03dc85f02901ce9b6a1ced099fdcaeb188572 (patch)
tree       d6c8d57c3a8ad26550d6fae3898ae4e67bb9518f /arch/parisc/mm/hugetlbpage.c
parent     b37d1c1898b288c69f3dc9267bc2c41af06f4a4b (diff)
download   linux-11c03dc85f02901ce9b6a1ced099fdcaeb188572.tar.xz
parisc: Update huge TLB page support to use per-pagetable spinlock
This patch updates the parisc huge TLB page support to use per-pagetable spinlocks.
It requires Mikulas' per-pagetable spinlock patch and the revised TLB serialization
patch from Helge and myself. With Mikulas' patch, page table updates must take the
per-pagetable spinlock; the TLB lock is now used only to serialize TLB flushes on
machines with the Merced bus.
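For context, a minimal sketch of the new locking pattern, assuming only that
pgd_spinlock() (introduced by Mikulas' per-pagetable spinlock patch) returns the
spinlock guarding the page table rooted at the given pgd; the function name
example_set_huge_pte is illustrative and not part of this patch:

/*
 * Illustrative sketch only: page table updates take the per-pagetable
 * spinlock instead of the former purge_tlb_start()/purge_tlb_end() pair.
 */
static void example_set_huge_pte(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);   /* lock this mm's page table */
	__set_huge_pte_at(mm, addr, ptep, entry);          /* update the huge PTE */
	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
}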
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
Diffstat (limited to 'arch/parisc/mm/hugetlbpage.c')
-rw-r--r--  arch/parisc/mm/hugetlbpage.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index d77479ae3af2..d578809e55cf 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -139,9 +139,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long flags;
 
-	purge_tlb_start(flags);
+	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
 	__set_huge_pte_at(mm, addr, ptep, entry);
-	purge_tlb_end(flags);
+	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
 }
 
 
@@ -151,10 +151,10 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	unsigned long flags;
 	pte_t entry;
 
-	purge_tlb_start(flags);
+	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
 	entry = *ptep;
 	__set_huge_pte_at(mm, addr, ptep, __pte(0));
-	purge_tlb_end(flags);
+	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
 
 	return entry;
 }
@@ -166,10 +166,10 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
 	unsigned long flags;
 	pte_t old_pte;
 
-	purge_tlb_start(flags);
+	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
 	old_pte = *ptep;
 	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-	purge_tlb_end(flags);
+	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
 }
 
 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
@@ -178,13 +178,14 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 {
 	unsigned long flags;
 	int changed;
+	struct mm_struct *mm = vma->vm_mm;
 
-	purge_tlb_start(flags);
+	spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
 	changed = !pte_same(*ptep, pte);
 	if (changed) {
-		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+		__set_huge_pte_at(mm, addr, ptep, pte);
 	}
-	purge_tlb_end(flags);
+	spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
 
 	return changed;
 }