Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/gup.c                 | 10
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c         |  4
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  | 17
-rw-r--r--  arch/powerpc/mm/pgtable.c             |  4
-rw-r--r--  arch/powerpc/mm/slb.c                 | 13
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c          |  2
6 files changed, 22 insertions(+), 28 deletions(-)
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index bc400c78c97f..bc122a120bf0 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -159,7 +159,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
int psize;
#endif
- pr_debug("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
+ pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
start &= PAGE_MASK;
addr = start;
@@ -170,7 +170,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
start, len)))
goto slow_irqon;
- pr_debug(" aligned: %lx .. %lx\n", start, end);
+ pr_devel(" aligned: %lx .. %lx\n", start, end);
#ifdef CONFIG_HUGETLB_PAGE
/* We bail out on slice boundary crossing when hugetlb is
@@ -234,7 +234,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
do {
VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, a)].shift);
ptep = huge_pte_offset(mm, a);
- pr_debug(" %016lx: huge ptep %p\n", a, ptep);
+ pr_devel(" %016lx: huge ptep %p\n", a, ptep);
if (!ptep || !gup_huge_pte(ptep, hstate, &a, end, write, pages,
&nr))
goto slow;
@@ -249,7 +249,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
#ifdef CONFIG_PPC64
VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
#endif
- pr_debug(" %016lx: normal pgd %p\n", addr,
+ pr_devel(" %016lx: normal pgd %p\n", addr,
(void *)pgd_val(pgd));
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
@@ -269,7 +269,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
slow:
local_irq_enable();
slow_irqon:
- pr_debug(" slow path ! nr = %d\n", nr);
+ pr_devel(" slow path ! nr = %d\n", nr);
/* Try to get the remaining pages with get_user_pages */
start += nr << PAGE_SHIFT;
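
The gup.c hunks above are a straight pr_debug() -> pr_devel() conversion on
the get_user_pages_fast() fast path. The difference matters: with
CONFIG_DYNAMIC_DEBUG enabled, pr_debug() call sites stay compiled in and
carry per-call overhead even when nothing is printed, whereas pr_devel()
compiles away entirely unless the file defines DEBUG. A paraphrased sketch
of the pr_devel() contract (not a verbatim copy of the era's
include/linux/kernel.h):

/* With DEBUG defined, print at KERN_DEBUG; without it, the call
 * disappears at compile time, so it costs nothing in hot paths
 * such as get_user_pages_fast().
 */
#ifdef DEBUG
#define pr_devel(fmt, ...) \
	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_devel(fmt, ...) \
	({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
#endif
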
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9920d6a7cf29..c46ef2ffa3d9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -305,7 +305,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, start);
pud_clear(pud);
- pmd_free_tlb(tlb, pmd);
+ pmd_free_tlb(tlb, pmd, start);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -348,7 +348,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, start);
pgd_clear(pgd);
- pud_free_tlb(tlb, pud);
+ pud_free_tlb(tlb, pud, start);
}
/*
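
The two hugetlbpage.c hunks adapt to a generic-mm interface change that
added the virtual address as a third argument to the page-table freeing
hooks, letting architectures track exactly which range a freed PMD or PUD
page used to map. Schematically (written as prototypes for clarity; in the
tree these are macros/inlines):

/* old shape */
void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

/* new shape: 'addr' is the start of the range the table covered,
 * here the 'start' local that hugetlb_free_pmd_range() and
 * hugetlb_free_pud_range() already compute.
 */
void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr);
void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, unsigned long addr);
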
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 8343986809c0..b1a727def15b 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -89,7 +89,7 @@ static unsigned int steal_context_smp(unsigned int id)
id = first_context;
continue;
}
- pr_debug("[%d] steal context %d from mm @%p\n",
+ pr_devel("[%d] steal context %d from mm @%p\n",
smp_processor_id(), id, mm);
/* Mark this mm as having no context anymore */
@@ -126,7 +126,7 @@ static unsigned int steal_context_up(unsigned int id)
/* Pick up the victim mm */
mm = context_mm[id];
- pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+ pr_devel("[%d] steal context %d from mm @%p\n", cpu, id, mm);
/* Flush the TLB for that context */
local_flush_tlb_mm(mm);
@@ -180,7 +180,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
spin_lock(&context_lock);
#ifndef DEBUG_STEAL_ONLY
- pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n",
+ pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
cpu, next, next->context.active, next->context.id);
#endif
@@ -189,7 +189,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
next->context.active++;
if (prev) {
#ifndef DEBUG_STEAL_ONLY
- pr_debug(" old context %p active was: %d\n",
+ pr_devel(" old context %p active was: %d\n",
prev, prev->context.active);
#endif
WARN_ON(prev->context.active < 1);
@@ -217,6 +217,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
id = steal_context_smp(id);
if (id == MMU_NO_CONTEXT)
goto again;
+ goto stolen;
}
#endif /* CONFIG_SMP */
id = steal_context_up(id);
@@ -236,7 +237,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
next->context.id = id;
#ifndef DEBUG_STEAL_ONLY
- pr_debug("[%d] picked up new id %d, nrf is now %d\n",
+ pr_devel("[%d] picked up new id %d, nrf is now %d\n",
cpu, id, nr_free_contexts);
#endif
@@ -247,7 +248,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
* local TLB for it and unmark it before we use it
*/
if (test_bit(id, stale_map[cpu])) {
- pr_debug("[%d] flushing stale context %d for mm @%p !\n",
+ pr_devel("[%d] flushing stale context %d for mm @%p !\n",
cpu, id, next);
local_flush_tlb_mm(next);
@@ -314,13 +315,13 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu);
+ pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu);
+ pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
kfree(stale_map[cpu]);
stale_map[cpu] = NULL;
break;
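
Besides the pr_devel() conversion, mmu_context_nohash.c gains a real fix:
the "goto stolen;" after a successful steal_context_smp(). Without it,
control fell through past the #endif into steal_context_up() and stole a
second context even though the SMP path had already produced a usable id.
A sketch of the fixed slow path in switch_mmu_context() ("stolen" is an
existing label further down, not visible in these hunks):

	/* No free context ids left: steal one. */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;	/* lost a race, retry */
			goto stolen;		/* got one, skip the UP path */
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
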
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index ae1d67cc090c..627767d6169b 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -129,12 +129,12 @@ static pte_t do_dcache_icache_coherency(pte_t pte)
page = pfn_to_page(pfn);
if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
- pr_debug("do_dcache_icache_coherency... flushing\n");
+ pr_devel("do_dcache_icache_coherency... flushing\n");
flush_dcache_icache_page(page);
set_bit(PG_arch_1, &page->flags);
}
else
- pr_debug("do_dcache_icache_coherency... already clean\n");
+ pr_devel("do_dcache_icache_coherency... already clean\n");
return __pte(pte_val(pte) | _PAGE_HWEXEC);
}
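
The pgtable.c hunk belongs to the same pr_devel() conversion; the function
it touches implements the usual lazy icache-flush idiom the two messages
describe. PG_arch_1 is an arch-private page flag that powerpc uses to mean
"caches already made coherent for this page", so the expensive
flush_dcache_icache_page() runs at most once per page. An annotated
restatement of the logic visible above:

	/* Flush only pages that are not reserved and not yet marked
	 * clean, then record the flush in PG_arch_1 so the next fault
	 * on the same page takes the cheap "already clean" branch.
	 */
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
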
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 3b52c80e5e33..5b7038f248b6 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -14,8 +14,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#undef DEBUG
-
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
@@ -27,11 +25,6 @@
#include <linux/compiler.h>
#include <asm/udbg.h>
-#ifdef DEBUG
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG pr_debug
-#endif
extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);
@@ -285,13 +278,13 @@ void slb_initialize(void)
patch_slb_encoding(slb_compare_rr_to_size,
mmu_slb_size);
- DBG("SLB: linear LLP = %04lx\n", linear_llp);
- DBG("SLB: io LLP = %04lx\n", io_llp);
+ pr_devel("SLB: linear LLP = %04lx\n", linear_llp);
+ pr_devel("SLB: io LLP = %04lx\n", io_llp);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
patch_slb_encoding(slb_miss_kernel_load_vmemmap,
SLB_VSID_KERNEL | vmemmap_llp);
- DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+ pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
}
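
slb.c previously kept a private DBG wrapper (a raw printk() when the file
defined DEBUG, pr_debug() otherwise); the patch deletes both the wrapper
and the "#undef DEBUG" boilerplate and calls pr_devel() directly, which
gives the same "free unless DEBUG" semantics with no local macro. To get
the SLB messages back during bring-up, one hypothetical approach (standard
kbuild per-file flags, not something this patch adds) is:

/* In arch/powerpc/mm/Makefile:
 *   CFLAGS_slb.o += -DDEBUG
 * or, equivalently, at the very top of slb.c before any #include,
 * since pr_devel()'s definition is chosen when printk.h is preprocessed:
 */
#define DEBUG
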
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1be1b5e59796..937eb90677d9 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -72,7 +72,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
*/
if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
- psize = get_slice_psize(mm, addr);;
+ psize = get_slice_psize(mm, addr);
#else
BUG();
psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
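
The tlb_hash64.c hunk only deletes a stray second semicolon, a harmless
empty statement but noise for reviewers and static checkers. For context,
a sketch of the surrounding page-size selection in hpte_need_flush(),
reconstructed from the visible hunk plus the function's general shape (so
treat the final else arm as an assumption):

	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);	/* huge pages: size from the slice map */
#else
		BUG();					/* unreachable without hugetlb */
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);	/* normal pages */
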