Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c |  6
-rw-r--r--  arch/powerpc/mm/mmu_decl.h      | 10
-rw-r--r--  arch/powerpc/mm/pgtable_32.c    | 44
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c    |  4
4 files changed, 22 insertions, 42 deletions
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index f3afe3d97f6b..a1b2713f6e96 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -72,10 +72,11 @@ unsigned long tlbcam_sz(int idx)
 	return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
 }
 
+#ifdef CONFIG_FSL_BOOKE
 /*
  * Return PA for this VA if it is mapped by a CAM, or 0
  */
-phys_addr_t v_mapped_by_tlbcam(unsigned long va)
+phys_addr_t v_block_mapped(unsigned long va)
 {
 	int b;
 	for (b = 0; b < tlbcam_index; ++b)
@@ -87,7 +88,7 @@ phys_addr_t v_mapped_by_tlbcam(unsigned long va)
 /*
  * Return VA for a given PA or 0 if not mapped
  */
-unsigned long p_mapped_by_tlbcam(phys_addr_t pa)
+unsigned long p_block_mapped(phys_addr_t pa)
 {
 	int b;
 	for (b = 0; b < tlbcam_index; ++b)
@@ -97,6 +98,7 @@ unsigned long p_mapped_by_tlbcam(phys_addr_t pa)
 			return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
 	return 0;
 }
+#endif
 
 /*
  * Set up a variable-size TLB entry (tlbcam). The parameters are not checked;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 718076ff0b8a..4b85077d4828 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -159,3 +159,13 @@ struct tlbcam {
 	u32	MAS7;
 };
 #endif
+
+#if defined(CONFIG_6xx) || defined(CONFIG_FSL_BOOKE)
+/* 6xx have BATS */
+/* FSL_BOOKE have TLBCAM */
+phys_addr_t v_block_mapped(unsigned long va);
+unsigned long p_block_mapped(phys_addr_t pa);
+#else
+static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; }
+static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
+#endif
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 7692d1bb1bc6..db0d35e0169b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -41,32 +41,8 @@ unsigned long ioremap_base;
 unsigned long ioremap_bot;
 EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
 
-#ifdef CONFIG_6xx
-#define HAVE_BATS	1
-#endif
-
-#if defined(CONFIG_FSL_BOOKE)
-#define HAVE_TLBCAM	1
-#endif
-
 extern char etext[], _stext[];
 
-#ifdef HAVE_BATS
-extern phys_addr_t v_mapped_by_bats(unsigned long va);
-extern unsigned long p_mapped_by_bats(phys_addr_t pa);
-#else /* !HAVE_BATS */
-#define v_mapped_by_bats(x)	(0UL)
-#define p_mapped_by_bats(x)	(0UL)
-#endif /* HAVE_BATS */
-
-#ifdef HAVE_TLBCAM
-extern phys_addr_t v_mapped_by_tlbcam(unsigned long va);
-extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa);
-#else /* !HAVE_TLBCAM */
-#define v_mapped_by_tlbcam(x)	(0UL)
-#define p_mapped_by_tlbcam(x)	(0UL)
-#endif /* HAVE_TLBCAM */
-
 #define PGDIR_ORDER	(32 + PGD_T_LOG2 - PGDIR_SHIFT)
 
 #ifndef CONFIG_PPC_4K_PAGES
@@ -228,19 +204,10 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 
 	/*
 	 * Is it already mapped?  Perhaps overlapped by a previous
-	 * BAT mapping.  If the whole area is mapped then we're done,
-	 * otherwise remap it since we want to keep the virt addrs for
-	 * each request contiguous.
-	 *
-	 * We make the assumption here that if the bottom and top
-	 * of the range we want are mapped then it's mapped to the
-	 * same virt address (and this is contiguous).
-	 *  -- Cort
+	 * mapping.
 	 */
-	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
-		goto out;
-
-	if ((v = p_mapped_by_tlbcam(p)))
+	v = p_block_mapped(p);
+	if (v)
 		goto out;
 
 	if (slab_is_available()) {
@@ -278,7 +245,8 @@ void iounmap(volatile void __iomem *addr)
 	 * If mapped by BATs then there is nothing to do.
 	 * Calling vfree() generates a benign warning.
 	 */
-	if (v_mapped_by_bats((unsigned long)addr)) return;
+	if (v_block_mapped((unsigned long)addr))
+		return;
 
 	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
 		vunmap((void *) (PAGE_MASK & (unsigned long)addr));
@@ -403,7 +371,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
 	BUG_ON(PageHighMem(page));
 	address = (unsigned long)page_address(page);
 
-	if (v_mapped_by_bats(address) || v_mapped_by_tlbcam(address))
+	if (v_block_mapped(address))
 		return 0;
 	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
 		return -EINVAL;
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 6b2f3e457171..2a049fb8523d 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -49,7 +49,7 @@ struct batrange {		/* stores address ranges mapped by BATs */
 /*
  * Return PA for this VA if it is mapped by a BAT, or 0
  */
-phys_addr_t v_mapped_by_bats(unsigned long va)
+phys_addr_t v_block_mapped(unsigned long va)
 {
 	int b;
 	for (b = 0; b < 4; ++b)
@@ -61,7 +61,7 @@ phys_addr_t v_mapped_by_bats(unsigned long va)
 /*
  * Return VA for a given PA or 0 if not mapped
  */
-unsigned long p_mapped_by_bats(phys_addr_t pa)
+unsigned long p_block_mapped(phys_addr_t pa)
 {
 	int b;
 	for (b = 0; b < 4; ++b)