Diffstat (limited to 'arch/arc/mm')
-rw-r--r--   arch/arc/mm/cache.c     41
-rw-r--r--   arch/arc/mm/dma.c       75
-rw-r--r--   arch/arc/mm/highmem.c    2
-rw-r--r--   arch/arc/mm/ioremap.c   37
-rw-r--r--   arch/arc/mm/tlb.c        8
5 files changed, 105 insertions, 58 deletions
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index b65f797e9ad6..9e5eddbb856f 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -24,13 +24,14 @@ static int l2_line_sz;
 int ioc_exists;
 volatile int slc_enable = 1, ioc_enable = 1;
+unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
 
 void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);
 
-void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
-void (*__dma_cache_inv)(unsigned long start, unsigned long sz);
-void (*__dma_cache_wback)(unsigned long start, unsigned long sz);
+void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
+void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
+void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
 
 char *arc_cache_mumbojumbo(int c, char *buf, int len)
 {
@@ -75,6 +76,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
 static void read_decode_cache_bcr_arcv2(int cpu)
 {
 	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
+	struct bcr_generic uncached_space;
 	struct bcr_generic sbcr;
 
 	struct bcr_slc_cfg {
@@ -104,6 +106,11 @@ static void read_decode_cache_bcr_arcv2(int cpu)
 	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
 	if (cbcr.c && ioc_enable)
 		ioc_exists = 1;
+
+	/* Legacy Data Uncached BCR is deprecated from v3 onwards */
+	READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
+	if (uncached_space.ver > 2)
+		perip_base = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
 }
 
 void read_decode_cache_bcr(void)
@@ -621,7 +628,7 @@ void flush_dcache_page(struct page *page)
 
 		/* kernel reading from page with U-mapping */
 		phys_addr_t paddr = (unsigned long)page_address(page);
-		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+		unsigned long vaddr = page->index << PAGE_SHIFT;
 
 		if (addr_not_cache_congruent(paddr, vaddr))
 			__flush_dcache_page(paddr, vaddr);
@@ -633,38 +640,38 @@ EXPORT_SYMBOL(flush_dcache_page);
  * DMA ops for systems with L1 cache only
  * Make memory coherent with L1 cache by flushing/invalidating L1 lines
  */
-static void __dma_cache_wback_inv_l1(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 }
 
-static void __dma_cache_inv_l1(unsigned long start, unsigned long sz)
+static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_INV);
 }
 
-static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH);
 }
 
 /*
  * DMA ops for systems with both L1 and L2 caches, but without IOC
- * Both L1 and L2 lines need to be explicity flushed/invalidated
+ * Both L1 and L2 lines need to be explicitly flushed/invalidated
  */
-static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 	slc_op(start, sz, OP_FLUSH_N_INV);
 }
 
-static void __dma_cache_inv_slc(unsigned long start, unsigned long sz)
+static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_INV);
 	slc_op(start, sz, OP_INV);
 }
 
-static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
+static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH);
 	slc_op(start, sz, OP_FLUSH);
@@ -675,26 +682,26 @@ static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
  * IOC hardware snoops all DMA traffic keeping the caches consistent with
  * memory - eliding need for any explicit cache maintenance of DMA buffers
  */
-static void __dma_cache_wback_inv_ioc(unsigned long start, unsigned long sz) {}
-static void __dma_cache_inv_ioc(unsigned long start, unsigned long sz) {}
-static void __dma_cache_wback_ioc(unsigned long start, unsigned long sz) {}
+static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
+static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
+static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
 
 /*
  * Exported DMA API
  */
-void dma_cache_wback_inv(unsigned long start, unsigned long sz)
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
 {
 	__dma_cache_wback_inv(start, sz);
 }
 EXPORT_SYMBOL(dma_cache_wback_inv);
 
-void dma_cache_inv(unsigned long start, unsigned long sz)
+void dma_cache_inv(phys_addr_t start, unsigned long sz)
 {
 	__dma_cache_inv(start, sz);
 }
 EXPORT_SYMBOL(dma_cache_inv);
 
-void dma_cache_wback(unsigned long start, unsigned long sz)
+void dma_cache_wback(phys_addr_t start, unsigned long sz)
 {
 	__dma_cache_wback(start, sz);
 }
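The new perip_base probe in read_decode_cache_bcr_arcv2() above only trusts the legacy Data Uncached BCR on cores where it still reports a meaningful version; from v3 onwards the uncached window base is instead read out of AUX_NON_VOL and masked down to a 256MB-aligned boundary. A minimal user-space sketch of that masking follows (the sample register value is illustrative, not taken from the patch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Pretend AUX_NON_VOL reads back with the peripheral window
         * programmed at 0xC0000000 (sample value only) */
        uint32_t aux_non_vol = 0xC0001234;

        /* Keep the top nibble, as the patch does: masking with
         * 0xF0000000 forces a 256MB-aligned window base */
        uint32_t perip_base = aux_non_vol & 0xF0000000;

        printf("perip_base = 0x%08X\n", perip_base); /* 0xC0000000 */
        return 0;
    }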
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 01eaf88bf821..8c8e36fa5659 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -24,22 +24,22 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	void *paddr, *kvaddr;
-
-	/* This is linear addr (0x8000_0000 based) */
-	paddr = alloc_pages_exact(size, gfp);
-	if (!paddr)
+	unsigned long order = get_order(size);
+	struct page *page;
+	phys_addr_t paddr;
+	void *kvaddr;
+	int need_coh = 1, need_kvaddr = 0;
+
+	page = alloc_pages(gfp, order);
+	if (!page)
 		return NULL;
 
-	/* This is bus address, platform dependent */
-	*dma_handle = (dma_addr_t)paddr;
-
 	/*
 	 * IOC relies on all data (even coherent DMA data) being in cache
 	 * Thus allocate normal cached memory
 	 *
 	 * The gains with IOC are two pronged:
-	 *   -For streaming data, elides needs for cache maintenance, saving
+	 *   -For streaming data, elides need for cache maintenance, saving
 	 *    cycles in flush code, and bus bandwidth as all the lines of a
 	 *    buffer need to be flushed out to memory
 	 *   -For coherent data, Read/Write to buffers terminate early in cache
@@ -47,12 +47,31 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	 */
 	if ((is_isa_arcv2() && ioc_exists) ||
 	    dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
-		return paddr;
+		need_coh = 0;
+
+	/*
+	 * - A coherent buffer needs MMU mapping to enforce non-cachability
+	 * - A highmem page needs a virtual handle (hence MMU mapping)
+	 *   independent of cachability
+	 */
+	if (PageHighMem(page) || need_coh)
+		need_kvaddr = 1;
+
+	/* This is linear addr (0x8000_0000 based) */
+	paddr = page_to_phys(page);
+
+	*dma_handle = plat_phys_to_dma(dev, paddr);
 
 	/* This is kernel Virtual address (0x7000_0000 based) */
-	kvaddr = ioremap_nocache((unsigned long)paddr, size);
-	if (kvaddr == NULL)
-		return NULL;
+	if (need_kvaddr) {
+		kvaddr = ioremap_nocache(paddr, size);
+		if (kvaddr == NULL) {
+			__free_pages(page, order);
+			return NULL;
+		}
+	} else {
+		kvaddr = (void *)(u32)paddr;
+	}
 
 	/*
 	 * Evict any existing L1 and/or L2 lines for the backing page
@@ -64,7 +83,8 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	 * Currently flush_cache_vmap nukes the L1 cache completely which
 	 * will be optimized as a separate commit
 	 */
-	dma_cache_wback_inv((unsigned long)paddr, size);
+	if (need_coh)
+		dma_cache_wback_inv(paddr, size);
 
 	return kvaddr;
 }
@@ -72,11 +92,16 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
-	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
-	    !(is_isa_arcv2() && ioc_exists))
+	struct page *page = virt_to_page(dma_handle);
+	int is_non_coh = 1;
+
+	is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
+			(is_isa_arcv2() && ioc_exists);
+
+	if (PageHighMem(page) || !is_non_coh)
 		iounmap((void __force __iomem *)vaddr);
 
-	free_pages_exact((void *)dma_handle, size);
+	__free_pages(page, get_order(size));
 }
 
 /*
@@ -84,7 +109,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
  * CPU accesses page via normal paddr, thus needs to explicitly made
  * consistent before each use
  */
-static void _dma_cache_sync(unsigned long paddr, size_t size,
+static void _dma_cache_sync(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
 	switch (dir) {
@@ -98,7 +123,7 @@ static void _dma_cache_sync(unsigned long paddr, size_t size,
 		dma_cache_wback_inv(paddr, size);
 		break;
 	default:
-		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
 	}
 }
 
@@ -106,9 +131,9 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	unsigned long paddr = page_to_phys(page) + offset;
+	phys_addr_t paddr = page_to_phys(page) + offset;
 	_dma_cache_sync(paddr, size, dir);
-	return (dma_addr_t)paddr;
+	return plat_phys_to_dma(dev, paddr);
 }
 
 static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
@@ -127,13 +152,13 @@ static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
 static void arc_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
-	_dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
+	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
 }
 
 static void arc_dma_sync_single_for_device(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
-	_dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
+	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
 }
 
 static void arc_dma_sync_sg_for_cpu(struct device *dev,
@@ -144,7 +169,7 @@ static void arc_dma_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *sg;
 
 	for_each_sg(sglist, sg, nelems, i)
-		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+		_dma_cache_sync(sg_phys(sg), sg->length, dir);
 }
 
 static void arc_dma_sync_sg_for_device(struct device *dev,
@@ -155,7 +180,7 @@ static void arc_dma_sync_sg_for_device(struct device *dev,
 	struct scatterlist *sg;
 
 	for_each_sg(sglist, sg, nelems, i)
-		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+		_dma_cache_sync(sg_phys(sg), sg->length, dir);
 }
 
 static int arc_dma_supported(struct device *dev, u64 dma_mask)
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 92dd92cad7f9..04f83322c9fd 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -18,7 +18,7 @@
 /*
  * HIGHMEM API:
  *
- * kmap() API provides sleep semantics hence refered to as "permanent maps"
+ * kmap() API provides sleep semantics hence referred to as "permanent maps"
  * It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor
  * for book-keeping
  *
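The reworked arc_dma_alloc() in the dma.c hunks above makes two independent decisions: whether the buffer must be made coherent (need_coh, skipped when IOC snoops DMA traffic or the caller passed DMA_ATTR_NON_CONSISTENT) and whether a kernel virtual mapping is required at all (need_kvaddr, set for any highmem page, and for any coherent buffer so non-cachability can be enforced through the MMU). A standalone sketch of that decision table (plain C; the function and parameter names are illustrative stand-ins, not kernel code):

    #include <stdio.h>

    /* Stand-ins for ioc_exists, DMA_ATTR_NON_CONSISTENT and
     * PageHighMem(page); names here are illustrative only */
    static void decide(int ioc_snoops, int non_consistent, int highmem)
    {
        int need_coh = !(ioc_snoops || non_consistent);
        int need_kvaddr = highmem || need_coh;

        printf("ioc=%d attr=%d highmem=%d -> need_coh=%d need_kvaddr=%d\n",
               ioc_snoops, non_consistent, highmem, need_coh, need_kvaddr);
    }

    int main(void)
    {
        decide(1, 0, 0); /* IOC snoops: cached buffer, no MMU mapping */
        decide(0, 0, 0); /* no IOC: uncached ioremap mapping required */
        decide(1, 0, 1); /* highmem page needs a kernel mapping anyway */
        return 0;
    }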
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 739e65f355de..49b8abd1115c 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -14,18 +14,33 @@
 #include <linux/slab.h>
 #include <linux/cache.h>
 
-void __iomem *ioremap(unsigned long paddr, unsigned long size)
+static inline bool arc_uncached_addr_space(phys_addr_t paddr)
 {
-	unsigned long end;
+	if (is_isa_arcompact()) {
+		if (paddr >= ARC_UNCACHED_ADDR_SPACE)
+			return true;
+	} else if (paddr >= perip_base && paddr <= 0xFFFFFFFF) {
+		return true;
+	}
+
+	return false;
+}
+
+void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
+{
+	phys_addr_t end;
 
 	/* Don't allow wraparound or zero size */
 	end = paddr + size - 1;
 	if (!size || (end < paddr))
 		return NULL;
 
-	/* If the region is h/w uncached, avoid MMU mappings */
-	if (paddr >= ARC_UNCACHED_ADDR_SPACE)
-		return (void __iomem *)paddr;
+	/*
+	 * If the region is h/w uncached, MMU mapping can be elided as optim
+	 * The cast to u32 is fine as this region can only be inside 4GB
+	 */
+	if (arc_uncached_addr_space(paddr))
+		return (void __iomem *)(u32)paddr;
 
 	return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
 }
@@ -41,9 +56,9 @@ EXPORT_SYMBOL(ioremap);
 void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
			   unsigned long flags)
 {
-	void __iomem *vaddr;
+	unsigned long vaddr;
 	struct vm_struct *area;
-	unsigned long off, end;
+	phys_addr_t off, end;
 	pgprot_t prot = __pgprot(flags);
 
 	/* Don't allow wraparound, zero size */
@@ -70,9 +85,8 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 	if (!area)
 		return NULL;
 	area->phys_addr = paddr;
-	vaddr = (void __iomem *)area->addr;
-	if (ioremap_page_range((unsigned long)vaddr,
-			       (unsigned long)vaddr + size, paddr, prot)) {
+	vaddr = (unsigned long)area->addr;
+	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
 		vunmap((void __force *)vaddr);
 		return NULL;
 	}
@@ -83,7 +97,8 @@ EXPORT_SYMBOL(ioremap_prot);
 
 void iounmap(const void __iomem *addr)
 {
-	if (addr >= (void __force __iomem *)ARC_UNCACHED_ADDR_SPACE)
+	/* weird double cast to handle phys_addr_t > 32 bits */
+	if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
 		return;
 
 	vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
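The new arc_uncached_addr_space() helper above encodes two policies: ARCompact keeps the historical fixed uncached window starting at ARC_UNCACHED_ADDR_SPACE, while ARCv2 treats everything from the runtime-probed perip_base up to the top of the 32-bit space as uncached. A user-space sketch of the same predicate (the window bases below are sample values, not taken from the patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Sample window bases; the real ones come from
     * ARC_UNCACHED_ADDR_SPACE and the boot-time perip_base probe */
    #define UNCACHED_BASE_COMPACT 0xC0000000u
    static uint32_t perip_base = 0xF0000000u;

    static bool uncached(bool is_arcompact, uint32_t paddr)
    {
        if (is_arcompact)
            return paddr >= UNCACHED_BASE_COMPACT;
        /* paddr <= 0xFFFFFFFF is implicit for a 32-bit paddr */
        return paddr >= perip_base;
    }

    int main(void)
    {
        printf("%d\n", uncached(true, 0xD0000000u));  /* 1: fixed window */
        printf("%d\n", uncached(false, 0xE0000000u)); /* 0: below perip_base */
        printf("%d\n", uncached(false, 0xF0001000u)); /* 1: dynamic window */
        return 0;
    }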
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index daf2bf52b984..7046c12c58ed 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -45,7 +45,7 @@
  * in interrupt-safe region.
  *
  * Vineetg: April 23rd Bug #93131
- *    Problem: tlb_flush_kernel_range() doesnt do anything if the range to
+ *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
  *              flush is more than the size of TLB itself.
  *
  * Rahul Trivedi : Codito Technologies 2004
@@ -167,7 +167,7 @@ static void utlb_invalidate(void)
 	/* MMU v2 introduced the uTLB Flush command.
 	 * There was however an obscure hardware bug, where uTLB flush would
 	 * fail when a prior probe for J-TLB (both totally unrelated) would
-	 * return lkup err - because the entry didnt exist in MMU.
+	 * return lkup err - because the entry didn't exist in MMU.
 	 * The Workround was to set Index reg with some valid value, prior to
 	 * flush. This was fixed in MMU v3 hence not needed any more
 	 */
@@ -210,7 +210,7 @@ static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
 
 	/*
 	 * Commit the Entry to MMU
-	 * It doesnt sound safe to use the TLBWriteNI cmd here
+	 * It doesn't sound safe to use the TLBWriteNI cmd here
 	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
 	 */
 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
@@ -636,7 +636,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
  * support.
  *
  * Normal and Super pages can co-exist (ofcourse not overlap) in TLB with a
- * new bit "SZ" in TLB page desciptor to distinguish between them.
+ * new bit "SZ" in TLB page descriptor to distinguish between them.
  * Super Page size is configurable in hardware (4K to 16M), but fixed once
 * RTL builds.
 *