author    | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-14 05:24:32 +0300
----------|------------------------------------------------|--------------------------
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-14 05:24:32 +0300
commit    | e5a32b5b21a18d24e9d735891550c194b4c60bd2 (patch) |
tree      | a5c78286f8e6794a5760ab9f3f029c57ea624183 /arch/mips/mm |
parent    | 2280a5360e4ff9320bfb12f760a8e9916ff5e7bc (diff) |
parent    | 22f20a110321efb7cde3e87ae99862e1036ca285 (diff) |
download  | linux-e5a32b5b21a18d24e9d735891550c194b4c60bd2.tar.xz |
Merge tag 'mips_4.19' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux
Pull MIPS updates from Paul Burton:
"Here are the main MIPS changes for 4.19.
An overview of the general architecture changes:
- Massive DMA ops refactoring from Christoph Hellwig (huzzah for
deleting crufty code!).
- We introduce NT_MIPS_DSP & NT_MIPS_FP_MODE ELF notes &
corresponding regsets to expose DSP ASE & floating point mode state
respectively, both for live debugging & core dumps.
- We better optimize our code by hard-coding cpu_has_* macros at
compile time where their values are known due to the ISA revision
that the kernel build is targeting [a sketch of this pattern follows the
shortlog below].
- The EJTAG exception handler now better handles SMP systems, where
it was previously possible for CPUs to clobber a register value
saved by another CPU.
- Our implementation of memset() gained a couple of fixes for MIPSr6
systems to return correct values in some cases where stores fault.
- We now implement ioremap_wc() using the uncached-accelerated cache
coherency attribute where supported, which is detected during boot,
and fall back to plain uncached access where necessary. The
MIPS-specific (and unused in tree) ioremap_uncached_accelerated() &
ioremap_cacheable_cow() are removed [a driver-side usage sketch follows
the diffstat below].
- The prctl(PR_SET_FP_MODE, ...) syscall is better supported for SMP
systems by reworking the way we ensure remote CPUs that may be
running threads within the affected process switch mode [a userspace
sketch follows this message].
- Systems using the MIPS Coherence Manager will now set the
MIPS_IC_SNOOPS_REMOTE flag to avoid some unnecessary cache
maintenance overhead when flushing the icache.
- A few fixes were made for building with clang/LLVM, which now
successfully builds kernels for many of our platforms.
- Miscellaneous cleanups all over.
And some platform-specific changes:
- ar7 gained stubs for a few clock API functions to fix build
failures for some drivers.
- ath79 gained support for a few new SoCs, a few fixes & better
gpio-keys support.
- Ci20 now exposes its SPI bus using the spi-gpio driver.
- The generic platform can now auto-detect a suitable value for
PHYS_OFFSET based upon the memory map described by the device tree,
allowing us to avoid wasting memory on page book-keeping for
systems where RAM starts at a non-zero physical address.
- Ingenic systems using the jz4740 platform code now link their
vmlinuz higher to allow for kernels of a realistic size.
- Loongson32 now builds the kernel targeting MIPSr1 rather than
MIPSr2 to avoid CPU errata.
- Loongson64 gains a couple of fixes, a workaround for a write
buffering issue & support for the Loongson 3A R3.1 CPU.
- Malta now uses the piix4-poweroff driver to handle powering down.
- Microsemi Ocelot gained support for its SPI bus & NOR flash, its
second MDIO bus and can now be supported by a FIT/.itb image.
- Octeon saw a bunch of header cleanups which remove a lot of
duplicate or unused code"
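
For the prctl(PR_SET_FP_MODE, ...) item above, here is a minimal userspace
sketch of the interface whose SMP handling was reworked. This is not code
from the series: the PR_SET_FP_MODE/PR_GET_FP_MODE options and the
PR_FP_MODE_FR/PR_FP_MODE_FRE flags are the existing constants from
<linux/prctl.h>, and the error handling is illustrative only.

        #include <stdio.h>
        #include <errno.h>
        #include <string.h>
        #include <sys/prctl.h>
        #include <linux/prctl.h>

        int main(void)
        {
                int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

                if (mode < 0) {
                        fprintf(stderr, "PR_GET_FP_MODE: %s\n", strerror(errno));
                        return 1;
                }
                printf("FR=%d FRE=%d\n",
                       !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));

                /*
                 * Ask for FR=1. The mode is per-process, so the kernel has to
                 * make sure threads currently running on other CPUs pick up the
                 * new FPU register model too - that is the SMP handling the
                 * pull message describes.
                 */
                if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) < 0) {
                        fprintf(stderr, "PR_SET_FP_MODE: %s\n", strerror(errno));
                        return 1;
                }
                return 0;
        }

These prctl options are MIPS-specific; other architectures reject them, so
the sketch only does something useful on a MIPS system with a dual-modal FPU.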
* tag 'mips_4.19' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (123 commits)
MIPS: Remove remnants of UASM_ISA
MIPS: netlogic: xlr: Remove erroneous check in nlm_fmn_send()
MIPS: VDSO: Force link endianness
MIPS: Always specify -EB or -EL when using clang
MIPS: Use dins to simplify __write_64bit_c0_split()
MIPS: Use read-write output operand in __write_64bit_c0_split()
MIPS: Avoid using array as parameter to write_c0_kpgd()
MIPS: vdso: Allow clang's --target flag in VDSO cflags
MIPS: genvdso: Remove GOT checks
MIPS: Remove obsolete MIPS checks for DST node "chosen@0"
MIPS: generic: Remove input symbols from defconfig
MIPS: Delete unused code in linux32.c
MIPS: Remove unused sys_32_mmap2
MIPS: Remove nabi_no_regargs
mips: dts: mscc: enable spi and NOR flash support on ocelot PCB123
mips: dts: mscc: Add spi on Ocelot
MIPS: Loongson: Merge load addresses
MIPS: Loongson: Set Loongson32 to MIPS32R1
MIPS: mscc: ocelot: add interrupt controller properties to GPIO controller
MIPS: generic: Select MIPS_AUTO_PFN_OFFSET
...
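
To illustrate the cpu_has_* hard-coding mentioned in the architecture list
above, here is a hand-written sketch of the pattern rather than code from the
series; cpu_has_example_feature and MIPS_CPU_EXAMPLE are invented names. When
the ISA revision the kernel is built for (visible through the compiler
predefine __mips_isa_rev) guarantees a feature, the macro becomes a
compile-time constant instead of a boot-time probe.

        #ifndef cpu_has_example_feature
        # if defined(__mips_isa_rev) && __mips_isa_rev >= 6
           /* Every MIPSr6 implementation has the feature: fold it to a constant. */
        #  define cpu_has_example_feature 1
        # else
           /* Otherwise keep testing the option bit probed at boot for this CPU. */
        #  define cpu_has_example_feature (cpu_data[0].options & MIPS_CPU_EXAMPLE)
        # endif
        #endif

With the constant form, code such as if (!cpu_has_example_feature) compiles
away entirely, which is where the size and speed win comes from.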
Diffstat (limited to 'arch/mips/mm')
-rw-r--r-- | arch/mips/mm/Makefile          |   3
-rw-r--r-- | arch/mips/mm/c-r4k.c           |  18
-rw-r--r-- | arch/mips/mm/cache.c           |   4
-rw-r--r-- | arch/mips/mm/dma-default.c     | 404
-rw-r--r-- | arch/mips/mm/dma-noncoherent.c | 208
-rw-r--r-- | arch/mips/mm/page.c            |  15
-rw-r--r-- | arch/mips/mm/tlbex.c           |   2
-rw-r--r-- | arch/mips/mm/uasm-micromips.c  |   1
-rw-r--r-- | arch/mips/mm/uasm-mips.c       |   1
9 files changed, 227 insertions, 429 deletions
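
The ioremap_wc() change mentioned in the pull message is transparent to
drivers; below is a hedged driver-side sketch (the function names, variables
and the framebuffer scenario are invented for illustration). The architecture
now picks the uncached-accelerated cache attribute when boot-time detection
says it is available, and otherwise falls back to a plain uncached mapping.

        #include <linux/errno.h>
        #include <linux/io.h>

        static void __iomem *example_fb;

        static int example_map_framebuffer(phys_addr_t base, size_t size)
        {
                /*
                 * Request a write-combining mapping. On MIPS this now resolves
                 * to the uncached-accelerated CCA where supported, or plain
                 * uncached otherwise - the caller does not need to care which.
                 */
                example_fb = ioremap_wc(base, size);
                if (!example_fb)
                        return -ENOMEM;

                memset_io(example_fb, 0, size); /* streaming stores may combine */
                return 0;
        }

        static void example_unmap_framebuffer(void)
        {
                iounmap(example_fb);
                example_fb = NULL;
        }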
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index c463bdad45c7..3e5bb203c95a 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -3,7 +3,7 @@ # Makefile for the Linux/MIPS-specific parts of the memory manager. # -obj-y += cache.o dma-default.o extable.o fault.o \ +obj-y += cache.o extable.o fault.o \ gup.o init.o mmap.o page.o page-funcs.o \ pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o @@ -17,6 +17,7 @@ obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o obj-$(CONFIG_HIGHMEM) += highmem.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o +obj-$(CONFIG_DMA_NONCOHERENT) += dma-noncoherent.o obj-$(CONFIG_CPU_R4K_CACHE_TLB) += c-r4k.o cex-gen.o tlb-r4k.o obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index e12dfa48b478..a9ef057c79fe 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -830,12 +830,13 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end) return __r4k_flush_icache_range(start, end, true); } -#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT) +#ifdef CONFIG_DMA_NONCOHERENT static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) { /* Catch bad driver code */ - BUG_ON(size == 0); + if (WARN_ON(size == 0)) + return; preempt_disable(); if (cpu_has_inclusive_pcaches) { @@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) { /* Catch bad driver code */ - BUG_ON(size == 0); + if (WARN_ON(size == 0)) + return; preempt_disable(); if (cpu_has_inclusive_pcaches) { @@ -904,7 +906,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) bc_inv(addr, size); __sync(); } -#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */ +#endif /* CONFIG_DMA_NONCOHERENT */ struct flush_cache_sigtramp_args { struct mm_struct *mm; @@ -1505,6 +1507,14 @@ static void probe_pcache(void) if (c->dcache.flags & MIPS_CACHE_PINDEX) c->dcache.flags &= ~MIPS_CACHE_ALIASES; + /* + * In systems with CM the icache fills from L2 or closer caches, and + * thus sees remote stores without needing to write them back any + * further than that. + */ + if (mips_cm_present()) + c->icache.flags |= MIPS_IC_SNOOPS_REMOTE; + switch (current_cpu_type()) { case CPU_20KC: /* diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 0d3c656feba0..70a523151ff3 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(local_flush_data_cache_page); EXPORT_SYMBOL(flush_data_cache_page); EXPORT_SYMBOL(flush_icache_all); -#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT) +#ifdef CONFIG_DMA_NONCOHERENT /* DMA cache operations. */ void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size); @@ -65,7 +65,7 @@ void (*_dma_cache_inv)(unsigned long start, unsigned long size); EXPORT_SYMBOL(_dma_cache_wback_inv); -#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */ +#endif /* CONFIG_DMA_NONCOHERENT */ /* * We could optimize the case where the cache argument is not BCACHE but diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c deleted file mode 100644 index f9fef0028ca2..000000000000 --- a/arch/mips/mm/dma-default.c +++ /dev/null @@ -1,404 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. 
See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> - * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org> - * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. - */ - -#include <linux/types.h> -#include <linux/dma-mapping.h> -#include <linux/mm.h> -#include <linux/export.h> -#include <linux/scatterlist.h> -#include <linux/string.h> -#include <linux/gfp.h> -#include <linux/highmem.h> -#include <linux/dma-contiguous.h> - -#include <asm/cache.h> -#include <asm/cpu-type.h> -#include <asm/io.h> - -#include <dma-coherence.h> - -#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT) -/* User defined DMA coherency from command line. */ -enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT; -EXPORT_SYMBOL_GPL(coherentio); -int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */ - -static int __init setcoherentio(char *str) -{ - coherentio = IO_COHERENCE_ENABLED; - pr_info("Hardware DMA cache coherency (command line)\n"); - return 0; -} -early_param("coherentio", setcoherentio); - -static int __init setnocoherentio(char *str) -{ - coherentio = IO_COHERENCE_DISABLED; - pr_info("Software DMA cache coherency (command line)\n"); - return 0; -} -early_param("nocoherentio", setnocoherentio); -#endif - -static inline struct page *dma_addr_to_page(struct device *dev, - dma_addr_t dma_addr) -{ - return pfn_to_page( - plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT); -} - -/* - * The affected CPUs below in 'cpu_needs_post_dma_flush()' can - * speculatively fill random cachelines with stale data at any time, - * requiring an extra flush post-DMA. - * - * Warning on the terminology - Linux calls an uncached area coherent; - * MIPS terminology calls memory areas with hardware maintained coherency - * coherent. - * - * Note that the R14000 and R16000 should also be checked for in this - * condition. However this function is only called on non-I/O-coherent - * systems and only the R10000 and R12000 are used in such systems, the - * SGI IP28 Indigo² rsp. SGI IP32 aka O2. - */ -static inline bool cpu_needs_post_dma_flush(struct device *dev) -{ - if (plat_device_is_coherent(dev)) - return false; - - switch (boot_cpu_type()) { - case CPU_R10000: - case CPU_R12000: - case CPU_BMIPS5000: - return true; - - default: - /* - * Presence of MAARs suggests that the CPU supports - * speculatively prefetching data, and therefore requires - * the post-DMA flush/invalidate. 
- */ - return cpu_has_maar; - } -} - -static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) -{ - gfp_t dma_flag; - -#ifdef CONFIG_ISA - if (dev == NULL) - dma_flag = __GFP_DMA; - else -#endif -#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA) - if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32)) - dma_flag = __GFP_DMA; - else if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) - dma_flag = __GFP_DMA32; - else -#endif -#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA) - if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64)) - dma_flag = __GFP_DMA32; - else -#endif -#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) - if (dev == NULL || - dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8)) - dma_flag = __GFP_DMA; - else -#endif - dma_flag = 0; - - /* Don't invoke OOM killer */ - gfp |= __GFP_NORETRY; - - return gfp | dma_flag; -} - -static void *mips_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) -{ - void *ret; - struct page *page = NULL; - unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - - gfp = massage_gfp_flags(dev, gfp); - - if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp)) - page = dma_alloc_from_contiguous(dev, count, get_order(size), - gfp); - if (!page) - page = alloc_pages(gfp, get_order(size)); - - if (!page) - return NULL; - - ret = page_address(page); - memset(ret, 0, size); - *dma_handle = plat_map_dma_mem(dev, ret, size); - if (!(attrs & DMA_ATTR_NON_CONSISTENT) && - !plat_device_is_coherent(dev)) { - dma_cache_wback_inv((unsigned long) ret, size); - ret = UNCAC_ADDR(ret); - } - - return ret; -} - -static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) -{ - unsigned long addr = (unsigned long) vaddr; - unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - struct page *page = NULL; - - plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); - - if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev)) - addr = CAC_ADDR(addr); - - page = virt_to_page((void *) addr); - - if (!dma_release_from_contiguous(dev, page, count)) - __free_pages(page, get_order(size)); -} - -static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - unsigned long user_count = vma_pages(vma); - unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; - unsigned long addr = (unsigned long)cpu_addr; - unsigned long off = vma->vm_pgoff; - unsigned long pfn; - int ret = -ENXIO; - - if (!plat_device_is_coherent(dev)) - addr = CAC_ADDR(addr); - - pfn = page_to_pfn(virt_to_page((void *)addr)); - - if (attrs & DMA_ATTR_WRITE_COMBINE) - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); - else - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - - if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) - return ret; - - if (off < count && user_count <= (count - off)) { - ret = remap_pfn_range(vma, vma->vm_start, - pfn + off, - user_count << PAGE_SHIFT, - vma->vm_page_prot); - } - - return ret; -} - -static inline void __dma_sync_virtual(void *addr, size_t size, - enum dma_data_direction direction) -{ - switch (direction) { - case DMA_TO_DEVICE: - dma_cache_wback((unsigned long)addr, size); - break; - - case DMA_FROM_DEVICE: - dma_cache_inv((unsigned long)addr, size); - break; - - case DMA_BIDIRECTIONAL: - dma_cache_wback_inv((unsigned long)addr, size); - break; - 
- default: - BUG(); - } -} - -/* - * A single sg entry may refer to multiple physically contiguous - * pages. But we still need to process highmem pages individually. - * If highmem is not configured then the bulk of this loop gets - * optimized out. - */ -static inline void __dma_sync(struct page *page, - unsigned long offset, size_t size, enum dma_data_direction direction) -{ - size_t left = size; - - do { - size_t len = left; - - if (PageHighMem(page)) { - void *addr; - - if (offset + len > PAGE_SIZE) { - if (offset >= PAGE_SIZE) { - page += offset >> PAGE_SHIFT; - offset &= ~PAGE_MASK; - } - len = PAGE_SIZE - offset; - } - - addr = kmap_atomic(page); - __dma_sync_virtual(addr + offset, len, direction); - kunmap_atomic(addr); - } else - __dma_sync_virtual(page_address(page) + offset, - size, direction); - offset = 0; - page++; - left -= len; - } while (left); -} - -static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction direction, unsigned long attrs) -{ - if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - __dma_sync(dma_addr_to_page(dev, dma_addr), - dma_addr & ~PAGE_MASK, size, direction); - plat_post_dma_flush(dev); - plat_unmap_dma_mem(dev, dma_addr, size, direction); -} - -static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist, - int nents, enum dma_data_direction direction, unsigned long attrs) -{ - int i; - struct scatterlist *sg; - - for_each_sg(sglist, sg, nents, i) { - if (!plat_device_is_coherent(dev) && - !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - __dma_sync(sg_page(sg), sg->offset, sg->length, - direction); -#ifdef CONFIG_NEED_SG_DMA_LENGTH - sg->dma_length = sg->length; -#endif - sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) + - sg->offset; - } - - return nents; -} - -static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction direction, - unsigned long attrs) -{ - if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - __dma_sync(page, offset, size, direction); - - return plat_map_dma_mem_page(dev, page) + offset; -} - -static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, - int nhwentries, enum dma_data_direction direction, - unsigned long attrs) -{ - int i; - struct scatterlist *sg; - - for_each_sg(sglist, sg, nhwentries, i) { - if (!plat_device_is_coherent(dev) && - !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - direction != DMA_TO_DEVICE) - __dma_sync(sg_page(sg), sg->offset, sg->length, - direction); - plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction); - } -} - -static void mips_dma_sync_single_for_cpu(struct device *dev, - dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) -{ - if (cpu_needs_post_dma_flush(dev)) - __dma_sync(dma_addr_to_page(dev, dma_handle), - dma_handle & ~PAGE_MASK, size, direction); - plat_post_dma_flush(dev); -} - -static void mips_dma_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) -{ - if (!plat_device_is_coherent(dev)) - __dma_sync(dma_addr_to_page(dev, dma_handle), - dma_handle & ~PAGE_MASK, size, direction); -} - -static void mips_dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sglist, int nelems, - enum dma_data_direction direction) -{ - int i; - struct scatterlist *sg; - - if (cpu_needs_post_dma_flush(dev)) { - for_each_sg(sglist, sg, nelems, i) { - __dma_sync(sg_page(sg), sg->offset, sg->length, - direction); - } 
- } - plat_post_dma_flush(dev); -} - -static void mips_dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sglist, int nelems, - enum dma_data_direction direction) -{ - int i; - struct scatterlist *sg; - - if (!plat_device_is_coherent(dev)) { - for_each_sg(sglist, sg, nelems, i) { - __dma_sync(sg_page(sg), sg->offset, sg->length, - direction); - } - } -} - -static int mips_dma_supported(struct device *dev, u64 mask) -{ - return plat_dma_supported(dev, mask); -} - -static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - if (!plat_device_is_coherent(dev)) - __dma_sync_virtual(vaddr, size, direction); -} - -static const struct dma_map_ops mips_default_dma_map_ops = { - .alloc = mips_dma_alloc_coherent, - .free = mips_dma_free_coherent, - .mmap = mips_dma_mmap, - .map_page = mips_dma_map_page, - .unmap_page = mips_dma_unmap_page, - .map_sg = mips_dma_map_sg, - .unmap_sg = mips_dma_unmap_sg, - .sync_single_for_cpu = mips_dma_sync_single_for_cpu, - .sync_single_for_device = mips_dma_sync_single_for_device, - .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu, - .sync_sg_for_device = mips_dma_sync_sg_for_device, - .dma_supported = mips_dma_supported, - .cache_sync = mips_dma_cache_sync, -}; - -const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; -EXPORT_SYMBOL(mips_dma_map_ops); diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c new file mode 100644 index 000000000000..2aca1236af36 --- /dev/null +++ b/arch/mips/mm/dma-noncoherent.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> + * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org> + * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. + */ +#include <linux/dma-direct.h> +#include <linux/dma-noncoherent.h> +#include <linux/dma-contiguous.h> +#include <linux/highmem.h> + +#include <asm/cache.h> +#include <asm/cpu-type.h> +#include <asm/dma-coherence.h> +#include <asm/io.h> + +#ifdef CONFIG_DMA_PERDEV_COHERENT +static inline int dev_is_coherent(struct device *dev) +{ + return dev->archdata.dma_coherent; +} +#else +static inline int dev_is_coherent(struct device *dev) +{ + switch (coherentio) { + default: + case IO_COHERENCE_DEFAULT: + return hw_coherentio; + case IO_COHERENCE_ENABLED: + return 1; + case IO_COHERENCE_DISABLED: + return 0; + } +} +#endif /* CONFIG_DMA_PERDEV_COHERENT */ + +/* + * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively + * fill random cachelines with stale data at any time, requiring an extra + * flush post-DMA. + * + * Warning on the terminology - Linux calls an uncached area coherent; MIPS + * terminology calls memory areas with hardware maintained coherency coherent. + * + * Note that the R14000 and R16000 should also be checked for in this condition. + * However this function is only called on non-I/O-coherent systems and only the + * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp. + * SGI IP32 aka O2. + */ +static inline bool cpu_needs_post_dma_flush(struct device *dev) +{ + if (dev_is_coherent(dev)) + return false; + + switch (boot_cpu_type()) { + case CPU_R10000: + case CPU_R12000: + case CPU_BMIPS5000: + return true; + default: + /* + * Presence of MAARs suggests that the CPU supports + * speculatively prefetching data, and therefore requires + * the post-DMA flush/invalidate. 
+ */ + return cpu_has_maar; + } +} + +void *arch_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) +{ + void *ret; + + ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs); + if (!ret) + return NULL; + + if (!dev_is_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) { + dma_cache_wback_inv((unsigned long) ret, size); + ret = (void *)UNCAC_ADDR(ret); + } + + return ret; +} + +void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs) +{ + if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_coherent(dev)) + cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr); + dma_direct_free(dev, size, cpu_addr, dma_addr, attrs); +} + +int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + unsigned long user_count = vma_pages(vma); + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; + unsigned long addr = (unsigned long)cpu_addr; + unsigned long off = vma->vm_pgoff; + unsigned long pfn; + int ret = -ENXIO; + + if (!dev_is_coherent(dev)) + addr = CAC_ADDR(addr); + + pfn = page_to_pfn(virt_to_page((void *)addr)); + + if (attrs & DMA_ATTR_WRITE_COMBINE) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + else + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) + return ret; + + if (off < count && user_count <= (count - off)) { + ret = remap_pfn_range(vma, vma->vm_start, + pfn + off, + user_count << PAGE_SHIFT, + vma->vm_page_prot); + } + + return ret; +} + +static inline void dma_sync_virt(void *addr, size_t size, + enum dma_data_direction dir) +{ + switch (dir) { + case DMA_TO_DEVICE: + dma_cache_wback((unsigned long)addr, size); + break; + + case DMA_FROM_DEVICE: + dma_cache_inv((unsigned long)addr, size); + break; + + case DMA_BIDIRECTIONAL: + dma_cache_wback_inv((unsigned long)addr, size); + break; + + default: + BUG(); + } +} + +/* + * A single sg entry may refer to multiple physically contiguous pages. But + * we still need to process highmem pages individually. If highmem is not + * configured then the bulk of this loop gets optimized out. 
+ */ +static inline void dma_sync_phys(phys_addr_t paddr, size_t size, + enum dma_data_direction dir) +{ + struct page *page = pfn_to_page(paddr >> PAGE_SHIFT); + unsigned long offset = paddr & ~PAGE_MASK; + size_t left = size; + + do { + size_t len = left; + + if (PageHighMem(page)) { + void *addr; + + if (offset + len > PAGE_SIZE) { + if (offset >= PAGE_SIZE) { + page += offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; + } + len = PAGE_SIZE - offset; + } + + addr = kmap_atomic(page); + dma_sync_virt(addr + offset, len, dir); + kunmap_atomic(addr); + } else + dma_sync_virt(page_address(page) + offset, size, dir); + offset = 0; + page++; + left -= len; + } while (left); +} + +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) +{ + if (!dev_is_coherent(dev)) + dma_sync_phys(paddr, size, dir); +} + +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) +{ + if (cpu_needs_post_dma_flush(dev)) + dma_sync_phys(paddr, size, dir); +} + +void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); + + if (!dev_is_coherent(dev)) + dma_sync_virt(vaddr, size, direction); +} diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index d5d02993aa21..56e4f8bffd4c 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -623,21 +623,6 @@ struct dmadscr { u64 pad_b; } ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS]; -void sb1_dma_init(void) -{ - int i; - - for (i = 0; i < DM_NUM_CHANNELS; i++) { - const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) | - V_DM_DSCR_BASE_RINGSZ(1); - void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE)); - - __raw_writeq(base_val, base_reg); - __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg); - __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg); - } -} - void clear_page(void *page) { u64 to_phys = CPHYSADDR((unsigned long)page); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 79b9f2ad3ff5..49312a14cd17 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1509,7 +1509,7 @@ static void setup_pw(void) #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT write_c0_pwctl(1 << 6 | psn); #endif - write_c0_kpgd(swapper_pg_dir); + write_c0_kpgd((long)swapper_pg_dir); kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */ } diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index 9bb6baa45da3..24e5b0d06899 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c @@ -19,7 +19,6 @@ #include <asm/inst.h> #include <asm/elf.h> #include <asm/bugs.h> -#define UASM_ISA _UASM_ISA_MICROMIPS #include <asm/uasm.h> #define RS_MASK 0x1f diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 9fea6c6bbf49..60ceb93c71a0 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -19,7 +19,6 @@ #include <asm/inst.h> #include <asm/elf.h> #include <asm/bugs.h> -#define UASM_ISA _UASM_ISA_CLASSIC #include <asm/uasm.h> #define RS_MASK 0x1f |
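
For completeness, a sketch of how the code added in the diff above is
exercised; the driver-side code here is illustrative, not from the series.
Drivers keep using the ordinary streaming-DMA API, and on a non-coherent MIPS
system the generic DMA code now ends up in arch_sync_dma_for_device() and
arch_sync_dma_for_cpu() from dma-noncoherent.c instead of going through the
removed mips_dma_map_ops.

        #include <linux/dma-mapping.h>

        static int example_send_buffer(struct device *dev, void *buf, size_t len)
        {
                dma_addr_t handle;

                /*
                 * On a non-coherent system this writes back the CPU caches
                 * covering 'buf' (dma_sync_phys() in the new file) before the
                 * bus address is handed to the device.
                 */
                handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, handle))
                        return -ENOMEM;

                /* ... program the device to read from 'handle' ... */

                /* The matching unmap keeps the CPU's view consistent again. */
                dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
                return 0;
        }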