From 8c7ea50c010b2f1e006ad37c43f98202a31de2cb Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Thu, 9 Jul 2015 17:28:16 -0700 Subject: x86/mm, asm-generic: Add IOMMU ioremap_uc() variant default We currently have no safe way of defining architecture-agnostic IOMMU ioremap_*() variants. The trend is for folks to *assume* that ioremap_nocache() should be the default everywhere and then add this mapping on each architecture -- this is not correct today for a variety of reasons. We have two options: 1) Sit and wait for every architecture in Linux to get an ioremap_*() variant defined before including it upstream. 2) Gather consensus on a safe architecture-agnostic ioremap_*() default. Approach 1) introduces development latencies, and since 2) will take time and work on clarifying semantics, the only remaining sensible thing to do to avoid issues is to return NULL on ioremap_*() variants. In order for this to work we must have all architectures declare their own ioremap_*() variants as defined. This will take some work; do this for ioremap_uc() to set the example, as it is currently only implemented on x86. Document all this. We only provide implementation support for ioremap_uc(), as the other ioremap_*() variants are already well defined all over the kernel for other architectures. Signed-off-by: Luis R. Rodriguez Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: arnd@arndb.de Cc: benh@kernel.crashing.org Cc: bp@suse.de Cc: dan.j.williams@intel.com Cc: geert@linux-m68k.org Cc: hch@lst.de Cc: hmh@hmh.eng.br Cc: jgross@suse.com Cc: linux-mm@kvack.org Cc: luto@amacapital.net Cc: mpe@ellerman.id.au Cc: mst@redhat.com Cc: ralf@linux-mips.org Cc: ross.zwisler@linux.intel.com Cc: stefan.bader@canonical.com Cc: tj@kernel.org Cc: tomi.valkeinen@ti.com Cc: toshi.kani@hp.com Link: http://lkml.kernel.org/r/1436488096-3165-1-git-send-email-mcgrof@do-not-panic.com Signed-off-by: Ingo Molnar --- include/asm-generic/io.h | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index f56094cfdeff..eed3bbe88c8a 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -736,6 +736,35 @@ static inline void *phys_to_virt(unsigned long address) } #endif +/** + * DOC: ioremap() and ioremap_*() variants + * + * If you have an IOMMU your architecture is expected to have both ioremap() + * and iounmap() implemented otherwise the asm-generic helpers will provide a + * direct mapping. + * + * There are ioremap_*() call variants, if you have no IOMMU we naturally will + * default to direct mapping for all of them, you can override these defaults. + * If you have an IOMMU you are highly encouraged to provide your own + * ioremap variant implementation as there currently is no safe architecture + * agnostic default. To avoid possible improper behaviour default asm-generic + * ioremap_*() variants all return NULL when an IOMMU is available. If you've + * defined your own ioremap_*() variant you must then declare your own + * ioremap_*() variant as defined to itself to avoid the default NULL return. + */ + +#ifdef CONFIG_MMU + +#ifndef ioremap_uc +#define ioremap_uc ioremap_uc +static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) +{ + return NULL; +} +#endif + +#else /* !CONFIG_MMU */ + /* * Change "struct page" to physical address.
* @@ -743,7 +772,6 @@ static inline void *phys_to_virt(unsigned long address) * you'll need to provide your own definitions. */ -#ifndef CONFIG_MMU #ifndef ioremap #define ioremap ioremap static inline void __iomem *ioremap(phys_addr_t offset, size_t size) -- cgit v1.2.3 From fe32d3cd5e8eb0f82e459763374aa80797023403 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Wed, 15 Jul 2015 12:52:04 +0300 Subject: sched/preempt: Fix cond_resched_lock() and cond_resched_softirq() These functions check should_resched() before unlocking the spinlock/enabling bottom halves: at that point preempt_count is always non-zero, so should_resched() always returns false. cond_resched_lock() only worked when spin_needbreak was set. This patch adds a "preempt_offset" argument to should_resched(), along with preempt_count offset constants for it: PREEMPT_DISABLE_OFFSET - offset after preempt_disable() PREEMPT_LOCK_OFFSET - offset after spin_lock() SOFTIRQ_DISABLE_OFFSET - offset after local_bh_disable() SOFTIRQ_LOCK_OFFSET - offset after spin_lock_bh() Signed-off-by: Konstantin Khlebnikov Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Graf Cc: Boris Ostrovsky Cc: David Vrabel Cc: Linus Torvalds Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: bdb438065890 ("sched: Extract the basic add/sub preempt_count modifiers") Link: http://lkml.kernel.org/r/20150715095204.12246.98268.stgit@buzz Signed-off-by: Ingo Molnar --- arch/x86/include/asm/preempt.h | 4 ++-- include/asm-generic/preempt.h | 5 +++-- include/linux/preempt.h | 19 ++++++++++++++----- include/linux/sched.h | 6 ------ kernel/sched/core.c | 6 +++--- 5 files changed, 22 insertions(+), 18 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index dca71714f860..b12f81022a6b 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void) /* * Returns true when we need to resched and can (barring IRQ state). */ -static __always_inline bool should_resched(void) +static __always_inline bool should_resched(int preempt_offset) { - return unlikely(!raw_cpu_read_4(__preempt_count)); + return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); } #ifdef CONFIG_PREEMPT diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index d0a7a4753db2..0bec580a4885 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h @@ -71,9 +71,10 @@ static __always_inline bool __preempt_count_dec_and_test(void) /* * Returns true when we need to resched and can (barring IRQ state).
*/ -static __always_inline bool should_resched(void) +static __always_inline bool should_resched(int preempt_offset) { - return unlikely(!preempt_count() && tif_need_resched()); + return unlikely(preempt_count() == preempt_offset && + tif_need_resched()); } #ifdef CONFIG_PREEMPT diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 84991f185173..bea8dd8ff5e0 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -84,12 +84,20 @@ */ #define in_nmi() (preempt_count() & NMI_MASK) +/* + * The preempt_count offset after preempt_disable(); + */ #if defined(CONFIG_PREEMPT_COUNT) -# define PREEMPT_DISABLE_OFFSET 1 +# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET #else -# define PREEMPT_DISABLE_OFFSET 0 +# define PREEMPT_DISABLE_OFFSET 0 #endif +/* + * The preempt_count offset after spin_lock() + */ +#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET + /* * The preempt_count offset needed for things like: * @@ -103,7 +111,7 @@ * * Work as expected. */ -#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET) +#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET) /* * Are we running in atomic context? WARNING: this macro cannot @@ -124,7 +132,8 @@ #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) extern void preempt_count_add(int val); extern void preempt_count_sub(int val); -#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); }) +#define preempt_count_dec_and_test() \ + ({ preempt_count_sub(1); should_resched(0); }) #else #define preempt_count_add(val) __preempt_count_add(val) #define preempt_count_sub(val) __preempt_count_sub(val) @@ -184,7 +193,7 @@ do { \ #define preempt_check_resched() \ do { \ - if (should_resched()) \ + if (should_resched(0)) \ __preempt_schedule(); \ } while (0) diff --git a/include/linux/sched.h b/include/linux/sched.h index 65a8a8651596..9c144657aace 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2891,12 +2891,6 @@ extern int _cond_resched(void); extern int __cond_resched_lock(spinlock_t *lock); -#ifdef CONFIG_PREEMPT_COUNT -#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET -#else -#define PREEMPT_LOCK_OFFSET 0 -#endif - #define cond_resched_lock(lock) ({ \ ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ __cond_resched_lock(lock); \ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index fa5826cc612f..f5fad2b12baf 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4496,7 +4496,7 @@ SYSCALL_DEFINE0(sched_yield) int __sched _cond_resched(void) { - if (should_resched()) { + if (should_resched(0)) { preempt_schedule_common(); return 1; } @@ -4514,7 +4514,7 @@ EXPORT_SYMBOL(_cond_resched); */ int __cond_resched_lock(spinlock_t *lock) { - int resched = should_resched(); + int resched = should_resched(PREEMPT_LOCK_OFFSET); int ret = 0; lockdep_assert_held(lock); @@ -4536,7 +4536,7 @@ int __sched __cond_resched_softirq(void) { BUG_ON(!in_softirq()); - if (should_resched()) { + if (should_resched(SOFTIRQ_DISABLE_OFFSET)) { local_bh_enable(); preempt_schedule_common(); local_bh_disable(); -- cgit v1.2.3 From 2592dbbbf4c67501c2bd2dcf89c2b8924d592a9f Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Fri, 17 Jul 2015 06:51:33 +0200 Subject: mm: provide early_memremap_ro to establish read-only mapping During early boot as Xen pv domain the kernel needs to map some page tables supplied by the hypervisor read only. 
This is needed to be able to relocate some data structures conflicting with the physical memory map, especially on systems with huge RAM (above 512GB). Add the function early_memremap_ro() to establish this read-only mapping. Signed-off-by: Juergen Gross Acked-by: Konrad Rzeszutek Wilk Acked-by: Vlastimil Babka Signed-off-by: David Vrabel --- include/asm-generic/early_ioremap.h | 2 ++ include/asm-generic/fixmap.h | 3 +++ mm/early_ioremap.c | 12 ++++++++++++ 3 files changed, 17 insertions(+) (limited to 'include/asm-generic') diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h index a5de55c04fb2..316bd043319e 100644 --- a/include/asm-generic/early_ioremap.h +++ b/include/asm-generic/early_ioremap.h @@ -11,6 +11,8 @@ extern void __iomem *early_ioremap(resource_size_t phys_addr, unsigned long size); extern void *early_memremap(resource_size_t phys_addr, unsigned long size); +extern void *early_memremap_ro(resource_size_t phys_addr, + unsigned long size); extern void early_iounmap(void __iomem *addr, unsigned long size); extern void early_memunmap(void *addr, unsigned long size); diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h index f23174fb9ec4..1cbb8338edf3 100644 --- a/include/asm-generic/fixmap.h +++ b/include/asm-generic/fixmap.h @@ -46,6 +46,9 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr) #ifndef FIXMAP_PAGE_NORMAL #define FIXMAP_PAGE_NORMAL PAGE_KERNEL #endif +#if !defined(FIXMAP_PAGE_RO) && defined(PAGE_KERNEL_RO) +#define FIXMAP_PAGE_RO PAGE_KERNEL_RO +#endif #ifndef FIXMAP_PAGE_NOCACHE #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE #endif diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index e10ccd299d66..0cfadafb3fb0 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c @@ -217,6 +217,13 @@ early_memremap(resource_size_t phys_addr, unsigned long size) return (__force void *)__early_ioremap(phys_addr, size, FIXMAP_PAGE_NORMAL); } +#ifdef FIXMAP_PAGE_RO +void __init * +early_memremap_ro(resource_size_t phys_addr, unsigned long size) +{ + return (__force void *)__early_ioremap(phys_addr, size, FIXMAP_PAGE_RO); +} +#endif #else /* CONFIG_MMU */ void __init __iomem * @@ -231,6 +238,11 @@ early_memremap(resource_size_t phys_addr, unsigned long size) { return (void *)phys_addr; } +void __init * +early_memremap_ro(resource_size_t phys_addr, unsigned long size) +{ + return (void *)phys_addr; +} void __init early_iounmap(void __iomem *addr, unsigned long size) { -- cgit v1.2.3 From 9bebe9e5b0f3109a14000df25308c2971f872605 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sun, 19 Jul 2015 18:01:19 -0700 Subject: kbuild: Fix .text.unlikely placement When building a kernel with .text.unlikely text, the unlikely text for each translation unit was put next to the main .text code in the final vmlinux. The problem is that the linker doesn't allow more specific submatches of a section name in a different linker script statement after the main match. So we need to move them all into one line. With that change .text.unlikely is at the end of everything again. I also moved .text.hot into the same statement, even though that's not strictly needed.
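For illustration, a minimal C sketch (not part of the patch, and assuming a GCC build) of the kind of code that lands in the .text.unlikely input sections which the TEXT_TEXT change below now collects in a single match:

	/*
	 * Hypothetical example: GCC emits functions marked with the cold
	 * attribute (the kernel's __cold annotation) into the .text.unlikely
	 * input section, which the patch below matches together with
	 * .text.hot, .text and .text.fixup in one statement.
	 */
	__attribute__((cold)) void report_rare_failure(int err)
	{
		/* rarely executed slow path, kept out of the hot text stream */
	}
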
Signed-off-by: Andi Kleen Signed-off-by: Michal Marek --- include/asm-generic/vmlinux.lds.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 8bd374d3cf21..1781e54ea6d3 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -412,12 +412,10 @@ * during second ld run in second ld pass when generating System.map */ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ - *(.text.hot) \ - *(.text .text.fixup) \ + *(.text.hot .text .text.fixup .text.unlikely) \ *(.ref.text) \ MEM_KEEP(init.text) \ MEM_KEEP(exit.text) \ - *(.text.unlikely) /* sched.text is aling to function alignment to secure we have same -- cgit v1.2.3 From 1b3d4200c1e00a3fb5e0aea428de5b07079a37e3 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Mon, 24 Aug 2015 12:13:27 -0700 Subject: PCI: Add pci_iomap_wc() variants MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PCI BARs tell us whether prefetching is safe, but they don't say anything about write combining (WC). WC changes ordering rules and allows writes to be collapsed, so it's not safe in general to use it on a prefetchable region. Add pci_iomap_wc() and pci_iomap_wc_range() so drivers can take advantage of write combining when they know it's safe. On architectures that don't fully support WC, e.g., x86 without PAT, drivers for legacy framebuffers may get some of the benefit by using arch_phys_wc_add() in addition to pci_iomap_wc(). But arch_phys_wc_add() is unreliable and should be avoided in general. On x86, it uses MTRRs, which are limited in number and size, so the results will vary based on driver loading order. The goals of adding pci_iomap_wc() are to: - Give drivers an architecture-independent way to use WC so they can stop using interfaces like mtrr_add() (on x86, pci_iomap_wc() uses PAT when available). - Move toward using _PAGE_CACHE_MODE_UC, not _PAGE_CACHE_MODE_UC_MINUS, on x86 on ioremap_nocache() (see de33c442ed2a ("x86 PAT: fix performance drop for glx, use UC minus for ioremap(), ioremap_nocache() and pci_mmap_page_range()"). Signed-off-by: Luis R. Rodriguez [ Move IORESOURCE_IO check up, space out statements for better readability. ] Signed-off-by: Borislav Petkov Acked-by: Arnd Bergmann Cc: Cc: Cc: Andrew Morton Cc: Andy Lutomirski Cc: Antonino Daplas Cc: Bjorn Helgaas Cc: Daniel Vetter Cc: Dave Airlie Cc: Dave Hansen Cc: Davidlohr Bueso Cc: H. Peter Anvin Cc: Jean-Christophe Plagniol-Villard Cc: Juergen Gross Cc: Linus Torvalds Cc: Mel Gorman Cc: Michael S. 
Tsirkin Cc: Peter Zijlstra Cc: Roger Pau Monné Cc: Rusty Russell Cc: Stefan Bader Cc: Suresh Siddha Cc: Thomas Gleixner Cc: Tomi Valkeinen Cc: Toshi Kani Cc: Ville Syrjälä Cc: Vlastimil Babka Cc: airlied@linux.ie Cc: benh@kernel.crashing.org Cc: dan.j.williams@intel.com Cc: david.vrabel@citrix.com Cc: jbeulich@suse.com Cc: konrad.wilk@oracle.com Cc: linux-arch@vger.kernel.org Cc: linux-fbdev@vger.kernel.org Cc: linux-pci@vger.kernel.org Cc: venkatesh.pallipadi@intel.com Cc: vinod.koul@intel.com Cc: xen-devel@lists.xensource.com Link: http://lkml.kernel.org/r/1440443613-13696-6-git-send-email-mcgrof@do-not-panic.com Signed-off-by: Ingo Molnar --- include/asm-generic/pci_iomap.h | 14 +++++++++ lib/pci_iomap.c | 66 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+) (limited to 'include/asm-generic') diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h index 7389c87116a0..b1e17fcee2d0 100644 --- a/include/asm-generic/pci_iomap.h +++ b/include/asm-generic/pci_iomap.h @@ -15,9 +15,13 @@ struct pci_dev; #ifdef CONFIG_PCI /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); +extern void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max); extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, unsigned long offset, unsigned long maxlen); +extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen); /* Create a virtual mapping cookie for a port on a given PCI device. * Do not call this directly, it exists to make it easier for architectures * to override */ @@ -34,12 +38,22 @@ static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned lon return NULL; } +static inline void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max) +{ + return NULL; +} static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, unsigned long offset, unsigned long maxlen) { return NULL; } +static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen) +{ + return NULL; +} #endif #endif /* __ASM_GENERIC_IO_H */ diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c index bcce5f149310..5f5d24d1d53f 100644 --- a/lib/pci_iomap.c +++ b/lib/pci_iomap.c @@ -51,6 +51,51 @@ void __iomem *pci_iomap_range(struct pci_dev *dev, } EXPORT_SYMBOL(pci_iomap_range); +/** + * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR + * @dev: PCI device that owns the BAR + * @bar: BAR number + * @offset: map memory at the given offset in BAR + * @maxlen: max length of the memory to map + * + * Using this function you will get a __iomem address to your device BAR. + * You can access it using ioread*() and iowrite*(). These functions hide + * the details if this is a MMIO or PIO address space and will just do what + * you expect from them in the correct way. When possible write combining + * is used. + * + * @maxlen specifies the maximum length to map. If you want to get access to + * the complete BAR from offset to the end, pass %0 here. 
+ * */ +void __iomem *pci_iomap_wc_range(struct pci_dev *dev, + int bar, + unsigned long offset, + unsigned long maxlen) +{ + resource_size_t start = pci_resource_start(dev, bar); + resource_size_t len = pci_resource_len(dev, bar); + unsigned long flags = pci_resource_flags(dev, bar); + + + if (flags & IORESOURCE_IO) + return NULL; + + if (len <= offset || !start) + return NULL; + + len -= offset; + start += offset; + if (maxlen && len > maxlen) + len = maxlen; + + if (flags & IORESOURCE_MEM) + return ioremap_wc(start, len); + + /* What? */ + return NULL; +} +EXPORT_SYMBOL_GPL(pci_iomap_wc_range); + /** * pci_iomap - create a virtual mapping cookie for a PCI BAR * @dev: PCI device that owns the BAR @@ -70,4 +115,25 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) return pci_iomap_range(dev, bar, 0, maxlen); } EXPORT_SYMBOL(pci_iomap); + +/** + * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR + * @dev: PCI device that owns the BAR + * @bar: BAR number + * @maxlen: length of the memory to map + * + * Using this function you will get a __iomem address to your device BAR. + * You can access it using ioread*() and iowrite*(). These functions hide + * the details if this is a MMIO or PIO address space and will just do what + * you expect from them in the correct way. When possible write combining + * is used. + * + * @maxlen specifies the maximum length to map. If you want to get access to + * the complete BAR without checking for its length first, pass %0 here. + * */ +void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen) +{ + return pci_iomap_wc_range(dev, bar, 0, maxlen); +} +EXPORT_SYMBOL_GPL(pci_iomap_wc); #endif /* CONFIG_PCI */ -- cgit v1.2.3 From 012dcef3f058385268630c0003e9b7f8dcafbeb4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 7 Aug 2015 17:41:01 -0400 Subject: mm: move __phys_to_pfn and __pfn_to_phys to asm/generic/memory_model.h Three architectures already define these, and we'll need them genericly soon. 
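As a quick illustration (a hypothetical usage sketch, not part of the patch), the helpers being moved are plain shift conversions between a physical address and its page frame number:

	/* hypothetical usage of the now-generic conversion macros */
	phys_addr_t pa = 0x12345000;
	unsigned long pfn = __phys_to_pfn(pa);	/* pa >> PAGE_SHIFT */
	phys_addr_t back = __pfn_to_phys(pfn);	/* pfn << PAGE_SHIFT */

With a 4 KiB page size (PAGE_SHIFT == 12), pfn here is 0x12345 and back equals pa again.
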
Signed-off-by: Christoph Hellwig Signed-off-by: Dan Williams --- arch/arm/include/asm/memory.h | 6 ------ arch/arm64/include/asm/memory.h | 6 ------ arch/unicore32/include/asm/memory.h | 6 ------ include/asm-generic/memory_model.h | 6 ++++++ 4 files changed, 6 insertions(+), 18 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index b7f6fb462ea0..98d58bb04ac5 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -118,12 +118,6 @@ #define DTCM_OFFSET UL(0xfffe8000) #endif -/* - * Convert a physical address to a Page Frame Number and back - */ -#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) -#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) - /* * Convert a page to/from a physical address */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index f800d45ea226..d808bb688751 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -80,12 +80,6 @@ #define __virt_to_phys(x) (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET)) #define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET)) -/* - * Convert a physical address to a Page Frame Number and back - */ -#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) -#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) - /* * Convert a page to/from a physical address */ diff --git a/arch/unicore32/include/asm/memory.h b/arch/unicore32/include/asm/memory.h index debafc40200a..3bb0a29fd2d7 100644 --- a/arch/unicore32/include/asm/memory.h +++ b/arch/unicore32/include/asm/memory.h @@ -60,12 +60,6 @@ #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) #endif -/* - * Convert a physical address to a Page Frame Number and back - */ -#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT) -#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) - /* * Convert a page to/from a physical address */ diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index 14909b0b9cae..f20f407ce45d 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -69,6 +69,12 @@ }) #endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ +/* + * Convert a physical address to a Page Frame Number and back + */ +#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) +#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) + #define page_to_pfn __page_to_pfn #define pfn_to_page __pfn_to_page -- cgit v1.2.3 From 3c217e51d8a272b9301058fe845d6c69cc0651cb Mon Sep 17 00:00:00 2001 From: Sylvain Chouleur Date: Mon, 8 Jun 2015 11:45:19 +0200 Subject: rtc: cmos: century support If the century field is supported by the RTC CMOS device, then we should use it and no longer consider years greater than 169 as an error. For information, the year field of the rtc_time structure contains the value to add to 1900 to obtain the current year. This was a hack to be able to support years from 1970 to 2069. This patch remains compatible with this implementation.
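For illustration, a small worked example (not part of the patch) that mirrors the __set_rtc_time() logic added below, assuming the ACPI FADT declares a valid century register and the year being written is 2015:

	/* hypothetical values while writing the year 2015 */
	yrs = time->tm_year;		/* 115: years since 1900 */
	century = (yrs + 1900) / 100;	/* 20, written to the FADT century register */
	yrs %= 100;			/* 15, written to the RTC year register */

On the read side, __get_rtc_time() undoes this with time->tm_year += (century - 19) * 100, giving 15 + 100 = 115 again.
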
Signed-off-by: Sylvain Chouleur Signed-off-by: Alexandre Belloni --- include/asm-generic/rtc.h | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'include/asm-generic') diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h index fa86f240c874..4e3b6558331e 100644 --- a/include/asm-generic/rtc.h +++ b/include/asm-generic/rtc.h @@ -16,6 +16,9 @@ #include #include #include +#ifdef CONFIG_ACPI +#include +#endif #define RTC_PIE 0x40 /* periodic interrupt enable */ #define RTC_AIE 0x20 /* alarm interrupt enable */ @@ -46,6 +49,7 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time) { unsigned char ctrl; unsigned long flags; + unsigned char century = 0; #ifdef CONFIG_MACH_DECSTATION unsigned int real_year; @@ -78,6 +82,11 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time) time->tm_year = CMOS_READ(RTC_YEAR); #ifdef CONFIG_MACH_DECSTATION real_year = CMOS_READ(RTC_DEC_YEAR); +#endif +#ifdef CONFIG_ACPI + if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && + acpi_gbl_FADT.century) + century = CMOS_READ(acpi_gbl_FADT.century); #endif ctrl = CMOS_READ(RTC_CONTROL); spin_unlock_irqrestore(&rtc_lock, flags); @@ -90,12 +99,16 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time) time->tm_mday = bcd2bin(time->tm_mday); time->tm_mon = bcd2bin(time->tm_mon); time->tm_year = bcd2bin(time->tm_year); + century = bcd2bin(century); } #ifdef CONFIG_MACH_DECSTATION time->tm_year += real_year - 72; #endif + if (century) + time->tm_year += (century - 19) * 100; + /* * Account for differences between how the RTC uses the values * and how they are defined in a struct rtc_time; @@ -122,6 +135,7 @@ static inline int __set_rtc_time(struct rtc_time *time) #ifdef CONFIG_MACH_DECSTATION unsigned int real_yrs, leap_yr; #endif + unsigned char century = 0; yrs = time->tm_year; mon = time->tm_mon + 1; /* tm_mon starts at zero */ @@ -150,6 +164,15 @@ static inline int __set_rtc_time(struct rtc_time *time) yrs = 73; } #endif + +#ifdef CONFIG_ACPI + if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && + acpi_gbl_FADT.century) { + century = (yrs + 1900) / 100; + yrs %= 100; + } +#endif + /* These limits and adjustments are independent of * whether the chip is in binary mode or not. */ @@ -169,6 +192,7 @@ static inline int __set_rtc_time(struct rtc_time *time) day = bin2bcd(day); mon = bin2bcd(mon); yrs = bin2bcd(yrs); + century = bin2bcd(century); } save_control = CMOS_READ(RTC_CONTROL); @@ -185,6 +209,11 @@ static inline int __set_rtc_time(struct rtc_time *time) CMOS_WRITE(hrs, RTC_HOURS); CMOS_WRITE(min, RTC_MINUTES); CMOS_WRITE(sec, RTC_SECONDS); +#ifdef CONFIG_ACPI + if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && + acpi_gbl_FADT.century) + CMOS_WRITE(century, acpi_gbl_FADT.century); +#endif CMOS_WRITE(save_control, RTC_CONTROL); CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); -- cgit v1.2.3 From 6b0f68e32ea8749ff7d4a66cd5761e915e48e59d Mon Sep 17 00:00:00 2001 From: Mark Salter Date: Tue, 8 Sep 2015 15:03:01 -0700 Subject: mm: add utility for early copy from unmapped ram When booting an arm64 kernel w/initrd using UEFI/grub, use of mem= will likely cut off part or all of the initrd. This leaves it outside the kernel linear map which leads to failure when unpacking. The x86 code has a similar need to relocate an initrd outside of mapped memory in some cases. The current x86 code uses early_memremap() to copy the original initrd from unmapped to mapped RAM. 
This patchset creates a generic copy_from_early_mem() utility based on that x86 code and has arm64 and x86 share it in their respective initrd relocation code. This patch (of 3): In some early boot circumstances, it may be necessary to copy from RAM outside the kernel linear mapping to mapped RAM. The need to relocate an initrd is one example in the x86 code. This patch creates a helper function based on current x86 code. Signed-off-by: Mark Salter Cc: Catalin Marinas Cc: Will Deacon Cc: Arnd Bergmann Cc: Ard Biesheuvel Cc: Mark Rutland Cc: Russell King Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Yinghai Lu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/early_ioremap.h | 6 ++++++ mm/early_ioremap.c | 22 ++++++++++++++++++++++ 2 files changed, 28 insertions(+) (limited to 'include/asm-generic') diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h index a5de55c04fb2..e539f27ec51b 100644 --- a/include/asm-generic/early_ioremap.h +++ b/include/asm-generic/early_ioremap.h @@ -33,6 +33,12 @@ extern void early_ioremap_setup(void); */ extern void early_ioremap_reset(void); +/* + * Early copy from unmapped memory to kernel mapped memory. + */ +extern void copy_from_early_mem(void *dest, phys_addr_t src, + unsigned long size); + #else static inline void early_ioremap_init(void) { } static inline void early_ioremap_setup(void) { } diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index e10ccd299d66..a0baeb4be934 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c @@ -217,6 +217,28 @@ early_memremap(resource_size_t phys_addr, unsigned long size) return (__force void *)__early_ioremap(phys_addr, size, FIXMAP_PAGE_NORMAL); } + +#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) + +void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size) +{ + unsigned long slop, clen; + char *p; + + while (size) { + slop = src & ~PAGE_MASK; + clen = size; + if (clen > MAX_MAP_CHUNK - slop) + clen = MAX_MAP_CHUNK - slop; + p = early_memremap(src & PAGE_MASK, clen + slop); + memcpy(dest, p + slop, clen); + early_memunmap(p, clen + slop); + dest += clen; + src += clen; + size -= clen; + } +} + #else /* CONFIG_MMU */ void __init __iomem * -- cgit v1.2.3 From 6894258eda2f9badc28c878086c0e54bd5b7fb30 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 9 Sep 2015 15:39:39 -0700 Subject: dma-mapping: consolidate dma_{alloc,free}_{attrs,coherent} Since 2009 we have a nice asm-generic header implementing lots of DMA API functions for architectures using struct dma_map_ops, but unfortunately it's still missing a lot of APIs that all architectures still have to duplicate. This series consolidates the remaining functions, although we still need arch opt outs for two of them as a few architectures have very non-standard implementations. This patch (of 5): The coherent DMA allocator works the same over all architectures supporting dma_map operations. This patch consolidates them and converges the minor differences: - the debug_dma helpers are now called from all architectures, including those that were previously missing them - dma_alloc_from_coherent and dma_release_from_coherent are now always called from the generic alloc/free routines instead of the ops dma-mapping-common.h always includes dma-coherent.h to get the defintions for them, or the stubs if the architecture doesn't support this feature - checks for ->alloc / ->free presence are removed. 
There is only one magic instead of dma_map_ops without them (mic_dma_ops) and that one is x86 only anyway. Besides that only x86 needs special treatment to replace a default devices if none is passed and tweak the gfp_flags. An optional arch hook is provided for that. [linux@roeck-us.net: fix build] [jcmvbkbc@gmail.com: fix xtensa] Signed-off-by: Christoph Hellwig Cc: Arnd Bergmann Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Yoshinori Sato Cc: Michal Simek Cc: Jonas Bonn Cc: Chris Metcalf Cc: Guan Xuetao Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Andy Shevchenko Signed-off-by: Guenter Roeck Signed-off-by: Max Filippov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/dma-mapping.h | 18 ---------- arch/arm/include/asm/dma-mapping.h | 29 ---------------- arch/arm/mm/dma-mapping.c | 12 ------- arch/arm64/include/asm/dma-mapping.h | 33 ------------------ arch/h8300/include/asm/dma-mapping.h | 26 -------------- arch/hexagon/include/asm/dma-mapping.h | 33 ------------------ arch/ia64/include/asm/dma-mapping.h | 25 ------------- arch/microblaze/include/asm/dma-mapping.h | 31 ----------------- arch/mips/cavium-octeon/dma-octeon.c | 8 ----- arch/mips/include/asm/dma-mapping.h | 31 ----------------- arch/mips/loongson64/common/dma-swiotlb.c | 8 ----- arch/mips/mm/dma-default.c | 7 ---- arch/mips/netlogic/common/nlm-dma.c | 10 ------ arch/openrisc/include/asm/dma-mapping.h | 30 ---------------- arch/powerpc/include/asm/dma-mapping.h | 33 ------------------ arch/s390/include/asm/dma-mapping.h | 31 ----------------- arch/sh/include/asm/dma-mapping.h | 37 -------------------- arch/sparc/include/asm/dma-mapping.h | 26 -------------- arch/tile/include/asm/dma-mapping.h | 27 -------------- arch/unicore32/include/asm/dma-mapping.h | 24 ------------- arch/x86/include/asm/dma-mapping.h | 16 ++------- arch/x86/kernel/pci-dma.c | 49 +++++--------------------- arch/xtensa/include/asm/dma-mapping.h | 31 ----------------- drivers/xen/swiotlb-xen.c | 6 ---- include/asm-generic/dma-mapping-common.h | 58 +++++++++++++++++++++++++++++++ 25 files changed, 70 insertions(+), 569 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index dfa32f061320..9fef5bd59a82 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -12,24 +12,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) -{ - return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs); -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs); -} - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return get_dma_ops(dev)->mapping_error(dev, dma_addr); diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index a68b9d8a71fe..bc404473f1ca 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -8,7 +8,6 @@ #include #include -#include #include #include @@ -209,21 +208,6 @@ extern int 
arm_dma_set_mask(struct device *dev, u64 dma_mask); extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs); -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *cpu_addr; - BUG_ON(!ops); - - cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - return cpu_addr; -} - /** * arm_dma_free - free memory allocated by arm_dma_alloc * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices @@ -241,19 +225,6 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs); -#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - BUG_ON(!ops); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - /** * arm_dma_mmap - map a coherent DMA allocation into user space * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index bf35abcc7d59..e62604384945 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -676,10 +676,6 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); - void *memory; - - if (dma_alloc_from_coherent(dev, size, handle, &memory)) - return memory; return __dma_alloc(dev, size, handle, gfp, prot, false, attrs, __builtin_return_address(0)); @@ -688,11 +684,6 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, static void *arm_coherent_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { - void *memory; - - if (dma_alloc_from_coherent(dev, size, handle, &memory)) - return memory; - return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, attrs, __builtin_return_address(0)); } @@ -752,9 +743,6 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs); - if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) - return; - size = PAGE_ALIGN(size); if (nommu()) { diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index f0d6d0bfe55c..5e11b3f0fe3a 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -22,8 +22,6 @@ #include #include -#include - #include #include @@ -120,37 +118,6 @@ static inline void dma_mark_clean(void *addr, size_t size) { } -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) -#define dma_free_coherent(d, s, h, f) dma_free_attrs(d, s, h, f, NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *vaddr; - - if (dma_alloc_from_coherent(dev, size, 
dma_handle, &vaddr)) - return vaddr; - - vaddr = ops->alloc(dev, size, dma_handle, flags, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr); - return vaddr; -} - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dev_addr, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (dma_release_from_coherent(dev, get_order(size), vaddr)) - return; - - debug_dma_free_coherent(dev, size, vaddr, dev_addr); - ops->free(dev, size, vaddr, dev_addr, attrs); -} - /* * There is no dma_cache_sync() implementation, so just return NULL here. */ diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h index 6e67a90902f2..826aa9b519b7 100644 --- a/arch/h8300/include/asm/dma-mapping.h +++ b/arch/h8300/include/asm/dma-mapping.h @@ -1,8 +1,6 @@ #ifndef _H8300_DMA_MAPPING_H #define _H8300_DMA_MAPPING_H -#include - extern struct dma_map_ops h8300_dma_map_ops; static inline struct dma_map_ops *get_dma_ops(struct device *dev) @@ -25,30 +23,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *memory; - - memory = ops->alloc(dev, size, dma_handle, flag, attrs); - return memory; -} - -#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index 16965427f6b4..c20d3caa7dad 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -70,37 +70,4 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return (dma_addr == bad_dma_address); } -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - void *ret; - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!dma_ops); - - ret = ops->alloc(dev, size, dma_handle, flag, attrs); - - debug_dma_alloc_coherent(dev, size, *dma_handle, ret); - - return ret; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - BUG_ON(!dma_ops); - - dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); -} - #endif diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index cf3ab7e784b5..d36f83cc226a 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -23,31 +23,6 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t, extern void 
machvec_dma_sync_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *daddr, gfp_t gfp, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - void *caddr; - - caddr = ops->alloc(dev, size, daddr, gfp, attrs); - debug_dma_alloc_coherent(dev, size, *daddr, caddr); - return caddr; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *caddr, dma_addr_t daddr, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - debug_dma_free_coherent(dev, size, caddr, daddr); - ops->free(dev, size, caddr, daddr, attrs); -} - #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index ab353723076a..801dbe215a8c 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -27,7 +27,6 @@ #include #include #include -#include #include #define DMA_ERROR_CODE (~(dma_addr_t)0x0) @@ -102,36 +101,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *memory; - - BUG_ON(!ops); - - memory = ops->alloc(dev, size, dma_handle, flag, attrs); - - debug_dma_alloc_coherent(dev, size, *dma_handle, memory); - return memory; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d, s, c, h, NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!ops); - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index d8960d46417b..2cd45f5f9481 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -161,9 +161,6 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, { void *ret; - if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) - return ret; - /* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); @@ -194,11 +191,6 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, static void octeon_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { - int order = get_order(size); - - if (dma_release_from_coherent(dev, order, vaddr)) - return; - swiotlb_free_coherent(dev, size, vaddr, dma_handle); } diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 360b3387182a..b197595134ba 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ 
b/arch/mips/include/asm/dma-mapping.h @@ -4,7 +4,6 @@ #include #include #include -#include #ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */ #include @@ -65,36 +64,6 @@ dma_set_mask(struct device *dev, u64 mask) extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) -{ - void *ret; - struct dma_map_ops *ops = get_dma_ops(dev); - - ret = ops->alloc(dev, size, dma_handle, gfp, attrs); - - debug_dma_alloc_coherent(dev, size, *dma_handle, ret); - - return ret; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - ops->free(dev, size, vaddr, dma_handle, attrs); - - debug_dma_free_coherent(dev, size, vaddr, dma_handle); -} - - void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag); diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c index 2c6b989c1bc4..ef9da3b5c543 100644 --- a/arch/mips/loongson64/common/dma-swiotlb.c +++ b/arch/mips/loongson64/common/dma-swiotlb.c @@ -14,9 +14,6 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, { void *ret; - if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) - return ret; - /* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); @@ -46,11 +43,6 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, static void loongson_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { - int order = get_order(size); - - if (dma_release_from_coherent(dev, order, vaddr)) - return; - swiotlb_free_coherent(dev, size, vaddr, dma_handle); } diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 8f23cf08f4ba..6c0fd13fa8e8 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -137,9 +137,6 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, struct page *page = NULL; unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) - return ret; - gfp = massage_gfp_flags(dev, gfp); if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC)) @@ -176,13 +173,9 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { unsigned long addr = (unsigned long) vaddr; - int order = get_order(size); unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct page *page = NULL; - if (dma_release_from_coherent(dev, order, vaddr)) - return; - plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); if (!plat_device_is_coherent(dev) && !hw_coherentio) diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c index f3d4ae87abc7..3758715d4ab6 100644 --- a/arch/mips/netlogic/common/nlm-dma.c +++ b/arch/mips/netlogic/common/nlm-dma.c @@ -47,11 +47,6 @@ static char *nlm_swiotlb; static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) { - void *ret; - - if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) - return ret; - 
/* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); @@ -69,11 +64,6 @@ static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, static void nlm_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { - int order = get_order(size); - - if (dma_release_from_coherent(dev, order, vaddr)) - return; - swiotlb_free_coherent(dev, size, vaddr, dma_handle); } diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index fab8628e1b6e..a81d6f68e9c8 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -23,7 +23,6 @@ */ #include -#include #include #include @@ -38,35 +37,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *memory; - - memory = ops->alloc(dev, size, dma_handle, gfp, attrs); - - debug_dma_alloc_coherent(dev, size, *dma_handle, memory); - - return memory; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 710f60e380e0..e6ca63ac4c6c 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -137,39 +137,6 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask); extern int __dma_set_mask(struct device *dev, u64 dma_mask); extern u64 __dma_get_required_mask(struct device *dev); -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - void *cpu_addr; - - BUG_ON(!dma_ops); - - cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs); - - debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - - return cpu_addr; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - BUG_ON(!dma_ops); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - - dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { struct dma_map_ops *dma_ops = get_dma_ops(dev); diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 9d395961e713..c29c9c7d81e8 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -56,35 +56,4 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return dma_addr == DMA_ERROR_CODE; } -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) 
- -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *cpu_addr; - - BUG_ON(!ops); - - cpu_addr = ops->alloc(dev, size, dma_handle, flags, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - - return cpu_addr; -} - -#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!ops); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - #endif /* _ASM_S390_DMA_MAPPING_H */ diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index b437f2c780b8..3c78059e66ff 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -9,7 +9,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return dma_ops; } -#include #include static inline int dma_supported(struct device *dev, u64 mask) @@ -53,42 +52,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return dma_addr == 0; } -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *memory; - - if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) - return memory; - if (!ops->alloc) - return NULL; - - memory = ops->alloc(dev, size, dma_handle, gfp, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, memory); - - return memory; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (dma_release_from_coherent(dev, get_order(size), vaddr)) - return; - - debug_dma_free_coherent(dev, size, vaddr, dma_handle); - if (ops->free) - ops->free(dev, size, vaddr, dma_handle, attrs); -} - /* arch/sh/mm/consistent.c */ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 7e064c68c5ec..a8c678494ce7 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -41,32 +41,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *cpu_addr; - - cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - return cpu_addr; -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - static inline int 
dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { debug_dma_mapping_error(dev, dma_addr); diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index 1eae359d8315..4aba10e49310 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -116,34 +116,7 @@ dma_set_mask(struct device *dev, u64 mask) return 0; } -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - void *cpu_addr; - - cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs); - - debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - - return cpu_addr; -} - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - - dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) -#define dma_free_coherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) #define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) /* diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 366460a81796..5294d03e59de 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -18,8 +18,6 @@ #include #include -#include - #include #include @@ -82,28 +80,6 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) return 0; } -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - return dma_ops->alloc(dev, size, dma_handle, flag, attrs); -} - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); -} - #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 1f5b7287d1ad..f9b1b6cc48b6 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -12,7 +12,6 @@ #include #include #include -#include #include #ifdef CONFIG_ISA @@ -41,6 +40,9 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #endif } +bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); +#define arch_dma_alloc_attrs arch_dma_alloc_attrs + #include /* Make sure we keep the same behaviour */ @@ -125,16 +127,4 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) return gfp; } -#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) - -void * -dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, struct dma_attrs *attrs); - -#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) - -void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t bus, 
- struct dma_attrs *attrs); - #endif diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 353972c1946c..bd23971e8f1d 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -140,50 +140,19 @@ void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, free_pages((unsigned long)vaddr, get_order(size)); } -void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, struct dma_attrs *attrs) +bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp) { - struct dma_map_ops *ops = get_dma_ops(dev); - void *memory; - - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); - - if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) - return memory; - - if (!dev) - dev = &x86_dma_fallback_dev; - - if (!is_device_dma_capable(dev)) - return NULL; - - if (!ops->alloc) - return NULL; - - memory = ops->alloc(dev, size, dma_handle, - dma_alloc_coherent_gfp_flags(dev, gfp), attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, memory); - - return memory; -} -EXPORT_SYMBOL(dma_alloc_attrs); - -void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t bus, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - WARN_ON(irqs_disabled()); /* for portability */ + *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp); + *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); - if (dma_release_from_coherent(dev, get_order(size), vaddr)) - return; + if (!*dev) + *dev = &x86_dma_fallback_dev; + if (!is_device_dma_capable(*dev)) + return false; + return true; - debug_dma_free_coherent(dev, size, vaddr, bus); - if (ops->free) - ops->free(dev, size, vaddr, bus, attrs); } -EXPORT_SYMBOL(dma_free_attrs); +EXPORT_SYMBOL(arch_dma_alloc_attrs); /* * See for the iommu kernel diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index f01cb3044e50..bf24c908e5ff 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -34,37 +34,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) #define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) -#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) -{ - void *ret; - struct dma_map_ops *ops = get_dma_ops(dev); - - if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) - return ret; - - ret = ops->alloc(dev, size, dma_handle, gfp, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, ret); - - return ret; -} - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (dma_release_from_coherent(dev, get_order(size), vaddr)) - return; - - ops->free(dev, size, vaddr, dma_handle, attrs); - debug_dma_free_coherent(dev, size, vaddr, dma_handle); -} static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 4c549323c605..da1029ef8159 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -311,9 +311,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, */ flags &= ~(__GFP_DMA | __GFP_HIGHMEM); - if 
(dma_alloc_from_coherent(hwdev, size, dma_handle, &ret)) - return ret; - /* On ARM this function returns an ioremap'ped virtual address for * which virt_to_phys doesn't return the corresponding physical * address. In fact on ARM virt_to_phys only works for kernel direct @@ -356,9 +353,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, phys_addr_t phys; u64 dma_mask = DMA_BIT_MASK(32); - if (dma_release_from_coherent(hwdev, order, vaddr)) - return; - if (hwdev && hwdev->coherent_dma_mask) dma_mask = hwdev->coherent_dma_mask; diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index 940d5ec122c9..56dd9ea2bc8c 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -6,6 +6,7 @@ #include #include #include +#include static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, @@ -237,4 +238,61 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL) +#ifndef arch_dma_alloc_attrs +#define arch_dma_alloc_attrs(dev, flag) (true) +#endif + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + void *cpu_addr; + + BUG_ON(!ops); + + if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr)) + return cpu_addr; + + if (!arch_dma_alloc_attrs(&dev, &flag)) + return NULL; + if (!ops->alloc) + return NULL; + + cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); + debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); + return cpu_addr; +} + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!ops); + WARN_ON(irqs_disabled()); + + if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) + return; + + if (!ops->free) + return; + + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); + ops->free(dev, size, cpu_addr, dma_handle, attrs); +} + +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + return dma_alloc_attrs(dev, size, dma_handle, flag, NULL); +} + +static inline void dma_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL); +} + #endif -- cgit v1.2.3 From 1e8937526e2309d48fccd81bb30a590ac21a5516 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 9 Sep 2015 15:39:42 -0700 Subject: dma-mapping: consolidate dma_{alloc,free}_noncoherent Most architectures do not support non-coherent allocations and either define dma_{alloc,free}_noncoherent to their coherent versions or stub them out. Openrisc uses dma_{alloc,free}_attrs to implement them, and only Mips implements them directly. This patch moves the Openrisc version to common code, and handles the DMA_ATTR_NON_CONSISTENT case in the mips dma_map_ops instance. Note that actual non-coherent allocations require a dma_cache_sync implementation, so if non-coherent allocations didn't work on an architecture before this patch they still won't work after it. 
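For illustration only (not part of the patch): with the consolidated helpers, a driver that really wants non-coherent memory is expected to pair dma_alloc_noncoherent() with explicit dma_cache_sync() calls on an architecture that implements dma_cache_sync. The sketch below assumes such an architecture; the function name and the descriptor-ring usage are hypothetical.

#include <linux/dma-mapping.h>

/* Hypothetical driver helper: allocate a descriptor ring the CPU fills in. */
static void *example_alloc_desc_ring(struct device *dev, size_t size,
				     dma_addr_t *dma_handle)
{
	/* May return cacheable memory that is not coherent with the device. */
	void *ring = dma_alloc_noncoherent(dev, size, dma_handle, GFP_KERNEL);

	if (!ring)
		return NULL;

	/* ... CPU writes descriptors into the ring here ... */

	/* Flush the CPU writes so the device sees them before DMA starts. */
	dma_cache_sync(dev, ring, size, DMA_TO_DEVICE);
	return ring;
}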
[jcmvbkbc@gmail.com: fix xtensa] Signed-off-by: Christoph Hellwig Cc: Arnd Bergmann Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Yoshinori Sato Cc: Michal Simek Cc: Jonas Bonn Cc: Chris Metcalf Cc: Guan Xuetao Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Andy Shevchenko Signed-off-by: Max Filippov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/dma-mapping.h | 3 --- arch/arm/include/asm/dma-mapping.h | 21 ++++++--------------- arch/arm64/include/asm/dma-mapping.h | 14 -------------- arch/h8300/include/asm/dma-mapping.h | 3 --- arch/hexagon/include/asm/dma-mapping.h | 3 --- arch/ia64/include/asm/dma-mapping.h | 3 --- arch/microblaze/include/asm/dma-mapping.h | 3 --- arch/mips/include/asm/dma-mapping.h | 6 ------ arch/mips/mm/dma-default.c | 20 +++++++++++++++----- arch/openrisc/include/asm/dma-mapping.h | 20 -------------------- arch/powerpc/include/asm/dma-mapping.h | 3 --- arch/s390/include/asm/dma-mapping.h | 3 --- arch/sh/include/asm/dma-mapping.h | 3 --- arch/sparc/include/asm/dma-mapping.h | 3 --- arch/tile/include/asm/dma-mapping.h | 3 --- arch/unicore32/include/asm/dma-mapping.h | 3 --- arch/x86/include/asm/dma-mapping.h | 3 --- arch/xtensa/include/asm/dma-mapping.h | 3 --- include/asm-generic/dma-mapping-common.h | 18 ++++++++++++++++++ 19 files changed, 39 insertions(+), 99 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 9fef5bd59a82..0552bf097245 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -27,9 +27,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask) return get_dma_ops(dev)->set_dma_mask(dev, mask); } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - #define dma_cache_sync(dev, va, size, dir) ((void)0) #endif /* _ALPHA_DMA_MAPPING_H */ diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index bc404473f1ca..0b7787167b64 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -38,6 +38,12 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) dev->archdata.dma_ops = ops; } +/* + * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent + * implementations, we don't provide a dma_cache_sync function so drivers using + * this API are highlighted with build warnings. + */ + #include static inline int dma_set_mask(struct device *dev, u64 mask) @@ -175,21 +181,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return dma_addr == DMA_ERROR_CODE; } -/* - * Dummy noncoherent implementation. We don't provide a dma_cache_sync - * function so drivers using this API are highlighted with build warnings. 
- */ -static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp) -{ - return NULL; -} - -static inline void dma_free_noncoherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t handle) -{ -} - extern int dma_supported(struct device *dev, u64 mask); extern int arm_dma_set_mask(struct device *dev, u64 dma_mask); diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 5e11b3f0fe3a..178e60b80922 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -118,19 +118,5 @@ static inline void dma_mark_clean(void *addr, size_t size) { } -/* - * There is no dma_cache_sync() implementation, so just return NULL here. - */ -static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t flags) -{ - return NULL; -} - -static inline void dma_free_noncoherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t handle) -{ -} - #endif /* __KERNEL__ */ #endif /* __ASM_DMA_MAPPING_H */ diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h index 826aa9b519b7..72465ce59453 100644 --- a/arch/h8300/include/asm/dma-mapping.h +++ b/arch/h8300/include/asm/dma-mapping.h @@ -20,9 +20,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask) return 0; } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index c20d3caa7dad..58d2d8f1544a 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -34,9 +34,6 @@ extern int bad_dma_address; extern struct dma_map_ops *dma_ops; -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - static inline struct dma_map_ops *get_dma_ops(struct device *dev) { if (unlikely(dev == NULL)) diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index d36f83cc226a..a925ff03c964 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -23,9 +23,6 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t, extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - #define get_dma_ops(dev) platform_dma_get_ops(dev) #include diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 801dbe215a8c..bc81625d486f 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -98,9 +98,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return (dma_addr == DMA_ERROR_CODE); } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index b197595134ba..709b2ba79cc3 100644 --- 
a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -64,10 +64,4 @@ dma_set_mask(struct device *dev, u64 mask) extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); -void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); - -void dma_free_noncoherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle); - #endif /* _ASM_DMA_MAPPING_H */ diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 6c0fd13fa8e8..a914dc1cb6d1 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -112,7 +112,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) return gfp | dma_flag; } -void *dma_alloc_noncoherent(struct device *dev, size_t size, +static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t * dma_handle, gfp_t gfp) { void *ret; @@ -128,7 +128,6 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size, return ret; } -EXPORT_SYMBOL(dma_alloc_noncoherent); static void *mips_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs) @@ -137,6 +136,13 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, struct page *page = NULL; unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + /* + * XXX: seems like the coherent and non-coherent implementations could + * be consolidated. + */ + if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) + return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp); + gfp = massage_gfp_flags(dev, gfp); if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC)) @@ -161,13 +167,12 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, } -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) +static void mips_dma_free_noncoherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) { plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); free_pages((unsigned long) vaddr, get_order(size)); } -EXPORT_SYMBOL(dma_free_noncoherent); static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) @@ -176,6 +181,11 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct page *page = NULL; + if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { + mips_dma_free_noncoherent(dev, size, vaddr, dma_handle); + return; + } + plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); if (!plat_device_is_coherent(dev) && !hw_coherentio) diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index a81d6f68e9c8..57722528ea4d 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -37,26 +37,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) -{ - struct dma_attrs attrs; - - dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); - - return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs); -} - -static inline void dma_free_noncoherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) -{ - struct dma_attrs attrs; - - dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); - - dma_free_attrs(dev, size, cpu_addr, dma_handle, 
&attrs); -} - static inline int dma_supported(struct device *dev, u64 dma_mask) { /* Support 32 bit DMA mask exclusively */ diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index e6ca63ac4c6c..7971b421c677 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -177,9 +177,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) return daddr - get_dma_offset(dev); } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - #define ARCH_HAS_DMA_MMAP_COHERENT static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index c29c9c7d81e8..b729efeb9ad8 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -25,9 +25,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, { } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - #include static inline int dma_supported(struct device *dev, u64 mask) diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 3c78059e66ff..2c3fa2ccbe9b 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -38,9 +38,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask) void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir); -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { struct dma_map_ops *ops = get_dma_ops(dev); diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index a8c678494ce7..2564edcb9728 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -9,9 +9,6 @@ int dma_supported(struct device *dev, u64 mask); -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir) { diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index 4aba10e49310..e982dfa5d2f4 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -116,9 +116,6 @@ dma_set_mask(struct device *dev, u64 mask) return 0; } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) -#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) - /* * dma_alloc_noncoherent() is #defined to return coherent memory, * so there's no need to do any flushing here. 
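For reference, the pattern the MIPS hunk above introduces (a single dma_map_ops ->alloc callback serving both coherent and DMA_ATTR_NON_CONSISTENT requests) can be sketched roughly as follows; the example_* helper names are hypothetical and not part of the patch.

/* Hypothetical ->alloc implementation following the MIPS pattern above. */
static void *example_dma_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma_handle, gfp_t gfp,
			       struct dma_attrs *attrs)
{
	/* Non-consistent requests may take the cheaper, cacheable path. */
	if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		return example_alloc_noncoherent(dev, size, dma_handle, gfp);

	/* Otherwise allocate memory that is coherent with the device. */
	return example_alloc_coherent(dev, size, dma_handle, gfp);
}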
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 5294d03e59de..636e942940a0 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -80,9 +80,6 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) return 0; } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index f9b1b6cc48b6..7e47e4d6e69c 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -56,9 +56,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return (dma_addr == DMA_ERROR_CODE); } -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - extern int dma_supported(struct device *hwdev, u64 mask); extern int dma_set_mask(struct device *dev, u64 mask); diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index bf24c908e5ff..0a19581375da 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -32,9 +32,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) -#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL) - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index 56dd9ea2bc8c..ec321dd98f93 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -295,4 +295,22 @@ static inline void dma_free_coherent(struct device *dev, size_t size, return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL); } +static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs); +} + +static inline void dma_free_noncoherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); + dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); +} + #endif -- cgit v1.2.3 From efa21e432c7b3c8ae976039d614a017799b6e874 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 9 Sep 2015 15:39:46 -0700 Subject: dma-mapping: consolidate dma_mapping_error Currently there are three valid implementations of dma_mapping_error: (1) call ->mapping_error (2) check for a hardcoded error code (3) always return 0 This patch provides a common implementation that calls ->mapping_error if present, then checks for DMA_ERROR_CODE if defined or otherwise returns 0. [jcmvbkbc@gmail.com: fix xtensa] Signed-off-by: Christoph Hellwig Cc: Arnd Bergmann Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Yoshinori Sato Cc: Michal Simek Cc: Jonas Bonn Cc: Chris Metcalf Cc: Guan Xuetao Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H.
Peter Anvin" Cc: Andy Shevchenko Signed-off-by: Max Filippov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/dma-mapping.h | 5 ----- arch/arm/include/asm/dma-mapping.h | 9 --------- arch/arm64/include/asm/dma-mapping.h | 7 ------- arch/h8300/include/asm/dma-mapping.h | 5 ----- arch/hexagon/include/asm/dma-mapping.h | 11 +---------- arch/ia64/include/asm/dma-mapping.h | 7 ------- arch/microblaze/include/asm/dma-mapping.h | 11 ----------- arch/mips/include/asm/dma-mapping.h | 8 -------- arch/openrisc/include/asm/dma-mapping.h | 5 ----- arch/powerpc/include/asm/dma-mapping.h | 17 ++--------------- arch/s390/include/asm/dma-mapping.h | 10 ---------- arch/sh/include/asm/dma-mapping.h | 13 ++----------- arch/sparc/include/asm/dma-mapping.h | 6 ------ arch/tile/include/asm/dma-mapping.h | 7 ------- arch/unicore32/include/asm/dma-mapping.h | 10 ---------- arch/x86/include/asm/dma-mapping.h | 11 ----------- arch/xtensa/include/asm/dma-mapping.h | 9 --------- include/asm-generic/dma-mapping-common.h | 14 ++++++++++++++ 18 files changed, 19 insertions(+), 146 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 0552bf097245..80ac3e835efe 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -12,11 +12,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return get_dma_ops(dev)->mapping_error(dev, dma_addr); -} - static inline int dma_supported(struct device *dev, u64 mask) { return get_dma_ops(dev)->dma_supported(dev, mask); diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 0b7787167b64..9bef3c541c39 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -172,15 +172,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) static inline void dma_mark_clean(void *addr, size_t size) { } -/* - * DMA errors are defined by all-bits-set in the DMA address. 
- */ -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - debug_dma_mapping_error(dev, dma_addr); - return dma_addr == DMA_ERROR_CODE; -} - extern int dma_supported(struct device *dev, u64 mask); extern int arm_dma_set_mask(struct device *dev, u64 dma_mask); diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 178e60b80922..f45f444b7a66 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -84,13 +84,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) return (phys_addr_t)dev_addr; } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - debug_dma_mapping_error(dev, dev_addr); - return ops->mapping_error(dev, dev_addr); -} - static inline int dma_supported(struct device *dev, u64 mask) { struct dma_map_ops *ops = get_dma_ops(dev); diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h index 72465ce59453..5eef05382fff 100644 --- a/arch/h8300/include/asm/dma-mapping.h +++ b/arch/h8300/include/asm/dma-mapping.h @@ -20,9 +20,4 @@ static inline int dma_set_mask(struct device *dev, u64 mask) return 0; } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return 0; -} - #endif diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index 58d2d8f1544a..e66119290eca 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -31,6 +31,7 @@ struct device; extern int bad_dma_address; +#define DMA_ERROR_CODE bad_dma_address extern struct dma_map_ops *dma_ops; @@ -57,14 +58,4 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) return addr + size - 1 <= *dev->dma_mask; } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - if (dma_ops->mapping_error) - return dma_ops->mapping_error(dev, dma_addr); - - return (dma_addr == bad_dma_address); -} - #endif diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index a925ff03c964..27b713d0edbc 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -27,13 +27,6 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, #include -static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - debug_dma_mapping_error(dev, daddr); - return ops->mapping_error(dev, daddr); -} - static inline int dma_supported(struct device *dev, u64 mask) { struct dma_map_ops *ops = platform_dma_get_ops(dev); diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index bc81625d486f..e5b843839263 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -87,17 +87,6 @@ static inline void __dma_sync(unsigned long paddr, } } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - debug_dma_mapping_error(dev, dma_addr); - if (ops->mapping_error) - return ops->mapping_error(dev, dma_addr); - - return (dma_addr == DMA_ERROR_CODE); -} - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/mips/include/asm/dma-mapping.h 
b/arch/mips/include/asm/dma-mapping.h index 709b2ba79cc3..158bb36bdcb4 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -37,14 +37,6 @@ static inline int dma_supported(struct device *dev, u64 mask) return ops->dma_supported(dev, mask); } -static inline int dma_mapping_error(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - debug_dma_mapping_error(dev, mask); - return ops->mapping_error(dev, mask); -} - static inline int dma_set_mask(struct device *dev, u64 mask) { diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index 57722528ea4d..7dfe9d50856e 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -43,11 +43,6 @@ static inline int dma_supported(struct device *dev, u64 dma_mask) return dma_mask == DMA_BIT_MASK(32); } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return 0; -} - static inline int dma_set_mask(struct device *dev, u64 dma_mask) { if (!dev->dma_mask || !dma_supported(dev, dma_mask)) diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 7971b421c677..712d5afc055a 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -18,7 +18,9 @@ #include #include +#ifdef CONFIG_PPC64 #define DMA_ERROR_CODE (~(dma_addr_t)0x0) +#endif /* Some dma direct funcs must be visible for use in other dma_ops */ extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size, @@ -137,21 +139,6 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask); extern int __dma_set_mask(struct device *dev, u64 dma_mask); extern u64 __dma_get_required_mask(struct device *dev); -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - debug_dma_mapping_error(dev, dma_addr); - if (dma_ops->mapping_error) - return dma_ops->mapping_error(dev, dma_addr); - -#ifdef CONFIG_PPC64 - return (dma_addr == DMA_ERROR_CODE); -#else - return 0; -#endif -} - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { #ifdef CONFIG_SWIOTLB diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index b729efeb9ad8..3c293291319b 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -43,14 +43,4 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) return addr + size - 1 <= *dev->dma_mask; } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - debug_dma_mapping_error(dev, dma_addr); - if (dma_ops->mapping_error) - return dma_ops->mapping_error(dev, dma_addr); - return dma_addr == DMA_ERROR_CODE; -} - #endif /* _ASM_S390_DMA_MAPPING_H */ diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 2c3fa2ccbe9b..98308c497162 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -9,6 +9,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return dma_ops; } +#define DMA_ERROR_CODE 0 + #include static inline int dma_supported(struct device *dev, u64 mask) @@ -38,17 +40,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask) void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir); -static inline int dma_mapping_error(struct device 
*dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - debug_dma_mapping_error(dev, dma_addr); - if (ops->mapping_error) - return ops->mapping_error(dev, dma_addr); - - return dma_addr == 0; -} - /* arch/sh/mm/consistent.c */ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 2564edcb9728..5069d137453b 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -38,12 +38,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - debug_dma_mapping_error(dev, dma_addr); - return (dma_addr == DMA_ERROR_CODE); -} - static inline int dma_set_mask(struct device *dev, u64 mask) { #ifdef CONFIG_PCI diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index e982dfa5d2f4..f8f7a05023bf 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -74,13 +74,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) return addr + size - 1 <= *dev->dma_mask; } -static inline int -dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - debug_dma_mapping_error(dev, dma_addr); - return get_dma_ops(dev)->mapping_error(dev, dma_addr); -} - static inline int dma_supported(struct device *dev, u64 mask) { diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 636e942940a0..175d7e3f7b0a 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -38,16 +38,6 @@ static inline int dma_supported(struct device *dev, u64 mask) return dma_ops->dma_supported(dev, mask); } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - if (dma_ops->mapping_error) - return dma_ops->mapping_error(dev, dma_addr); - - return 0; -} - #include static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 7e47e4d6e69c..bbca62e3e43f 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -45,17 +45,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); #include -/* Make sure we keep the same behaviour */ -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - debug_dma_mapping_error(dev, dma_addr); - if (ops->mapping_error) - return ops->mapping_error(dev, dma_addr); - - return (dma_addr == DMA_ERROR_CODE); -} - extern int dma_supported(struct device *hwdev, u64 mask); extern int dma_set_mask(struct device *dev, u64 mask); diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 0a19581375da..21925bfdaff7 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -32,15 +32,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int -dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - debug_dma_mapping_error(dev, dma_addr); - return ops->mapping_error(dev, dma_addr); -} - static inline int dma_supported(struct device *dev, u64 mask) { diff --git 
a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index ec321dd98f93..cdaa24193d4c 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -313,4 +313,18 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size, dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); } +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + debug_dma_mapping_error(dev, dma_addr); + + if (get_dma_ops(dev)->mapping_error) + return get_dma_ops(dev)->mapping_error(dev, dma_addr); + +#ifdef DMA_ERROR_CODE + return dma_addr == DMA_ERROR_CODE; +#else + return 0; +#endif +} + #endif -- cgit v1.2.3 From ee196371d5cb1942ebdccc16bdce389812aa265e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 9 Sep 2015 15:39:49 -0700 Subject: dma-mapping: consolidate dma_supported Most architectures just call into ->dma_supported, but some also return 1 if the method is not present, or 0 if no dma ops are present (although that should never happen). Consolidate this broader version into common code. Also fix h8300 which incorrectly always returned 0, which would have been a problem if its dma_set_mask implementation wasn't a similarly buggy noop. As a few architectures have much more elaborate implementations, we still allow for arch overrides. [jcmvbkbc@gmail.com: fix xtensa] Signed-off-by: Christoph Hellwig Cc: Arnd Bergmann Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Yoshinori Sato Cc: Michal Simek Cc: Jonas Bonn Cc: Chris Metcalf Cc: Guan Xuetao Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Andy Shevchenko Signed-off-by: Max Filippov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/dma-mapping.h | 5 ----- arch/arm/include/asm/dma-mapping.h | 6 +++--- arch/arm64/include/asm/dma-mapping.h | 6 ------ arch/h8300/include/asm/dma-mapping.h | 5 ----- arch/hexagon/include/asm/dma-mapping.h | 1 + arch/ia64/include/asm/dma-mapping.h | 6 ------ arch/microblaze/include/asm/dma-mapping.h | 13 +------------ arch/mips/include/asm/dma-mapping.h | 6 ------ arch/openrisc/include/asm/dma-mapping.h | 5 +++-- arch/powerpc/include/asm/dma-mapping.h | 11 ----------- arch/s390/include/asm/dma-mapping.h | 9 --------- arch/sh/include/asm/dma-mapping.h | 10 ---------- arch/sparc/include/asm/dma-mapping.h | 1 + arch/tile/include/asm/dma-mapping.h | 6 ------ arch/unicore32/include/asm/dma-mapping.h | 10 ---------- arch/x86/include/asm/dma-mapping.h | 4 +++- arch/xtensa/include/asm/dma-mapping.h | 6 ------ include/asm-generic/dma-mapping-common.h | 13 +++++++++++++ 18 files changed, 25 insertions(+), 98 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 80ac3e835efe..9d763e535c5a 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -12,11 +12,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - return get_dma_ops(dev)->dma_supported(dev, mask); -} - static inline int dma_set_mask(struct device *dev, u64 mask) { return get_dma_ops(dev)->set_dma_mask(dev, mask); diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 9bef3c541c39..2f9c731691c0 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -38,12 +38,14 @@
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) dev->archdata.dma_ops = ops; } +#define HAVE_ARCH_DMA_SUPPORTED 1 +extern int dma_supported(struct device *dev, u64 mask); + /* * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent * implementations, we don't provide a dma_cache_sync function so drivers using * this API are highlighted with build warnings. */ - #include static inline int dma_set_mask(struct device *dev, u64 mask) @@ -172,8 +174,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) static inline void dma_mark_clean(void *addr, size_t size) { } -extern int dma_supported(struct device *dev, u64 mask); - extern int arm_dma_set_mask(struct device *dev, u64 dma_mask); /** diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index f45f444b7a66..f519a58c55ae 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -84,12 +84,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) return (phys_addr_t)dev_addr; } -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - return ops->dma_supported(dev, mask); -} - static inline int dma_set_mask(struct device *dev, u64 mask) { if (!dev->dma_mask || !dma_supported(dev, mask)) diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h index 5eef05382fff..48d652eb1b5f 100644 --- a/arch/h8300/include/asm/dma-mapping.h +++ b/arch/h8300/include/asm/dma-mapping.h @@ -10,11 +10,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - return 0; -} - static inline int dma_set_mask(struct device *dev, u64 mask) { return 0; diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index e66119290eca..36e8de710b32 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -43,6 +43,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return dma_ops; } +#define HAVE_ARCH_DMA_SUPPORTED 1 extern int dma_supported(struct device *dev, u64 mask); extern int dma_set_mask(struct device *dev, u64 mask); extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle); diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 27b713d0edbc..7982caa7c5e7 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -27,12 +27,6 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - return ops->dma_supported(dev, mask); -} - static inline int dma_set_mask (struct device *dev, u64 mask) { diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index e5b843839263..3b453c503a43 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -44,16 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &dma_direct_ops; } -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (unlikely(!ops)) - return 0; - if (!ops->dma_supported) - return 1; - return ops->dma_supported(dev, mask); -} +#include static inline int 
dma_set_mask(struct device *dev, u64 dma_mask) { @@ -69,8 +60,6 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) return 0; } -#include - static inline void __dma_sync(unsigned long paddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 158bb36bdcb4..8bf8ec30a4b2 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -31,12 +31,6 @@ static inline void dma_mark_clean(void *addr, size_t size) {} #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - return ops->dma_supported(dev, mask); -} - static inline int dma_set_mask(struct device *dev, u64 mask) { diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h index 7dfe9d50856e..8fc08b883477 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -35,14 +35,15 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &or1k_dma_map_ops; } -#include - +#define HAVE_ARCH_DMA_SUPPORTED 1 static inline int dma_supported(struct device *dev, u64 dma_mask) { /* Support 32 bit DMA mask exclusively */ return dma_mask == DMA_BIT_MASK(32); } +#include + static inline int dma_set_mask(struct device *dev, u64 dma_mask) { if (!dev->dma_mask || !dma_supported(dev, dma_mask)) diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 712d5afc055a..dd43e0c6f219 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -124,17 +124,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off) #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - if (unlikely(dma_ops == NULL)) - return 0; - if (dma_ops->dma_supported == NULL) - return 1; - return dma_ops->dma_supported(dev, mask); -} - extern int dma_set_mask(struct device *dev, u64 dma_mask); extern int __dma_set_mask(struct device *dev, u64 dma_mask); extern u64 __dma_get_required_mask(struct device *dev); diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 3c293291319b..1f42489797da 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -27,15 +27,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - if (dma_ops->dma_supported == NULL) - return 1; - return dma_ops->dma_supported(dev, mask); -} - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 98308c497162..088f6e5f1a92 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -13,16 +13,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (ops->dma_supported) - return ops->dma_supported(dev, mask); - - return 1; -} - static inline int dma_set_mask(struct device *dev, u64 mask) { struct dma_map_ops *ops = get_dma_ops(dev); diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 
5069d137453b..184651bb0b46 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -7,6 +7,7 @@ #define DMA_ERROR_CODE (~(dma_addr_t)0x0) +#define HAVE_ARCH_DMA_SUPPORTED 1 int dma_supported(struct device *dev, u64 mask); static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index f8f7a05023bf..559ed4a60077 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -74,12 +74,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) return addr + size - 1 <= *dev->dma_mask; } -static inline int -dma_supported(struct device *dev, u64 mask) -{ - return get_dma_ops(dev)->dma_supported(dev, mask); -} - static inline int dma_set_mask(struct device *dev, u64 mask) { diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 175d7e3f7b0a..21231c14182c 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -28,16 +28,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return &swiotlb_dma_map_ops; } -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *dma_ops = get_dma_ops(dev); - - if (unlikely(dma_ops == NULL)) - return 0; - - return dma_ops->dma_supported(dev, mask); -} - #include static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index bbca62e3e43f..b1fbf582048b 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -43,9 +43,11 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); #define arch_dma_alloc_attrs arch_dma_alloc_attrs +#define HAVE_ARCH_DMA_SUPPORTED 1 +extern int dma_supported(struct device *hwdev, u64 mask); + #include -extern int dma_supported(struct device *hwdev, u64 mask); extern int dma_set_mask(struct device *dev, u64 mask); extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 21925bfdaff7..329abc7211e9 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -32,12 +32,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int -dma_supported(struct device *dev, u64 mask) -{ - return 1; -} - static inline int dma_set_mask(struct device *dev, u64 mask) { diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index cdaa24193d4c..67fa6bcd644c 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -327,4 +327,17 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) #endif } +#ifndef HAVE_ARCH_DMA_SUPPORTED +static inline int dma_supported(struct device *dev, u64 mask) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + if (!ops) + return 0; + if (!ops->dma_supported) + return 1; + return ops->dma_supported(dev, mask); +} +#endif + #endif -- cgit v1.2.3 From 452e06af1f0149b01201f94264d452cd7a95db7a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 9 Sep 2015 15:39:53 -0700 Subject: dma-mapping: consolidate dma_set_mask Almost everyone implements dma_set_mask the same way, although 
sometimes that's hidden in ->set_dma_mask methods. This patch consolidates those into a common implementation that either calls ->set_dma_mask if present or otherwise uses the default implementation. Some architectures used to only call ->set_dma_mask after the initial checks, and those instances have been fixed to do the full work. h8300 implemented dma_set_mask bogusly as a no-op and has been fixed. Unfortunately some architectures overload it with unrelated semantics like changing the dma_ops, so we still need to allow for an architecture override for now. [jcmvbkbc@gmail.com: fix xtensa] Signed-off-by: Christoph Hellwig Cc: Arnd Bergmann Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Yoshinori Sato Cc: Michal Simek Cc: Jonas Bonn Cc: Chris Metcalf Cc: Guan Xuetao Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Andy Shevchenko Signed-off-by: Max Filippov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/dma-mapping.h | 5 ----- arch/alpha/kernel/pci-noop.c | 10 ---------- arch/alpha/kernel/pci_iommu.c | 11 ----------- arch/arm/include/asm/dma-mapping.h | 5 ----- arch/arm64/include/asm/dma-mapping.h | 9 --------- arch/h8300/include/asm/dma-mapping.h | 5 ----- arch/hexagon/include/asm/dma-mapping.h | 1 - arch/hexagon/kernel/dma.c | 11 ----------- arch/ia64/include/asm/dma-mapping.h | 9 --------- arch/microblaze/include/asm/dma-mapping.h | 14 -------------- arch/mips/include/asm/dma-mapping.h | 16 ---------------- arch/mips/loongson64/common/dma-swiotlb.c | 3 +++ arch/openrisc/include/asm/dma-mapping.h | 9 --------- arch/powerpc/include/asm/dma-mapping.h | 4 +++- arch/s390/include/asm/dma-mapping.h | 2 -- arch/s390/pci/pci_dma.c | 10 ---------- arch/sh/include/asm/dma-mapping.h | 14 -------------- arch/sparc/include/asm/dma-mapping.h | 4 +++- arch/tile/include/asm/dma-mapping.h | 6 ++++-- arch/unicore32/include/asm/dma-mapping.h | 10 ---------- arch/x86/include/asm/dma-mapping.h | 2 -- arch/x86/kernel/pci-dma.c | 11 ----------- arch/xtensa/include/asm/dma-mapping.h | 11 ----------- include/asm-generic/dma-mapping-common.h | 15 +++++++++++++++ 24 files changed, 28 insertions(+), 169 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 9d763e535c5a..72a8ca7796d9 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -12,11 +12,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #include -static inline int dma_set_mask(struct device *dev, u64 mask) -{ - return get_dma_ops(dev)->set_dma_mask(dev, mask); -} - #define dma_cache_sync(dev, va, size, dir) ((void)0) #endif /* _ALPHA_DMA_MAPPING_H */ diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c index df24b76f9246..2b1f4a1e9272 100644 --- a/arch/alpha/kernel/pci-noop.c +++ b/arch/alpha/kernel/pci-noop.c @@ -166,15 +166,6 @@ static int alpha_noop_supported(struct device *dev, u64 mask) return mask < 0x00ffffffUL ?
0 : 1;
 }
 
-static int alpha_noop_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-	return 0;
-}
-
 struct dma_map_ops alpha_noop_ops = {
 	.alloc			= alpha_noop_alloc_coherent,
 	.free			= alpha_noop_free_coherent,
@@ -182,7 +173,6 @@ struct dma_map_ops alpha_noop_ops = {
 	.map_sg			= alpha_noop_map_sg,
 	.mapping_error		= alpha_noop_mapping_error,
 	.dma_supported		= alpha_noop_supported,
-	.set_dma_mask		= alpha_noop_set_mask,
 };
 
 struct dma_map_ops *dma_ops = &alpha_noop_ops;
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index eddee7720343..8969bf2dfe3a 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -939,16 +939,6 @@ static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return dma_addr == 0;
 }
 
-static int alpha_pci_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask ||
-	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-	return 0;
-}
-
 struct dma_map_ops alpha_pci_ops = {
 	.alloc			= alpha_pci_alloc_coherent,
 	.free			= alpha_pci_free_coherent,
@@ -958,7 +948,6 @@ struct dma_map_ops alpha_pci_ops = {
 	.unmap_sg		= alpha_pci_unmap_sg,
 	.mapping_error		= alpha_pci_mapping_error,
 	.dma_supported		= alpha_pci_supported,
-	.set_dma_mask		= alpha_pci_set_mask,
 };
 
 struct dma_map_ops *dma_ops = &alpha_pci_ops;
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 2f9c731691c0..ccb3aa64640d 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -48,11 +48,6 @@ extern int dma_supported(struct device *dev, u64 mask);
  */
 #include <asm-generic/dma-mapping-common.h>
 
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-	return get_dma_ops(dev)->set_dma_mask(dev, mask);
-}
-
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
 #endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index f519a58c55ae..cfdb34bedbcd 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -84,15 +84,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
 	return (phys_addr_t)dev_addr;
 }
 
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-	*dev->dma_mask = mask;
-
-	return 0;
-}
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h
index 48d652eb1b5f..d9b5b806afe6 100644
--- a/arch/h8300/include/asm/dma-mapping.h
+++ b/arch/h8300/include/asm/dma-mapping.h
@@ -10,9 +10,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-	return 0;
-}
-
 #endif
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index 36e8de710b32..268fde8a4575 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -45,7 +45,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 #define HAVE_ARCH_DMA_SUPPORTED 1
 extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 mask);
 extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 			   enum dma_data_direction direction);
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index b74f9bae31a3..9e3ddf792bd3 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -44,17 +44,6 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
-int dma_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-
-	return 0;
-}
-EXPORT_SYMBOL(dma_set_mask);
-
 static struct gen_pool *coherent_pool;
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 7982caa7c5e7..9beccf8010bd 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -27,15 +27,6 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline int
-dma_set_mask (struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-	*dev->dma_mask = mask;
-	return 0;
-}
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 3b453c503a43..24b12970c9cf 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -46,20 +46,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (unlikely(ops == NULL))
-		return -EIO;
-	if (ops->set_dma_mask)
-		return ops->set_dma_mask(dev, dma_mask);
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-	*dev->dma_mask = dma_mask;
-	return 0;
-}
-
 static inline void __dma_sync(unsigned long paddr,
 			      size_t size, enum dma_data_direction direction)
 {
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 8bf8ec30a4b2..e604f760c4a0 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -31,22 +31,6 @@ static inline void dma_mark_clean(void *addr, size_t size) {}
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if(!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	if (ops->set_dma_mask)
-		return ops->set_dma_mask(dev, mask);
-
-	*dev->dma_mask = mask;
-
-	return 0;
-}
-
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction);
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
index ef9da3b5c543..4ffa6fc81c8f 100644
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -85,6 +85,9 @@ static void loongson_dma_sync_sg_for_device(struct device *dev,
 
 static int loongson_dma_set_mask(struct device *dev, u64 mask)
 {
+	if (!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+
 	if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits)) {
 		*dev->dma_mask = DMA_BIT_MASK(loongson_sysconf.dma_mask_bits);
 		return -EIO;
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
index 8fc08b883477..413bfcf86384 100644
--- a/arch/openrisc/include/asm/dma-mapping.h
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -44,13 +44,4 @@ static inline int dma_supported(struct device *dev, u64 dma_mask)
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
 #endif /* __ASM_OPENRISC_DMA_MAPPING_H */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index dd43e0c6f219..7f522c021dc3 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -122,9 +122,11 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
 /* this will be removed soon */
 #define flush_write_buffers()
 
+#define HAVE_ARCH_DMA_SET_MASK 1
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+
 #include <asm-generic/dma-mapping-common.h>
 
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
 extern int __dma_set_mask(struct device *dev, u64 dma_mask);
 extern u64 __dma_get_required_mask(struct device *dev);
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 1f42489797da..b3fd54d93dd2 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -18,8 +18,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return &s390_dma_ops;
 }
 
-extern int dma_set_mask(struct device *dev, u64 mask);
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction direction)
 {
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 42b76580c8b8..37505b8b4093 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -262,16 +262,6 @@ out:
 	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
 }
 
-int dma_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dma_set_mask);
-
 static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 				     unsigned long offset, size_t size,
 				     enum dma_data_direction direction,
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 088f6e5f1a92..a3745a3fe029 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -13,20 +13,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-	if (ops->set_dma_mask)
-		return ops->set_dma_mask(dev, mask);
-
-	*dev->dma_mask = mask;
-
-	return 0;
-}
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction dir);
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 184651bb0b46..a21da597b0b5 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -37,7 +37,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return dma_ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
+#define HAVE_ARCH_DMA_SET_MASK 1
 
 static inline int dma_set_mask(struct device *dev, u64 mask)
 {
@@ -52,4 +52,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 	return -EINVAL;
 }
 
+#include <asm-generic/dma-mapping-common.h>
+
 #endif
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 559ed4a60077..96ac6cce4a32 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -59,8 +59,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
-#include <asm-generic/dma-mapping-common.h>
-
 static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 {
 	dev->archdata.dma_ops = ops;
@@ -74,6 +72,10 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	return addr + size - 1 <= *dev->dma_mask;
 }
 
+#define HAVE_ARCH_DMA_SET_MASK 1
+
+#include <asm-generic/dma-mapping-common.h>
+
 static inline int
 dma_set_mask(struct device *dev, u64 mask)
 {
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h
index 21231c14182c..8140e053ccd3 100644
--- a/arch/unicore32/include/asm/dma-mapping.h
+++ b/arch/unicore32/include/asm/dma-mapping.h
@@ -50,16 +50,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr,
 		size_t size, enum dma_data_direction direction)
 {
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index b1fbf582048b..953b7263f844 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -48,8 +48,6 @@ extern int dma_supported(struct device *hwdev, u64 mask);
 
 #include <asm-generic/dma-mapping-common.h>
 
-extern int dma_set_mask(struct device *dev, u64 mask);
-
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_addr, gfp_t flag,
 					struct dma_attrs *attrs);
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index bd23971e8f1d..84b8ef82a159 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -58,17 +58,6 @@ EXPORT_SYMBOL(x86_dma_fallback_dev);
 /* Number of entries preallocated for DMA-API debugging */
 #define PREALLOC_DMA_DEBUG_ENTRIES	65536
 
-int dma_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-
-	return 0;
-}
-EXPORT_SYMBOL(dma_set_mask);
-
 void __init pci_iommu_alloc(void)
 {
 	struct iommu_table_entry *p;
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 329abc7211e9..4427f38b634e 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -32,17 +32,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-	if(!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-
-	return 0;
-}
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction);
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 67fa6bcd644c..b1bc954eccf3 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -340,4 +340,19 @@ static inline int dma_supported(struct device *dev, u64 mask)
 }
 #endif
 
+#ifndef HAVE_ARCH_DMA_SET_MASK
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (ops->set_dma_mask)
+		return ops->set_dma_mask(dev, mask);
+
+	if (!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+	*dev->dma_mask = mask;
+	return 0;
+}
+#endif
+
 #endif
-- cgit v1.2.3
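
The new hunk in include/asm-generic/dma-mapping-common.h is what lets all of the per-architecture dma_set_mask() copies above go away: a single generic fallback now covers the common case, and an architecture that still needs special behaviour defines HAVE_ARCH_DMA_SET_MASK before pulling in the common header and supplies its own dma_set_mask(), as the powerpc, sparc and tile hunks do. A minimal sketch of that opt-out pattern follows; the "foo" architecture, its header path and the foo_dma_mask_ok() helper are illustrative assumptions, not part of the patch.

/*
 * Sketch only (assumed example, not taken from the patch above): how an
 * architecture keeps its own dma_set_mask() now that the asm-generic
 * fallback exists.  "foo" and foo_dma_mask_ok() are hypothetical.
 */

/* arch/foo/include/asm/dma-mapping.h */

/* Tell the common header not to define the generic dma_set_mask(). */
#define HAVE_ARCH_DMA_SET_MASK 1

#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	/* Arch-specific policy: reject masks the platform cannot honour. */
	if (!dev->dma_mask || !foo_dma_mask_ok(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}

Architectures that do not opt out get the generic version, which first defers to ops->set_dma_mask() when a dma_map_ops instance provides that hook (as the loongson64 code above appears to) and otherwise validates the mask with dma_supported() before storing it in dev->dma_mask.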