author     Linus Torvalds <torvalds@linux-foundation.org>   2018-04-03 03:18:45 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-04-03 03:18:45 +0300
commit     2fcd2b306aa80771e053275ed74b2dfe7e3d1434 (patch)
tree       b19f4e3e9552eab00056c833650e692192fe8f5c /drivers
parent     ce6eba3dba366b607c0a363c7cdbd4ee8fcc6434 (diff)
parent     e89f5b37015309a8bdf0b21d08007580b92f92a4 (diff)
download   linux-2fcd2b306aa80771e053275ed74b2dfe7e3d1434.tar.xz
Merge branch 'x86-dma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 dma mapping updates from Ingo Molnar:
 "This tree, by Christoph Hellwig, switches over the x86 architecture
  to the generic dma-direct and swiotlb code, and also unifies more of
  the dma-direct code between architectures. The now unused x86-only
  primitives are removed."

* 'x86-dma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  dma-mapping: Don't clear GFP_ZERO in dma_alloc_attrs
  swiotlb: Make swiotlb_{alloc,free}_buffer depend on CONFIG_DMA_DIRECT_OPS
  dma/swiotlb: Remove swiotlb_{alloc,free}_coherent()
  dma/direct: Handle force decryption for DMA coherent buffers in common code
  dma/direct: Handle the memory encryption bit in common code
  dma/swiotlb: Remove swiotlb_set_mem_attributes()
  set_memory.h: Provide set_memory_{en,de}crypted() stubs
  x86/dma: Remove dma_alloc_coherent_gfp_flags()
  iommu/intel-iommu: Enable CONFIG_DMA_DIRECT_OPS=y and clean up intel_{alloc,free}_coherent()
  iommu/amd_iommu: Use CONFIG_DMA_DIRECT_OPS=y and dma_direct_{alloc,free}()
  x86/dma/amd_gart: Use dma_direct_{alloc,free}()
  x86/dma/amd_gart: Look at dev->coherent_dma_mask instead of GFP_DMA
  x86/dma: Use generic swiotlb_ops
  x86/dma: Use DMA-direct (CONFIG_DMA_DIRECT_OPS=y)
  x86/dma: Remove dma_alloc_coherent_mask()
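For orientation before the diff: the series converges drivers on one generic dma_map_ops instance, dma_direct_ops, which maps DMA addresses one-to-one with physical addresses and honors dev->coherent_dma_mask. A rough sketch of its shape — the field list is abridged and paraphrased from the 4.16-era headers, not the literal kernel definition:

/*
 * Sketch only: abridged shape of the generic direct-mapping ops that the
 * drivers below now fall back to.
 */
const struct dma_map_ops dma_direct_ops_sketch = {
	.alloc		= dma_direct_alloc,	/* page allocator honoring coherent_dma_mask */
	.free		= dma_direct_free,
	.map_page	= dma_direct_map_page,	/* phys_to_dma(), no IOMMU translation */
	.map_sg		= dma_direct_map_sg,
	.dma_supported	= dma_direct_supported,
};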
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/iommu/Kconfig         |   2
-rw-r--r--   drivers/iommu/amd_iommu.c     |  75
-rw-r--r--   drivers/iommu/intel-iommu.c   |  65
-rw-r--r--   drivers/xen/swiotlb-xen.c     |  16
4 files changed, 46 insertions, 112 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index f3a21343e636..df171cb85822 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -107,6 +107,7 @@ config IOMMU_PGTABLES_L2
# AMD IOMMU support
config AMD_IOMMU
bool "AMD IOMMU support"
+ select DMA_DIRECT_OPS
select SWIOTLB
select PCI_MSI
select PCI_ATS
@@ -142,6 +143,7 @@ config DMAR_TABLE
config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
+ select DMA_DIRECT_OPS
select IOMMU_API
select IOMMU_IOVA
select DMAR_TABLE
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 74788fdeb773..83819d0cbf90 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -28,6 +28,7 @@
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
@@ -2193,7 +2194,7 @@ static int amd_iommu_add_device(struct device *dev)
			       dev_name(dev));

		iommu_ignore_device(dev);
-		dev->dma_ops = &nommu_dma_ops;
+		dev->dma_ops = &dma_direct_ops;
		goto out;
	}
	init_iommu_group(dev);
@@ -2599,51 +2600,32 @@ static void *alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_addr, gfp_t flag,
			    unsigned long attrs)
{
	u64 dma_mask = dev->coherent_dma_mask;
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
-	struct page *page;
-
-	domain = get_domain(dev);
-	if (PTR_ERR(domain) == -EINVAL) {
-		page = alloc_pages(flag, get_order(size));
-		*dma_addr = page_to_phys(page);
-		return page_address(page);
-	} else if (IS_ERR(domain))
-		return NULL;
+	struct protection_domain *domain = get_domain(dev);
+	bool is_direct = false;
+	void *virt_addr;

-	dma_dom   = to_dma_ops_domain(domain);
-	size      = PAGE_ALIGN(size);
-	dma_mask  = dev->coherent_dma_mask;
-	flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-	flag     |= __GFP_ZERO;
-
-	page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
-	if (!page) {
-		if (!gfpflags_allow_blocking(flag))
-			return NULL;
-
-		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-						 get_order(size), flag);
-		if (!page)
+	if (IS_ERR(domain)) {
+		if (PTR_ERR(domain) != -EINVAL)
			return NULL;
+		is_direct = true;
	}

+	virt_addr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
+	if (!virt_addr || is_direct)
+		return virt_addr;
+
	if (!dma_mask)
		dma_mask = *dev->dma_mask;

-	*dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
-				 size, DMA_BIDIRECTIONAL, dma_mask);
-
+	*dma_addr = __map_single(dev, to_dma_ops_domain(domain),
+			virt_to_phys(virt_addr), PAGE_ALIGN(size),
+			DMA_BIDIRECTIONAL, dma_mask);
	if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
		goto out_free;
-
-	return page_address(page);
+	return virt_addr;

out_free:
-
-	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
-		__free_pages(page, get_order(size));
-
+	dma_direct_free(dev, size, virt_addr, *dma_addr, attrs);
	return NULL;
}
@@ -2654,24 +2636,17 @@ static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr,
			  unsigned long attrs)
{
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
-	struct page *page;
+	struct protection_domain *domain = get_domain(dev);

-	page = virt_to_page(virt_addr);
	size = PAGE_ALIGN(size);

-	domain = get_domain(dev);
-	if (IS_ERR(domain))
-		goto free_mem;
+	if (!IS_ERR(domain)) {
+		struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);

-	dma_dom = to_dma_ops_domain(domain);
-
-	__unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
+		__unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
+	}

-free_mem:
-	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
-		__free_pages(page, get_order(size));
+	dma_direct_free(dev, size, virt_addr, dma_addr, attrs);
}

/*
@@ -2680,7 +2655,7 @@ free_mem:
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
-	if (!x86_dma_supported(dev, mask))
+	if (!dma_direct_supported(dev, mask))
		return 0;
	return check_device(dev);
}
@@ -2794,7 +2769,7 @@ int __init amd_iommu_init_dma_ops(void)
	 * continue to be SWIOTLB.
	 */
	if (!swiotlb)
-		dma_ops = &nommu_dma_ops;
+		dma_ops = &dma_direct_ops;

	if (amd_iommu_unmap_flush)
		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 582fd01cb7d1..24d1b1b42013 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
@@ -45,6 +46,7 @@
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
+#include <linux/dma-direct.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
@@ -3707,61 +3709,30 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
-	struct page *page = NULL;
-	int order;
+	void *vaddr;

-	size = PAGE_ALIGN(size);
-	order = get_order(size);
+	vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+	if (iommu_no_mapping(dev) || !vaddr)
+		return vaddr;

-	if (!iommu_no_mapping(dev))
-		flags &= ~(GFP_DMA | GFP_DMA32);
-	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
-		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
-			flags |= GFP_DMA;
-		else
-			flags |= GFP_DMA32;
-	}
-
-	if (gfpflags_allow_blocking(flags)) {
-		unsigned int count = size >> PAGE_SHIFT;
-
-		page = dma_alloc_from_contiguous(dev, count, order, flags);
-		if (page && iommu_no_mapping(dev) &&
-		    page_to_phys(page) + size > dev->coherent_dma_mask) {
-			dma_release_from_contiguous(dev, page, count);
-			page = NULL;
-		}
-	}
-
-	if (!page)
-		page = alloc_pages(flags, order);
-	if (!page)
-		return NULL;
-	memset(page_address(page), 0, size);
-
-	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
-					 DMA_BIDIRECTIONAL,
-					 dev->coherent_dma_mask);
-	if (*dma_handle)
-		return page_address(page);
-	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
-		__free_pages(page, order);
+	*dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
+			PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
+			dev->coherent_dma_mask);
+	if (!*dma_handle)
+		goto out_free_pages;
+	return vaddr;

+out_free_pages:
+	dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
	return NULL;
}

static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_handle, unsigned long attrs)
{
-	int order;
-	struct page *page = virt_to_page(vaddr);
-
-	size = PAGE_ALIGN(size);
-	order = get_order(size);
-
-	intel_unmap(dev, dma_handle, size);
-	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
-		__free_pages(page, order);
+	if (!iommu_no_mapping(dev))
+		intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
+	dma_direct_free(dev, size, vaddr, dma_handle, attrs);
}

static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
@@ -3871,7 +3842,7 @@ const struct dma_map_ops intel_dma_ops = {
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
#ifdef CONFIG_X86
-	.dma_supported = x86_dma_supported,
+	.dma_supported = dma_direct_supported,
#endif
};
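The intel-iommu conversion leans on the common dma-direct code handling the SME memory-encryption bit, which in turn relies on the stubs that the listed commit "set_memory.h: Provide set_memory_{en,de}crypted() stubs" adds. Approximately — paraphrased from that commit's description, not copied from this drivers/ diff:

/*
 * Paraphrased sketch: architectures without memory encryption get no-op
 * versions, so common dma-direct code can call these unconditionally.
 */
#ifndef CONFIG_ARCH_HAS_MEM_ENCRYPT
static inline int set_memory_encrypted(unsigned long addr, int numpages)
{
	return 0;	/* nothing to do without SME/SEV */
}

static inline int set_memory_decrypted(unsigned long addr, int numpages)
{
	return 0;
}
#endif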
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 5bb72d3f8337..e1c60899fdbc 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -53,20 +53,6 @@
 * API.
 */

-#ifndef CONFIG_X86
-static unsigned long dma_alloc_coherent_mask(struct device *dev,
-					     gfp_t gfp)
-{
-	unsigned long dma_mask = 0;
-
-	dma_mask = dev->coherent_dma_mask;
-	if (!dma_mask)
-		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
-
-	return dma_mask;
-}
-#endif
-
#define XEN_SWIOTLB_ERROR_CODE	(~(dma_addr_t)0x0)

static char *xen_io_tlb_start, *xen_io_tlb_end;
@@ -328,7 +314,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
-		dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
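Why xen-swiotlb can drop its local mask helper: the generic allocator already derives a suitable GFP zone from the device's coherent mask before allocating, so defaulting an unset mask per-driver is redundant. A rough paraphrase of that zone selection — not the literal dma-direct source:

/*
 * Sketch, paraphrased: pick an allocation zone from the coherent mask.
 * The helper name is illustrative, not a kernel symbol.
 */
static gfp_t sketch_gfp_for_coherent_mask(struct device *dev, gfp_t gfp)
{
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		gfp |= GFP_DMA;		/* legacy ISA-style zone */
	else if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		gfp |= GFP_DMA32;	/* memory below 4 GB */
	return gfp;			/* wide mask: any memory will do */
}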