author     Michael Ellerman <mpe@ellerman.id.au>   2019-02-21 15:15:10 +0300
committer  Michael Ellerman <mpe@ellerman.id.au>   2019-02-21 15:15:10 +0300
commit     d0055df0c9c1471c389197a69f43e300185a75aa (patch)
tree       7a1c253162f9ea99bf544fac4ae9219c398cae3f /arch/powerpc/sysdev
parent     637cfeb9f99ca097747139a5419bc23e0b885655 (diff)
parent     4a605e2d1a69f5aea06da10d81e22802a90812a3 (diff)
Merge branch 'topic/dma' into next
Merge hch's big DMA rework series. This is in a topic branch in case he wants to merge it to minimise conflicts.
Diffstat (limited to 'arch/powerpc/sysdev')
-rw-r--r--  arch/powerpc/sysdev/dart_iommu.c  58
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c     25
2 files changed, 31 insertions, 52 deletions
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index a5b40d1460f1..809797dbe169 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -360,13 +360,6 @@ static void iommu_table_dart_setup(void)
set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map);
}
-static void pci_dma_dev_setup_dart(struct pci_dev *dev)
-{
- if (dart_is_u4)
- set_dma_offset(&dev->dev, DART_U4_BYPASS_BASE);
- set_iommu_table_base(&dev->dev, &iommu_table_dart);
-}
-
static void pci_dma_bus_setup_dart(struct pci_bus *bus)
{
if (!iommu_table_dart_inited) {
@@ -390,27 +383,18 @@ static bool dart_device_on_pcie(struct device *dev)
return false;
}
-static int dart_dma_set_mask(struct device *dev, u64 dma_mask)
+static void pci_dma_dev_setup_dart(struct pci_dev *dev)
{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
- /* U4 supports a DART bypass, we use it for 64-bit capable
- * devices to improve performances. However, that only works
- * for devices connected to U4 own PCIe interface, not bridged
- * through hypertransport. We need the device to support at
- * least 40 bits of addresses.
- */
- if (dart_device_on_pcie(dev) && dma_mask >= DMA_BIT_MASK(40)) {
- dev_info(dev, "Using 64-bit DMA iommu bypass\n");
- set_dma_ops(dev, &dma_nommu_ops);
- } else {
- dev_info(dev, "Using 32-bit DMA via iommu\n");
- set_dma_ops(dev, &dma_iommu_ops);
- }
+ if (dart_is_u4 && dart_device_on_pcie(&dev->dev))
+ dev->dev.archdata.dma_offset = DART_U4_BYPASS_BASE;
+ set_iommu_table_base(&dev->dev, &iommu_table_dart);
+}
- *dev->dma_mask = dma_mask;
- return 0;
+static bool iommu_bypass_supported_dart(struct pci_dev *dev, u64 mask)
+{
+ return dart_is_u4 &&
+ dart_device_on_pcie(&dev->dev) &&
+ mask >= DMA_BIT_MASK(40);
}
void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
@@ -428,26 +412,20 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
/* Initialize the DART HW */
if (dart_init(dn) != 0)
- goto bail;
-
- /* Setup bypass if supported */
- if (dart_is_u4)
- ppc_md.dma_set_mask = dart_dma_set_mask;
+ return;
+ /*
+ * U4 supports a DART bypass, we use it for 64-bit capable devices to
+ * improve performance. However, that only works for devices connected
+ * to the U4 own PCIe interface, not bridged through hypertransport.
+ * We need the device to support at least 40 bits of addresses.
+ */
controller_ops->dma_dev_setup = pci_dma_dev_setup_dart;
controller_ops->dma_bus_setup = pci_dma_bus_setup_dart;
+ controller_ops->iommu_bypass_supported = iommu_bypass_supported_dart;
/* Setup pci_dma ops */
set_pci_dma_ops(&dma_iommu_ops);
- return;
-
- bail:
- /* If init failed, use direct iommu and null setup functions */
- controller_ops->dma_dev_setup = NULL;
- controller_ops->dma_bus_setup = NULL;
-
- /* Setup pci_dma ops */
- set_pci_dma_ops(&dma_nommu_ops);
}
#ifdef CONFIG_PM
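
The dart_iommu.c hunks above drop the old dart_dma_set_mask() hook and instead register iommu_bypass_supported_dart() on the controller. As a rough orientation, the sketch below shows what a generic consumer of that controller hook could look like; the helper name pci_iommu_bypass_supported() and the exact call site are assumptions about the rest of the DMA rework series, not code taken from this patch.

/*
 * Illustrative only: a minimal consumer of the iommu_bypass_supported
 * controller hook wired up in iommu_init_early_dart() above. The helper
 * name is hypothetical; pci_bus_to_host(), to_pci_dev() and the
 * controller_ops field are existing powerpc/PCI interfaces.
 */
#include <linux/pci.h>
#include <asm/pci-bridge.h>

static bool pci_iommu_bypass_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_controller *phb = pci_bus_to_host(pdev->bus);

	/* No hook registered means this PHB cannot bypass its IOMMU */
	if (!phb->controller_ops.iommu_bypass_supported)
		return false;

	/* For a DART PHB this ends up in iommu_bypass_supported_dart() */
	return phb->controller_ops.iommu_bypass_supported(pdev, mask);
}

For a U4 DART the callback returns true only for devices on U4's own PCIe interface with a DMA mask of at least 40 bits, matching the conditions of the removed dart_dma_set_mask().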
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 918be816b097..f49aec251a5a 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -40,6 +40,7 @@
#include <asm/mpc85xx.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
+#include <asm/swiotlb.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
@@ -114,33 +115,33 @@ static struct pci_ops fsl_indirect_pcie_ops =
static u64 pci64_dma_offset;
#ifdef CONFIG_SWIOTLB
+static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+
+ pdev->dev.bus_dma_mask =
+ hose->dma_window_base_cur + hose->dma_window_size;
+}
+
static void setup_swiotlb_ops(struct pci_controller *hose)
{
- if (ppc_swiotlb_enable) {
+ if (ppc_swiotlb_enable)
hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
- set_pci_dma_ops(&powerpc_swiotlb_dma_ops);
- }
}
#else
static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
#endif
-static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
+static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
/*
* Fix up PCI devices that are able to DMA to the large inbound
* mapping that allows addressing any RAM address from across PCI.
*/
if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
- set_dma_ops(dev, &dma_nommu_ops);
- set_dma_offset(dev, pci64_dma_offset);
+ dev->bus_dma_mask = 0;
+ dev->archdata.dma_offset = pci64_dma_offset;
}
-
- *dev->dma_mask = dma_mask;
- return 0;
}
static int setup_one_atmu(struct ccsr_pci __iomem *pci,
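
On the fsl_pci.c side, the large-mask path in fsl_pci_dma_set_mask() above is taken when a device asks for a sufficiently wide DMA mask; otherwise the device stays limited by the bus_dma_mask set in pci_dma_dev_setup_swiotlb(). The sketch below shows how such a request is typically made from a driver; example_probe() is a hypothetical name, while dma_set_mask_and_coherent() and DMA_BIT_MASK() are standard kernel DMA API.

/*
 * Hypothetical driver probe, for illustration only. A full 64-bit mask
 * is large enough to satisfy the dma_mask >= pci64_dma_offset * 2 - 1
 * check in fsl_pci_dma_set_mask() above, so bus_dma_mask is cleared and
 * pci64_dma_offset applied; the 32-bit fallback leaves the device
 * bounced through swiotlb when it addresses memory beyond its window.
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Prefer 64-bit DMA so the large inbound mapping can be used */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) == 0)
		return 0;

	/* Otherwise fall back to 32-bit DMA, bounced via swiotlb if needed */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}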