| author | Arthur Kepner <akepner@sgi.com> | 2008-04-29 12:00:32 +0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-29 19:06:12 +0400 |
| commit | 309df0c503c35fbb5a09537fcbb1f4967b9ca489 (patch) | |
| tree | 56a9df627a229bd64b934608b5f84d20bdaabf3a /arch/ia64/sn | |
| parent | a75b0a2f68d3937f96ed39525e4750601483e3b4 (diff) | |
| download | linux-309df0c503c35fbb5a09537fcbb1f4967b9ca489.tar.xz | |
dma/ia64: update ia64 machvecs, swiotlb.c
Change all ia64 machvecs to use the new dma_*map*_attrs() interfaces.
Implement the old dma_*map_*() interfaces in terms of the corresponding new
interfaces. For ia64/sn, make use of one dma attribute,
DMA_ATTR_WRITE_BARRIER. Introduce swiotlb_*map*_attrs() functions.
Signed-off-by: Arthur Kepner <akepner@sgi.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Roland Dreier <rdreier@cisco.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Grundler <grundler@parisc-linux.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
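
Implementing the old dma_*map_*() interfaces in terms of the new ones, as the
message describes, amounts to thin wrappers that forward a NULL attrs pointer
(meaning "no attributes"). The following is only a sketch of that pattern using
the sn_dma_* names from this patch, not the exact machvec glue in the series:

    /*
     * Sketch only: old entry points forwarding to the *_attrs() variants
     * with NULL attrs ("no attributes"); dma_get_attr() treats a NULL
     * attrs pointer as "attribute not set".
     */
    static inline dma_addr_t
    sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
                      int direction)
    {
            return sn_dma_map_single_attrs(dev, cpu_addr, size, direction,
                                           NULL);
    }

    static inline void
    sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                        int direction)
    {
            sn_dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
    }
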
Diffstat (limited to 'arch/ia64/sn')
-rw-r--r-- | arch/ia64/sn/pci/pci_dma.c | 81 |
1 file changed, 60 insertions, 21 deletions
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 18b94b792d54..52175af299a0 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -10,6 +10,7 @@
  */
 #include <linux/module.h>
+#include <linux/dma-attrs.h>
 #include <asm/dma.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
@@ -149,11 +150,12 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 EXPORT_SYMBOL(sn_dma_free_coherent);
 
 /**
- * sn_dma_map_single - map a single page for DMA
+ * sn_dma_map_single_attrs - map a single page for DMA
  * @dev: device to map for
  * @cpu_addr: kernel virtual address of the region to map
  * @size: size of the region
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * Map the region pointed to by @cpu_addr for DMA and return the
  * DMA address.
@@ -163,42 +165,59 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
  * no way of saving the dmamap handle from the alloc to later free
  * (which is pretty much unacceptable).
  *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
+ *
  * TODO: simplify our interface;
  *       figure out how to save dmamap handle so can use two step.
  */
-dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-                             int direction)
+dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
+                                   size_t size, int direction,
+                                   struct dma_attrs *attrs)
 {
         dma_addr_t dma_addr;
         unsigned long phys_addr;
         struct pci_dev *pdev = to_pci_dev(dev);
         struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+        int dmabarr;
+
+        dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
         BUG_ON(dev->bus != &pci_bus_type);
 
         phys_addr = __pa(cpu_addr);
-        dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
+        if (dmabarr)
+                dma_addr = provider->dma_map_consistent(pdev, phys_addr,
+                                                        size, SN_DMA_ADDR_PHYS);
+        else
+                dma_addr = provider->dma_map(pdev, phys_addr, size,
+                                             SN_DMA_ADDR_PHYS);
+
         if (!dma_addr) {
                 printk(KERN_ERR "%s: out of ATEs\n", __func__);
                 return 0;
         }
         return dma_addr;
 }
-EXPORT_SYMBOL(sn_dma_map_single);
+EXPORT_SYMBOL(sn_dma_map_single_attrs);
 
 /**
- * sn_dma_unmap_single - unamp a DMA mapped page
+ * sn_dma_unmap_single_attrs - unamp a DMA mapped page
  * @dev: device to sync
  * @dma_addr: DMA address to sync
  * @size: size of region
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * This routine is supposed to sync the DMA region specified
  * by @dma_handle into the coherence domain. On SN, we're always cache
  * coherent, so we just need to free any ATEs associated with this mapping.
  */
-void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                         int direction)
+void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
+                               size_t size, int direction,
+                               struct dma_attrs *attrs)
 {
         struct pci_dev *pdev = to_pci_dev(dev);
         struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -207,19 +226,21 @@ void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 
         provider->dma_unmap(pdev, dma_addr, direction);
 }
-EXPORT_SYMBOL(sn_dma_unmap_single);
+EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
 
 /**
- * sn_dma_unmap_sg - unmap a DMA scatterlist
+ * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
  * @dev: device to unmap
  * @sg: scatterlist to unmap
  * @nhwentries: number of scatterlist entries
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * Unmap a set of streaming mode DMA translations.
  */
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-                     int nhwentries, int direction)
+void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
+                           int nhwentries, int direction,
+                           struct dma_attrs *attrs)
 {
         int i;
         struct pci_dev *pdev = to_pci_dev(dev);
@@ -234,25 +255,34 @@ void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
                 sg->dma_length = 0;
         }
 }
-EXPORT_SYMBOL(sn_dma_unmap_sg);
+EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
 
 /**
- * sn_dma_map_sg - map a scatterlist for DMA
+ * sn_dma_map_sg_attrs - map a scatterlist for DMA
  * @dev: device to map for
  * @sg: scatterlist to map
  * @nhwentries: number of entries
  * @direction: direction of the DMA transaction
+ * @attrs: optional dma attributes
+ *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
  *
  * Maps each entry of @sg for DMA.
  */
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
-                  int direction)
+int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+                        int nhwentries, int direction, struct dma_attrs *attrs)
 {
         unsigned long phys_addr;
         struct scatterlist *saved_sg = sgl, *sg;
         struct pci_dev *pdev = to_pci_dev(dev);
         struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
         int i;
+        int dmabarr;
+
+        dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
         BUG_ON(dev->bus != &pci_bus_type);
 
@@ -260,11 +290,19 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
          * Setup a DMA address for each entry in the scatterlist.
          */
         for_each_sg(sgl, sg, nhwentries, i) {
+                dma_addr_t dma_addr;
                 phys_addr = SG_ENT_PHYS_ADDRESS(sg);
-                sg->dma_address = provider->dma_map(pdev,
-                                                    phys_addr, sg->length,
-                                                    SN_DMA_ADDR_PHYS);
+                if (dmabarr)
+                        dma_addr = provider->dma_map_consistent(pdev,
+                                                                phys_addr,
+                                                                sg->length,
+                                                                SN_DMA_ADDR_PHYS);
+                else
+                        dma_addr = provider->dma_map(pdev, phys_addr,
+                                                     sg->length,
+                                                     SN_DMA_ADDR_PHYS);
+
+                sg->dma_address = dma_addr;
                 if (!sg->dma_address) {
                         printk(KERN_ERR "%s: out of ATEs\n", __func__);
 
@@ -272,7 +310,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
                          * Free any successfully allocated entries.
                          */
                         if (i > 0)
-                                sn_dma_unmap_sg(dev, saved_sg, i, direction);
+                                sn_dma_unmap_sg_attrs(dev, saved_sg, i,
+                                                      direction, attrs);
                         return 0;
                 }
 
@@ -281,7 +320,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 
         return nhwentries;
 }
-EXPORT_SYMBOL(sn_dma_map_sg);
+EXPORT_SYMBOL(sn_dma_map_sg_attrs);
 
 void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                 size_t size, int direction)
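
For context on the caller side, a hypothetical driver snippet could request the
write-barrier behavior through the dma_attrs API this series introduces
(DEFINE_DMA_ATTRS(), dma_set_attr(), dma_map_single_attrs()); the function and
buffer names below are made up for illustration:

    /*
     * Hypothetical caller: map a small "completion flag" buffer with
     * DMA_ATTR_WRITE_BARRIER so the device's write to it flushes any DMA
     * still in flight ahead of it.
     */
    #include <linux/dma-attrs.h>
    #include <linux/dma-mapping.h>

    static dma_addr_t map_completion_flag(struct device *dev, void *flag)
    {
            DEFINE_DMA_ATTRS(attrs);

            dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

            /* On sn2 this takes the provider->dma_map_consistent() path above. */
            return dma_map_single_attrs(dev, flag, sizeof(u32),
                                        DMA_FROM_DEVICE, &attrs);
    }

With the attribute set, the sn2 code above routes the mapping through
provider->dma_map_consistent() instead of provider->dma_map(), per the Altix
document cited in the patch; without it, behavior is unchanged from the old
sn_dma_map_single()/sn_dma_map_sg() paths.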