author		Felix Fietkau <nbd@nbd.name>	2022-01-06 15:22:28 +0300
committer	Felix Fietkau <nbd@nbd.name>	2022-05-13 10:39:35 +0300
commit		d1ddc536df93ae406ef671deb3218898d3515ea4 (patch)
tree		66b17bb38facff239845efd0072c2501d7d5c6f0 /drivers/net/wireless/mediatek/mt76/dma.c
parent		cc9fd945db4fa1ea2317c3d716b92ba2e9a13147 (diff)
download	linux-d1ddc536df93ae406ef671deb3218898d3515ea4.tar.xz
mt76: add support for overriding the device used for DMA mapping
WED support requires using non-coherent DMA, whereas the PCI device might
be configured for coherent DMA.
The WED driver will take care of changing the PCI HIF coherent IO setting
on attach.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
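The pattern here is a single level of indirection: every DMA mapping call in dma.c now goes through a dedicated dma_dev pointer instead of the physical PCI device (dev->dev), so a WED attach hook can repoint it at a non-coherent device without touching the data path. The struct change and its initialization live outside dma.c and are not shown in this diff; the sketch below only illustrates the idea, using hypothetical stand-in names (mt76_dev_sketch, sketch_init, sketch_wed_attach, sketch_map_tx):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical, cut-down stand-in for struct mt76_dev. */
	struct mt76_dev_sketch {
		struct device *dev;	/* the physical (e.g. PCI) device */
		struct device *dma_dev;	/* device actually used for DMA mapping */
	};

	/* On allocation, dma_dev simply aliases the physical device. */
	static void sketch_init(struct mt76_dev_sketch *mdev, struct device *pdev)
	{
		mdev->dev = pdev;
		mdev->dma_dev = pdev;
	}

	/* A WED-style attach hook can later substitute a non-coherent device. */
	static void sketch_wed_attach(struct mt76_dev_sketch *mdev,
				      struct device *wed_dev)
	{
		mdev->dma_dev = wed_dev;
	}

	/* Mapping helpers then use dma_dev, exactly as this patch converts dma.c. */
	static dma_addr_t sketch_map_tx(struct mt76_dev_sketch *mdev,
					void *buf, size_t len)
	{
		return dma_map_single(mdev->dma_dev, buf, len, DMA_TO_DEVICE);
	}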
Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/dma.c')
-rw-r--r--	drivers/net/wireless/mediatek/mt76/dma.c	34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 03d5beb1afdd..83787fc01e4e 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -24,7 +24,7 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	if (!txwi)
 		return NULL;
 
-	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
+	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
 			      DMA_TO_DEVICE);
 	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
 	t->dma_addr = addr;
@@ -78,7 +78,7 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 
 	local_bh_disable();
 	while ((t = __mt76_get_txwi(dev)) != NULL) {
-		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
+		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
 				 DMA_TO_DEVICE);
 		kfree(mt76_get_txwi_ptr(dev, t));
 	}
@@ -127,7 +127,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	q->hw_idx = idx;
 
 	size = q->ndesc * sizeof(struct mt76_desc);
-	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
+	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
 	if (!q->desc)
 		return -ENOMEM;
 
@@ -209,11 +209,11 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	struct mt76_queue_entry *e = &q->entry[idx];
 
 	if (!e->skip_buf0)
-		dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
+		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
 				 DMA_TO_DEVICE);
 
 	if (!e->skip_buf1)
-		dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
+		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
 				 DMA_TO_DEVICE);
 
 	if (e->txwi == DMA_DUMMY_DATA)
@@ -293,7 +293,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	if (info)
 		*info = le32_to_cpu(desc->info);
 
-	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
+	dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
 	e->buf = NULL;
 
 	return buf;
@@ -330,9 +330,9 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
 	if (q->queued + 1 >= q->ndesc - 1)
 		goto error;
 
-	addr = dma_map_single(dev->dev, skb->data, skb->len,
+	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
 			      DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(dev->dev, addr)))
+	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
 		goto error;
 
 	buf.addr = addr;
@@ -379,8 +379,8 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 	mt76_insert_hdr_pad(skb);
 
 	len = skb_headlen(skb);
-	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(dev->dev, addr)))
+	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
 		goto free;
 
 	tx_info.buf[n].addr = t->dma_addr;
@@ -392,9 +392,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 		if (n == ARRAY_SIZE(tx_info.buf))
 			goto unmap;
 
-		addr = dma_map_single(dev->dev, iter->data, iter->len,
+		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
 				      DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev->dev, addr)))
+		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
 			goto unmap;
 
 		tx_info.buf[n].addr = addr;
@@ -407,10 +407,10 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 		goto unmap;
 	}
 
-	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
+	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
 				DMA_TO_DEVICE);
 	ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
-	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
+	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
 				   DMA_TO_DEVICE);
 	if (ret < 0)
 		goto unmap;
@@ -420,7 +420,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 
 unmap:
	for (n--; n > 0; n--)
-		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
+		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
 				 tx_info.buf[n].len, DMA_TO_DEVICE);
 
 free:
@@ -465,8 +465,8 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		if (!buf)
 			break;
 
-		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(dev->dev, addr))) {
+		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
 			skb_free_frag(buf);
 			break;
 		}
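One detail worth noting in the mt76_dma_tx_queue_skb hunk above: the TXWI buffer stays mapped DMA_TO_DEVICE, so the CPU write performed by tx_prepare_skb() has to be bracketed by ownership-transfer syncs, and after this patch both syncs name the same dma_dev that created the mapping (the DMA API requires that map, sync and unmap all use one device). Condensed from the diff, with comments added:

	/* Hand the mapped TXWI back to the CPU before the driver writes it ... */
	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
	/* ... then return ownership to the device before the hardware reads it. */
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);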