author     Yangchun Fu <yangchun@google.com>        2019-11-01 20:09:56 +0300
committer  David S. Miller <davem@davemloft.net>    2019-11-02 01:00:05 +0300
commit     9cfeeb576d49a7b5e643b8066ba64a55e8417c5d (patch)
tree       b15f25e27fc5ea1a0463cd1d175de1339dc2c253 /drivers/net/ethernet/google
parent     a904a0693c189691eeee64f6c6b188bd7dc244e9 (diff)
download   linux-9cfeeb576d49a7b5e643b8066ba64a55e8417c5d.tar.xz
gve: Fixes DMA synchronization.
Syncs the DMA buffers properly so that the CPU and the device see the
most up-to-date data.
Signed-off-by: Yangchun Fu <yangchun@google.com>
Reviewed-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
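
For context, the fix follows the standard streaming-DMA ownership handshake: a mapped buffer belongs to the device until the CPU claims it with dma_sync_single_for_cpu(), and it must be handed back with dma_sync_single_for_device() before the hardware touches it again. Below is a minimal, driver-independent sketch of that pattern; dev and dma are illustrative placeholders, not gve fields.

#include <linux/dma-mapping.h>

/* Minimal sketch of the streaming-DMA ownership handshake this fix
 * applies; dev and dma are illustrative placeholders, not gve code.
 */
static void rx_side_sync(struct device *dev, dma_addr_t dma)
{
	/* Device has written into the mapped page: claim it for the CPU
	 * so stale cache lines are invalidated before reading.
	 */
	dma_sync_single_for_cpu(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... CPU may now read the buffer safely ... */
}

static void tx_side_sync(struct device *dev, dma_addr_t dma)
{
	/* ... CPU has filled the mapped page ... */
	/* Hand it back to the device so dirty cache lines are written
	 * out to memory before the NIC DMAs the payload.
	 */
	dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
}

The RX hunk below is the "for_cpu" half of this handshake (sync before the driver reads the received page), and the new TX helper is the "for_device" half (sync after the driver copies the skb into the FIFO).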
Diffstat (limited to 'drivers/net/ethernet/google')
-rw-r--r--   drivers/net/ethernet/google/gve/gve_rx.c    2
-rw-r--r--   drivers/net/ethernet/google/gve/gve_tx.c   24
2 files changed, 24 insertions, 2 deletions
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 59564ac99d2a..edec61dfc868 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -289,6 +289,8 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 
 	len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
 	page_info = &rx->data.page_info[idx];
+	dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],
+				PAGE_SIZE, DMA_FROM_DEVICE);
 
 	/* gvnic can only receive into registered segments. If the buffer
 	 * can't be recycled, our only choice is to copy the data out of
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 778b87b5a06c..0a9a7ee2a866 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -390,7 +390,21 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
 	seg_desc->seg.seg_addr = cpu_to_be64(addr);
 }
 
-static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
+static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
+				    u64 iov_offset, u64 iov_len)
+{
+	dma_addr_t dma;
+	u64 addr;
+
+	for (addr = iov_offset; addr < iov_offset + iov_len;
+	     addr += PAGE_SIZE) {
+		dma = page_buses[addr / PAGE_SIZE];
+		dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
+	}
+}
+
+static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
+			  struct device *dev)
 {
 	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
 	union gve_tx_desc *pkt_desc, *seg_desc;
@@ -432,6 +446,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
 	skb_copy_bits(skb, 0,
 		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
 		      hlen);
+	gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+				info->iov[hdr_nfrags - 1].iov_offset,
+				info->iov[hdr_nfrags - 1].iov_len);
 	copy_offset = hlen;
 
 	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
@@ -445,6 +462,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
 		skb_copy_bits(skb, copy_offset,
 			      tx->tx_fifo.base + info->iov[i].iov_offset,
 			      info->iov[i].iov_len);
+		gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+					info->iov[i].iov_offset,
+					info->iov[i].iov_len);
 		copy_offset += info->iov[i].iov_len;
 	}
 
@@ -473,7 +493,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
 		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
 		return NETDEV_TX_BUSY;
 	}
-	nsegs = gve_tx_add_skb(tx, skb);
+	nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev);
 
 	netdev_tx_sent_queue(tx->netdev_txq, skb->len);
 	skb_tx_timestamp(skb);
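
The new gve_dma_sync_for_device() helper walks the iov's byte range in PAGE_SIZE strides and syncs the bus address of each TX FIFO page the iov touches, since every queue-page-list page is mapped individually. A standalone sketch of that index arithmetic, using hypothetical offsets (plain C, not driver code):

#include <stdio.h>

#define PAGE_SIZE 4096ULL

/* Standalone sketch of gve_dma_sync_for_device()'s page-index math.
 * Hypothetical iov: offset 6000, length 5000, i.e. bytes 6000..10999
 * of the TX FIFO, which straddle FIFO pages 1 and 2.
 */
int main(void)
{
	unsigned long long iov_offset = 6000, iov_len = 5000, addr;

	for (addr = iov_offset; addr < iov_offset + iov_len; addr += PAGE_SIZE)
		printf("sync page_buses[%llu]\n", addr / PAGE_SIZE);
	/* Prints indices 1 and 2: each FIFO page has its own bus
	 * address, so every page the iov touches is synced separately.
	 */
	return 0;
}

With these numbers the loop prints indices 1 and 2, matching the two FIFO pages that bytes 6000..10999 occupy.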