Diffstat (limited to 'drivers/net/ethernet/fealnx.c')
-rw-r--r-- | drivers/net/ethernet/fealnx.c | 91 |
1 file changed, 53 insertions(+), 38 deletions(-)
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 73e896a7d8fd..c696651dd735 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -543,7 +543,8 @@ static int fealnx_init_one(struct pci_dev *pdev,
 	np->mii.phy_id_mask = 0x1f;
 	np->mii.reg_num_mask = 0x1f;
 
-	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+					GFP_KERNEL);
 	if (!ring_space) {
 		err = -ENOMEM;
 		goto err_out_free_dev;
@@ -551,7 +552,8 @@ static int fealnx_init_one(struct pci_dev *pdev,
 	np->rx_ring = ring_space;
 	np->rx_ring_dma = ring_dma;
 
-	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+					GFP_KERNEL);
 	if (!ring_space) {
 		err = -ENOMEM;
 		goto err_out_free_rx;
@@ -656,9 +658,11 @@ static int fealnx_init_one(struct pci_dev *pdev,
 	return 0;
 
 err_out_free_tx:
-	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+			  np->tx_ring_dma);
 err_out_free_rx:
-	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+			  np->rx_ring_dma);
 err_out_free_dev:
 	free_netdev(dev);
 err_out_unmap:
@@ -676,10 +680,10 @@ static void fealnx_remove_one(struct pci_dev *pdev)
 	if (dev) {
 		struct netdev_private *np = netdev_priv(dev);
 
-		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
-			np->tx_ring_dma);
-		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
-			np->rx_ring_dma);
+		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+				  np->tx_ring_dma);
+		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+				  np->rx_ring_dma);
 		unregister_netdev(dev);
 		pci_iounmap(pdev, np->mem);
 		free_netdev(dev);
@@ -1056,8 +1060,10 @@ static void allocate_rx_buffers(struct net_device *dev)
 			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
 
 		np->lack_rxbuf->skbuff = skb;
-		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
-			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
+							skb->data,
+							np->rx_buf_sz,
+							DMA_FROM_DEVICE);
 		np->lack_rxbuf->status = RXOWN;
 		++np->really_rx_count;
 	}
@@ -1251,8 +1257,10 @@ static void init_ring(struct net_device *dev)
 
 		++np->really_rx_count;
 		np->rx_ring[i].skbuff = skb;
-		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
-			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev,
+						       skb->data,
+						       np->rx_buf_sz,
+						       DMA_FROM_DEVICE);
 		np->rx_ring[i].status = RXOWN;
 		np->rx_ring[i].control |= RXIC;
 	}
@@ -1290,8 +1298,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 #define one_buffer
 #define BPT 1022
 #if defined(one_buffer)
-	np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
-		skb->len, PCI_DMA_TODEVICE);
+	np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data,
+						 skb->len, DMA_TO_DEVICE);
 	np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
 	np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
 	np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
@@ -1306,8 +1314,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 		struct fealnx_desc *next;
 
 		/* for the first descriptor */
-		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
-			BPT, PCI_DMA_TODEVICE);
+		np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
+							 skb->data, BPT,
+							 DMA_TO_DEVICE);
 		np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
 		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
 		np->cur_tx_copy->control |= (BPT << TBSShift);	/* buffer size */
@@ -1321,8 +1330,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 // 89/12/29 add,
 		if (np->pci_dev->device == 0x891)
 			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
-		next->buffer = pci_map_single(ep->pci_dev, skb->data + BPT,
-			skb->len - BPT, PCI_DMA_TODEVICE);
+		next->buffer = dma_map_single(&ep->pci_dev->dev,
+					      skb->data + BPT, skb->len - BPT,
+					      DMA_TO_DEVICE);
 		next->status = TXOWN;
 		np->cur_tx_copy->status = TXOWN;
 
@@ -1330,8 +1340,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 		np->cur_tx_copy = next->next_desc_logical;
 		np->free_tx_count -= 2;
 	} else {
-		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
-			skb->len, PCI_DMA_TODEVICE);
+		np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
+							 skb->data, skb->len,
+							 DMA_TO_DEVICE);
 		np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
 		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
 		np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
@@ -1371,8 +1382,8 @@ static void reset_tx_descriptors(struct net_device *dev)
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		cur = &np->tx_ring[i];
 		if (cur->skbuff) {
-			pci_unmap_single(np->pci_dev, cur->buffer,
-				cur->skbuff->len, PCI_DMA_TODEVICE);
+			dma_unmap_single(&np->pci_dev->dev, cur->buffer,
+					 cur->skbuff->len, DMA_TO_DEVICE);
 			dev_kfree_skb_any(cur->skbuff);
 			cur->skbuff = NULL;
 		}
@@ -1515,8 +1526,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
 			}
 
 			/* Free the original skb. */
-			pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
-				np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
+			dma_unmap_single(&np->pci_dev->dev,
+					 np->cur_tx->buffer,
+					 np->cur_tx->skbuff->len,
+					 DMA_TO_DEVICE);
 			dev_consume_skb_irq(np->cur_tx->skbuff);
 			np->cur_tx->skbuff = NULL;
 			--np->really_tx_count;
@@ -1682,10 +1695,10 @@ static int netdev_rx(struct net_device *dev)
 			if (pkt_len < rx_copybreak &&
 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
-				pci_dma_sync_single_for_cpu(np->pci_dev,
-							    np->cur_rx->buffer,
-							    np->rx_buf_sz,
-							    PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_cpu(&np->pci_dev->dev,
+							np->cur_rx->buffer,
+							np->rx_buf_sz,
+							DMA_FROM_DEVICE);
 				/* Call copy + cksum if available. */
 
 #if ! defined(__alpha__)
@@ -1696,15 +1709,15 @@ static int netdev_rx(struct net_device *dev)
 				skb_put_data(skb, np->cur_rx->skbuff->data,
 					     pkt_len);
 #endif
-				pci_dma_sync_single_for_device(np->pci_dev,
-							       np->cur_rx->buffer,
-							       np->rx_buf_sz,
-							       PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_device(&np->pci_dev->dev,
+							   np->cur_rx->buffer,
+							   np->rx_buf_sz,
+							   DMA_FROM_DEVICE);
 			} else {
-				pci_unmap_single(np->pci_dev,
+				dma_unmap_single(&np->pci_dev->dev,
 						 np->cur_rx->buffer,
 						 np->rx_buf_sz,
-						 PCI_DMA_FROMDEVICE);
+						 DMA_FROM_DEVICE);
 				skb_put(skb = np->cur_rx->skbuff, pkt_len);
 				np->cur_rx->skbuff = NULL;
 				--np->really_rx_count;
@@ -1896,8 +1909,9 @@ static int netdev_close(struct net_device *dev)
 
 		np->rx_ring[i].status = 0;
 		if (skb) {
-			pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
-				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&np->pci_dev->dev,
+					 np->rx_ring[i].buffer, np->rx_buf_sz,
+					 DMA_FROM_DEVICE);
 			dev_kfree_skb(skb);
 			np->rx_ring[i].skbuff = NULL;
 		}
@@ -1907,8 +1921,9 @@ static int netdev_close(struct net_device *dev)
 		struct sk_buff *skb = np->tx_ring[i].skbuff;
 
 		if (skb) {
-			pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
-				skb->len, PCI_DMA_TODEVICE);
+			dma_unmap_single(&np->pci_dev->dev,
+					 np->tx_ring[i].buffer, skb->len,
+					 DMA_TO_DEVICE);
 			dev_kfree_skb(skb);
 			np->tx_ring[i].skbuff = NULL;
 		}
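
The conversion is mechanical: each deprecated pci_* DMA wrapper is replaced by its generic dma_* equivalent, which takes the underlying struct device (&pdev->dev or &np->pci_dev->dev) rather than the pci_dev, the PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE constants become DMA_TO_DEVICE/DMA_FROM_DEVICE, and dma_alloc_coherent() takes an explicit GFP flag. A minimal sketch of the mapping, for illustration only (the helper names and parameters below are hypothetical, not taken from the driver):

/* Illustrative sketch -- not part of the patch. */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *example_alloc_ring(struct pci_dev *pdev, size_t size,
				dma_addr_t *ring_dma)
{
	/* was: pci_alloc_consistent(pdev, size, ring_dma) */
	return dma_alloc_coherent(&pdev->dev, size, ring_dma, GFP_KERNEL);
}

static void example_free_ring(struct pci_dev *pdev, size_t size,
			      void *ring, dma_addr_t ring_dma)
{
	/* was: pci_free_consistent(pdev, size, ring, ring_dma) */
	dma_free_coherent(&pdev->dev, size, ring, ring_dma);
}

static dma_addr_t example_map_rx_buf(struct pci_dev *pdev, void *buf,
				     size_t len)
{
	/* was: pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE) */
	return dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
}

The only behavioural change is in the coherent allocations: pci_alloc_consistent() always used GFP_ATOMIC, whereas the converted calls pass GFP_KERNEL, which is fine here because both ring allocations happen in the probe path where sleeping is allowed.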