author | David S. Miller <davem@davemloft.net> | 2011-06-21 09:29:08 +0400
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2011-06-21 09:29:08 +0400
commit | 9f6ec8d697c08963d83880ccd35c13c5ace716ea (patch) |
tree | ad8d93cf6fcdd09b86ade09f5fcbbc66cdb1cca2 /drivers/net/arm/ep93xx_eth.c |
parent | 4aa3a715551c93eda32d79bd52042ce500bd5383 (diff) |
parent | 56299378726d5f2ba8d3c8cbbd13cb280ba45e4f (diff) |
download | linux-9f6ec8d697c08963d83880ccd35c13c5ace716ea.tar.xz |
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
drivers/net/wireless/rtlwifi/pci.c
net/netfilter/ipvs/ip_vs_core.c
Diffstat (limited to 'drivers/net/arm/ep93xx_eth.c')
-rw-r--r-- | drivers/net/arm/ep93xx_eth.c | 82 |
1 file changed, 41 insertions, 41 deletions
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index a167addd5382..4317af8d2f0a 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -284,10 +284,14 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 
 		skb = dev_alloc_skb(length + 2);
 		if (likely(skb != NULL)) {
+			struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
 			skb_reserve(skb, 2);
-			dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
+			dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
 						length, DMA_FROM_DEVICE);
 			skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
+			dma_sync_single_for_device(dev->dev.parent,
+						   rxd->buf_addr, length,
+						   DMA_FROM_DEVICE);
 			skb_put(skb, length);
 			skb->protocol = eth_type_trans(skb, dev);
 
@@ -349,6 +353,7 @@ poll_some_more:
 static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ep93xx_priv *ep = netdev_priv(dev);
+	struct ep93xx_tdesc *txd;
 	int entry;
 
 	if (unlikely(skb->len > MAX_PKT_SIZE)) {
@@ -360,11 +365,14 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 	entry = ep->tx_pointer;
 	ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
 
-	ep->descs->tdesc[entry].tdesc1 =
-		TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+	txd = &ep->descs->tdesc[entry];
+
+	txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+	dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
+				DMA_TO_DEVICE);
 	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
-	dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
-				skb->len, DMA_TO_DEVICE);
+	dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
+				   DMA_TO_DEVICE);
 	dev_kfree_skb(skb);
 
 	spin_lock_irq(&ep->tx_pending_lock);
@@ -458,89 +466,80 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
 
 static void ep93xx_free_buffers(struct ep93xx_priv *ep)
 {
+	struct device *dev = ep->dev->dev.parent;
 	int i;
 
-	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
+	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
 		dma_addr_t d;
 
 		d = ep->descs->rdesc[i].buf_addr;
 		if (d)
-			dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE);
+			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
 
 		if (ep->rx_buf[i] != NULL)
-			free_page((unsigned long)ep->rx_buf[i]);
+			kfree(ep->rx_buf[i]);
 	}
 
-	for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
+	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
 		dma_addr_t d;
 
 		d = ep->descs->tdesc[i].buf_addr;
 		if (d)
-			dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE);
+			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
 
 		if (ep->tx_buf[i] != NULL)
-			free_page((unsigned long)ep->tx_buf[i]);
+			kfree(ep->tx_buf[i]);
 	}
 
-	dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs,
+	dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
 							ep->descs_dma_addr);
 }
 
-/*
- * The hardware enforces a sub-2K maximum packet size, so we put
- * two buffers on every hardware page.
- */
 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 {
+	struct device *dev = ep->dev->dev.parent;
 	int i;
 
-	ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs),
-				&ep->descs_dma_addr, GFP_KERNEL | GFP_DMA);
+	ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
+				&ep->descs_dma_addr, GFP_KERNEL);
 	if (ep->descs == NULL)
 		return 1;
 
-	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
-		void *page;
+	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+		void *buf;
 		dma_addr_t d;
 
-		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-		if (page == NULL)
+		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+		if (buf == NULL)
 			goto err;
 
-		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(NULL, d)) {
-			free_page((unsigned long)page);
+		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, d)) {
+			kfree(buf);
 			goto err;
 		}
 
-		ep->rx_buf[i] = page;
+		ep->rx_buf[i] = buf;
 		ep->descs->rdesc[i].buf_addr = d;
 		ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
-
-		ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
-		ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
-		ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
 	}
 
-	for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
-		void *page;
+	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+		void *buf;
 		dma_addr_t d;
 
-		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-		if (page == NULL)
+		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+		if (buf == NULL)
 			goto err;
 
-		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
-		if (dma_mapping_error(NULL, d)) {
-			free_page((unsigned long)page);
+		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, d)) {
+			kfree(buf);
 			goto err;
 		}
 
-		ep->tx_buf[i] = page;
+		ep->tx_buf[i] = buf;
 		ep->descs->tdesc[i].buf_addr = d;
-
-		ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
-		ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
 	}
 
 	return 0;
@@ -830,6 +829,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
 	}
 	ep = netdev_priv(dev);
 	ep->dev = dev;
+	SET_NETDEV_DEV(dev, &pdev->dev);
 	netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
 
 	platform_set_drvdata(pdev, dev);
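The ep93xx_eth changes pulled in by this merge stop passing NULL as the struct device to the DMA API (the platform device is now reachable through dev->dev.parent thanks to the SET_NETDEV_DEV() call added in probe), replace the page-per-two-buffers GFP_DMA scheme with one kmalloc'd PKT_BUF_SIZE buffer per ring entry, and pair each dma_sync_single_for_cpu() with a dma_sync_single_for_device() so ownership of the streaming mapping is handed back to the hardware after the CPU has copied the data. The sketch below is not part of the commit; it is a minimal, self-contained illustration of that map / sync-for-cpu / copy / sync-for-device lifecycle using the same DMA API calls, with hypothetical names (my_buf, MY_BUF_SIZE, my_rx_buffer_*) standing in for the driver's own.

```c
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

#define MY_BUF_SIZE 2048	/* hypothetical, stands in for PKT_BUF_SIZE */

static void *my_buf;
static dma_addr_t my_buf_dma;

/* Allocate an ordinary kmalloc'd buffer and map it once for streaming DMA. */
static int my_rx_buffer_setup(struct device *dev)
{
	my_buf = kmalloc(MY_BUF_SIZE, GFP_KERNEL);
	if (!my_buf)
		return -ENOMEM;

	/* After a successful map, the device owns the buffer. */
	my_buf_dma = dma_map_single(dev, my_buf, MY_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, my_buf_dma)) {
		kfree(my_buf);
		return -ENOMEM;
	}
	return 0;
}

/* Copy received data out of the DMA buffer, bouncing ownership around the access. */
static void my_rx_buffer_consume(struct device *dev, void *dst, size_t len)
{
	/* Hand the mapping to the CPU before reading the DMA'd data ... */
	dma_sync_single_for_cpu(dev, my_buf_dma, len, DMA_FROM_DEVICE);
	memcpy(dst, my_buf, len);
	/* ... then give it back to the device so it can reuse the buffer. */
	dma_sync_single_for_device(dev, my_buf_dma, len, DMA_FROM_DEVICE);
}

/* Unmap and free when the ring is torn down. */
static void my_rx_buffer_teardown(struct device *dev)
{
	dma_unmap_single(dev, my_buf_dma, MY_BUF_SIZE, DMA_FROM_DEVICE);
	kfree(my_buf);
}
```

The ownership rule this illustrates is the one the driver was violating: between dma_map_single() (or dma_sync_single_for_device()) and the next dma_sync_single_for_cpu(), the buffer belongs to the device and the CPU must not touch it, and every sync or unmap call needs the real struct device rather than NULL so the DMA layer can apply the correct operations for the platform.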