Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/arm/ep93xx_eth.c       |   4
-rw-r--r--  drivers/net/bnx2x_main.c           |  14
-rw-r--r--  drivers/net/cassini.c              |  12
-rw-r--r--  drivers/net/cxgb3/sge.c            |   2
-rw-r--r--  drivers/net/e100.c                 |   2
-rw-r--r--  drivers/net/e1000e/ethtool.c       |   4
-rw-r--r--  drivers/net/e1000e/netdev.c        |  11
-rw-r--r--  drivers/net/ibmveth.c              | 219
-rw-r--r--  drivers/net/ibmveth.h              |   5
-rw-r--r--  drivers/net/iseries_veth.c         |   4
-rw-r--r--  drivers/net/mlx4/eq.c              |   2
-rw-r--r--  drivers/net/pasemi_mac.c           |   6
-rw-r--r--  drivers/net/ppp_generic.c          |   6
-rw-r--r--  drivers/net/qla3xxx.c              |  12
-rw-r--r--  drivers/net/s2io.c                 |  48
-rw-r--r--  drivers/net/sfc/rx.c               |   4
-rw-r--r--  drivers/net/sfc/tx.c               |   7
-rw-r--r--  drivers/net/spider_net.c           |   4
-rw-r--r--  drivers/net/tc35815.c              |   4
-rw-r--r--  drivers/net/virtio_net.c           | 114
-rw-r--r--  drivers/net/wireless/ath5k/base.c  |   4
21 files changed, 354 insertions(+), 134 deletions(-)
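Most of the hunks below are one mechanical conversion: dma_mapping_error() and pci_dma_mapping_error() gained a device argument, so the error check can be dispatched through that device's dma_ops. A minimal sketch of the converted calling convention follows; the helper name and its surrounding structure are illustrative only, while the DMA calls themselves are the real post-change kernel API.

```c
#include <linux/dma-mapping.h>

/* Hypothetical helper, not from this series: map one RX buffer. */
static int example_map_rx_buffer(struct device *dev, void *buf, size_t len,
				 dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* New form takes the device; the old form was dma_mapping_error(addr). */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*handle = addr;
	return 0;
}
```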
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index 7a14980f3472..18d3eeb7eab2 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c @@ -482,7 +482,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) goto err; d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(d)) { + if (dma_mapping_error(NULL, d)) { free_page((unsigned long)page); goto err; } @@ -505,7 +505,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) goto err; d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE); - if (dma_mapping_error(d)) { + if (dma_mapping_error(NULL, d)) { free_page((unsigned long)page); goto err; } diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index 0263bef9cc6d..af251a5df844 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c @@ -814,7 +814,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, } /* release skb */ - BUG_TRAP(skb); + WARN_ON(!skb); dev_kfree_skb(skb); tx_buf->first_bd = 0; tx_buf->skb = NULL; @@ -837,9 +837,9 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; #ifdef BNX2X_STOP_ON_ERROR - BUG_TRAP(used >= 0); - BUG_TRAP(used <= fp->bp->tx_ring_size); - BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL); + WARN_ON(used < 0); + WARN_ON(used > fp->bp->tx_ring_size); + WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL); #endif return (s16)(fp->bp->tx_ring_size) - used; @@ -1020,7 +1020,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); - if (unlikely(dma_mapping_error(mapping))) { + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { __free_pages(page, PAGES_PER_SGE_SHIFT); return -ENOMEM; } @@ -1048,7 +1048,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); - if (unlikely(dma_mapping_error(mapping))) { + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { dev_kfree_skb(skb); return -ENOMEM; } @@ -4374,7 +4374,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) } ring_prod = NEXT_RX_IDX(ring_prod); cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); - BUG_TRAP(ring_prod > i); + WARN_ON(ring_prod <= i); } fp->rx_bd_prod = ring_prod; diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 83768df27806..f1936d51b458 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -576,6 +576,18 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags) list_for_each_safe(elem, tmp, &list) { cas_page_t *page = list_entry(elem, cas_page_t, list); + /* + * With the lockless pagecache, cassini buffering scheme gets + * slightly less accurate: we might find that a page has an + * elevated reference count here, due to a speculative ref, + * and skip it as in-use. Ideally we would be able to reclaim + * it. However this would be such a rare case, it doesn't + * matter too much as we should pick it up the next time round. + * + * Importantly, if we find that the page has a refcount of 1 + * here (our refcount), then we know it is definitely not inuse + * so we can reuse it. 
+ */ if (page_count(page->buffer) > 1) continue; diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index a96331c875e6..1b0861d73ab7 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -386,7 +386,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len, dma_addr_t mapping; mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(mapping))) + if (unlikely(pci_dma_mapping_error(pdev, mapping))) return -ENOMEM; pci_unmap_addr_set(sd, dma_addr, mapping); diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 1037b1332312..19d32a227be1 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -1790,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(rx->dma_addr)) { + if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { dev_kfree_skb_any(rx->skb); rx->skb = NULL; rx->dma_addr = 0; diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index a14561f40db0..9350564065e7 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -1090,7 +1090,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) tx_ring->buffer_info[i].dma = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) { + if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) { ret_val = 4; goto err_nomem; } @@ -1153,7 +1153,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rx_ring->buffer_info[i].dma = pci_map_single(pdev, skb->data, 2048, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) { + if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) { ret_val = 8; goto err_nomem; } diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 9c0f56b3c518..d13677899767 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -195,7 +195,7 @@ map_skb: buffer_info->dma = pci_map_single(pdev, skb->data, adapter->rx_buffer_len, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(buffer_info->dma)) { + if (pci_dma_mapping_error(pdev, buffer_info->dma)) { dev_err(&pdev->dev, "RX DMA map failed\n"); adapter->rx_dma_failed++; break; @@ -265,7 +265,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, ps_page->page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(ps_page->dma)) { + if (pci_dma_mapping_error(pdev, ps_page->dma)) { dev_err(&adapter->pdev->dev, "RX DMA page map failed\n"); adapter->rx_dma_failed++; @@ -300,7 +300,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, buffer_info->dma = pci_map_single(pdev, skb->data, adapter->rx_ps_bsize0, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(buffer_info->dma)) { + if (pci_dma_mapping_error(pdev, buffer_info->dma)) { dev_err(&pdev->dev, "RX DMA map failed\n"); adapter->rx_dma_failed++; /* cleanup skb */ @@ -3344,7 +3344,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, skb->data + offset, size, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(buffer_info->dma)) { + if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) { dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); adapter->tx_dma_failed++; return -1; @@ -3382,7 +3382,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, offset, size, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(buffer_info->dma)) { + if 
(pci_dma_mapping_error(adapter->pdev, + buffer_info->dma)) { dev_err(&adapter->pdev->dev, "TX DMA page map failed\n"); adapter->tx_dma_failed++; diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 00527805e4f1..91ec9fdc7184 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -33,6 +33,7 @@ */ #include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/ioport.h> @@ -52,7 +53,9 @@ #include <asm/hvcall.h> #include <asm/atomic.h> #include <asm/vio.h> +#include <asm/iommu.h> #include <asm/uaccess.h> +#include <asm/firmware.h> #include <linux/seq_file.h> #include "ibmveth.h" @@ -94,8 +97,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter); static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter); static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance); static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); +static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev); static struct kobj_type ktype_veth_pool; + #ifdef CONFIG_PROC_FS #define IBMVETH_PROC_DIR "ibmveth" static struct proc_dir_entry *ibmveth_proc_dir; @@ -226,16 +231,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc u32 i; u32 count = pool->size - atomic_read(&pool->available); u32 buffers_added = 0; + struct sk_buff *skb; + unsigned int free_index, index; + u64 correlator; + unsigned long lpar_rc; + dma_addr_t dma_addr; mb(); for(i = 0; i < count; ++i) { - struct sk_buff *skb; - unsigned int free_index, index; - u64 correlator; union ibmveth_buf_desc desc; - unsigned long lpar_rc; - dma_addr_t dma_addr; skb = alloc_skb(pool->buff_size, GFP_ATOMIC); @@ -255,6 +260,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, pool->buff_size, DMA_FROM_DEVICE); + if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) + goto failure; + pool->free_map[free_index] = IBM_VETH_INVALID_MAP; pool->dma_addr[index] = dma_addr; pool->skbuff[index] = skb; @@ -267,20 +275,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); - if(lpar_rc != H_SUCCESS) { - pool->free_map[free_index] = index; - pool->skbuff[index] = NULL; - if (pool->consumer_index == 0) - pool->consumer_index = pool->size - 1; - else - pool->consumer_index--; - dma_unmap_single(&adapter->vdev->dev, - pool->dma_addr[index], pool->buff_size, - DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); - adapter->replenish_add_buff_failure++; - break; - } else { + if (lpar_rc != H_SUCCESS) + goto failure; + else { buffers_added++; adapter->replenish_add_buff_success++; } @@ -288,6 +285,24 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc mb(); atomic_add(buffers_added, &(pool->available)); + return; + +failure: + pool->free_map[free_index] = index; + pool->skbuff[index] = NULL; + if (pool->consumer_index == 0) + pool->consumer_index = pool->size - 1; + else + pool->consumer_index--; + if (!dma_mapping_error(&adapter->vdev->dev, dma_addr)) + dma_unmap_single(&adapter->vdev->dev, + pool->dma_addr[index], pool->buff_size, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + adapter->replenish_add_buff_failure++; + + mb(); + atomic_add(buffers_added, &(pool->available)); } /* replenish routine */ @@ -297,7 +312,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter
*adapter) adapter->replenish_task_cycles++; - for(i = 0; i < IbmVethNumBufferPools; i++) + for (i = (IbmVethNumBufferPools - 1); i >= 0; i--) if(adapter->rx_buff_pool[i].active) ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[i]); @@ -433,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) static void ibmveth_cleanup(struct ibmveth_adapter *adapter) { int i; + struct device *dev = &adapter->vdev->dev; if(adapter->buffer_list_addr != NULL) { - if(!dma_mapping_error(adapter->buffer_list_dma)) { - dma_unmap_single(&adapter->vdev->dev, - adapter->buffer_list_dma, 4096, + if (!dma_mapping_error(dev, adapter->buffer_list_dma)) { + dma_unmap_single(dev, adapter->buffer_list_dma, 4096, DMA_BIDIRECTIONAL); adapter->buffer_list_dma = DMA_ERROR_CODE; } @@ -446,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) if(adapter->filter_list_addr != NULL) { - if(!dma_mapping_error(adapter->filter_list_dma)) { - dma_unmap_single(&adapter->vdev->dev, - adapter->filter_list_dma, 4096, + if (!dma_mapping_error(dev, adapter->filter_list_dma)) { + dma_unmap_single(dev, adapter->filter_list_dma, 4096, DMA_BIDIRECTIONAL); adapter->filter_list_dma = DMA_ERROR_CODE; } @@ -457,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) if(adapter->rx_queue.queue_addr != NULL) { - if(!dma_mapping_error(adapter->rx_queue.queue_dma)) { - dma_unmap_single(&adapter->vdev->dev, + if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) { + dma_unmap_single(dev, adapter->rx_queue.queue_dma, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); @@ -472,6 +486,18 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) if (adapter->rx_buff_pool[i].active) ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]); + + if (adapter->bounce_buffer != NULL) { + if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) { + dma_unmap_single(&adapter->vdev->dev, + adapter->bounce_buffer_dma, + adapter->netdev->mtu + IBMVETH_BUFF_OH, + DMA_BIDIRECTIONAL); + adapter->bounce_buffer_dma = DMA_ERROR_CODE; + } + kfree(adapter->bounce_buffer); + adapter->bounce_buffer = NULL; + } } static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter, @@ -508,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev) int rc; union ibmveth_buf_desc rxq_desc; int i; + struct device *dev; ibmveth_debug_printk("open starting\n"); @@ -536,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev) return -ENOMEM; } - adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev, + dev = &adapter->vdev->dev; + + adapter->buffer_list_dma = dma_map_single(dev, adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); - adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev, + adapter->filter_list_dma = dma_map_single(dev, adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); - adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev, + adapter->rx_queue.queue_dma = dma_map_single(dev, adapter->rx_queue.queue_addr, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); - if((dma_mapping_error(adapter->buffer_list_dma) ) || - (dma_mapping_error(adapter->filter_list_dma)) || - (dma_mapping_error(adapter->rx_queue.queue_dma))) { + if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || - (dma_mapping_error(dev, adapter->filter_list_dma)) || + (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) { ibmveth_error_printk("unable to map filter or buffer list pages\n"); ibmveth_cleanup(adapter); napi_disable(&adapter->napi); @@ -607,6 +636,24 @@ static int
ibmveth_open(struct net_device *netdev) return rc; } + adapter->bounce_buffer = + kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); + if (!adapter->bounce_buffer) { + ibmveth_error_printk("unable to allocate bounce buffer\n"); + ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); + return -ENOMEM; + } + adapter->bounce_buffer_dma = + dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, + netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { + ibmveth_error_printk("unable to map bounce buffer\n"); + ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); + return -ENOMEM; + } + ibmveth_debug_printk("initial replenish cycle\n"); ibmveth_interrupt(netdev->irq, netdev); @@ -853,10 +900,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned int tx_packets = 0; unsigned int tx_send_failed = 0; unsigned int tx_map_failed = 0; + int used_bounce = 0; + unsigned long data_dma_addr; desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; - desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data, - skb->len, DMA_TO_DEVICE); + data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, + skb->len, DMA_TO_DEVICE); if (skb->ip_summed == CHECKSUM_PARTIAL && ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { @@ -875,12 +924,16 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) buf[1] = 0; } - if (dma_mapping_error(desc.fields.address)) { - ibmveth_error_printk("tx: unable to map xmit buffer\n"); + if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + ibmveth_error_printk("tx: unable to map xmit buffer\n"); + skb_copy_from_linear_data(skb, adapter->bounce_buffer, + skb->len); + desc.fields.address = adapter->bounce_buffer_dma; tx_map_failed++; - tx_dropped++; - goto out; - } + used_bounce = 1; + } else + desc.fields.address = data_dma_addr; /* send the frame.
Arbitrarily set retrycount to 1024 */ correlator = 0; @@ -904,8 +957,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) netdev->trans_start = jiffies; } - dma_unmap_single(&adapter->vdev->dev, desc.fields.address, - skb->len, DMA_TO_DEVICE); + if (!used_bounce) + dma_unmap_single(&adapter->vdev->dev, data_dma_addr, + skb->len, DMA_TO_DEVICE); out: spin_lock_irqsave(&adapter->stats_lock, flags); netdev->stats.tx_dropped += tx_dropped; @@ -1053,9 +1107,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) { struct ibmveth_adapter *adapter = dev->priv; + struct vio_dev *viodev = adapter->vdev; int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; - int reinit = 0; - int i, rc; + int i; if (new_mtu < IBMVETH_MAX_MTU) return -EINVAL; @@ -1067,23 +1121,34 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) if (i == IbmVethNumBufferPools) return -EINVAL; + /* Deactivate all the buffer pools so that the next loop can activate + only the buffer pools necessary to hold the new MTU */ + for (i = 0; i < IbmVethNumBufferPools; i++) + if (adapter->rx_buff_pool[i].active) { + ibmveth_free_buffer_pool(adapter, + &adapter->rx_buff_pool[i]); + adapter->rx_buff_pool[i].active = 0; + } + /* Look for an active buffer pool that can hold the new MTU */ for(i = 0; i<IbmVethNumBufferPools; i++) { - if (!adapter->rx_buff_pool[i].active) { - adapter->rx_buff_pool[i].active = 1; - reinit = 1; - } + adapter->rx_buff_pool[i].active = 1; if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { - if (reinit && netif_running(adapter->netdev)) { + if (netif_running(adapter->netdev)) { adapter->pool_config = 1; ibmveth_close(adapter->netdev); adapter->pool_config = 0; dev->mtu = new_mtu; - if ((rc = ibmveth_open(adapter->netdev))) - return rc; - } else - dev->mtu = new_mtu; + vio_cmo_set_dev_desired(viodev, + ibmveth_get_desired_dma + (viodev)); + return ibmveth_open(adapter->netdev); + } + dev->mtu = new_mtu; + vio_cmo_set_dev_desired(viodev, + ibmveth_get_desired_dma + (viodev)); return 0; } } @@ -1098,6 +1163,46 @@ static void ibmveth_poll_controller(struct net_device *dev) } #endif +/** + * ibmveth_get_desired_dma - Calculate IO memory desired by the driver + * + * @vdev: struct vio_dev for the device whose desired IO mem is to be returned + * + * Return value: + * Number of bytes of IO data the driver will need to perform well. + */ +static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) +{ + struct net_device *netdev = dev_get_drvdata(&vdev->dev); + struct ibmveth_adapter *adapter; + unsigned long ret; + int i; + int rxqentries = 1; + + /* netdev inits at probe time along with the structures we need below*/ + if (netdev == NULL) + return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT); + + adapter = netdev_priv(netdev); + + ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; + ret += IOMMU_PAGE_ALIGN(netdev->mtu); + + for (i = 0; i < IbmVethNumBufferPools; i++) { + /* add the size of the active receive buffers */ + if (adapter->rx_buff_pool[i].active) + ret += + adapter->rx_buff_pool[i].size * + IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i]. 
+ buff_size); + rxqentries += adapter->rx_buff_pool[i].size; + } + /* add the size of the receive queue entries */ + ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry)); + + return ret; +} + static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) { int rc, i; @@ -1242,6 +1347,8 @@ static int __devexit ibmveth_remove(struct vio_dev *dev) ibmveth_proc_unregister_adapter(adapter); free_netdev(netdev); + dev_set_drvdata(&dev->dev, NULL); + return 0; } @@ -1402,14 +1509,15 @@ const char * buf, size_t count) return -EPERM; } - pool->active = 0; if (netif_running(netdev)) { adapter->pool_config = 1; ibmveth_close(netdev); + pool->active = 0; adapter->pool_config = 0; if ((rc = ibmveth_open(netdev))) return rc; } + pool->active = 0; } } else if (attr == &veth_num_attr) { if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) @@ -1485,6 +1593,7 @@ static struct vio_driver ibmveth_driver = { .id_table = ibmveth_device_table, .probe = ibmveth_probe, .remove = ibmveth_remove, + .get_desired_dma = ibmveth_get_desired_dma, .driver = { .name = ibmveth_driver_name, .owner = THIS_MODULE, diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h index 41f61cd18852..d28186948752 100644 --- a/drivers/net/ibmveth.h +++ b/drivers/net/ibmveth.h @@ -93,9 +93,12 @@ static inline long h_illan_attributes(unsigned long unit_address, plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) #define IbmVethNumBufferPools 5 +#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */ #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */ #define IBMVETH_MAX_MTU 68 #define IBMVETH_MAX_POOL_COUNT 4096 +#define IBMVETH_BUFF_LIST_SIZE 4096 +#define IBMVETH_FILT_LIST_SIZE 4096 #define IBMVETH_MAX_BUF_SIZE (1024 * 128) static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 }; @@ -143,6 +146,8 @@ struct ibmveth_adapter { struct ibmveth_rx_q rx_queue; int pool_config; int rx_csum; + void *bounce_buffer; + dma_addr_t bounce_buffer_dma; /* adapter specific stats */ u64 replenish_task_cycles; diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index b8d0639c1cdf..c46864d626b2 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c @@ -1128,7 +1128,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp, msg->data.addr[0] = dma_map_single(port->dev, skb->data, skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(msg->data.addr[0])) + if (dma_mapping_error(port->dev, msg->data.addr[0])) goto recycle_and_drop; msg->dev = port->dev; @@ -1226,7 +1226,7 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx, dma_address = msg->data.addr[0]; dma_length = msg->data.len[0]; - if (!dma_mapping_error(dma_address)) + if (!dma_mapping_error(msg->dev, dma_address)) dma_unmap_single(msg->dev, dma_address, dma_length, DMA_TO_DEVICE); diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c index bd34bf08830d..8a8b56135a58 100644 --- a/drivers/net/mlx4/eq.c +++ b/drivers/net/mlx4/eq.c @@ -526,7 +526,7 @@ int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt) return -ENOMEM; priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(priv->eq_table.icm_dma)) { + if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) { __free_page(priv->eq_table.icm_page); return -ENOMEM; } diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 993d87c9296f..edc0fd588985 100644 --- 
a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -650,7 +650,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, mac->bufsz - LOCAL_SKB_ALIGN, PCI_DMA_FROMDEVICE); - if (unlikely(dma_mapping_error(dma))) { + if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) { dev_kfree_skb_irq(info->skb); break; } @@ -1519,7 +1519,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), PCI_DMA_TODEVICE); map_size[0] = skb_headlen(skb); - if (dma_mapping_error(map[0])) + if (pci_dma_mapping_error(mac->dma_pdev, map[0])) goto out_err_nolock; for (i = 0; i < nfrags; i++) { @@ -1529,7 +1529,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) frag->page_offset, frag->size, PCI_DMA_TODEVICE); map_size[i+1] = frag->size; - if (dma_mapping_error(map[i+1])) { + if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) { nfrags = i; goto out_err_nolock; } diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 739b3ab7bccc..ddccc074a76a 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -581,12 +581,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (file == ppp->owner) ppp_shutdown_interface(ppp); } - if (atomic_read(&file->f_count) <= 2) { + if (atomic_long_read(&file->f_count) <= 2) { ppp_release(NULL, file); err = 0; } else - printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n", - atomic_read(&file->f_count)); + printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n", + atomic_long_read(&file->f_count)); unlock_kernel(); return err; } diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index e7d48a352beb..e82b37bbd6c3 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -328,7 +328,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, qdev->lrg_buffer_len - QL_HEADER_SPACE, PCI_DMA_FROMDEVICE); - err = pci_dma_mapping_error(map); + err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); @@ -1919,7 +1919,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) QL_HEADER_SPACE, PCI_DMA_FROMDEVICE); - err = pci_dma_mapping_error(map); + err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); @@ -2454,7 +2454,7 @@ static int ql_send_map(struct ql3_adapter *qdev, */ map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); - err = pci_dma_mapping_error(map); + err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); @@ -2487,7 +2487,7 @@ static int ql_send_map(struct ql3_adapter *qdev, sizeof(struct oal), PCI_DMA_TODEVICE); - err = pci_dma_mapping_error(map); + err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", @@ -2514,7 +2514,7 @@ static int ql_send_map(struct ql3_adapter *qdev, frag->page_offset, frag->size, PCI_DMA_TODEVICE); - err = pci_dma_mapping_error(map); + err = pci_dma_mapping_error(qdev->pdev, map); if(err) { printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", qdev->ndev->name, err); @@ -2916,7 +2916,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) QL_HEADER_SPACE, PCI_DMA_FROMDEVICE); - err = pci_dma_mapping_error(map); + err = pci_dma_mapping_error(qdev->pdev, map); 
if(err) { printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 9dae40ccf048..86d77d05190a 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -2512,8 +2512,8 @@ static void stop_nic(struct s2io_nic *nic) * Return Value: * SUCCESS on success or an appropriate -ve value on failure. */ - -static int fill_rx_buffers(struct ring_info *ring, int from_card_up) +static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, + int from_card_up) { struct sk_buff *skb; struct RxD_t *rxdp; @@ -2602,7 +2602,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) rxdp1->Buffer0_ptr = pci_map_single (ring->pdev, skb->data, size - NET_IP_ALIGN, PCI_DMA_FROMDEVICE); - if(pci_dma_mapping_error(rxdp1->Buffer0_ptr)) + if (pci_dma_mapping_error(nic->pdev, + rxdp1->Buffer0_ptr)) goto pci_map_failed; rxdp->Control_2 = @@ -2636,7 +2637,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) rxdp3->Buffer0_ptr = pci_map_single(ring->pdev, ba->ba_0, BUF0_LEN, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) + if (pci_dma_mapping_error(nic->pdev, + rxdp3->Buffer0_ptr)) goto pci_map_failed; } else pci_dma_sync_single_for_device(ring->pdev, @@ -2655,7 +2657,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) (ring->pdev, skb->data, ring->mtu + 4, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) + if (pci_dma_mapping_error(nic->pdev, + rxdp3->Buffer2_ptr)) goto pci_map_failed; if (from_card_up) { @@ -2664,8 +2667,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) ba->ba_1, BUF1_LEN, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error - (rxdp3->Buffer1_ptr)) { + if (pci_dma_mapping_error(nic->pdev, + rxdp3->Buffer1_ptr)) { pci_unmap_single (ring->pdev, (dma_addr_t)(unsigned long) @@ -2806,9 +2809,9 @@ static void free_rx_buffers(struct s2io_nic *sp) } } -static int s2io_chk_rx_buffers(struct ring_info *ring) +static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring) { - if (fill_rx_buffers(ring, 0) == -ENOMEM) { + if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) { DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); } @@ -2848,7 +2851,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget) return 0; pkts_processed = rx_intr_handler(ring, budget); - s2io_chk_rx_buffers(ring); + s2io_chk_rx_buffers(nic, ring); if (pkts_processed < budget_org) { netif_rx_complete(dev, napi); @@ -2882,7 +2885,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget) for (i = 0; i < config->rx_ring_num; i++) { ring = &mac_control->rings[i]; ring_pkts_processed = rx_intr_handler(ring, budget); - s2io_chk_rx_buffers(ring); + s2io_chk_rx_buffers(nic, ring); pkts_processed += ring_pkts_processed; budget -= ring_pkts_processed; if (budget <= 0) @@ -2939,7 +2942,8 @@ static void s2io_netpoll(struct net_device *dev) rx_intr_handler(&mac_control->rings[i], 0); for (i = 0; i < config->rx_ring_num; i++) { - if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) { + if (fill_rx_buffers(nic, &mac_control->rings[i], 0) == + -ENOMEM) { DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); break; @@ -4235,14 +4239,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) txdp->Buffer_Pointer = pci_map_single(sp->pdev, fifo->ufo_in_band_v, sizeof(u64), PCI_DMA_TODEVICE); - if 
(pci_dma_mapping_error(txdp->Buffer_Pointer)) + if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) goto pci_map_failed; txdp++; } txdp->Buffer_Pointer = pci_map_single (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(txdp->Buffer_Pointer)) + if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) goto pci_map_failed; txdp->Host_Control = (unsigned long) skb; @@ -4345,7 +4349,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) netif_rx_schedule(dev, &ring->napi); } else { rx_intr_handler(ring, 0); - s2io_chk_rx_buffers(ring); + s2io_chk_rx_buffers(sp, ring); } return IRQ_HANDLED; @@ -4826,7 +4830,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) */ if (!config->napi) { for (i = 0; i < config->rx_ring_num; i++) - s2io_chk_rx_buffers(&mac_control->rings[i]); + s2io_chk_rx_buffers(sp, &mac_control->rings[i]); } writeq(sp->general_int_mask, &bar0->general_int_mask); readl(&bar0->general_int_status); @@ -6859,7 +6863,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, pci_map_single( sp->pdev, (*skb)->data, size - NET_IP_ALIGN, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp1->Buffer0_ptr)) + if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr)) goto memalloc_failed; rxdp->Host_Control = (unsigned long) (*skb); } @@ -6886,12 +6890,13 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, pci_map_single(sp->pdev, (*skb)->data, dev->mtu + 4, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) + if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr)) goto memalloc_failed; rxdp3->Buffer0_ptr = *temp0 = pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) { + if (pci_dma_mapping_error(sp->pdev, + rxdp3->Buffer0_ptr)) { pci_unmap_single (sp->pdev, (dma_addr_t)rxdp3->Buffer2_ptr, dev->mtu + 4, PCI_DMA_FROMDEVICE); @@ -6903,7 +6908,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, rxdp3->Buffer1_ptr = *temp1 = pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) { + if (pci_dma_mapping_error(sp->pdev, + rxdp3->Buffer1_ptr)) { pci_unmap_single (sp->pdev, (dma_addr_t)rxdp3->Buffer0_ptr, BUF0_LEN, PCI_DMA_FROMDEVICE); @@ -7187,7 +7193,7 @@ static int s2io_card_up(struct s2io_nic * sp) for (i = 0; i < config->rx_ring_num; i++) { mac_control->rings[i].mtu = dev->mtu; - ret = fill_rx_buffers(&mac_control->rings[i], 1); + ret = fill_rx_buffers(sp, &mac_control->rings[i], 1); if (ret) { DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", dev->name); diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 601b001437c0..0d27dd39bc09 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c @@ -233,7 +233,7 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue, rx_buf->data, rx_buf->len, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) { + if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) { dev_kfree_skb_any(rx_buf->skb); rx_buf->skb = NULL; return -EIO; @@ -275,7 +275,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, 0, efx_rx_buf_size(efx), PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(dma_addr))) { + if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) { __free_pages(rx_buf->page, efx->rx_buffer_order); rx_buf->page = NULL; return -EIO; diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 
5cdd082ab8f6..5e8374ab28ee 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -172,7 +172,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, /* Process all fragments */ while (1) { - if (unlikely(pci_dma_mapping_error(dma_addr))) + if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr))) goto pci_err; /* Store fields for marking in the per-fragment final @@ -661,7 +661,8 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, TSOH_BUFFER(tsoh), header_len, PCI_DMA_TODEVICE); - if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) { + if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev, + tsoh->dma_addr))) { kfree(tsoh); return NULL; } @@ -863,7 +864,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, len, PCI_DMA_TODEVICE); - if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) { + if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { st->ifc.unmap_len = len; st->ifc.len = len; st->ifc.dma_addr = st->ifc.unmap_addr; diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 00aa0b108cb9..b6435d0d71f9 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -452,7 +452,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, /* iommu-map the skb */ buf = pci_map_single(card->pdev, descr->skb->data, SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(buf)) { + if (pci_dma_mapping_error(card->pdev, buf)) { dev_kfree_skb_any(descr->skb); descr->skb = NULL; if (netif_msg_rx_err(card) && net_ratelimit()) @@ -691,7 +691,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, unsigned long flags; buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(buf)) { + if (pci_dma_mapping_error(card->pdev, buf)) { if (netif_msg_tx_err(card) && net_ratelimit()) dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " "Dropping packet\n", skb->data, skb->len); diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index a645e5028c14..8487ace9d2e3 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -506,7 +506,7 @@ static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle) return NULL; *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(*dma_handle)) { + if (pci_dma_mapping_error(hwdev, *dma_handle)) { free_page((unsigned long)buf); return NULL; } @@ -536,7 +536,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, return NULL; *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(*dma_handle)) { + if (pci_dma_mapping_error(hwdev, *dma_handle)) { dev_kfree_skb_any(skb); return NULL; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index c28d7cb2035b..0196a0df9021 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -19,6 +19,7 @@ //#define DEBUG #include <linux/netdevice.h> #include <linux/etherdevice.h> +#include <linux/ethtool.h> #include <linux/module.h> #include <linux/virtio.h> #include <linux/virtio_net.h> @@ -54,9 +55,15 @@ struct virtnet_info struct tasklet_struct tasklet; bool free_in_tasklet; + /* I like... big packets and I cannot lie! */ + bool big_packets; + /* Receive & send queues. 
*/ struct sk_buff_head recv; struct sk_buff_head send; + + /* Chain pages by the private ptr. */ + struct page *pages; }; static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb) @@ -69,6 +76,23 @@ static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb) sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr)); } +static void give_a_page(struct virtnet_info *vi, struct page *page) +{ + page->private = (unsigned long)vi->pages; + vi->pages = page; +} + +static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask) +{ + struct page *p = vi->pages; + + if (p) + vi->pages = (struct page *)p->private; + else + p = alloc_page(gfp_mask); + return p; +} + static void skb_xmit_done(struct virtqueue *svq) { struct virtnet_info *vi = svq->vdev->priv; @@ -88,6 +112,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb, unsigned len) { struct virtio_net_hdr *hdr = skb_vnet_hdr(skb); + int err; if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); @@ -95,10 +120,23 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb, goto drop; } len -= sizeof(struct virtio_net_hdr); - BUG_ON(len > MAX_PACKET_LEN); - skb_trim(skb, len); + if (len <= MAX_PACKET_LEN) { + unsigned int i; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) + give_a_page(dev->priv, skb_shinfo(skb)->frags[i].page); + skb->data_len = 0; + skb_shinfo(skb)->nr_frags = 0; + } + + err = pskb_trim(skb, len); + if (err) { + pr_debug("%s: pskb_trim failed %i %d\n", dev->name, len, err); + dev->stats.rx_dropped++; + goto drop; + } + skb->truesize += skb->data_len; dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; @@ -160,7 +198,7 @@ static void try_fill_recv(struct virtnet_info *vi) { struct sk_buff *skb; struct scatterlist sg[2+MAX_SKB_FRAGS]; - int num, err; + int num, err, i; sg_init_table(sg, 2+MAX_SKB_FRAGS); for (;;) { @@ -170,6 +208,24 @@ static void try_fill_recv(struct virtnet_info *vi) skb_put(skb, MAX_PACKET_LEN); vnet_hdr_to_sg(sg, skb); + + if (vi->big_packets) { + for (i = 0; i < MAX_SKB_FRAGS; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + f->page = get_a_page(vi, GFP_ATOMIC); + if (!f->page) + break; + + f->page_offset = 0; + f->size = PAGE_SIZE; + + skb->data_len += PAGE_SIZE; + skb->len += PAGE_SIZE; + + skb_shinfo(skb)->nr_frags++; + } + } + num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; skb_queue_head(&vi->recv, skb); @@ -335,16 +391,11 @@ again: free_old_xmit_skbs(vi); /* If we has a buffer left over from last time, send it now. */ - if (unlikely(vi->last_xmit_skb)) { - if (xmit_skb(vi, vi->last_xmit_skb) != 0) { - /* Drop this skb: we only queue one. */ - vi->dev->stats.tx_dropped++; - kfree_skb(skb); - skb = NULL; - goto stop_queue; - } - vi->last_xmit_skb = NULL; - } + if (unlikely(vi->last_xmit_skb) && + xmit_skb(vi, vi->last_xmit_skb) != 0) + goto stop_queue; + + vi->last_xmit_skb = NULL; /* Put new one in send queue and do transmit */ if (likely(skb)) { @@ -370,6 +421,11 @@ stop_queue: netif_start_queue(dev); goto again; } + if (skb) { + /* Drop this skb: we only queue one. 
*/ + vi->dev->stats.tx_dropped++; + kfree_skb(skb); + } goto done; } @@ -408,6 +464,22 @@ static int virtnet_close(struct net_device *dev) return 0; } +static int virtnet_set_tx_csum(struct net_device *dev, u32 data) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct virtio_device *vdev = vi->vdev; + + if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) + return -ENOSYS; + + return ethtool_op_set_tx_hw_csum(dev, data); +} + +static struct ethtool_ops virtnet_ethtool_ops = { + .set_tx_csum = virtnet_set_tx_csum, + .set_sg = ethtool_op_set_sg, +}; + static int virtnet_probe(struct virtio_device *vdev) { int err; @@ -427,6 +499,7 @@ static int virtnet_probe(struct virtio_device *vdev) #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = virtnet_netpoll; #endif + SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); SET_NETDEV_DEV(dev, &vdev->dev); /* Do we support "hardware" checksums? */ @@ -462,11 +535,18 @@ static int virtnet_probe(struct virtio_device *vdev) vi->dev = dev; vi->vdev = vdev; vdev->priv = vi; + vi->pages = NULL; /* If they give us a callback when all buffers are done, we don't need * the timer. */ vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY); + /* If we can receive ANY GSO packets, we must allocate large ones. */ + if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) + || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) + || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) + vi->big_packets = true; + /* We expect two virtqueues, receive then send. */ vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); if (IS_ERR(vi->rvq)) { @@ -541,6 +621,10 @@ static void virtnet_remove(struct virtio_device *vdev) vdev->config->del_vq(vi->svq); vdev->config->del_vq(vi->rvq); unregister_netdev(vi->dev); + + while (vi->pages) + __free_pages(get_a_page(vi, GFP_KERNEL), 0); + free_netdev(vi->dev); } @@ -553,7 +637,9 @@ static unsigned int features[] = { VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, - VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY, + VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, + VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */ + VIRTIO_F_NOTIFY_ON_EMPTY, }; static struct virtio_driver virtio_net = { diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index 217d506527a9..d9769c527346 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c @@ -1166,7 +1166,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) bf->skb = skb; bf->skbaddr = pci_map_single(sc->pdev, skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(bf->skbaddr))) { + if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) { ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); dev_kfree_skb(skb); bf->skb = NULL; @@ -1918,7 +1918,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " "skbaddr %llx\n", skb, skb->data, skb->len, (unsigned long long)bf->skbaddr); - if (pci_dma_mapping_error(bf->skbaddr)) { + if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) { ATH5K_ERR(sc, "beacon DMA mapping failed\n"); return -EIO; }
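The largest change above, ibmveth, keeps transmits alive under CMO (Cooperative Memory Overcommit) firmware, where a streaming DMA mapping can legitimately fail: the frame is copied into a bounce buffer that was mapped once at open time. A reduced sketch of that fallback follows, assuming a hypothetical example_tx_ctx in place of ibmveth_adapter; the DMA and skb calls are the real kernel APIs.

```c
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical context; ibmveth keeps these fields in ibmveth_adapter. */
struct example_tx_ctx {
	struct device *dev;
	void *bounce_buffer;		/* kmalloc'd and mapped at open time */
	dma_addr_t bounce_buffer_dma;
};

/*
 * Map skb->data for transmit; on mapping failure, copy the frame into
 * the pre-mapped bounce buffer instead of dropping it. The caller must
 * skip dma_unmap_single() when *used_bounce is set, as ibmveth does.
 */
static dma_addr_t example_map_tx(struct example_tx_ctx *ctx,
				 struct sk_buff *skb, bool *used_bounce)
{
	dma_addr_t addr = dma_map_single(ctx->dev, skb->data, skb->len,
					 DMA_TO_DEVICE);

	if (dma_mapping_error(ctx->dev, addr)) {
		skb_copy_from_linear_data(skb, ctx->bounce_buffer, skb->len);
		*used_bounce = true;
		return ctx->bounce_buffer_dma;
	}

	*used_bounce = false;
	return addr;
}
```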