author     Eric Dumazet <eric.dumazet@gmail.com>      2009-10-13 09:34:20 +0400
committer  David S. Miller <davem@davemloft.net>      2009-10-13 22:48:18 +0400
commit     89d71a66c40d629e3b1285def543ab1425558cd5 (patch)
tree       45159e85418170fe36e4e023d9617693625d1740
parent     bff1c09640b3006bca711e18ef08a5fb955ad9b5 (diff)
download   linux-89d71a66c40d629e3b1285def543ab1425558cd5.tar.xz
net: Use netdev_alloc_skb_ip_align()
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
38 files changed, 90 insertions, 232 deletions
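
For context before the per-driver hunks: every conversion below collapses the open-coded pair of netdev_alloc_skb() followed by skb_reserve(skb, NET_IP_ALIGN) into a single call to netdev_alloc_skb_ip_align(). A minimal sketch of that helper, roughly mirroring its definition in include/linux/skbuff.h (consult the tree at this commit for the authoritative version):

/* Allocate an RX skb and reserve NET_IP_ALIGN bytes so that the IP header
 * ends up aligned once the 14-byte Ethernet header has been pulled.
 * NET_IP_ALIGN is 2 on most architectures and 0 where unaligned access is
 * cheap, in which case the reserve compiles away.
 */
static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
							 unsigned int length)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
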
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 975e25b19ebe..32031eaf4910 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -2560,7 +2560,7 @@ boomerang_rx(struct net_device *dev) struct sk_buff *skb; entry = vp->dirty_rx % RX_RING_SIZE; if (vp->rx_skbuff[entry] == NULL) { - skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); if (skb == NULL) { static unsigned long last_jif; if (time_after(jiffies, last_jif + 10 * HZ)) { @@ -2572,7 +2572,6 @@ boomerang_rx(struct net_device *dev) break; /* Bad news! */ } - skb_reserve(skb, NET_IP_ALIGN); vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); vp->rx_skbuff[entry] = skb; } diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 83a1922e68e0..ab451bb8995a 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -549,14 +549,12 @@ rx_status_loop: pr_debug("%s: rx slot %d status 0x%x len %d\n", dev->name, rx_tail, status, len); - new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN); + new_skb = netdev_alloc_skb_ip_align(dev, buflen); if (!new_skb) { dev->stats.rx_dropped++; goto rx_next; } - skb_reserve(new_skb, NET_IP_ALIGN); - dma_unmap_single(&cp->pdev->dev, mapping, buflen, PCI_DMA_FROMDEVICE); @@ -1057,12 +1055,10 @@ static int cp_refill_rx(struct cp_private *cp) struct sk_buff *skb; dma_addr_t mapping; - skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz); if (!skb) goto err_out; - skb_reserve(skb, NET_IP_ALIGN); - mapping = dma_map_single(&cp->pdev->dev, skb->data, cp->rx_buf_sz, PCI_DMA_FROMDEVICE); cp->rx_skb[i] = skb; diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 4a3628755026..7e333f73b228 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -2004,9 +2004,8 @@ no_early_rx: /* Malloc up new buffer, compatible with net-2e. */ /* Omit the four octet CRC from the length. */ - skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(dev, pkt_size); if (likely(skb)) { - skb_reserve (skb, NET_IP_ALIGN); /* 16 byte align the IP fields. 
*/ #if RX_BUF_IDX == 3 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); #else diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c index 955da733c2ad..8b889ab544b0 100644 --- a/drivers/net/atl1e/atl1e_main.c +++ b/drivers/net/atl1e/atl1e_main.c @@ -1433,14 +1433,12 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & RRS_PKT_SIZE_MASK) - 4; /* CRC */ - skb = netdev_alloc_skb(netdev, - packet_size + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(netdev, packet_size); if (skb == NULL) { dev_warn(&pdev->dev, "%s: Memory squeeze," "deferring packet.\n", netdev->name); goto skip_pkt; } - skb_reserve(skb, NET_IP_ALIGN); skb->dev = netdev; memcpy(skb->data, (u8 *)(prrs + 1), packet_size); skb_put(skb, packet_size); diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 00569dc1313c..963df502260a 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -1864,21 +1864,14 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); - skb = netdev_alloc_skb(adapter->netdev, - adapter->rx_buffer_len + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(adapter->netdev, + adapter->rx_buffer_len); if (unlikely(!skb)) { /* Better luck next round */ adapter->netdev->stats.rx_dropped++; break; } - /* - * Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - buffer_info->alloced = 1; buffer_info->skb = skb; buffer_info->length = (u16) adapter->rx_buffer_len; diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c index ab688862093f..0d268075bad5 100644 --- a/drivers/net/atlx/atl2.c +++ b/drivers/net/atlx/atl2.c @@ -409,7 +409,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter) if (rxd->status.ok && rxd->status.pkt_size >= 60) { int rx_size = (int)(rxd->status.pkt_size - 4); /* alloc new buffer */ - skb = netdev_alloc_skb(netdev, rx_size + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(netdev, rx_size); if (NULL == skb) { printk(KERN_WARNING "%s: Mem squeeze, deferring packet.\n", @@ -421,7 +421,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter) netdev->stats.rx_dropped++; break; } - skb_reserve(skb, NET_IP_ALIGN); skb->dev = netdev; memcpy(skb->data, rxd->packet, rx_size); skb_put(skb, rx_size); diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c index ba29dc319b34..1f6c5486d715 100644 --- a/drivers/net/bcm63xx_enet.c +++ b/drivers/net/bcm63xx_enet.c @@ -320,16 +320,13 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget) if (len < copybreak) { struct sk_buff *nskb; - nskb = netdev_alloc_skb(dev, len + NET_IP_ALIGN); + nskb = netdev_alloc_skb_ip_align(dev, len); if (!nskb) { /* forget packet, just rearm desc */ priv->stats.rx_dropped++; continue; } - /* since we're copying the data, we can align - * them properly */ - skb_reserve(nskb, NET_IP_ALIGN); dma_sync_single_for_cpu(kdev, desc->address, len, DMA_FROM_DEVICE); memcpy(nskb->data, skb->data, len); diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 0e92a1f055a2..e0f9d6477184 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -756,7 +756,7 @@ static void be_rx_compl_process(struct be_adapter *adapter, if ((adapter->cap == 0x400) && !vtm) vlanf = 0; - skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN); + skb = 
netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN); if (!skb) { if (net_ratelimit()) dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); @@ -764,8 +764,6 @@ static void be_rx_compl_process(struct be_adapter *adapter, return; } - skb_reserve(skb, NET_IP_ALIGN); - skb_fill_rx_data(adapter, skb, rxcp); if (do_pkt_csum(rxcp, adapter->rx_csum)) diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index 61f9da2b4943..678222389407 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c @@ -380,9 +380,8 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, return NULL; } - skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE); + skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE); if (likely(skb)) { - skb_reserve(skb, 2); skb_put(desc->skb, desc->datalen); desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); desc->skb->ip_summed = CHECKSUM_NONE; @@ -991,12 +990,11 @@ static int cpmac_open(struct net_device *dev) priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { - skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE); + skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE); if (unlikely(!skb)) { res = -ENOMEM; goto fail_desc; } - skb_reserve(skb, 2); desc->skb = skb; desc->data_mapping = dma_map_single(&dev->dev, skb->data, CPMAC_SKB_SIZE, diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index 7fa7a907f134..ce8fef184f2c 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c @@ -505,7 +505,8 @@ rio_timer (unsigned long data) entry = np->old_rx % RX_RING_SIZE; /* Dropped packets don't need to re-allocate */ if (np->rx_skbuff[entry] == NULL) { - skb = netdev_alloc_skb (dev, np->rx_buf_sz); + skb = netdev_alloc_skb_ip_align(dev, + np->rx_buf_sz); if (skb == NULL) { np->rx_ring[entry].fraginfo = 0; printk (KERN_INFO @@ -514,8 +515,6 @@ rio_timer (unsigned long data) break; } np->rx_skbuff[entry] = skb; - /* 16 byte align the IP header */ - skb_reserve (skb, 2); np->rx_ring[entry].fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data, np->rx_buf_sz, @@ -576,7 +575,9 @@ alloc_list (struct net_device *dev) /* Allocate the rx buffers */ for (i = 0; i < RX_RING_SIZE; i++) { /* Allocated fixed size of skbuff */ - struct sk_buff *skb = netdev_alloc_skb (dev, np->rx_buf_sz); + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); np->rx_skbuff[i] = skb; if (skb == NULL) { printk (KERN_ERR @@ -584,7 +585,6 @@ alloc_list (struct net_device *dev) dev->name); break; } - skb_reserve (skb, 2); /* 16 byte align the IP header. */ /* Rubicon now supports 40 bits of addressing space. 
*/ np->rx_ring[i].fraginfo = cpu_to_le64 ( pci_map_single ( @@ -871,13 +871,11 @@ receive_packet (struct net_device *dev) PCI_DMA_FROMDEVICE); skb_put (skb = np->rx_skbuff[entry], pkt_len); np->rx_skbuff[entry] = NULL; - } else if ((skb = netdev_alloc_skb(dev, pkt_len + 2))) { + } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) { pci_dma_sync_single_for_cpu(np->pdev, desc_to_dma(desc), np->rx_buf_sz, PCI_DMA_FROMDEVICE); - /* 16 byte align the IP header */ - skb_reserve (skb, 2); skb_copy_to_linear_data (skb, np->rx_skbuff[entry]->data, pkt_len); @@ -907,7 +905,7 @@ receive_packet (struct net_device *dev) struct sk_buff *skb; /* Dropped packets don't need to re-allocate */ if (np->rx_skbuff[entry] == NULL) { - skb = netdev_alloc_skb(dev, np->rx_buf_sz); + skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); if (skb == NULL) { np->rx_ring[entry].fraginfo = 0; printk (KERN_INFO @@ -917,8 +915,6 @@ receive_packet (struct net_device *dev) break; } np->rx_skbuff[entry] = skb; - /* 16 byte align the IP header */ - skb_reserve (skb, 2); np->rx_ring[entry].fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data, np->rx_buf_sz, diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 679965c2bb86..ff83efd47b0d 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -1839,11 +1839,10 @@ static inline void e100_start_receiver(struct nic *nic, struct rx *rx) #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN) static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) { - if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN))) + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) return -ENOMEM; - /* Align, init, and map the RFD. */ - skb_reserve(rx->skb, NET_IP_ALIGN); + /* Init, and map the RFD. 
*/ skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 6a6141482979..c938114a34ab 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -3866,9 +3866,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, * of reassembly being done in the stack */ if (length < copybreak) { struct sk_buff *new_skb = - netdev_alloc_skb(netdev, length + NET_IP_ALIGN); + netdev_alloc_skb_ip_align(netdev, length); if (new_skb) { - skb_reserve(new_skb, NET_IP_ALIGN); skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, (skb->data - @@ -3937,9 +3936,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, struct e1000_buffer *buffer_info; struct sk_buff *skb; unsigned int i; - unsigned int bufsz = 256 - - 16 /*for skb_reserve */ - - NET_IP_ALIGN; + unsigned int bufsz = 256 - 16 /*for skb_reserve */ ; i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; @@ -3951,7 +3948,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, goto check_page; } - skb = netdev_alloc_skb(netdev, bufsz); + skb = netdev_alloc_skb_ip_align(netdev, bufsz); if (unlikely(!skb)) { /* Better luck next round */ adapter->alloc_rx_buff_failed++; @@ -3964,7 +3961,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, DPRINTK(PROBE, ERR, "skb align check failed: %u bytes " "at %p\n", bufsz, skb->data); /* Try again, without freeing the previous */ - skb = netdev_alloc_skb(netdev, bufsz); + skb = netdev_alloc_skb_ip_align(netdev, bufsz); /* Failed allocation, critical failure */ if (!skb) { dev_kfree_skb(oldskb); @@ -3982,12 +3979,6 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, /* Use new allocation */ dev_kfree_skb(oldskb); } - /* Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - buffer_info->skb = skb; buffer_info->length = adapter->rx_buffer_len; check_page: @@ -4044,7 +4035,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, struct e1000_buffer *buffer_info; struct sk_buff *skb; unsigned int i; - unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; + unsigned int bufsz = adapter->rx_buffer_len; i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; @@ -4056,7 +4047,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, goto map_skb; } - skb = netdev_alloc_skb(netdev, bufsz); + skb = netdev_alloc_skb_ip_align(netdev, bufsz); if (unlikely(!skb)) { /* Better luck next round */ adapter->alloc_rx_buff_failed++; @@ -4069,7 +4060,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " "at %p\n", bufsz, skb->data); /* Try again, without freeing the previous */ - skb = netdev_alloc_skb(netdev, bufsz); + skb = netdev_alloc_skb_ip_align(netdev, bufsz); /* Failed allocation, critical failure */ if (!skb) { dev_kfree_skb(oldskb); @@ -4088,12 +4079,6 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, /* Use new allocation */ dev_kfree_skb(oldskb); } - /* Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - buffer_info->skb = skb; buffer_info->length = 
adapter->rx_buffer_len; map_skb: diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 21af3984e5c2..376924804f3f 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -167,7 +167,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, struct e1000_buffer *buffer_info; struct sk_buff *skb; unsigned int i; - unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; + unsigned int bufsz = adapter->rx_buffer_len; i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; @@ -179,20 +179,13 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, goto map_skb; } - skb = netdev_alloc_skb(netdev, bufsz); + skb = netdev_alloc_skb_ip_align(netdev, bufsz); if (!skb) { /* Better luck next round */ adapter->alloc_rx_buff_failed++; break; } - /* - * Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - buffer_info->skb = skb; map_skb: buffer_info->dma = pci_map_single(pdev, skb->data, @@ -284,21 +277,14 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, cpu_to_le64(ps_page->dma); } - skb = netdev_alloc_skb(netdev, - adapter->rx_ps_bsize0 + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(netdev, + adapter->rx_ps_bsize0); if (!skb) { adapter->alloc_rx_buff_failed++; break; } - /* - * Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - buffer_info->skb = skb; buffer_info->dma = pci_map_single(pdev, skb->data, adapter->rx_ps_bsize0, @@ -359,9 +345,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, struct e1000_buffer *buffer_info; struct sk_buff *skb; unsigned int i; - unsigned int bufsz = 256 - - 16 /* for skb_reserve */ - - NET_IP_ALIGN; + unsigned int bufsz = 256 - 16 /* for skb_reserve */; i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; @@ -373,19 +357,13 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, goto check_page; } - skb = netdev_alloc_skb(netdev, bufsz); + skb = netdev_alloc_skb_ip_align(netdev, bufsz); if (unlikely(!skb)) { /* Better luck next round */ adapter->alloc_rx_buff_failed++; break; } - /* Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - buffer_info->skb = skb; check_page: /* allocate a new page if necessary */ @@ -513,9 +491,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, */ if (length < copybreak) { struct sk_buff *new_skb = - netdev_alloc_skb(netdev, length + NET_IP_ALIGN); + netdev_alloc_skb_ip_align(netdev, length); if (new_skb) { - skb_reserve(new_skb, NET_IP_ALIGN); skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, (skb->data - diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 41bd7aeafd82..7f8fcc2fa748 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -447,7 +447,9 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr, max_index_mask = q_skba->len - 1; for (i = 0; i < fill_wqes; i++) { u64 tmp_addr; - struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev, packet_size); if (!skb) { q_skba->os_skbs = fill_wqes - i; if (q_skba->os_skbs == q_skba->len - 2) { 
@@ -457,7 +459,6 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr, } break; } - skb_reserve(skb, NET_IP_ALIGN); skb_arr[index] = skb; tmp_addr = ehea_map_vaddr(skb->data); @@ -500,7 +501,7 @@ static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes) { return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, nr_of_wqes, EHEA_RWQE2_TYPE, - EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN); + EHEA_RQ2_PKT_SIZE); } @@ -508,7 +509,7 @@ static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes) { return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, nr_of_wqes, EHEA_RWQE3_TYPE, - EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN); + EHEA_MAX_PACKET_SIZE); } static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num) diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index d69d52ed7726..f875751af15e 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -870,19 +870,6 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) dev_kfree_skb_any(buf->os_buf); } -static inline struct sk_buff *enic_rq_alloc_skb(struct net_device *netdev, - unsigned int size) -{ - struct sk_buff *skb; - - skb = netdev_alloc_skb(netdev, size + NET_IP_ALIGN); - - if (skb) - skb_reserve(skb, NET_IP_ALIGN); - - return skb; -} - static int enic_rq_alloc_buf(struct vnic_rq *rq) { struct enic *enic = vnic_dev_priv(rq->vdev); @@ -892,7 +879,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq) unsigned int os_buf_index = 0; dma_addr_t dma_addr; - skb = enic_rq_alloc_skb(netdev, len); + skb = netdev_alloc_skb_ip_align(netdev, len); if (!skb) return -ENOMEM; diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c index 34d0c69e67f7..0c229a5fa82a 100644 --- a/drivers/net/ethoc.c +++ b/drivers/net/ethoc.c @@ -404,10 +404,10 @@ static int ethoc_rx(struct net_device *dev, int limit) if (ethoc_update_rx_stats(priv, &bd) == 0) { int size = bd.stat >> 16; - struct sk_buff *skb = netdev_alloc_skb(dev, size); + struct sk_buff *skb; size -= 4; /* strip the CRC */ - skb_reserve(skb, 2); /* align TCP/IP header */ + skb = netdev_alloc_skb_ip_align(dev, size); if (likely(skb)) { void *src = phys_to_virt(bd.addr); diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index 1d5064a09aca..18bd9fe20d77 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c @@ -406,10 +406,9 @@ that case. /* A few values that may be tweaked. */ /* Size of each temporary Rx buffer, calculated as: * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for - * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum + - * 2 more because we use skb_reserve. + * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum */ -#define PKT_BUF_SZ 1538 +#define PKT_BUF_SZ 1536 /* For now, this is going to be set to the maximum size of an ethernet * packet. Eventually, we may want to make it a variable that is @@ -1151,12 +1150,13 @@ static void hamachi_tx_timeout(struct net_device *dev) } /* Fill in the Rx buffers. Handle allocation failure gracefully. */ for (i = 0; i < RX_RING_SIZE; i++) { - struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz); + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz); hmp->rx_skbuff[i] = skb; if (skb == NULL) break; - skb_reserve(skb, 2); /* 16 byte align the IP header. 
*/ hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn | @@ -1195,7 +1195,7 @@ static void hamachi_init_ring(struct net_device *dev) * card. -KDU */ hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ : - (((dev->mtu+26+7) & ~7) + 2 + 16)); + (((dev->mtu+26+7) & ~7) + 16)); /* Initialize all Rx descriptors. */ for (i = 0; i < RX_RING_SIZE; i++) { diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 428d50475351..2ffe0997b838 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -4934,18 +4934,12 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, } if (!buffer_info->skb) { - skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(netdev, bufsz); if (!skb) { adapter->alloc_rx_buff_failed++; goto no_buffers; } - /* Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - buffer_info->skb = skb; buffer_info->dma = pci_map_single(pdev, skb->data, bufsz, diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index 91024a3cdad3..fad7f348dd1b 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -170,18 +170,12 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, } if (!buffer_info->skb) { - skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(netdev, bufsz); if (!skb) { adapter->alloc_rx_buff_failed++; goto no_buffers; } - /* Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - buffer_info->skb = skb; buffer_info->dma = pci_map_single(pdev, skb->data, bufsz, diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c index 9f7b5d4172b8..63056e7b9e22 100644 --- a/drivers/net/ipg.c +++ b/drivers/net/ipg.c @@ -738,17 +738,12 @@ static int ipg_get_rxbuff(struct net_device *dev, int entry) IPG_DEBUG_MSG("_get_rxbuff\n"); - skb = netdev_alloc_skb(dev, sp->rxsupport_size + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size); if (!skb) { sp->rx_buff[entry] = NULL; return -ENOMEM; } - /* Adjust the data start location within the buffer to - * align IP address field to a 16 byte boundary. - */ - skb_reserve(skb, NET_IP_ALIGN); - /* Associate the receive buffer with the IPG NIC. 
*/ skb->dev = dev; diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index f9f633c134bd..1bd0ca1b0465 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1972,9 +1972,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) * of reassembly being done in the stack */ if (length < copybreak) { struct sk_buff *new_skb = - netdev_alloc_skb(netdev, length + NET_IP_ALIGN); + netdev_alloc_skb_ip_align(netdev, length); if (new_skb) { - skb_reserve(new_skb, NET_IP_ALIGN); skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, (skb->data - @@ -2057,20 +2056,13 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count) goto map_skb; } - skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len - + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len); if (unlikely(!skb)) { /* Better luck next round */ adapter->alloc_rx_buff_failed++; break; } - /* Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - buffer_info->skb = skb; buffer_info->length = adapter->rx_buffer_len; map_skb: diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index eb3abd79e4ee..4c8a44919705 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -616,22 +616,14 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, if (!bi->skb) { struct sk_buff *skb; - skb = netdev_alloc_skb(adapter->netdev, - (rx_ring->rx_buf_len + - NET_IP_ALIGN)); + skb = netdev_alloc_skb_ip_align(adapter->netdev, + rx_ring->rx_buf_len); if (!skb) { adapter->alloc_rx_buff_failed++; goto no_buffers; } - /* - * Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - bi->skb = skb; bi->dma = pci_map_single(pdev, skb->data, rx_ring->rx_buf_len, diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index 127243461a51..6baf3c94b3e8 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c @@ -108,9 +108,8 @@ static int ixpdev_rx(struct net_device *dev, int processed, int budget) if (unlikely(!netif_running(nds[desc->channel]))) goto err; - skb = netdev_alloc_skb(dev, desc->pkt_length + 2); + skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length); if (likely(skb != NULL)) { - skb_reserve(skb, 2); skb_copy_to_linear_data(skb, buf, desc->pkt_length); skb_put(skb, desc->pkt_length); skb->protocol = eth_type_trans(skb, nds[desc->channel]); diff --git a/drivers/net/korina.c b/drivers/net/korina.c index 03199fa10003..a07a5972b57e 100644 --- a/drivers/net/korina.c +++ b/drivers/net/korina.c @@ -400,7 +400,7 @@ static int korina_rx(struct net_device *dev, int limit) dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4); /* Malloc up new buffer. 
*/ - skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2); + skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE); if (!skb_new) break; @@ -417,9 +417,6 @@ static int korina_rx(struct net_device *dev, int limit) if (devcs & ETH_RX_MP) dev->stats.multicast++; - /* 16 bit align */ - skb_reserve(skb_new, 2); - lp->rx_skb[lp->rx_next_done] = skb_new; } diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c index 99e954167fa6..5c45cb58d023 100644 --- a/drivers/net/ks8842.c +++ b/drivers/net/ks8842.c @@ -357,7 +357,7 @@ static void ks8842_rx_frame(struct net_device *netdev, /* check the status */ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { - struct sk_buff *skb = netdev_alloc_skb(netdev, len + 2); + struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len); dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n", __func__, len); @@ -369,9 +369,6 @@ static void ks8842_rx_frame(struct net_device *netdev, if (status & RXSR_MULTICAST) netdev->stats.multicast++; - /* Align socket buffer in 4-byte boundary for - better performance. */ - skb_reserve(skb, 2); data = (u32 *)skb_put(skb, len); ks8842_select_bank(adapter, 17); diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c index 51e11c3e53e1..5b24c67de25e 100644 --- a/drivers/net/lib82596.c +++ b/drivers/net/lib82596.c @@ -470,11 +470,11 @@ static inline int init_rx_bufs(struct net_device *dev) for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) { dma_addr_t dma_addr; - struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4); + struct sk_buff *skb; + skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); if (skb == NULL) return -1; - skb_reserve(skb, 2); dma_addr = dma_map_single(dev->dev.parent, skb->data, PKT_BUF_SZ, DMA_FROM_DEVICE); rbd->v_next = rbd+1; @@ -697,12 +697,12 @@ static inline int i596_rx(struct net_device *dev) (dma_addr_t)SWAP32(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE); /* Get fresh skbuff to replace filled one. */ - newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4); + newskb = netdev_alloc_skb_ip_align(dev, + PKT_BUF_SZ); if (newskb == NULL) { skb = NULL; /* drop pkt */ goto memory_squeeze; } - skb_reserve(newskb, 2); /* Pass up the skb already on the Rx ring. */ skb_put(skb, pkt_len); @@ -716,7 +716,7 @@ static inline int i596_rx(struct net_device *dev) rbd->b_data = SWAP32(dma_addr); DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); } else - skb = netdev_alloc_skb(dev, pkt_len + 2); + skb = netdev_alloc_skb_ip_align(dev, pkt_len); memory_squeeze: if (skb == NULL) { /* XXX tulip.c can defer packets here!! 
*/ @@ -730,7 +730,6 @@ memory_squeeze: dma_sync_single_for_cpu(dev->dev.parent, (dma_addr_t)SWAP32(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE); - skb_reserve(skb, 2); memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len); dma_sync_single_for_device(dev->dev.parent, (dma_addr_t)SWAP32(rbd->b_data), diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 50c6a3cfe439..97b170448ce6 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -3555,13 +3555,12 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff, if (pkt_size >= rx_copybreak) goto out; - skb = netdev_alloc_skb(tp->dev, pkt_size + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); if (!skb) goto out; pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size, PCI_DMA_FROMDEVICE); - skb_reserve(skb, NET_IP_ALIGN); skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size); *sk_buff = skb; done = true; diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index 8d6030022d14..b7e0eb40a8bd 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c @@ -793,7 +793,7 @@ static void _sc92031_rx_tasklet(struct net_device *dev) rx_len -= rx_size_align + 4; - skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(dev, pkt_size); if (unlikely(!skb)) { if (printk_ratelimit()) printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n", @@ -801,8 +801,6 @@ static void _sc92031_rx_tasklet(struct net_device *dev) goto next; } - skb_reserve(skb, NET_IP_ALIGN); - if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) { memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset), rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset); diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index f4dfd1f679a9..6b364a6c6c60 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c @@ -365,11 +365,10 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp } skb_reserve(newskb, 2); } else { - skb = netdev_alloc_skb(dev, len + 2); - if (skb) { - skb_reserve(skb, 2); + skb = netdev_alloc_skb_ip_align(dev, len); + if (skb) skb_copy_to_linear_data(skb, rd->skb->data, len); - } + newskb = rd->skb; } memory_squeeze: diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index 7cc9898f4e00..31233b4c44a0 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c @@ -536,13 +536,12 @@ static bool sis190_try_rx_copy(struct sis190_private *tp, if (pkt_size >= rx_copybreak) goto out; - skb = netdev_alloc_skb(tp->dev, pkt_size + 2); + skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); if (!skb) goto out; pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz, PCI_DMA_FROMDEVICE); - skb_reserve(skb, 2); skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); *sk_buff = skb; done = true; diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 01f6811f1324..be28ebb3811c 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -3070,11 +3070,10 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, goto error; if (len < RX_COPY_THRESHOLD) { - skb = netdev_alloc_skb(dev, len + 2); + skb = netdev_alloc_skb_ip_align(dev, len); if (!skb) goto resubmit; - skb_reserve(skb, 2); pci_dma_sync_single_for_cpu(skge->hw->pdev, pci_unmap_addr(e, mapaddr), len, PCI_DMA_FROMDEVICE); @@ -3085,11 +3084,11 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, skge_rx_reuse(e, skge->rx_buf_size); } else { struct sk_buff *nskb; - nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN); + + nskb = netdev_alloc_skb_ip_align(dev, 
skge->rx_buf_size); if (!nskb) goto resubmit; - skb_reserve(nskb, NET_IP_ALIGN); pci_unmap_single(skge->hw->pdev, pci_unmap_addr(e, mapaddr), pci_unmap_len(e, maplen), diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 2ab5c39f33ca..3a449d012d4b 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -2191,9 +2191,8 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2, { struct sk_buff *skb; - skb = netdev_alloc_skb(sky2->netdev, length + 2); + skb = netdev_alloc_skb_ip_align(sky2->netdev, length); if (likely(skb)) { - skb_reserve(skb, 2); pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr, length, PCI_DMA_FROMDEVICE); skb_copy_from_linear_data(re->skb, skb->data, length); diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index 3d31b47332bb..16f23f84920b 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c @@ -1549,7 +1549,8 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) if (tmpCStat & TLAN_CSTAT_EOC) eoc = 1; - new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 ); + new_skb = netdev_alloc_skb_ip_align(dev, + TLAN_MAX_FRAME_SIZE + 5); if ( !new_skb ) goto drop_and_reuse; @@ -1563,7 +1564,6 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) skb->protocol = eth_type_trans( skb, dev ); netif_rx( skb ); - skb_reserve( new_skb, NET_IP_ALIGN ); head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, @@ -1967,13 +1967,12 @@ static void TLan_ResetLists( struct net_device *dev ) list->cStat = TLAN_CSTAT_READY; list->frameSize = TLAN_MAX_FRAME_SIZE; list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; - skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 ); + skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); if ( !skb ) { pr_err("TLAN: out of memory for received data.\n" ); break; } - skb_reserve( skb, NET_IP_ALIGN ); list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, TLAN_MAX_FRAME_SIZE, diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index 7030bd5e9848..a69c4a48bab9 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c @@ -802,13 +802,11 @@ static int tsi108_refill_rx(struct net_device *dev, int budget) int rx = data->rxhead; struct sk_buff *skb; - data->rxskbs[rx] = skb = netdev_alloc_skb(dev, - TSI108_RXBUF_SIZE + 2); + skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE); + data->rxskbs[rx] = skb; if (!skb) break; - skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */ - data->rxring[rx].buf0 = dma_map_single(NULL, skb->data, TSI108_RX_SKB_SIZE, DMA_FROM_DEVICE); @@ -1356,7 +1354,7 @@ static int tsi108_open(struct net_device *dev) for (i = 0; i < TSI108_RXRING_LEN; i++) { struct sk_buff *skb; - skb = netdev_alloc_skb(dev, TSI108_RXBUF_SIZE + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE); if (!skb) { /* Bah. No memory for now, but maybe we'll get * some more later. 
@@ -1370,8 +1368,6 @@ static int tsi108_open(struct net_device *dev) } data->rxskbs[i] = skb; - /* Align the payload on a 4-byte boundary */ - skb_reserve(skb, 2); data->rxskbs[i] = skb; data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data); data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT; diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index 1fd70583be44..4535e89dfff1 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -1484,15 +1484,15 @@ static int rhine_rx(struct net_device *dev, int limit) } } } else { - struct sk_buff *skb; + struct sk_buff *skb = NULL; /* Length should omit the CRC */ int pkt_len = data_size - 4; /* Check if the packet is long enough to accept without copying to a minimally-sized skbuff. */ - if (pkt_len < rx_copybreak && - (skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) { - skb_reserve(skb, NET_IP_ALIGN); /* 16 byte align the IP header */ + if (pkt_len < rx_copybreak) + skb = netdev_alloc_skb_ip_align(dev, pkt_len); + if (skb) { pci_dma_sync_single_for_cpu(rp->pdev, rp->rx_skbuff_dma[entry], rp->rx_buf_sz, diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index e04e5bee005c..144db6395c95 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -1949,10 +1949,9 @@ static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size, if (pkt_size < rx_copybreak) { struct sk_buff *new_skb; - new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2); + new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size); if (new_skb) { new_skb->ip_summed = rx_skb[0]->ip_summed; - skb_reserve(new_skb, 2); skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size); *rx_skb = new_skb; ret = 0; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8d009760277c..556512dc6072 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -283,13 +283,12 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp) do { struct skb_vnet_hdr *hdr; - skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); if (unlikely(!skb)) { oom = true; break; } - skb_reserve(skb, NET_IP_ALIGN); skb_put(skb, MAX_PACKET_LEN); hdr = skb_vnet_hdr(skb); @@ -344,14 +343,12 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) do { skb_frag_t *f; - skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN); + skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); if (unlikely(!skb)) { oom = true; break; } - skb_reserve(skb, NET_IP_ALIGN); - f = &skb_shinfo(skb)->frags[0]; f->page = get_a_page(vi, gfp); if (!f->page) { diff --git a/net/core/dev.c b/net/core/dev.c index 510ff205d5db..28b0b9e992a0 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2604,20 +2604,13 @@ EXPORT_SYMBOL(napi_reuse_skb); struct sk_buff *napi_get_frags(struct napi_struct *napi) { - struct net_device *dev = napi->dev; struct sk_buff *skb = napi->skb; if (!skb) { - skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN); - if (!skb) - goto out; - - skb_reserve(skb, NET_IP_ALIGN); - - napi->skb = skb; + skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD); + if (skb) + napi->skb = skb; } - -out: return skb; } EXPORT_SYMBOL(napi_get_frags); |
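
Taken together, the driver hunks above apply the same mechanical transformation; a condensed, self-contained illustration of the before/after shape follows (example_refill_rx, rx_buf_sz and slot are names invented for this sketch, not taken from any driver in the diff):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative RX-refill path showing the idiom this patch converts. */
static int example_refill_rx(struct net_device *dev, unsigned int rx_buf_sz,
			     struct sk_buff **slot)
{
	struct sk_buff *skb;

	/* Old idiom removed throughout the patch:
	 *
	 *	skb = netdev_alloc_skb(dev, rx_buf_sz + NET_IP_ALIGN);
	 *	if (!skb)
	 *		return -ENOMEM;
	 *	skb_reserve(skb, NET_IP_ALIGN);
	 */

	/* New idiom: one call, same buffer size and data alignment. */
	skb = netdev_alloc_skb_ip_align(dev, rx_buf_sz);
	if (!skb)
		return -ENOMEM;

	*slot = skb;
	return 0;
}

Beyond removing repeated boilerplate, this keeps the alignment policy in one place: per the sketch above, architectures that define NET_IP_ALIGN as 0 skip the skb_reserve() call altogether, rather than relying on each driver to do so.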