author	Jonathan Lemon <jonathan.lemon@gmail.com>	2019-07-30 17:40:33 +0300
committer	David S. Miller <davem@davemloft.net>	2019-07-31 00:21:32 +0300
commit	b54c9d5bd6e38edac9ce3a3f95f14a1292b5268d (patch)
tree	b09848a92e53ce9533b4df27953ef59874080b53 /drivers/net
parent	7240b60c98d6309363a9f8d5a4ecd5b0626f2aff (diff)
download	linux-b54c9d5bd6e38edac9ce3a3f95f14a1292b5268d.tar.xz
net: Use skb_frag_off accessors
Use accessor functions for skb fragment's page_offset instead
of direct references, in preparation for bvec conversion.
Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
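
For context, the skb_frag_off(), skb_frag_off_set() and skb_frag_off_add() helpers used throughout this patch come from the parent commit (7240b60c98d6). The sketch below shows roughly what they do at this point in the series; it assumes the pre-bvec skb_frag_t layout that still has a page_offset member, and the exact definitions in include/linux/skbuff.h may differ in detail.

    /* Sketch only: thin wrappers around the fragment offset, assuming
     * skb_frag_t still carries a page_offset field (pre-bvec layout).
     */
    static inline unsigned int skb_frag_off(const skb_frag_t *frag)
    {
            return frag->page_offset;       /* read the offset within the page */
    }

    static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
    {
            frag->page_offset = offset;     /* set the offset outright */
    }

    static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
    {
            frag->page_offset += delta;     /* advance the offset */
    }

With these wrappers in place, a driver that used to write frag->page_offset += pull_len calls skb_frag_off_add(frag, pull_len) instead, and reads of frag->page_offset become skb_frag_off(frag), so the later conversion of skb_frag_t to a bio_vec only has to touch the accessors rather than every driver.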
Diffstat (limited to 'drivers/net')
23 files changed, 42 insertions(+), 42 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ac61c9352535..c23fbb34f0e9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -957,7 +957,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 	frag = &skb_shinfo(skb)->frags[0];
 	skb_frag_size_sub(frag, payload);
-	frag->page_offset += payload;
+	skb_frag_off_add(frag, payload);
 	skb->data_len -= payload;
 	skb->tail += payload;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index c0266a87794c..4ab57d33a87e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1594,7 +1594,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
 		size = skb_frag_size(frag);
 		dma_addr = dma_map_page_attrs(&nic->pdev->dev,
 					      skb_frag_page(frag),
-					      frag->page_offset, size,
+					      skb_frag_off(frag), size,
 					      DMA_TO_DEVICE,
 					      DMA_ATTR_SKIP_CPU_SYNC);
 		if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 310a232e00f0..6dabbf1502c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2182,7 +2182,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	rx_frag += nr_frags;
 	__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
-	rx_frag->page_offset = sd->pg_chunk.offset + offset;
+	skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
 	skb_frag_size_set(rx_frag, len);
 	skb->len += len;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e00a94a03879..1c9883019767 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2346,8 +2346,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
 		memcpy(skb->data, start, hdr_len);
 		skb_shinfo(skb)->nr_frags = 1;
 		skb_frag_set_page(skb, 0, page_info->page);
-		skb_shinfo(skb)->frags[0].page_offset =
-					page_info->page_offset + hdr_len;
+		skb_frag_off_set(&skb_shinfo(skb)->frags[0],
+				 page_info->page_offset + hdr_len);
 		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
 				  curr_frag_len - hdr_len);
 		skb->data_len = curr_frag_len - hdr_len;
@@ -2372,8 +2372,8 @@
 			/* Fresh page */
 			j++;
 			skb_frag_set_page(skb, j, page_info->page);
-			skb_shinfo(skb)->frags[j].page_offset =
-						page_info->page_offset;
+			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+					 page_info->page_offset);
 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
 			skb_shinfo(skb)->nr_frags++;
 		} else {
@@ -2454,8 +2454,8 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
 			/* First frag or Fresh page */
 			j++;
 			skb_frag_set_page(skb, j, page_info->page);
-			skb_shinfo(skb)->frags[j].page_offset =
-						page_info->page_offset;
+			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+					 page_info->page_offset);
 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
 		} else {
 			put_page(page_info->page);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 5fad73b2e123..3981c06f082f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -501,7 +501,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	frag = skb_shinfo(skb)->frags;
 	for (i = 0; i < nr_frags; i++, frag++) {
-		if (!IS_ALIGNED(frag->page_offset, 4)) {
+		if (!IS_ALIGNED(skb_frag_off(frag), 4)) {
 			is_aligned = 0;
 			break;
 		}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 3da680073265..81a05ea38237 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1485,7 +1485,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 			memcpy(dst + cur,
 			       page_address(skb_frag_page(frag)) +
-			       frag->page_offset, skb_frag_size(frag));
+			       skb_frag_off(frag), skb_frag_size(frag));
 			cur += skb_frag_size(frag);
 		}
 	} else {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f162252f01b5..e3f29dc8b290 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3306,7 +3306,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
 		 * descriptor associated with the fragment.
 		 */
 		if (stale_size > I40E_MAX_DATA_PER_TXD) {
-			int align_pad = -(stale->page_offset) &
+			int align_pad = -(skb_frag_off(stale)) &
 					(I40E_MAX_READ_REQ_SIZE - 1);
 			sum -= align_pad;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index fae7cd1c618a..7a30d5d5ef53 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2205,7 +2205,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb)
 		 * descriptor associated with the fragment.
 		 */
 		if (stale_size > IAVF_MAX_DATA_PER_TXD) {
-			int align_pad = -(stale->page_offset) &
+			int align_pad = -(skb_frag_off(stale)) &
 					(IAVF_MAX_READ_REQ_SIZE - 1);
 			sum -= align_pad;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e12d23d1fa64..dc7b128c780e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1807,7 +1807,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
 	/* update all of the pointers */
 	skb_frag_size_sub(frag, pull_len);
-	frag->page_offset += pull_len;
+	skb_frag_off_add(frag, pull_len);
 	skb->data_len -= pull_len;
 	skb->tail += pull_len;
 }
@@ -1844,7 +1844,7 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 		dma_sync_single_range_for_cpu(rx_ring->dev,
 					      IXGBE_CB(skb)->dma,
-					      frag->page_offset,
+					      skb_frag_off(frag),
 					      skb_frag_size(frag),
 					      DMA_FROM_DEVICE);
 	}
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 9c3ab00643bd..6d52cf5ce20e 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2040,8 +2040,8 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 		ctxbi = txbi + ((idx + i + 2) & (mask));
 		ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
-				      skb_frag_page(frag),
-				      frag->page_offset, skb_frag_size(frag), hidma);
+				      skb_frag_page(frag), skb_frag_off(frag),
+				      skb_frag_size(frag), hidma);
 		if (ret) {
 			jme_drop_tx_map(jme, idx, i);
 			goto out;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 88ea5ac83c93..82ea55ae5053 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -659,7 +659,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
 	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
-		if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
+		if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
 			return 1;
 	}
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 9ead6ecb7586..99eaadba555f 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1306,8 +1306,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
 		skb->len -= VLAN_HLEN;
 		skb->data_len -= VLAN_HLEN;
 		frag = skb_shinfo(skb)->frags;
-		frag->page_offset += VLAN_HLEN;
-		skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN);
+		skb_frag_off_add(frag, VLAN_HLEN);
+		skb_frag_size_sub(frag, VLAN_HLEN);
 	}
 }
@@ -1364,7 +1364,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
 	}
 	/* remove padding */
-	rx_frags[0].page_offset += MXGEFW_PAD;
+	skb_frag_off_add(&rx_frags[0], MXGEFW_PAD);
 	skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD);
 	len -= MXGEFW_PAD;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 31ec56091a5d..65e81ec1b314 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -274,7 +274,7 @@ static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
 		vaddr = kmap_atomic(skb_frag_page(f));
-		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
+		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
 					   skb_frag_size(f), copy_buf);
 		kunmap_atomic(vaddr);
 	}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 6fc05c106afc..c91876f8c536 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -2034,7 +2034,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		__skb_frag_set_page(frag, page->buffer);
 		__skb_frag_ref(frag);
-		frag->page_offset = off;
+		skb_frag_off_set(frag, off);
 		skb_frag_size_set(frag, hlen - swivel);
 		/* any more data? */
@@ -2058,7 +2058,7 @@
 		__skb_frag_set_page(frag, page->buffer);
 		__skb_frag_ref(frag);
-		frag->page_offset = 0;
+		skb_frag_off_set(frag, 0);
 		skb_frag_size_set(frag, hlen);
 		RX_USED_ADD(page, hlen + cp->crc_size);
 	}
@@ -2816,7 +2816,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 		mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
 					   DMA_TO_DEVICE);
-		tabort = cas_calc_tabort(cp, fragp->page_offset, len);
+		tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
 		if (unlikely(tabort)) {
 			void *addr;
@@ -2827,7 +2827,7 @@
 			addr = cas_page_map(skb_frag_page(fragp));
 			memcpy(tx_tiny_buf(cp, ring, entry),
-			       addr + fragp->page_offset + len - tabort,
+			       addr + skb_frag_off(fragp) + len - tabort,
 			       tabort);
 			cas_page_unmap(addr);
 			mapping = tx_tiny_map(cp, ring, entry, tentry);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0bc5863bffeb..f5fd1f3c07cc 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6695,7 +6695,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 		len = skb_frag_size(frag);
 		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
-					    frag->page_offset, len,
+					    skb_frag_off(frag), len,
 					    DMA_TO_DEVICE);
 		rp->tx_buffs[prod].skb = NULL;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index baa3088b475c..646e67236b65 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1088,7 +1088,7 @@ static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
 		vaddr = kmap_atomic(skb_frag_page(f));
 		blen = skb_frag_size(f);
 		blen += 8 - (blen & 7);
-		err = ldc_map_single(lp, vaddr + f->page_offset,
+		err = ldc_map_single(lp, vaddr + skb_frag_off(f),
 				     blen, cookies + nc, ncookies - nc,
 				     map_perm);
 		kunmap_atomic(vaddr);
@@ -1124,7 +1124,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-		docopy |= f->page_offset & 7;
+		docopy |= skb_frag_off(f) & 7;
 	}
 	if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
 	    skb_tailroom(skb) < pad ||
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 642843945031..1b2702f74455 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1116,7 +1116,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		struct page *page = skb_frag_page(frag);
-		u32 page_offset = frag->page_offset;
+		u32 page_offset = skb_frag_off(frag);
 		u32 buf_len = skb_frag_size(frag);
 		dma_addr_t desc_dma;
 		u32 desc_dma_32;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3544e1991579..86884c863013 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -435,7 +435,7 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 		slots_used += fill_pg_buf(skb_frag_page(frag),
-					  frag->page_offset,
+					  skb_frag_off(frag),
 					  skb_frag_size(frag),
 					  &pb[slots_used]);
 	}
 	return slots_used;
@@ -449,7 +449,7 @@ static int count_skb_frag_slots(struct sk_buff *skb)
 	for (i = 0; i < frags; i++) {
 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 		unsigned long size = skb_frag_size(frag);
-		unsigned long offset = frag->page_offset;
+		unsigned long offset = skb_frag_off(frag);
 		/* Skip unused frames from start of page */
 		offset &= ~PAGE_MASK;
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index fcf31335a8b6..dacb4f680fd4 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1005,7 +1005,7 @@ static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
 	const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
 	*len = skb_frag_size(frag);
-	return kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+	return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
 }
 
 static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ace7ffaf3913..58952a79b05f 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1328,7 +1328,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
 		total_len += skb_frag_size(f);
 		sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f),
-			    f->page_offset);
+			    skb_frag_off(f));
 	}
 	urb->transfer_buffer_length = total_len;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 03feaeae89cd..216acf37ca7c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -662,7 +662,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
 	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 	__skb_frag_set_page(frag, rbi->page);
-	frag->page_offset = 0;
+	skb_frag_off_set(frag, 0);
 	skb_frag_size_set(frag, rcd->len);
 	skb->data_len += rcd->len;
 	skb->truesize += PAGE_SIZE;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a96c5c2a2c5a..3ef07b63613e 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -136,12 +136,12 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
 
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
-	return (u16)frag->page_offset;
+	return (u16)skb_frag_off(frag);
 }
 
 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
 {
-	frag->page_offset = pending_idx;
+	skb_frag_off_set(frag, pending_idx);
 }
 
 static inline pending_ring_idx_t pending_index(unsigned i)
@@ -1068,7 +1068,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 		offset += len;
 		__skb_frag_set_page(&frags[i], page);
-		frags[i].page_offset = 0;
+		skb_frag_off_set(&frags[i], 0);
 		skb_frag_size_set(&frags[i], len);
 	}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8d33970a2950..b930d5f95222 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -531,7 +531,7 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
 	for (i = 0; i < frags; i++) {
 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 		unsigned long size = skb_frag_size(frag);
-		unsigned long offset = frag->page_offset;
+		unsigned long offset = skb_frag_off(frag);
 		/* Skip unused frames from start of page */
 		offset &= ~PAGE_MASK;
@@ -674,8 +674,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	/* Requests for all the frags. */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		tx = xennet_make_txreqs(queue, tx, skb,
-					skb_frag_page(frag), frag->page_offset,
+		tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
+					skb_frag_off(frag),
 					skb_frag_size(frag));
 	}
@@ -1040,7 +1040,7 @@ err:
 		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
 			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
-		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
+		skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
 		skb->data_len = rx->status;
 		skb->len += rx->status;