author    Maciej Fijalkowski <maciej.fijalkowski@intel.com>  2019-02-13 21:51:03 +0300
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>  2019-03-25 19:22:21 +0300
commit    1857ca42a734d41261f4c30e5f625fa7e2b70b0d (patch)
tree      a02fd255e935d8519c5a18415158d4e99e854cd1 /drivers/net
parent    bbb97808a0eff71fd841d297dba8cd3ebc4d700d (diff)
download  linux-1857ca42a734d41261f4c30e5f625fa7e2b70b0d.tar.xz
ice: Get rid of ice_pull_tail
Instead of adding a frag and later, when dealing with the EOP frame, accessing that frag in order to copy the headers onto the linear part of the skb, we can do this in ice_add_rx_frag in the case where data_len is still 0 and the frame won't fit onto the linear part as a whole.

The function comment of ice_pull_tail was a bit misleading because of the optimizations it mentioned (dropping a frag / maintaining an accurate truesize of the skb); it seems that this part of the logic was dropped and the comment was not updated to reflect the change.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
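For readers outside the kernel tree, the following is a minimal userspace sketch of the decision flow that ice_add_rx_frag implements after this patch: grow an existing frag if the skb is already nonlinear, copy a small frame whole into the linear area, otherwise pull only the headers into the linear area and attach the remainder as a tail frag. The toy_* names, HDR_SIZE, and the single-frag bookkeeping are illustrative stand-ins, not the driver's or the kernel's API.

/*
 * Userspace model of the post-patch ice_add_rx_frag() flow; not kernel code.
 * HDR_SIZE stands in for ICE_RX_HDR_SIZE, toy_get_headlen() for
 * eth_get_headlen(), and struct toy_skb for the sk_buff fields used here.
 */
#include <stdio.h>
#include <string.h>

#define HDR_SIZE 256                    /* stand-in for ICE_RX_HDR_SIZE */

struct toy_skb {
	unsigned char linear[HDR_SIZE];
	unsigned int len;               /* bytes in the linear area */
	unsigned int data_len;          /* bytes held in frags */
	const unsigned char *frag;      /* single frag for the example */
	unsigned int frag_len;
};

/* crude stand-in for eth_get_headlen(): pretend the headers are 64 bytes */
static unsigned int toy_get_headlen(const unsigned char *va, unsigned int max)
{
	(void)va;
	return max < 64 ? max : 64;
}

static void toy_add_rx_frag(struct toy_skb *skb, const unsigned char *va,
			    unsigned int size)
{
	if (skb->data_len)              /* already nonlinear: just add the frag */
		goto add_tail_frag;

	if (size <= HDR_SIZE - skb->len) {
		/* small frame: copy it whole into the linear area */
		memcpy(skb->linear + skb->len, va, size);
		skb->len += size;
		return;
	}

	/* copy only the headers, then hand off the rest as a frag */
	unsigned int pull_len = toy_get_headlen(va, HDR_SIZE);

	memcpy(skb->linear + skb->len, va, pull_len);
	skb->len += pull_len;
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb->frag = va;
	skb->frag_len += size;
	skb->data_len += size;
}

int main(void)
{
	static unsigned char frame[1500];
	struct toy_skb skb = { .len = 0 };

	toy_add_rx_frag(&skb, frame, sizeof(frame));
	printf("linear=%u frag=%u\n", skb.len, skb.data_len);
	return 0;
}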
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 70
1 file changed, 24 insertions, 46 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 50321d39e463..becee476002d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -562,13 +562,17 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif /* PAGE_SIZE < 8192) */
struct page *page = rx_buf->page;
+ unsigned int pull_len;
+ unsigned char *va;
+
+ va = page_address(page) + rx_buf->page_offset;
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
/* will the data fit in the skb we allocated? if so, just
* copy it as it is pretty small anyway
*/
- if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buf->page_offset;
-
+ if (size <= ICE_RX_HDR_SIZE) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is not reserved, we can reuse buffer as-is */
@@ -580,8 +584,24 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
return false;
}
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+ /* the header from the frame that we're adding as a frag was added to
+ * linear part of skb so move the pointer past that header and
+ * reduce the size of data
+ */
+ va += pull_len;
+ size -= pull_len;
+
+add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buf->page_offset, size, truesize);
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
return ice_can_reuse_rx_page(rx_buf, truesize);
}
@@ -692,44 +712,6 @@ ice_fetch_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
}
/**
- * ice_pull_tail - ice specific version of skb_pull_tail
- * @skb: pointer to current skb being adjusted
- *
- * This function is an ice specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void ice_pull_tail(struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int pull_len;
- unsigned char *va;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
* ice_cleanup_headers - Correct empty headers
* @skb: pointer to current skb being fixed
*
@@ -743,10 +725,6 @@ static void ice_pull_tail(struct sk_buff *skb)
*/
static bool ice_cleanup_headers(struct sk_buff *skb)
{
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- ice_pull_tail(skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;