Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_txrx.c | 46
1 file changed, 25 insertions(+), 21 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index fe5bbabbb41e..49fc38094185 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -219,7 +219,7 @@ static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
 
 /**
  * ice_setup_tx_ring - Allocate the Tx descriptors
- * @tx_ring: the tx ring to set up
+ * @tx_ring: the Tx ring to set up
  *
  * Return 0 on success, negative on error
  */
@@ -324,7 +324,7 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
 
 /**
  * ice_setup_rx_ring - Allocate the Rx descriptors
- * @rx_ring: the rx ring to set up
+ * @rx_ring: the Rx ring to set up
  *
  * Return 0 on success, negative on error
  */
@@ -377,7 +377,7 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
 	rx_ring->next_to_alloc = val;
 
 	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
+	 * know there are new descriptors to fetch. (Only
 	 * applicable for weak-ordered memory model archs,
 	 * such as IA-64).
 	 */
@@ -586,7 +586,7 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
 
 /**
  * ice_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
+ * @rx_ring: Rx descriptor ring to store buffers on
  * @old_buf: donor buffer to have page reused
  *
  * Synchronizes page for reuse by the adapter
@@ -609,7 +609,7 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
 
 /**
  * ice_fetch_rx_buf - Allocate skb and populate it
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
  * @rx_desc: descriptor containing info written by hardware
  *
  * This function allocates an skb on the fly, and populates it with the page
@@ -686,7 +686,7 @@ static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
  * ice_pull_tail - ice specific version of skb_pull_tail
  * @skb: pointer to current skb being adjusted
  *
- * This function is an ice specific version of __pskb_pull_tail.  The
+ * This function is an ice specific version of __pskb_pull_tail. The
  * main difference between this version and the original function is that
  * this function can make several assumptions about the state of things
  * that allow for significant optimizations versus the standard function.
@@ -768,7 +768,7 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
  * @rx_desc: Rx descriptor for current buffer
  * @skb: Current socket buffer containing buffer in progress
  *
- * This function updates next to clean.  If the buffer is an EOP buffer
+ * This function updates next to clean. If the buffer is an EOP buffer
  * this function exits returning false, otherwise it will place the
  * sk_buff in the next buffer to be chained and return true indicating
  * that this is in fact a non-EOP buffer.
@@ -904,7 +904,7 @@ checksum_fail:
 
 /**
  * ice_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_ring: Rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
  * @skb: pointer to current skb being populated
  * @ptype: the packet type decoded by hardware
@@ -927,7 +927,7 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
 
 /**
  * ice_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
+ * @rx_ring: Rx ring in play
  * @skb: packet to send up
  * @vlan_tag: vlan tag for packet
  *
@@ -946,11 +946,11 @@ static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
 
 /**
  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
  * @budget: Total limit on number of packets to process
  *
  * This function provides a "bounce buffer" approach to Rx interrupt
- * processing.  The advantage to this is that on systems that have
+ * processing. The advantage to this is that on systems that have
  * expensive overhead for IOMMU access this provides a means of avoiding
  * it by maintaining the mapping of the page to the system.
  *
@@ -1103,11 +1103,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
 	if (!clean_complete)
 		return budget;
 
-	/* Work is done so exit the polling mode and re-enable the interrupt */
-	napi_complete_done(napi, work_done);
-	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-		ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
-	return 0;
+	/* Exit the polling mode, but don't re-enable interrupts if stack might
+	 * poll us due to busy-polling
+	 */
+	if (likely(napi_complete_done(napi, work_done)))
+		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+			ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
+
+	return min(work_done, budget - 1);
 }
 
 /* helper function for building cmd/type/offset */
@@ -1122,7 +1125,7 @@ build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
 }
 
 /**
- * __ice_maybe_stop_tx - 2nd level check for tx stop conditions
+ * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
  * @tx_ring: the ring to be checked
  * @size: the size buffer we want to assure is available
  *
@@ -1145,7 +1148,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
 }
 
 /**
- * ice_maybe_stop_tx - 1st level check for tx stop conditions
+ * ice_maybe_stop_tx - 1st level check for Tx stop conditions
  * @tx_ring: the ring to be checked
  * @size: the size buffer we want to assure is available
  *
@@ -1155,6 +1158,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
 {
 	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
 		return 0;
+
 	return __ice_maybe_stop_tx(tx_ring, size);
 }
 
@@ -1552,7 +1556,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
  * Finally, we add one to round up. Because 256 isn't an exact multiple of
  * 3, we'll underestimate near each multiple of 12K. This is actually more
  * accurate as we have 4K - 1 of wiggle room that we can fit into the last
- * segment.  For our purposes this is accurate out to 1M which is orders of
+ * segment. For our purposes this is accurate out to 1M which is orders of
  * magnitude greater than our largest possible GSO size.
  *
  * This would then be implemented as:
@@ -1568,7 +1572,7 @@ static unsigned int ice_txd_use_count(unsigned int size)
 }
 
 /**
- * ice_xmit_desc_count - calculate number of tx descriptors needed
+ * ice_xmit_desc_count - calculate number of Tx descriptors needed
  * @skb: send buffer
  *
  * Returns number of data descriptors needed for this skb.
@@ -1620,7 +1624,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
 	nr_frags -= ICE_MAX_BUF_TXD - 2;
 	frag = &skb_shinfo(skb)->frags[0];
 
-	/* Initialize size to the negative value of gso_size minus 1.  We
+	/* Initialize size to the negative value of gso_size minus 1. We
 	 * use this as the worst case scenerio in which the frag ahead
 	 * of us only provides one byte which is why we are limited to 6
 	 * descriptors for a single transmit as the header and previous
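Note: the only behavioral change in this diff is the ice_napi_poll() hunk. The driver now re-arms its interrupt only when napi_complete_done() returns true (it returns false while busy polling still owns the NAPI instance), and it reports min(work_done, budget - 1) so a completed poll never claims the full budget. Below is a minimal sketch of that contract, not the ice code itself; every my_*() name is a hypothetical placeholder, and only napi_complete_done(), likely() and min() are real kernel interfaces.

/* Sketch of the NAPI completion contract the ice_napi_poll() hunk adopts.
 * my_clean_rings() and my_reenable_irq() are hypothetical placeholders.
 */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = my_clean_rings(napi, budget);

	/* Budget exhausted: report the full budget so the core keeps
	 * polling and the device interrupt stays masked.
	 */
	if (work_done == budget)
		return budget;

	/* napi_complete_done() returns false when busy polling will keep
	 * driving this instance; re-arm the interrupt only when it
	 * returns true.
	 */
	if (likely(napi_complete_done(napi, work_done)))
		my_reenable_irq(napi);

	/* A completed poll must never report a value equal to budget. */
	return min(work_done, budget - 1);
}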
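Note: the comment block touched near line 1552 belongs to ice_txd_use_count(), which estimates Tx descriptors by replacing a divide-by-12K with a multiply-and-shift and claims the shortcut is "accurate out to 1M". The small userspace program below sanity-checks one concrete reading of that claim. The hunk only shows the tail of the comment, so treat the exact formula used here, the i40e-style ((size * 85) >> 20) + 1, as an assumption, and the file and helper names as made up for the test.

/* check_txd_use_count.c - exercise the divide-by-12K shortcut described in
 * the ice_txd_use_count() comment: ((size * 85) >> 20) + 1 versus the plain
 * size / 12K + 1 it replaces.  Build with: cc -O2 check_txd_use_count.c
 */
#include <stdio.h>

static unsigned int txd_count_fast(unsigned int size)
{
	return ((size * 85) >> 20) + 1;	/* multiply-and-shift shortcut */
}

static unsigned int txd_count_slow(unsigned int size)
{
	return size / (12 * 1024) + 1;	/* the division it stands in for */
}

int main(void)
{
	unsigned int first_short = 0;

	for (unsigned int size = 1; size <= 1024 * 1024; size++) {
		unsigned int fast = txd_count_fast(size);
		unsigned int slow = txd_count_slow(size);

		/* Near multiples of 12K the shortcut may come up one short,
		 * which the 4K - 1 of per-descriptor slack absorbs; it must
		 * never exceed the slow count or lag it by more than one.
		 */
		if (fast > slow || slow - fast > 1) {
			printf("mismatch at size=%u: fast=%u slow=%u\n",
			       size, fast, slow);
			return 1;
		}
		if (!first_short && fast != slow)
			first_short = size;
	}

	printf("OK up to 1M, first underestimate at size=%u\n", first_short);
	return 0;
}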