author     Joe Damato <jdamato@fastly.com>            2021-12-17 22:35:19 +0300
committer  Tony Nguyen <anthony.l.nguyen@intel.com>   2022-02-08 19:21:52 +0300
commit     b76bc129839d65fa8dbeefd3581dacd54596706f (patch)
tree       ab43e9b8d151e8b35daa2ca8b9b1297fee7f0e2b /drivers/net/ethernet/intel/i40e/i40e_txrx.c
parent     cb963b989755ed49f002b7b7c8c7a9c744e21bb0 (diff)
download   linux-b76bc129839d65fa8dbeefd3581dacd54596706f.tar.xz
i40e: Add a stat for tracking busy rx pages
In some cases a page cannot be reused by i40e because it is still busy. Add
a counter for this event.
Busy page count is accessible via ethtool.
Signed-off-by: Joe Damato <jdamato@fastly.com>
Tested-by: Dave Switzer <david.switzer@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
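
The commit message above notes that the busy page count is exposed through ethtool, but the diffstat below is limited to i40e_txrx.c, so neither the declaration of the counter nor the ethtool plumbing appears here. For orientation, the hunks below increment a page_busy_count field in the driver's per-ring RX statistics. The following is a minimal sketch of that structure, assuming only the field name used in the diff; the include and the surrounding fields are illustrative placeholders, not the exact contents of i40e_txrx.h:

/*
 * Sketch only -- the real declaration lives in i40e_txrx.h, outside this
 * diffstat.  Only page_busy_count is taken from the hunks below; the other
 * fields stand in for the existing per-ring counters.
 */
#include <linux/types.h>

struct i40e_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 page_reuse_count;	/* pages successfully recycled */
	u64 page_alloc_count;	/* fresh page allocations */
	u64 page_waive_count;	/* pages given up as not reusable */
	u64 page_busy_count;	/* new: page still in use, reuse skipped */
};

Each of the two early-return paths added to i40e_can_reuse_rx_page() in the diff bumps this counter before declining to recycle the page.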
Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_txrx.c')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c  12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 3d91b1655aec..a628f4b43fe8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1990,8 +1990,8 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
  * pointing to; otherwise, the DMA mapping needs to be destroyed and
  * page freed.
  *
- * rx_stats will be updated to indicate if the page was waived because it was
- * not reusable.
+ * rx_stats will be updated to indicate whether the page was waived
+ * or busy if it could not be reused.
  */
 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
 				   struct i40e_rx_queue_stats *rx_stats,
@@ -2008,13 +2008,17 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) {
+		rx_stats->page_busy_count++;
 		return false;
+	}
 #else
 #define I40E_LAST_OFFSET \
 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
-	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
+	if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
+		rx_stats->page_busy_count++;
 		return false;
+	}
 #endif
 
 	/* If we have drained the page fragment pool we need to update
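
On a running system, and assuming the counter is wired into the driver's ethtool statistics as the commit message states (that plumbing lives in files outside this diffstat), the new value would appear in the output of ethtool -S <iface> alongside the existing per-queue page reuse and waive counters; the exact stat name presented to userspace is defined outside i40e_txrx.c.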