| author | Alexander Duyck <alexander.h.duyck@intel.com> | 2017-02-07 05:27:03 +0300 |
|---|---|---|
| committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2017-03-17 22:11:44 +0300 |
| commit | 8649aaef4044681257ed38cf8706aea88430f2c4 (patch) | |
| tree | 62114a5d944105be3e4c5921e71fa8c29a50f4e4 /drivers/net/ethernet/intel/igb/igb.h | |
| parent | e08912985b296b33b18a563cc126e3e2f018c2e1 (diff) | |
| download | linux-8649aaef4044681257ed38cf8706aea88430f2c4.tar.xz | |
igb: Add support for using order 1 pages to receive large frames
This patch adds support for using 3K buffers in order-1 pages, the same way
we were already using 2K buffers in 4K pages. We reserve 1K of room for now
so that space is available for future headroom and tailroom when we enable
build_skb support.
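For concreteness, the buffer arithmetic this implies looks like the sketch below. It is an illustration based on the commit message, not code from the patch: it assumes each Rx buffer still occupies half of its page, so on a 4K-page system an order-1 page yields two 4K slots, each holding 3K of packet data with roughly 1K left over for the future headroom and tailroom.

```c
/* Illustrative arithmetic only (assumed half-page-per-buffer split,
 * not code from this patch): order-1 page on a 4K-page system.
 */
#define EXAMPLE_PAGE_SIZE	4096u					/* base page */
#define EXAMPLE_RX_PG_ORDER	1					/* order-1 allocation */
#define EXAMPLE_PG_SIZE		(EXAMPLE_PAGE_SIZE << EXAMPLE_RX_PG_ORDER)	/* 8192 */
#define EXAMPLE_BUF_TRUESIZE	(EXAMPLE_PG_SIZE / 2)			/* 4096 per Rx buffer */
#define EXAMPLE_DATA_BYTES	3072u					/* IGB_RXBUFFER_3072 */
#define EXAMPLE_RESERVED	(EXAMPLE_BUF_TRUESIZE - EXAMPLE_DATA_BYTES)	/* 1024 for headroom/tailroom */
```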
One side effect of this patch is that we can end up using a larger buffer
when jumbo frames are enabled. The impact shouldn't be too great, but it
could hurt small-packet performance for UDP workloads when jumbo frames are
enabled, since the truesize of each frame will be larger.
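To make the truesize concern concrete, here is a rough, hedged illustration (assumed numbers, not measurements from the patch): each datagram queued to a UDP socket is charged by its truesize against the socket receive buffer, so doubling the per-buffer truesize roughly halves how many small datagrams fit before the receive buffer fills.

```c
/* Rough illustration of receive-buffer accounting, not driver code.
 * The rcvbuf value is a typical net.core.rmem_default; real truesize
 * also includes struct sk_buff and shared-info overhead.
 */
#include <stdio.h>

int main(void)
{
	unsigned int sk_rcvbuf = 212992;	/* typical default receive buffer */
	unsigned int truesize_2k = 2048;	/* 2K buffer in a 4K page */
	unsigned int truesize_3k = 4096;	/* 3K buffer in an order-1 page */

	printf("small datagrams per rcvbuf: %u -> %u\n",
	       sk_rcvbuf / truesize_2k, sk_rcvbuf / truesize_3k);
	return 0;
}
```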
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb.h')
-rw-r--r-- | drivers/net/ethernet/intel/igb/igb.h | 30 |
1 file changed, 29 insertions(+), 1 deletion(-)
```diff
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index bffdfe65a0b6..eb91c87e0c1d 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -142,9 +142,9 @@ struct vf_data_storage {
 /* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_256	256
 #define IGB_RXBUFFER_2048	2048
+#define IGB_RXBUFFER_3072	3072
 #define IGB_RX_HDR_LEN		IGB_RXBUFFER_256
 #define IGB_TS_HDR_LEN		16
-#define IGB_RX_BUFSZ		IGB_RXBUFFER_2048
 
 #define IGB_SKB_PAD		(NET_SKB_PAD + NET_IP_ALIGN)
 #if (PAGE_SIZE < 8192)
@@ -313,12 +313,40 @@ struct igb_q_vector {
 };
 
 enum e1000_ring_flags_t {
+	IGB_RING_FLAG_RX_3K_BUFFER,
 	IGB_RING_FLAG_RX_SCTP_CSUM,
 	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
 	IGB_RING_FLAG_TX_CTX_IDX,
 	IGB_RING_FLAG_TX_DETECT_HANG
 };
 
+#define ring_uses_large_buffer(ring) \
+	test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define set_ring_uses_large_buffer(ring) \
+	set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define clear_ring_uses_large_buffer(ring) \
+	clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+
+static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return IGB_RXBUFFER_3072;
+#endif
+	return IGB_RXBUFFER_2048;
+}
+
+static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return 1;
+#endif
+	return 0;
+}
+
+#define igb_rx_pg_size(_ring)	(PAGE_SIZE << igb_rx_pg_order(_ring))
+
 #define IGB_TXD_DCMD	(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 
 #define IGB_RX_DESC(R, i)	    \
```
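The helpers added above are self-contained in igb.h; the sketch below shows how the Rx path in igb_main.c might consume them. It is illustrative only: the function names, the max_frame parameter, and the 2K threshold are assumptions for this example, not part of the patch (which touches only igb.h).

```c
/* Illustrative consumers of the new helpers (assumed, not from this patch). */

/* Allocate an order-0 (4K) page normally, or an order-1 (8K) page when
 * the ring has been flagged for 3K buffers.
 */
static struct page *example_alloc_rx_page(struct igb_ring *rx_ring)
{
	return dev_alloc_pages(igb_rx_pg_order(rx_ring));
}

/* Example policy for choosing the buffer size: frames that no longer fit
 * in a 2K buffer switch the ring to 3K buffers (threshold assumed here).
 */
static void example_set_rx_buffer_len(struct igb_ring *rx_ring,
				      unsigned int max_frame)
{
	clear_ring_uses_large_buffer(rx_ring);

#if (PAGE_SIZE < 8192)
	if (max_frame > IGB_RXBUFFER_2048)
		set_ring_uses_large_buffer(rx_ring);
#endif
}
```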