Diffstat (limited to 'drivers/net/ethernet/intel/i40evf/i40e_txrx.h')
-rw-r--r-- | drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 122
1 file changed, 91 insertions, 31 deletions
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 8274ba68bd32..901282c87cf6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -104,10 +104,9 @@ enum i40e_dyn_idx_t {
 
 /* Supported Rx Buffer Sizes (a multiple of 128) */
 #define I40E_RXBUFFER_256   256
+#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
 #define I40E_RXBUFFER_2048  2048
-#define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
-#define I40E_RXBUFFER_4096  4096
-#define I40E_RXBUFFER_8192  8192
+#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
 #define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */
 
 /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
@@ -120,6 +119,61 @@ enum i40e_dyn_idx_t {
 #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
 #define i40e_rx_desc i40e_32byte_rx_desc
 
+#define I40E_RX_DMA_ATTR \
+	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ *	 up negative. In these cases we should fall back to the legacy
+ *	 receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define I40E_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
+
+static inline int i40e_compute_pad(int rx_buf_len)
+{
+	int page_size, pad_size;
+
+	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+	return pad_size;
+}
+
+static inline int i40e_skb_pad(void)
+{
+	int rx_buf_len;
+
+	/* If a 2K buffer cannot handle a standard Ethernet frame then
+	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
+	 *
+	 * For a 3K buffer we need to add enough padding to allow for
+	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
+	 * cache-line alignment.
+	 */
+	if (I40E_2K_TOO_SMALL_WITH_PADDING)
+		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+	else
+		rx_buf_len = I40E_RXBUFFER_1536;
+
+	/* if needed make room for NET_IP_ALIGN */
+	rx_buf_len -= NET_IP_ALIGN;
+
+	return i40e_compute_pad(rx_buf_len);
+}
+
+#define I40E_SKB_PAD i40e_skb_pad()
+#else
+#define I40E_2K_TOO_SMALL_WITH_PADDING false
+#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
+
 /**
  * i40e_test_staterr - tests bits in Rx descriptor status and error fields
  * @rx_desc: pointer to receive descriptor (in le64 format)
@@ -241,7 +295,12 @@ struct i40e_tx_buffer {
 struct i40e_rx_buffer {
 	dma_addr_t dma;
 	struct page *page;
-	unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
 };
 
 struct i40e_queue_stats {
@@ -321,7 +380,8 @@ struct i40e_ring {
 	u8 packet_stride;
 
 	u16 flags;
-#define I40E_TXR_FLAGS_WB_ON_ITR	BIT(0)
+#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
+#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
 
 	/* stats structs */
 	struct i40e_queue_stats stats;
@@ -349,6 +409,21 @@ struct i40e_ring {
 	 */
 } ____cacheline_internodealigned_in_smp;
 
+static inline bool ring_uses_build_skb(struct i40e_ring *ring)
+{
+	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
+}
+
+static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
+static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
 enum i40e_latency_range {
 	I40E_LOWEST_LATENCY = 0,
 	I40E_LOW_LATENCY = 1,
@@ -370,6 +445,17 @@ struct i40e_ring_container {
 #define i40e_for_each_ring(pos, head) \
 	for (pos = (head).ring; pos != NULL; pos = pos->next)
 
+static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring->rx_buf_len > (PAGE_SIZE / 2))
+		return 1;
+#endif
+	return 0;
+}
+
+#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
+
 bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
@@ -385,20 +471,6 @@ int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
 bool __i40evf_chk_linearize(struct sk_buff *skb);
 
 /**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: Tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-	return le32_to_cpu(*(volatile __le32 *)head);
-}
-
-/**
  * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
  * @skb:     send buffer
  * @tx_ring: ring to send buffer on
@@ -460,19 +532,7 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 	/* we can support up to 8 data buffers for a single send */
 	return count != I40E_MAX_BUFFER_TXD;
 }
-
-/**
- * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
- * @ptype: the packet type field from Rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
-	return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
-	       (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
 /**
- * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
  * @ring: Tx ring to find the netdev equivalent of
  **/
 static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
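
For reference, the headroom logic added above (I40E_SKB_PAD, I40E_2K_TOO_SMALL_WITH_PADDING and i40e_compute_pad()) can be exercised outside the kernel. The sketch below is a minimal userspace approximation, assuming a 4K PAGE_SIZE, the generic NET_IP_ALIGN of 2, and roughly 320 bytes for struct skb_shared_info; none of these constants come from this commit, and NET_SKB_PAD / SKB_WITH_OVERHEAD / SKB_DATA_ALIGN are re-created locally rather than taken from the kernel headers.

/* Minimal userspace sketch of the I40E_SKB_PAD arithmetic (assumed values). */
#include <stdio.h>

#define PAGE_SIZE		4096
#define I40E_RXBUFFER_1536	1536
#define I40E_RXBUFFER_2048	2048
#define I40E_RXBUFFER_3072	3072
#define NET_IP_ALIGN		2	/* generic default; 0 on x86 */
#define SHINFO_SIZE		320	/* assumed sizeof(struct skb_shared_info) */

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Mirrors I40E_2K_TOO_SMALL_WITH_PADDING, i40e_skb_pad() and
 * i40e_compute_pad() for a given cache-line size.
 */
static int skb_pad(int cache_line)
{
	int net_skb_pad = cache_line > 32 ? cache_line : 32;	/* NET_SKB_PAD */
	int overhead = ALIGN(SHINFO_SIZE, cache_line);		/* SKB_DATA_ALIGN(shinfo) */
	int rx_buf_len, page_size;

	/* I40E_2K_TOO_SMALL_WITH_PADDING: does a padded 1536B frame still fit in 2K? */
	if (net_skb_pad + I40E_RXBUFFER_1536 > I40E_RXBUFFER_2048 - overhead)
		rx_buf_len = I40E_RXBUFFER_3072 + ALIGN(NET_IP_ALIGN, cache_line);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	rx_buf_len -= NET_IP_ALIGN;	/* make room for NET_IP_ALIGN */

	/* i40e_compute_pad(): headroom left after the data and shared info */
	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return (page_size - overhead) - rx_buf_len;
}

int main(void)
{
	printf("64B cache line:  pad = %d bytes\n", skb_pad(64));
	printf("256B cache line: pad = %d bytes\n", skb_pad(256));
	return 0;
}

The same size math is what drives the new i40e_rx_pg_order()/i40e_rx_pg_size() helpers: once rx_buf_len grows past PAGE_SIZE / 2 (the padded 3K buffer on 4K pages), the ring switches to order-1 pages so each page can still be split into two receive buffers.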