author     Sunil Goutham <sgoutham@cavium.com>      2015-12-10 10:55:19 +0300
committer  David S. Miller <davem@davemloft.net>    2015-12-12 07:38:17 +0300
commit     40fb5f8a60f33133d36afde35a9ad865d35e4423 (patch)
tree       b6c4a48659a4c5692f92645d5326ef9333d086ed /drivers/net/ethernet/cavium/thunder/nicvf_queues.c
parent     2ad7b7560fd586a8b72eb52df9483c203943eec9 (diff)
download   linux-40fb5f8a60f33133d36afde35a9ad865d35e4423.tar.xz
net: thunderx: HW TSO support for pass-2 hardware
This adds support for offloading TCP segmentation to the hardware in the
pass-2 revision of the chip. Driver-level SW TSO for pass-1.x chips and
HW TSO for pass-2 chips will co-exist. SQ descriptor structures are
modified to reflect the pass-2 HW implementation.
Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
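
Below is a minimal, self-contained C sketch (not driver code) of the dispatch
this patch introduces: on pass-1.x silicon a GSO skb is segmented in software,
while on pass-2 silicon the driver only fills the TSO fields of the single
header subdescriptor. The fake_skb and fake_sq_hdr types and the fill_hw_tso
helper are hypothetical stand-ins for sk_buff and the SQ header subdescriptor;
only the tso/tso_start/tso_max_paysize/inner_l3_offset field names are taken
from the diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_skb {            /* stand-in for struct sk_buff */
	uint16_t gso_size;       /* nonzero => GSO/TSO packet */
	uint16_t transport_off;  /* skb_transport_offset() */
	uint16_t network_off;    /* skb_network_offset() */
	uint16_t tcp_hdrlen;     /* tcp_hdrlen() */
};

struct fake_sq_hdr {         /* TSO fields added for pass-2 HW */
	bool tso;
	uint16_t tso_start;
	uint16_t tso_max_paysize;
	int16_t inner_l3_offset;
};

static void fill_hw_tso(struct fake_sq_hdr *hdr, const struct fake_skb *skb)
{
	hdr->tso = true;
	/* segmented payload starts right after the TCP header */
	hdr->tso_start = skb->transport_off + skb->tcp_hdrlen;
	hdr->tso_max_paysize = skb->gso_size;
	/* for non-tunneled pkts: two bytes before L3 == L2 ethertype */
	hdr->inner_l3_offset = skb->network_off - 2;
}

int main(void)
{
	bool hw_tso = true;                         /* pass-2 silicon */
	struct fake_skb skb = { 1448, 34, 14, 20 }; /* plain TCP/IPv4 frame */
	struct fake_sq_hdr hdr = { 0 };

	if (skb.gso_size && !hw_tso) {
		/* pass-1.x: software segmentation (nicvf_sq_append_tso path) */
		puts("pass-1.x: SW TSO");
	} else if (skb.gso_size) {
		fill_hw_tso(&hdr, &skb);
		printf("pass-2: HW TSO, tso_start=%d max_paysize=%d l3off=%d\n",
		       hdr.tso_start, hdr.tso_max_paysize, hdr.inner_l3_offset);
	}
	return 0;
}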
Diffstat (limited to 'drivers/net/ethernet/cavium/thunder/nicvf_queues.c')
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 20 +++++++++++++++-----
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 1fbd9084333e..b11fc094f769 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -925,7 +925,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
 {
 	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
 
-	if (skb_shinfo(skb)->gso_size) {
+	if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
 		subdesc_cnt = nicvf_tso_count_subdescs(skb);
 		return subdesc_cnt;
 	}
@@ -940,7 +940,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
  * First subdescriptor for every send descriptor.
  */
 static inline void
-nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 			 int subdesc_cnt, struct sk_buff *skb, int len)
 {
 	int proto;
@@ -976,6 +976,15 @@ nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
 			break;
 		}
 	}
+
+	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
+		hdr->tso = 1;
+		hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
+		/* For non-tunneled pkts, point this to L2 ethertype */
+		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
+		nic->drv_stats.tx_tso++;
+	}
 }
 
 /* SQ GATHER subdescriptor
@@ -1045,7 +1054,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
 			data_left -= size;
 			tso_build_data(skb, &tso, size);
 		}
-		nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
+		nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
 					 seg_subdescs - 1, skb, seg_len);
 		sq->skbuff[hdr_qentry] = (u64)NULL;
 		qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1098,11 +1107,12 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
 	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
 
 	/* Check if its a TSO packet */
-	if (skb_shinfo(skb)->gso_size)
+	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
 		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
 
 	/* Add SQ header subdesc */
-	nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
+	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
+				 skb, skb->len);
 
 	/* Add SQ gather subdescs */
 	qentry = nicvf_get_nxt_sqentry(sq, qentry);
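
A note on the inner_l3_offset value in the header subdescriptor: for an
untagged Ethernet frame the network header starts at byte 14 and the 2-byte
ethertype field at byte 12, so skb_network_offset(skb) - 2 lands exactly on
the ethertype, matching the in-diff comment for non-tunneled packets. The
nic->hw_tso flag consulted here is not set in this file; presumably it is
initialized elsewhere in the driver at probe time, based on whether the
device is pass-2 silicon.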