Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4vf/sge.c')
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 38
1 file changed, 29 insertions, 9 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 14d7e673c656..dfce5df7538e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1202,6 +1202,10 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	BUG_ON(qidx >= pi->nqsets);
 	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
 
+	if (pi->vlan_id && !skb_vlan_tag_present(skb))
+		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
+				       pi->vlan_id);
+
 	/*
 	 * Take this opportunity to reclaim any TX Descriptors whose DMA
 	 * transfers have completed.
@@ -1570,6 +1574,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 {
 	struct adapter *adapter = rxq->rspq.adapter;
 	struct sge *s = &adapter->sge;
+	struct port_info *pi;
 	int ret;
 	struct sk_buff *skb;
 
@@ -1586,8 +1591,9 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 	skb->truesize += skb->data_len;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	skb_record_rx_queue(skb, rxq->rspq.idx);
+	pi = netdev_priv(skb->dev);
 
-	if (pkt->vlan_ex) {
+	if (pkt->vlan_ex && !pi->vlan_id) {
 		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
 				       be16_to_cpu(pkt->vlan));
 		rxq->stats.vlan_ex++;
@@ -1620,6 +1626,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 	struct adapter *adapter = rspq->adapter;
 	struct sge *s = &adapter->sge;
+	struct port_info *pi;
 
 	/*
 	 * If this is a good TCP packet and we have Generic Receive Offload
@@ -1644,6 +1651,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	__skb_pull(skb, s->pktshift);
 	skb->protocol = eth_type_trans(skb, rspq->netdev);
 	skb_record_rx_queue(skb, rspq->idx);
+	pi = netdev_priv(skb->dev);
 	rxq->stats.pkts++;
 
 	if (csum_ok && !pkt->err_vec &&
@@ -1660,9 +1668,10 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	} else
 		skb_checksum_none_assert(skb);
 
-	if (pkt->vlan_ex) {
+	if (pkt->vlan_ex && !pi->vlan_id) {
 		rxq->stats.vlan_ex++;
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       be16_to_cpu(pkt->vlan));
 	}
 
 	netif_receive_skb(skb);
@@ -2619,8 +2628,8 @@ void t4vf_sge_stop(struct adapter *adapter)
 int t4vf_sge_init(struct adapter *adapter)
 {
 	struct sge_params *sge_params = &adapter->params.sge;
-	u32 fl0 = sge_params->sge_fl_buffer_size[0];
-	u32 fl1 = sge_params->sge_fl_buffer_size[1];
+	u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
+	u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
 	struct sge *s = &adapter->sge;
 
 	/*
@@ -2628,9 +2637,20 @@ int t4vf_sge_init(struct adapter *adapter)
 	 * the Physical Function Driver. Ideally we should be able to deal
 	 * with _any_ configuration. Practice is different ...
 	 */
-	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+
+	/* We only bother using the Large Page logic if the Large Page Buffer
+	 * is larger than our Page Size Buffer.
+	 */
+	if (fl_large_pg <= fl_small_pg)
+		fl_large_pg = 0;
+
+	/* The Page Size Buffer must be exactly equal to our Page Size and the
+	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
+	 */
+	if (fl_small_pg != PAGE_SIZE ||
+	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
 		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
-			fl0, fl1);
+			fl_small_pg, fl_large_pg);
 		return -EINVAL;
 	}
 	if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
@@ -2642,8 +2662,8 @@ int t4vf_sge_init(struct adapter *adapter)
 	/*
 	 * Now translate the adapter parameters into our internal forms.
 	 */
-	if (fl1)
-		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+	if (fl_large_pg)
+		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
 	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
 			? 128 : 64);
 	s->pktshift = PKTSHIFT_G(sge_params->sge_control);
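
Note on the t4vf_sge_init() hunks above: the old check rejected any configuration where fl0 was not PAGE_SIZE or fl1 was non-zero but not larger than fl0. The patch splits this into two steps. First, the large-page buffer is ignored (forced to 0) unless it is strictly larger than the page-size buffer; second, the page-size buffer must equal PAGE_SIZE and the large-page buffer must be 0 or a power of 2, after which the free-list page order is derived as ilog2(fl_large_pg) - PAGE_SHIFT. The program below is a minimal userspace sketch of that validation, not driver code: PAGE_SHIFT/PAGE_SIZE are hard-coded to 12/4096 for illustration, and ilog2_u32() and fl_pg_order() are hypothetical stand-ins for the kernel's ilog2() helper and the driver's s->fl_pg_order field.

/*
 * Userspace sketch of the relaxed free-list buffer validation.
 * PAGE_SHIFT/PAGE_SIZE are assumed to be 12/4096 purely for
 * illustration; in the driver they come from the kernel headers.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Stand-in for the kernel's ilog2(): floor(log2(x)) for non-zero x. */
static unsigned int ilog2_u32(uint32_t x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

/*
 * Returns the free-list page order implied by the two buffer sizes,
 * or -1 if the configuration would be rejected with -EINVAL.
 */
static int fl_pg_order(uint32_t fl_small_pg, uint32_t fl_large_pg)
{
	/* Only use the large-page buffer if it is bigger than a page. */
	if (fl_large_pg <= fl_small_pg)
		fl_large_pg = 0;

	/*
	 * The small buffer must match the host page size; the large
	 * buffer must be 0 (disabled above) or a power of 2, since
	 * x & (x - 1) is zero exactly for 0 and powers of 2.
	 */
	if (fl_small_pg != PAGE_SIZE ||
	    (fl_large_pg & (fl_large_pg - 1)) != 0)
		return -1;

	return fl_large_pg ? (int)(ilog2_u32(fl_large_pg) - PAGE_SHIFT) : 0;
}

int main(void)
{
	printf("order(4096, 65536) = %d\n", fl_pg_order(4096, 65536)); /* 4  */
	printf("order(4096, 0)     = %d\n", fl_pg_order(4096, 0));     /* 0  */
	printf("order(4096, 12000) = %d\n", fl_pg_order(4096, 12000)); /* -1 */
	return 0;
}

The (x & (x - 1)) test evaluates to zero exactly for 0 and for powers of 2, which is why a disabled large-page buffer (fl_large_pg == 0) passes the same check without a separate branch.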