author | Thomas Falcon <tlfalcon@linux.vnet.ibm.com> | 2018-03-17 04:00:29 +0300 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-03-18 03:12:39 +0300 |
commit | 86b61a5f2e39bbee254aa4d2c5445059c7656f98 (patch) | |
tree | 3b30dd206b66a9d533ab19473afc46378a2f2c3d /drivers/net/ethernet/ibm | |
parent | 06b3e35788a4c6f0afa931b6251ecfe20ce4787b (diff) | |
download | linux-86b61a5f2e39bbee254aa4d2c5445059c7656f98.tar.xz | |
ibmvnic: Improve TX buffer accounting
Improve TX pool buffer accounting to prevent the producer
index from overrunning the consumer. First, set the next free
index to an invalid value if it is in use. If the next buffer
to be consumed is in use, drop the packet.

Finally, if the transmit fails for some other reason, roll
back the consumer index and set the free map entry to its original
value. This should also be done if the DMA map fails.
Signed-off-by: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
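
For context, the free-map scheme the message describes can be sketched outside the driver. The fragment below is a minimal userspace model, not driver code: the pool size, the hw_send() stub, the tx_complete() helper, and main() are illustrative assumptions, while the names (free_map, consumer_index, producer_index, num_buffers, IBMVNIC_INVALID_MAP) mirror those used in the patch. The consumer claims a slot by writing IBMVNIC_INVALID_MAP into it, drops the packet if the slot is still claimed, and rolls the index and map entry back when a send fails.

```c
/* Simplified sketch of the free_map producer/consumer accounting;
 * pool size, hw_send() and tx_complete() are illustrative only. */
#include <stdio.h>
#include <stdbool.h>

#define POOL_SIZE           8
#define IBMVNIC_INVALID_MAP (-1)

struct tx_pool {
	int free_map[POOL_SIZE];   /* ring of free buffer indices */
	int consumer_index;        /* next ring slot to take a buffer from */
	int producer_index;        /* next ring slot to return a buffer to */
	int num_buffers;
};

static void tx_pool_init(struct tx_pool *pool)
{
	pool->num_buffers = POOL_SIZE;
	pool->consumer_index = 0;
	pool->producer_index = 0;
	for (int i = 0; i < POOL_SIZE; i++)
		pool->free_map[i] = i;
}

/* Stand-in for the hypervisor send; fails once to exercise the rollback. */
static bool hw_send(int buffer_index)
{
	static int calls;

	(void)buffer_index;
	return ++calls != 3;    /* pretend the third send attempt fails */
}

static int xmit(struct tx_pool *pool)
{
	int index = pool->free_map[pool->consumer_index];

	/* Slot still owned by an in-flight buffer: drop rather than overrun. */
	if (index == IBMVNIC_INVALID_MAP) {
		printf("drop: no free TX buffer\n");
		return -1;
	}

	/* Claim the slot so it cannot be handed out twice. */
	pool->free_map[pool->consumer_index] = IBMVNIC_INVALID_MAP;
	pool->consumer_index = (pool->consumer_index + 1) % pool->num_buffers;

	if (!hw_send(index)) {
		/* Send failed: roll back the consumer index, restore the entry. */
		if (pool->consumer_index == 0)
			pool->consumer_index = pool->num_buffers - 1;
		else
			pool->consumer_index--;
		pool->free_map[pool->consumer_index] = index;
		printf("send of buffer %d failed, rolled back\n", index);
		return -1;
	}

	printf("sent buffer %d\n", index);
	return 0;
}

/* Completion path: the producer returns a finished buffer to the ring. */
static void tx_complete(struct tx_pool *pool, int index)
{
	pool->free_map[pool->producer_index] = index;
	pool->producer_index = (pool->producer_index + 1) % pool->num_buffers;
}

int main(void)
{
	struct tx_pool pool;

	tx_pool_init(&pool);

	/* 3rd attempt fails and rolls back; 10th finds no free slot and drops. */
	for (int i = 0; i < POOL_SIZE + 2; i++)
		xmit(&pool);

	tx_complete(&pool, 0);  /* completion frees buffer 0 ...           */
	xmit(&pool);            /* ... so the next transmit succeeds again */
	return 0;
}
```

The key point the sketch illustrates is the ordering: the slot is invalidated before the send is attempted, so a concurrent producer can never hand the same buffer out twice, and the rollback restores both the index and the map entry only on failure.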
Diffstat (limited to 'drivers/net/ethernet/ibm')
-rw-r--r-- | drivers/net/ethernet/ibm/ibmvnic.c | 30 |
1 file changed, 21 insertions, 9 deletions
```diff
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 672e9221d4a5..af6f8193cb67 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1426,6 +1426,16 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	index = tx_pool->free_map[tx_pool->consumer_index];
+	if (index == IBMVNIC_INVALID_MAP) {
+		dev_kfree_skb_any(skb);
+		tx_send_failed++;
+		tx_dropped++;
+		ret = NETDEV_TX_OK;
+		goto out;
+	}
+
+	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
+
 	offset = index * tx_pool->buf_size;
 	dst = tx_pool->long_term_buff.buff + offset;
 	memset(dst, 0, tx_pool->buf_size);
@@ -1522,7 +1532,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		tx_map_failed++;
 		tx_dropped++;
 		ret = NETDEV_TX_OK;
-		goto out;
+		goto tx_err_out;
 	}
 	lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
 				       (u64)tx_buff->indir_dma,
@@ -1534,13 +1544,6 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	}
 	if (lpar_rc != H_SUCCESS) {
 		dev_err(dev, "tx failed with code %ld\n", lpar_rc);
-
-		if (tx_pool->consumer_index == 0)
-			tx_pool->consumer_index =
-				tx_pool->num_buffers - 1;
-		else
-			tx_pool->consumer_index--;
-
 		dev_kfree_skb_any(skb);
 		tx_buff->skb = NULL;
@@ -1556,7 +1559,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		tx_send_failed++;
 		tx_dropped++;
 		ret = NETDEV_TX_OK;
-		goto out;
+		goto tx_err_out;
 	}
 	if (atomic_add_return(num_entries, &tx_scrq->used)
@@ -1569,7 +1572,16 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	tx_bytes += skb->len;
 	txq->trans_start = jiffies;
 	ret = NETDEV_TX_OK;
+	goto out;
+tx_err_out:
+	/* roll back consumer index and map array*/
+	if (tx_pool->consumer_index == 0)
+		tx_pool->consumer_index =
+			tx_pool->num_buffers - 1;
+	else
+		tx_pool->consumer_index--;
+	tx_pool->free_map[tx_pool->consumer_index] = index;
 out:
 	netdev->stats.tx_dropped += tx_dropped;
 	netdev->stats.tx_bytes += tx_bytes;
```