author     Willy Tarreau <w@1wt.eu>                 2014-01-16 11:20:12 +0400
committer  David S. Miller <davem@davemloft.net>    2014-01-17 03:15:42 +0400
commit     6c4989748759f433bf999f33c1ff3853ed5ca945 (patch)
tree       b4b0bbf5fec97d52a515c8bef433ceff2161cff4 /drivers
parent     71f6d1b31fb1f278a345a30a2180515adc7d80ae (diff)
download   linux-6c4989748759f433bf999f33c1ff3853ed5ca945.tar.xz
net: mvneta: remove tests for impossible cases in the tx_done path
Currently, mvneta_tx_done_gbe() calls mvneta_tx_done_policy() with a non-null cause to retrieve the pointer to the next queue to process. The tests on the returned queue number and on the pointer are useless: both are well defined within a known, limited set. This code path is fast, although not critical; removing three tests here that the compiler could not optimize away (verified) is always desirable.

Cc: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Cc: Gregory CLEMENT <gregory.clement@free-electrons.com>
Tested-by: Arnaud Ebalard <arno@natisbad.org>
Signed-off-by: Willy Tarreau <w@1wt.eu>
Signed-off-by: David S. Miller <davem@davemloft.net>
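The removed checks were redundant because of a single invariant: for a non-zero <cause> whose set bits all fall below txq_number, fls(cause) - 1 is always a valid index into pp->txqs[]. The stand-alone user-space sketch below (not part of the patch; TXQ_NUMBER and the local fls() helper are stand-ins for the driver's txq_number and the kernel's fls()) checks that invariant exhaustively for an assumed 8-queue configuration:

#include <assert.h>
#include <stdio.h>

#define TXQ_NUMBER 8	/* assumed queue count for the demo */

/* Same semantics as the kernel's fls(): 1-based index of the last
 * (most significant) set bit, 0 when the argument is 0. */
static int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int cause;

	/* Walk every non-zero cause that fits in the low TXQ_NUMBER bits. */
	for (cause = 1; cause < (1u << TXQ_NUMBER); cause++) {
		int queue = fls(cause) - 1;

		/* The resulting index always lies in [0, TXQ_NUMBER). */
		assert(queue >= 0 && queue < TXQ_NUMBER);
	}
	printf("fls(cause) - 1 is a valid queue index for every non-zero cause\n");
	return 0;
}

Because the callers only pass causes built from the per-queue TX interrupt bits and the loop stops once the cause reaches zero, mvneta_tx_done_policy() can return &pp->txqs[queue] unconditionally.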
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c  15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index df75a2302740..2fb9559ba6fe 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1276,13 +1276,16 @@ static void mvneta_rx_csum(struct mvneta_port *pp,
skb->ip_summed = CHECKSUM_NONE;
}
-/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
+/* Return tx queue pointer (find last set bit) according to <cause> returned
+ * from the tx_done reg. <cause> must not be null. The return value is always
+ * a valid queue, matching the first one found in <cause>.
+ */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
u32 cause)
{
int queue = fls(cause) - 1;
- return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
+ return &pp->txqs[queue];
}
/* Free tx queue skbuffs */
@@ -1651,7 +1654,9 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
txq->txq_get_index = 0;
}
-/* handle tx done - called from tx done timer callback */
+/* Handle tx done - called in softirq context. The <cause_tx_done> argument
+ * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
+ */
static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
int *tx_todo)
{
@@ -1660,10 +1665,8 @@ static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
struct netdev_queue *nq;
*tx_todo = 0;
- while (cause_tx_done != 0) {
+ while (cause_tx_done) {
txq = mvneta_tx_done_policy(pp, cause_tx_done);
- if (!txq)
- break;
nq = netdev_get_tx_queue(pp->dev, txq->id);
__netif_tx_lock(nq, smp_processor_id());