| | | |
|---|---|---|
| author | Michael Ellerman <michael@ellerman.id.au> | 2005-09-01 05:29:19 +0400 |
| committer | Jeff Garzik <jgarzik@pobox.com> | 2005-09-01 06:42:45 +0400 |
| commit | db5e8718eac0b8166d6fd05b1ed7f8114c243988 (patch) | |
| tree | e0adc928ffa6735e228c6fd2867381f8442875e7 /drivers/net/iseries_veth.c | |
| parent | e0808494ff44d5cedcaf286bb8a93d08e8d9af49 (diff) | |
| download | linux-db5e8718eac0b8166d6fd05b1ed7f8114c243988.tar.xz | |
[PATCH] iseries_veth: Fix bogus counting of TX errors
There are a number of problems with the way iseries_veth counts TX errors.
Firstly, it counts conditions which aren't really errors as TX errors. This
includes the case where we don't have a connection struct for the other LPAR,
and the case where the other LPAR is currently down (or simply doesn't want to
talk to us). Neither of these should count as a TX error.
Secondly, it counts one TX error for each LPAR that fails to accept the packet.
This can lead to TX error counts higher than the total number of packets sent
through the interface. This is confusing for users.
This patch fixes that behaviour. The non-error conditions are no longer
counted, and we give the TX counts a new and, I think, saner meaning:
If a packet is successfully transmitted to any LPAR then it is counted as
transmitted and tx_packets is incremented by 1.
If there is an error transmitting a packet to any LPAR then that is counted
as one error, i.e. tx_errors is incremented by 1.
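This counting is what veth_transmit_to_many() implements in the diff below. The following standalone C sketch (toy types and hypothetical per-LPAR results, not the driver code) shows the aggregation on its own: per-LPAR outcomes are folded into one success flag and one error flag, and each counter is incremented at most once per packet.

```c
#include <stdio.h>

struct toy_stats {
	unsigned long tx_packets;
	unsigned long tx_errors;
	unsigned long tx_bytes;
};

int main(void)
{
	struct toy_stats stats = { 0, 0, 0 };
	/* Hypothetical per-LPAR results: 0 = accepted, 1 = error. */
	int results[3] = { 0, 1, 0 };
	unsigned long skb_len = 1500;	/* made-up packet length */
	int i, success = 0, error = 0;

	for (i = 0; i < 3; i++) {
		if (results[i])
			error = 1;
		else
			success = 1;
	}

	if (error)			/* at most one TX error per packet */
		stats.tx_errors++;

	if (success) {			/* at most one TX packet counted */
		stats.tx_packets++;
		stats.tx_bytes += skb_len;
	}

	printf("tx_packets=%lu tx_errors=%lu tx_bytes=%lu\n",
	       stats.tx_packets, stats.tx_errors, stats.tx_bytes);
	return 0;
}
```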
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net/iseries_veth.c')
-rw-r--r-- | drivers/net/iseries_veth.c | 47 |
1 file changed, 19 insertions(+), 28 deletions(-)
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index eaff17cc9fb8..b945bf02d257 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -938,31 +938,25 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
 	struct veth_port *port = (struct veth_port *) dev->priv;
 	HvLpEvent_Rc rc;
 	struct veth_msg *msg = NULL;
-	int err = 0;
 	unsigned long flags;

-	if (! cnx) {
-		port->stats.tx_errors++;
-		dev_kfree_skb(skb);
+	if (! cnx)
 		return 0;
-	}

 	spin_lock_irqsave(&cnx->lock, flags);

 	if (! (cnx->state & VETH_STATE_READY))
-		goto drop;
+		goto no_error;

-	if ((skb->len - 14) > VETH_MAX_MTU)
+	if ((skb->len - ETH_HLEN) > VETH_MAX_MTU)
 		goto drop;

 	msg = veth_stack_pop(cnx);
-
-	if (! msg) {
-		err = 1;
+	if (! msg)
 		goto drop;
-	}

 	msg->in_use = 1;
+	msg->skb = skb_get(skb);

 	msg->data.addr[0] = dma_map_single(port->dev, skb->data, skb->len,
 					   DMA_TO_DEVICE);
@@ -970,9 +964,6 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
 	if (dma_mapping_error(msg->data.addr[0]))
 		goto recycle_and_drop;

-	/* Is it really necessary to check the length and address
-	 * fields of the first entry here? */
-	msg->skb = skb;
 	msg->dev = port->dev;
 	msg->data.len[0] = skb->len;
 	msg->data.eofmask = 1 << VETH_EOF_SHIFT;
@@ -992,43 +983,43 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
 	if (veth_stack_is_empty(cnx))
 		veth_stop_queues(cnx);

+ no_error:
 	spin_unlock_irqrestore(&cnx->lock, flags);
 	return 0;

 recycle_and_drop:
-	/* we free the skb below, so tell veth_recycle_msg() not to. */
-	msg->skb = NULL;
 	veth_recycle_msg(cnx, msg);
 drop:
-	port->stats.tx_errors++;
-	dev_kfree_skb(skb);
 	spin_unlock_irqrestore(&cnx->lock, flags);
-	return err;
+	return 1;
 }

-static HvLpIndexMap veth_transmit_to_many(struct sk_buff *skb,
+static void veth_transmit_to_many(struct sk_buff *skb,
 					  HvLpIndexMap lpmask,
 					  struct net_device *dev)
 {
 	struct veth_port *port = (struct veth_port *) dev->priv;
-	int i;
-	int rc;
+	int i, success, error;
+
+	success = error = 0;

 	for (i = 0; i < HVMAXARCHITECTEDLPS; i++) {
 		if ((lpmask & (1 << i)) == 0)
 			continue;

-		rc = veth_transmit_to_one(skb_get(skb), i, dev);
-		if (! rc)
-			lpmask &= ~(1<<i);
+		if (veth_transmit_to_one(skb, i, dev))
+			error = 1;
+		else
+			success = 1;
 	}

-	if (! lpmask) {
+	if (error)
+		port->stats.tx_errors++;
+
+	if (success) {
 		port->stats.tx_packets++;
 		port->stats.tx_bytes += skb->len;
 	}
-
-	return lpmask;
 }

 static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
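A side change visible in the diff: veth_transmit_to_one() now takes its own reference on the skb with skb_get() when it queues a message, and the drop paths no longer call dev_kfree_skb(); previously the caller took a reference per LPAR instead. The sketch below is a standalone userspace model of that reference-counting idea (a toy struct, not the kernel's sk_buff API): each holder takes a reference, and the buffer is freed only when the last reference is dropped.

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted buffer modelling the skb_get()/dev_kfree_skb() pattern:
 * every user takes a reference; the data is freed on the last put. */
struct buf {
	int users;
	size_t len;
};

static struct buf *buf_get(struct buf *b)	/* analogue of skb_get() */
{
	b->users++;
	return b;
}

static void buf_put(struct buf *b)		/* analogue of dev_kfree_skb() */
{
	if (--b->users == 0) {
		printf("freeing buffer of %zu bytes\n", b->len);
		free(b);
	}
}

int main(void)
{
	struct buf *skb = malloc(sizeof(*skb));

	skb->users = 1;		/* reference owned by the transmit path */
	skb->len = 1500;

	/* Each queued per-LPAR message holds its own reference... */
	struct buf *msg_ref = buf_get(skb);

	/* ...so the original owner can drop its reference immediately... */
	buf_put(skb);

	/* ...and the buffer survives until the message is recycled. */
	buf_put(msg_ref);
	return 0;
}
```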