Diffstat (limited to 'drivers/net/ethernet/brocade/bna')
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_cee.c | 2
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_defs.h | 8
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h | 1
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_ioc.h | 6
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfi.h | 46
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna.h | 18
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna_enet.c | 29
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna_hw_defs.h | 5
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna_types.h | 2
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.c | 476
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.h | 64
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 101
-rw-r--r-- drivers/net/ethernet/brocade/bna/cna.h | 11
13 files changed, 453 insertions(+), 316 deletions(-)
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index b45b8eb3b9b0..8e627186507c 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -16,8 +16,6 @@
* www.brocade.com
*/
-#include "bfa_defs_cna.h"
-#include "cna.h"
#include "bfa_cee.h"
#include "bfi_cna.h"
#include "bfa_ioc.h"
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 205b92b3709c..a81c0ccfc2f8 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -251,10 +251,10 @@ struct bfa_mfg_block {
* ---------------------- pci definitions ------------
*/
-#define bfa_asic_id_ct(devid) \
- ((devid) == PCI_DEVICE_ID_BROCADE_CT || \
- (devid) == PCI_DEVICE_ID_BROCADE_CT_FC)
-#define bfa_asic_id_ctc(devid) (bfa_asic_id_ct(devid))
+#define bfa_asic_id_ct(device) \
+ ((device) == PCI_DEVICE_ID_BROCADE_CT || \
+ (device) == PCI_DEVICE_ID_BROCADE_CT_FC)
+#define bfa_asic_id_ctc(device) (bfa_asic_id_ct(device))
enum bfa_mode {
BFA_MODE_HBA = 1,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
index 7ddd16f819f9..7e5df90528fc 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -18,7 +18,6 @@
#ifndef __BFA_DEFS_MFG_COMM_H__
#define __BFA_DEFS_MFG_COMM_H__
-#include "cna.h"
#include "bfa_defs.h"
/**
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index f5a3d4e82078..9116324865cc 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -274,8 +274,10 @@ void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
(__ioc)->asic_mode))
-#define bfa_ioc_isr_mode_set(__ioc, __msix) \
- ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
+#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \
+ if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \
+ ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \
+} while (0)
#define bfa_ioc_ownership_reset(__ioc) \
((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
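
[Editor's note] The rework above wraps the hook dispatch in do { ... } while (0) so that the added NULL check still behaves as a single statement at every call site. A minimal sketch of why that wrapper matters for multi-statement macros (hypothetical names, not the driver's code):

    #define CALL_HOOK(obj) do {              \
            if ((obj)->hook)                 \
                    (obj)->hook(obj);        \
    } while (0)

    /* Without the do/while wrapper the macro would expand to a bare
     * `if`, and an `else` at the call site would silently pair with
     * the macro's internal branch instead of the caller's: */
    if (ready)
            CALL_HOOK(dev);         /* expands safely as one statement */
    else
            report_not_ready(dev);
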
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 19654cc7abab..4e04c140c84c 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -73,20 +73,6 @@ struct bfi_mhdr {
****************************************************************************
*/
-#define BFI_SGE_INLINE 1
-#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
-
-/**
- * SG Flags
- */
-enum {
- BFI_SGE_DATA = 0, /*!< data address, not last */
- BFI_SGE_DATA_CPL = 1, /*!< data addr, last in current page */
- BFI_SGE_DATA_LAST = 3, /*!< data address, last */
- BFI_SGE_LINK = 2, /*!< link address */
- BFI_SGE_PGDLEN = 2, /*!< cumulative data length for page */
-};
-
/**
* DMA addresses
*/
@@ -97,33 +83,6 @@ union bfi_addr_u {
} a32;
};
-/**
- * Scatter Gather Element
- */
-struct bfi_sge {
-#ifdef __BIGENDIAN
- u32 flags:2,
- rsvd:2,
- sg_len:28;
-#else
- u32 sg_len:28,
- rsvd:2,
- flags:2;
-#endif
- union bfi_addr_u sga;
-};
-
-/**
- * Scatter Gather Page
- */
-#define BFI_SGPG_DATA_SGES 7
-#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
-#define BFI_SGPG_RSVD_WD_LEN 8
-struct bfi_sgpg {
- struct bfi_sge sges[BFI_SGPG_SGES_MAX];
- u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
-};
-
/*
* Large Message structure - 128 Bytes size Msgs
*/
@@ -131,11 +90,6 @@ struct bfi_sgpg {
#define BFI_LMSG_PL_WSZ \
((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
-struct bfi_msg {
- struct bfi_mhdr mhdr;
- u32 pl[BFI_LMSG_PL_WSZ];
-};
-
/**
* Mailbox message structure
*/
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 2a587c5fdc20..3a6e7906149c 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -10,12 +10,17 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
#ifndef __BNA_H__
#define __BNA_H__
-#include "bfa_cs.h"
+#include "bfa_defs.h"
#include "bfa_ioc.h"
-#include "cna.h"
+#include "bfi_enet.h"
#include "bna_types.h"
extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
@@ -395,12 +400,8 @@ void bna_mod_init(struct bna *bna, struct bna_res_info *res_info);
void bna_uninit(struct bna *bna);
int bna_num_txq_set(struct bna *bna, int num_txq);
int bna_num_rxp_set(struct bna *bna, int num_rxp);
-void bna_stats_get(struct bna *bna);
-void bna_get_perm_mac(struct bna *bna, u8 *mac);
void bna_hw_stats_get(struct bna *bna);
-/* APIs for Rx */
-
/* APIs for RxF */
struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
@@ -521,11 +522,6 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
-void bna_rx_hds_enable(struct bna_rx *rx, struct bna_hds_config *hds_config,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-void bna_rx_hds_disable(struct bna_rx *rx,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-
/**
* ENET
*/
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 68a275d66fcf..26f5c5abfd1f 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -167,13 +167,14 @@ bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
* Store only if not set earlier, since BNAD can override the HW
* attributes
*/
- if (!ioceth->attr.num_txq)
+ if (!ioceth->attr.fw_query_complete) {
ioceth->attr.num_txq = ntohl(rsp->max_cfg);
- if (!ioceth->attr.num_rxp)
ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
- ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
- ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
- ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
+ ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
+ ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
+ ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
+ ioceth->attr.fw_query_complete = true;
+ }
bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
@@ -1693,6 +1694,16 @@ static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
bna_cb_ioceth_reset
};
+static void bna_attr_init(struct bna_ioceth *ioceth)
+{
+ ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
+ ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
+ ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
+ ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
+ ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
+ ioceth->attr.fw_query_complete = false;
+}
+
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
struct bna_res_info *res_info)
@@ -1738,6 +1749,8 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
ioceth->stop_cbfn = NULL;
ioceth->stop_cbarg = NULL;
+ bna_attr_init(ioceth);
+
bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
@@ -2036,7 +2049,8 @@ bna_uninit(struct bna *bna)
int
bna_num_txq_set(struct bna *bna, int num_txq)
{
- if (num_txq > 0 && (num_txq <= bna->ioceth.attr.num_txq)) {
+ if (bna->ioceth.attr.fw_query_complete &&
+ (num_txq <= bna->ioceth.attr.num_txq)) {
bna->ioceth.attr.num_txq = num_txq;
return BNA_CB_SUCCESS;
}
@@ -2047,7 +2061,8 @@ bna_num_txq_set(struct bna *bna, int num_txq)
int
bna_num_rxp_set(struct bna *bna, int num_rxp)
{
- if (num_rxp > 0 && (num_rxp <= bna->ioceth.attr.num_rxp)) {
+ if (bna->ioceth.attr.fw_query_complete &&
+ (num_rxp <= bna->ioceth.attr.num_rxp)) {
bna->ioceth.attr.num_rxp = num_rxp;
return BNA_CB_SUCCESS;
}
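
[Editor's note] The pattern in this file is to seed conservative defaults at init time and let exactly one firmware attribute-get response overwrite them, with fw_query_complete gating both the overwrite and any later user resizing. A minimal standalone sketch of that idiom (hypothetical struct and values, not the bna code itself):

    struct attrs {
            bool fw_query_complete;
            int num_txq, num_rxp;
    };

    static void attrs_init(struct attrs *a)    /* safe pre-firmware defaults */
    {
            a->num_txq = 1;
            a->num_rxp = 1;
            a->fw_query_complete = false;
    }

    static void attrs_from_fw(struct attrs *a, int max_cfg)
    {
            if (a->fw_query_complete)           /* only the first response wins */
                    return;
            a->num_txq = max_cfg;
            a->num_rxp = max_cfg;
            a->fw_query_complete = true;
    }

    static int attrs_set_txq(struct attrs *a, int n)
    {
            if (!a->fw_query_complete || n > a->num_txq)
                    return -1;                  /* reject until fw limits known */
            a->num_txq = n;
            return 0;
    }
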
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index 07bb79289824..dde8a463b8d9 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -30,6 +30,10 @@
* SW imposed limits
*
*/
+#define BFI_ENET_DEF_TXQ 1
+#define BFI_ENET_DEF_RXP 1
+#define BFI_ENET_DEF_UCAM 1
+#define BFI_ENET_DEF_RITSZ 1
#define BFI_ENET_MAX_MCAM 256
@@ -99,6 +103,7 @@
(_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK); \
(_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK); \
(_bna)->bits.halt_status_bits = __HFN_INT_LL_HALT; \
+ (_bna)->bits.halt_mask_bits = __HFN_INT_LL_HALT; \
}
#define ct2_reg_addr_init(_bna, _pcidev) \
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index 8a6da0c3cd89..242d7997ffb2 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -21,7 +21,6 @@
#include "cna.h"
#include "bna_hw_defs.h"
#include "bfa_cee.h"
-#include "bfi_enet.h"
#include "bfa_msgq.h"
/**
@@ -324,6 +323,7 @@ struct bna_qpt {
};
struct bna_attr {
+ bool fw_query_complete;
int num_txq;
int num_rxp;
int num_ucmac;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 181561c13c50..b7f96ab8b30c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -102,6 +102,28 @@ bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
}
}
+static u32
+bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
+ u32 index, u32 depth, struct sk_buff *skb, u32 frag)
+{
+ int j;
+ array[index].skb = NULL;
+
+ dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
+ skb_headlen(skb), DMA_TO_DEVICE);
+ dma_unmap_addr_set(&array[index], dma_addr, 0);
+ BNA_QE_INDX_ADD(index, 1, depth);
+
+ for (j = 0; j < frag; j++) {
+ dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
+ skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
+ dma_unmap_addr_set(&array[index], dma_addr, 0);
+ BNA_QE_INDX_ADD(index, 1, depth);
+ }
+
+ return index;
+}
+
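
[Editor's note] The new bnad_pci_unmap_skb() helper folds the duplicated head/fragment unmap loops from bnad_free_all_txbufs() and bnad_free_txbufs() into one routine, stepping the unmap index with BNA_QE_INDX_ADD. A sketch of the index arithmetic this relies on, assuming BNA_QE_INDX_ADD is the usual power-of-two masked advance (the ethtool ring checks later in this patch do require power-of-2 queue depths):

    /* assumed semantics: advance a ring index by num entries, wrapping
     * via mask rather than a modulo */
    #define QE_INDX_ADD(idx, num, depth) \
            ((idx) = ((idx) + (num)) & ((depth) - 1))

    /* e.g. depth = 8: index 6 advanced by 3 lands on (9 & 7) == 1 */
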
/*
* Frees all pending Tx Bufs
* At this point no activity is expected on the Q,
@@ -115,39 +137,20 @@ bnad_free_all_txbufs(struct bnad *bnad,
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb = NULL;
- int i;
+ int q;
unmap_array = unmap_q->unmap_array;
- unmap_cons = 0;
- while (unmap_cons < unmap_q->q_depth) {
- skb = unmap_array[unmap_cons].skb;
- if (!skb) {
- unmap_cons++;
+ for (q = 0; q < unmap_q->q_depth; q++) {
+ skb = unmap_array[q].skb;
+ if (!skb)
continue;
- }
- unmap_array[unmap_cons].skb = NULL;
-
- dma_unmap_single(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr), skb_headlen(skb),
- DMA_TO_DEVICE);
- dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
- if (++unmap_cons >= unmap_q->q_depth)
- break;
+ unmap_cons = q;
+ unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
+ unmap_cons, unmap_q->q_depth, skb,
+ skb_shinfo(skb)->nr_frags);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- dma_unmap_page(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr),
- skb_shinfo(skb)->frags[i].size,
- DMA_TO_DEVICE);
- dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
- 0);
- if (++unmap_cons >= unmap_q->q_depth)
- break;
- }
dev_kfree_skb_any(skb);
}
}
@@ -164,12 +167,11 @@ static u32
bnad_free_txbufs(struct bnad *bnad,
struct bna_tcb *tcb)
{
- u32 sent_packets = 0, sent_bytes = 0;
- u16 wis, unmap_cons, updated_hw_cons;
+ u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
+ u16 wis, updated_hw_cons;
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
- int i;
/*
* Just return if TX is stopped. This check is useful
@@ -195,32 +197,14 @@ bnad_free_txbufs(struct bnad *bnad,
while (wis) {
skb = unmap_array[unmap_cons].skb;
- unmap_array[unmap_cons].skb = NULL;
-
sent_packets++;
sent_bytes += skb->len;
wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
- dma_unmap_single(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr), skb_headlen(skb),
- DMA_TO_DEVICE);
- dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
- BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
-
- prefetch(&unmap_array[unmap_cons + 1]);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- prefetch(&unmap_array[unmap_cons + 1]);
-
- dma_unmap_page(&bnad->pcidev->dev,
- dma_unmap_addr(&unmap_array[unmap_cons],
- dma_addr),
- skb_shinfo(skb)->frags[i].size,
- DMA_TO_DEVICE);
- dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
- 0);
- BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
- }
+ unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
+ unmap_cons, unmap_q->q_depth, skb,
+ skb_shinfo(skb)->nr_frags);
+
dev_kfree_skb_any(skb);
}
@@ -383,14 +367,14 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
while (to_alloc--) {
- if (!wi_range) {
+ if (!wi_range)
BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
wi_range);
- }
skb = netdev_alloc_skb_ip_align(bnad->netdev,
rcb->rxq->buffer_size);
if (unlikely(!skb)) {
BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
+ rcb->rxq->rxbuf_alloc_failed++;
goto finishing;
}
unmap_array[unmap_prod].skb = skb;
@@ -535,16 +519,12 @@ next:
BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
- if (likely(ccb)) {
- if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
- bna_ib_ack(ccb->i_dbell, packets);
- bnad_refill_rxq(bnad, ccb->rcb[0]);
- if (ccb->rcb[1])
- bnad_refill_rxq(bnad, ccb->rcb[1]);
- } else {
- if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
- bna_ib_ack(ccb->i_dbell, 0);
- }
+ if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+ bna_ib_ack_disable_irq(ccb->i_dbell, packets);
+
+ bnad_refill_rxq(bnad, ccb->rcb[0]);
+ if (ccb->rcb[1])
+ bnad_refill_rxq(bnad, ccb->rcb[1]);
clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
@@ -552,37 +532,15 @@ next:
}
static void
-bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
-{
- if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
- return;
-
- bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
- bna_ib_ack(ccb->i_dbell, 0);
-}
-
-static void
-bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
-{
- unsigned long flags;
-
- /* Because of polling context */
- spin_lock_irqsave(&bnad->bna_lock, flags);
- bnad_enable_rx_irq_unsafe(ccb);
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
-}
-
-static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
struct napi_struct *napi = &rx_ctrl->napi;
if (likely(napi_schedule_prep(napi))) {
- bnad_disable_rx_irq(bnad, ccb);
__napi_schedule(napi);
+ rx_ctrl->rx_schedule++;
}
- BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
}
/* MSIX Rx Path Handler */
@@ -590,9 +548,11 @@ static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
struct bna_ccb *ccb = (struct bna_ccb *)data;
- struct bnad *bnad = ccb->bnad;
- bnad_netif_rx_schedule_poll(bnad, ccb);
+ if (ccb) {
+ ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
+ bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
+ }
return IRQ_HANDLED;
}
@@ -607,10 +567,11 @@ bnad_msix_mbox_handler(int irq, void *data)
unsigned long flags;
struct bnad *bnad = (struct bnad *)data;
- if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
- return IRQ_HANDLED;
-
spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ return IRQ_HANDLED;
+ }
bna_intr_status_get(&bnad->bna, intr_status);
@@ -633,15 +594,18 @@ bnad_isr(int irq, void *data)
struct bnad_rx_ctrl *rx_ctrl;
struct bna_tcb *tcb = NULL;
- if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
return IRQ_NONE;
+ }
bna_intr_status_get(&bnad->bna, intr_status);
- if (unlikely(!intr_status))
+ if (unlikely(!intr_status)) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
return IRQ_NONE;
-
- spin_lock_irqsave(&bnad->bna_lock, flags);
+ }
if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
bna_mbox_handler(&bnad->bna, intr_status);
@@ -1001,7 +965,7 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
mdelay(BNAD_TXRX_SYNC_MDELAY);
- for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
+ for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
rx_ctrl = &rx_info->rx_ctrl[i];
ccb = rx_ctrl->ccb;
if (!ccb)
@@ -1030,7 +994,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
int i;
int j;
- for (i = 0; i < BNAD_MAX_RXPS_PER_RX; i++) {
+ for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
rx_ctrl = &rx_info->rx_ctrl[i];
ccb = rx_ctrl->ccb;
if (!ccb)
@@ -1658,32 +1622,32 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
struct bnad_rx_ctrl *rx_ctrl =
container_of(napi, struct bnad_rx_ctrl, napi);
- struct bna_ccb *ccb;
- struct bnad *bnad;
+ struct bnad *bnad = rx_ctrl->bnad;
int rcvd = 0;
- ccb = rx_ctrl->ccb;
-
- bnad = ccb->bnad;
+ rx_ctrl->rx_poll_ctr++;
if (!netif_carrier_ok(bnad->netdev))
goto poll_exit;
- rcvd = bnad_poll_cq(bnad, ccb, budget);
- if (rcvd == budget)
+ rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
+ if (rcvd >= budget)
return rcvd;
poll_exit:
- napi_complete((napi));
+ napi_complete(napi);
+
+ rx_ctrl->rx_complete++;
- BNAD_UPDATE_CTR(bnad, netif_rx_complete);
+ if (rx_ctrl->ccb)
+ bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
- bnad_enable_rx_irq(bnad, ccb);
return rcvd;
}
+#define BNAD_NAPI_POLL_QUOTA 64
static void
-bnad_napi_enable(struct bnad *bnad, u32 rx_id)
+bnad_napi_init(struct bnad *bnad, u32 rx_id)
{
struct bnad_rx_ctrl *rx_ctrl;
int i;
@@ -1691,9 +1655,20 @@ bnad_napi_enable(struct bnad *bnad, u32 rx_id)
/* Initialize & enable NAPI */
for (i = 0; i < bnad->num_rxp_per_rx; i++) {
rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
-
netif_napi_add(bnad->netdev, &rx_ctrl->napi,
- bnad_napi_poll_rx, 64);
+ bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
+ }
+}
+
+static void
+bnad_napi_enable(struct bnad *bnad, u32 rx_id)
+{
+ struct bnad_rx_ctrl *rx_ctrl;
+ int i;
+
+ /* Initialize & enable NAPI */
+ for (i = 0; i < bnad->num_rxp_per_rx; i++) {
+ rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
napi_enable(&rx_ctrl->napi);
}
@@ -1732,6 +1707,9 @@ bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
bnad_tx_msix_unregister(bnad, tx_info,
bnad->num_txq_per_tx);
+ if (0 == tx_id)
+ tasklet_kill(&bnad->tx_free_tasklet);
+
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_tx_destroy(tx_info->tx);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -1739,9 +1717,6 @@ bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
tx_info->tx = NULL;
tx_info->tx_id = 0;
- if (0 == tx_id)
- tasklet_kill(&bnad->tx_free_tasklet);
-
bnad_tx_res_free(bnad, res_info);
}
@@ -1852,6 +1827,16 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}
+static void
+bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
+{
+ struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
+ int i;
+
+ for (i = 0; i < bnad->num_rxp_per_rx; i++)
+ rx_info->rx_ctrl[i].bnad = bnad;
+}
+
/* Called with mutex_lock(&bnad->conf_mutex) held */
void
bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
@@ -1860,23 +1845,23 @@ bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
unsigned long flags;
- int dim_timer_del = 0;
+ int to_del = 0;
if (!rx_info->rx)
return;
if (0 == rx_id) {
spin_lock_irqsave(&bnad->bna_lock, flags);
- dim_timer_del = bnad_dim_timer_running(bnad);
- if (dim_timer_del)
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
+ test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
+ to_del = 1;
+ }
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- if (dim_timer_del)
+ if (to_del)
del_timer_sync(&bnad->dim_timer);
}
- bnad_napi_disable(bnad, rx_id);
-
init_completion(&bnad->bnad_completions.rx_comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
@@ -1886,11 +1871,14 @@ bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
+ bnad_napi_disable(bnad, rx_id);
+
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_destroy(rx_info->rx);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
rx_info->rx = NULL;
+ rx_info->rx_id = 0;
bnad_rx_res_free(bnad, res_info);
}
@@ -1939,15 +1927,25 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
if (err)
return err;
+ bnad_rx_ctrl_init(bnad, rx_id);
+
/* Ask BNA to create one Rx object, supplying required resources */
spin_lock_irqsave(&bnad->bna_lock, flags);
rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
rx_info);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- if (!rx)
+ if (!rx) {
+ err = -ENOMEM;
goto err_return;
+ }
rx_info->rx = rx;
+ /*
+ * Init NAPI, so that state is set to NAPI_STATE_SCHED,
+ * so that IRQ handler cannot schedule NAPI at this point.
+ */
+ bnad_napi_init(bnad, rx_id);
+
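
[Editor's note] The reordering matters because netif_napi_add() leaves the instance in the scheduled/disabled state, so napi_schedule_prep() from a stray interrupt fails until napi_enable() runs last. A minimal sketch of the ordering this hunk establishes (hypothetical mini-driver, not bnad itself):

    netif_napi_add(netdev, &napi, my_poll, 64); /* NAPI starts disabled */
    err = request_irq(irq, my_isr, 0, "mydev", priv);
    /* my_isr may fire here; napi_schedule_prep(&napi) returns false
     * until polling is switched on as the final step: */
    napi_enable(&napi);
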
/* Register ISR for the Rx object */
if (intr_info->intr_type == BNA_INTR_T_MSIX) {
err = bnad_rx_msix_register(bnad, rx_info, rx_id,
@@ -1956,9 +1954,6 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
goto err_return;
}
- /* Enable NAPI */
- bnad_napi_enable(bnad, rx_id);
-
spin_lock_irqsave(&bnad->bna_lock, flags);
if (0 == rx_id) {
/* Set up Dynamic Interrupt Moderation Vector */
@@ -1975,6 +1970,9 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
bna_rx_enable(rx);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ /* Enable scheduling of NAPI */
+ bnad_napi_enable(bnad, rx_id);
+
return 0;
err_return:
@@ -2014,7 +2012,7 @@ bnad_rx_coalescing_timeo_set(struct bnad *bnad)
/*
* Called with bnad->bna_lock held
*/
-static int
+int
bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
{
int ret;
@@ -2034,7 +2032,7 @@ bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
}
/* Should be called with conf_lock held */
-static int
+int
bnad_enable_default_bcast(struct bnad *bnad)
{
struct bnad_rx_info *rx_info = &bnad->rx_info[0];
@@ -2059,15 +2057,13 @@ bnad_enable_default_bcast(struct bnad *bnad)
return 0;
}
-/* Called with bnad_conf_lock() held */
-static void
+/* Called with mutex_lock(&bnad->conf_mutex) held */
+void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
u16 vid;
unsigned long flags;
- BUG_ON(!(VLAN_N_VID == BFI_ENET_VLAN_ID_MAX));
-
for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
@@ -2176,9 +2172,6 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
int err;
- /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */
- BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
- skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err) {
@@ -2205,7 +2198,6 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
} else {
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
- BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
ipv6h->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
@@ -2227,7 +2219,7 @@ bnad_q_num_init(struct bnad *bnad)
int rxps;
rxps = min((uint)num_online_cpus(),
- (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
+ (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
if (!(bnad->cfg_flags & BNAD_CF_MSIX))
rxps = 1; /* INTx */
@@ -2356,15 +2348,16 @@ bnad_enable_msix(struct bnad *bnad)
ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
if (ret > 0) {
/* Not enough MSI-X vectors. */
+ pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
+ ret, bnad->msix_num);
spin_lock_irqsave(&bnad->bna_lock, flags);
/* ret = #of vectors that we got */
- bnad_q_num_adjust(bnad, ret, 0);
+ bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
+ (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
- + (bnad->num_rx
- * bnad->num_rxp_per_rx) +
+ bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
BNAD_MAILBOX_MSIX_VECTORS;
if (bnad->msix_num > ret)
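
[Editor's note] When pci_enable_msix() grants fewer vectors than requested, the vectors left after reserving the mailbox slot are now split evenly between Tx and Rx instead of being handed to a single queue-count adjustment. A worked example of the arithmetic, with BNAD_MAILBOX_MSIX_VECTORS = 1 (as defined in bnad.h) and a hypothetical grant of 9 vectors:

    /* requested bnad->msix_num = 16, pci_enable_msix() returns ret = 9 */
    tx_vecs = (9 - 1) / 2;  /* 4 vectors for Tx queues */
    rx_vecs = (9 - 1) / 2;  /* 4 vectors for Rx paths  */
    bnad_q_num_adjust(bnad, tx_vecs, rx_vecs);
    /* msix_num is then recomputed as BNAD_NUM_TXQ + BNAD_NUM_RXP + 1 */
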
@@ -2385,6 +2378,7 @@ bnad_enable_msix(struct bnad *bnad)
return;
intx_mode:
+ pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
kfree(bnad->msix_table);
bnad->msix_table = NULL;
@@ -2521,30 +2515,44 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
u32 unmap_prod, wis, wis_used, wi_range;
u32 vectors, vect_id, i, acked;
int err;
+ unsigned int len;
+ u32 gso_size;
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
dma_addr_t dma_addr;
struct bna_txq_entry *txqent;
u16 flags;
- if (unlikely
- (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
+ if (unlikely(skb->len <= ETH_HLEN)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
+ return NETDEV_TX_OK;
+ }
+ if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
+ return NETDEV_TX_OK;
+ }
+ if (unlikely(skb_headlen(skb) == 0)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
return NETDEV_TX_OK;
}
/*
* Takes care of the Tx that is scheduled between clearing the flag
- * and the netif_stop_all_queue() call.
+ * and the netif_tx_stop_all_queues() call.
*/
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
return NETDEV_TX_OK;
}
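
[Editor's note] This guard covers the window where the stack queued a packet after BNAD_TXQ_TX_STARTED was cleared but before netif_tx_stop_all_queues() took effect; such stragglers are dropped and now counted. An illustrative timeline of the window (editorial sketch, not code from the driver):

    /* CPU0 (teardown)                 CPU1 (xmit path)
     *  clear_bit(TXQ_TX_STARTED)
     *                                  ndo_start_xmit() entered
     *  netif_tx_stop_all_queues()
     *                                  test_bit(TXQ_TX_STARTED) -> false
     *                                  -> drop skb, bump tx_skb_stopping */
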
vectors = 1 + skb_shinfo(skb)->nr_frags;
- if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
+ if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
return NETDEV_TX_OK;
}
wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
@@ -2582,18 +2590,12 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
}
unmap_prod = unmap_q->producer_index;
- wis_used = 1;
- vect_id = 0;
flags = 0;
txq_prod = tcb->producer_index;
BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
- BUG_ON(!(wi_range <= tcb->q_depth));
txqent->hdr.wi.reserved = 0;
txqent->hdr.wi.num_vectors = vectors;
- txqent->hdr.wi.opcode =
- htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
- BNA_TXQ_WI_SEND));
if (vlan_tx_tag_present(skb)) {
vlan_tag = (u16) vlan_tx_tag_get(skb);
@@ -2608,62 +2610,93 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
txqent->hdr.wi.vlan_tag = htons(vlan_tag);
if (skb_is_gso(skb)) {
+ gso_size = skb_shinfo(skb)->gso_size;
+
+ if (unlikely(gso_size > netdev->mtu)) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
+ return NETDEV_TX_OK;
+ }
+ if (unlikely((gso_size + skb_transport_offset(skb) +
+ tcp_hdrlen(skb)) >= skb->len)) {
+ txqent->hdr.wi.opcode =
+ __constant_htons(BNA_TXQ_WI_SEND);
+ txqent->hdr.wi.lso_mss = 0;
+ BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
+ } else {
+ txqent->hdr.wi.opcode =
+ __constant_htons(BNA_TXQ_WI_SEND_LSO);
+ txqent->hdr.wi.lso_mss = htons(gso_size);
+ }
+
err = bnad_tso_prepare(bnad, skb);
- if (err) {
+ if (unlikely(err)) {
dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
return NETDEV_TX_OK;
}
- txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
txqent->hdr.wi.l4_hdr_size_n_offset =
htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
(tcp_hdrlen(skb) >> 2,
skb_transport_offset(skb)));
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 proto = 0;
-
+ } else {
+ txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
- if (skb->protocol == htons(ETH_P_IP))
- proto = ip_hdr(skb)->protocol;
- else if (skb->protocol == htons(ETH_P_IPV6)) {
- /* nexthdr may not be TCP immediately. */
- proto = ipv6_hdr(skb)->nexthdr;
+ if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
+ return NETDEV_TX_OK;
}
- if (proto == IPPROTO_TCP) {
- flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
- txqent->hdr.wi.l4_hdr_size_n_offset =
- htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
- (0, skb_transport_offset(skb)));
- BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 proto = 0;
- BUG_ON(!(skb_headlen(skb) >=
- skb_transport_offset(skb) + tcp_hdrlen(skb)));
-
- } else if (proto == IPPROTO_UDP) {
- flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
- txqent->hdr.wi.l4_hdr_size_n_offset =
- htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
- (0, skb_transport_offset(skb)));
-
- BNAD_UPDATE_CTR(bnad, udpcsum_offload);
+ if (skb->protocol == __constant_htons(ETH_P_IP))
+ proto = ip_hdr(skb)->protocol;
+ else if (skb->protocol ==
+ __constant_htons(ETH_P_IPV6)) {
+ /* nexthdr may not be TCP immediately. */
+ proto = ipv6_hdr(skb)->nexthdr;
+ }
+ if (proto == IPPROTO_TCP) {
+ flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
+
+ if (unlikely(skb_headlen(skb) <
+ skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
+ return NETDEV_TX_OK;
+ }
- BUG_ON(!(skb_headlen(skb) >=
- skb_transport_offset(skb) +
- sizeof(struct udphdr)));
- } else {
- err = skb_checksum_help(skb);
- BNAD_UPDATE_CTR(bnad, csum_help);
- if (err) {
+ } else if (proto == IPPROTO_UDP) {
+ flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+ txqent->hdr.wi.l4_hdr_size_n_offset =
+ htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+ (0, skb_transport_offset(skb)));
+
+ BNAD_UPDATE_CTR(bnad, udpcsum_offload);
+ if (unlikely(skb_headlen(skb) <
+ skb_transport_offset(skb) +
+ sizeof(struct udphdr))) {
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
+ return NETDEV_TX_OK;
+ }
+ } else {
dev_kfree_skb(skb);
- BNAD_UPDATE_CTR(bnad, csum_help_err);
+ BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
return NETDEV_TX_OK;
}
+ } else {
+ txqent->hdr.wi.l4_hdr_size_n_offset = 0;
}
- } else {
- txqent->hdr.wi.lso_mss = 0;
- txqent->hdr.wi.l4_hdr_size_n_offset = 0;
}
txqent->hdr.wi.flags = htons(flags);
@@ -2671,20 +2704,37 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
txqent->hdr.wi.frame_length = htonl(skb->len);
unmap_q->unmap_array[unmap_prod].skb = skb;
- BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
- txqent->vector[vect_id].length = htons(skb_headlen(skb));
+ len = skb_headlen(skb);
+ txqent->vector[0].length = htons(len);
dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
- BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+ BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+ vect_id = 0;
+ wis_used = 1;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
u16 size = frag->size;
+ if (unlikely(size == 0)) {
+ unmap_prod = unmap_q->producer_index;
+
+ unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
+ unmap_q->unmap_array,
+ unmap_prod, unmap_q->q_depth, skb,
+ i);
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
+ return NETDEV_TX_OK;
+ }
+
+ len += size;
+
if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
vect_id = 0;
if (--wi_range)
@@ -2695,10 +2745,10 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
wis_used = 0;
BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
txqent, wi_range);
- BUG_ON(!(wi_range <= tcb->q_depth));
}
wis_used++;
- txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+ txqent->hdr.wi_ext.opcode =
+ __constant_htons(BNA_TXQ_WI_EXTENSION);
}
BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
@@ -2711,6 +2761,18 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
}
+ if (unlikely(len != skb->len)) {
+ unmap_prod = unmap_q->producer_index;
+
+ unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
+ unmap_q->unmap_array, unmap_prod,
+ unmap_q->q_depth, skb,
+ skb_shinfo(skb)->nr_frags);
+ dev_kfree_skb(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
+ return NETDEV_TX_OK;
+ }
+
unmap_q->producer_index = unmap_prod;
BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
tcb->producer_index = txq_prod;
@@ -2721,6 +2783,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
bna_txq_prod_indx_doorbell(tcb);
+ smp_mb();
if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
tasklet_schedule(&bnad->tx_free_tasklet);
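
[Editor's note] The smp_mb() keeps the load of the DMA-updated hw_consumer_index from being reordered ahead of the doorbell write, so a completion that races the doorbell still gets the free-tasklet scheduled. An annotated restatement of the ordering (comments are editorial; the code is the hunk's own):

    bna_txq_prod_indx_doorbell(tcb);    /* publish new producer index   */
    smp_mb();                           /* barrier: the consumer-index  */
                                        /* load must not float above it */
    if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
            tasklet_schedule(&bnad->tx_free_tasklet);
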
@@ -2748,7 +2811,7 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
return stats;
}
-static void
+void
bnad_set_rx_mode(struct net_device *netdev)
{
struct bnad *bnad = netdev_priv(netdev);
@@ -2787,6 +2850,9 @@ bnad_set_rx_mode(struct net_device *netdev)
}
}
+ if (bnad->rx_info[0].rx == NULL)
+ goto unlock;
+
bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
if (!netdev_mc_empty(netdev)) {
@@ -2933,18 +2999,21 @@ bnad_netpoll(struct net_device *netdev)
bnad_isr(bnad->pcidev->irq, netdev);
bna_intx_enable(&bnad->bna, curr_mask);
} else {
+ /*
+ * Tx processing may happen in sending context, so no need
+ * to explicitly process completions here
+ */
+
+ /* Rx processing */
for (i = 0; i < bnad->num_rx; i++) {
rx_info = &bnad->rx_info[i];
if (!rx_info->rx)
continue;
for (j = 0; j < bnad->num_rxp_per_rx; j++) {
rx_ctrl = &rx_info->rx_ctrl[j];
- if (rx_ctrl->ccb) {
- bnad_disable_rx_irq(bnad,
- rx_ctrl->ccb);
+ if (rx_ctrl->ccb)
bnad_netif_rx_schedule_poll(bnad,
rx_ctrl->ccb);
- }
}
}
}
@@ -3126,7 +3195,7 @@ static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pcidev_id)
{
- bool using_dac = false;
+ bool using_dac;
int err;
struct bnad *bnad;
struct bna *bna;
@@ -3249,12 +3318,19 @@ bnad_pci_probe(struct pci_dev *pdev,
bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
err = -EIO;
}
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (err)
+ goto disable_ioceth;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
- if (err)
+ if (err) {
+ err = -EIO;
goto disable_ioceth;
+ }
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
@@ -3266,6 +3342,8 @@ bnad_pci_probe(struct pci_dev *pdev,
bnad_set_netdev_perm_addr(bnad);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ mutex_unlock(&bnad->conf_mutex);
+
/* Finally, register with net_device layer */
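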
err = register_netdev(netdev);
if (err) {
@@ -3274,6 +3352,8 @@ bnad_pci_probe(struct pci_dev *pdev,
}
set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
+ return 0;
+
probe_success:
mutex_unlock(&bnad->conf_mutex);
return 0;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 5b5451edf497..1c9328d564d2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -38,12 +38,12 @@
#define BNAD_TXQ_DEPTH 2048
#define BNAD_RXQ_DEPTH 2048
-#define BNAD_MAX_TXS 1
+#define BNAD_MAX_TX 1
#define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */
#define BNAD_TXQ_NUM 1
-#define BNAD_MAX_RXS 1
-#define BNAD_MAX_RXPS_PER_RX 16
+#define BNAD_MAX_RX 1
+#define BNAD_MAX_RXP_PER_RX 16
#define BNAD_MAX_RXQ_PER_RXP 2
/*
@@ -53,21 +53,25 @@
*/
struct bnad_rx_ctrl {
struct bna_ccb *ccb;
+ struct bnad *bnad;
unsigned long flags;
struct napi_struct napi;
+ u64 rx_intr_ctr;
+ u64 rx_poll_ctr;
+ u64 rx_schedule;
+ u64 rx_keep_poll;
+ u64 rx_complete;
};
#define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC
-#define BNAD_GET_TX_ID(_skb) (0)
-
/*
* GLOBAL #defines (CONSTANTS)
*/
#define BNAD_NAME "bna"
#define BNAD_NAME_LEN 64
-#define BNAD_VERSION "3.0.2.0"
+#define BNAD_VERSION "3.0.2.1"
#define BNAD_MAILBOX_MSIX_INDEX 0
#define BNAD_MAILBOX_MSIX_VECTORS 1
@@ -82,6 +86,10 @@ struct bnad_rx_ctrl {
#define BNAD_MAX_Q_DEPTH 0x10000
#define BNAD_MIN_Q_DEPTH 0x200
+#define BNAD_MAX_RXQ_DEPTH (BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq)
+/* keeping MAX TX and RX Q depth equal */
+#define BNAD_MAX_TXQ_DEPTH BNAD_MAX_RXQ_DEPTH
+
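
[Editor's note] Worked arithmetic for the new cap, assuming bnad_rxqs_per_cq is 2 (two RxQs feeding one CQ, consistent with BNAD_MAX_RXQ_PER_RXP above):

    /* BNAD_MAX_Q_DEPTH = 0x10000, bnad_rxqs_per_cq = 2 (assumed)
     * => BNAD_MAX_RXQ_DEPTH = 0x10000 / 2 = 0x8000 entries per RxQ,
     *    and BNAD_MAX_TXQ_DEPTH is deliberately kept equal to it */
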
#define BNAD_JUMBO_MTU 9000
#define BNAD_NETIF_WAKE_THRESHOLD 8
@@ -146,16 +154,26 @@ struct bnad_drv_stats {
u64 tcpcsum_offload;
u64 udpcsum_offload;
u64 csum_help;
- u64 csum_help_err;
+ u64 tx_skb_too_short;
+ u64 tx_skb_stopping;
+ u64 tx_skb_max_vectors;
+ u64 tx_skb_mss_too_long;
+ u64 tx_skb_tso_too_short;
+ u64 tx_skb_tso_prepare;
+ u64 tx_skb_non_tso_too_long;
+ u64 tx_skb_tcp_hdr;
+ u64 tx_skb_udp_hdr;
+ u64 tx_skb_csum_err;
+ u64 tx_skb_headlen_too_long;
+ u64 tx_skb_headlen_zero;
+ u64 tx_skb_frag_zero;
+ u64 tx_skb_len_mismatch;
u64 hw_stats_updates;
- u64 netif_rx_schedule;
- u64 netif_rx_complete;
u64 netif_rx_dropped;
u64 link_toggle;
u64 cee_toggle;
- u64 cee_up;
u64 rxp_info_alloc_failed;
u64 mbox_intr_disabled;
@@ -190,7 +208,7 @@ struct bnad_tx_info {
struct bnad_rx_info {
struct bna_rx *rx; /* 1:1 between rx_info & rx */
- struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX];
+ struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
u32 rx_id;
} ____cacheline_aligned;
@@ -234,8 +252,8 @@ struct bnad {
struct net_device *netdev;
/* Data path */
- struct bnad_tx_info tx_info[BNAD_MAX_TXS];
- struct bnad_rx_info rx_info[BNAD_MAX_RXS];
+ struct bnad_tx_info tx_info[BNAD_MAX_TX];
+ struct bnad_rx_info rx_info[BNAD_MAX_RX];
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
/*
@@ -255,8 +273,8 @@ struct bnad {
u8 tx_coalescing_timeo;
u8 rx_coalescing_timeo;
- struct bna_rx_config rx_config[BNAD_MAX_RXS];
- struct bna_tx_config tx_config[BNAD_MAX_TXS];
+ struct bna_rx_config rx_config[BNAD_MAX_RX];
+ struct bna_tx_config tx_config[BNAD_MAX_TX];
void __iomem *bar0; /* BAR0 address */
@@ -283,8 +301,8 @@ struct bnad {
/* Control path resources, memory & irq */
struct bna_res_info res_info[BNA_RES_T_MAX];
struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX];
- struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS];
- struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS];
+ struct bnad_tx_res_info tx_res_info[BNAD_MAX_TX];
+ struct bnad_rx_res_info rx_res_info[BNAD_MAX_RX];
struct bnad_completion bnad_completions;
@@ -314,6 +332,12 @@ extern u32 bnad_rxqs_per_cq;
*/
extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
/* Netdev entry point prototypes */
+extern void bnad_set_rx_mode(struct net_device *netdev);
+extern struct net_device_stats *bnad_get_netdev_stats(
+ struct net_device *netdev);
+extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
+extern int bnad_enable_default_bcast(struct bnad *bnad);
+extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
extern void bnad_set_ethtool_ops(struct net_device *netdev);
/* Configuration & setup */
@@ -345,15 +369,11 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
#define bnad_enable_rx_irq_unsafe(_ccb) \
{ \
- if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) {\
+ if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\
bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
(_ccb)->rx_coalescing_timeo); \
bna_ib_ack((_ccb)->i_dbell, 0); \
} \
}
-#define bnad_dim_timer_running(_bnad) \
- (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && \
- (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags))))
-
#endif /* __BNAD_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 1c19dcea83c2..48422244397c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -75,14 +75,25 @@ static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"tcpcsum_offload",
"udpcsum_offload",
"csum_help",
- "csum_help_err",
+ "tx_skb_too_short",
+ "tx_skb_stopping",
+ "tx_skb_max_vectors",
+ "tx_skb_mss_too_long",
+ "tx_skb_tso_too_short",
+ "tx_skb_tso_prepare",
+ "tx_skb_non_tso_too_long",
+ "tx_skb_tcp_hdr",
+ "tx_skb_udp_hdr",
+ "tx_skb_csum_err",
+ "tx_skb_headlen_too_long",
+ "tx_skb_headlen_zero",
+ "tx_skb_frag_zero",
+ "tx_skb_len_mismatch",
"hw_stats_updates",
- "netif_rx_schedule",
- "netif_rx_complete",
"netif_rx_dropped",
"link_toggle",
- "cee_up",
+ "cee_toggle",
"rxp_info_alloc_failed",
"mbox_intr_disabled",
@@ -201,6 +212,20 @@ static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"rad_rx_bcast_vlan",
"rad_rx_drops",
+ "rlb_rad_rx_frames",
+ "rlb_rad_rx_octets",
+ "rlb_rad_rx_vlan_frames",
+ "rlb_rad_rx_ucast",
+ "rlb_rad_rx_ucast_octets",
+ "rlb_rad_rx_ucast_vlan",
+ "rlb_rad_rx_mcast",
+ "rlb_rad_rx_mcast_octets",
+ "rlb_rad_rx_mcast_vlan",
+ "rlb_rad_rx_bcast",
+ "rlb_rad_rx_bcast_octets",
+ "rlb_rad_rx_bcast_vlan",
+ "rlb_rad_rx_drops",
+
"fc_rx_ucast_octets",
"fc_rx_ucast",
"fc_rx_ucast_vlan",
@@ -321,7 +346,7 @@ bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
struct bnad *bnad = netdev_priv(netdev);
unsigned long flags;
- int dim_timer_del = 0;
+ int to_del = 0;
if (coalesce->rx_coalesce_usecs == 0 ||
coalesce->rx_coalesce_usecs >
@@ -348,14 +373,17 @@ bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
} else {
if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
- dim_timer_del = bnad_dim_timer_running(bnad);
- if (dim_timer_del) {
+ if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
+ test_bit(BNAD_RF_DIM_TIMER_RUNNING,
+ &bnad->run_flags)) {
clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
&bnad->run_flags);
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
- del_timer_sync(&bnad->dim_timer);
- spin_lock_irqsave(&bnad->bna_lock, flags);
+ to_del = 1;
}
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (to_del)
+ del_timer_sync(&bnad->dim_timer);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
bnad_rx_coalescing_timeo_set(bnad);
}
}
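
[Editor's note] Both this hunk and the matching one in bnad_cleanup_rx() move del_timer_sync() outside the bna_lock critical section: the timer callback may itself take bna_lock, so waiting for it to finish while holding the lock risks deadlock. The resulting idiom, sketched minimally (hypothetical names):

    spin_lock_irqsave(&lock, flags);
    if (test_and_clear_bit(TIMER_RUNNING, &run_flags))
            to_del = 1;             /* decide under the lock...          */
    spin_unlock_irqrestore(&lock, flags);
    if (to_del)
            del_timer_sync(&timer); /* ...but wait with the lock dropped */
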
@@ -390,10 +418,10 @@ bnad_get_ringparam(struct net_device *netdev,
{
struct bnad *bnad = netdev_priv(netdev);
- ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
+ ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;
ringparam->rx_mini_max_pending = 0;
ringparam->rx_jumbo_max_pending = 0;
- ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;
+ ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;
ringparam->rx_pending = bnad->rxq_depth;
ringparam->rx_mini_max_pending = 0;
@@ -407,6 +435,7 @@ bnad_set_ringparam(struct net_device *netdev,
{
int i, current_err, err = 0;
struct bnad *bnad = netdev_priv(netdev);
+ unsigned long flags;
mutex_lock(&bnad->conf_mutex);
if (ringparam->rx_pending == bnad->rxq_depth &&
@@ -416,13 +445,13 @@ bnad_set_ringparam(struct net_device *netdev,
}
if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
- ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
+ ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
!BNA_POWER_OF_2(ringparam->rx_pending)) {
mutex_unlock(&bnad->conf_mutex);
return -EINVAL;
}
if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
- ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
+ ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
!BNA_POWER_OF_2(ringparam->tx_pending)) {
mutex_unlock(&bnad->conf_mutex);
return -EINVAL;
@@ -430,6 +459,11 @@ bnad_set_ringparam(struct net_device *netdev,
if (ringparam->rx_pending != bnad->rxq_depth) {
bnad->rxq_depth = ringparam->rx_pending;
+ if (!netif_running(netdev)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+ }
+
for (i = 0; i < bnad->num_rx; i++) {
if (!bnad->rx_info[i].rx)
continue;
@@ -437,10 +471,26 @@ bnad_set_ringparam(struct net_device *netdev,
current_err = bnad_setup_rx(bnad, i);
if (current_err && !err)
err = current_err;
+ if (!err)
+ bnad_restore_vlans(bnad, i);
+ }
+
+ if (!err && bnad->rx_info[0].rx) {
+ /* restore rx configuration */
+ bnad_enable_default_bcast(bnad);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ bnad_set_rx_mode(netdev);
}
}
if (ringparam->tx_pending != bnad->txq_depth) {
bnad->txq_depth = ringparam->tx_pending;
+ if (!netif_running(netdev)) {
+ mutex_unlock(&bnad->conf_mutex);
+ return 0;
+ }
+
for (i = 0; i < bnad->num_tx; i++) {
if (!bnad->tx_info[i].tx)
continue;
@@ -578,6 +628,16 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
sprintf(string, "cq%d_hw_producer_index",
q_num);
string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_intr", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_poll", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_schedule", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_keep_poll", q_num);
+ string += ETH_GSTRING_LEN;
+ sprintf(string, "cq%d_complete", q_num);
+ string += ETH_GSTRING_LEN;
q_num++;
}
}
@@ -660,7 +720,7 @@ static int
bnad_get_stats_count_locked(struct net_device *netdev)
{
struct bnad *bnad = netdev_priv(netdev);
- int i, j, count, rxf_active_num = 0, txf_active_num = 0;
+ int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0;
u32 bmap;
bmap = bna_tx_rid_mask(&bnad->bna);
@@ -718,6 +778,17 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
buf[bi++] = 0; /* ccb->consumer_index */
buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
ccb->hw_producer_index);
+
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_intr_ctr;
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_poll_ctr;
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_schedule;
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_keep_poll;
+ buf[bi++] = bnad->rx_info[i].
+ rx_ctrl[j].rx_complete;
}
}
for (i = 0; i < bnad->num_rx; i++) {
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 50fce15feacc..cb4874210aa3 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -21,21 +21,18 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
#include <linux/if_ether.h>
-#include <asm/page.h>
-#include <asm/io.h>
-#include <asm/string.h>
-
-#include <linux/list.h>
#define bfa_sm_fault(__event) do { \
- pr_err("SM Assertion failure: %s: %d: event = %d", __FILE__, __LINE__, \
- __event); \
+ pr_err("SM Assertion failure: %s: %d: event = %d\n", \
+ __FILE__, __LINE__, __event); \
} while (0)
extern char bfa_version[];