Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 486
1 file changed, 294 insertions(+), 192 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 99389c8dda21..f1f3ca65667a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,6 @@
/* bnx2x_cmn.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -32,46 +32,6 @@
/**
- * bnx2x_bz_fp - zero content of the fastpath structure.
- *
- * @bp: driver handle
- * @index: fastpath index to be zeroed
- *
- * Makes sure the contents of the bp->fp[index].napi is kept
- * intact.
- */
-static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
-{
- struct bnx2x_fastpath *fp = &bp->fp[index];
- struct napi_struct orig_napi = fp->napi;
- /* bzero bnx2x_fastpath contents */
- memset(fp, 0, sizeof(*fp));
-
- /* Restore the NAPI object as it has been already initialized */
- fp->napi = orig_napi;
-
- fp->bp = bp;
- fp->index = index;
- if (IS_ETH_FP(fp))
- fp->max_cos = bp->max_cos;
- else
- /* Special queues support only one CoS */
- fp->max_cos = 1;
-
- /*
- * set the tpa flag for each queue. The tpa flag determines the queue
- * minimal size so it must be set prior to queue memory allocation
- */
- fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
-
-#ifdef BCM_CNIC
- /* We don't want TPA on an FCoE L2 ring */
- if (IS_FCOE_FP(fp))
- fp->disable_tpa = 1;
-#endif
-}
-
-/**
* bnx2x_move_fp - move content of the fastpath structure.
*
* @bp: driver handle
@@ -115,11 +75,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* prefetch skb end pointer to speedup dev_kfree_skb() */
prefetch(&skb->end);
- DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
+ DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
txdata->txq_index, idx, tx_buf, skb);
/* unmap first bd */
- DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
@@ -150,7 +109,6 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* now free frags */
while (nbd > 0) {
- DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
@@ -160,10 +118,11 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* release skb */
WARN_ON(!skb);
- if (skb) {
+ if (likely(skb)) {
(*pkts_compl)++;
(*bytes_compl) += skb->len;
}
+
dev_kfree_skb_any(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
@@ -191,8 +150,8 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
pkt_cons = TX_BD(sw_cons);
- DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
- " pkt_cons %u\n",
+ DP(NETIF_MSG_TX_DONE,
+ "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
txdata->txq_index, hw_cons, sw_cons, pkt_cons);
bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
@@ -249,13 +208,11 @@ static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
fp->last_max_sge = idx;
}
-static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
- struct eth_fast_path_rx_cqe *fp_cqe)
+static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
+ u16 sge_len,
+ struct eth_end_agg_rx_cqe *cqe)
{
struct bnx2x *bp = fp->bp;
- u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
- le16_to_cpu(fp_cqe->len_on_bd)) >>
- SGE_PAGE_SHIFT;
u16 last_max, last_elem, first_elem;
u16 delta = 0;
u16 i;
@@ -266,15 +223,15 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
/* First mark all used pages */
for (i = 0; i < sge_len; i++)
BIT_VEC64_CLEAR_BIT(fp->sge_mask,
- RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
+ RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
- sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+ sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
/* Here we assume that the last SGE index is the biggest */
prefetch((void *)(fp->sge_mask));
bnx2x_update_last_max_sge(fp,
- le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+ le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
last_max = RX_SGE(fp->last_max_sge);
last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
@@ -368,6 +325,22 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
+ if (fp->mode == TPA_MODE_GRO) {
+ u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
+ tpa_info->full_page =
+ SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
+			/*
+			 * FW 7.2.16 BUG workaround:
+			 * if the SGE size is (exactly) a multiple of gro_size,
+			 * the FW will place one less frag on the SGE.
+			 * The calculation is done only for potentially
+			 * dangerous MTUs.
+			 */
+ if (unlikely(bp->gro_check))
+ if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
+ tpa_info->full_page -= gro_size;
+ tpa_info->gro_size = gro_size;
+ }
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
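The full_page arithmetic added above decides how much aggregated payload the driver expects per SGE buffer in GRO mode. A minimal standalone sketch of that calculation, where sge_buf_size stands in for SGE_PAGE_SIZE * PAGES_PER_SGE and all concrete numbers are illustrative assumptions rather than values from the patch:

/* Sketch only: full_page is the largest multiple of gro_size that fits in
 * one SGE buffer; with the FW 7.2.16 workaround active (gro_check), an
 * exact multiple is reduced by one gro_size because the FW places one
 * less frag on such an SGE.
 */
static unsigned int example_full_page(unsigned int sge_buf_size,
				      unsigned int gro_size,
				      int gro_check)
{
	unsigned int full_page = sge_buf_size / gro_size * gro_size;

	if (gro_check && !(sge_buf_size % gro_size))
		full_page -= gro_size;

	return full_page;
}

/* e.g. sge_buf_size = 8192, gro_size = 1448            -> full_page = 7240
 *      sge_buf_size = 8192, gro_size = 2048, gro_check -> full_page = 6144
 */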
@@ -424,25 +397,40 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 queue, struct sk_buff *skb,
+ struct bnx2x_agg_info *tpa_info,
+ u16 pages,
+ struct sk_buff *skb,
struct eth_end_agg_rx_cqe *cqe,
u16 cqe_idx)
{
struct sw_rx_page *rx_pg, old_rx_pg;
- u32 i, frag_len, frag_size, pages;
- int err;
- int j;
- struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ u32 i, frag_len, frag_size;
+ int err, j, frag_id = 0;
u16 len_on_bd = tpa_info->len_on_bd;
+ u16 full_page = 0, gro_size = 0;
frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
- pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
+
+ if (fp->mode == TPA_MODE_GRO) {
+ gro_size = tpa_info->gro_size;
+ full_page = tpa_info->full_page;
+ }
/* This is needed in order to enable forwarding support */
- if (frag_size)
+ if (frag_size) {
skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
tpa_info->parsing_flags, len_on_bd);
+ /* set for GRO */
+ if (fp->mode == TPA_MODE_GRO)
+ skb_shinfo(skb)->gso_type =
+ (GET_FLAG(tpa_info->parsing_flags,
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+ PRS_FLAG_OVERETH_IPV6) ?
+ SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
+ }
+
+
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
@@ -459,7 +447,12 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* FW gives the indices of the SGE as if the ring is an array
(meaning that "next" element will consume 2 indices) */
- frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
+ if (fp->mode == TPA_MODE_GRO)
+ frag_len = min_t(u32, frag_size, (u32)full_page);
+ else /* LRO */
+ frag_len = min_t(u32, frag_size,
+ (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
+
rx_pg = &fp->rx_page_ring[sge_idx];
old_rx_pg = *rx_pg;
@@ -475,9 +468,21 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(&old_rx_pg, mapping),
SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-
/* Add one frag and update the appropriate fields in the skb */
- skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+ if (fp->mode == TPA_MODE_LRO)
+ skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+ else { /* GRO */
+ int rem;
+ int offset = 0;
+ for (rem = frag_len; rem > 0; rem -= gro_size) {
+ int len = rem > gro_size ? gro_size : rem;
+ skb_fill_page_desc(skb, frag_id++,
+ old_rx_pg.page, offset, len);
+ if (offset)
+ get_page(old_rx_pg.page);
+ offset += len;
+ }
+ }
skb->data_len += frag_len;
skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
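As a worked example of the GRO branch above (the numbers are illustrative, not taken from the patch): with frag_len = 7240 and gro_size = 1448 the loop adds five page fragments of 1448 bytes at offsets 0, 1448, 2896, 4344 and 5792, all backed by the same SGE page, and get_page() is taken for the four fragments after the first, since the first one reuses the reference handed over from the rx page ring. Splitting each SGE buffer into gro_size-sized fragments is what lets the stack see MSS-sized segments in GRO mode instead of the single large fragments used for LRO.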
@@ -489,18 +494,17 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return 0;
}
-static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 queue, struct eth_end_agg_rx_cqe *cqe,
- u16 cqe_idx)
+static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct bnx2x_agg_info *tpa_info,
+ u16 pages,
+ struct eth_end_agg_rx_cqe *cqe,
+ u16 cqe_idx)
{
- struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
- u32 pad = tpa_info->placement_offset;
+ u8 pad = tpa_info->placement_offset;
u16 len = tpa_info->len_on_bd;
struct sk_buff *skb = NULL;
- u8 *data = rx_buf->data;
- /* alloc new skb */
- u8 *new_data;
+ u8 *new_data, *data = rx_buf->data;
u8 old_tpa_state = tpa_info->tpa_state;
tpa_info->tpa_state = BNX2X_TPA_STOP;
@@ -525,8 +529,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > fp->rx_buf_size) {
- BNX2X_ERR("skb_put is about to fail... "
- "pad %d len %d rx_buf_size %d\n",
+ BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
pad, len, fp->rx_buf_size);
bnx2x_panic();
return;
@@ -540,13 +543,14 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+ if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
+ skb, cqe, cqe_idx)) {
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
napi_gro_receive(&fp->napi, skb);
} else {
- DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
- " - dropping packet!\n");
+ DP(NETIF_MSG_RX_STATUS,
+ "Failed to allocate new pages - dropping packet!\n");
dev_kfree_skb_any(skb);
}
@@ -605,7 +609,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
struct eth_fast_path_rx_cqe *cqe_fp;
u8 cqe_fp_flags;
enum eth_rx_cqe_type cqe_fp_type;
- u16 len, pad;
+ u16 len, pad, queue;
u8 *data;
#ifdef BNX2X_STOP_ON_ERROR
@@ -622,28 +626,32 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
cqe_fp_flags = cqe_fp->type_error_flags;
cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
- DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
- " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
+ DP(NETIF_MSG_RX_STATUS,
+ "CQE type %x err %x status %x queue %x vlan %x len %u\n",
+ CQE_TYPE(cqe_fp_flags),
cqe_fp_flags, cqe_fp->status_flags,
le32_to_cpu(cqe_fp->rss_hash_result),
- le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
+ le16_to_cpu(cqe_fp->vlan_tag),
+ le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
/* is this a slowpath msg? */
if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
bnx2x_sp_event(fp, cqe);
goto next_cqe;
}
+
rx_buf = &fp->rx_buf_ring[bd_cons];
data = rx_buf->data;
if (!CQE_TYPE_FAST(cqe_fp_type)) {
+ struct bnx2x_agg_info *tpa_info;
+ u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
/* sanity check */
if (fp->disable_tpa &&
(CQE_TYPE_START(cqe_fp_type) ||
CQE_TYPE_STOP(cqe_fp_type)))
- BNX2X_ERR("START/STOP packet while "
- "disable_tpa type %x\n",
+ BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
CQE_TYPE(cqe_fp_type));
#endif
@@ -656,28 +664,38 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
bnx2x_tpa_start(fp, queue,
bd_cons, bd_prod,
cqe_fp);
+
goto next_rx;
- } else {
- u16 queue =
- cqe->end_agg_cqe.queue_index;
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_stop on queue %d\n",
- queue);
- bnx2x_tpa_stop(bp, fp, queue,
- &cqe->end_agg_cqe,
- comp_ring_cons);
+ }
+ queue = cqe->end_agg_cqe.queue_index;
+ tpa_info = &fp->tpa_info[queue];
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_stop on queue %d\n",
+ queue);
+
+ frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
+ tpa_info->len_on_bd;
+
+ if (fp->mode == TPA_MODE_GRO)
+ pages = (frag_size + tpa_info->full_page - 1) /
+ tpa_info->full_page;
+ else
+ pages = SGE_PAGE_ALIGN(frag_size) >>
+ SGE_PAGE_SHIFT;
+
+ bnx2x_tpa_stop(bp, fp, tpa_info, pages,
+ &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
- if (bp->panic)
- return 0;
+ if (bp->panic)
+ return 0;
#endif
- bnx2x_update_sge_prod(fp, cqe_fp);
- goto next_cqe;
- }
+ bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
+ goto next_cqe;
}
/* non TPA */
- len = le16_to_cpu(cqe_fp->pkt_len);
+ len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
pad = cqe_fp->placement_offset;
dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
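For illustration of the pages calculation above (made-up numbers again): in GRO mode, frag_size = 20000 with full_page = 7240 gives pages = (20000 + 7240 - 1) / 7240 = 3, i.e. the aggregation is expected to span three SGE buffers, each contributing at most full_page bytes of payload. In LRO mode the same frag_size is simply rounded up to whole SGE pages by SGE_PAGE_ALIGN() >> SGE_PAGE_SHIFT, which is the computation bnx2x_update_sge_prod() previously did internally before it was changed to take the count as a parameter.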
@@ -687,7 +705,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
prefetch(data + pad); /* speedup eth_type_trans() */
/* is this an error packet? */
if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
- DP(NETIF_MSG_RX_ERR,
+ DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR flags %x rx packet %u\n",
cqe_fp_flags, sw_comp_cons);
fp->eth_q_stats.rx_err_discard_pkt++;
@@ -701,7 +719,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
(len <= RX_COPY_THRESH)) {
skb = netdev_alloc_skb_ip_align(bp->dev, len);
if (skb == NULL) {
- DP(NETIF_MSG_RX_ERR,
+ DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
fp->eth_q_stats.rx_skb_alloc_failed++;
goto reuse_rx;
@@ -722,9 +740,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
}
skb_reserve(skb, pad);
} else {
- DP(NETIF_MSG_RX_ERR,
- "ERROR packet dropped because "
- "of alloc failure\n");
+ DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
+ "ERROR packet dropped because of alloc failure\n");
fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
@@ -793,8 +810,8 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
struct bnx2x *bp = fp->bp;
u8 cos;
- DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
- "[fp %d fw_sd %d igusb %d]\n",
+ DP(NETIF_MSG_INTR,
+ "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
fp->index, fp->fw_sb_id, fp->igu_sb_id);
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
@@ -1007,10 +1024,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
GFP_ATOMIC);
if (!first_buf->data) {
- BNX2X_ERR("Failed to allocate TPA "
- "skb pool for queue[%d] - "
- "disabling TPA on this "
- "queue!\n", j);
+ BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
+ j);
bnx2x_free_tpa_pool(bp, fp, i);
fp->disable_tpa = 1;
break;
@@ -1030,10 +1045,10 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
- BNX2X_ERR("was only able to allocate "
- "%d rx sges\n", i);
- BNX2X_ERR("disabling TPA for "
- "queue[%d]\n", j);
+ BNX2X_ERR("was only able to allocate %d rx sges\n",
+ i);
+ BNX2X_ERR("disabling TPA for queue[%d]\n",
+ j);
/* Cleanup already allocated elements */
bnx2x_free_rx_sge_range(bp, fp,
ring_prod);
@@ -1188,8 +1203,8 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
for_each_eth_queue(bp, i) {
if (nvecs == offset)
return;
- DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
- "irq\n", i, bp->msix_table[offset].vector);
+ DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
+ i, bp->msix_table[offset].vector);
free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
}
@@ -1211,21 +1226,21 @@ int bnx2x_enable_msix(struct bnx2x *bp)
int msix_vec = 0, i, rc, req_cnt;
bp->msix_table[msix_vec].entry = msix_vec;
- DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
+ BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
bp->msix_table[0].entry);
msix_vec++;
#ifdef BCM_CNIC
bp->msix_table[msix_vec].entry = msix_vec;
- DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
+ BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
msix_vec++;
#endif
/* We need separate vectors for ETH queues only (not FCoE) */
for_each_eth_queue(bp, i) {
bp->msix_table[msix_vec].entry = msix_vec;
- DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
- "(fastpath #%u)\n", msix_vec, msix_vec, i);
+ BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
+ msix_vec, msix_vec, i);
msix_vec++;
}
@@ -1241,14 +1256,12 @@ int bnx2x_enable_msix(struct bnx2x *bp)
/* how less vectors we will have? */
int diff = req_cnt - rc;
- DP(NETIF_MSG_IFUP,
- "Trying to use less MSI-X vectors: %d\n", rc);
+ BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
if (rc) {
- DP(NETIF_MSG_IFUP,
- "MSI-X is not attainable rc %d\n", rc);
+ BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
return rc;
}
/*
@@ -1256,13 +1269,13 @@ int bnx2x_enable_msix(struct bnx2x *bp)
*/
bp->num_queues -= diff;
- DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
+ BNX2X_DEV_INFO("New queue configuration set: %d\n",
bp->num_queues);
} else if (rc) {
/* fall to INTx if not enough memory */
if (rc == -ENOMEM)
bp->flags |= DISABLE_MSI_FLAG;
- DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
+ BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
return rc;
}
@@ -1305,8 +1318,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
i = BNX2X_NUM_ETH_QUEUES(bp);
offset = 1 + CNIC_PRESENT;
- netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
- " ... fp[%d] %d\n",
+ netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
bp->msix_table[0].vector,
0, bp->msix_table[offset].vector,
i - 1, bp->msix_table[offset + i - 1].vector);
@@ -1320,7 +1332,7 @@ int bnx2x_enable_msi(struct bnx2x *bp)
rc = pci_enable_msi(bp->pdev);
if (rc) {
- DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
+ BNX2X_DEV_INFO("MSI is not attainable\n");
return -1;
}
bp->flags |= USING_MSI_FLAG;
@@ -1441,8 +1453,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
}
#ifdef BCM_CNIC
- /* override in ISCSI SD mod */
- if (IS_MF_ISCSI_SD(bp))
+ /* override in STORAGE SD mode */
+ if (IS_MF_STORAGE_SD(bp))
bp->num_queues = 1;
#endif
/* Add special queues */
@@ -1497,7 +1509,7 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
return rc;
}
- DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
+ DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
tx, rx);
return rc;
@@ -1562,7 +1574,7 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
{
- struct bnx2x_config_rss_params params = {0};
+ struct bnx2x_config_rss_params params = {NULL};
int i;
/* Although RSS is meaningless when there is a single HW queue we
@@ -1625,7 +1637,7 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
/* Prepare parameters for function state transitions */
__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
@@ -1646,7 +1658,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
{
int rc;
unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
- struct bnx2x_mcast_ramrod_params rparam = {0};
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
/***************** Cleanup MACs' object first *************************/
@@ -1678,8 +1690,8 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
/* Add a DEL command... */
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
if (rc < 0)
- BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
- "object: %d\n", rc);
+ BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
+ rc);
/* ...and wait until all pending commands are cleared */
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
@@ -1717,8 +1729,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
int i, rc;
#ifdef BNX2X_STOP_ON_ERROR
- if (unlikely(bp->panic))
+ if (unlikely(bp->panic)) {
+ BNX2X_ERR("Can't load NIC when there is panic\n");
return -EPERM;
+ }
#endif
bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
@@ -1738,6 +1752,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
* allocated only once, fp index, max_cos, bp pointer.
* Also set fp->disable_tpa.
*/
+ DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
for_each_queue(bp, i)
bnx2x_bz_fp(bp, i);
@@ -1766,12 +1781,27 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_napi_enable(bp);
+ /* set pf load just before approaching the MCP */
+ bnx2x_set_pf_load(bp);
+
/* Send LOAD_REQUEST command to MCP
* Returns the type of LOAD command:
* if it is the first port to be initialized
* common blocks should be initialized, otherwise - not
*/
if (!BP_NOMCP(bp)) {
+ /* init fw_seq */
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+
+ /* Get current FW pulse sequence */
+ bp->fw_drv_pulse_wr_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK);
+ BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+
load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");

@@ -1779,9 +1809,33 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error1);
}
if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+ BNX2X_ERR("Driver load refused\n");
rc = -EBUSY; /* other port in diagnostic mode */
LOAD_ERROR_EXIT(bp, load_error1);
}
+ if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
+ load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
+ /* build FW version dword */
+ u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
+ (BCM_5710_FW_MINOR_VERSION << 8) +
+ (BCM_5710_FW_REVISION_VERSION << 16) +
+ (BCM_5710_FW_ENGINEERING_VERSION << 24);
+
+ /* read loaded FW from chip */
+ u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+
+ DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x",
+ loaded_fw, my_fw);
+
+ /* abort nic load if version mismatch */
+ if (my_fw != loaded_fw) {
+ BNX2X_ERR("bnx2x with FW %x already loaded, "
+ "which mismatches my %x FW. aborting",
+ loaded_fw, my_fw);
+ rc = -EBUSY;
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
+ }
} else {
int path = BP_PATH(bp);
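The version check above packs the driver's bundled FW version into a single dword, one byte per component with the major version in the low byte, and compares it with the version already running in the chip (read back from XSEM_REG_PRAM). A sketch of the packing with illustrative numbers:

/* Sketch only: mirrors the my_fw construction above. */
static u32 example_fw_dword(u32 major, u32 minor, u32 rev, u32 eng)
{
	return major + (minor << 8) + (rev << 16) + (eng << 24);
}

/* e.g. FW 7.2.16, engineering 0 -> 0x00100207 */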
@@ -1816,7 +1870,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
} else
bp->port.pmf = 0;
- DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+ DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
/* Init Function state controlling object */
bnx2x__init_func_obj(bp);
@@ -1832,6 +1886,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Connect to IRQs */
rc = bnx2x_setup_irqs(bp);
if (rc) {
+ BNX2X_ERR("IRQs setup failed\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
}
@@ -1882,21 +1937,27 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
for_each_nondefault_queue(bp, i) {
rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
- if (rc)
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
LOAD_ERROR_EXIT(bp, load_error4);
+ }
}
rc = bnx2x_init_rss_pf(bp);
- if (rc)
+ if (rc) {
+ BNX2X_ERR("PF RSS init failed\n");
LOAD_ERROR_EXIT(bp, load_error4);
+ }
/* Now when Clients are configured we are ready to work */
bp->state = BNX2X_STATE_OPEN;
/* Configure a ucast MAC */
rc = bnx2x_set_eth_mac(bp, true);
- if (rc)
+ if (rc) {
+ BNX2X_ERR("Setting Ethernet MAC failed\n");
LOAD_ERROR_EXIT(bp, load_error4);
+ }
if (bp->pending_max) {
bnx2x_update_max_mf_config(bp, bp->pending_max);
@@ -1948,7 +2009,15 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
- bnx2x_inc_load_cnt(bp);
+
+ /* mark driver is loaded in shmem2 */
+ if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 val;
+ val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
+ DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
/* Wait for all pending SP commands to complete */
if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
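The shmem2 update above is a read-modify-write of this function's drv_capabilities_flag word: on load it sets both LOADED_SUPPORTED and LOADED_L2, while the matching hunk in bnx2x_nic_unload() further down clears only LOADED_L2 and leaves LOADED_SUPPORTED advertised. A condensed sketch of the pattern (the SHMEM2 accessors and flag names are from the patch; the 'loading' flag and the omitted SHMEM2_HAS() guard are simplifications):

	/* Sketch only: per-function capabilities word, guarded in the
	 * real code by SHMEM2_HAS(bp, drv_capabilities_flag).
	 */
	u32 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);

	if (loading)
		val |= DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
		       DRV_FLAGS_CAPABILITIES_LOADED_L2;
	else
		val &= ~DRV_FLAGS_CAPABILITIES_LOADED_L2;

	SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], val);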
@@ -1988,6 +2057,8 @@ load_error2:
bp->port.pmf = 0;
load_error1:
bnx2x_napi_disable(bp);
+ /* clear pf_load status, as it was already set */
+ bnx2x_clear_pf_load(bp);
load_error0:
bnx2x_free_mem(bp);
@@ -2001,6 +2072,14 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
int i;
bool global = false;
+ /* mark driver is unloaded in shmem2 */
+ if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 val;
+ val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+
if ((bp->state == BNX2X_STATE_CLOSED) ||
(bp->state == BNX2X_STATE_ERROR)) {
/* We can get here if the driver has been unloaded
@@ -2015,8 +2094,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bnx2x_release_leader_lock(bp);
smp_mb();
- DP(NETIF_MSG_HW, "Releasing a leadership...\n");
-
+ DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
+ BNX2X_ERR("Can't unload in closed or error state\n");
return -EINVAL;
}
@@ -2045,6 +2124,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bnx2x_drv_pulse(bp);
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_save_statistics(bp);
/* Cleanup the chip if needed */
if (unload_mode != UNLOAD_RECOVERY)
@@ -2108,7 +2188,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* The last driver must disable a "close the gate" if there is no
* parity attention or "process kill" pending.
*/
- if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
+ if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
bnx2x_disable_close_the_gate(bp);
return 0;
@@ -2120,7 +2200,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
/* If there is no power capability, silently succeed */
if (!bp->pm_cap) {
- DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
+ BNX2X_DEV_INFO("No power capability. Breaking.\n");
return 0;
}
@@ -2161,6 +2241,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
break;
default:
+ dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
return -EINVAL;
}
return 0;
@@ -2230,7 +2311,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
napi_complete(napi);
/* Re-enable interrupts */
- DP(NETIF_MSG_HW,
+ DP(NETIF_MSG_RX_STATUS,
"Update index to %d\n", fp->fp_hc_idx);
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
le16_to_cpu(fp->fp_hc_idx),
@@ -2264,9 +2345,8 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
h_tx_bd->nbd = cpu_to_le16(nbd);
h_tx_bd->nbytes = cpu_to_le16(hlen);
- DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
- "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
- h_tx_bd->addr_lo, h_tx_bd->nbd);
+ DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
+ h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
/* now get a new data BD
* (after the pbd) and fill it */
@@ -2406,8 +2486,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
exit_lbl:
if (unlikely(to_copy))
DP(NETIF_MSG_TX_QUEUED,
- "Linearization IS REQUIRED for %s packet. "
- "num_frags %d hlen %d first_bd_sz %d\n",
+ "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
(xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
@@ -2615,7 +2694,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
/* enable this debug print to view the transmission queue being used
- DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
+ DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
txq_index, fp_index, txdata_index); */
/* locate the fastpath and the txdata */
@@ -2623,8 +2702,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
txdata = &fp->txdata[txdata_index];
/* enable this debug print to view the tranmission details
- DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
- " tx_data ptr %p fp pointer %p\n",
+ DP(NETIF_MSG_TX_QUEUED,
+ "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
txdata->cid, fp_index, txdata_index, txdata, fp); */
if (unlikely(bnx2x_tx_avail(bp, txdata) <
@@ -2635,8 +2714,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
- "protocol(%x,%x) gso type %x xmit_type %x\n",
+ DP(NETIF_MSG_TX_QUEUED,
+ "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
@@ -2658,8 +2737,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Statistics of linearization */
bp->lin_cnt++;
if (skb_linearize(skb) != 0) {
- DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
- "silently dropping this SKB\n");
+ DP(NETIF_MSG_TX_QUEUED,
+ "SKB linearization failed - silently dropping this SKB\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2669,8 +2748,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = dma_map_single(&bp->pdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
- "silently dropping this SKB\n");
+ DP(NETIF_MSG_TX_QUEUED,
+ "SKB mapping failed - silently dropping this SKB\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2765,8 +2844,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
pkt_size = tx_start_bd->nbytes;
- DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
- " nbytes %d flags %x vlan %x\n",
+ DP(NETIF_MSG_TX_QUEUED,
+ "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
tx_start_bd->bd_flags.as_bitfield,
@@ -2809,8 +2888,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
unsigned int pkts_compl = 0, bytes_compl = 0;
- DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
- "dropping packet...\n");
+ DP(NETIF_MSG_TX_QUEUED,
+ "Unable to map page - dropping packet...\n");
/* we need unmap all buffers already mapped
* for this SKB;
@@ -2866,8 +2945,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pbd_e1x)
DP(NETIF_MSG_TX_QUEUED,
- "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
- " tcp_flags %x xsum %x seq %u hlen %u\n",
+ "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
@@ -2943,23 +3021,22 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
/* requested to support too many traffic classes */
if (num_tc > bp->max_cos) {
- DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
- " requested: %d. max supported is %d\n",
- num_tc, bp->max_cos);
+ BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
+ num_tc, bp->max_cos);
return -EINVAL;
}
/* declare amount of supported traffic classes */
if (netdev_set_num_tc(dev, num_tc)) {
- DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
- num_tc);
+ BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
return -EINVAL;
}
/* configure priority to traffic class mapping */
for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
- DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "mapping priority %d to tc %d\n",
prio, bp->prio_to_cos[prio]);
}
@@ -2979,7 +3056,8 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
count = BNX2X_NUM_ETH_QUEUES(bp);
offset = cos * MAX_TXQS_PER_COS;
netdev_set_tc_queue(dev, cos, count, offset);
- DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "mapping tc %d to offset %d count %d\n",
cos, offset, count);
}
@@ -2993,12 +3071,16 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
- if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
+ if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
+ BNX2X_ERR("Requested MAC address is not valid\n");
return -EINVAL;
+ }
#ifdef BCM_CNIC
- if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
+ if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) {
+ BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
return -EINVAL;
+ }
#endif
if (netif_running(dev)) {
@@ -3007,6 +3089,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev))
@@ -3071,7 +3154,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
- DP(BNX2X_MSG_SP,
+ DP(NETIF_MSG_IFDOWN,
"freeing tx memory of fp %d cos %d cid %d\n",
fp_index, cos, txdata->cid);
@@ -3116,15 +3199,22 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
int rx_ring_size = 0;
#ifdef BCM_CNIC
- if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
+ if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) {
rx_ring_size = MIN_RX_SIZE_NONTPA;
bp->rx_ring_size = rx_ring_size;
} else
#endif
if (!bp->rx_ring_size) {
+ u32 cfg = SHMEM_RD(bp,
+ dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+		/* Decrease ring size for 1G functions */
+ if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+ PORT_HW_CFG_NET_SERDES_IF_SGMII)
+ rx_ring_size /= 10;
+
/* allocate at least number of buffers required by FW */
rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
MIN_RX_SIZE_TPA, rx_ring_size);
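As a worked example of the sizing above (illustrative numbers only): if MAX_RX_AVAIL / BNX2X_NUM_RX_QUEUES(bp) comes out to 4000 buffers and the port's default_cfg reports an SGMII (1G) SerDes interface, the ring is first cut to 400, and the following max_t() then raises it back up to the FW minimum, MIN_RX_SIZE_TPA, or MIN_RX_SIZE_NONTPA when TPA is disabled, if 400 falls below that floor.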
@@ -3163,8 +3253,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
- DP(BNX2X_MSG_SP, "allocating tx memory of "
- "fp %d cos %d\n",
+ DP(NETIF_MSG_IFUP,
+ "allocating tx memory of fp %d cos %d\n",
index, cos);
BNX2X_ALLOC(txdata->tx_buf_ring,
@@ -3401,6 +3491,7 @@ int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
cp->fcoe_wwn_port_name_lo);
break;
default:
+ BNX2X_ERR("Wrong WWN type requested - %d\n", type);
return -EINVAL;
}
@@ -3414,13 +3505,15 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
struct bnx2x *bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- pr_err("Handling parity error recovery. Try again later\n");
+ BNX2X_ERR("Can't perform change MTU during parity recovery\n");
return -EAGAIN;
}
if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
- ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
+ ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
+ BNX2X_ERR("Can't support requested MTU size\n");
return -EINVAL;
+ }
/* This does not race with packet allocation
* because the actual alloc size is
@@ -3428,17 +3521,21 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
*/
dev->mtu = new_mtu;
+ bp->gro_check = bnx2x_need_gro_check(new_mtu);
+
return bnx2x_reload_if_running(dev);
}
netdev_features_t bnx2x_fix_features(struct net_device *dev,
- netdev_features_t features)
+ netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
/* TPA requires Rx CSUM offloading */
- if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
+ if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
features &= ~NETIF_F_LRO;
+ features &= ~NETIF_F_GRO;
+ }
return features;
}
@@ -3454,6 +3551,11 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
else
flags &= ~TPA_ENABLE_FLAG;
+ if (features & NETIF_F_GRO)
+ flags |= GRO_ENABLE_FLAG;
+ else
+ flags &= ~GRO_ENABLE_FLAG;
+
if (features & NETIF_F_LOOPBACK) {
if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
bp->link_params.loopback_mode = LOOPBACK_BMAC;
@@ -3541,7 +3643,7 @@ int bnx2x_resume(struct pci_dev *pdev)
bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- pr_err("Handling parity error recovery. Try again later\n");
+ BNX2X_ERR("Handling parity error recovery. Try again later\n");
return -EAGAIN;
}
@@ -3557,8 +3659,6 @@ int bnx2x_resume(struct pci_dev *pdev)
bnx2x_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
- /* Since the chip was reset, clear the FW sequence number */
- bp->fw_seq = 0;
rc = bnx2x_nic_load(bp, LOAD_OPEN);
rtnl_unlock();
@@ -3588,8 +3688,9 @@ static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
REG_WR8(bp, addr, ticks);
- DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
- port, fw_sb_id, sb_index, ticks);
+ DP(NETIF_MSG_IFUP,
+ "port %x fw_sb_id %d sb_index %d ticks %d\n",
+ port, fw_sb_id, sb_index, ticks);
}
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
@@ -3604,8 +3705,9 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
flags &= ~HC_INDEX_DATA_HC_ENABLED;
flags |= enable_flag;
REG_WR16(bp, addr, flags);
- DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
- port, fw_sb_id, sb_index, disable);
+ DP(NETIF_MSG_IFUP,
+ "port %x fw_sb_id %d sb_index %d disable %d\n",
+ port, fw_sb_id, sb_index, disable);
}
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,