author	James Morris <jmorris@namei.org>	2011-01-10 01:46:24 +0300
committer	James Morris <jmorris@namei.org>	2011-01-10 01:46:24 +0300
commit	d2e7ad19229f982fc1eb731827d82ceac90abfb3
tree	98a3741b4d4b27a48b3c7ea9babe331e539416a8 /drivers/net/bnx2x/bnx2x_cmn.c
parent	d03a5d888fb688c832d470b749acc5ed38e0bc1d
parent	0c21e3aaf6ae85bee804a325aa29c325209180fd
Merge branch 'master' into next
Conflicts:
	security/smack/smack_lsm.c

Verified and added fix by Stephen Rothwell <sfr@canb.auug.org.au>
Ok'd by Casey Schaufler <casey@schaufler-ca.com>

Signed-off-by: James Morris <jmorris@namei.org>
Diffstat (limited to 'drivers/net/bnx2x/bnx2x_cmn.c')
-rw-r--r--	drivers/net/bnx2x/bnx2x_cmn.c	197
1 file changed, 149 insertions(+), 48 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 94d5f59d5a6f..710ce5d04c53 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -698,6 +698,29 @@ void bnx2x_release_phy_lock(struct bnx2x *bp)
mutex_unlock(&bp->port.phy_mutex);
}
+/* calculates MF speed according to current linespeed and MF configuration */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp)
+{
+ u16 line_speed = bp->link_vars.line_speed;
+ if (IS_MF(bp)) {
+ u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ /* Calculate the current MAX line speed limit for the DCC
+ * capable devices
+ */
+ if (IS_MF_SD(bp)) {
+ u16 vn_max_rate = maxCfg * 100;
+
+ if (vn_max_rate < line_speed)
+ line_speed = vn_max_rate;
+ } else /* IS_MF_SI(bp) */
+ line_speed = (line_speed * maxCfg) / 100;
+ }
+
+ return line_speed;
+}
+
void bnx2x_link_report(struct bnx2x *bp)
{
if (bp->flags & MF_FUNC_DIS) {
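
The arithmetic in bnx2x_get_mf_speed() above differs by multi-function mode: in switch-dependent (SD) mode the max-bandwidth field from mf_config is in units of 100 Mbps and acts as a hard cap, while in switch-independent (SI) mode it is applied as a percentage of the current line speed. A standalone sketch of the same arithmetic (plain userspace C; mf_speed() and sd_mode are illustrative stand-ins for the driver's IS_MF_SD()/IS_MF_SI() split, not part of the patch):

#include <stdio.h>

static int mf_speed(int line_speed, int max_cfg, int sd_mode)
{
	if (sd_mode) {
		/* SD: max_cfg is in 100 Mbps units and caps the link speed */
		int vn_max_rate = max_cfg * 100;

		return vn_max_rate < line_speed ? vn_max_rate : line_speed;
	}
	/* SI: max_cfg is a percentage of the current line speed */
	return (line_speed * max_cfg) / 100;
}

int main(void)
{
	/* 10G link with max_cfg == 25: both modes come out at 2500 Mbps */
	printf("SD: %d Mbps\n", mf_speed(10000, 25, 1));
	printf("SI: %d Mbps\n", mf_speed(10000, 25, 0));
	return 0;
}
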
@@ -713,17 +736,8 @@ void bnx2x_link_report(struct bnx2x *bp)
netif_carrier_on(bp->dev);
netdev_info(bp->dev, "NIC Link is Up, ");
- line_speed = bp->link_vars.line_speed;
- if (IS_MF(bp)) {
- u16 vn_max_rate;
+ line_speed = bnx2x_get_mf_speed(bp);
- vn_max_rate =
- ((bp->mf_config[BP_VN(bp)] &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
- if (vn_max_rate < line_speed)
- line_speed = vn_max_rate;
- }
pr_cont("%d Mbps ", line_speed);
if (bp->link_vars.duplex == DUPLEX_FULL)
@@ -813,7 +827,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
DP(NETIF_MSG_IFUP,
"mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
- for_each_queue(bp, j) {
+ for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
if (!fp->disable_tpa) {
@@ -866,7 +880,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
}
}
- for_each_queue(bp, j) {
+ for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
fp->rx_bd_cons = 0;
@@ -897,7 +911,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i) {
+ for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
u16 bd_cons = fp->tx_bd_cons;
@@ -915,7 +929,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
int i, j;
- for_each_queue(bp, j) {
+ for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
for (i = 0; i < NUM_RX_BD; i++) {
@@ -956,7 +970,7 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
#ifdef BCM_CNIC
offset++;
#endif
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
"state %x\n", i, bp->msix_table[i + offset].vector,
bnx2x_fp(bp, i, state));
@@ -990,14 +1004,14 @@ int bnx2x_enable_msix(struct bnx2x *bp)
bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
msix_vec++;
#endif
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
bp->msix_table[msix_vec].entry = msix_vec;
DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
"(fastpath #%u)\n", msix_vec, msix_vec, i);
msix_vec++;
}
- req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
+ req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
@@ -1053,7 +1067,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
#ifdef BCM_CNIC
offset++;
#endif
- for_each_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
bp->dev->name, i);
@@ -1070,7 +1084,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
fp->state = BNX2X_FP_STATE_IRQ;
}
- i = BNX2X_NUM_QUEUES(bp);
+ i = BNX2X_NUM_ETH_QUEUES(bp);
offset = 1 + CNIC_CONTEXT_USE;
netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
" ... fp[%d] %d\n",
@@ -1117,7 +1131,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i)
+ for_each_napi_queue(bp, i)
napi_enable(&bnx2x_fp(bp, i, napi));
}
@@ -1125,7 +1139,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
{
int i;
- for_each_queue(bp, i)
+ for_each_napi_queue(bp, i)
napi_disable(&bnx2x_fp(bp, i, napi));
}
@@ -1153,6 +1167,35 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
netif_tx_disable(bp->dev);
}
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef BCM_CNIC
+ struct bnx2x *bp = netdev_priv(dev);
+ if (NO_FCOE(bp))
+ return skb_tx_hash(dev, skb);
+ else {
+ struct ethhdr *hdr = (struct ethhdr *)skb->data;
+ u16 ether_type = ntohs(hdr->h_proto);
+
+ /* Skip VLAN tag if present */
+ if (ether_type == ETH_P_8021Q) {
+ struct vlan_ethhdr *vhdr =
+ (struct vlan_ethhdr *)skb->data;
+
+ ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
+ }
+
+ /* If ethertype is FCoE or FIP - use FCoE ring */
+ if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
+ return bnx2x_fcoe(bp, index);
+ }
+#endif
+ /* Select a non-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
+ */
+ return __skb_tx_hash(dev, skb,
+ dev->real_num_tx_queues - FCOE_CONTEXT_USE);
+}
+
void bnx2x_set_num_queues(struct bnx2x *bp)
{
switch (bp->multi_mode) {
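
bnx2x_select_queue() above steers FCoE and FIP frames to the dedicated FCoE L2 ring and hashes all other traffic across the ethernet rings only, by passing __skb_tx_hash() a queue count reduced by FCOE_CONTEXT_USE. A minimal sketch of just the classification step, written as a hypothetical helper that is not part of the patch:

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* True for FCoE/FIP frames, looking through one 802.1Q tag if present */
static bool frame_is_fcoe_or_fip(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = (const struct ethhdr *)skb->data;
	u16 ether_type = ntohs(hdr->h_proto);

	if (ether_type == ETH_P_8021Q) {
		const struct vlan_ethhdr *vhdr =
			(const struct vlan_ethhdr *)skb->data;

		ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
	}

	return ether_type == ETH_P_FCOE || ether_type == ETH_P_FIP;
}
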
@@ -1167,8 +1210,23 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
bp->num_queues = 1;
break;
}
+
+ /* Add special queues */
+ bp->num_queues += NONE_ETH_CONTEXT_USE;
}
+#ifdef BCM_CNIC
+static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
+{
+ if (!NO_FCOE(bp)) {
+ if (!IS_MF_SD(bp))
+ bnx2x_set_fip_eth_mac_addr(bp, 1);
+ bnx2x_set_all_enode_macs(bp, 1);
+ bp->flags |= FCOE_MACS_SET;
+ }
+}
+#endif
+
static void bnx2x_release_firmware(struct bnx2x *bp)
{
kfree(bp->init_ops_offsets);
@@ -1177,6 +1235,20 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
release_firmware(bp->firmware);
}
+static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
+{
+ int rc, num = bp->num_queues;
+
+#ifdef BCM_CNIC
+ if (NO_FCOE(bp))
+ num -= FCOE_CONTEXT_USE;
+
+#endif
+ netif_set_real_num_tx_queues(bp->dev, num);
+ rc = netif_set_real_num_rx_queues(bp->dev, num);
+ return rc;
+}
+
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
@@ -1203,10 +1275,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bnx2x_alloc_mem(bp))
return -ENOMEM;
- netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
- rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
+ rc = bnx2x_set_real_num_queues(bp);
if (rc) {
- BNX2X_ERR("Unable to update real_num_rx_queues\n");
+ BNX2X_ERR("Unable to set real_num_queues\n");
goto load_error0;
}
@@ -1214,6 +1285,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_fp(bp, i, disable_tpa) =
((bp->flags & TPA_ENABLE_FLAG) == 0);
+#ifdef BCM_CNIC
+ /* We don't want TPA on FCoE L2 ring */
+ bnx2x_fcoe(bp, disable_tpa) = 1;
+#endif
bnx2x_napi_enable(bp);
/* Send LOAD_REQUEST command to MCP
@@ -1296,6 +1371,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}
+ bnx2x_dcbx_init(bp);
+
bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
rc = bnx2x_func_start(bp);
@@ -1344,6 +1421,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Now when Clients are configured we are ready to work */
bp->state = BNX2X_STATE_OPEN;
+#ifdef BCM_CNIC
+ bnx2x_set_fcoe_eth_macs(bp);
+#endif
+
bnx2x_set_eth_mac(bp, 1);
if (bp->port.pmf)
@@ -1402,7 +1483,7 @@ load_error3:
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
- for_each_queue(bp, i)
+ for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
/* Release IRQs */
@@ -1473,7 +1554,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
- for_each_queue(bp, i)
+ for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
bnx2x_free_mem(bp);
@@ -1577,6 +1658,17 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
/* Fall out from the NAPI loop if needed */
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+#ifdef BCM_CNIC
+ /* No need to update SB for FCoE L2 ring as long as
+ * it's connected to the default SB and the SB
+ * has been updated when NAPI was scheduled.
+ */
+ if (IS_FCOE_FP(fp)) {
+ napi_complete(napi);
+ break;
+ }
+#endif
+
bnx2x_update_fpsb_idx(fp);
/* bnx2x_has_rx_work() reads the status block,
* thus we need to ensure that status block indices
@@ -1692,11 +1784,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
}
}
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
-
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+ if (skb_is_gso_v6(skb))
+ rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
+ else if (skb_is_gso(skb))
+ rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
return rc;
}
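
This hunk swaps the open-coded gso_type tests for the skb_is_gso()/skb_is_gso_v6() helpers. The ordering matters: skb_is_gso() is true for TCPv6 GSO packets as well, so the v6 case has to be checked first. For reference, the helpers reduce to the following gso_size/gso_type checks (paraphrased from linux/skbuff.h of this kernel generation, so treat the exact signatures as approximate):

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
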
@@ -1782,15 +1873,15 @@ exit_lbl:
}
#endif
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
- struct eth_tx_parse_bd_e2 *pbd,
- u32 xmit_type)
+static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
{
- pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
- ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
+ *parsing_data |= (skb_shinfo(skb)->gso_size <<
+ ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+ ETH_TX_PARSE_BD_E2_LSO_MSS;
if ((xmit_type & XMIT_GSO_V6) &&
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
- pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+ *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
@@ -1835,15 +1926,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* @return header len
*/
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_parse_bd_e2 *pbd,
- u32 xmit_type)
+ u32 *parsing_data, u32 xmit_type)
{
- pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
+ *parsing_data |= ((tcp_hdrlen(skb)/4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
- skb->data) / 2) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
+ *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
}
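
Both E2 parsing helpers now accumulate into a host-order u32 instead of OR-ing cpu_to_le16() fragments directly into the descriptor, and the caller performs a single cpu_to_le32() when it finally stores parsing_data (see the end of the bnx2x_start_xmit() changes below). This also stops mixing byte orders in the middle of the computation. A sketch of the accumulate-then-convert pattern, with made-up field names and masks standing in for the real ETH_TX_PARSE_BD_E2_* values:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative layout only, not the real E2 descriptor fields */
#define FIELD_A_SHIFT	0
#define FIELD_A		0x00000fff
#define FIELD_B_SHIFT	12
#define FIELD_B		0x00fff000

static void pack_parsing_data(__le32 *dest, u32 a, u32 b)
{
	u32 data = 0;

	/* build the word in host byte order, masking each field into place */
	data |= (a << FIELD_A_SHIFT) & FIELD_A;
	data |= (b << FIELD_B_SHIFT) & FIELD_B;

	/* one endianness conversion at the very end */
	*dest = cpu_to_le32(data);
}
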
@@ -1912,6 +2003,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
int nbd, fp_index;
dma_addr_t mapping;
@@ -2033,8 +2125,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
/* Set PBD in checksum offload case */
if (xmit_type & XMIT_CSUM)
- hlen = bnx2x_set_pbd_csum_e2(bp,
- skb, pbd_e2, xmit_type);
+ hlen = bnx2x_set_pbd_csum_e2(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
} else {
pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
@@ -2076,10 +2169,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
hlen, bd_prod, ++nbd);
if (CHIP_IS_E2(bp))
- bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
+ bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
+ xmit_type);
else
bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
}
+
+ /* Set the PBD's parsing_data field if not zero
+ * (for the chips newer than 57711).
+ */
+ if (pbd_e2_parsing_data)
+ pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
+
tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
/* Handle fragmented skb */
@@ -2232,7 +2333,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
bp->fp = fp;
/* msix table */
- tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
+ tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
GFP_KERNEL);
if (!tbl)
goto alloc_err;