Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2.c')
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2.c | 1022
1 file changed, 515 insertions, 507 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index d4310700c7a7..a1adfaf87f49 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -71,7 +71,7 @@ /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (5*HZ) -static char version[] __devinitdata = +static char version[] = "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); @@ -106,7 +106,7 @@ typedef enum { /* indexed by board_t, above */ static struct { char *name; -} board_info[] __devinitdata = { +} board_info[] = { { "Broadcom NetXtreme II BCM5706 1000Base-T" }, { "HP NC370T Multifunction Gigabit Server Adapter" }, { "HP NC370i Multifunction Gigabit Server Adapter" }, @@ -260,10 +260,10 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) * needs to be skipped. */ diff = txr->tx_prod - txr->tx_cons; - if (unlikely(diff >= TX_DESC_CNT)) { + if (unlikely(diff >= BNX2_TX_DESC_CNT)) { diff &= 0xffff; - if (diff == TX_DESC_CNT) - diff = MAX_TX_DESC_CNT; + if (diff == BNX2_TX_DESC_CNT) + diff = BNX2_MAX_TX_DESC_CNT; } return bp->tx_ring_size - diff; } @@ -274,8 +274,8 @@ bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) u32 val; spin_lock_bh(&bp->indirect_lock); - REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); - val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW); + BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); + val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW); spin_unlock_bh(&bp->indirect_lock); return val; } @@ -284,8 +284,8 @@ static void bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) { spin_lock_bh(&bp->indirect_lock); - REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); - REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val); + BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); + BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val); spin_unlock_bh(&bp->indirect_lock); } @@ -306,21 +306,21 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) { offset += cid_addr; spin_lock_bh(&bp->indirect_lock); - if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { int i; - REG_WR(bp, BNX2_CTX_CTX_DATA, val); - REG_WR(bp, BNX2_CTX_CTX_CTRL, - offset | BNX2_CTX_CTX_CTRL_WRITE_REQ); + BNX2_WR(bp, BNX2_CTX_CTX_DATA, val); + BNX2_WR(bp, BNX2_CTX_CTX_CTRL, + offset | BNX2_CTX_CTX_CTRL_WRITE_REQ); for (i = 0; i < 5; i++) { - val = REG_RD(bp, BNX2_CTX_CTX_CTRL); + val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL); if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0) break; udelay(5); } } else { - REG_WR(bp, BNX2_CTX_DATA_ADR, offset); - REG_WR(bp, BNX2_CTX_DATA, val); + BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset); + BNX2_WR(bp, BNX2_CTX_DATA, val); } spin_unlock_bh(&bp->indirect_lock); } @@ -434,7 +434,6 @@ struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev) return cp; } -EXPORT_SYMBOL(bnx2_cnic_probe); static void bnx2_cnic_stop(struct bnx2 *bp) @@ -494,11 +493,11 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) int i, ret; if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { - val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; - REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); - REG_RD(bp, BNX2_EMAC_MDIO_MODE); + BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1); + BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); udelay(40); } @@ -506,16 +505,16 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) val1 = (bp->phy_addr << 21) | (reg << 16) | 
BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT | BNX2_EMAC_MDIO_COMM_START_BUSY; - REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1); + BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1); for (i = 0; i < 50; i++) { udelay(10); - val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM); if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { udelay(5); - val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM); val1 &= BNX2_EMAC_MDIO_COMM_DATA; break; @@ -532,11 +531,11 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) } if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { - val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; - REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); - REG_RD(bp, BNX2_EMAC_MDIO_MODE); + BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1); + BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); udelay(40); } @@ -551,11 +550,11 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val) int i, ret; if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { - val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; - REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); - REG_RD(bp, BNX2_EMAC_MDIO_MODE); + BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1); + BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); udelay(40); } @@ -563,12 +562,12 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val) val1 = (bp->phy_addr << 21) | (reg << 16) | val | BNX2_EMAC_MDIO_COMM_COMMAND_WRITE | BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT; - REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1); + BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1); for (i = 0; i < 50; i++) { udelay(10); - val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM); if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { udelay(5); break; @@ -581,11 +580,11 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val) ret = 0; if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { - val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; - REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); - REG_RD(bp, BNX2_EMAC_MDIO_MODE); + BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1); + BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); udelay(40); } @@ -601,10 +600,10 @@ bnx2_disable_int(struct bnx2 *bp) for (i = 0; i < bp->irq_nvecs; i++) { bnapi = &bp->bnx2_napi[i]; - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | BNX2_PCICFG_INT_ACK_CMD_MASK_INT); } - REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD); + BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD); } static void @@ -616,16 +615,16 @@ bnx2_enable_int(struct bnx2 *bp) for (i = 0; i < bp->irq_nvecs; i++) { bnapi = &bp->bnx2_napi[i]; - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - BNX2_PCICFG_INT_ACK_CMD_MASK_INT | - bnapi->last_status_idx); + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT | + bnapi->last_status_idx); - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - bnapi->last_status_idx); + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bnapi->last_status_idx); } - REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); + BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); } static void @@ -824,7 +823,7 @@ bnx2_free_mem(struct bnx2 *bp) for (i = 0; i < 
bp->ctx_pages; i++) { if (bp->ctx_blk[i]) { - dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE, + dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE, bp->ctx_blk[i], bp->ctx_blk_mapping[i]); bp->ctx_blk[i] = NULL; @@ -887,13 +886,13 @@ bnx2_alloc_mem(struct bnx2 *bp) bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size; - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE; + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE; if (bp->ctx_pages == 0) bp->ctx_pages = 1; for (i = 0; i < bp->ctx_pages; i++) { bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev, - BCM_PAGE_SIZE, + BNX2_PAGE_SIZE, &bp->ctx_blk_mapping[i], GFP_KERNEL); if (bp->ctx_blk[i] == NULL) @@ -1034,7 +1033,7 @@ bnx2_resolve_flow_ctrl(struct bnx2 *bp) } if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && - (CHIP_NUM(bp) == CHIP_NUM_5708)) { + (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { u32 val; bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); @@ -1294,14 +1293,14 @@ bnx2_set_mac_link(struct bnx2 *bp) { u32 val; - REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620); + BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620); if (bp->link_up && (bp->line_speed == SPEED_1000) && (bp->duplex == DUPLEX_HALF)) { - REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff); + BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff); } /* Configure the EMAC mode register. */ - val = REG_RD(bp, BNX2_EMAC_MODE); + val = BNX2_RD(bp, BNX2_EMAC_MODE); val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK | @@ -1310,7 +1309,7 @@ bnx2_set_mac_link(struct bnx2 *bp) if (bp->link_up) { switch (bp->line_speed) { case SPEED_10: - if (CHIP_NUM(bp) != CHIP_NUM_5706) { + if (BNX2_CHIP(bp) != BNX2_CHIP_5706) { val |= BNX2_EMAC_MODE_PORT_MII_10M; break; } @@ -1333,25 +1332,25 @@ bnx2_set_mac_link(struct bnx2 *bp) /* Set the MAC to operate in the appropriate duplex mode. */ if (bp->duplex == DUPLEX_HALF) val |= BNX2_EMAC_MODE_HALF_DUPLEX; - REG_WR(bp, BNX2_EMAC_MODE, val); + BNX2_WR(bp, BNX2_EMAC_MODE, val); /* Enable/disable rx PAUSE. */ bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN; if (bp->flow_ctrl & FLOW_CTRL_RX) bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN; - REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode); + BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode); /* Enable/disable tx PAUSE. */ - val = REG_RD(bp, BNX2_EMAC_TX_MODE); + val = BNX2_RD(bp, BNX2_EMAC_TX_MODE); val &= ~BNX2_EMAC_TX_MODE_FLOW_EN; if (bp->flow_ctrl & FLOW_CTRL_TX) val |= BNX2_EMAC_TX_MODE_FLOW_EN; - REG_WR(bp, BNX2_EMAC_TX_MODE, val); + BNX2_WR(bp, BNX2_EMAC_TX_MODE, val); /* Acknowledge the interrupt. 
*/ - REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE); + BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE); bnx2_init_all_rx_contexts(bp); } @@ -1360,7 +1359,7 @@ static void bnx2_enable_bmsr1(struct bnx2 *bp) { if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && - (CHIP_NUM(bp) == CHIP_NUM_5709)) + (BNX2_CHIP(bp) == BNX2_CHIP_5709)) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS); } @@ -1369,7 +1368,7 @@ static void bnx2_disable_bmsr1(struct bnx2 *bp) { if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && - (CHIP_NUM(bp) == CHIP_NUM_5709)) + (BNX2_CHIP(bp) == BNX2_CHIP_5709)) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); } @@ -1386,7 +1385,7 @@ bnx2_test_and_enable_2g5(struct bnx2 *bp) if (bp->autoneg & AUTONEG_SPEED) bp->advertising |= ADVERTISED_2500baseX_Full; - if (CHIP_NUM(bp) == CHIP_NUM_5709) + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); bnx2_read_phy(bp, bp->mii_up1, &up1); @@ -1396,7 +1395,7 @@ bnx2_test_and_enable_2g5(struct bnx2 *bp) ret = 0; } - if (CHIP_NUM(bp) == CHIP_NUM_5709) + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); @@ -1412,7 +1411,7 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp) if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) return 0; - if (CHIP_NUM(bp) == CHIP_NUM_5709) + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); bnx2_read_phy(bp, bp->mii_up1, &up1); @@ -1422,7 +1421,7 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp) ret = 1; } - if (CHIP_NUM(bp) == CHIP_NUM_5709) + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); @@ -1438,7 +1437,7 @@ bnx2_enable_forced_2g5(struct bnx2 *bp) if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) return; - if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { u32 val; bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, @@ -1454,7 +1453,7 @@ bnx2_enable_forced_2g5(struct bnx2 *bp) MII_BNX2_BLK_ADDR_COMBO_IEEEB0); err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { + } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) { err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); if (!err) bmcr |= BCM5708S_BMCR_FORCE_2500; @@ -1482,7 +1481,7 @@ bnx2_disable_forced_2g5(struct bnx2 *bp) if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) return; - if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { u32 val; bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, @@ -1496,7 +1495,7 @@ bnx2_disable_forced_2g5(struct bnx2 *bp) MII_BNX2_BLK_ADDR_COMBO_IEEEB0); err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); - } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { + } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) { err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); if (!err) bmcr &= ~BCM5708S_BMCR_FORCE_2500; @@ -1547,14 +1546,14 @@ bnx2_set_link(struct bnx2 *bp) bnx2_disable_bmsr1(bp); if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && - (CHIP_NUM(bp) == CHIP_NUM_5706)) { + (BNX2_CHIP(bp) == BNX2_CHIP_5706)) { u32 val, an_dbg; if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) { bnx2_5706s_force_link_dn(bp, 0); bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN; } - val = REG_RD(bp, BNX2_EMAC_STATUS); + val = BNX2_RD(bp, BNX2_EMAC_STATUS); bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); @@ -1571,11 +1570,11 @@ bnx2_set_link(struct bnx2 *bp) bp->link_up = 1; if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { 
- if (CHIP_NUM(bp) == CHIP_NUM_5706) + if (BNX2_CHIP(bp) == BNX2_CHIP_5706) bnx2_5706s_linkup(bp); - else if (CHIP_NUM(bp) == CHIP_NUM_5708) + else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) bnx2_5708s_linkup(bp); - else if (CHIP_NUM(bp) == CHIP_NUM_5709) + else if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_5709s_linkup(bp); } else { @@ -1757,7 +1756,7 @@ __acquires(&bp->phy_lock) new_bmcr = bmcr & ~BMCR_ANENABLE; new_bmcr |= BMCR_SPEED1000; - if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { if (bp->req_line_speed == SPEED_2500) bnx2_enable_forced_2g5(bp); else if (bp->req_line_speed == SPEED_1000) { @@ -1765,7 +1764,7 @@ __acquires(&bp->phy_lock) new_bmcr &= ~0x2000; } - } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { + } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) { if (bp->req_line_speed == SPEED_2500) new_bmcr |= BCM5708S_BMCR_FORCE_2500; else @@ -1942,8 +1941,8 @@ bnx2_send_heart_beat(struct bnx2 *bp) spin_lock(&bp->indirect_lock); msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK); addr = bp->shmem_base + BNX2_DRV_PULSE_MB; - REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr); - REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg); + BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr); + BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg); spin_unlock(&bp->indirect_lock); } @@ -2230,9 +2229,9 @@ bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy) bnx2_write_phy(bp, BCM5708S_UP1, val); } - if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || - (CHIP_ID(bp) == CHIP_ID_5708_B0) || - (CHIP_ID(bp) == CHIP_ID_5708_B1)) { + if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) { /* increase tx signal amplitude */ bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_TX_MISC); @@ -2268,8 +2267,8 @@ bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy) bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; - if (CHIP_NUM(bp) == CHIP_NUM_5706) - REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300); + if (BNX2_CHIP(bp) == BNX2_CHIP_5706) + BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300); if (bp->dev->mtu > 1500) { u32 val; @@ -2368,7 +2367,7 @@ __acquires(&bp->phy_lock) bp->mii_adv = MII_ADVERTISE; bp->mii_lpa = MII_LPA; - REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); + BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) goto setup_phy; @@ -2379,11 +2378,11 @@ __acquires(&bp->phy_lock) bp->phy_id |= val & 0xffff; if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { - if (CHIP_NUM(bp) == CHIP_NUM_5706) + if (BNX2_CHIP(bp) == BNX2_CHIP_5706) rc = bnx2_init_5706s_phy(bp, reset_phy); - else if (CHIP_NUM(bp) == CHIP_NUM_5708) + else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) rc = bnx2_init_5708s_phy(bp, reset_phy); - else if (CHIP_NUM(bp) == CHIP_NUM_5709) + else if (BNX2_CHIP(bp) == BNX2_CHIP_5709) rc = bnx2_init_5709s_phy(bp, reset_phy); } else { @@ -2402,10 +2401,10 @@ bnx2_set_mac_loopback(struct bnx2 *bp) { u32 mac_mode; - mac_mode = REG_RD(bp, BNX2_EMAC_MODE); + mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE); mac_mode &= ~BNX2_EMAC_MODE_PORT; mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK; - REG_WR(bp, BNX2_EMAC_MODE, mac_mode); + BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode); bp->link_up = 1; return 0; } @@ -2431,13 +2430,13 @@ bnx2_set_phy_loopback(struct bnx2 *bp) msleep(100); } - mac_mode = REG_RD(bp, BNX2_EMAC_MODE); + mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE); mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | BNX2_EMAC_MODE_MAC_LOOP | 
BNX2_EMAC_MODE_FORCE_LINK | BNX2_EMAC_MODE_25G_MODE); mac_mode |= BNX2_EMAC_MODE_PORT_GMII; - REG_WR(bp, BNX2_EMAC_MODE, mac_mode); + BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode); bp->link_up = 1; return 0; } @@ -2449,7 +2448,7 @@ bnx2_dump_mcp_state(struct bnx2 *bp) u32 mcp_p0, mcp_p1; netdev_err(dev, "<--- start MCP states dump --->\n"); - if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { mcp_p0 = BNX2_MCP_STATE_P0; mcp_p1 = BNX2_MCP_STATE_P1; } else { @@ -2538,10 +2537,10 @@ bnx2_init_5709_context(struct bnx2 *bp) u32 val; val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12); - val |= (BCM_PAGE_BITS - 8) << 16; - REG_WR(bp, BNX2_CTX_COMMAND, val); + val |= (BNX2_PAGE_BITS - 8) << 16; + BNX2_WR(bp, BNX2_CTX_COMMAND, val); for (i = 0; i < 10; i++) { - val = REG_RD(bp, BNX2_CTX_COMMAND); + val = BNX2_RD(bp, BNX2_CTX_COMMAND); if (!(val & BNX2_CTX_COMMAND_MEM_INIT)) break; udelay(2); @@ -2553,20 +2552,20 @@ bnx2_init_5709_context(struct bnx2 *bp) int j; if (bp->ctx_blk[i]) - memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE); + memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE); else return -ENOMEM; - REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0, - (bp->ctx_blk_mapping[i] & 0xffffffff) | - BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID); - REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1, - (u64) bp->ctx_blk_mapping[i] >> 32); - REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i | - BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); + BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0, + (bp->ctx_blk_mapping[i] & 0xffffffff) | + BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID); + BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1, + (u64) bp->ctx_blk_mapping[i] >> 32); + BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i | + BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); for (j = 0; j < 10; j++) { - val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL); + val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL); if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) break; udelay(5); @@ -2591,7 +2590,7 @@ bnx2_init_context(struct bnx2 *bp) vcid--; - if (CHIP_ID(bp) == CHIP_ID_5706_A0) { + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { u32 new_vcid; vcid_addr = GET_PCID_ADDR(vcid); @@ -2612,8 +2611,8 @@ bnx2_init_context(struct bnx2 *bp) vcid_addr += (i << PHY_CTX_SHIFT); pcid_addr += (i << PHY_CTX_SHIFT); - REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr); - REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr); + BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr); + BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr); /* Zero out the context. 
*/ for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) @@ -2633,7 +2632,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp) if (good_mbuf == NULL) return -ENOMEM; - REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, + BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE); good_mbuf_cnt = 0; @@ -2678,21 +2677,21 @@ bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos) val = (mac_addr[0] << 8) | mac_addr[1]; - REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val); + BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val); val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | (mac_addr[4] << 8) | mac_addr[5]; - REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val); + BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val); } static inline int bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) { dma_addr_t mapping; - struct sw_pg *rx_pg = &rxr->rx_pg_ring[index]; - struct rx_bd *rxbd = - &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)]; + struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index]; + struct bnx2_rx_bd *rxbd = + &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)]; struct page *page = alloc_page(gfp); if (!page) @@ -2714,7 +2713,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gf static void bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) { - struct sw_pg *rx_pg = &rxr->rx_pg_ring[index]; + struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index]; struct page *page = rx_pg->page; if (!page) @@ -2731,9 +2730,10 @@ static inline int bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) { u8 *data; - struct sw_bd *rx_buf = &rxr->rx_buf_ring[index]; + struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index]; dma_addr_t mapping; - struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; + struct bnx2_rx_bd *rxbd = + &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)]; data = kmalloc(bp->rx_buf_size, gfp); if (!data) @@ -2770,9 +2770,9 @@ bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event) old_link_state = sblk->status_attn_bits_ack & event; if (new_link_state != old_link_state) { if (new_link_state) - REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event); + BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event); else - REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event); + BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event); } else is_set = 0; @@ -2802,7 +2802,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi) barrier(); cons = *bnapi->hw_tx_cons_ptr; barrier(); - if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT)) + if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT)) cons++; return cons; } @@ -2823,11 +2823,11 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) sw_cons = txr->tx_cons; while (sw_cons != hw_cons) { - struct sw_tx_bd *tx_buf; + struct bnx2_sw_tx_bd *tx_buf; struct sk_buff *skb; int i, last; - sw_ring_cons = TX_RING_IDX(sw_cons); + sw_ring_cons = BNX2_TX_RING_IDX(sw_cons); tx_buf = &txr->tx_buf_ring[sw_ring_cons]; skb = tx_buf->skb; @@ -2841,7 +2841,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) last_idx = sw_cons + tx_buf->nr_frags + 1; last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1; - if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) { + if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) { last_idx++; } if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) { @@ -2856,17 +2856,18 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) 
last = tx_buf->nr_frags; for (i = 0; i < last; i++) { - sw_cons = NEXT_TX_BD(sw_cons); + struct bnx2_sw_tx_bd *tx_buf; + sw_cons = BNX2_NEXT_TX_BD(sw_cons); + + tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)]; dma_unmap_page(&bp->pdev->dev, - dma_unmap_addr( - &txr->tx_buf_ring[TX_RING_IDX(sw_cons)], - mapping), + dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), PCI_DMA_TODEVICE); } - sw_cons = NEXT_TX_BD(sw_cons); + sw_cons = BNX2_NEXT_TX_BD(sw_cons); tx_bytes += skb->len; dev_kfree_skb(skb); @@ -2905,8 +2906,8 @@ static void bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb, int count) { - struct sw_pg *cons_rx_pg, *prod_rx_pg; - struct rx_bd *cons_bd, *prod_bd; + struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg; + struct bnx2_rx_bd *cons_bd, *prod_bd; int i; u16 hw_prod, prod; u16 cons = rxr->rx_pg_cons; @@ -2933,12 +2934,14 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, hw_prod = rxr->rx_pg_prod; for (i = 0; i < count; i++) { - prod = RX_PG_RING_IDX(hw_prod); + prod = BNX2_RX_PG_RING_IDX(hw_prod); prod_rx_pg = &rxr->rx_pg_ring[prod]; cons_rx_pg = &rxr->rx_pg_ring[cons]; - cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)]; - prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)] + [BNX2_RX_IDX(cons)]; + prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)] + [BNX2_RX_IDX(prod)]; if (prod != cons) { prod_rx_pg->page = cons_rx_pg->page; @@ -2950,8 +2953,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; } - cons = RX_PG_RING_IDX(NEXT_RX_BD(cons)); - hw_prod = NEXT_RX_BD(hw_prod); + cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons)); + hw_prod = BNX2_NEXT_RX_BD(hw_prod); } rxr->rx_pg_prod = hw_prod; rxr->rx_pg_cons = cons; @@ -2961,8 +2964,8 @@ static inline void bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data, u16 cons, u16 prod) { - struct sw_bd *cons_rx_buf, *prod_rx_buf; - struct rx_bd *cons_bd, *prod_bd; + struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf; + struct bnx2_rx_bd *cons_bd, *prod_bd; cons_rx_buf = &rxr->rx_buf_ring[cons]; prod_rx_buf = &rxr->rx_buf_ring[prod]; @@ -2981,8 +2984,8 @@ bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, dma_unmap_addr_set(prod_rx_buf, mapping, dma_unmap_addr(cons_rx_buf, mapping)); - cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; - prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; + cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)]; + prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)]; prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; } @@ -3022,7 +3025,7 @@ error: return skb; } else { unsigned int i, frag_len, frag_size, pages; - struct sw_pg *rx_pg; + struct bnx2_sw_pg *rx_pg; u16 pg_cons = rxr->rx_pg_cons; u16 pg_prod = rxr->rx_pg_prod; @@ -3065,7 +3068,7 @@ error: rx_pg->page = NULL; err = bnx2_alloc_rx_page(bp, rxr, - RX_PG_RING_IDX(pg_prod), + BNX2_RX_PG_RING_IDX(pg_prod), GFP_ATOMIC); if (unlikely(err)) { rxr->rx_pg_cons = pg_cons; @@ -3083,8 +3086,8 @@ error: skb->truesize += PAGE_SIZE; skb->len += frag_len; - pg_prod = NEXT_RX_BD(pg_prod); - pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons)); + pg_prod = BNX2_NEXT_RX_BD(pg_prod); + pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons)); } rxr->rx_pg_prod = pg_prod; rxr->rx_pg_cons = pg_cons; @@ -3101,7 
+3104,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi) barrier(); cons = *bnapi->hw_rx_cons_ptr; barrier(); - if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)) + if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT)) cons++; return cons; } @@ -3125,13 +3128,14 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) while (sw_cons != hw_cons) { unsigned int len, hdr_len; u32 status; - struct sw_bd *rx_buf, *next_rx_buf; + struct bnx2_sw_bd *rx_buf, *next_rx_buf; struct sk_buff *skb; dma_addr_t dma_addr; u8 *data; + u16 next_ring_idx; - sw_ring_cons = RX_RING_IDX(sw_cons); - sw_ring_prod = RX_RING_IDX(sw_prod); + sw_ring_cons = BNX2_RX_RING_IDX(sw_cons); + sw_ring_prod = BNX2_RX_RING_IDX(sw_prod); rx_buf = &rxr->rx_buf_ring[sw_ring_cons]; data = rx_buf->data; @@ -3146,8 +3150,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); - next_rx_buf = - &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))]; + next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons)); + next_rx_buf = &rxr->rx_buf_ring[next_ring_idx]; prefetch(get_l2_fhdr(next_rx_buf->data)); len = rx_hdr->l2_fhdr_pkt_len; @@ -3239,8 +3243,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) rx_pkt++; next_rx: - sw_cons = NEXT_RX_BD(sw_cons); - sw_prod = NEXT_RX_BD(sw_prod); + sw_cons = BNX2_NEXT_RX_BD(sw_cons); + sw_prod = BNX2_NEXT_RX_BD(sw_prod); if ((rx_pkt == budget)) break; @@ -3255,11 +3259,11 @@ next_rx: rxr->rx_prod = sw_prod; if (pg_ring_used) - REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); + BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); - REG_WR16(bp, rxr->rx_bidx_addr, sw_prod); + BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod); - REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); + BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); mmiowb(); @@ -3277,7 +3281,7 @@ bnx2_msi(int irq, void *dev_instance) struct bnx2 *bp = bnapi->bp; prefetch(bnapi->status_blk.msi); - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | BNX2_PCICFG_INT_ACK_CMD_MASK_INT); @@ -3321,18 +3325,18 @@ bnx2_interrupt(int irq, void *dev_instance) * the status block write. */ if ((sblk->status_idx == bnapi->last_status_idx) && - (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) & + (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) & BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) return IRQ_NONE; - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | BNX2_PCICFG_INT_ACK_CMD_MASK_INT); /* Read back to deassert IRQ immediately to avoid too many * spurious interrupts. */ - REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD); + BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD); /* Return here if interrupt is shared and is disabled. 
*/ if (unlikely(atomic_read(&bp->intr_sem) != 0)) @@ -3388,14 +3392,14 @@ bnx2_chk_missed_msi(struct bnx2 *bp) u32 msi_ctrl; if (bnx2_has_work(bnapi)) { - msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL); + msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL); if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE)) return; if (bnapi->last_status_idx == bp->idle_chk_status_idx) { - REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl & - ~BNX2_PCICFG_MSI_CONTROL_ENABLE); - REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl); + BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl & + ~BNX2_PCICFG_MSI_CONTROL_ENABLE); + BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl); bnx2_msi(bp->irq_tbl[0].vector, bnapi); } } @@ -3434,9 +3438,9 @@ static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) /* This is needed to take care of transient status * during link changes. */ - REG_WR(bp, BNX2_HC_COMMAND, - bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); - REG_RD(bp, BNX2_HC_COMMAND); + BNX2_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + BNX2_RD(bp, BNX2_HC_COMMAND); } } @@ -3473,9 +3477,9 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget) if (likely(!bnx2_has_fast_work(bnapi))) { napi_complete(napi); - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - bnapi->last_status_idx); + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bnapi->last_status_idx); break; } } @@ -3511,19 +3515,19 @@ static int bnx2_poll(struct napi_struct *napi, int budget) if (likely(!bnx2_has_work(bnapi))) { napi_complete(napi); if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - bnapi->last_status_idx); + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bnapi->last_status_idx); break; } - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - BNX2_PCICFG_INT_ACK_CMD_MASK_INT | - bnapi->last_status_idx); - - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - bnapi->last_status_idx); + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT | + bnapi->last_status_idx); + + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bnapi->last_status_idx); break; } } @@ -3561,8 +3565,8 @@ bnx2_set_rx_mode(struct net_device *dev) } else if (dev->flags & IFF_ALLMULTI) { for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { - REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), - 0xffffffff); + BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), + 0xffffffff); } sort_mode |= BNX2_RPM_SORT_USER0_MC_EN; } @@ -3584,8 +3588,8 @@ bnx2_set_rx_mode(struct net_device *dev) } for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { - REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), - mc_filter[i]); + BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), + mc_filter[i]); } sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN; @@ -3610,12 +3614,12 @@ bnx2_set_rx_mode(struct net_device *dev) if (rx_mode != bp->rx_mode) { bp->rx_mode = rx_mode; - REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode); + BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode); } - REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0); - REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode); - REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA); 
spin_unlock_bh(&bp->phy_lock); } @@ -3663,10 +3667,10 @@ static int bnx2_request_uncached_firmware(struct bnx2 *bp) const struct bnx2_rv2p_fw_file *rv2p_fw; int rc; - if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { mips_fw_file = FW_MIPS_FILE_09; - if ((CHIP_ID(bp) == CHIP_ID_5709_A0) || - (CHIP_ID(bp) == CHIP_ID_5709_A1)) + if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1)) rv2p_fw_file = FW_RV2P_FILE_09_Ax; else rv2p_fw_file = FW_RV2P_FILE_09; @@ -3756,13 +3760,13 @@ load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc, } for (i = 0; i < rv2p_code_len; i += 8) { - REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code)); + BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code)); rv2p_code++; - REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code)); + BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code)); rv2p_code++; val = (i / 8) | cmd; - REG_WR(bp, addr, val); + BNX2_WR(bp, addr, val); } rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset); @@ -3772,22 +3776,22 @@ load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc, loc = be32_to_cpu(fw_entry->fixup[i]); if (loc && ((loc * 4) < rv2p_code_len)) { code = be32_to_cpu(*(rv2p_code + loc - 1)); - REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code); + BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code); code = be32_to_cpu(*(rv2p_code + loc)); code = rv2p_fw_fixup(rv2p_proc, i, loc, code); - REG_WR(bp, BNX2_RV2P_INSTR_LOW, code); + BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code); val = (loc / 2) | cmd; - REG_WR(bp, addr, val); + BNX2_WR(bp, addr, val); } } /* Reset the processor, un-stall is done later. */ if (rv2p_proc == RV2P_PROC1) { - REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET); + BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET); } else { - REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET); + BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET); } return 0; @@ -3924,14 +3928,14 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) /* delay required during transition out of D3hot */ msleep(20); - val = REG_RD(bp, BNX2_EMAC_MODE); + val = BNX2_RD(bp, BNX2_EMAC_MODE); val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; val &= ~BNX2_EMAC_MODE_MPKT; - REG_WR(bp, BNX2_EMAC_MODE, val); + BNX2_WR(bp, BNX2_EMAC_MODE, val); - val = REG_RD(bp, BNX2_RPM_CONFIG); + val = BNX2_RD(bp, BNX2_RPM_CONFIG); val &= ~BNX2_RPM_CONFIG_ACPI_ENA; - REG_WR(bp, BNX2_RPM_CONFIG, val); + BNX2_WR(bp, BNX2_RPM_CONFIG, val); break; } case PCI_D3hot: { @@ -3963,7 +3967,7 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); - val = REG_RD(bp, BNX2_EMAC_MODE); + val = BNX2_RD(bp, BNX2_EMAC_MODE); /* Enable port mode. 
*/ val &= ~BNX2_EMAC_MODE_PORT; @@ -3978,32 +3982,32 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) val |= BNX2_EMAC_MODE_25G_MODE; } - REG_WR(bp, BNX2_EMAC_MODE, val); + BNX2_WR(bp, BNX2_EMAC_MODE, val); /* receive all multicast */ for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { - REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), - 0xffffffff); + BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), + 0xffffffff); } - REG_WR(bp, BNX2_EMAC_RX_MODE, - BNX2_EMAC_RX_MODE_SORT_MODE); + BNX2_WR(bp, BNX2_EMAC_RX_MODE, + BNX2_EMAC_RX_MODE_SORT_MODE); val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN; - REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0); - REG_WR(bp, BNX2_RPM_SORT_USER0, val); - REG_WR(bp, BNX2_RPM_SORT_USER0, val | - BNX2_RPM_SORT_USER0_ENA); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, val); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | + BNX2_RPM_SORT_USER0_ENA); /* Need to enable EMAC and RPM for WOL. */ - REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, - BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | - BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | - BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); + BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, + BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | + BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | + BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); - val = REG_RD(bp, BNX2_RPM_CONFIG); + val = BNX2_RD(bp, BNX2_RPM_CONFIG); val &= ~BNX2_RPM_CONFIG_ACPI_ENA; - REG_WR(bp, BNX2_RPM_CONFIG, val); + BNX2_WR(bp, BNX2_RPM_CONFIG, val); wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; } @@ -4016,8 +4020,8 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) 1, 0); pmcsr &= ~PCI_PM_CTRL_STATE_MASK; - if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || - (CHIP_ID(bp) == CHIP_ID_5706_A1)) { + if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) { if (bp->wol) pmcsr |= 3; @@ -4050,9 +4054,9 @@ bnx2_acquire_nvram_lock(struct bnx2 *bp) int j; /* Request access to the flash interface. */ - REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2); + BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2); for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { - val = REG_RD(bp, BNX2_NVM_SW_ARB); + val = BNX2_RD(bp, BNX2_NVM_SW_ARB); if (val & BNX2_NVM_SW_ARB_ARB_ARB2) break; @@ -4072,10 +4076,10 @@ bnx2_release_nvram_lock(struct bnx2 *bp) u32 val; /* Relinquish nvram interface. 
*/ - REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2); + BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2); for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { - val = REG_RD(bp, BNX2_NVM_SW_ARB); + val = BNX2_RD(bp, BNX2_NVM_SW_ARB); if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2)) break; @@ -4094,20 +4098,20 @@ bnx2_enable_nvram_write(struct bnx2 *bp) { u32 val; - val = REG_RD(bp, BNX2_MISC_CFG); - REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI); + val = BNX2_RD(bp, BNX2_MISC_CFG); + BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI); if (bp->flash_info->flags & BNX2_NV_WREN) { int j; - REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); - REG_WR(bp, BNX2_NVM_COMMAND, - BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT); + BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); + BNX2_WR(bp, BNX2_NVM_COMMAND, + BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT); for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { udelay(5); - val = REG_RD(bp, BNX2_NVM_COMMAND); + val = BNX2_RD(bp, BNX2_NVM_COMMAND); if (val & BNX2_NVM_COMMAND_DONE) break; } @@ -4123,8 +4127,8 @@ bnx2_disable_nvram_write(struct bnx2 *bp) { u32 val; - val = REG_RD(bp, BNX2_MISC_CFG); - REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN); + val = BNX2_RD(bp, BNX2_MISC_CFG); + BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN); } @@ -4133,10 +4137,10 @@ bnx2_enable_nvram_access(struct bnx2 *bp) { u32 val; - val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE); + val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE); /* Enable both bits, even on read. */ - REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, - val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN); + BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE, + val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN); } static void @@ -4144,9 +4148,9 @@ bnx2_disable_nvram_access(struct bnx2 *bp) { u32 val; - val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE); + val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE); /* Disable both bits, even after read. */ - REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, + BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE, val & ~(BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN)); } @@ -4166,13 +4170,13 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset) BNX2_NVM_COMMAND_DOIT; /* Need to clear DONE bit separately. */ - REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); + BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); /* Address of the NVRAM to read from. */ - REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); + BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); /* Issue an erase command. */ - REG_WR(bp, BNX2_NVM_COMMAND, cmd); + BNX2_WR(bp, BNX2_NVM_COMMAND, cmd); /* Wait for completion. */ for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { @@ -4180,7 +4184,7 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset) udelay(5); - val = REG_RD(bp, BNX2_NVM_COMMAND); + val = BNX2_RD(bp, BNX2_NVM_COMMAND); if (val & BNX2_NVM_COMMAND_DONE) break; } @@ -4208,13 +4212,13 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags) } /* Need to clear DONE bit separately. */ - REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); + BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); /* Address of the NVRAM to read from. */ - REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); + BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); /* Issue a read command. */ - REG_WR(bp, BNX2_NVM_COMMAND, cmd); + BNX2_WR(bp, BNX2_NVM_COMMAND, cmd); /* Wait for completion. 
*/ for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { @@ -4222,9 +4226,9 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags) udelay(5); - val = REG_RD(bp, BNX2_NVM_COMMAND); + val = BNX2_RD(bp, BNX2_NVM_COMMAND); if (val & BNX2_NVM_COMMAND_DONE) { - __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ)); + __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ)); memcpy(ret_val, &v, 4); break; } @@ -4254,24 +4258,24 @@ bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags) } /* Need to clear DONE bit separately. */ - REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); + BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); memcpy(&val32, val, 4); /* Write the data. */ - REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32)); + BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32)); /* Address of the NVRAM to write to. */ - REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); + BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); /* Issue the write command. */ - REG_WR(bp, BNX2_NVM_COMMAND, cmd); + BNX2_WR(bp, BNX2_NVM_COMMAND, cmd); /* Wait for completion. */ for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { udelay(5); - if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE) + if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE) break; } if (j >= NVRAM_TIMEOUT_COUNT) @@ -4287,13 +4291,13 @@ bnx2_init_nvram(struct bnx2 *bp) int j, entry_count, rc = 0; const struct flash_spec *flash; - if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bp->flash_info = &flash_5709; goto get_flash_size; } /* Determine the selected interface. */ - val = REG_RD(bp, BNX2_NVM_CFG1); + val = BNX2_RD(bp, BNX2_NVM_CFG1); entry_count = ARRAY_SIZE(flash_table); @@ -4332,10 +4336,10 @@ bnx2_init_nvram(struct bnx2 *bp) bnx2_enable_nvram_access(bp); /* Reconfigure the flash interface */ - REG_WR(bp, BNX2_NVM_CFG1, flash->config1); - REG_WR(bp, BNX2_NVM_CFG2, flash->config2); - REG_WR(bp, BNX2_NVM_CFG3, flash->config3); - REG_WR(bp, BNX2_NVM_WRITE1, flash->write1); + BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1); + BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2); + BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3); + BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1); /* Disable access to flash interface */ bnx2_disable_nvram_access(bp); @@ -4696,10 +4700,10 @@ bnx2_init_fw_cap(struct bnx2 *bp) static void bnx2_setup_msix_tbl(struct bnx2 *bp) { - REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN); + BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN); - REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR); - REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); + BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR); + BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); } static int @@ -4711,24 +4715,24 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) /* Wait for the current PCI transaction to complete before * issuing a reset. 
*/ - if ((CHIP_NUM(bp) == CHIP_NUM_5706) || - (CHIP_NUM(bp) == CHIP_NUM_5708)) { - REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, - BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | - BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | - BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | - BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); - val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); + if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || + (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { + BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, + BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | + BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | + BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | + BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); + val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); udelay(5); } else { /* 5709 */ - val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); + val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL); val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE; - REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); - val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); + BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); + val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL); for (i = 0; i < 100; i++) { msleep(1); - val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL); + val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL); if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND)) break; } @@ -4744,17 +4748,17 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) /* Do a dummy read to force the chip to complete all current transaction * before we issue a reset. */ - val = REG_RD(bp, BNX2_MISC_ID); + val = BNX2_RD(bp, BNX2_MISC_ID); - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET); - REG_RD(bp, BNX2_MISC_COMMAND); + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET); + BNX2_RD(bp, BNX2_MISC_COMMAND); udelay(5); val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; - REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); + BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); } else { val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | @@ -4762,19 +4766,19 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; /* Chip reset. */ - REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); + BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); /* Reading back any register after chip reset will hang the * bus on 5706 A0 and A1. The msleep below provides plenty * of margin for write posting. */ - if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || - (CHIP_ID(bp) == CHIP_ID_5706_A1)) + if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) msleep(20); /* Reset takes approximate 30 usec */ for (i = 0; i < 10; i++) { - val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG); + val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG); if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) break; @@ -4789,7 +4793,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) } /* Make sure byte swapping is properly configured. */ - val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0); + val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0); if (val != 0x01020304) { pr_err("Chip not in correct endian mode\n"); return -ENODEV; @@ -4808,10 +4812,10 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) bnx2_set_default_remote_link(bp); spin_unlock_bh(&bp->phy_lock); - if (CHIP_ID(bp) == CHIP_ID_5706_A0) { + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { /* Adjust the voltage regular to two steps lower. The default * of this register is 0x0000000e. 
*/ - REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa); + BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa); /* Remove bad rbuf memory from the free pool. */ rc = bnx2_alloc_bad_rbuf(bp); @@ -4820,7 +4824,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) if (bp->flags & BNX2_FLAG_USING_MSIX) { bnx2_setup_msix_tbl(bp); /* Prevent MSIX table reads and write from timing out */ - REG_WR(bp, BNX2_MISC_ECO_HW_CTL, + BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL, BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN); } @@ -4834,7 +4838,7 @@ bnx2_init_chip(struct bnx2 *bp) int rc, i; /* Make sure the interrupt is not active. */ - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT); val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP | BNX2_DMA_CONFIG_DATA_WORD_SWAP | @@ -4850,16 +4854,17 @@ bnx2_init_chip(struct bnx2 *bp) if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133)) val |= (1 << 23); - if ((CHIP_NUM(bp) == CHIP_NUM_5706) && - (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX)) + if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) && + (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) && + !(bp->flags & BNX2_FLAG_PCIX)) val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA; - REG_WR(bp, BNX2_DMA_CONFIG, val); + BNX2_WR(bp, BNX2_DMA_CONFIG, val); - if (CHIP_ID(bp) == CHIP_ID_5706_A0) { - val = REG_RD(bp, BNX2_TDMA_CONFIG); + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { + val = BNX2_RD(bp, BNX2_TDMA_CONFIG); val |= BNX2_TDMA_CONFIG_ONE_DMA; - REG_WR(bp, BNX2_TDMA_CONFIG, val); + BNX2_WR(bp, BNX2_TDMA_CONFIG, val); } if (bp->flags & BNX2_FLAG_PCIX) { @@ -4871,14 +4876,14 @@ bnx2_init_chip(struct bnx2 *bp) val16 & ~PCI_X_CMD_ERO); } - REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, - BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | - BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | - BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); + BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, + BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | + BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | + BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); /* Initialize context mapping and zero out the quick contexts. The * context block must have already been enabled. */ - if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { rc = bnx2_init_5709_context(bp); if (rc) return rc; @@ -4892,29 +4897,29 @@ bnx2_init_chip(struct bnx2 *bp) bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); - val = REG_RD(bp, BNX2_MQ_CONFIG); + val = BNX2_RD(bp, BNX2_MQ_CONFIG); val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; - if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { val |= BNX2_MQ_CONFIG_BIN_MQ_MODE; - if (CHIP_REV(bp) == CHIP_REV_Ax) + if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax) val |= BNX2_MQ_CONFIG_HALT_DIS; } - REG_WR(bp, BNX2_MQ_CONFIG, val); + BNX2_WR(bp, BNX2_MQ_CONFIG, val); val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); - REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val); - REG_WR(bp, BNX2_MQ_KNL_WIND_END, val); + BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val); + BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val); - val = (BCM_PAGE_BITS - 8) << 24; - REG_WR(bp, BNX2_RV2P_CONFIG, val); + val = (BNX2_PAGE_BITS - 8) << 24; + BNX2_WR(bp, BNX2_RV2P_CONFIG, val); /* Configure page size. 
*/ - val = REG_RD(bp, BNX2_TBDR_CONFIG); + val = BNX2_RD(bp, BNX2_TBDR_CONFIG); val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE; - val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; - REG_WR(bp, BNX2_TBDR_CONFIG, val); + val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40; + BNX2_WR(bp, BNX2_TBDR_CONFIG, val); val = bp->mac_addr[0] + (bp->mac_addr[1] << 8) + @@ -4922,14 +4927,14 @@ bnx2_init_chip(struct bnx2 *bp) bp->mac_addr[3] + (bp->mac_addr[4] << 8) + (bp->mac_addr[5] << 16); - REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val); + BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val); /* Program the MTU. Also include 4 bytes for CRC32. */ mtu = bp->dev->mtu; val = mtu + ETH_HLEN + ETH_FCS_LEN; if (val > (MAX_ETHERNET_PACKET_SIZE + 4)) val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA; - REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val); + BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val); if (mtu < 1500) mtu = 1500; @@ -4947,43 +4952,43 @@ bnx2_init_chip(struct bnx2 *bp) bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; /* Set up how to generate a link change interrupt. */ - REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); + BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); - REG_WR(bp, BNX2_HC_STATUS_ADDR_L, - (u64) bp->status_blk_mapping & 0xffffffff); - REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32); + BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L, + (u64) bp->status_blk_mapping & 0xffffffff); + BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32); - REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L, - (u64) bp->stats_blk_mapping & 0xffffffff); - REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H, - (u64) bp->stats_blk_mapping >> 32); + BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L, + (u64) bp->stats_blk_mapping & 0xffffffff); + BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H, + (u64) bp->stats_blk_mapping >> 32); - REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, - (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip); + BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, + (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip); - REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP, - (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip); + BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP, + (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip); - REG_WR(bp, BNX2_HC_COMP_PROD_TRIP, - (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip); + BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP, + (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip); - REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks); + BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks); - REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks); + BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks); - REG_WR(bp, BNX2_HC_COM_TICKS, - (bp->com_ticks_int << 16) | bp->com_ticks); + BNX2_WR(bp, BNX2_HC_COM_TICKS, + (bp->com_ticks_int << 16) | bp->com_ticks); - REG_WR(bp, BNX2_HC_CMD_TICKS, - (bp->cmd_ticks_int << 16) | bp->cmd_ticks); + BNX2_WR(bp, BNX2_HC_CMD_TICKS, + (bp->cmd_ticks_int << 16) | bp->cmd_ticks); if (bp->flags & BNX2_FLAG_BROKEN_STATS) - REG_WR(bp, BNX2_HC_STATS_TICKS, 0); + BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0); else - REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks); - REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ + BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks); + BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ - if (CHIP_ID(bp) == CHIP_ID_5706_A1) + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) val = BNX2_HC_CONFIG_COLLECT_STATS; else { val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE | @@ -4991,8 +4996,8 @@ 
bnx2_init_chip(struct bnx2 *bp) } if (bp->flags & BNX2_FLAG_USING_MSIX) { - REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR, - BNX2_HC_MSIX_BIT_VECTOR_VAL); + BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR, + BNX2_HC_MSIX_BIT_VECTOR_VAL); val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B; } @@ -5000,7 +5005,7 @@ bnx2_init_chip(struct bnx2 *bp) if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI) val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM; - REG_WR(bp, BNX2_HC_CONFIG, val); + BNX2_WR(bp, BNX2_HC_CONFIG, val); if (bp->rx_ticks < 25) bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1); @@ -5011,48 +5016,48 @@ bnx2_init_chip(struct bnx2 *bp) u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) + BNX2_HC_SB_CONFIG_1; - REG_WR(bp, base, + BNX2_WR(bp, base, BNX2_HC_SB_CONFIG_1_TX_TMR_MODE | BNX2_HC_SB_CONFIG_1_RX_TMR_MODE | BNX2_HC_SB_CONFIG_1_ONE_SHOT); - REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF, + BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF, (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip); - REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF, + BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF, (bp->tx_ticks_int << 16) | bp->tx_ticks); - REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF, - (bp->rx_quick_cons_trip_int << 16) | + BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF, + (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip); - REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF, + BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF, (bp->rx_ticks_int << 16) | bp->rx_ticks); } /* Clear internal stats counters. */ - REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW); + BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW); - REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); + BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); /* Initialize the receive filter. */ bnx2_set_rx_mode(bp->dev); - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL); val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE; - REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); + BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); } rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, 1, 0); - REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT); - REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS); + BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT); + BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS); udelay(20); - bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND); + bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND); return rc; } @@ -5086,7 +5091,7 @@ bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr) u32 val, offset0, offset1, offset2, offset3; u32 cid_addr = GET_CID_ADDR(cid); - if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { offset0 = BNX2_L2CTX_TYPE_XI; offset1 = BNX2_L2CTX_CMD_TYPE_XI; offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; @@ -5113,7 +5118,7 @@ bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr) static void bnx2_init_tx_ring(struct bnx2 *bp, int ring_num) { - struct tx_bd *txbd; + struct bnx2_tx_bd *txbd; u32 cid = TX_CID; struct bnx2_napi *bnapi; struct bnx2_tx_ring_info *txr; @@ -5128,7 +5133,7 @@ bnx2_init_tx_ring(struct bnx2 *bp, int ring_num) bp->tx_wake_thresh = bp->tx_ring_size / 2; - txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT]; + txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT]; txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32; txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff; @@ -5143,17 +5148,17 @@ bnx2_init_tx_ring(struct bnx2 *bp, int 
ring_num) } static void -bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size, - int num_rings) +bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[], + u32 buf_size, int num_rings) { int i; - struct rx_bd *rxbd; + struct bnx2_rx_bd *rxbd; for (i = 0; i < num_rings; i++) { int j; rxbd = &rx_ring[i][0]; - for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) { + for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) { rxbd->rx_bd_len = buf_size; rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; } @@ -5187,9 +5192,9 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) bnx2_init_rx_context(bp, cid); - if (CHIP_NUM(bp) == CHIP_NUM_5709) { - val = REG_RD(bp, BNX2_MQ_MAP_L2_5); - REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM); + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5); + BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM); } bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0); @@ -5208,8 +5213,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff; bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val); - if (CHIP_NUM(bp) == CHIP_NUM_5709) - REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT); + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT); } val = (u64) rxr->rx_desc_mapping[0] >> 32; @@ -5225,8 +5230,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) ring_num, i, bp->rx_pg_ring_size); break; } - prod = NEXT_RX_BD(prod); - ring_prod = RX_PG_RING_IDX(prod); + prod = BNX2_NEXT_RX_BD(prod); + ring_prod = BNX2_RX_PG_RING_IDX(prod); } rxr->rx_pg_prod = prod; @@ -5237,8 +5242,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) ring_num, i, bp->rx_ring_size); break; } - prod = NEXT_RX_BD(prod); - ring_prod = RX_RING_IDX(prod); + prod = BNX2_NEXT_RX_BD(prod); + ring_prod = BNX2_RX_RING_IDX(prod); } rxr->rx_prod = prod; @@ -5246,10 +5251,10 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ; rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX; - REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); - REG_WR16(bp, rxr->rx_bidx_addr, prod); + BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); + BNX2_WR16(bp, rxr->rx_bidx_addr, prod); - REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); + BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); } static void @@ -5260,15 +5265,15 @@ bnx2_init_all_rings(struct bnx2 *bp) bnx2_clear_ring_states(bp); - REG_WR(bp, BNX2_TSCH_TSS_CFG, 0); + BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0); for (i = 0; i < bp->num_tx_rings; i++) bnx2_init_tx_ring(bp, i); if (bp->num_tx_rings > 1) - REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) | - (TX_TSS_CID << 7)); + BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) | + (TX_TSS_CID << 7)); - REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0); + BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0); bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0); for (i = 0; i < bp->num_rx_rings; i++) @@ -5282,8 +5287,8 @@ bnx2_init_all_rings(struct bnx2 *bp) tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift; if ((i % 8) == 7) { - REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32); - REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) | + BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32); + BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) | BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK | BNX2_RLUP_RSS_COMMAND_WRITE | BNX2_RLUP_RSS_COMMAND_HASH_MASK); @@ -5294,7 +5299,7 @@ bnx2_init_all_rings(struct bnx2 *bp) val = 
BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI | BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI; - REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val); + BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val); } } @@ -5303,8 +5308,8 @@ static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size) { u32 max, num_rings = 1; - while (ring_size > MAX_RX_DESC_CNT) { - ring_size -= MAX_RX_DESC_CNT; + while (ring_size > BNX2_MAX_RX_DESC_CNT) { + ring_size -= BNX2_MAX_RX_DESC_CNT; num_rings++; } /* round to next power of 2 */ @@ -5337,13 +5342,14 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; jumbo_size = size * pages; - if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT) - jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT; + if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT) + jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT; bp->rx_pg_ring_size = jumbo_size; bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size, - MAX_RX_PG_RINGS); - bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1; + BNX2_MAX_RX_PG_RINGS); + bp->rx_max_pg_ring_idx = + (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1; rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET; bp->rx_copy_thresh = 0; } @@ -5354,8 +5360,8 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET; bp->rx_ring_size = size; - bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS); - bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1; + bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS); + bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1; } static void @@ -5371,13 +5377,13 @@ bnx2_free_tx_skbs(struct bnx2 *bp) if (txr->tx_buf_ring == NULL) continue; - for (j = 0; j < TX_DESC_CNT; ) { - struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; + for (j = 0; j < BNX2_TX_DESC_CNT; ) { + struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; struct sk_buff *skb = tx_buf->skb; int k, last; if (skb == NULL) { - j = NEXT_TX_BD(j); + j = BNX2_NEXT_TX_BD(j); continue; } @@ -5389,9 +5395,9 @@ bnx2_free_tx_skbs(struct bnx2 *bp) tx_buf->skb = NULL; last = tx_buf->nr_frags; - j = NEXT_TX_BD(j); - for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) { - tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; + j = BNX2_NEXT_TX_BD(j); + for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) { + tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)]; dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[k]), @@ -5417,7 +5423,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp) return; for (j = 0; j < bp->rx_max_ring_idx; j++) { - struct sw_bd *rx_buf = &rxr->rx_buf_ring[j]; + struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j]; u8 *data = rx_buf->data; if (data == NULL) @@ -5615,7 +5621,7 @@ bnx2_test_registers(struct bnx2 *bp) ret = 0; is_5709 = 0; - if (CHIP_NUM(bp) == CHIP_NUM_5709) + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) is_5709 = 1; for (i = 0; reg_tbl[i].offset != 0xffff; i++) { @@ -5714,7 +5720,7 @@ bnx2_test_memory(struct bnx2 *bp) }; struct mem_entry *mem_tbl; - if (CHIP_NUM(bp) == CHIP_NUM_5709) + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) mem_tbl = mem_tbl_5709; else mem_tbl = mem_tbl_5706; @@ -5741,8 +5747,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) unsigned char *packet; u16 rx_start_idx, rx_idx; dma_addr_t map; - struct tx_bd *txbd; - struct sw_bd *rx_buf; + struct bnx2_tx_bd *txbd; + struct bnx2_sw_bd *rx_buf; struct l2_fhdr *rx_hdr; int ret = -ENODEV; struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi; @@ -5784,17 +5790,17 @@ 
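Further up, the RSS block in bnx2_init_all_rings() packs the indirection table eight entries at a time into one 32-bit word before each BNX2_RLUP_RSS_DATA/BNX2_RLUP_RSS_COMMAND write, i.e. four bits per entry. Here is a stand-alone illustration of that packing; the table length and the shift computation are not part of the visible hunk, so the values below are assumptions:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_RSS_TBL_SIZE	32u	/* assumed; the driver's table is larger */

static void sketch_pack_rss_table(unsigned int num_rx_rings)
{
	uint32_t tbl_32 = 0;
	unsigned int i;

	for (i = 0; i < SKETCH_RSS_TBL_SIZE; i++) {
		unsigned int shift = (i % 8) << 2;	/* one nibble per entry */

		/* Matches the hunk: entries cycle over (num_rx_rings - 1) values. */
		tbl_32 |= (i % (num_rx_rings - 1)) << shift;
		if ((i % 8) == 7) {
			/* The driver writes this word to BNX2_RLUP_RSS_DATA and
			 * then issues a BNX2_RLUP_RSS_COMMAND keyed on (i >> 3);
			 * here it is just printed. */
			printf("word %u: 0x%08x\n", i >> 3, tbl_32);
			tbl_32 = 0;
		}
	}
}

int main(void)
{
	sketch_pack_rss_table(4);	/* 4 rx rings -> entries cycle 0, 1, 2 */
	return 0;
}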
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) return -EIO; } - REG_WR(bp, BNX2_HC_COMMAND, - bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + BNX2_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); - REG_RD(bp, BNX2_HC_COMMAND); + BNX2_RD(bp, BNX2_HC_COMMAND); udelay(5); rx_start_idx = bnx2_get_hw_rx_cons(bnapi); num_pkts = 0; - txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)]; + txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)]; txbd->tx_bd_haddr_hi = (u64) map >> 32; txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff; @@ -5802,18 +5808,18 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END; num_pkts++; - txr->tx_prod = NEXT_TX_BD(txr->tx_prod); + txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod); txr->tx_prod_bseq += pkt_size; - REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod); - REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); + BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod); + BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); udelay(100); - REG_WR(bp, BNX2_HC_COMMAND, - bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + BNX2_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); - REG_RD(bp, BNX2_HC_COMMAND); + BNX2_RD(bp, BNX2_HC_COMMAND); udelay(5); @@ -5962,14 +5968,14 @@ bnx2_test_intr(struct bnx2 *bp) if (!netif_running(bp->dev)) return -ENODEV; - status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; + status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; /* This register is not touched during run-time. */ - REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); - REG_RD(bp, BNX2_HC_COMMAND); + BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); + BNX2_RD(bp, BNX2_HC_COMMAND); for (i = 0; i < 10; i++) { - if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) != + if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) != status_idx) { break; @@ -6132,11 +6138,11 @@ bnx2_timer(unsigned long data) /* workaround occasional corrupted counters */ if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks) - REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | - BNX2_HC_COMMAND_STATS_NOW); + BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | + BNX2_HC_COMMAND_STATS_NOW); if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { - if (CHIP_NUM(bp) == CHIP_NUM_5706) + if (BNX2_CHIP(bp) == BNX2_CHIP_5706) bnx2_5706_serdes_timer(bp); else bnx2_5708_serdes_timer(bp); @@ -6205,13 +6211,13 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) const int len = sizeof(bp->irq_tbl[0].name); bnx2_setup_msix_tbl(bp); - REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1); - REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE); - REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE); + BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1); + BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE); + BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE); /* Need to flush the previous three writes to ensure MSI-X * is setup properly */ - REG_RD(bp, BNX2_PCI_MSIX_CONTROL); + BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL); for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { msix_ent[i].entry = i; @@ -6274,7 +6280,7 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi) !(bp->flags & BNX2_FLAG_USING_MSIX)) { if (pci_enable_msi(bp->pdev) == 0) { bp->flags |= BNX2_FLAG_USING_MSI; - if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bp->flags |= BNX2_FLAG_ONE_SHOT_MSI; bp->irq_tbl[0].handler = bnx2_msi_1shot; } else @@ -6464,22 
+6470,22 @@ bnx2_dump_ftq(struct bnx2 *bp) netdev_err(dev, "<--- end FTQ dump --->\n"); netdev_err(dev, "<--- start TBDC dump --->\n"); netdev_err(dev, "TBDC free cnt: %ld\n", - REG_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT); + BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT); netdev_err(dev, "LINE CID BIDX CMD VALIDS\n"); for (i = 0; i < 0x20; i++) { int j = 0; - REG_WR(bp, BNX2_TBDC_BD_ADDR, i); - REG_WR(bp, BNX2_TBDC_CAM_OPCODE, - BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ); - REG_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB); - while ((REG_RD(bp, BNX2_TBDC_COMMAND) & + BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i); + BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE, + BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ); + BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB); + while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) & BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100) j++; - cid = REG_RD(bp, BNX2_TBDC_CID); - bdidx = REG_RD(bp, BNX2_TBDC_BIDX); - valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE); + cid = BNX2_RD(bp, BNX2_TBDC_CID); + bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX); + valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE); netdev_err(dev, "%02x %06x %04lx %02x [%x]\n", i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX, bdidx >> 24, (valid >> 8) & 0x0ff); @@ -6500,15 +6506,15 @@ bnx2_dump_state(struct bnx2 *bp) pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2); netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2); netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n", - REG_RD(bp, BNX2_EMAC_TX_STATUS), - REG_RD(bp, BNX2_EMAC_RX_STATUS)); + BNX2_RD(bp, BNX2_EMAC_TX_STATUS), + BNX2_RD(bp, BNX2_EMAC_RX_STATUS)); netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n", - REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL)); + BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL)); netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n", - REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS)); + BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS)); if (bp->flags & BNX2_FLAG_USING_MSIX) netdev_err(dev, "DEBUG: PBA[%08x]\n", - REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE)); + BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE)); } static void @@ -6533,8 +6539,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bnx2 *bp = netdev_priv(dev); dma_addr_t mapping; - struct tx_bd *txbd; - struct sw_tx_bd *tx_buf; + struct bnx2_tx_bd *txbd; + struct bnx2_sw_tx_bd *tx_buf; u32 len, vlan_tag_flags, last_frag, mss; u16 prod, ring_prod; int i; @@ -6557,7 +6563,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) } len = skb_headlen(skb); prod = txr->tx_prod; - ring_prod = TX_RING_IDX(prod); + ring_prod = BNX2_TX_RING_IDX(prod); vlan_tag_flags = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { @@ -6627,8 +6633,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < last_frag; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - prod = NEXT_TX_BD(prod); - ring_prod = TX_RING_IDX(prod); + prod = BNX2_NEXT_TX_BD(prod); + ring_prod = BNX2_TX_RING_IDX(prod); txbd = &txr->tx_desc_ring[ring_prod]; len = skb_frag_size(frag); @@ -6652,11 +6658,11 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(txq, skb->len); - prod = NEXT_TX_BD(prod); + prod = BNX2_NEXT_TX_BD(prod); txr->tx_prod_bseq += skb->len; - REG_WR16(bp, txr->tx_bidx_addr, prod); - REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); + BNX2_WR16(bp, txr->tx_bidx_addr, prod); + BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); mmiowb(); @@ -6682,7 +6688,7 @@ dma_error: /* start back at beginning and unmap skb */ prod = 
txr->tx_prod; - ring_prod = TX_RING_IDX(prod); + ring_prod = BNX2_TX_RING_IDX(prod); tx_buf = &txr->tx_buf_ring[ring_prod]; tx_buf->skb = NULL; dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), @@ -6690,8 +6696,8 @@ dma_error: /* unmap remaining mapped pages */ for (i = 0; i < last_frag; i++) { - prod = NEXT_TX_BD(prod); - ring_prod = TX_RING_IDX(prod); + prod = BNX2_NEXT_TX_BD(prod); + ring_prod = BNX2_TX_RING_IDX(prod); tx_buf = &txr->tx_buf_ring[ring_prod]; dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), @@ -6810,8 +6816,8 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) + GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions); - if ((CHIP_NUM(bp) == CHIP_NUM_5706) || - (CHIP_ID(bp) == CHIP_ID_5708_A0)) + if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0)) net_stats->tx_carrier_errors = 0; else { net_stats->tx_carrier_errors = @@ -7030,7 +7036,7 @@ bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) offset = reg_boundaries[0]; p += offset; while (offset < BNX2_REGDUMP_LEN) { - *p++ = REG_RD(bp, offset); + *p++ = BNX2_RD(bp, offset); offset += 4; if (offset == reg_boundaries[i + 1]) { offset = reg_boundaries[i + 2]; @@ -7254,13 +7260,13 @@ bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct bnx2 *bp = netdev_priv(dev); - ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT; - ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT; + ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT; + ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT; ering->rx_pending = bp->rx_ring_size; ering->rx_jumbo_pending = bp->rx_pg_ring_size; - ering->tx_max_pending = MAX_TX_DESC_CNT; + ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT; ering->tx_pending = bp->tx_ring_size; } @@ -7326,8 +7332,8 @@ bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) struct bnx2 *bp = netdev_priv(dev); int rc; - if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) || - (ering->tx_pending > MAX_TX_DESC_CNT) || + if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) || + (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) || (ering->tx_pending <= MAX_SKB_FRAGS)) { return -EINVAL; @@ -7614,10 +7620,10 @@ bnx2_get_ethtool_stats(struct net_device *dev, return; } - if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || - (CHIP_ID(bp) == CHIP_ID_5706_A1) || - (CHIP_ID(bp) == CHIP_ID_5706_A2) || - (CHIP_ID(bp) == CHIP_ID_5708_A0)) + if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0)) stats_len_arr = bnx2_5706_stats_len_arr; else stats_len_arr = bnx2_5708_stats_len_arr; @@ -7655,26 +7661,26 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) case ETHTOOL_ID_ACTIVE: bnx2_set_power_state(bp, PCI_D0); - bp->leds_save = REG_RD(bp, BNX2_MISC_CFG); - REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); + bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG); + BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); return 1; /* cycle on/off once per second */ case ETHTOOL_ID_ON: - REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE | - BNX2_EMAC_LED_1000MB_OVERRIDE | - BNX2_EMAC_LED_100MB_OVERRIDE | - BNX2_EMAC_LED_10MB_OVERRIDE | - BNX2_EMAC_LED_TRAFFIC_OVERRIDE | - BNX2_EMAC_LED_TRAFFIC); + BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE | + 
BNX2_EMAC_LED_1000MB_OVERRIDE | + BNX2_EMAC_LED_100MB_OVERRIDE | + BNX2_EMAC_LED_10MB_OVERRIDE | + BNX2_EMAC_LED_TRAFFIC_OVERRIDE | + BNX2_EMAC_LED_TRAFFIC); break; case ETHTOOL_ID_OFF: - REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE); + BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE); break; case ETHTOOL_ID_INACTIVE: - REG_WR(bp, BNX2_EMAC_LED, 0); - REG_WR(bp, BNX2_MISC_CFG, bp->leds_save); + BNX2_WR(bp, BNX2_EMAC_LED, 0); + BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save); if (!netif_running(dev)) bnx2_set_power_state(bp, PCI_D3hot); @@ -7896,10 +7902,10 @@ poll_bnx2(struct net_device *dev) } #endif -static void __devinit +static void bnx2_get_5709_media(struct bnx2 *bp) { - u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL); + u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL); u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID; u32 strap; @@ -7934,18 +7940,18 @@ bnx2_get_5709_media(struct bnx2 *bp) } } -static void __devinit +static void bnx2_get_pci_speed(struct bnx2 *bp) { u32 reg; - reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS); + reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS); if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) { u32 clkreg; bp->flags |= BNX2_FLAG_PCIX; - clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS); + clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS); clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; switch (clkreg) { @@ -7986,7 +7992,7 @@ bnx2_get_pci_speed(struct bnx2 *bp) } -static void __devinit +static void bnx2_read_vpd_fw_ver(struct bnx2 *bp) { int rc, i, j; @@ -8054,7 +8060,7 @@ vpd_done: kfree(data); } -static int __devinit +static int bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) { struct bnx2 *bp; @@ -8131,20 +8137,20 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) * Rely on CPU to do target byte swapping on big endian systems * The chip's target access swapping will not swap all accesses */ - REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, - BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | - BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); + BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, + BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | + BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); - bp->chip_id = REG_RD(bp, BNX2_MISC_ID); + bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID); - if (CHIP_NUM(bp) == CHIP_NUM_5709) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { if (!pci_is_pcie(pdev)) { dev_err(&pdev->dev, "Not PCIE, aborting\n"); rc = -EIO; goto err_out_unmap; } bp->flags |= BNX2_FLAG_PCIE; - if (CHIP_REV(bp) == CHIP_REV_Ax) + if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax) bp->flags |= BNX2_FLAG_JUMBO_BROKEN; /* AER (Advanced Error Reporting) hooks */ @@ -8163,18 +8169,20 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->flags |= BNX2_FLAG_BROKEN_STATS; } - if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5709 && + BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) { if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) bp->flags |= BNX2_FLAG_MSIX_CAP; } - if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) { + if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 && + BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) { if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) bp->flags |= BNX2_FLAG_MSI_CAP; } /* 5708 cannot support DMA addresses > 40-bit. 
*/ - if (CHIP_NUM(bp) == CHIP_NUM_5708) + if (BNX2_CHIP(bp) == BNX2_CHIP_5708) persist_dma_mask = dma_mask = DMA_BIT_MASK(40); else persist_dma_mask = dma_mask = DMA_BIT_MASK(64); @@ -8197,12 +8205,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bnx2_get_pci_speed(bp); /* 5706A0 may falsely detect SERR and PERR. */ - if (CHIP_ID(bp) == CHIP_ID_5706_A0) { - reg = REG_RD(bp, PCI_COMMAND); + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { + reg = BNX2_RD(bp, PCI_COMMAND); reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY); - REG_WR(bp, PCI_COMMAND, reg); - } - else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) && + BNX2_WR(bp, PCI_COMMAND, reg); + } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) && !(bp->flags & BNX2_FLAG_PCIX)) { dev_err(&pdev->dev, @@ -8299,7 +8306,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->mac_addr[4] = (u8) (reg >> 8); bp->mac_addr[5] = (u8) reg; - bp->tx_ring_size = MAX_TX_DESC_CNT; + bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT; bnx2_set_rx_ring_size(bp, 255); bp->tx_quick_cons_trip_int = 2; @@ -8319,9 +8326,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->phy_addr = 1; /* Disable WOL support if we are running on a SERDES chip. */ - if (CHIP_NUM(bp) == CHIP_NUM_5709) + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_get_5709_media(bp); - else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) + else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT) bp->phy_flags |= BNX2_PHY_FLAG_SERDES; bp->phy_port = PORT_TP; @@ -8332,7 +8339,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->flags |= BNX2_FLAG_NO_WOL; bp->wol = 0; } - if (CHIP_NUM(bp) == CHIP_NUM_5706) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5706) { /* Don't do parallel detect on this board because of * some board problems. The link will not go down * if we do parallel detect. @@ -8345,25 +8352,25 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE; } - } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || - CHIP_NUM(bp) == CHIP_NUM_5708) + } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 || + BNX2_CHIP(bp) == BNX2_CHIP_5708) bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX; - else if (CHIP_NUM(bp) == CHIP_NUM_5709 && - (CHIP_REV(bp) == CHIP_REV_Ax || - CHIP_REV(bp) == CHIP_REV_Bx)) + else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 && + (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax || + BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx)) bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC; bnx2_init_fw_cap(bp); - if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || - (CHIP_ID(bp) == CHIP_ID_5708_B0) || - (CHIP_ID(bp) == CHIP_ID_5708_B1) || - !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) { + if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) || + !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) { bp->flags |= BNX2_FLAG_NO_WOL; bp->wol = 0; } - if (CHIP_ID(bp) == CHIP_ID_5706_A0) { + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { bp->tx_quick_cons_trip_int = bp->tx_quick_cons_trip; bp->tx_ticks_int = bp->tx_ticks; @@ -8385,7 +8392,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) * AMD believes this incompatibility is unique to the 5706, and * prefers to locally disable MSI rather than globally disabling it. 
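In the bnx2_init_board() hunk, the 5708 is capped at a 40-bit DMA mask while the other chips get 64-bit. The calls that actually apply persist_dma_mask and dma_mask fall outside the visible hunk, so the following is only a generic sketch of how such masks are typically applied through the core DMA API, not the driver's exact sequence:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

/* Apply a streaming mask and a coherent ("persistent") mask, falling
 * back to 32-bit addressing if the preferred masks are rejected. */
static int sketch_set_dma_masks(struct pci_dev *pdev, u64 dma_mask,
				u64 persist_dma_mask)
{
	if (dma_set_mask(&pdev->dev, dma_mask) == 0 &&
	    dma_set_coherent_mask(&pdev->dev, persist_dma_mask) == 0)
		return 0;

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;
	return 0;
}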
*/ - if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) { struct pci_dev *amd_8132 = NULL; while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD, @@ -8414,6 +8421,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->cnic_eth_dev.max_iscsi_conn = (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) & BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT; + bp->cnic_probe = bnx2_cnic_probe; #endif pci_save_state(pdev); @@ -8439,7 +8447,7 @@ err_out: return rc; } -static char * __devinit +static char * bnx2_bus_string(struct bnx2 *bp, char *str) { char *s = str; @@ -8505,7 +8513,7 @@ static const struct net_device_ops bnx2_netdev_ops = { #endif }; -static int __devinit +static int bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int version_printed = 0; @@ -8541,7 +8549,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_RXHASH | NETIF_F_RXCSUM; - if (CHIP_NUM(bp) == CHIP_NUM_5709) + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; dev->vlan_features = dev->hw_features; @@ -8556,8 +8564,8 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, " "node addr %pM\n", board_info[ent->driver_data].name, - ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', - ((CHIP_ID(bp) & 0x0ff0) >> 4), + ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A', + ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4), bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0), pdev->irq, dev->dev_addr); @@ -8573,7 +8581,7 @@ err_free: return rc; } -static void __devexit +static void bnx2_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); @@ -8752,7 +8760,7 @@ static struct pci_driver bnx2_pci_driver = { .name = DRV_MODULE_NAME, .id_table = bnx2_pci_tbl, .probe = bnx2_init_one, - .remove = __devexit_p(bnx2_remove_one), + .remove = bnx2_remove_one, .suspend = bnx2_suspend, .resume = bnx2_resume, .err_handler = &bnx2_err_handler, |
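The other change running through these hunks is the removal of the __devinit/__devexit section annotations and of the __devexit_p() wrapper on .remove, part of the tree-wide retirement of those markers. A minimal sketch of the resulting driver shape, with purely hypothetical names and an example device ID:

#include <linux/module.h>
#include <linux/pci.h>

static int sketch_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return pci_enable_device(pdev);	/* plain function, no __devinit */
}

static void sketch_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);	/* plain function, no __devexit */
}

static const struct pci_device_id sketch_ids[] = {
	{ PCI_DEVICE(0x14e4, 0x164a) },	/* example Broadcom/BCM5706 IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, sketch_ids);

static struct pci_driver sketch_driver = {
	.name		= "sketch",
	.id_table	= sketch_ids,
	.probe		= sketch_probe,
	.remove		= sketch_remove,	/* no __devexit_p() wrapper */
};
module_pci_driver(sketch_driver);

MODULE_LICENSE("GPL");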