From d6a98c9680a6f885715ce64daa5de73b33d3edd4 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 16 May 2013 01:15:41 +0000 Subject: drivers/net/ethernet/renesas: don't check resource with devm_ioremap_resource devm_ioremap_resource does sanity checks on the given resource. No need to duplicate this in the driver. Signed-off-by: Wolfram Sang Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 33dc6f2418f2..42e9dd05c936 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2745,11 +2745,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) if (mdp->cd->tsu) { struct resource *rtsu; rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!rtsu) { - dev_err(&pdev->dev, "Not found TSU resource\n"); - ret = -ENODEV; - goto out_release; - } mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); if (IS_ERR(mdp->tsu_addr)) { ret = PTR_ERR(mdp->tsu_addr); -- cgit v1.2.3 From ed8a83a1f4655a31e0080a19f08d896bb29434a9 Mon Sep 17 00:00:00 2001 From: "govindarajulu.v" Date: Thu, 16 May 2013 06:24:41 +0000 Subject: net: 3com: 3c509: remove unnecessary code This patch removes unnecessary #if 0 code from 3c509.c Signed-off-by: govindarajulu.v Signed-off-by: David S. Miller --- drivers/net/ethernet/3com/3c509.c | 19 ------------------- 1 file changed, 19 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index adb4bf5eb4b4..ede8daa68275 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -723,25 +723,6 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev) pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n", dev->name, skb->len, inw(ioaddr + EL3_STATUS)); } -#if 0 -#ifndef final_version - { /* Error-checking code, delete someday. */ - ushort status = inw(ioaddr + EL3_STATUS); - if (status & 0x0001 && /* IRQ line active, missed one. */ - inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */ - pr_debug("%s: Missed interrupt, status then %04x now %04x" - " Tx %2.2x Rx %4.4x.\n", dev->name, status, - inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS), - inw(ioaddr + RX_STATUS)); - /* Fake interrupt trigger by masking, acknowledge interrupts. */ - outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); - outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, - ioaddr + EL3_CMD); - outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); - } - } -#endif -#endif /* * We lock the driver against other processors. Note * we don't need to lock versus the IRQ as we suspended -- cgit v1.2.3 From bfd428daaf619d671f6463bd8c7e4df8107e4775 Mon Sep 17 00:00:00 2001 From: Emilio López Date: Fri, 17 May 2013 10:42:53 +0000 Subject: net: ethernet: sun: initialize variables directly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Clean up the code a bit to initialize the variables directly when defining them. Signed-off-by: Emilio López Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sun/sunbmac.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 054975939a18..09b4f8c0b199 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -1017,10 +1017,7 @@ static void bigmac_set_multicast(struct net_device *dev) tmp |= BIGMAC_RXCFG_PMISC; sbus_writel(tmp, bregs + BMAC_RXCFG); } else { - u16 hash_table[4]; - - for (i = 0; i < 4; i++) - hash_table[i] = 0; + u16 hash_table[4] = { 0 }; netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); -- cgit v1.2.3 From 3b0aaef800c0949198fb6304adc7ff1ae7413f02 Mon Sep 17 00:00:00 2001 From: Emilio López Date: Fri, 17 May 2013 10:42:54 +0000 Subject: net: ethernet: apple: initialize variables directly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Clean up the code a bit to initialize the variables directly when defining them. Signed-off-by: Emilio López Signed-off-by: David S. Miller --- drivers/net/ethernet/apple/bmac.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index f36bbd6d5085..714dcfe3a469 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -1030,14 +1030,12 @@ static void bmac_set_multicast(struct net_device *dev) rx_cfg |= RxPromiscEnable; bmwrite(dev, RXCFG, rx_cfg); } else { - u16 hash_table[4]; + u16 hash_table[4] = { 0 }; rx_cfg = bmread(dev, RXCFG); rx_cfg &= ~RxPromiscEnable; bmwrite(dev, RXCFG, rx_cfg); - for(i = 0; i < 4; i++) hash_table[i] = 0; - netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc >>= 26; -- cgit v1.2.3 From 35e729ac5d5c1a164aee475583cb9830e9707b61 Mon Sep 17 00:00:00 2001 From: Emilio López Date: Fri, 17 May 2013 10:42:55 +0000 Subject: net: ethernet: davicom: dm9000: initialize variables directly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Clean up the code a bit to initialize the variables directly when defining them. Signed-off-by: Emilio López Signed-off-by: David S. 
Miller --- drivers/net/ethernet/davicom/dm9000.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 9105465b2a1a..a2408c84bbdd 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -827,7 +827,7 @@ dm9000_hash_table_unlocked(struct net_device *dev) struct netdev_hw_addr *ha; int i, oft; u32 hash_val; - u16 hash_table[4]; + u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */ u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN; dm9000_dbg(db, 1, "entering %s\n", __func__); @@ -835,13 +835,6 @@ dm9000_hash_table_unlocked(struct net_device *dev) for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++) iow(db, oft, dev->dev_addr[i]); - /* Clear Hash Table */ - for (i = 0; i < 4; i++) - hash_table[i] = 0x0; - - /* broadcast address */ - hash_table[3] = 0x8000; - if (dev->flags & IFF_PROMISC) rcr |= RCR_PRMSC; -- cgit v1.2.3 From e998fd413e5e7dac750dfe79c79cbf8f417d41b7 Mon Sep 17 00:00:00 2001 From: Emilio López Date: Fri, 17 May 2013 10:42:56 +0000 Subject: net: ethernet: korina: initialize variables directly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Clean up the code a bit to initialize the variables directly when defining them. Signed-off-by: Emilio López Signed-off-by: David S. Miller --- drivers/net/ethernet/korina.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 5409fe876a44..0b5708520121 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -495,12 +495,9 @@ static void korina_multicast_list(struct net_device *dev) /* Build the hash table */ if (netdev_mc_count(dev) > 4) { - u16 hash_table[4]; + u16 hash_table[4] = { 0 }; u32 crc; - for (i = 0; i < 4; i++) - hash_table[i] = 0; - netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc >>= 26; -- cgit v1.2.3 From 9e2ecbeb250dee67f4bd06a18637c120b48a4865 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Sat, 18 May 2013 06:26:52 +0000 Subject: tg3: Add ethtool_eee struct and tg3_setup_eee() Add an eee structure and update it with eee settings. This will be used for set/get_eee operations. Add common function tg3_setup_eee() that will be used in the subsequent patches. Reviewed-by: Ben Li Signed-off-by: Michael Chan Signed-off-by: Nithin Nayak Sujir Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/tg3.c | 91 +++++++++++++++++++++++-------------- drivers/net/ethernet/broadcom/tg3.h | 1 + 2 files changed, 59 insertions(+), 33 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 728d42ab2a76..26804db1f884 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -4249,6 +4249,16 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) /* Advertise 1000-BaseT EEE ability */ if (advertise & ADVERTISED_1000baseT_Full) val |= MDIO_AN_EEE_ADV_1000T; + + if (!tp->eee.eee_enabled) { + val = 0; + tp->eee.advertised = 0; + } else { + tp->eee.advertised = advertise & + (ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + } + err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); if (err) val = 0; @@ -4613,6 +4623,42 @@ static void tg3_clear_mac_status(struct tg3 *tp) udelay(40); } +static void tg3_setup_eee(struct tg3 *tp) +{ + u32 val; + + val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | + TG3_CPMU_EEE_LNKIDL_UART_IDL; + if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) + val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; + + tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); + + tw32_f(TG3_CPMU_EEE_CTRL, + TG3_CPMU_EEE_CTRL_EXIT_20_1_US); + + val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | + (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | + TG3_CPMU_EEEMD_LPI_IN_RX | + TG3_CPMU_EEEMD_EEE_ENABLE; + + if (tg3_asic_rev(tp) != ASIC_REV_5717) + val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; + + if (tg3_flag(tp, ENABLE_APE)) + val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; + + tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); + + tw32_f(TG3_CPMU_EEE_DBTMR1, + TG3_CPMU_DBTMR1_PCIEXIT_2047US | + (tp->eee.tx_lpi_timer & 0xffff)); + + tw32_f(TG3_CPMU_EEE_DBTMR2, + TG3_CPMU_DBTMR2_APE_TX_2047US | + TG3_CPMU_DBTMR2_TXIDXEQ_2047US); +} + static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) { bool current_link_up; @@ -9448,38 +9494,8 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) tg3_abort_hw(tp, 1); /* Enable MAC control of LPI */ - if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { - val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | - TG3_CPMU_EEE_LNKIDL_UART_IDL; - if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) - val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; - - tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); - - tw32_f(TG3_CPMU_EEE_CTRL, - TG3_CPMU_EEE_CTRL_EXIT_20_1_US); - - val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | - TG3_CPMU_EEEMD_LPI_IN_TX | - TG3_CPMU_EEEMD_LPI_IN_RX | - TG3_CPMU_EEEMD_EEE_ENABLE; - - if (tg3_asic_rev(tp) != ASIC_REV_5717) - val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; - - if (tg3_flag(tp, ENABLE_APE)) - val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; - - tw32_f(TG3_CPMU_EEE_MODE, val); - - tw32_f(TG3_CPMU_EEE_DBTMR1, - TG3_CPMU_DBTMR1_PCIEXIT_2047US | - TG3_CPMU_DBTMR1_LNKIDLE_2047US); - - tw32_f(TG3_CPMU_EEE_DBTMR2, - TG3_CPMU_DBTMR2_APE_TX_2047US | - TG3_CPMU_DBTMR2_TXIDXEQ_2047US); - } + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + tg3_setup_eee(tp); if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { @@ -14946,9 +14962,18 @@ static int tg3_phy_probe(struct tg3 *tp) (tg3_asic_rev(tp) == ASIC_REV_5717 && tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || (tg3_asic_rev(tp) == ASIC_REV_57765 && - tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) + tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) { tp->phy_flags |= TG3_PHYFLG_EEE_CAP; + tp->eee.supported = SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full; + 
tp->eee.advertised = ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full; + tp->eee.eee_enabled = 1; + tp->eee.tx_lpi_enabled = 1; + tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US; + } + tg3_phy_init_link_config(tp); if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 9b2d3ac2474a..057532c5c56c 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -3371,6 +3371,7 @@ struct tg3 { unsigned int irq_cnt; struct ethtool_coalesce coal; + struct ethtool_eee eee; /* firmware info */ const char *fw_needed; -- cgit v1.2.3 From 400dfbaa8d444a29056b051a3d7082dc611e3b12 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Sat, 18 May 2013 06:26:53 +0000 Subject: tg3: Add tg3_eee_pull_config() function Add tg3_eee_pull_config() to pull the settings from the hardware and populate the eee structure. If Link Flap Avoidance is enabled, we pull the eee settings from the hw so as not to cause a phy reset on eee config mismatch later. This requires moving down tg3_setup_eee() below the tg3_pull_config() to not trample existing settings. Reviewed-by: Ben Li Signed-off-by: Michael Chan Signed-off-by: Nithin Nayak Sujir Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/tg3.c | 56 +++++++++++++++++++++++++++++++------ 1 file changed, 47 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 26804db1f884..3e7bc3837ef9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -2320,6 +2320,46 @@ static void tg3_phy_apply_otp(struct tg3 *tp) tg3_phy_toggle_auxctl_smdsp(tp, false); } +static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) +{ + u32 val; + struct ethtool_eee *dest = &tp->eee; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return; + + if (eee) + dest = eee; + + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) + return; + + /* Pull eee_active */ + if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || + val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { + dest->eee_active = 1; + } else + dest->eee_active = 0; + + /* Pull lp advertised settings */ + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) + return; + dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); + + /* Pull advertised and eee_enabled settings */ + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) + return; + dest->eee_enabled = !!val; + dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); + + /* Pull tx_lpi_enabled */ + val = tr32(TG3_CPMU_EEE_MODE); + dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); + + /* Pull lpi timer value */ + dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; +} + static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) { u32 val; @@ -2343,11 +2383,8 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) tw32(TG3_CPMU_EEE_CTRL, eeectl); - tg3_phy_cl45_read(tp, MDIO_MMD_AN, - TG3_CL45_D7_EEERES_STAT, &val); - - if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || - val == TG3_CL45_D7_EEERES_STAT_LP_100TX) + tg3_eee_pull_config(tp, NULL); + if (tp->eee.eee_active) tp->setlpicnt = 2; } @@ -9493,16 +9530,17 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) if (tg3_flag(tp, INIT_COMPLETE)) tg3_abort_hw(tp, 1); - /* Enable MAC control of LPI */ - if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) - tg3_setup_eee(tp); - if 
((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { tg3_phy_pull_config(tp); + tg3_eee_pull_config(tp, NULL); tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; } + /* Enable MAC control of LPI */ + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + tg3_setup_eee(tp); + if (reset_phy) tg3_phy_reset(tp); -- cgit v1.2.3 From 5b6c273ad6c3886f30c7c5df7009e489043c59f3 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Sat, 18 May 2013 06:26:54 +0000 Subject: tg3: Simplify tg3_phy_eee_config_ok() by reusing tg3_eee_pull_config() eee_config_ok() was checking only for mismatch in advertised settings. This patch expands the scope of eee_config_ok() to check for mismatch in the other eee settings. On mismatch we will require a call to tg3_setup_eee() to push the configured settings to the hardware. Reviewed-by: Ben Li Signed-off-by: Michael Chan Signed-off-by: Nithin Nayak Sujir Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/tg3.c | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3e7bc3837ef9..2e4974823853 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -4540,26 +4540,23 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp) static bool tg3_phy_eee_config_ok(struct tg3 *tp) { - u32 val; - u32 tgtadv = 0; - u32 advertising = tp->link_config.advertising; + struct ethtool_eee eee; if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) return true; - if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) - return false; - - val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); - - - if (advertising & ADVERTISED_100baseT_Full) - tgtadv |= MDIO_AN_EEE_ADV_100TX; - if (advertising & ADVERTISED_1000baseT_Full) - tgtadv |= MDIO_AN_EEE_ADV_1000T; + tg3_eee_pull_config(tp, &eee); - if (val != tgtadv) - return false; + if (tp->eee.eee_enabled) { + if (tp->eee.advertised != eee.advertised || + tp->eee.tx_lpi_timer != eee.tx_lpi_timer || + tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) + return false; + } else { + /* EEE is disabled but we're advertising */ + if (eee.advertised) + return false; + } return true; } @@ -4862,8 +4859,10 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) */ if (!eee_config_ok && (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && - !force_reset) + !force_reset) { + tg3_setup_eee(tp); tg3_phy_reset(tp); + } } else { if (!(bmcr & BMCR_ANENABLE) && tp->link_config.speed == current_speed && -- cgit v1.2.3 From 1cbf9eb85a6601b58f01a71ff10299d2bf5d3365 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Sat, 18 May 2013 06:26:55 +0000 Subject: tg3: Implement set/get_eee handlers Reviewed-by: Ben Li Signed-off-by: Michael Chan Signed-off-by: Nithin Nayak Sujir Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/tg3.c | 53 +++++++++++++++++++++++++++++++++++++ drivers/net/ethernet/broadcom/tg3.h | 1 + 2 files changed, 54 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 2e4974823853..fb06aa120293 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -13618,6 +13618,57 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) return 0; } +static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { + netdev_warn(tp->dev, "Board does not support EEE!\n"); + return -EOPNOTSUPP; + } + + if (edata->advertised != tp->eee.advertised) { + netdev_warn(tp->dev, + "Direct manipulation of EEE advertisement is not supported\n"); + return -EINVAL; + } + + if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { + netdev_warn(tp->dev, + "Maximal Tx Lpi timer supported is %#x(u)\n", + TG3_CPMU_DBTMR1_LNKIDLE_MAX); + return -EINVAL; + } + + tp->eee = *edata; + + tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; + tg3_warn_mgmt_link_flap(tp); + + if (netif_running(tp->dev)) { + tg3_full_lock(tp, 0); + tg3_setup_eee(tp); + tg3_phy_reset(tp); + tg3_full_unlock(tp); + } + + return 0; +} + +static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { + netdev_warn(tp->dev, + "Board does not support EEE!\n"); + return -EOPNOTSUPP; + } + + *edata = tp->eee; + return 0; +} + static const struct ethtool_ops tg3_ethtool_ops = { .get_settings = tg3_get_settings, .set_settings = tg3_set_settings, @@ -13651,6 +13702,8 @@ static const struct ethtool_ops tg3_ethtool_ops = { .get_channels = tg3_get_channels, .set_channels = tg3_set_channels, .get_ts_info = tg3_get_ts_info, + .get_eee = tg3_get_eee, + .set_eee = tg3_set_eee, }; static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 057532c5c56c..2530c20dd823 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -1175,6 +1175,7 @@ #define TG3_CPMU_EEE_DBTMR1 0x000036b4 #define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 #define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000007ff +#define TG3_CPMU_DBTMR1_LNKIDLE_MAX 0x0000ffff #define TG3_CPMU_EEE_DBTMR2 0x000036b8 #define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000 #define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000007ff -- cgit v1.2.3 From 8802f5790ef3e2e1d907169557e3dd2e0e77d98f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 18 May 2013 07:14:53 +0000 Subject: net-bnx2x: dont reload on GRO change bnx2x_set_features() forces a driver reload if GRO setting is changed. A reload makes the ethernet port unresponsive for about 5 seconds. This is not needed in the common case LRO is enabled, as LRO (TPA_ENABLE_FLAG) has precedence over GRO (GRO_ENABLE_FLAG) Tested: Verified that "ethtool -K eth0 gro {on|off}" doesn't blackout the NIC anymore Google-Bug-Id: 8440442 Signed-off-by: Eric Dumazet Cc: Dmitry Kravkov Acked-by: Dmitry Kravkov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index b8fbe266ab68..6cc5101fa96b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4599,6 +4599,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) { struct bnx2x *bp = netdev_priv(dev); u32 flags = bp->flags; + u32 changes; bool bnx2x_reload = false; if (features & NETIF_F_LRO) @@ -4623,10 +4624,16 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) } } - if (flags ^ bp->flags) { - bp->flags = flags; + changes = flags ^ bp->flags; + + /* if GRO is changed while LRO is enabled, dont force a reload */ + if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG)) + changes &= ~GRO_ENABLE_FLAG; + + if (changes) bnx2x_reload = true; - } + + bp->flags = flags; if (bnx2x_reload) { if (bp->recovery_state == BNX2X_RECOVERY_DONE) -- cgit v1.2.3 From 1e18583adccfc122b5d6415cfe4bf1826c370f4e Mon Sep 17 00:00:00 2001 From: Rolf Eike Beer Date: Sat, 18 May 2013 11:50:17 +0000 Subject: ThunderLAN: remove is_eisa flag These 2 places are the only matches for is_eisa in the whole tree. Signed-off-by: Rolf Eike Beer Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/tlan.c | 1 - drivers/net/ethernet/ti/tlan.h | 1 - 2 files changed, 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 60c400f6d01f..59abfbcd0d55 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -533,7 +533,6 @@ static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev, /* This is a hack. We need to know which board structure * is suited for this adapter */ device_id = inw(ioaddr + EISA_ID2); - priv->is_eisa = 1; if (device_id == 0x20F1) { priv->adapter = &board_info[13]; /* NetFlex-3/E */ priv->adapter_rev = 23; /* TLAN 2.3 */ diff --git a/drivers/net/ethernet/ti/tlan.h b/drivers/net/ethernet/ti/tlan.h index 5fc98a8e4889..2eb33a250788 100644 --- a/drivers/net/ethernet/ti/tlan.h +++ b/drivers/net/ethernet/ti/tlan.h @@ -207,7 +207,6 @@ struct tlan_priv { u8 tlan_full_duplex; spinlock_t lock; u8 link; - u8 is_eisa; struct work_struct tlan_tqueue; u8 neg_be_verbose; }; -- cgit v1.2.3 From 4fc1ad6f4b4d9214505eda434b1ed3b0bfd16981 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 19 May 2013 10:17:13 +0000 Subject: 3c59x: remove useless VORTEX_PCI() invocations It's suboptimal to invoke quite complex VORTEX_PCI() macro every time we want to get a 'struct pci_dev *' when we already have it in a variable... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/3com/3c59x.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 072c6f14e8fc..30e74211a755 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1473,7 +1473,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, if (pdev) { vp->pm_state_valid = 1; - pci_save_state(VORTEX_PCI(vp)); + pci_save_state(pdev); acpi_set_WOL(dev); } retval = register_netdev(dev); @@ -3233,21 +3233,20 @@ static void vortex_remove_one(struct pci_dev *pdev) vp = netdev_priv(dev); if (vp->cb_fn_base) - pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base); + pci_iounmap(pdev, vp->cb_fn_base); unregister_netdev(dev); - if (VORTEX_PCI(vp)) { - pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ - if (vp->pm_state_valid) - pci_restore_state(VORTEX_PCI(vp)); - pci_disable_device(VORTEX_PCI(vp)); - } + pci_set_power_state(pdev, PCI_D0); /* Go active */ + if (vp->pm_state_valid) + pci_restore_state(pdev); + pci_disable_device(pdev); + /* Should really use issue_and_wait() here */ iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14), vp->ioaddr + EL3_CMD); - pci_iounmap(VORTEX_PCI(vp), vp->ioaddr); + pci_iounmap(pdev, vp->ioaddr); pci_free_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE -- cgit v1.2.3 From a9683c94fe8c8f87fdc8380a2e76506aa1502dcb Mon Sep 17 00:00:00 2001 From: Tony Prisk Date: Sat, 18 May 2013 09:39:05 +0000 Subject: net: velocity: Rename vptr->dev to vptr->netdev Improve the clarity of the code in preparation for converting the dma functions to generic versions, which require a struct device *. This makes it possible to store a 'struct device *dev' in the velocity_info structure. Signed-off-by: Tony Prisk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/via/via-velocity.c | 66 ++++++++++++++++----------------- drivers/net/ethernet/via/via-velocity.h | 4 +- 2 files changed, 35 insertions(+), 35 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index fb6248956ee2..187eef33df11 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -998,9 +998,9 @@ static void velocity_print_link_status(struct velocity_info *vptr) { if (vptr->mii_status & VELOCITY_LINK_FAIL) { - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name); + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name); } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name); + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name); if (vptr->mii_status & VELOCITY_SPEED_1000) VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps"); @@ -1014,7 +1014,7 @@ static void velocity_print_link_status(struct velocity_info *vptr) else VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n"); } else { - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name); + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name); switch (vptr->options.spd_dpx) { case SPD_DPX_1000_FULL: VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n"); @@ -1319,7 +1319,7 @@ static void velocity_init_registers(struct velocity_info *vptr, case VELOCITY_INIT_RESET: case VELOCITY_INIT_WOL: - netif_stop_queue(vptr->dev); + netif_stop_queue(vptr->netdev); /* * Reset RX to prevent RX pointer not on the 4X location @@ -1332,7 +1332,7 @@ static void velocity_init_registers(struct velocity_info *vptr, if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { velocity_print_link_status(vptr); if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) - netif_wake_queue(vptr->dev); + netif_wake_queue(vptr->netdev); } enable_flow_control_ability(vptr); @@ -1354,7 +1354,7 @@ static void velocity_init_registers(struct velocity_info *vptr, mac_eeprom_reload(regs); for (i = 0; i < 6; i++) - writeb(vptr->dev->dev_addr[i], &(regs->PAR[i])); + writeb(vptr->netdev->dev_addr[i], &(regs->PAR[i])); /* * clear Pre_ACPI bit. 
@@ -1377,7 +1377,7 @@ static void velocity_init_registers(struct velocity_info *vptr, /* * Set packet filter: Receive directed and broadcast address */ - velocity_set_multi(vptr->dev); + velocity_set_multi(vptr->netdev); /* * Enable MII auto-polling @@ -1404,14 +1404,14 @@ static void velocity_init_registers(struct velocity_info *vptr, writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), ®s->CR0Set); mii_status = velocity_get_opt_media_mode(vptr); - netif_stop_queue(vptr->dev); + netif_stop_queue(vptr->netdev); mii_init(vptr, mii_status); if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { velocity_print_link_status(vptr); if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) - netif_wake_queue(vptr->dev); + netif_wake_queue(vptr->netdev); } enable_flow_control_ability(vptr); @@ -1474,7 +1474,7 @@ static int velocity_init_dma_rings(struct velocity_info *vptr) rx_ring_size, &pool_dma); if (!pool) { dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", - vptr->dev->name); + vptr->netdev->name); return -ENOMEM; } @@ -1514,7 +1514,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) struct rx_desc *rd = &(vptr->rx.ring[idx]); struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); - rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx.buf_sz + 64); + rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64); if (rd_info->skb == NULL) return -ENOMEM; @@ -1620,7 +1620,7 @@ static int velocity_init_rd_ring(struct velocity_info *vptr) if (velocity_rx_refill(vptr) != vptr->options.numrx) { VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR - "%s: failed to allocate RX buffer.\n", vptr->dev->name); + "%s: failed to allocate RX buffer.\n", vptr->netdev->name); velocity_free_rd_ring(vptr); goto out; } @@ -1809,7 +1809,7 @@ static void velocity_error(struct velocity_info *vptr, int status) printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(®s->TDIdx[0])); BYTE_REG_BITS_ON(TXESR_TDSTR, ®s->TXESR); writew(TRDCSR_RUN, ®s->TDCSRClr); - netif_stop_queue(vptr->dev); + netif_stop_queue(vptr->netdev); /* FIXME: port over the pci_device_failed code and use it here */ @@ -1850,10 +1850,10 @@ static void velocity_error(struct velocity_info *vptr, int status) if (linked) { vptr->mii_status &= ~VELOCITY_LINK_FAIL; - netif_carrier_on(vptr->dev); + netif_carrier_on(vptr->netdev); } else { vptr->mii_status |= VELOCITY_LINK_FAIL; - netif_carrier_off(vptr->dev); + netif_carrier_off(vptr->netdev); } velocity_print_link_status(vptr); @@ -1867,9 +1867,9 @@ static void velocity_error(struct velocity_info *vptr, int status) enable_mii_autopoll(regs); if (vptr->mii_status & VELOCITY_LINK_FAIL) - netif_stop_queue(vptr->dev); + netif_stop_queue(vptr->netdev); else - netif_wake_queue(vptr->dev); + netif_wake_queue(vptr->netdev); } if (status & ISR_MIBFI) @@ -1894,7 +1894,7 @@ static int velocity_tx_srv(struct velocity_info *vptr) int idx; int works = 0; struct velocity_td_info *tdinfo; - struct net_device_stats *stats = &vptr->dev->stats; + struct net_device_stats *stats = &vptr->netdev->stats; for (qnum = 0; qnum < vptr->tx.numq; qnum++) { for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; @@ -1939,9 +1939,9 @@ static int velocity_tx_srv(struct velocity_info *vptr) * Look to see if we should kick the transmit network * layer for more work. 
*/ - if (netif_queue_stopped(vptr->dev) && (full == 0) && + if (netif_queue_stopped(vptr->netdev) && (full == 0) && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { - netif_wake_queue(vptr->dev); + netif_wake_queue(vptr->netdev); } return works; } @@ -1989,7 +1989,7 @@ static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size, if (pkt_size < rx_copybreak) { struct sk_buff *new_skb; - new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size); + new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size); if (new_skb) { new_skb->ip_summed = rx_skb[0]->ip_summed; skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size); @@ -2030,14 +2030,14 @@ static inline void velocity_iph_realign(struct velocity_info *vptr, static int velocity_receive_frame(struct velocity_info *vptr, int idx) { void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); - struct net_device_stats *stats = &vptr->dev->stats; + struct net_device_stats *stats = &vptr->netdev->stats; struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); struct rx_desc *rd = &(vptr->rx.ring[idx]); int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; struct sk_buff *skb; if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { - VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->dev->name); + VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->netdev->name); stats->rx_length_errors++; return -EINVAL; } @@ -2075,7 +2075,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) PCI_DMA_FROMDEVICE); skb_put(skb, pkt_len - 4); - skb->protocol = eth_type_trans(skb, vptr->dev); + skb->protocol = eth_type_trans(skb, vptr->netdev); if (rd->rdesc0.RSR & RSR_DETAG) { u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG)); @@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) */ static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) { - struct net_device_stats *stats = &vptr->dev->stats; + struct net_device_stats *stats = &vptr->netdev->stats; int rd_curr = vptr->rx.curr; int works = 0; @@ -2292,7 +2292,7 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", - vptr->dev->name); + vptr->netdev->name); ret = -EINVAL; goto out_0; } @@ -2314,7 +2314,7 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) goto out_0; } - tmp_vptr->dev = dev; + tmp_vptr->netdev = dev; tmp_vptr->pdev = vptr->pdev; tmp_vptr->options = vptr->options; tmp_vptr->tx.numq = vptr->tx.numq; @@ -2692,7 +2692,7 @@ static int velocity_get_pci_info(struct velocity_info *vptr, */ static void velocity_print_info(struct velocity_info *vptr) { - struct net_device *dev = vptr->dev; + struct net_device *dev = vptr->netdev; printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); printk(KERN_INFO "%s: Ethernet Address: %pM\n", @@ -2755,7 +2755,7 @@ static int velocity_found1(struct pci_dev *pdev, velocity_init_info(pdev, vptr, info); - vptr->dev = dev; + vptr->netdev = dev; ret = pci_enable_device(pdev); if (ret < 0) @@ -3010,10 +3010,10 @@ static int velocity_suspend(struct pci_dev *pdev, pm_message_t state) struct velocity_info *vptr = netdev_priv(dev); unsigned long flags; - if (!netif_running(vptr->dev)) + if (!netif_running(vptr->netdev)) return 0; - netif_device_detach(vptr->dev); + netif_device_detach(vptr->netdev); spin_lock_irqsave(&vptr->lock, flags); 
pci_save_state(pdev); @@ -3078,7 +3078,7 @@ static int velocity_resume(struct pci_dev *pdev) unsigned long flags; int i; - if (!netif_running(vptr->dev)) + if (!netif_running(vptr->netdev)) return 0; pci_set_power_state(pdev, PCI_D0); @@ -3101,7 +3101,7 @@ static int velocity_resume(struct pci_dev *pdev) mac_enable_int(vptr->mac_regs); spin_unlock_irqrestore(&vptr->lock, flags); - netif_device_attach(vptr->dev); + netif_device_attach(vptr->netdev); return 0; } diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h index 4cb9f13485e9..ff8d7828aa1b 100644 --- a/drivers/net/ethernet/via/via-velocity.h +++ b/drivers/net/ethernet/via/via-velocity.h @@ -1435,7 +1435,7 @@ struct velocity_opt { struct velocity_info { struct pci_dev *pdev; - struct net_device *dev; + struct net_device *netdev; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; u8 ip_addr[4]; @@ -1514,7 +1514,7 @@ static inline int velocity_get_ip(struct velocity_info *vptr) int res = -ENOENT; rcu_read_lock(); - in_dev = __in_dev_get_rcu(vptr->dev); + in_dev = __in_dev_get_rcu(vptr->netdev); if (in_dev != NULL) { ifa = (struct in_ifaddr *) in_dev->ifa_list; if (ifa != NULL) { -- cgit v1.2.3 From e2c41f143fb2db14ad9b644a515c98e6c2e5ea2d Mon Sep 17 00:00:00 2001 From: Tony Prisk Date: Sat, 18 May 2013 09:39:06 +0000 Subject: net: velocity: Convert to generic dma functions Remove the pci_* dma functions and replace with the more generic versions. In preparation of adding platform support, a new struct device *dev is added to struct velocity_info which can be used by both the pci and platform code. Signed-off-by: Tony Prisk Signed-off-by: David S. Miller --- drivers/net/ethernet/via/via-velocity.c | 51 ++++++++++++++++----------------- drivers/net/ethernet/via/via-velocity.h | 1 + 2 files changed, 26 insertions(+), 26 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 187eef33df11..5996cee0ffa7 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -1459,7 +1460,6 @@ static int velocity_init_dma_rings(struct velocity_info *vptr) struct velocity_opt *opt = &vptr->options; const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc); - struct pci_dev *pdev = vptr->pdev; dma_addr_t pool_dma; void *pool; unsigned int i; @@ -1467,13 +1467,13 @@ static int velocity_init_dma_rings(struct velocity_info *vptr) /* * Allocate all RD/TD rings a single pool. 
* - * pci_alloc_consistent() fulfills the requirement for 64 bytes + * dma_alloc_coherent() fulfills the requirement for 64 bytes * alignment */ - pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq + - rx_ring_size, &pool_dma); + pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq + + rx_ring_size, &pool_dma, GFP_ATOMIC); if (!pool) { - dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", + dev_err(vptr->dev, "%s : DMA memory allocation failed.\n", vptr->netdev->name); return -ENOMEM; } @@ -1524,8 +1524,8 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) */ skb_reserve(rd_info->skb, 64 - ((unsigned long) rd_info->skb->data & 63)); - rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, - vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); + rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data, + vptr->rx.buf_sz, DMA_FROM_DEVICE); /* * Fill in the descriptor to match @@ -1588,8 +1588,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr) if (!rd_info->skb) continue; - pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, - PCI_DMA_FROMDEVICE); + dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, + DMA_FROM_DEVICE); rd_info->skb_dma = 0; dev_kfree_skb(rd_info->skb); @@ -1670,7 +1670,7 @@ static void velocity_free_dma_rings(struct velocity_info *vptr) const int size = vptr->options.numrx * sizeof(struct rx_desc) + vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; - pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma); + dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma); } static int velocity_init_rings(struct velocity_info *vptr, int mtu) @@ -1727,8 +1727,8 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, pktlen = max_t(size_t, pktlen, td->td_buf[i].size & ~TD_QUEUE); - pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], - le16_to_cpu(pktlen), PCI_DMA_TODEVICE); + dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], + le16_to_cpu(pktlen), DMA_TO_DEVICE); } } dev_kfree_skb_irq(skb); @@ -1750,8 +1750,8 @@ static void velocity_free_td_ring_entry(struct velocity_info *vptr, if (td_info->skb) { for (i = 0; i < td_info->nskb_dma; i++) { if (td_info->skb_dma[i]) { - pci_unmap_single(vptr->pdev, td_info->skb_dma[i], - td_info->skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(vptr->dev, td_info->skb_dma[i], + td_info->skb->len, DMA_TO_DEVICE); td_info->skb_dma[i] = 0; } } @@ -2029,7 +2029,6 @@ static inline void velocity_iph_realign(struct velocity_info *vptr, */ static int velocity_receive_frame(struct velocity_info *vptr, int idx) { - void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); struct net_device_stats *stats = &vptr->netdev->stats; struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); struct rx_desc *rd = &(vptr->rx.ring[idx]); @@ -2047,8 +2046,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) skb = rd_info->skb; - pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, - vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, + vptr->rx.buf_sz, DMA_FROM_DEVICE); /* * Drop frame not meeting IEEE 802.3 @@ -2061,19 +2060,18 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) } } - pci_action = pci_dma_sync_single_for_device; - velocity_rx_csum(rd, skb); if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { velocity_iph_realign(vptr, skb, pkt_len); - pci_action = pci_unmap_single; rd_info->skb = NULL; + dma_unmap_single(vptr->dev, 
rd_info->skb_dma, vptr->rx.buf_sz, + DMA_FROM_DEVICE); + } else { + dma_sync_single_for_device(vptr->dev, rd_info->skb_dma, + vptr->rx.buf_sz, DMA_FROM_DEVICE); } - pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, - PCI_DMA_FROMDEVICE); - skb_put(skb, pkt_len - 4); skb->protocol = eth_type_trans(skb, vptr->netdev); @@ -2550,7 +2548,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb, * add it to the transmit ring. */ tdinfo->skb = skb; - tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); + tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen, + DMA_TO_DEVICE); td_ptr->tdesc0.len = cpu_to_le16(pktlen); td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); td_ptr->td_buf[0].pa_high = 0; @@ -2560,7 +2559,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev, + tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); @@ -2637,6 +2636,7 @@ static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, { memset(vptr, 0, sizeof(struct velocity_info)); + vptr->dev = &pdev->dev; vptr->pdev = pdev; vptr->chip_id = info->chip_id; vptr->tx.numq = info->txqueue; @@ -2744,7 +2744,6 @@ static int velocity_found1(struct pci_dev *pdev, SET_NETDEV_DEV(dev, &pdev->dev); vptr = netdev_priv(dev); - if (first) { printk(KERN_INFO "%s Ver. %s\n", VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION); diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h index ff8d7828aa1b..c38bbaed4d12 100644 --- a/drivers/net/ethernet/via/via-velocity.h +++ b/drivers/net/ethernet/via/via-velocity.h @@ -1434,6 +1434,7 @@ struct velocity_opt { #define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) struct velocity_info { + struct device *dev; struct pci_dev *pdev; struct net_device *netdev; -- cgit v1.2.3 From 6dffbe53fbdcc7bd5f6379fa18264f060b0cf61d Mon Sep 17 00:00:00 2001 From: Tony Prisk Date: Sat, 18 May 2013 09:39:07 +0000 Subject: net: velocity: Add platform device support to VIA velocity driver Add support for the VIA Velocity network driver to be bound to a OF created platform device. Signed-off-by: Tony Prisk Signed-off-by: David S. Miller --- .../devicetree/bindings/net/via-velocity.txt | 20 + drivers/net/ethernet/via/Kconfig | 3 +- drivers/net/ethernet/via/via-velocity.c | 408 ++++++++++++++------- drivers/net/ethernet/via/via-velocity.h | 3 +- 4 files changed, 305 insertions(+), 129 deletions(-) create mode 100644 Documentation/devicetree/bindings/net/via-velocity.txt (limited to 'drivers/net/ethernet') diff --git a/Documentation/devicetree/bindings/net/via-velocity.txt b/Documentation/devicetree/bindings/net/via-velocity.txt new file mode 100644 index 000000000000..b3db469b1ad7 --- /dev/null +++ b/Documentation/devicetree/bindings/net/via-velocity.txt @@ -0,0 +1,20 @@ +* VIA Velocity 10/100/1000 Network Controller + +Required properties: +- compatible : Should be "via,velocity-vt6110" +- reg : Address and length of the io space +- interrupts : Should contain the controller interrupt line + +Optional properties: +- no-eeprom : PCI network cards use an external EEPROM to store data. Embedded + devices quite often set this data in uboot and do not provide an eeprom. + Specify this option if you have no external eeprom. 
+ +Examples: + +eth0@d8004000 { + compatible = "via,velocity-vt6110"; + reg = <0xd8004000 0x400>; + interrupts = <10>; + no-eeprom; +}; diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig index 68a9ba66feba..6a87097d88c0 100644 --- a/drivers/net/ethernet/via/Kconfig +++ b/drivers/net/ethernet/via/Kconfig @@ -5,7 +5,6 @@ config NET_VENDOR_VIA bool "VIA devices" default y - depends on PCI ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -45,7 +44,7 @@ config VIA_RHINE_MMIO config VIA_VELOCITY tristate "VIA Velocity support" - depends on PCI + depends on (PCI || USE_OF) select CRC32 select CRC_CCITT select NET_CORE diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 5996cee0ffa7..76919948b4ee 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -65,7 +65,11 @@ #include #include #include +#include +#include +#include #include +#include #include #include #include @@ -80,10 +84,24 @@ #include "via-velocity.h" +enum velocity_bus_type { + BUS_PCI, + BUS_PLATFORM, +}; static int velocity_nics; static int msglevel = MSG_LEVEL_INFO; +static void velocity_set_power_state(struct velocity_info *vptr, char state) +{ + void *addr = vptr->mac_regs; + + if (vptr->pdev) + pci_set_power_state(vptr->pdev, state); + else + writeb(state, addr + 0x154); +} + /** * mac_get_cam_mask - Read a CAM mask * @regs: register block for this velocity @@ -362,12 +380,23 @@ static struct velocity_info_tbl chip_info_table[] = { * Describe the PCI device identifiers that we support in this * device driver. Used for hotplug autoloading. */ -static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = { + +static DEFINE_PCI_DEVICE_TABLE(velocity_pci_id_table) = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) }, { } }; -MODULE_DEVICE_TABLE(pci, velocity_id_table); +MODULE_DEVICE_TABLE(pci, velocity_pci_id_table); + +/** + * Describe the OF device identifiers that we support in this + * device driver. Used for devicetree nodes. + */ +static struct of_device_id velocity_of_ids[] = { + { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] }, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, velocity_of_ids); /** * get_chip_name - identifier to name @@ -385,29 +414,6 @@ static const char *get_chip_name(enum chip_type chip_id) return chip_info_table[i].name; } -/** - * velocity_remove1 - device unplug - * @pdev: PCI device being removed - * - * Device unload callback. Called on an unplug or on module - * unload for each active device that is present. 
Disconnects - * the device from the network layer and frees all the resources - */ -static void velocity_remove1(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct velocity_info *vptr = netdev_priv(dev); - - unregister_netdev(dev); - iounmap(vptr->mac_regs); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - free_netdev(dev); - - velocity_nics--; -} - /** * velocity_set_int_opt - parser for integer options * @opt: pointer to option value @@ -1181,6 +1187,17 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status) u16 BMCR; switch (PHYID_GET_PHY_ID(vptr->phy_id)) { + case PHYID_ICPLUS_IP101A: + MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), + MII_ADVERTISE, vptr->mac_regs); + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, + vptr->mac_regs); + else + MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, + vptr->mac_regs); + MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); + break; case PHYID_CICADA_CS8201: /* * Reset to hardware default @@ -1312,6 +1329,7 @@ static void velocity_init_registers(struct velocity_info *vptr, enum velocity_init_type type) { struct mac_regs __iomem *regs = vptr->mac_regs; + struct net_device *netdev = vptr->netdev; int i, mii_status; mac_wol_reset(regs); @@ -1320,7 +1338,7 @@ static void velocity_init_registers(struct velocity_info *vptr, case VELOCITY_INIT_RESET: case VELOCITY_INIT_WOL: - netif_stop_queue(vptr->netdev); + netif_stop_queue(netdev); /* * Reset RX to prevent RX pointer not on the 4X location @@ -1333,7 +1351,7 @@ static void velocity_init_registers(struct velocity_info *vptr, if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { velocity_print_link_status(vptr); if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) - netif_wake_queue(vptr->netdev); + netif_wake_queue(netdev); } enable_flow_control_ability(vptr); @@ -1353,9 +1371,11 @@ static void velocity_init_registers(struct velocity_info *vptr, velocity_soft_reset(vptr); mdelay(5); - mac_eeprom_reload(regs); - for (i = 0; i < 6; i++) - writeb(vptr->netdev->dev_addr[i], &(regs->PAR[i])); + if (!vptr->no_eeprom) { + mac_eeprom_reload(regs); + for (i = 0; i < 6; i++) + writeb(netdev->dev_addr[i], regs->PAR + i); + } /* * clear Pre_ACPI bit. 
@@ -1378,7 +1398,7 @@ static void velocity_init_registers(struct velocity_info *vptr, /* * Set packet filter: Receive directed and broadcast address */ - velocity_set_multi(vptr->netdev); + velocity_set_multi(netdev); /* * Enable MII auto-polling @@ -1405,14 +1425,14 @@ static void velocity_init_registers(struct velocity_info *vptr, writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), ®s->CR0Set); mii_status = velocity_get_opt_media_mode(vptr); - netif_stop_queue(vptr->netdev); + netif_stop_queue(netdev); mii_init(vptr, mii_status); if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { velocity_print_link_status(vptr); if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) - netif_wake_queue(vptr->netdev); + netif_wake_queue(netdev); } enable_flow_control_ability(vptr); @@ -2233,15 +2253,15 @@ static int velocity_open(struct net_device *dev) goto out; /* Ensure chip is running */ - pci_set_power_state(vptr->pdev, PCI_D0); + velocity_set_power_state(vptr, PCI_D0); velocity_init_registers(vptr, VELOCITY_INIT_COLD); - ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED, + ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED, dev->name, dev); if (ret < 0) { /* Power down the chip */ - pci_set_power_state(vptr->pdev, PCI_D3hot); + velocity_set_power_state(vptr, PCI_D3hot); velocity_free_rings(vptr); goto out; } @@ -2314,6 +2334,7 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) tmp_vptr->netdev = dev; tmp_vptr->pdev = vptr->pdev; + tmp_vptr->dev = vptr->dev; tmp_vptr->options = vptr->options; tmp_vptr->tx.numq = vptr->tx.numq; @@ -2413,7 +2434,7 @@ static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) saving then we need to bring the device back up to talk to it */ if (!netif_running(dev)) - pci_set_power_state(vptr->pdev, PCI_D0); + velocity_set_power_state(vptr, PCI_D0); switch (cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ @@ -2426,7 +2447,7 @@ static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ret = -EOPNOTSUPP; } if (!netif_running(dev)) - pci_set_power_state(vptr->pdev, PCI_D3hot); + velocity_set_power_state(vptr, PCI_D3hot); return ret; @@ -2492,7 +2513,7 @@ static int velocity_close(struct net_device *dev) if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) velocity_get_ip(vptr); - free_irq(vptr->pdev->irq, dev); + free_irq(dev->irq, dev); velocity_free_rings(vptr); @@ -2631,13 +2652,9 @@ static const struct net_device_ops velocity_netdev_ops = { * Set up the initial velocity_info struct for the device that has been * discovered. 
*/ -static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, - const struct velocity_info_tbl *info) +static void velocity_init_info(struct velocity_info *vptr, + const struct velocity_info_tbl *info) { - memset(vptr, 0, sizeof(struct velocity_info)); - - vptr->dev = &pdev->dev; - vptr->pdev = pdev; vptr->chip_id = info->chip_id; vptr->tx.numq = info->txqueue; vptr->multicast_limit = MCAM_SIZE; @@ -2652,10 +2669,9 @@ static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, * Retrieve the PCI configuration space data that interests us from * the kernel PCI layer */ -static int velocity_get_pci_info(struct velocity_info *vptr, - struct pci_dev *pdev) +static int velocity_get_pci_info(struct velocity_info *vptr) { - vptr->rev_id = pdev->revision; + struct pci_dev *pdev = vptr->pdev; pci_set_master(pdev); @@ -2678,7 +2694,37 @@ static int velocity_get_pci_info(struct velocity_info *vptr, dev_err(&pdev->dev, "region #1 is too small.\n"); return -EINVAL; } - vptr->pdev = pdev; + + return 0; +} + +/** + * velocity_get_platform_info - retrieve platform info for device + * @vptr: velocity device + * @pdev: platform device it matches + * + * Retrieve the Platform configuration data that interests us + */ +static int velocity_get_platform_info(struct velocity_info *vptr) +{ + struct resource res; + int ret; + + if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL)) + vptr->no_eeprom = 1; + + ret = of_address_to_resource(vptr->dev->of_node, 0, &res); + if (ret) { + dev_err(vptr->dev, "unable to find memory address\n"); + return ret; + } + + vptr->memaddr = res.start; + + if (resource_size(&res) < VELOCITY_IO_SIZE) { + dev_err(vptr->dev, "memory region is too small.\n"); + return -EINVAL; + } return 0; } @@ -2707,21 +2753,22 @@ static u32 velocity_get_link(struct net_device *dev) } /** - * velocity_found1 - set up discovered velocity card + * velocity_probe - set up discovered velocity device * @pdev: PCI device * @ent: PCI device table entry that matched + * @bustype: bus that device is connected to * * Configure a discovered adapter from scratch. Return a negative * errno error code on failure paths. */ -static int velocity_found1(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int velocity_probe(struct device *dev, int irq, + const struct velocity_info_tbl *info, + enum velocity_bus_type bustype) { static int first = 1; - struct net_device *dev; + struct net_device *netdev; int i; const char *drv_string; - const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data]; struct velocity_info *vptr; struct mac_regs __iomem *regs; int ret = -ENOMEM; @@ -2730,19 +2777,18 @@ static int velocity_found1(struct pci_dev *pdev, * can support more than MAX_UNITS. */ if (velocity_nics >= MAX_UNITS) { - dev_notice(&pdev->dev, "already found %d NICs.\n", - velocity_nics); + dev_notice(dev, "already found %d NICs.\n", velocity_nics); return -ENODEV; } - dev = alloc_etherdev(sizeof(struct velocity_info)); - if (!dev) + netdev = alloc_etherdev(sizeof(struct velocity_info)); + if (!netdev) goto out; /* Chain it all together */ - SET_NETDEV_DEV(dev, &pdev->dev); - vptr = netdev_priv(dev); + SET_NETDEV_DEV(netdev, dev); + vptr = netdev_priv(netdev); if (first) { printk(KERN_INFO "%s Ver. 
%s\n", @@ -2752,41 +2798,41 @@ static int velocity_found1(struct pci_dev *pdev, first = 0; } - velocity_init_info(pdev, vptr, info); + netdev->irq = irq; + vptr->netdev = netdev; + vptr->dev = dev; - vptr->netdev = dev; + velocity_init_info(vptr, info); - ret = pci_enable_device(pdev); - if (ret < 0) - goto err_free_dev; + if (bustype == BUS_PCI) { + vptr->pdev = to_pci_dev(dev); - ret = velocity_get_pci_info(vptr, pdev); - if (ret < 0) { - /* error message already printed */ - goto err_disable; - } - - ret = pci_request_regions(pdev, VELOCITY_NAME); - if (ret < 0) { - dev_err(&pdev->dev, "No PCI resources.\n"); - goto err_disable; + ret = velocity_get_pci_info(vptr); + if (ret < 0) + goto err_free_dev; + } else { + vptr->pdev = NULL; + ret = velocity_get_platform_info(vptr); + if (ret < 0) + goto err_free_dev; } regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE); if (regs == NULL) { ret = -EIO; - goto err_release_res; + goto err_free_dev; } vptr->mac_regs = regs; + vptr->rev_id = readb(®s->rev_id); mac_wol_reset(regs); for (i = 0; i < 6; i++) - dev->dev_addr[i] = readb(®s->PAR[i]); + netdev->dev_addr[i] = readb(®s->PAR[i]); - drv_string = dev_driver_string(&pdev->dev); + drv_string = dev_driver_string(dev); velocity_get_options(&vptr->options, velocity_nics, drv_string); @@ -2807,46 +2853,125 @@ static int velocity_found1(struct pci_dev *pdev, vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); - dev->netdev_ops = &velocity_netdev_ops; - dev->ethtool_ops = &velocity_ethtool_ops; - netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); + netdev->netdev_ops = &velocity_netdev_ops; + netdev->ethtool_ops = &velocity_ethtool_ops; + netif_napi_add(netdev, &vptr->napi, velocity_poll, + VELOCITY_NAPI_WEIGHT); - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX; - dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_IP_CSUM; + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_IP_CSUM; - ret = register_netdev(dev); + ret = register_netdev(netdev); if (ret < 0) goto err_iounmap; - if (!velocity_get_link(dev)) { - netif_carrier_off(dev); + if (!velocity_get_link(netdev)) { + netif_carrier_off(netdev); vptr->mii_status |= VELOCITY_LINK_FAIL; } velocity_print_info(vptr); - pci_set_drvdata(pdev, dev); + dev_set_drvdata(vptr->dev, netdev); /* and leave the chip powered down */ - pci_set_power_state(pdev, PCI_D3hot); + velocity_set_power_state(vptr, PCI_D3hot); velocity_nics++; out: return ret; err_iounmap: iounmap(regs); -err_release_res: - pci_release_regions(pdev); -err_disable: - pci_disable_device(pdev); err_free_dev: - free_netdev(dev); + free_netdev(netdev); goto out; } -#ifdef CONFIG_PM +/** + * velocity_remove - device unplug + * @dev: device being removed + * + * Device unload callback. Called on an unplug or on module + * unload for each active device that is present. 
Disconnects + * the device from the network layer and frees all the resources + */ +static int velocity_remove(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct velocity_info *vptr = netdev_priv(netdev); + + unregister_netdev(netdev); + iounmap(vptr->mac_regs); + free_netdev(netdev); + velocity_nics--; + + return 0; +} + +static int velocity_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + const struct velocity_info_tbl *info = + &chip_info_table[ent->driver_data]; + int ret; + + ret = pci_enable_device(pdev); + if (ret < 0) + return ret; + + ret = pci_request_regions(pdev, VELOCITY_NAME); + if (ret < 0) { + dev_err(&pdev->dev, "No PCI resources.\n"); + goto fail1; + } + + ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI); + if (ret == 0) + return 0; + + pci_release_regions(pdev); +fail1: + pci_disable_device(pdev); + return ret; +} + +static void velocity_pci_remove(struct pci_dev *pdev) +{ + velocity_remove(&pdev->dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static int velocity_platform_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + const struct velocity_info_tbl *info; + int irq; + + of_id = of_match_device(velocity_of_ids, &pdev->dev); + if (!of_id) + return -EINVAL; + info = of_id->data; + + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (!irq) + return -EINVAL; + + return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM); +} + +static int velocity_platform_remove(struct platform_device *pdev) +{ + velocity_remove(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP /** * wol_calc_crc - WOL CRC * @pattern: data pattern @@ -3003,10 +3128,10 @@ static void velocity_save_context(struct velocity_info *vptr, struct velocity_co } -static int velocity_suspend(struct pci_dev *pdev, pm_message_t state) +static int velocity_suspend(struct device *dev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct velocity_info *vptr = netdev_priv(dev); + struct net_device *netdev = dev_get_drvdata(dev); + struct velocity_info *vptr = netdev_priv(netdev); unsigned long flags; if (!netif_running(vptr->netdev)) @@ -3015,20 +3140,23 @@ static int velocity_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_detach(vptr->netdev); spin_lock_irqsave(&vptr->lock, flags); - pci_save_state(pdev); + if (vptr->pdev) + pci_save_state(vptr->pdev); if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { velocity_get_ip(vptr); velocity_save_context(vptr, &vptr->context); velocity_shutdown(vptr); velocity_set_wol(vptr); - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_set_power_state(pdev, PCI_D3hot); + if (vptr->pdev) + pci_enable_wake(vptr->pdev, PCI_D3hot, 1); + velocity_set_power_state(vptr, PCI_D3hot); } else { velocity_save_context(vptr, &vptr->context); velocity_shutdown(vptr); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + if (vptr->pdev) + pci_disable_device(vptr->pdev); + velocity_set_power_state(vptr, PCI_D3hot); } spin_unlock_irqrestore(&vptr->lock, flags); @@ -3070,19 +3198,22 @@ static void velocity_restore_context(struct velocity_info *vptr, struct velocity writeb(*((u8 *) (context->mac_reg + i)), ptr + i); } -static int velocity_resume(struct pci_dev *pdev) +static int velocity_resume(struct device *dev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct velocity_info *vptr = netdev_priv(dev); + struct net_device *netdev = dev_get_drvdata(dev); + struct velocity_info *vptr = netdev_priv(netdev); unsigned long 
flags; int i; if (!netif_running(vptr->netdev)) return 0; - pci_set_power_state(pdev, PCI_D0); - pci_enable_wake(pdev, 0, 0); - pci_restore_state(pdev); + velocity_set_power_state(vptr, PCI_D0); + + if (vptr->pdev) { + pci_enable_wake(vptr->pdev, 0, 0); + pci_restore_state(vptr->pdev); + } mac_wol_reset(vptr->mac_regs); @@ -3104,23 +3235,34 @@ static int velocity_resume(struct pci_dev *pdev) return 0; } -#endif +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume); /* * Definition for our device driver. The PCI layer interface * uses this to handle all our card discover and plugging */ -static struct pci_driver velocity_driver = { +static struct pci_driver velocity_pci_driver = { .name = VELOCITY_NAME, - .id_table = velocity_id_table, - .probe = velocity_found1, - .remove = velocity_remove1, -#ifdef CONFIG_PM - .suspend = velocity_suspend, - .resume = velocity_resume, -#endif + .id_table = velocity_pci_id_table, + .probe = velocity_pci_probe, + .remove = velocity_pci_remove, + .driver = { + .pm = &velocity_pm_ops, + }, }; +static struct platform_driver velocity_platform_driver = { + .probe = velocity_platform_probe, + .remove = velocity_platform_remove, + .driver = { + .name = "via-velocity", + .owner = THIS_MODULE, + .of_match_table = velocity_of_ids, + .pm = &velocity_pm_ops, + }, +}; /** * velocity_ethtool_up - pre hook for ethtool @@ -3133,7 +3275,7 @@ static int velocity_ethtool_up(struct net_device *dev) { struct velocity_info *vptr = netdev_priv(dev); if (!netif_running(dev)) - pci_set_power_state(vptr->pdev, PCI_D0); + velocity_set_power_state(vptr, PCI_D0); return 0; } @@ -3148,7 +3290,7 @@ static void velocity_ethtool_down(struct net_device *dev) { struct velocity_info *vptr = netdev_priv(dev); if (!netif_running(dev)) - pci_set_power_state(vptr->pdev, PCI_D3hot); + velocity_set_power_state(vptr, PCI_D3hot); } static int velocity_get_settings(struct net_device *dev, @@ -3268,9 +3410,14 @@ static int velocity_set_settings(struct net_device *dev, static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct velocity_info *vptr = netdev_priv(dev); + strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver)); strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info)); + if (vptr->pdev) + strlcpy(info->bus_info, pci_name(vptr->pdev), + sizeof(info->bus_info)); + else + strlcpy(info->bus_info, "platform", sizeof(info->bus_info)); } static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -3560,13 +3707,20 @@ static void velocity_unregister_notifier(void) */ static int __init velocity_init_module(void) { - int ret; + int ret_pci, ret_platform; velocity_register_notifier(); - ret = pci_register_driver(&velocity_driver); - if (ret < 0) + + ret_pci = pci_register_driver(&velocity_pci_driver); + ret_platform = platform_driver_register(&velocity_platform_driver); + + /* if both_registers failed, remove the notifier */ + if ((ret_pci < 0) && (ret_platform < 0)) { velocity_unregister_notifier(); - return ret; + return ret_pci; + } + + return 0; } /** @@ -3580,7 +3734,9 @@ static int __init velocity_init_module(void) static void __exit velocity_cleanup_module(void) { velocity_unregister_notifier(); - pci_unregister_driver(&velocity_driver); + + pci_unregister_driver(&velocity_pci_driver); + platform_driver_unregister(&velocity_platform_driver); } module_init(velocity_init_module); diff --git 
a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h index c38bbaed4d12..9453bfa9324a 100644 --- a/drivers/net/ethernet/via/via-velocity.h +++ b/drivers/net/ethernet/via/via-velocity.h @@ -1265,7 +1265,7 @@ struct velocity_context { #define PHYID_VT3216_64BIT 0x000FC600UL #define PHYID_MARVELL_1000 0x01410C50UL #define PHYID_MARVELL_1000S 0x01410C40UL - +#define PHYID_ICPLUS_IP101A 0x02430C54UL #define PHYID_REV_ID_MASK 0x0000000FUL #define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK) @@ -1437,6 +1437,7 @@ struct velocity_info { struct device *dev; struct pci_dev *pdev; struct net_device *netdev; + int no_eeprom; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; u8 ip_addr[4]; -- cgit v1.2.3 From 4a5bddf7ea6b6c5916eccbc2fa1950555073ff48 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 20 May 2013 03:06:17 +0000 Subject: fec: Let device core handle pinctrl Since commit ab78029 (drivers/pinctrl: grab default handles from device core) we can rely on device core for handling pinctrl, so remove devm_pinctrl_get_select_default() from the driver. Signed-off-by: Fabio Estevam Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ca9825ca88c9..2f4b3e028012 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -53,7 +53,6 @@ #include #include #include -#include #include #include @@ -1841,7 +1840,6 @@ fec_probe(struct platform_device *pdev) struct resource *r; const struct of_device_id *of_id; static int dev_id; - struct pinctrl *pinctrl; struct regulator *reg_phy; of_id = of_match_device(fec_dt_ids, &pdev->dev); @@ -1891,12 +1889,6 @@ fec_probe(struct platform_device *pdev) fep->phy_interface = ret; } - pinctrl = devm_pinctrl_get_select_default(&pdev->dev); - if (IS_ERR(pinctrl)) { - ret = PTR_ERR(pinctrl); - goto failed_pin; - } - fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(fep->clk_ipg)) { ret = PTR_ERR(fep->clk_ipg); @@ -1996,7 +1988,6 @@ failed_regulator: clk_disable_unprepare(fep->clk_ipg); clk_disable_unprepare(fep->clk_enet_out); clk_disable_unprepare(fep->clk_ptp); -failed_pin: failed_clk: failed_ioremap: free_netdev(ndev); -- cgit v1.2.3 From 0b8bf1baabe56f721d541953f083560d0660d78f Mon Sep 17 00:00:00 2001 From: Tomasz Figa Date: Mon, 20 May 2013 09:16:58 +0000 Subject: net: dm9000: Allow instantiation using device tree This patch adds Device Tree support to dm9000 driver. Signed-off-by: Tomasz Figa Reviewed-by: Sylwester Nawrocki Reviewed-by: Sascha Hauer Signed-off-by: David S. 
Miller --- .../devicetree/bindings/net/davicom-dm9000.txt | 26 ++++++++++++++ .../devicetree/bindings/vendor-prefixes.txt | 1 + drivers/net/ethernet/davicom/dm9000.c | 42 ++++++++++++++++++++++ 3 files changed, 69 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/davicom-dm9000.txt (limited to 'drivers/net/ethernet') diff --git a/Documentation/devicetree/bindings/net/davicom-dm9000.txt b/Documentation/devicetree/bindings/net/davicom-dm9000.txt new file mode 100644 index 000000000000..2d39c990e641 --- /dev/null +++ b/Documentation/devicetree/bindings/net/davicom-dm9000.txt @@ -0,0 +1,26 @@ +Davicom DM9000 Fast Ethernet controller + +Required properties: +- compatible = "davicom,dm9000"; +- reg : physical addresses and sizes of registers, must contain 2 entries: + first entry : address register, + second entry : data register. +- interrupt-parent : interrupt controller to which the device is connected +- interrupts : interrupt specifier specific to interrupt controller + +Optional properties: +- local-mac-address : A bytestring of 6 bytes specifying Ethernet MAC address + to use (from firmware or bootloader) +- davicom,no-eeprom : Configuration EEPROM is not available +- davicom,ext-phy : Use external PHY + +Example: + + ethernet@18000000 { + compatible = "davicom,dm9000"; + reg = <0x18000000 0x2 0x18000004 0x2>; + interrupt-parent = <&gpn>; + interrupts = <7 4>; + local-mac-address = [00 00 de ad be ef]; + davicom,no-eeprom; + }; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 6931c4348d24..2fe74e6ec209 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -18,6 +18,7 @@ chrp Common Hardware Reference Platform cirrus Cirrus Logic, Inc. cortina Cortina Systems, Inc. dallas Maxim Integrated Products (formerly Dallas Semiconductor) +davicom DAVICOM Semiconductor, Inc. denx Denx Software Engineering emmicro EM Microelectronic epson Seiko Epson Corp. 
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index a2408c84bbdd..dd243a1b03e0 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -29,6 +29,8 @@ #include #include #include +#include +#include #include #include #include @@ -1351,6 +1353,31 @@ static const struct net_device_ops dm9000_netdev_ops = { #endif }; +static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) +{ + struct dm9000_plat_data *pdata; + struct device_node *np = dev->of_node; + const void *mac_addr; + + if (!IS_ENABLED(CONFIG_OF) || !np) + return NULL; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + if (of_find_property(np, "davicom,ext-phy", NULL)) + pdata->flags |= DM9000_PLATF_EXT_PHY; + if (of_find_property(np, "davicom,no-eeprom", NULL)) + pdata->flags |= DM9000_PLATF_NO_EEPROM; + + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr)); + + return pdata; +} + /* * Search DM9000 board, allocate space and register it */ @@ -1366,6 +1393,12 @@ dm9000_probe(struct platform_device *pdev) int i; u32 id_val; + if (!pdata) { + pdata = dm9000_parse_dt(&pdev->dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + } + /* Init network device */ ndev = alloc_etherdev(sizeof(struct board_info)); if (!ndev) @@ -1676,11 +1709,20 @@ dm9000_drv_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_OF +static const struct of_device_id dm9000_of_matches[] = { + { .compatible = "davicom,dm9000", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, dm9000_of_matches); +#endif + static struct platform_driver dm9000_driver = { .driver = { .name = "dm9000", .owner = THIS_MODULE, .pm = &dm9000_drv_pm_ops, + .of_match_table = of_match_ptr(dm9000_of_matches), }, .probe = dm9000_probe, .remove = dm9000_drv_remove, -- cgit v1.2.3 From 4b2305826ccd57a8b22d3daff543837ba27cfd43 Mon Sep 17 00:00:00 2001 From: Rasesh Mody Date: Mon, 20 May 2013 10:08:01 +0000 Subject: bna: Clear Driver Config Flags When HW Resets Driver configuration flags are retained across open/stop operations preventing configurations to be set in next open/stop. Setting MTU on a 1020 causes network to fail until a reboot is performed on the host. Clear the flags when configuration resets in hardware. Signed-off-by: Rasesh Mody Signed-off-by: David S. Miller --- drivers/net/ethernet/brocade/bna/bnad.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 07f7ef05c3f2..b78e69e0e52a 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -2624,6 +2624,9 @@ bnad_stop(struct net_device *netdev) bnad_destroy_tx(bnad, 0); bnad_destroy_rx(bnad, 0); + /* These config flags are cleared in the hardware */ + bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | BNAD_CF_PROMISC); + /* Synchronize mailbox IRQ */ bnad_mbox_irq_sync(bnad); -- cgit v1.2.3 From f489a4ba769ca1d7dd4657a4f68ea93746dd669b Mon Sep 17 00:00:00 2001 From: Rasesh Mody Date: Mon, 20 May 2013 10:08:02 +0000 Subject: bna: Fix Ucast Failure Handling Failure of the UCAST set for base mac address fails when user configures a duplicate mac address that matches that of another vNIC on the same port. The bna does not handle the ucast failure and keeps this address in cache. 
On disable of the vNIC, bna tries to delete the failed base mac address and the fw asserts. On failure of ucast address, mark ucast address set to false. Signed-off-by: Rasesh Mody Signed-off-by: David S. Miller --- drivers/net/ethernet/brocade/bna/bna.h | 2 ++ drivers/net/ethernet/brocade/bna/bna_enet.c | 7 ++++++- drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 15 +++++++++++++++ 3 files changed, 23 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h index 25dae757e9c4..f1eafc409bbd 100644 --- a/drivers/net/ethernet/brocade/bna/bna.h +++ b/drivers/net/ethernet/brocade/bna/bna.h @@ -455,6 +455,8 @@ void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr); void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr); /* APIs for BNA */ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index db14f69d63bc..3ca77fad4851 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -298,7 +298,6 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr) case BFI_ENET_I2H_RSS_ENABLE_RSP: case BFI_ENET_I2H_RX_PROMISCUOUS_RSP: case BFI_ENET_I2H_RX_DEFAULT_RSP: - case BFI_ENET_I2H_MAC_UCAST_SET_RSP: case BFI_ENET_I2H_MAC_UCAST_CLR_RSP: case BFI_ENET_I2H_MAC_UCAST_ADD_RSP: case BFI_ENET_I2H_MAC_UCAST_DEL_RSP: @@ -311,6 +310,12 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr) bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr); break; + case BFI_ENET_I2H_MAC_UCAST_SET_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr); + break; + case BFI_ENET_I2H_MAC_MCAST_ADD_RSP: bna_rx_from_rid(bna, msghdr->enet_id, rx); if (rx) diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index ea6f4a036401..57cd1bff59f1 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -710,6 +710,21 @@ bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr) bfa_fsm_send_event(rxf, RXF_E_FW_RESP); } +void +bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_rsp *rsp = + (struct bfi_enet_rsp *)msghdr; + + if (rsp->error) { + /* Clear ucast from cache */ + rxf->ucast_active_set = 0; + } + + bfa_fsm_send_event(rxf, RXF_E_FW_RESP); +} + void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr) -- cgit v1.2.3 From 43c07adaebc34d8c23cdc60128fc7f41088515f7 Mon Sep 17 00:00:00 2001 From: Rasesh Mody Date: Mon, 20 May 2013 10:08:03 +0000 Subject: bna: Enahncement to Identify Default IOC Function User should not be allowed to delete base function of eth port. Add a new field to the bfa ioc attributes structure to indicate if the given ioc is default function on the port or not. Signed-off-by: Rasesh Mody Signed-off-by: David S. 
Miller --- drivers/net/ethernet/brocade/bna/bfa_defs.h | 3 ++- drivers/net/ethernet/brocade/bna/bfa_ioc.c | 7 ++++--- drivers/net/ethernet/brocade/bna/bfa_ioc.h | 2 ++ 3 files changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h index e423f82da490..b7d8127c198f 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h @@ -164,7 +164,8 @@ struct bfa_ioc_attr { u8 port_mode; /*!< enum bfa_mode */ u8 cap_bm; /*!< capability */ u8 port_mode_cfg; /*!< enum bfa_mode */ - u8 rsvd[4]; /*!< 64bit align */ + u8 def_fn; /*!< 1 if default fn */ + u8 rsvd[3]; /*!< 64bit align */ }; /* Adapter capability mask definition */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index f2b73ffa9122..6f3cac060f29 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -2371,7 +2371,7 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr)); ioc_attr->state = bfa_ioc_get_state(ioc); - ioc_attr->port_id = ioc->port_id; + ioc_attr->port_id = bfa_ioc_portid(ioc); ioc_attr->port_mode = ioc->port_mode; ioc_attr->port_mode_cfg = ioc->port_mode_cfg; @@ -2381,8 +2381,9 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); - ioc_attr->pci_attr.device_id = ioc->pcidev.device_id; - ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func; + ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc); + ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc); + ioc_attr->def_fn = bfa_ioc_is_default(ioc); bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); } diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h index 63a85e555df8..f04e0aab25b4 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -222,6 +222,8 @@ struct bfa_ioc_hwif { #define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva) #define bfa_ioc_portid(__ioc) ((__ioc)->port_id) #define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen) +#define bfa_ioc_is_default(__ioc) \ + (bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc)) #define bfa_ioc_fetch_stats(__ioc, __stats) \ (((__stats)->drv_stats) = (__ioc)->stats) #define bfa_ioc_clr_stats(__ioc) \ -- cgit v1.2.3 From 45e983414334f217c60bd04d39d6f5ec2d8d7bb4 Mon Sep 17 00:00:00 2001 From: Rasesh Mody Date: Mon, 20 May 2013 10:08:04 +0000 Subject: bna: Driver and Firmware Updated Driver and Firmware versions updated to 3.2.21.1. Signed-off-by: Rasesh Mody Signed-off-by: David S. 
Miller --- drivers/net/ethernet/brocade/bna/bnad.h | 2 +- drivers/net/ethernet/brocade/bna/cna.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index c1d0bc059bfd..aefee77523f2 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -71,7 +71,7 @@ struct bnad_rx_ctrl { #define BNAD_NAME "bna" #define BNAD_NAME_LEN 64 -#define BNAD_VERSION "3.1.2.1" +#define BNAD_VERSION "3.2.21.1" #define BNAD_MAILBOX_MSIX_INDEX 0 #define BNAD_MAILBOX_MSIX_VECTORS 1 diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h index 14ca9317c915..c37f706d9992 100644 --- a/drivers/net/ethernet/brocade/bna/cna.h +++ b/drivers/net/ethernet/brocade/bna/cna.h @@ -37,8 +37,8 @@ extern char bfa_version[]; -#define CNA_FW_FILE_CT "ctfw-3.1.0.0.bin" -#define CNA_FW_FILE_CT2 "ct2fw-3.1.0.0.bin" +#define CNA_FW_FILE_CT "ctfw-3.2.1.0.bin" +#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.0.bin" #define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ #pragma pack(1) -- cgit v1.2.3 From e80bd1d181ff4601d88cf438817a3a7e84fe6912 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Wed, 1 May 2013 01:19:46 +0000 Subject: e1000e: cleanup whitespace Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/80003es2lan.c | 24 +++++----- drivers/net/ethernet/intel/e1000e/82571.c | 24 +++++----- drivers/net/ethernet/intel/e1000e/ethtool.c | 34 +++++++------- drivers/net/ethernet/intel/e1000e/hw.h | 34 +++++++------- drivers/net/ethernet/intel/e1000e/ich8lan.c | 62 ++++++++++++------------- drivers/net/ethernet/intel/e1000e/netdev.c | 51 ++++++++++---------- drivers/net/ethernet/intel/e1000e/nvm.c | 1 - drivers/net/ethernet/intel/e1000e/phy.c | 22 ++++----- 8 files changed, 125 insertions(+), 127 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index b71c8502a2b3..895450e9bb3c 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -66,17 +66,17 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) s32 ret_val; if (hw->phy.media_type != e1000_media_type_copper) { - phy->type = e1000_phy_none; + phy->type = e1000_phy_none; return 0; } else { phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; } - phy->addr = 1; - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - phy->reset_delay_us = 100; - phy->type = e1000_phy_gg82563; + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + phy->type = e1000_phy_gg82563; /* This can only be done after all function pointers are setup. */ ret_val = e1000e_get_phy_id(hw); @@ -98,19 +98,19 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) u32 eecd = er32(EECD); u16 size; - nvm->opcode_bits = 8; - nvm->delay_usec = 1; + nvm->opcode_bits = 8; + nvm->delay_usec = 1; switch (nvm->override) { case e1000_nvm_override_spi_large: - nvm->page_size = 32; + nvm->page_size = 32; nvm->address_bits = 16; break; case e1000_nvm_override_spi_small: - nvm->page_size = 8; + nvm->page_size = 8; nvm->address_bits = 8; break; default: - nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 
32 : 8; nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; break; } @@ -128,7 +128,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) /* EEPROM access above 16k is unsupported */ if (size > 14) size = 14; - nvm->word_size = 1 << size; + nvm->word_size = 1 << size; return 0; } @@ -859,7 +859,7 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) /* Transmit Arbitration Control 0 */ reg = er32(TARC(0)); - reg &= ~(0xF << 27); /* 30:27 */ + reg &= ~(0xF << 27); /* 30:27 */ if (hw->phy.media_type != e1000_media_type_copper) reg &= ~(1 << 20); ew32(TARC(0), reg); diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 7380442a3829..bef2f103ab03 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -77,24 +77,24 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) return 0; } - phy->addr = 1; - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - phy->reset_delay_us = 100; + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; - phy->ops.power_up = e1000_power_up_phy_copper; - phy->ops.power_down = e1000_power_down_phy_copper_82571; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82571; switch (hw->mac.type) { case e1000_82571: case e1000_82572: - phy->type = e1000_phy_igp_2; + phy->type = e1000_phy_igp_2; break; case e1000_82573: - phy->type = e1000_phy_m88; + phy->type = e1000_phy_m88; break; case e1000_82574: case e1000_82583: - phy->type = e1000_phy_bm; + phy->type = e1000_phy_bm; phy->ops.acquire = e1000_get_hw_semaphore_82574; phy->ops.release = e1000_put_hw_semaphore_82574; phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; @@ -193,7 +193,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) /* EEPROM access above 16k is unsupported */ if (size > 14) size = 14; - nvm->word_size = 1 << size; + nvm->word_size = 1 << size; break; } @@ -339,7 +339,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - static int global_quad_port_a; /* global port a indication */ + static int global_quad_port_a; /* global port a indication */ struct pci_dev *pdev = adapter->pdev; int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; s32 rc; @@ -1178,7 +1178,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) /* Transmit Arbitration Control 0 */ reg = er32(TARC(0)); - reg &= ~(0xF << 27); /* 30:27 */ + reg &= ~(0xF << 27); /* 30:27 */ switch (hw->mac.type) { case e1000_82571: case e1000_82572: @@ -1390,7 +1390,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw) ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); if (ret_val) return false; - if (receive_errors == E1000_RECEIVE_ERROR_MAX) { + if (receive_errors == E1000_RECEIVE_ERROR_MAX) { ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); if (ret_val) return false; diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 7c8ca658d553..59c22bf18701 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -244,7 +244,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) mac->autoneg = 1; adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; - case SPEED_1000 + DUPLEX_HALF: /* not 
supported */ + case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: goto err_inval; } @@ -416,7 +416,7 @@ static void e1000_set_msglevel(struct net_device *netdev, u32 data) static int e1000_get_regs_len(struct net_device __always_unused *netdev) { -#define E1000_REGS_LEN 32 /* overestimate */ +#define E1000_REGS_LEN 32 /* overestimate */ return E1000_REGS_LEN * sizeof(u32); } @@ -433,22 +433,22 @@ static void e1000_get_regs(struct net_device *netdev, regs->version = (1 << 24) | (adapter->pdev->revision << 16) | adapter->pdev->device; - regs_buff[0] = er32(CTRL); - regs_buff[1] = er32(STATUS); + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); - regs_buff[2] = er32(RCTL); - regs_buff[3] = er32(RDLEN(0)); - regs_buff[4] = er32(RDH(0)); - regs_buff[5] = er32(RDT(0)); - regs_buff[6] = er32(RDTR); + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN(0)); + regs_buff[4] = er32(RDH(0)); + regs_buff[5] = er32(RDT(0)); + regs_buff[6] = er32(RDTR); - regs_buff[7] = er32(TCTL); - regs_buff[8] = er32(TDLEN(0)); - regs_buff[9] = er32(TDH(0)); + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN(0)); + regs_buff[9] = er32(TDH(0)); regs_buff[10] = er32(TDT(0)); regs_buff[11] = er32(TIDV); - regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ + regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ /* ethtool doesn't use anything past this point, so all this * code is likely legacy junk for apps that may or may not exist @@ -1379,7 +1379,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) if (hw->phy.media_type == e1000_media_type_copper && hw->phy.type == e1000_phy_m88) { - ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ } else { /* Set the ILOS bit on the fiber Nic if half duplex link is * detected. 
@@ -1613,7 +1613,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) ew32(TDT(0), k); e1e_flush(); msleep(200); - time = jiffies; /* set the start time for the receive */ + time = jiffies; /* set the start time for the receive */ good_cnt = 0; /* receive the sent packets */ do { @@ -1636,11 +1636,11 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) */ } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); if (good_cnt != 64) { - ret_val = 13; /* ret_val is the same as mis-compare */ + ret_val = 13; /* ret_val is the same as mis-compare */ break; } if (jiffies >= (time + 20)) { - ret_val = 14; /* error code for time out error */ + ret_val = 14; /* error code for time out error */ break; } } diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 84850f7a23e4..a6f903a9b773 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -402,13 +402,13 @@ struct e1000_phy_stats { struct e1000_host_mng_dhcp_cookie { u32 signature; - u8 status; - u8 reserved0; + u8 status; + u8 reserved0; u16 vlan_id; u32 reserved1; u16 reserved2; - u8 reserved3; - u8 checksum; + u8 reserved3; + u8 checksum; }; /* Host Interface "Rev 1" */ @@ -427,8 +427,8 @@ struct e1000_host_command_info { /* Host Interface "Rev 2" */ struct e1000_host_mng_command_header { - u8 command_id; - u8 checksum; + u8 command_id; + u8 checksum; u16 reserved1; u16 reserved2; u16 command_length; @@ -549,7 +549,7 @@ struct e1000_mac_info { u32 mta_shadow[MAX_MTA_REG]; u16 rar_entry_count; - u8 forced_speed_duplex; + u8 forced_speed_duplex; bool adaptive_ifs; bool has_fwsm; @@ -577,7 +577,7 @@ struct e1000_phy_info { u32 addr; u32 id; - u32 reset_delay_us; /* in usec */ + u32 reset_delay_us; /* in usec */ u32 revision; enum e1000_media_type media_type; @@ -636,11 +636,11 @@ struct e1000_dev_spec_82571 { }; struct e1000_dev_spec_80003es2lan { - bool mdic_wa_enable; + bool mdic_wa_enable; }; struct e1000_shadow_ram { - u16 value; + u16 value; bool modified; }; @@ -660,17 +660,17 @@ struct e1000_hw { void __iomem *hw_addr; void __iomem *flash_address; - struct e1000_mac_info mac; - struct e1000_fc_info fc; - struct e1000_phy_info phy; - struct e1000_nvm_info nvm; - struct e1000_bus_info bus; + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; struct e1000_host_mng_dhcp_cookie mng_cookie; union { - struct e1000_dev_spec_82571 e82571; + struct e1000_dev_spec_82571 e82571; struct e1000_dev_spec_80003es2lan e80003es2lan; - struct e1000_dev_spec_ich8lan ich8lan; + struct e1000_dev_spec_ich8lan ich8lan; } dev_spec; }; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index ad9d8f2dd868..9dde390f7e71 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -101,12 +101,12 @@ union ich8_hws_flash_regacc { /* ICH Flash Protected Region */ union ich8_flash_protected_range { struct ich8_pr { - u32 base:13; /* 0:12 Protected Range Base */ - u32 reserved1:2; /* 13:14 Reserved */ - u32 rpe:1; /* 15 Read Protection Enable */ - u32 limit:13; /* 16:28 Protected Range Limit */ - u32 reserved2:2; /* 29:30 Reserved */ - u32 wpe:1; /* 31 Write Protection Enable */ + u32 base:13; /* 0:12 Protected Range Base */ + u32 reserved1:2; /* 13:14 Reserved */ + u32 rpe:1; /* 15 Read Protection Enable */ + u32 limit:13; /* 16:28 Protected Range Limit */ + 
u32 reserved2:2; /* 29:30 Reserved */ + u32 wpe:1; /* 31 Write Protection Enable */ } range; u32 regval; }; @@ -362,21 +362,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) struct e1000_phy_info *phy = &hw->phy; s32 ret_val; - phy->addr = 1; - phy->reset_delay_us = 100; - - phy->ops.set_page = e1000_set_page_igp; - phy->ops.read_reg = e1000_read_phy_reg_hv; - phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; - phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; - phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; - phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; - phy->ops.write_reg = e1000_write_phy_reg_hv; - phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; - phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; - phy->ops.power_up = e1000_power_up_phy_copper; - phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; - phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.set_page = e1000_set_page_igp; + phy->ops.read_reg = e1000_read_phy_reg_hv; + phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; + phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; + phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.write_reg = e1000_write_phy_reg_hv; + phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; + phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->id = e1000_phy_unknown; @@ -445,11 +445,11 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) s32 ret_val; u16 i = 0; - phy->addr = 1; - phy->reset_delay_us = 100; + phy->addr = 1; + phy->reset_delay_us = 100; - phy->ops.power_up = e1000_power_up_phy_copper; - phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; /* We may need to do this twice - once for IGP and if that fails, * we'll set BM func pointers and try again @@ -457,7 +457,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) ret_val = e1000e_determine_phy_address(hw); if (ret_val) { phy->ops.write_reg = e1000e_write_phy_reg_bm; - phy->ops.read_reg = e1000e_read_phy_reg_bm; + phy->ops.read_reg = e1000e_read_phy_reg_bm; ret_val = e1000e_determine_phy_address(hw); if (ret_val) { e_dbg("Cannot determine PHY addr. 
Erroring out\n"); @@ -560,7 +560,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) /* Clear shadow ram */ for (i = 0; i < nvm->word_size; i++) { dev_spec->shadow_ram[i].modified = false; - dev_spec->shadow_ram[i].value = 0xFFFF; + dev_spec->shadow_ram[i].value = 0xFFFF; } return 0; @@ -1012,7 +1012,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) hw->dev_spec.ich8lan.eee_lp_ability = 0; if (!link) - return 0; /* No link detected */ + return 0; /* No link detected */ mac->get_link_status = false; @@ -2816,7 +2816,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val = -E1000_ERR_NVM; u8 count = 0; - if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + @@ -2939,7 +2939,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) * write to bank 0 etc. We also need to erase the segment that * is going to be written */ - ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); if (ret_val) { e_dbg("Could not detect valid bank, assuming bank 0\n"); bank = 0; @@ -4073,7 +4073,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) { u32 reg; u16 data; - u8 retry = 0; + u8 retry = 0; if (hw->phy.type != e1000_phy_igp_3) return; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a27e3bcc3249..ad5f43476924 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1196,7 +1196,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ + rmb(); /* read buffer_info after eop_desc */ for (; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -1385,7 +1385,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, skb_put(skb, l1); goto copydone; - } /* if */ + } /* if */ } for (j = 0; j < PS_PAGE_BUFFERS; j++) { @@ -1800,7 +1800,7 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) u32 rctl, icr = er32(ICR); if (!icr || test_bit(__E1000_DOWN, &adapter->state)) - return IRQ_NONE; /* Not our interrupt */ + return IRQ_NONE; /* Not our interrupt */ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is * not set, then the adapter didn't send an interrupt @@ -2487,7 +2487,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) else if ((packets < 5) && (bytes > 512)) retval = low_latency; break; - case low_latency: /* 50 usec aka 20000 ints/s */ + case low_latency: /* 50 usec aka 20000 ints/s */ if (bytes > 10000) { /* this if handles the TSO accounting */ if (bytes / packets > 8000) @@ -2502,7 +2502,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) retval = lowest_latency; } break; - case bulk_latency: /* 250 usec aka 4000 ints/s */ + case bulk_latency: /* 250 usec aka 4000 ints/s */ if (bytes > 25000) { if (packets > 35) retval = low_latency; @@ -2554,7 +2554,7 @@ static void e1000_set_itr(struct e1000_adapter *adapter) new_itr = 70000; break; case low_latency: - new_itr = 20000; /* aka hwitr = ~200 */ + new_itr = 20000; /* aka hwitr = ~200 */ 
break; case bulk_latency: new_itr = 4000; @@ -3104,13 +3104,13 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) /* UPE and MPE will be handled by normal PROMISC logic * in e1000e_set_rx_mode */ - rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ - E1000_RCTL_BAM | /* RX All Bcast Pkts */ - E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ - rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ - E1000_RCTL_DPF | /* Allow filtered pause */ - E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ /* Do not mess with E1000_CTRL_VME, it affects transmit as well, * and that breaks VLANs. */ @@ -3799,7 +3799,7 @@ void e1000e_reset(struct e1000_adapter *adapter) hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - adapter->max_frame_size)); - fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ + fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ fc->low_water = fc->high_water - 8; break; case e1000_pchlan: @@ -3808,10 +3808,10 @@ void e1000e_reset(struct e1000_adapter *adapter) */ if (adapter->netdev->mtu > ETH_DATA_LEN) { fc->high_water = 0x3500; - fc->low_water = 0x1500; + fc->low_water = 0x1500; } else { fc->high_water = 0x5000; - fc->low_water = 0x3000; + fc->low_water = 0x3000; } fc->refresh_time = 0x1000; break; @@ -4581,7 +4581,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.crcerrs += er32(CRCERRS); adapter->stats.gprc += er32(GPRC); adapter->stats.gorc += er32(GORCL); - er32(GORCH); /* Clear gorc */ + er32(GORCH); /* Clear gorc */ adapter->stats.bprc += er32(BPRC); adapter->stats.mprc += er32(MPRC); adapter->stats.roc += er32(ROC); @@ -4614,7 +4614,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.xofftxc += er32(XOFFTXC); adapter->stats.gptc += er32(GPTC); adapter->stats.gotc += er32(GOTCL); - er32(GOTCH); /* Clear gotc */ + er32(GOTCH); /* Clear gotc */ adapter->stats.rnbc += er32(RNBC); adapter->stats.ruc += er32(RUC); @@ -5106,13 +5106,13 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) context_desc = E1000_CONTEXT_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; - context_desc->lower_setup.ip_fields.ipcss = ipcss; - context_desc->lower_setup.ip_fields.ipcso = ipcso; - context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); context_desc->upper_setup.tcp_fields.tucss = tucss; context_desc->upper_setup.tcp_fields.tucso = tucso; context_desc->upper_setup.tcp_fields.tucse = 0; - context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; context_desc->cmd_and_length = cpu_to_le32(cmd_length); @@ -5363,7 +5363,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) { - struct e1000_hw *hw = &adapter->hw; + struct e1000_hw *hw = &adapter->hw; u16 length, offset; if (vlan_tx_tag_present(skb) && @@ -6259,7 +6259,7 @@ static void e1000_netpoll(struct net_device *netdev) 
e1000_intr_msi(adapter->pdev->irq, netdev); enable_irq(adapter->pdev->irq); break; - default: /* E1000E_INT_MODE_LEGACY */ + default: /* E1000E_INT_MODE_LEGACY */ disable_irq(adapter->pdev->irq); e1000_intr(adapter->pdev->irq, netdev); enable_irq(adapter->pdev->irq); @@ -6589,9 +6589,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; /* construct the net_device struct */ - netdev->netdev_ops = &e1000e_netdev_ops; + netdev->netdev_ops = &e1000e_netdev_ops; e1000e_set_ethtool_ops(netdev); - netdev->watchdog_timeo = 5 * HZ; + netdev->watchdog_timeo = 5 * HZ; netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); @@ -7034,7 +7034,6 @@ static void __exit e1000_exit_module(void) } module_exit(e1000_exit_module); - MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index 44ddc0a0ee0e..d70a03906ac0 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -117,7 +117,6 @@ static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) u16 data; eecd = er32(EECD); - eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); data = 0; diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 59c76a6815a0..da2be59505c0 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1583,13 +1583,13 @@ s32 e1000e_check_downshift(struct e1000_hw *hw) case e1000_phy_gg82563: case e1000_phy_bm: case e1000_phy_82578: - offset = M88E1000_PHY_SPEC_STATUS; - mask = M88E1000_PSSR_DOWNSHIFT; + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; break; case e1000_phy_igp_2: case e1000_phy_igp_3: - offset = IGP01E1000_PHY_LINK_HEALTH; - mask = IGP01E1000_PLHR_SS_DOWNGRADE; + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; break; default: /* speed downshift not supported */ @@ -1653,14 +1653,14 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw) if ((data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { - offset = IGP01E1000_PHY_PCS_INIT_REG; - mask = IGP01E1000_PHY_POLARITY_MASK; + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; } else { /* This really only applies to 10Mbps since * there is no polarity for 100Mbps (always 0). 
*/ - offset = IGP01E1000_PHY_PORT_STATUS; - mask = IGP01E1000_PSSR_POLARITY_REVERSED; + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; } ret_val = e1e_rphy(hw, offset, &data); @@ -1900,7 +1900,7 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val; + s32 ret_val; u16 phy_data; bool link; @@ -2253,7 +2253,7 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) case M88E1011_I_PHY_ID: phy_type = e1000_phy_m88; break; - case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ + case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ phy_type = e1000_phy_igp_2; break; case GG82563_E_PHY_ID: @@ -2317,7 +2317,7 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw) /* If phy_type is valid, break - we found our * PHY address */ - if (phy_type != e1000_phy_unknown) + if (phy_type != e1000_phy_unknown) return 0; usleep_range(1000, 2000); -- cgit v1.2.3 From 603cdca9805e4f8001cf7ffbd8c539c9fa6674ce Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Wed, 1 May 2013 03:48:11 +0000 Subject: e1000e: prevent warning from -Wunused-parameter Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index ad5f43476924..77f81cbb601a 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2673,7 +2673,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight) } static int e1000_vlan_rx_add_vid(struct net_device *netdev, - __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -2699,7 +2699,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, } static int e1000_vlan_rx_kill_vid(struct net_device *netdev, - __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; -- cgit v1.2.3 From 6c1d8b96d09ed8852f5bc11c42374be3232374ce Mon Sep 17 00:00:00 2001 From: "Akeem G. Abodunrin" Date: Thu, 2 May 2013 02:57:44 +0000 Subject: e1000e: Release mutex lock only if it has been initially acquired This patch fixes the issue of unlocking swflag_mutex for 82574 and 82583 devices regardless of if the hw semaphore has been successfully acquired via e1000_get_hw_semaphore_82574(). With this patch, unlocking mutex now depends on if the hw semaphore was successfully acquired before. And 82574/82583 devices are reset regardless of whether e1000_get_hw_semaphore_82574() returns success or failure. 
Reported-by: Alexey Khoroshilov Signed-off-by: Akeem G Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/82571.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index bef2f103ab03..4c303e2a7cb3 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -1003,8 +1003,6 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) default: break; } - if (ret_val) - e_dbg("Cannot acquire MDIO ownership\n"); ctrl = er32(CTRL); @@ -1015,7 +1013,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_82574: case e1000_82583: - e1000_put_hw_semaphore_82574(hw); + /* Release mutex only if the hw semaphore is acquired */ + if (!ret_val) + e1000_put_hw_semaphore_82574(hw); break; default: break; -- cgit v1.2.3 From cf7ed221714c36848b257311b316452e274f7e15 Mon Sep 17 00:00:00 2001 From: "Akeem G. Abodunrin" Date: Fri, 29 Mar 2013 08:22:25 +0000 Subject: igb: Changed LEDs blink mechanism to include designs using cathode This patch addresses the changes needed to make LEDs work properly with negative logic. This implementation uses LED Invert bit to reverse the logic issue that occurred when LEDs are driven by cathode. Keep LEDs blinking for SerDes devices. Also made changes to magic number and the for loop to reduce number of shifts. Signed-off-by: Akeem G Abodunrin Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_defines.h | 6 +++-- drivers/net/ethernet/intel/igb/e1000_mac.c | 37 +++++++++++++++++++------- 2 files changed, 32 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 31a0f82cc650..aa176490ddbc 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -270,8 +270,10 @@ #define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX /* LED Control */ -#define E1000_LEDCTL_LED0_MODE_SHIFT 0 -#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_IVRT 0x00000040 #define E1000_LEDCTL_MODE_LED_ON 0xE #define E1000_LEDCTL_MODE_LED_OFF 0xF diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 2559d70a2321..660b7addfbb0 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -1406,15 +1406,34 @@ s32 igb_blink_led(struct e1000_hw *hw) u32 ledctl_blink = 0; u32 i; - /* set the blink bit for each LED that's "on" (0x0E) - * in ledctl_mode2 - */ - ledctl_blink = hw->mac.ledctl_mode2; - for (i = 0; i < 4; i++) - if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == - E1000_LEDCTL_MODE_LED_ON) - ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << - (i * 8)); + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. 
The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } wr32(E1000_LEDCTL, ledctl_blink); -- cgit v1.2.3 From 20a48412281732ddb75e0ac7d9e0b5406f1b6669 Mon Sep 17 00:00:00 2001 From: Matthew Vick Date: Wed, 24 Apr 2013 07:42:06 +0000 Subject: igb: Add update to last_rx_timestamp in Rx rings In order to support a more accurate check for a PTP Rx hang where the device can no longer timestamp received packets, we need to update, per ring, when the last Rx timestamp was. Because of how the PTP Rx hang logic works, the current logic is valid, but properly updating the ring variable increases the accuracy of the check. Signed-off-by: Matthew Vick Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb.h | 9 +++++++-- drivers/net/ethernet/intel/igb/igb_main.c | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 9d6c075e232d..a90570755ddc 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -514,13 +514,18 @@ extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, struct sk_buff *skb); -static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector, +static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) - igb_ptp_rx_rgtstamp(q_vector, skb); + igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); + + /* Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + rx_ring->last_rx_timestamp = jiffies; } extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 64cbe0dfe043..2c8f7e04b02b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6622,7 +6622,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring, igb_rx_checksum(rx_ring, rx_desc, skb); - igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); + igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { -- cgit v1.2.3 From 641ac5c0cd46919dc9be4c933f95edae1e4e4163 Mon Sep 17 00:00:00 2001 From: "Akeem G. Abodunrin" Date: Wed, 24 Apr 2013 16:54:50 +0000 Subject: igb: Support for SFP modules discovery This patch adds support for SFP modules media type discovery for SGMII, which will enable driver to detect supported external PHYs, including 100baseFXSFP module. 
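The media-type decision added in igb_set_sfp_media_type_82575() below boils down to the following sketch (a reader's summary, not the driver's code; the EEPROM offsets appear to correspond to the standard SFF-8472 A0h layout, where byte 0 is the module identifier, 0x03 for SFP, and byte 6 carries the Ethernet compliance codes decoded into the new e1000_sfp_flags bits):

    /* after reading the identifier and compliance-code bytes over I2C: */
    if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
        media_type = internal_serdes;
    } else if (eth_flags->e100_base_fx) {
        sgmii_active = true;
        media_type = internal_serdes;
    } else if (eth_flags->e1000_base_t) {
        sgmii_active = true;
        media_type = copper;
    } else {
        media_type = unknown;   /* module not recognized */
    }
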
Signed-off-by: Akeem G Abodunrin Signed-off-by: Carolyn Wyborny Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_82575.c | 120 +++++++++++++++++++++++- drivers/net/ethernet/intel/igb/e1000_defines.h | 30 +++--- drivers/net/ethernet/intel/igb/e1000_hw.h | 2 + drivers/net/ethernet/intel/igb/e1000_phy.c | 124 +++++++++++++++++++++++++ drivers/net/ethernet/intel/igb/e1000_phy.h | 20 ++++ drivers/net/ethernet/intel/igb/igb_ethtool.c | 34 +++---- 6 files changed, 291 insertions(+), 39 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index ff6a17cb1362..f21a91a299a2 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -401,12 +401,82 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw) return 0; } +/** + * igb_set_sfp_media_type_82575 - derives SFP module media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on SFP module. + * compatibility flags retrieved from SFP ID EEPROM. + **/ +static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_ERR_CONFIG; + u32 ctrl_ext = 0; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u8 tranceiver_type = 0; + s32 timeout = 3; + + /* Turn I2C interface ON and power on sfp cage */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); + + wrfl(); + + /* Read SFP module data */ + while (timeout) { + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), + &tranceiver_type); + if (ret_val == 0) + break; + msleep(100); + timeout--; + } + if (ret_val != 0) + goto out; + + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), + (u8 *)eth_flags); + if (ret_val != 0) + goto out; + + /* Check if there is some SFP module plugged and powered */ + if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || + (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { + dev_spec->module_plugged = true; + if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e100_base_fx) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e1000_base_t) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_copper; + } else { + hw->phy.media_type = e1000_media_type_unknown; + hw_dbg("PHY module has not been recognized\n"); + goto out; + } + } else { + hw->phy.media_type = e1000_media_type_unknown; + } + ret_val = 0; +out: + /* Restore I2C interface setting */ + wr32(E1000_CTRL_EXT, ctrl_ext); + return ret_val; +} + static s32 igb_get_invariants_82575(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; s32 ret_val; u32 ctrl_ext = 0; + u32 link_mode = 0; switch (hw->device_id) { case E1000_DEV_ID_82575EB_COPPER: @@ -470,15 +540,55 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) */ hw->phy.media_type = e1000_media_type_copper; dev_spec->sgmii_active = false; + dev_spec->module_plugged = false; ctrl_ext = rd32(E1000_CTRL_EXT); - switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { - case E1000_CTRL_EXT_LINK_MODE_SGMII: - dev_spec->sgmii_active = true; - break; + + link_mode = ctrl_ext 
& E1000_CTRL_EXT_LINK_MODE_MASK; + switch (link_mode) { case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: - case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: hw->phy.media_type = e1000_media_type_internal_serdes; + break; + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* Get phy control interface type set (MDIO vs. I2C)*/ + if (igb_sgmii_uses_mdio_82575(hw)) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + break; + } + /* fall through for I2C based SGMII */ + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* read media type from SFP EEPROM */ + ret_val = igb_set_sfp_media_type_82575(hw); + if ((ret_val != 0) || + (hw->phy.media_type == e1000_media_type_unknown)) { + /* If media type was not identified then return media + * type defined by the CTRL_EXT settings. + */ + hw->phy.media_type = e1000_media_type_internal_serdes; + + if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + } + + break; + } + + /* do not change link mode for 100BaseFX */ + if (dev_spec->eth_flags.e100_base_fx) + break; + + /* change current link mode setting */ + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + + if (hw->phy.media_type == e1000_media_type_copper) + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; + else + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + + wr32(E1000_CTRL_EXT, ctrl_ext); + break; default: break; diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index aa176490ddbc..aa201abb8ad2 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -61,20 +61,22 @@ /* Clear Interrupt timers after IMS clear */ /* packet buffer parity error detection enabled */ /* descriptor FIFO parity error detection enable */ -#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ -#define E1000_I2CCMD_REG_ADDR_SHIFT 16 -#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 -#define E1000_I2CCMD_OPCODE_READ 0x08000000 -#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 -#define E1000_I2CCMD_READY 0x20000000 -#define E1000_I2CCMD_ERROR 0x80000000 -#define E1000_MAX_SGMII_PHY_REG_ADDR 255 -#define E1000_I2CCMD_PHY_TIMEOUT 200 -#define E1000_IVAR_VALID 0x80 -#define E1000_GPIE_NSICR 0x00000001 -#define E1000_GPIE_MSIX_MODE 0x00000010 -#define E1000_GPIE_EIAME 0x40000000 -#define E1000_GPIE_PBA 0x80000000 +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 /* Receive Descriptor bit definitions */ #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 488abb24a54f..94d7866b9c20 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -528,6 +528,8 @@ struct e1000_dev_spec_82575 { bool global_device_reset; bool eee_disable; bool clear_semaphore_once; + struct e1000_sfp_flags 
eth_flags; + bool module_plugged; }; struct e1000_hw { diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 115b0da6e013..1d6a401cc5d4 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -340,6 +340,130 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) return 0; } +/** + * igb_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + hw_dbg("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + data_local = rd32(E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return 0; +} + +/** + * e1000_write_sfp_data_byte - Writes SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to write to + * @data: data to write + * + * Writes one byte to SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + hw_dbg("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + /* The programming interface is 16 bits wide + * so we need to read the whole word first + * then update appropriate byte lane and write + * the updated word back. + */ + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing + * with an EEPROM to write the data given. 
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + /* Set a command to read single word */ + wr32(E1000_I2CCMD, i2ccmd); + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + /* Poll the ready bit to see if lastly + * launched I2C operation completed + */ + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) { + /* Check if this is READ or WRITE phase */ + if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) == + E1000_I2CCMD_OPCODE_READ) { + /* Write the selected byte + * lane and update whole word + */ + data_local = i2ccmd & 0xFF00; + data_local |= data; + i2ccmd = ((offset << + E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | data_local); + wr32(E1000_I2CCMD, i2ccmd); + } else { + break; + } + } + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + return 0; +} + /** * igb_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 784fd1c40989..6a0873f2095a 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -69,6 +69,8 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data); s32 igb_copper_link_setup_82580(struct e1000_hw *hw); s32 igb_get_phy_info_82580(struct e1000_hw *hw); s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); @@ -157,4 +159,22 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw); #define GS40G_CS_POWER_DOWN 0x0002 #define GS40G_LINE_LB 0x4000 +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags for SFP modules compatible with ETH up to 1Gb */ +struct e1000_sfp_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + #endif diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 7876240fa74e..4e54f847ac92 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -142,6 +142,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; u32 status; if (hw->phy.media_type == e1000_media_type_copper) { @@ -181,30 +183,22 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->phy_address = hw->phy.addr; ecmd->transceiver = XCVR_INTERNAL; } else { - ecmd->supported = (SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full | - SUPPORTED_FIBRE | + ecmd->supported = (SUPPORTED_FIBRE | SUPPORTED_Autoneg | SUPPORTED_Pause); - if (hw->mac.type == 
e1000_i354) - ecmd->supported |= SUPPORTED_2500baseX_Full; - ecmd->advertising = ADVERTISED_FIBRE; - - switch (adapter->link_speed) { - case SPEED_2500: - ecmd->advertising = ADVERTISED_2500baseX_Full; - break; - case SPEED_1000: - ecmd->advertising = ADVERTISED_1000baseT_Full; - break; - case SPEED_100: - ecmd->advertising = ADVERTISED_100baseT_Full; - break; - default: - break; + if (hw->mac.type == e1000_i354) { + ecmd->supported |= SUPPORTED_2500baseX_Full; + ecmd->advertising |= ADVERTISED_2500baseX_Full; + } + if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) { + ecmd->supported |= SUPPORTED_1000baseT_Full; + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } + if (eth_flags->e100_base_fx) { + ecmd->supported |= SUPPORTED_100baseT_Full; + ecmd->advertising |= ADVERTISED_100baseT_Full; } - if (hw->mac.autoneg == 1) ecmd->advertising |= ADVERTISED_Autoneg; -- cgit v1.2.3 From 373e6978f9c5c05af3b3ec4cd0295b0bfbe11644 Mon Sep 17 00:00:00 2001 From: "Akeem G. Abodunrin" Date: Fri, 29 Mar 2013 15:22:17 +0000 Subject: igb: SerDes flow control setting This path allows users to get appropriate flow control setting on SerDes devices, based on original implementation for Copper devices. Also, since 100baseFX does not support setting flow control, so exclude it from the setting mechanism. Signed-off-by: Akeem G. Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_ethtool.c | 34 ++++++++++++++++------------ 1 file changed, 19 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 4e54f847ac92..7b25ee2e81c0 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -164,21 +164,6 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->advertising |= hw->phy.autoneg_advertised; } - if (hw->mac.autoneg != 1) - ecmd->advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - - if (hw->fc.requested_mode == e1000_fc_full) - ecmd->advertising |= ADVERTISED_Pause; - else if (hw->fc.requested_mode == e1000_fc_rx_pause) - ecmd->advertising |= (ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - else if (hw->fc.requested_mode == e1000_fc_tx_pause) - ecmd->advertising |= ADVERTISED_Asym_Pause; - else - ecmd->advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - ecmd->port = PORT_TP; ecmd->phy_address = hw->phy.addr; ecmd->transceiver = XCVR_INTERNAL; @@ -206,6 +191,21 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->transceiver = XCVR_EXTERNAL; } + if (hw->mac.autoneg != 1) + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + + if (hw->fc.requested_mode == e1000_fc_full) + ecmd->advertising |= ADVERTISED_Pause; + else if (hw->fc.requested_mode == e1000_fc_rx_pause) + ecmd->advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + else if (hw->fc.requested_mode == e1000_fc_tx_pause) + ecmd->advertising |= ADVERTISED_Asym_Pause; + else + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + status = rd32(E1000_STATUS); if (status & E1000_STATUS_LU) { @@ -386,6 +386,10 @@ static int igb_set_pauseparam(struct net_device *netdev, struct e1000_hw *hw = &adapter->hw; int retval = 0; + /* 100basefx does not support setting link flow control */ + if (hw->dev_spec._82575.eth_flags.e100_base_fx) + return -EINVAL; + adapter->fc_autoneg = pause->autoneg; while 
(test_and_set_bit(__IGB_RESETTING, &adapter->state)) -- cgit v1.2.3 From 2a0a0f1ea27ac39afd25c741a1ccc53bc5530acf Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Thu, 25 Apr 2013 17:22:34 +0000 Subject: igb: Fix set_ethtool function to call update nvm for entire image This patch fixes a problem where we were only checking to update checksum on first part of nvm image. Newer parts have multiple checksum fields and checksum function will accommodate that as long as we call it in the first place for any changes made. Signed-off-by: Carolyn Wyborny Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_ethtool.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 7b25ee2e81c0..85fe7b52f435 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -811,10 +811,8 @@ static int igb_set_eeprom(struct net_device *netdev, ret_val = hw->nvm.ops.write(hw, first_word, last_word - first_word + 1, eeprom_buff); - /* Update the checksum over the first part of the EEPROM if needed - * and flush shadow RAM for 82573 controllers - */ - if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) hw->nvm.ops.update(hw); igb_set_fw_version(adapter); -- cgit v1.2.3 From 41f149a285da21529bc9a0bad323df53b2f17b16 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Tue, 30 Apr 2013 00:21:32 +0000 Subject: igb: Fix possible panic caused by Rx traffic arrival while interface is down This patch reorders disabling napi and irqs during igb_down. This is done to avoid possible panic's found in other Intel drivers when Rx traffic arrives while interface is going down. Signed-off-by: Carolyn Wyborny Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 2c8f7e04b02b..6a0c1b66ce54 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1667,10 +1667,13 @@ void igb_down(struct igb_adapter *adapter) wrfl(); msleep(10); - for (i = 0; i < adapter->num_q_vectors; i++) + igb_irq_disable(adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) { + napi_synchronize(&(adapter->q_vector[i]->napi)); napi_disable(&(adapter->q_vector[i]->napi)); + } - igb_irq_disable(adapter); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); -- cgit v1.2.3 From 6f8b916065596d80843bb7d4f601ef72b3223c54 Mon Sep 17 00:00:00 2001 From: "Akeem G. Abodunrin" Date: Wed, 1 May 2013 05:44:45 +0000 Subject: igb: Implementation of i210/i211 LED support This patch fixes LED issues with i210 and i211 devices, due to changes in the device registers. 
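(Illustration only, not part of this patch: the ID_LED default word changed below packs one 4-bit behaviour code per LED, LED0 in the lowest nibble, which is why the macro is built from three shifted fields. A minimal user-space sketch of how such a word decodes, with assumed mode values, could look like the following.)

#include <stdio.h>

/* Assumed example encodings for the per-LED behaviour codes; the real
 * values live in the igb/e1000 headers and are not copied here.
 */
#define ID_LED_OFF1_ON2   0x8
#define ID_LED_OFF1_OFF2  0x9
#define ID_LED_DEF1_DEF2  0x1

/* Same packing scheme as the macro touched by this patch. */
#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
			     (ID_LED_DEF1_DEF2 << 4) | \
			     (ID_LED_OFF1_OFF2))

int main(void)
{
	unsigned int data = ID_LED_DEFAULT_I210;
	int i;

	/* One 4-bit behaviour field per LED, LED0 in the lowest nibble. */
	for (i = 0; i < 3; i++)
		printf("LED%d default mode: 0x%x\n", i, (data >> (i * 4)) & 0xF);

	return 0;
}
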
Signed-off-by: Akeem G Abodunrin Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_i210.h | 6 +++--- drivers/net/ethernet/intel/igb/e1000_mac.c | 8 +++++++- 2 files changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index bfc08e05c907..5caa332e7556 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -82,11 +82,11 @@ enum E1000_INVM_STRUCTURE_TYPE { #define E1000_INVM_MAJOR_SHIFT 4 #define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ - (ID_LED_OFF1_OFF2 << 4) | \ - (ID_LED_DEF1_DEF2)) + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_OFF2)) #define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ (ID_LED_DEF1_DEF2 << 4) | \ - (ID_LED_DEF1_DEF2)) + (ID_LED_OFF1_ON2)) /* NVM offset defaults for i211 device */ #define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 660b7addfbb0..bab556a47fcc 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -1332,7 +1332,13 @@ s32 igb_id_led_init(struct e1000_hw *hw) u16 data, i, temp; const u16 led_mask = 0x0F; - ret_val = igb_valid_led_default(hw, &data); + /* i210 and i211 devices have different LED mechanism */ + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) + ret_val = igb_valid_led_default_i210(hw, &data); + else + ret_val = igb_valid_led_default(hw, &data); + if (ret_val) goto out; -- cgit v1.2.3 From f7727c53094bd1122351a14a2087585f46009c04 Mon Sep 17 00:00:00 2001 From: "Akeem G. Abodunrin" Date: Wed, 15 May 2013 07:41:30 +0000 Subject: igb: Removed unused i2c function This patch removes unused i2c function definition. Signed-off-by: Akeem G Abodunrin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index a90570755ddc..15ea8dc9dad3 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -322,11 +322,6 @@ static inline int igb_desc_unused(struct igb_ring *ring) return ring->count + ring->next_to_clean - ring->next_to_use - 1; } -struct igb_i2c_client_list { - struct i2c_client *client; - struct igb_i2c_client_list *next; -}; - #ifdef CONFIG_IGB_HWMON #define IGB_HWMON_TYPE_LOC 0 -- cgit v1.2.3 From e8915bebb4b33c63d6e08161edc3fbda32a840a7 Mon Sep 17 00:00:00 2001 From: Amir Hanania Date: Thu, 18 Apr 2013 04:23:52 +0000 Subject: IXGBE: Set the SW prio_tc values at initialization to the HW setting. Set the SW prio_tc values at initialization to the HW setting. Setting the SW prio_tc default values to be the HW setting by reading the rtrup2tc register. For any TC change we need to reset the device. This will remove the need to reset the device at the first time we call ixgbe_dcbnl_ieee_setets. 
Signed-off-by: Amir Hanania Tested-by: Jack Morgan Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 23 ++++++++++++++++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h | 2 ++ drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 3 +++ 4 files changed, 29 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 1f2c805684dd..e055e000131b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -380,3 +380,26 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, } return 0; } + +static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map) +{ + u32 reg, i; + + reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + for (i = 0; i < MAX_USER_PRIORITY; i++) + map[i] = IXGBE_RTRUP2TC_UP_MASK & + (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); + return; +} + +void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) +{ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + ixgbe_dcb_read_rtrup2tc_82599(hw, map); + break; + default: + break; + } +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h index 1634de8b627f..fc0a2dd52499 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -159,6 +159,8 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio); s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); +void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map); + /* DCB definitions for credit calculation */ #define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */ #define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h index a4ef07631d1e..d71d9ce3e394 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -45,6 +45,7 @@ /* Receive UP2TC mapping */ #define IXGBE_RTRUP2TC_UP_SHIFT 3 +#define IXGBE_RTRUP2TC_UP_MASK 7 /* Transmit UP2TC mapping */ #define IXGBE_RTTUP2TC_UP_SHIFT 3 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index f3d68f9696ba..edd89a1ef27f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -554,6 +554,9 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) adapter->ixgbe_ieee_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS; + /* if possible update UP2TC mappings from HW */ + ixgbe_dcb_read_rtrup2tc(&adapter->hw, + adapter->ixgbe_ieee_ets->prio_tc); } for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { -- cgit v1.2.3 From 61c951503be1935f24760ae6c43483c5c9529646 Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:42:06 +0000 Subject: net/ethernet/silan/sc92031: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/silan/sc92031.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c index 28f7268f1b88..5eb933c97bba 100644 --- a/drivers/net/ethernet/silan/sc92031.c +++ b/drivers/net/ethernet/silan/sc92031.c @@ -1578,19 +1578,7 @@ static struct pci_driver sc92031_pci_driver = { .resume = sc92031_resume, }; -static int __init sc92031_init(void) -{ - return pci_register_driver(&sc92031_pci_driver); -} - -static void __exit sc92031_exit(void) -{ - pci_unregister_driver(&sc92031_pci_driver); -} - -module_init(sc92031_init); -module_exit(sc92031_exit); - +module_pci_driver(sc92031_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cesar Eduardo Barros "); MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver"); -- cgit v1.2.3 From 2c21d6c98813829fad79b17460fd289109a77099 Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:42:07 +0000 Subject: net/ethernet/atheros/atl1c/atl1c_main: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 25 +------------------------ 1 file changed, 1 insertion(+), 24 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 0ba900762b13..786a87483298 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -2755,27 +2755,4 @@ static struct pci_driver atl1c_driver = { .driver.pm = &atl1c_pm_ops, }; -/** - * atl1c_init_module - Driver Registration Routine - * - * atl1c_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - */ -static int __init atl1c_init_module(void) -{ - return pci_register_driver(&atl1c_driver); -} - -/** - * atl1c_exit_module - Driver Exit Cleanup Routine - * - * atl1c_exit_module is called just before the driver is removed - * from memory. - */ -static void __exit atl1c_exit_module(void) -{ - pci_unregister_driver(&atl1c_driver); -} - -module_init(atl1c_init_module); -module_exit(atl1c_exit_module); +module_pci_driver(atl1c_driver); -- cgit v1.2.3 From 687df5d489cd1ddf7d2d231bd824f6b687bfb26b Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:42:08 +0000 Subject: net/ethernet/atheros/atl1e/atl1e_main: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 25 +------------------------ 1 file changed, 1 insertion(+), 24 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 0688bb82b442..895f5377ad1b 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -2489,27 +2489,4 @@ static struct pci_driver atl1e_driver = { .err_handler = &atl1e_err_handler }; -/** - * atl1e_init_module - Driver Registration Routine - * - * atl1e_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - */ -static int __init atl1e_init_module(void) -{ - return pci_register_driver(&atl1e_driver); -} - -/** - * atl1e_exit_module - Driver Exit Cleanup Routine - * - * atl1e_exit_module is called just before the driver is removed - * from memory. - */ -static void __exit atl1e_exit_module(void) -{ - pci_unregister_driver(&atl1e_driver); -} - -module_init(atl1e_init_module); -module_exit(atl1e_exit_module); +module_pci_driver(atl1e_driver); -- cgit v1.2.3 From c996f4e8f053ddab28fa196e5b18bdd93e8ee3a2 Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:42:09 +0000 Subject: net/ethernet/atheros/atlx/atl1: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/atlx/atl1.c | 27 ++------------------------- 1 file changed, 2 insertions(+), 25 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index fa0915f3999b..538211d6f7d9 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -3145,31 +3145,6 @@ static struct pci_driver atl1_driver = { .driver.pm = &atl1_pm_ops, }; -/** - * atl1_exit_module - Driver Exit Cleanup Routine - * - * atl1_exit_module is called just before the driver is removed - * from memory. - */ -static void __exit atl1_exit_module(void) -{ - pci_unregister_driver(&atl1_driver); -} - -/** - * atl1_init_module - Driver Registration Routine - * - * atl1_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - */ -static int __init atl1_init_module(void) -{ - return pci_register_driver(&atl1_driver); -} - -module_init(atl1_init_module); -module_exit(atl1_exit_module); - struct atl1_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; @@ -3705,3 +3680,5 @@ static const struct ethtool_ops atl1_ethtool_ops = { .get_ethtool_stats = atl1_get_ethtool_stats, .get_sset_count = atl1_get_sset_count, }; + +module_pci_driver(atl1_driver); -- cgit v1.2.3 From 52a58f9df12ab03bda432b398a4bdfcc9ed2b414 Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:42:10 +0000 Subject: net/ethernet/sis/sis190: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sis/sis190.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 9a9c379420d1..02df0894690d 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -1934,15 +1934,4 @@ static struct pci_driver sis190_pci_driver = { .remove = sis190_remove_one, }; -static int __init sis190_init_module(void) -{ - return pci_register_driver(&sis190_pci_driver); -} - -static void __exit sis190_cleanup_module(void) -{ - pci_unregister_driver(&sis190_pci_driver); -} - -module_init(sis190_init_module); -module_exit(sis190_cleanup_module); +module_pci_driver(sis190_pci_driver); -- cgit v1.2.3 From 31d60ebfbe6792068e8951741f92f40bdea97837 Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:42:11 +0000 Subject: net/ethernet/dec/tulip/xircom_cb: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Reviewed-by: Grant Grundler Signed-off-by: David S. Miller --- drivers/net/ethernet/dec/tulip/xircom_cb.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c index cdbcd1643141..9b84cb04fe5f 100644 --- a/drivers/net/ethernet/dec/tulip/xircom_cb.c +++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c @@ -1171,16 +1171,4 @@ investigate_write_descriptor(struct net_device *dev, } } -static int __init xircom_init(void) -{ - return pci_register_driver(&xircom_ops); -} - -static void __exit xircom_exit(void) -{ - pci_unregister_driver(&xircom_ops); -} - -module_init(xircom_init) -module_exit(xircom_exit) - +module_pci_driver(xircom_ops); -- cgit v1.2.3 From b6f5721087e72d11ff3daab565afddc5918c024b Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:42:12 +0000 Subject: net/ethernet/toshiba/tc35815: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/toshiba/tc35815.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index fe256094db35..a971b9cca564 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -2209,18 +2209,6 @@ MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps"); module_param_named(duplex, options.duplex, int, 0); MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full"); -static int __init tc35815_init_module(void) -{ - return pci_register_driver(&tc35815_pci_driver); -} - -static void __exit tc35815_cleanup_module(void) -{ - pci_unregister_driver(&tc35815_pci_driver); -} - -module_init(tc35815_init_module); -module_exit(tc35815_cleanup_module); - +module_pci_driver(tc35815_pci_driver); MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver"); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 43d426c3f938b9ac21f452fcd22956ed2f526f4f Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:42:13 +0000 Subject: net/ethernet/icplus/ipg: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Signed-off-by: David S. Miller --- drivers/net/ethernet/icplus/ipg.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c index 068d78151658..1fde90b96685 100644 --- a/drivers/net/ethernet/icplus/ipg.c +++ b/drivers/net/ethernet/icplus/ipg.c @@ -2298,15 +2298,4 @@ static struct pci_driver ipg_pci_driver = { .remove = ipg_remove, }; -static int __init ipg_init_module(void) -{ - return pci_register_driver(&ipg_pci_driver); -} - -static void __exit ipg_exit_module(void) -{ - pci_unregister_driver(&ipg_pci_driver); -} - -module_init(ipg_init_module); -module_exit(ipg_exit_module); +module_pci_driver(ipg_pci_driver); -- cgit v1.2.3 From e6d20fca76cfe4a36f8dbaac6b3847e6dad63e4c Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:42:14 +0000 Subject: net/ethernet/alteon/acenic: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/alteon/acenic.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index b7894f8af9d1..219be1bf3cfc 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -702,19 +702,6 @@ static struct pci_driver acenic_pci_driver = { .remove = acenic_remove_one, }; -static int __init acenic_init(void) -{ - return pci_register_driver(&acenic_pci_driver); -} - -static void __exit acenic_exit(void) -{ - pci_unregister_driver(&acenic_pci_driver); -} - -module_init(acenic_init); -module_exit(acenic_exit); - static void ace_free_descriptors(struct net_device *dev) { struct ace_private *ap = netdev_priv(dev); @@ -3199,3 +3186,5 @@ static int read_eeprom_byte(struct net_device *dev, unsigned long offset) ap->name, offset); goto out; } + +module_pci_driver(acenic_pci_driver); -- cgit v1.2.3 From 5a4123f39ef14fc7c043f1611ed3b20b69325146 Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:58:05 +0000 Subject: net/ethernet/broadcom/bnx2: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 5d204492c603..1a1b23eb13dc 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8764,18 +8764,4 @@ static struct pci_driver bnx2_pci_driver = { .err_handler = &bnx2_err_handler, }; -static int __init bnx2_init(void) -{ - return pci_register_driver(&bnx2_pci_driver); -} - -static void __exit bnx2_cleanup(void) -{ - pci_unregister_driver(&bnx2_pci_driver); -} - -module_init(bnx2_init); -module_exit(bnx2_cleanup); - - - +module_pci_driver(bnx2_pci_driver); -- cgit v1.2.3 From 8dbb0dc2cb16096efe998c576170e41dc6e8b64f Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:58:06 +0000 Subject: net/ethernet/broadcom/tg3: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Acked-by: Nithin Nayak Sujir Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/tg3.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index fb06aa120293..4f0e3fe99e17 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -17804,15 +17804,4 @@ static struct pci_driver tg3_driver = { .driver.pm = &tg3_pm_ops, }; -static int __init tg3_init(void) -{ - return pci_register_driver(&tg3_driver); -} - -static void __exit tg3_cleanup(void) -{ - pci_unregister_driver(&tg3_driver); -} - -module_init(tg3_init); -module_exit(tg3_cleanup); +module_pci_driver(tg3_driver); -- cgit v1.2.3 From e0fc4441f98e19d8120f2a385c9e75bc55369b81 Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:58:07 +0000 Subject: net/ethernet/sgi/ioc3-eth: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Signed-off-by: David S. Miller --- drivers/net/ethernet/sgi/ioc3-eth.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 7ed08c32a9c5..ffa78432164d 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -1398,16 +1398,6 @@ static struct pci_driver ioc3_driver = { .remove = ioc3_remove_one, }; -static int __init ioc3_init_module(void) -{ - return pci_register_driver(&ioc3_driver); -} - -static void __exit ioc3_cleanup_module(void) -{ - pci_unregister_driver(&ioc3_driver); -} - static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long data; @@ -1677,9 +1667,7 @@ static void ioc3_set_multicast_list(struct net_device *dev) netif_wake_queue(dev); /* Let us get going again. */ } +module_pci_driver(ioc3_driver); MODULE_AUTHOR("Ralf Baechle "); MODULE_DESCRIPTION("SGI IOC3 Ethernet driver"); MODULE_LICENSE("GPL"); - -module_init(ioc3_init_module); -module_exit(ioc3_cleanup_module); -- cgit v1.2.3 From 70a611debe964ae508d67528d82ca7fa0715f5a3 Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:58:08 +0000 Subject: net/ethernet/qlogic/qlge/qlge_main: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 50235d201592..e04d471acb10 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4945,15 +4945,4 @@ static struct pci_driver qlge_driver = { .err_handler = &qlge_err_handler }; -static int __init qlge_init_module(void) -{ - return pci_register_driver(&qlge_driver); -} - -static void __exit qlge_exit(void) -{ - pci_unregister_driver(&qlge_driver); -} - -module_init(qlge_init_module); -module_exit(qlge_exit); +module_pci_driver(qlge_driver); -- cgit v1.2.3 From 5119ad0b7388641815bad0bacdc5133532d24685 Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:58:09 +0000 Subject: net/ethernet/sun/sungem: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/sungem.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 5f3f9d52757d..e62df2b81302 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -3028,15 +3028,4 @@ static struct pci_driver gem_driver = { #endif /* CONFIG_PM */ }; -static int __init gem_init(void) -{ - return pci_register_driver(&gem_driver); -} - -static void __exit gem_cleanup(void) -{ - pci_unregister_driver(&gem_driver); -} - -module_init(gem_init); -module_exit(gem_cleanup); +module_pci_driver(gem_driver); -- cgit v1.2.3 From a46e6ccdd1cd557cbb9a24022ee027d09a4bd625 Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 12:58:10 +0000 Subject: net/ethernet/amd/amd8111e: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. Signed-off-by: Peter Huewe Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/amd8111e.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 8e6b665a6726..bc71aec1159d 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1981,15 +1981,4 @@ static struct pci_driver amd8111e_driver = { .resume = amd8111e_resume }; -static int __init amd8111e_init(void) -{ - return pci_register_driver(&amd8111e_driver); -} - -static void __exit amd8111e_cleanup(void) -{ - pci_unregister_driver(&amd8111e_driver); -} - -module_init(amd8111e_init); -module_exit(amd8111e_cleanup); +module_pci_driver(amd8111e_driver); -- cgit v1.2.3 From 65c21912b5b977f32a12336e87fe32f36105b981 Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 13:42:55 +0000 Subject: net/ethernet/chelsio/cxgb/cxgb2: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. The name of the pci_driver struct had to be changed in order to prevent a build failure. Signed-off-by: Peter Huewe Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 9624cfe7df57..d7048db9863d 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -1351,22 +1351,11 @@ static void remove_one(struct pci_dev *pdev) t1_sw_reset(pdev); } -static struct pci_driver driver = { +static struct pci_driver cxgb_pci_driver = { .name = DRV_NAME, .id_table = t1_pci_tbl, .probe = init_one, .remove = remove_one, }; -static int __init t1_init_module(void) -{ - return pci_register_driver(&driver); -} - -static void __exit t1_cleanup_module(void) -{ - pci_unregister_driver(&driver); -} - -module_init(t1_init_module); -module_exit(t1_cleanup_module); +module_pci_driver(cxgb_pci_driver); -- cgit v1.2.3 From 4f45c40f2c018e5c278988521a74fd5ee06fb819 Mon Sep 17 00:00:00 2001 From: Peter Hüwe Date: Tue, 21 May 2013 13:42:56 +0000 Subject: net/ethernet/nvidia/forcedeth: Use module_pci_driver to register driver Removing some boilerplate by using module_pci_driver instead of calling register and unregister in the otherwise empty init/exit functions. The name of the pci_driver struct had to be changed in order to prevent a build failure. Signed-off-by: Peter Huewe Signed-off-by: David S. Miller --- drivers/net/ethernet/nvidia/forcedeth.c | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index b003fe53c8e2..098b96dad66f 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -6340,7 +6340,7 @@ static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { {0,}, }; -static struct pci_driver driver = { +static struct pci_driver forcedeth_pci_driver = { .name = DRV_NAME, .id_table = pci_tbl, .probe = nv_probe, @@ -6349,16 +6349,6 @@ static struct pci_driver driver = { .driver.pm = NV_PM_OPS, }; -static int __init init_nic(void) -{ - return pci_register_driver(&driver); -} - -static void __exit exit_nic(void) -{ - pci_unregister_driver(&driver); -} - module_param(max_interrupt_work, int, 0); MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt"); module_param(optimization_mode, int, 0); @@ -6379,11 +6369,8 @@ module_param(debug_tx_timeout, bool, 0); MODULE_PARM_DESC(debug_tx_timeout, "Dump tx related registers and ring when tx_timeout happens"); +module_pci_driver(forcedeth_pci_driver); MODULE_AUTHOR("Manfred Spraul "); MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); MODULE_LICENSE("GPL"); - MODULE_DEVICE_TABLE(pci, pci_tbl); - -module_init(init_nic); -module_exit(exit_nic); -- cgit v1.2.3 From f1238261891bea67da845688b7684a3444c61bd9 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 21 May 2013 00:22:37 +0000 Subject: net: smsc911x: don't artificially limit build Currently the SMSC911X driver may only be built for a specific set of architectures, being limited to do so by a Kconfig depends line. This means that if a platform wishes to use the driver, its architecture must be added to the list explicitly, introducing pointless churn. This may have been due to the driver's use of the {read,write}s{b,w,l} functions, which have since been replaced with the more standard io{read,write}{8,16,32}_rep. 
We can instead depend on HAS_IOMEM, which should prevent build issues while allowing the driver to be built for currently unlisted architectures, including x86 and arm64. This patch removes the explicit list of architectures from the driver's depend line, and replaces it with a dependency on HAS_IOMEM. Signed-off-by: Mark Rutland Cc: David S. Miller Cc: Arnd Bergmann Cc: netdev@vger.kernel.org Signed-off-by: David S. Miller --- drivers/net/ethernet/smsc/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index bb4c1674ff99..ff9e99474039 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -97,7 +97,7 @@ config SMC911X config SMSC911X tristate "SMSC LAN911x/LAN921x families embedded ethernet support" - depends on (ARM || SUPERH || BLACKFIN || MIPS || MN10300) + depends on HAS_IOMEM select CRC32 select NET_CORE select MII -- cgit v1.2.3 From 9313eb4be3220054c07a7c34c38764ea1ebcd220 Mon Sep 17 00:00:00 2001 From: Jay Fenlason Date: Tue, 21 May 2013 04:21:28 +0000 Subject: cxgb3: Fix warning about using rcu_dereference when not in a rcu-locked section It is about using rcu_dereference() when not in a rcu-locked section. It only happens on initialization hence fix the initialization to not rcu_dereference() Signed-off-by: Jay Fenlason Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 0c96e5fe99cc..4058b856eb71 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -1246,6 +1246,7 @@ int cxgb3_offload_activate(struct adapter *adapter) struct tid_range stid_range, tid_range; struct mtutab mtutab; unsigned int l2t_capacity; + struct l2t_data *l2td; t = kzalloc(sizeof(*t), GFP_KERNEL); if (!t) @@ -1261,8 +1262,8 @@ int cxgb3_offload_activate(struct adapter *adapter) goto out_free; err = -ENOMEM; - RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity)); - if (!L2DATA(dev)) + l2td = t3_init_l2t(l2t_capacity); + if (!l2td) goto out_free; natids = min(tid_range.num / 2, MAX_ATIDS); @@ -1279,6 +1280,7 @@ int cxgb3_offload_activate(struct adapter *adapter) INIT_LIST_HEAD(&t->list_node); t->dev = dev; + RCU_INIT_POINTER(dev->l2opt, l2td); T3C_DATA(dev) = t; dev->recv = process_rx; dev->neigh_update = t3_l2t_update; @@ -1294,8 +1296,7 @@ int cxgb3_offload_activate(struct adapter *adapter) return 0; out_free_l2t: - t3_free_l2t(L2DATA(dev)); - RCU_INIT_POINTER(dev->l2opt, NULL); + t3_free_l2t(l2td); out_free: kfree(t); return err; -- cgit v1.2.3 From f83331bab149e29fa2c49cf102c0cd8c3f1ce9f9 Mon Sep 17 00:00:00 2001 From: Santosh Rastapur Date: Tue, 21 May 2013 04:21:29 +0000 Subject: cxgb3: Check and handle the dma mapping errors This patch adds checks at approprate places whether *dma_map*() call has succeeded or not. Signed-off-by: Santosh Rastapur Reviewed-by: Jay Fenlason Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb3/sge.c | 105 ++++++++++++++++++++++++------- 1 file changed, 82 insertions(+), 23 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index f12e6b85a653..2fd773e267dc 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -455,6 +455,11 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, q->pg_chunk.offset = 0; mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 0, q->alloc_size, PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { + __free_pages(q->pg_chunk.page, order); + q->pg_chunk.page = NULL; + return -EIO; + } q->pg_chunk.mapping = mapping; } sd->pg_chunk = q->pg_chunk; @@ -949,40 +954,75 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) return flits_to_desc(flits); } + +/* map_skb - map a packet main body and its page fragments + * @pdev: the PCI device + * @skb: the packet + * @addr: placeholder to save the mapped addresses + * + * map the main body of an sk_buff and its page fragments, if any. + */ +static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, + dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, *addr)) + goto out_err; + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + + for (fp = si->frags; fp < end; fp++) { + *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), + DMA_TO_DEVICE); + if (pci_dma_mapping_error(pdev, *addr)) + goto unwind; + } + return 0; + +unwind: + while (fp-- > si->frags) + dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), + DMA_TO_DEVICE); + + pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); +out_err: + return -ENOMEM; +} + /** - * make_sgl - populate a scatter/gather list for a packet + * write_sgl - populate a scatter/gather list for a packet * @skb: the packet * @sgp: the SGL to populate * @start: start address of skb main body data to include in the SGL * @len: length of skb main body data to include in the SGL - * @pdev: the PCI device + * @addr: the list of the mapped addresses * - * Generates a scatter/gather list for the buffers that make up a packet + * Copies the scatter/gather list for the buffers that make up a packet * and returns the SGL size in 8-byte words. The caller must size the SGL * appropriately. 
*/ -static inline unsigned int make_sgl(const struct sk_buff *skb, +static inline unsigned int write_sgl(const struct sk_buff *skb, struct sg_ent *sgp, unsigned char *start, - unsigned int len, struct pci_dev *pdev) + unsigned int len, const dma_addr_t *addr) { - dma_addr_t mapping; - unsigned int i, j = 0, nfrags; + unsigned int i, j = 0, k = 0, nfrags; if (len) { - mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); sgp->len[0] = cpu_to_be32(len); - sgp->addr[0] = cpu_to_be64(mapping); - j = 1; + sgp->addr[j++] = cpu_to_be64(addr[k++]); } nfrags = skb_shinfo(skb)->nr_frags; for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), - DMA_TO_DEVICE); sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); - sgp->addr[j] = cpu_to_be64(mapping); + sgp->addr[j] = cpu_to_be64(addr[k++]); j ^= 1; if (j == 0) ++sgp; @@ -1138,7 +1178,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, const struct port_info *pi, unsigned int pidx, unsigned int gen, struct sge_txq *q, unsigned int ndesc, - unsigned int compl) + unsigned int compl, const dma_addr_t *addr) { unsigned int flits, sgl_flits, cntrl, tso_info; struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; @@ -1196,7 +1236,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, } sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); + sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), @@ -1227,6 +1267,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) struct netdev_queue *txq; struct sge_qset *qs; struct sge_txq *q; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* * The chip min packet length is 9 octets but play safe and reject @@ -1255,6 +1296,11 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } + if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + q->in_use += ndesc; if (unlikely(credits - ndesc < q->stop_thres)) { t3_stop_tx_queue(txq, qs, q); @@ -1312,7 +1358,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!skb_shared(skb))) skb_orphan(skb); - write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); + write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); check_ring_tx_db(adap, q); return NETDEV_TX_OK; } @@ -1578,7 +1624,8 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, */ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, struct sge_txq *q, unsigned int pidx, - unsigned int gen, unsigned int ndesc) + unsigned int gen, unsigned int ndesc, + const dma_addr_t *addr) { unsigned int sgl_flits, flits; struct work_request_hdr *from; @@ -1599,9 +1646,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, flits = skb_transport_offset(skb) / 8; sgp = ndesc == 1 ? 
(struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), + sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), skb->tail - skb->transport_header, - adap->pdev); + addr); if (need_skb_unmap()) { setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); skb->destructor = deferred_unmap_destructor; @@ -1659,6 +1706,11 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); goto again; } + if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { + spin_unlock(&q->lock); + return NET_XMIT_SUCCESS; + } + gen = q->gen; q->in_use += ndesc; pidx = q->pidx; @@ -1669,7 +1721,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); } spin_unlock(&q->lock); - write_ofld_wr(adap, skb, q, pidx, gen, ndesc); + write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); check_ring_tx_db(adap, q); return NET_XMIT_SUCCESS; } @@ -1687,6 +1739,7 @@ static void restart_offloadq(unsigned long data) struct sge_txq *q = &qs->txq[TXQ_OFLD]; const struct port_info *pi = netdev_priv(qs->netdev); struct adapter *adap = pi->adapter; + unsigned int written = 0; spin_lock(&q->lock); again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); @@ -1706,10 +1759,14 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); break; } + if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) + break; + gen = q->gen; q->in_use += ndesc; pidx = q->pidx; q->pidx += ndesc; + written += ndesc; if (q->pidx >= q->size) { q->pidx -= q->size; q->gen ^= 1; @@ -1717,7 +1774,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); __skb_unlink(skb, &q->sendq); spin_unlock(&q->lock); - write_ofld_wr(adap, skb, q, pidx, gen, ndesc); + write_ofld_wr(adap, skb, q, pidx, gen, ndesc, + (dma_addr_t *)skb->head); spin_lock(&q->lock); } spin_unlock(&q->lock); @@ -1727,8 +1785,9 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); set_bit(TXQ_LAST_PKT_DB, &q->flags); #endif wmb(); - t3_write_reg(adap, A_SG_KDOORBELL, - F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); + if (likely(written)) + t3_write_reg(adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); } /** -- cgit v1.2.3 From ffed61e6fd3a405e79f4d267f22acaee3267fb37 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Tue, 21 May 2013 05:44:26 +0000 Subject: fec: Use DIV_ROUND_UP macro Use the standard DIV_ROUND_UP macro in order to provide better readability. Signed-off-by: Fabio Estevam Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2f4b3e028012..6b5dc58c56d8 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -242,7 +242,7 @@ static void *swap_buffer(void *bufaddr, int len) int i; unsigned int *buf = bufaddr; - for (i = 0; i < (len + 3) / 4; i++, buf++) + for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++) *buf = cpu_to_be32(*buf); return bufaddr; -- cgit v1.2.3 From 3521b419cc21339537266cb1ab9934b041ca08e9 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 22 May 2013 21:21:49 +0000 Subject: bnx2x: Add Private Flags Support Utilize ethtool's callback `get_priv_flags' - shed more light on the feasibility of devices as storage interfaces. CC: Ben Hutchings Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Acked-by: Ben Hutchings Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2 + .../net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 49 ++++++++++++++++++---- 2 files changed, 44 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 3dba2a70a00e..3f4291056e80 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -2137,6 +2137,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define ATTN_HARD_WIRED_MASK 0xff00 #define ATTENTION_ID 4 +#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_SD(bp) || \ + IS_MF_FCOE_AFEX(bp)) /* stuff added to make the code fit 80Col */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index ce1a91618677..9e311c4b69f3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1921,6 +1921,19 @@ static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = { "link_test (online) " }; +enum { + BNX2X_PRI_FLAG_ISCSI, + BNX2X_PRI_FLAG_FCOE, + BNX2X_PRI_FLAG_STORAGE, + BNX2X_PRI_FLAG_LEN, +}; + +static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { + "iSCSI offload support", + "FCoE offload support", + "Storage only interface" +}; + static u32 bnx2x_eee_to_adv(u32 eee_adv) { u32 modes = 0; @@ -2978,32 +2991,47 @@ static int bnx2x_num_stat_queues(struct bnx2x *bp) static int bnx2x_get_sset_count(struct net_device *dev, int stringset) { struct bnx2x *bp = netdev_priv(dev); - int i, num_stats; + int i, num_strings = 0; switch (stringset) { case ETH_SS_STATS: if (is_multi(bp)) { - num_stats = bnx2x_num_stat_queues(bp) * - BNX2X_NUM_Q_STATS; + num_strings = bnx2x_num_stat_queues(bp) * + BNX2X_NUM_Q_STATS; } else - num_stats = 0; + num_strings = 0; if (IS_MF_MODE_STAT(bp)) { for (i = 0; i < BNX2X_NUM_STATS; i++) if (IS_FUNC_STAT(i)) - num_stats++; + num_strings++; } else - num_stats += BNX2X_NUM_STATS; + num_strings += BNX2X_NUM_STATS; - return num_stats; + return num_strings; case ETH_SS_TEST: return BNX2X_NUM_TESTS(bp); + case ETH_SS_PRIV_FLAGS: + return BNX2X_PRI_FLAG_LEN; + default: return -EINVAL; } } +static u32 bnx2x_get_private_flags(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 flags = 0; + + flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI; + flags |= (!(bp->flags & NO_FCOE_FLAG) ? 
1 : 0) << BNX2X_PRI_FLAG_FCOE; + flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE; + + return flags; +} + static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct bnx2x *bp = netdev_priv(dev); @@ -3045,6 +3073,12 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) start = 4; memcpy(buf, bnx2x_tests_str_arr + start, ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp)); + break; + + case ETH_SS_PRIV_FLAGS: + memcpy(buf, bnx2x_private_arr, + ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN); + break; } } @@ -3445,6 +3479,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .set_pauseparam = bnx2x_set_pauseparam, .self_test = bnx2x_self_test, .get_sset_count = bnx2x_get_sset_count, + .get_priv_flags = bnx2x_get_private_flags, .get_strings = bnx2x_get_strings, .set_phys_id = bnx2x_set_phys_id, .get_ethtool_stats = bnx2x_get_ethtool_stats, -- cgit v1.2.3 From 178135c11611c02161c5b64aeba341d777f2d64d Mon Sep 17 00:00:00 2001 From: Dmitry Kravkov Date: Wed, 22 May 2013 21:21:50 +0000 Subject: bnx2x: Link-flap avoidance in switch dependent mode As part of the previous unload flow, probed devices will reset the chip in order to clean the remains of the UNDI driver. As a result, it's possible for the FW to toggle the link. This toggling can prove fatal, as long periods without link can cause the filesystem mount to fail as the storage protocol timeouts. This has been observed against particular switches with long link re-establishment time. This patch informs FW during the reset period that the link should not be toggled - the FW will keep it alive until some interface will load and claim the link as its own. Signed-off-by: Dmitry Kravkov Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
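The three ethtool hooks wired up in the bnx2x private-flags patch above follow a fixed contract: bit N of the value returned by .get_priv_flags must line up with string N reported for ETH_SS_PRIV_FLAGS, and .get_sset_count reports how many such strings exist. A minimal sketch with a hypothetical foo driver (illustration only, not bnx2x code):

enum { FOO_PRIV_FLAG_A, FOO_PRIV_FLAG_LEN };

static const char foo_priv_strings[FOO_PRIV_FLAG_LEN][ETH_GSTRING_LEN] = {
	"feature-a enabled",
};

static int foo_get_sset_count(struct net_device *dev, int stringset)
{
	return stringset == ETH_SS_PRIV_FLAGS ? FOO_PRIV_FLAG_LEN : -EINVAL;
}

static void foo_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	if (stringset == ETH_SS_PRIV_FLAGS)
		memcpy(buf, foo_priv_strings, sizeof(foo_priv_strings));
}

static u32 foo_get_priv_flags(struct net_device *dev)
{
	return 1U << FOO_PRIV_FLAG_A;	/* a real driver derives this from device state */
}

Userspace reads the result with "ethtool --show-priv-flags <dev>".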
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2 ++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 10 ++++++++-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 2 ++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 19 ++++++++++++++++--- 4 files changed, 28 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 3f4291056e80..946450d6d988 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1937,6 +1937,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, void bnx2x_update_coalesce(struct bnx2x *bp); int bnx2x_get_cur_phy_idx(struct bnx2x *bp); +bool bnx2x_port_after_undi(struct bnx2x *bp); + static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, int wait) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 6cc5101fa96b..f4d1c08c67d3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2183,6 +2183,8 @@ alloc_mem_err: /* send load request to mcp and analyze response */ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) { + u32 param; + /* init fw_seq */ bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & @@ -2195,9 +2197,13 @@ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) DRV_PULSE_SEQ_MASK); BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); + param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA; + + if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) + param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA; + /* load request */ - (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, - DRV_MSG_CODE_LOAD_REQ_WITH_LFA); + (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); /* if mcp fails to respond we must abort */ if (!(*load_code)) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 12f00a40cdf0..5ef3f964e544 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1323,6 +1323,8 @@ struct drv_func_mb { #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002 #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a + #define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA 0x00002000 + u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b4c9dea93a53..649880d2bb1b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9780,6 +9780,21 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) return rc; } +bool bnx2x_port_after_undi(struct bnx2x *bp) +{ + struct bnx2x_prev_path_list *entry; + bool val; + + down(&bnx2x_prev_sem); + + entry = bnx2x_prev_path_get_entry(bp); + val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); + + up(&bnx2x_prev_sem); + + return val; +} + static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) { struct bnx2x_prev_path_list *tmp_list; @@ -10036,7 +10051,6 @@ static int bnx2x_prev_unload(struct bnx2x *bp) { int time_counter = 10; u32 rc, fw, hw_lock_reg, hw_lock_val; - struct bnx2x_prev_path_list *prev_list; BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); /* clear hw from errors which may have resulted from an 
interrupted @@ -10107,8 +10121,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) } /* Mark function if its port was used to boot from SAN */ - prev_list = bnx2x_prev_path_get_entry(bp); - if (prev_list && (prev_list->undi & (1 << BP_PORT(bp)))) + if (bnx2x_port_after_undi(bp)) bp->link_params.feature_config_flags |= FEATURE_CONFIG_BOOT_FROM_SAN; -- cgit v1.2.3 From e68072ef46bcfb609ea16d45f115896a0dfbd3ff Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 22 May 2013 21:21:51 +0000 Subject: bnx2x: Wait for MCP validity during AER During PCIe advanced error recovery, the secondary bus reset will cause FW to reset; This will cause the shared memory between it and the driver to be invalidated. During the driver's recovery flow, the driver should not make any assumption on the validity of that memory and instead re-initialize it. This also removes a redundant re-initialization of a previously initialized mutex. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 649880d2bb1b..7ed9cdfa115e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12760,19 +12760,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) return 0; } -static void bnx2x_eeh_recover(struct bnx2x *bp) -{ - u32 val; - - mutex_init(&bp->port.phy_mutex); - - - val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); - if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) - BNX2X_ERR("BAD MCP validity signature\n"); -} - /** * bnx2x_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -12841,6 +12828,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) if (netif_running(dev)) { BNX2X_ERR("IO slot reset --> driver unload\n"); + + /* MCP should have been reset; Need to wait for validity */ + bnx2x_init_shmem(bp); + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { u32 v; @@ -12899,8 +12890,6 @@ static void bnx2x_io_resume(struct pci_dev *pdev) rtnl_lock(); - bnx2x_eeh_recover(bp); - bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK; -- cgit v1.2.3 From 84b6f7456e8b88507dd85f988a9d350eb5af0e46 Mon Sep 17 00:00:00 2001 From: Yaniv Rosner Date: Wed, 22 May 2013 21:21:52 +0000 Subject: bnx2x: Enable `set_phys_id' for all functions In Multi-function mode, all functions should be able to utilize said function; There's no reason why only the link owner should be able to do so. Signed-off-by: Yaniv Rosner Signed-off-by: Yuval Mintz Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 9e311c4b69f3..32aa88f520dc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3146,11 +3146,6 @@ static int bnx2x_set_phys_id(struct net_device *dev, return -EAGAIN; } - if (!bp->port.pmf) { - DP(BNX2X_MSG_ETHTOOL, "Interface is not pmf\n"); - return -EOPNOTSUPP; - } - switch (state) { case ETHTOOL_ID_ACTIVE: return 1; /* cycle on/off once per second */ -- cgit v1.2.3 From ee9c799c231324de681eb21e06d8bf4842768b75 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Wed, 22 May 2013 23:04:55 +0000 Subject: be2net: refactor HW workarounds in be_xmit() Refactor all TX HW workarounds into a separate routine called be_xmit_workarounds(). Signed-off-by: Sathya Perla Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 62 ++++++++++++++++++----------- 1 file changed, 38 insertions(+), 24 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a444110b060f..7892361af0ac 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -842,32 +842,27 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb) return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; } -static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) +static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, + struct sk_buff *skb) { - return BE3_chip(adapter) && - be_ipv6_exthdr_check(skb); + return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); } -static netdev_tx_t be_xmit(struct sk_buff *skb, - struct net_device *netdev) +static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, + struct sk_buff *skb, + bool *skip_hw_vlan) { - struct be_adapter *adapter = netdev_priv(netdev); - struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; - struct be_queue_info *txq = &txo->q; - struct iphdr *ip = NULL; - u32 wrb_cnt = 0, copied = 0; - u32 start = txq->head, eth_hdr_len; - bool dummy_wrb, stopped = false; - bool skip_hw_vlan = false; struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; - - eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? - VLAN_ETH_HLEN : ETH_HLEN; + unsigned int eth_hdr_len; + struct iphdr *ip; /* For padded packets, BE HW modifies tot_len field in IP header * incorrecly when VLAN tag is inserted by HW. */ - if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) { + eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? + VLAN_ETH_HLEN : ETH_HLEN; + if (skb->len <= 60 && vlan_tx_tag_present(skb) && + is_ipv4_pkt(skb)) { ip = (struct iphdr *)ip_hdr(skb); pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); } @@ -877,15 +872,15 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, */ if ((adapter->function_mode & UMC_ENABLED) && veh->h_vlan_proto == htons(ETH_P_8021Q)) - skip_hw_vlan = true; + *skip_hw_vlan = true; /* HW has a bug wherein it will calculate CSUM for VLAN * pkts even though it is disabled. * Manually insert VLAN in pkt. 
*/ if (skb->ip_summed != CHECKSUM_PARTIAL && - vlan_tx_tag_present(skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan); + vlan_tx_tag_present(skb)) { + skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); if (unlikely(!skb)) goto tx_drop; } @@ -895,8 +890,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, * skip HW tagging is not enabled by FW. */ if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) && - (adapter->pvid || adapter->qnq_vid) && - !qnq_async_evt_rcvd(adapter))) + (adapter->pvid || adapter->qnq_vid) && + !qnq_async_evt_rcvd(adapter))) goto tx_drop; /* Manual VLAN tag insertion to prevent: @@ -907,11 +902,31 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, */ if (be_ipv6_tx_stall_chk(adapter, skb) && be_vlan_tag_tx_chk(adapter, skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan); + skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); if (unlikely(!skb)) goto tx_drop; } + return skb; +tx_drop: + dev_kfree_skb_any(skb); + return NULL; +} + +static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; + struct be_queue_info *txq = &txo->q; + bool dummy_wrb, stopped = false; + u32 wrb_cnt = 0, copied = 0; + bool skip_hw_vlan = false; + u32 start = txq->head; + + skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); + if (!skb) + return NETDEV_TX_OK; + wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb, @@ -941,7 +956,6 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, txq->head = start; dev_kfree_skb_any(skb); } -tx_drop: return NETDEV_TX_OK; } -- cgit v1.2.3 From 2e460fc07ef0ebadfb593b564a8c7fa6559ac3d7 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Thu, 23 May 2013 11:11:22 +0000 Subject: tg3: Split APE driver state change out of boot reset signature update Unlike the boot signature that needs to be set before every reset, the ape state only needs to be updated to tell the firmware that the driver is now taking/releasing control of the hardware. Move the calls to tg3_ape_driver_state_change() to better, more appropriate places. Also, the firmware does not distinguish between SUSPEND and START states anymore. Remove the SUSPEND case in the switch. Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/tg3.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index c15a92d012ef..ae808b03affe 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -965,9 +965,6 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) event = APE_EVENT_STATUS_STATE_UNLOAD; break; - case RESET_KIND_SUSPEND: - event = APE_EVENT_STATUS_STATE_SUSPEND; - break; default: return; } @@ -1739,10 +1736,6 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) break; } } - - if (kind == RESET_KIND_INIT || - kind == RESET_KIND_SUSPEND) - tg3_ape_driver_state_change(tp, kind); } /* tp->lock is held. */ @@ -1764,9 +1757,6 @@ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) break; } } - - if (kind == RESET_KIND_SHUTDOWN) - tg3_ape_driver_state_change(tp, kind); } /* tp->lock is held. 
*/ @@ -4206,6 +4196,8 @@ static int tg3_power_down_prepare(struct tg3 *tp) tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); + tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); + return 0; } @@ -11244,6 +11236,9 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tg3_full_lock(tp, 0); + if (init) + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); + err = tg3_init_hw(tp, reset_phy); if (err) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); @@ -13360,11 +13355,13 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, struct tg3 *tp = netdev_priv(dev); bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; - if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && - tg3_power_up(tp)) { - etest->flags |= ETH_TEST_FL_FAILED; - memset(data, 1, sizeof(u64) * TG3_NUM_TEST); - return; + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + if (tg3_power_up(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + memset(data, 1, sizeof(u64) * TG3_NUM_TEST); + return; + } + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); } memset(data, 0, sizeof(u64) * TG3_NUM_TEST); @@ -17670,6 +17667,8 @@ static int tg3_resume(struct device *device) tg3_full_lock(tp, 0); + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); + tg3_flag_set(tp, INIT_COMPLETE); err = tg3_restart_hw(tp, !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)); @@ -17804,6 +17803,7 @@ static void tg3_io_resume(struct pci_dev *pdev) goto done; tg3_full_lock(tp, 0); + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); tg3_flag_set(tp, INIT_COMPLETE); err = tg3_restart_hw(tp, true); if (err) { -- cgit v1.2.3 From 32ba19efc08a4bd63590310f05da5ce4d8773508 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Thu, 23 May 2013 11:11:23 +0000 Subject: tg3: Simplify ring control block setup The current code calls tg3_set_bdinfo() separately on napi0, followed by a loop that does napi1+. Simplify it by setting bdinfo in the loop for all napi contexts. Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/tg3.c | 79 +++++++++++++++++++++---------------- 1 file changed, 46 insertions(+), 33 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ae808b03affe..bb1d34c109f5 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -9239,6 +9239,48 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) } } +/* tp->lock is held. */ +static void tg3_tx_rcbs_init(struct tg3 *tp) +{ + int i = 0; + u32 txrcb = NIC_SRAM_SEND_RCB; + + if (tg3_flag(tp, ENABLE_TSS)) + i++; + + for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->tx_ring) + continue; + + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + } +} + +/* tp->lock is held. */ +static void tg3_rx_ret_rcbs_init(struct tg3 *tp) +{ + int i = 0; + u32 rxrcb = NIC_SRAM_RCV_RET_RCB; + + if (tg3_flag(tp, ENABLE_RSS)) + i++; + + for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->rx_rcb) + continue; + + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT, 0); + } +} + /* tp->lock is held. 
*/ static void tg3_rings_reset(struct tg3 *tp) { @@ -9315,9 +9357,6 @@ static void tg3_rings_reset(struct tg3 *tp) tw32_tx_mbox(mbox + i * 8, 0); } - txrcb = NIC_SRAM_SEND_RCB; - rxrcb = NIC_SRAM_RCV_RET_RCB; - /* Clear status block in ram. */ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); @@ -9327,46 +9366,20 @@ static void tg3_rings_reset(struct tg3 *tp) tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tnapi->status_mapping & 0xffffffff)); - if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } - - if (tnapi->rx_rcb) { - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - (tp->rx_ret_ring_mask + 1) << - BDINFO_FLAGS_MAXLEN_SHIFT, 0); - rxrcb += TG3_BDINFO_SIZE; - } - stblk = HOSTCC_STATBLCK_RING1; for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { u64 mapping = (u64)tnapi->status_mapping; tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); + stblk += 8; /* Clear status block in ram. */ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); - - if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } - - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - ((tp->rx_ret_ring_mask + 1) << - BDINFO_FLAGS_MAXLEN_SHIFT), 0); - - stblk += 8; - rxrcb += TG3_BDINFO_SIZE; } + + tg3_tx_rcbs_init(tp); + tg3_rx_ret_rcbs_init(tp); } static void tg3_setup_rxbd_thresholds(struct tg3 *tp) -- cgit v1.2.3 From 328947ff2609989fefe6dfd1143cc0787a5ec862 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Thu, 23 May 2013 11:11:24 +0000 Subject: tg3: Make tg3_rings_reset() more concise Simplify the rings reset function and increase readability by moving the control block disable code into separate functions. Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/tg3.c | 81 ++++++++++++++++++++++--------------- 1 file changed, 48 insertions(+), 33 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index bb1d34c109f5..693c5f23c3ea 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -9239,6 +9239,28 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) } } +/* tp->lock is held. */ +static void tg3_tx_rcbs_disable(struct tg3 *tp) +{ + u32 txrcb, limit; + + /* Disable all transmit rings but the first. */ + if (!tg3_flag(tp, 5705_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; + else if (tg3_flag(tp, 5717_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; + else if (tg3_flag(tp, 57765_CLASS) || + tg3_asic_rev(tp) == ASIC_REV_5762) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; + else + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + + for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + txrcb < limit; txrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); +} + /* tp->lock is held. */ static void tg3_tx_rcbs_init(struct tg3 *tp) { @@ -9260,6 +9282,29 @@ static void tg3_tx_rcbs_init(struct tg3 *tp) } } +/* tp->lock is held. */ +static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) +{ + u32 rxrcb, limit; + + /* Disable all receive return rings but the first. 
*/ + if (tg3_flag(tp, 5717_PLUS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; + else if (!tg3_flag(tp, 5705_PLUS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; + else if (tg3_asic_rev(tp) == ASIC_REV_5755 || + tg3_asic_rev(tp) == ASIC_REV_5762 || + tg3_flag(tp, 57765_CLASS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; + else + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + + for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); +} + /* tp->lock is held. */ static void tg3_rx_ret_rcbs_init(struct tg3 *tp) { @@ -9285,42 +9330,12 @@ static void tg3_rx_ret_rcbs_init(struct tg3 *tp) static void tg3_rings_reset(struct tg3 *tp) { int i; - u32 stblk, txrcb, rxrcb, limit; + u32 stblk; struct tg3_napi *tnapi = &tp->napi[0]; - /* Disable all transmit rings but the first. */ - if (!tg3_flag(tp, 5705_PLUS)) - limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; - else if (tg3_flag(tp, 5717_PLUS)) - limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; - else if (tg3_flag(tp, 57765_CLASS) || - tg3_asic_rev(tp) == ASIC_REV_5762) - limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; - else - limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; - - for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; - txrcb < limit; txrcb += TG3_BDINFO_SIZE) - tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, - BDINFO_FLAGS_DISABLED); - - - /* Disable all receive return rings but the first. */ - if (tg3_flag(tp, 5717_PLUS)) - limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; - else if (!tg3_flag(tp, 5705_PLUS)) - limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; - else if (tg3_asic_rev(tp) == ASIC_REV_5755 || - tg3_asic_rev(tp) == ASIC_REV_5762 || - tg3_flag(tp, 57765_CLASS)) - limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; - else - limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + tg3_tx_rcbs_disable(tp); - for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; - rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) - tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, - BDINFO_FLAGS_DISABLED); + tg3_rx_ret_rcbs_disable(tp); /* Disable interrupts */ tw32_mailbox_f(tp->napi[0].int_mbox, 1); -- cgit v1.2.3 From 4a5f46f2fef13ecfc1f396d0caf75031a7a15422 Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Thu, 23 May 2013 11:11:25 +0000 Subject: tg3: Use descriptive label names in tg3_start Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. 
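The label renaming in the tg3_start() patch below follows the usual kernel error-unwind idiom: name each exit label after the cleanup step it begins instead of numbering the exits. A generic sketch (foo_ helpers are placeholders, not tg3 code):

struct foo_dev;
int foo_alloc_rings(struct foo_dev *fd);
int foo_request_irqs(struct foo_dev *fd);
void foo_free_rings(struct foo_dev *fd);
void foo_ints_fini(struct foo_dev *fd);

static int foo_start(struct foo_dev *fd)
{
	int err;

	err = foo_alloc_rings(fd);
	if (err)
		goto out_ints_fini;

	err = foo_request_irqs(fd);
	if (err)
		goto out_free_rings;

	return 0;

out_free_rings:		/* undo foo_alloc_rings() */
	foo_free_rings(fd);
out_ints_fini:		/* undo the interrupt setup done earlier in probe */
	foo_ints_fini(fd);
	return err;
}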
Miller --- drivers/net/ethernet/broadcom/tg3.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 693c5f23c3ea..6ab02d6a0eb3 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -11244,7 +11244,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, */ err = tg3_alloc_consistent(tp); if (err) - goto err_out1; + goto out_ints_fini; tg3_napi_init(tp); @@ -11258,7 +11258,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tnapi = &tp->napi[i]; free_irq(tnapi->irq_vec, tnapi); } - goto err_out2; + goto out_napi_fini; } } @@ -11276,7 +11276,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tg3_full_unlock(tp); if (err) - goto err_out3; + goto out_free_irq; if (test_irq && tg3_flag(tp, USING_MSI)) { err = tg3_test_msi(tp); @@ -11287,7 +11287,7 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, tg3_free_rings(tp); tg3_full_unlock(tp); - goto err_out2; + goto out_napi_fini; } if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { @@ -11327,18 +11327,18 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, return 0; -err_out3: +out_free_irq: for (i = tp->irq_cnt - 1; i >= 0; i--) { struct tg3_napi *tnapi = &tp->napi[i]; free_irq(tnapi->irq_vec, tnapi); } -err_out2: +out_napi_fini: tg3_napi_disable(tp); tg3_napi_fini(tp); tg3_free_consistent(tp); -err_out1: +out_ints_fini: tg3_ints_fini(tp); return err; -- cgit v1.2.3 From 7c10ee32f32644c9efbb3cfebf5fb7cea4b48a2c Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Thu, 23 May 2013 11:11:26 +0000 Subject: tg3: Fix misplaced empty line Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/tg3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 6ab02d6a0eb3..bd5917d8ffb5 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1311,8 +1311,8 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable) if (err) return err; - if (enable) + if (enable) val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; else val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA; -- cgit v1.2.3 From f2068b80ca59d63ca21ee1da829c9fe09f25b08e Mon Sep 17 00:00:00 2001 From: Nithin Sujir Date: Thu, 23 May 2013 11:11:27 +0000 Subject: tg3: Remove unnecessary lock around tg3_flag_set The spinlock was needed when flags used to be a u32 and set/cleared using bit operations. Now that we use the atomic set_bit, this lock isn't needed. Signed-off-by: Nithin Nayak Sujir Signed-off-by: Michael Chan Signed-off-by: David S. 
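The spinlock removal below is safe because tg3_flag_set() ends up in set_bit(), an atomic read-modify-write on the flags bitmap; only the non-atomic __set_bit() variant would need external serialization. A short illustration (generic bitmap, not tg3 code):

static void flag_example(void)
{
	static unsigned long flags[1];	/* stand-in for the driver's flag bitmap */

	set_bit(0, flags);	/* atomic RMW: no spin_lock needed around it */
	__set_bit(0, flags);	/* non-atomic variant: caller must serialize */
}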
Miller --- drivers/net/ethernet/broadcom/tg3.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index bd5917d8ffb5..d7755d849e53 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6386,9 +6386,7 @@ static void tg3_tx_recover(struct tg3 *tp) "Please report the problem to the driver maintainer " "and include system chipset information.\n"); - spin_lock(&tp->lock); tg3_flag_set(tp, TX_RECOVERY_PENDING); - spin_unlock(&tp->lock); } static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) -- cgit v1.2.3 From a80be5a58ebf29e7ca8a2915debe42295608a20a Mon Sep 17 00:00:00 2001 From: Rajesh Borundia Date: Thu, 23 May 2013 21:04:25 +0000 Subject: qlcnic: Support spoof check config. o Add support for spoof check configuration per VF using iproute2 tool. Signed-off-by: Rajesh Borundia Signed-off-by: Shahed Shaikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 1 + drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 2 ++ .../ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 1 + .../net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 36 ++++++++++++++++++++++ 4 files changed, 40 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index aeb26a850679..1e196713c7d3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -449,6 +449,7 @@ static const struct net_device_ops qlcnic_netdev_ops = { .ndo_set_vf_tx_rate = qlcnic_sriov_set_vf_tx_rate, .ndo_get_vf_config = qlcnic_sriov_get_vf_config, .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan, + .ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk, #endif }; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index d85fbb57c25b..9176cb015732 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h @@ -129,6 +129,7 @@ struct qlcnic_vport { u8 vlan_mode; u16 vlan; u8 qos; + bool spoofchk; u8 mac[6]; }; @@ -225,6 +226,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int); int qlcnic_sriov_get_vf_config(struct net_device *, int , struct ifla_vf_info *); int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8); +int qlcnic_sriov_set_vf_spoofchk(struct net_device *, int, bool); #else static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {} static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 196b2d100407..6262c71bc18f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -187,6 +187,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) } sriov->vf_info[i].vp = vp; vp->max_tx_bw = MAX_BW; + vp->spoofchk = true; random_ether_addr(vp->mac); dev_info(&adapter->pdev->dev, "MAC Address %pM is configured for VF %d\n", diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 1a66ccded235..ee0c1d307966 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -580,6 +580,7 
@@ static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func) struct qlcnic_cmd_args cmd; struct qlcnic_vport *vp; int err, id; + u8 *mac; id = qlcnic_sriov_func_to_index(adapter, func); if (id < 0) @@ -591,6 +592,14 @@ static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func) return err; cmd.req.arg[1] = 0x3 | func << 16; + if (vp->spoofchk == true) { + mac = vp->mac; + cmd.req.arg[2] |= BIT_1 | BIT_3 | BIT_8; + cmd.req.arg[4] = mac[5] | mac[4] << 8 | mac[3] << 16 | + mac[2] << 24; + cmd.req.arg[5] = mac[1] | mac[0] << 8; + } + if (vp->vlan_mode == QLC_PVID_MODE) { cmd.req.arg[2] |= BIT_6; cmd.req.arg[3] |= vp->vlan << 8; @@ -1767,6 +1776,7 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev, memcpy(&ivi->mac, vp->mac, ETH_ALEN); ivi->vlan = vp->vlan; ivi->qos = vp->qos; + ivi->spoofchk = vp->spoofchk; if (vp->max_tx_bw == MAX_BW) ivi->tx_rate = 0; else @@ -1775,3 +1785,29 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev, ivi->vf = vf; return 0; } + +int qlcnic_sriov_set_vf_spoofchk(struct net_device *netdev, int vf, bool chk) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vf_info *vf_info; + struct qlcnic_vport *vp; + + if (!qlcnic_sriov_pf_check(adapter)) + return -EOPNOTSUPP; + + if (vf >= sriov->num_vfs) + return -EINVAL; + + vf_info = &sriov->vf_info[vf]; + vp = vf_info->vp; + if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { + netdev_err(netdev, + "Spoof check change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n", + vf); + return -EOPNOTSUPP; + } + + vp->spoofchk = chk; + return 0; +} -- cgit v1.2.3 From 45ef92ed5815883087c984356a85aa3efdb5bcab Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 23 May 2013 21:04:26 +0000 Subject: qlcnic: Disable INT-x interrupt for 83xx on driver unload o Set HW mask for 8300 Series adapter, in INT-x mode, to stop generating interrupt during driver unload. Signed-off-by: Himanshu Madhani Signed-off-by: Shahed Shaikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index b4ff1e35a11d..9b91d6a231cb 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -312,6 +312,11 @@ inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter) writel(0, adapter->tgt_mask_reg); } +inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter) +{ + writel(1, adapter->tgt_mask_reg); +} + /* Enable MSI-x and INT-x interrupts */ void qlcnic_83xx_enable_intr(struct qlcnic_adapter *adapter, struct qlcnic_host_sds_ring *sds_ring) @@ -458,6 +463,9 @@ void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter) { u32 num_msix; + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) + qlcnic_83xx_set_legacy_intr_mask(adapter); + qlcnic_83xx_disable_mbx_intr(adapter); if (adapter->flags & QLCNIC_MSIX_ENABLED) -- cgit v1.2.3 From aa2a80340ce0b4a148ae8878142d17bc793ba2ab Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 23 May 2013 21:04:27 +0000 Subject: qlcnic: Update IRQ name for 8200 and 8300 Series adapter. o Updated IRQ name for 8200 and 8300 Series adapter as per format used by other multiqueue drivers. 
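Assuming a netdev named eth0, the snprintf formats introduced below should produce /proc/interrupts names along these lines (illustrative, not copied from a running system):

eth0-rx-0
eth0-rx-1
eth0-rx-2
eth0-tx-0-rx-3    (82xx MSI-X: the last status ring also carries TX completions)
eth0-tx-0
qlcnic            (82xx legacy INT-x)
qlcnic-MB         (83xx mailbox/AEN vector)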
Signed-off-by: Himanshu Madhani Signed-off-by: Shahed Shaikh Signed-off-by: David S. Miller --- .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 5 +---- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 23 ++++++++++++++-------- 2 files changed, 16 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 9b91d6a231cb..9f5683fda801 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -482,7 +482,6 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter) { irq_handler_t handler; u32 val; - char name[32]; int err = 0; unsigned long flags = 0; @@ -493,9 +492,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter) if (adapter->flags & QLCNIC_MSIX_ENABLED) { handler = qlcnic_83xx_handle_aen; val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector; - snprintf(name, (IFNAMSIZ + 4), - "%s[%s]", "qlcnic", "aen"); - err = request_irq(val, handler, flags, name, adapter); + err = request_irq(val, handler, flags, "qlcnic-MB", adapter); if (err) { dev_err(&adapter->pdev->dev, "failed to register MBX interrupt\n"); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 1e196713c7d3..76b95b48c815 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1396,16 +1396,23 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter) for (ring = 0; ring < num_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (qlcnic_82xx_check(adapter) && - (ring == (num_sds_rings - 1))) + (ring == (num_sds_rings - 1))) { + if (!(adapter->flags & + QLCNIC_MSIX_ENABLED)) + snprintf(sds_ring->name, + sizeof(sds_ring->name), + "qlcnic"); + else + snprintf(sds_ring->name, + sizeof(sds_ring->name), + "%s-tx-0-rx-%d", + netdev->name, ring); + } else { snprintf(sds_ring->name, sizeof(sds_ring->name), - "qlcnic-%s[Tx0+Rx%d]", - netdev->name, ring); - else - snprintf(sds_ring->name, - sizeof(sds_ring->name), - "qlcnic-%s[Rx%d]", + "%s-rx-%d", netdev->name, ring); + } err = request_irq(sds_ring->irq, handler, flags, sds_ring->name, sds_ring); if (err) @@ -1420,7 +1427,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter) ring++) { tx_ring = &adapter->tx_ring[ring]; snprintf(tx_ring->name, sizeof(tx_ring->name), - "qlcnic-%s[Tx%d]", netdev->name, ring); + "%s-tx-%d", netdev->name, ring); err = request_irq(tx_ring->irq, handler, flags, tx_ring->name, tx_ring); if (err) -- cgit v1.2.3 From 7e8fd003c96dda2920a4ba0c32d54780d15dfefc Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Thu, 23 May 2013 21:04:28 +0000 Subject: qlcnic: Remove qlcnic_config_npars module parameter qlcnic_config_npars module parameter is used to configure NPAR operational modes where NPAR function is used for PCI passthrough. Removing this paramter as PCI passthrough is not supported for NPAR functions. Signed-off-by: Shahed Shaikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 1 - .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 21 ++++++------------- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 24 +++++----------------- 3 files changed, 11 insertions(+), 35 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index c1b693cb3df3..60f310a542da 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -303,7 +303,6 @@ extern int qlcnic_use_msi; extern int qlcnic_use_msi_x; extern int qlcnic_auto_fw_reset; extern int qlcnic_load_fw_file; -extern int qlcnic_config_npars; /* Number of status descriptors to handle per interrupt */ #define MAX_STATUS_HANDLE (64) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c index b0c3de9ede03..55e5e1b5ad4f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c @@ -42,27 +42,18 @@ int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock) static int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter) { u8 id; - int i, ret = -EBUSY; + int ret = -EBUSY; u32 data = QLCNIC_MGMT_FUNC; struct qlcnic_hardware_context *ahw = adapter->ahw; if (qlcnic_83xx_lock_driver(adapter)) return ret; - if (qlcnic_config_npars) { - for (i = 0; i < ahw->act_pci_func; i++) { - id = adapter->npars[i].pci_func; - if (id == ahw->pci_func) - continue; - data |= qlcnic_config_npars & - QLC_83XX_SET_FUNC_OPMODE(0x3, id); - } - } else { - data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); - data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, ahw->pci_func)) | - QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC, - ahw->pci_func); - } + id = ahw->pci_func; + data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); + data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, id)) | + QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC, id); + QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data); qlcnic_83xx_unlock_driver(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 76b95b48c815..5b18b0accb06 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -52,10 +52,6 @@ int qlcnic_load_fw_file; MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); -int qlcnic_config_npars; -module_param(qlcnic_config_npars, int, 0444); -MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)"); - static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void qlcnic_remove(struct pci_dev *pdev); static int qlcnic_open(struct net_device *netdev); @@ -769,7 +765,7 @@ static int qlcnic_set_function_modes(struct qlcnic_adapter *adapter) { u8 id; - int i, ret = 1; + int ret; u32 data = QLCNIC_MGMT_FUNC; struct qlcnic_hardware_context *ahw = adapter->ahw; @@ -777,20 +773,10 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter) if (ret) goto err_lock; - if (qlcnic_config_npars) { - for (i = 0; i < ahw->act_pci_func; i++) { - id = adapter->npars[i].pci_func; - if (id == ahw->pci_func) - continue; - data |= (qlcnic_config_npars & - QLC_DEV_SET_DRV(0xf, id)); - } - } else { - data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE); - data = (data & ~QLC_DEV_SET_DRV(0xf, 
ahw->pci_func)) | - (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, - ahw->pci_func)); - } + id = ahw->pci_func; + data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE); + data = (data & ~QLC_DEV_SET_DRV(0xf, id)) | + QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, id); QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data); qlcnic_api_unlock(adapter); err_lock: -- cgit v1.2.3 From 1267ff962fc04d33d30b52c5c09201df2b1c1d76 Mon Sep 17 00:00:00 2001 From: Sucheta Chakraborty Date: Thu, 23 May 2013 21:04:29 +0000 Subject: qlcnic: Initialize trans_work and idc_aen_work at VF probe. o work_struct should be initialized before cancel_delayed_work call to destroy it. Signed-off-by: Sucheta Chakraborty Signed-off-by: Shahed Shaikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 6262c71bc18f..bcd200eff981 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -35,6 +35,7 @@ static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *); static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *); static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *); +static void qlcnic_sriov_process_bc_cmd(struct work_struct *); static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { .read_crb = qlcnic_83xx_read_crb, @@ -179,6 +180,8 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) spin_lock_init(&vf->rcv_pend.lock); init_completion(&vf->ch_free_cmpl); + INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd); + if (qlcnic_sriov_pf_check(adapter)) { vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL); if (!vp) { @@ -653,6 +656,8 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac) if (qlcnic_read_mac_addr(adapter)) dev_warn(&adapter->pdev->dev, "failed to read mac addr\n"); + INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); + clear_bit(__QLCNIC_RESETTING, &adapter->state); return 0; } @@ -865,7 +870,6 @@ static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov, vf->adapter->need_fw_reset) return; - INIT_WORK(&vf->trans_work, func); queue_work(sriov->bc.bc_trans_wq, &vf->trans_work); } -- cgit v1.2.3 From b17f2ccaeea10f30322b9706d8d9b5b39d21f427 Mon Sep 17 00:00:00 2001 From: Jitendra Kalsaria Date: Thu, 23 May 2013 21:04:30 +0000 Subject: qlcnic: Convert nested if-else to switch-case Signed-off-by: Jitendra Kalsaria Signed-off-by: Shahed Shaikh Signed-off-by: David S. 
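The ordering rule behind the trans_work/idc_aen_work fix above is general: a work item must be initialized exactly once before anything queues or cancels it, which is why probe/init paths are the right home for INIT_WORK()/INIT_DELAYED_WORK(). A minimal sketch with hypothetical foo_ names:

struct foo_vf {
	struct work_struct trans_work;
	struct delayed_work aen_work;
};

static void foo_trans_fn(struct work_struct *work) { }
static void foo_aen_fn(struct work_struct *work) { }

static void foo_probe_init(struct foo_vf *vf)
{
	INIT_WORK(&vf->trans_work, foo_trans_fn);	/* once, at probe */
	INIT_DELAYED_WORK(&vf->aen_work, foo_aen_fn);
}

static void foo_remove(struct foo_vf *vf)
{
	cancel_work_sync(&vf->trans_work);		/* safe only after INIT_WORK */
	cancel_delayed_work_sync(&vf->aen_work);
}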
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c index 55e5e1b5ad4f..b5054e1d1710 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c @@ -187,20 +187,24 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter) else priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode, ahw->pci_func); - - if (priv_level == QLCNIC_NON_PRIV_FUNC) { + switch (priv_level) { + case QLCNIC_NON_PRIV_FUNC: ahw->op_mode = QLCNIC_NON_PRIV_FUNC; ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic; - } else if (priv_level == QLCNIC_PRIV_FUNC) { + break; + case QLCNIC_PRIV_FUNC: ahw->op_mode = QLCNIC_PRIV_FUNC; ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry; nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic; - } else if (priv_level == QLCNIC_MGMT_FUNC) { + break; + case QLCNIC_MGMT_FUNC: ahw->op_mode = QLCNIC_MGMT_FUNC; ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic; - } else { + break; + default: + dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n"); return -EIO; } @@ -209,8 +213,8 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter) else adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; - adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER; - adapter->ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO; + ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER; + ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO; return 0; } -- cgit v1.2.3 From 4690a7e48ac55cf84fcf214279baf133304d71e4 Mon Sep 17 00:00:00 2001 From: Sony Chacko Date: Thu, 23 May 2013 21:04:31 +0000 Subject: qlcnic: diagnostics routine changes Test and set diagnostics mode bit before starting diagnostics tests. Stop diagnostics tests if adapter is resetting. Signed-off-by: Sony Chacko Signed-off-by: Shahed Shaikh Signed-off-by: David S. 
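qlcnic_get_diag_lock()/qlcnic_release_diag_lock() in the diff below are the standard bit-based try-lock: test_and_set_bit() returns the previous bit value, so a non-zero return means another path already owns diagnostics mode and the caller backs off with -EBUSY. A minimal sketch (hypothetical state word, not qlcnic code):

#define EXAMPLE_DIAG_BIT	0

static unsigned long example_state;

static int example_run_diag(void)
{
	if (test_and_set_bit(EXAMPLE_DIAG_BIT, &example_state))
		return -EBUSY;		/* someone else is already in diag mode */

	/* ... run the self-test ... */

	clear_bit(EXAMPLE_DIAG_BIT, &example_state);
	return 0;
}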
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 12 ++++++++++ .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 28 +++++++++++++++------- 2 files changed, 31 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 60f310a542da..8affec5e6eb8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -931,6 +931,7 @@ struct qlcnic_ipaddr { #define __QLCNIC_SRIOV_ENABLE 10 #define __QLCNIC_SRIOV_CAPABLE 11 #define __QLCNIC_MBX_POLL_ENABLE 12 +#define __QLCNIC_DIAG_MODE 13 #define QLCNIC_INTERRUPT_TEST 1 #define QLCNIC_LOOPBACK_TEST 2 @@ -1476,6 +1477,7 @@ int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *); /* Functions from qlcnic_ethtool.c */ + int qlcnic_check_loopback_buff(unsigned char *, u8 []); int qlcnic_do_lb_test(struct qlcnic_adapter *, u8); int qlcnic_loopback_test(struct net_device *, u8); @@ -1885,6 +1887,16 @@ static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring) writel(0xfbff, adapter->tgt_mask_reg); } +static inline int qlcnic_get_diag_lock(struct qlcnic_adapter *adapter) +{ + return test_and_set_bit(__QLCNIC_DIAG_MODE, &adapter->state); +} + +static inline void qlcnic_release_diag_lock(struct qlcnic_adapter *adapter) +{ + clear_bit(__QLCNIC_DIAG_MODE, &adapter->state); +} + extern const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops; extern const struct ethtool_ops qlcnic_ethtool_ops; extern const struct ethtool_ops qlcnic_ethtool_failed_ops; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 9f5683fda801..1e699e6f29c8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1593,16 +1593,24 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) struct qlcnic_hardware_context *ahw = adapter->ahw; int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings; - QLCDB(adapter, DRV, "%s loopback test in progress\n", - mode == QLCNIC_ILB_MODE ? "internal" : "external"); if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { - dev_warn(&adapter->pdev->dev, - "Loopback test not supported for non privilege function\n"); + netdev_warn(netdev, + "Loopback test not supported in non privileged mode\n"); return ret; } - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, "Device is resetting\n"); return -EBUSY; + } + + if (qlcnic_get_diag_lock(adapter)) { + netdev_info(netdev, "Device is in diagnostics mode\n"); + return -EBUSY; + } + + netdev_info(netdev, "%s loopback test in progress\n", + mode == QLCNIC_ILB_MODE ? 
"internal" : "external"); ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST, max_sds_rings); @@ -1643,7 +1651,7 @@ free_diag_res: fail_diag_alloc: adapter->max_sds_rings = max_sds_rings; - clear_bit(__QLCNIC_RESETTING, &adapter->state); + qlcnic_release_diag_lock(adapter); return ret; } @@ -3118,8 +3126,10 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) u8 val; int ret, max_sds_rings = adapter->max_sds_rings; - if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) - return -EIO; + if (qlcnic_get_diag_lock(adapter)) { + netdev_info(netdev, "Device in diagnostics mode\n"); + return -EBUSY; + } ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST, max_sds_rings); @@ -3161,7 +3171,7 @@ done: fail_diag_irq: adapter->max_sds_rings = max_sds_rings; - clear_bit(__QLCNIC_RESETTING, &adapter->state); + qlcnic_release_diag_lock(adapter); return ret; } -- cgit v1.2.3 From 099907fac61c47a67a02edb85d551e91959ecc5b Mon Sep 17 00:00:00 2001 From: Sony Chacko Date: Thu, 23 May 2013 21:04:32 +0000 Subject: qlcnic: modify reset recovery path in diag mode Provide diagnostics routines enough time to unwind before proceeding with reset recovery. Signed-off-by: Sony Chacko Signed-off-by: Shahed Shaikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 6 +++- .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 1 + .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 35 +++++++++++++++++----- 3 files changed, 34 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 8affec5e6eb8..4ff0625a0cef 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1477,7 +1477,6 @@ int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *); /* Functions from qlcnic_ethtool.c */ - int qlcnic_check_loopback_buff(unsigned char *, u8 []); int qlcnic_do_lb_test(struct qlcnic_adapter *, u8); int qlcnic_loopback_test(struct net_device *, u8); @@ -1897,6 +1896,11 @@ static inline void qlcnic_release_diag_lock(struct qlcnic_adapter *adapter) clear_bit(__QLCNIC_DIAG_MODE, &adapter->state); } +static inline int qlcnic_check_diag_status(struct qlcnic_adapter *adapter) +{ + return test_bit(__QLCNIC_DIAG_MODE, &adapter->state); +} + extern const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops; extern const struct ethtool_ops qlcnic_ethtool_ops; extern const struct ethtool_ops qlcnic_ethtool_failed_ops; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index f5db67fc9f55..1bfe283a9412 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -314,6 +314,7 @@ struct qlc_83xx_idc { u8 vnic_state; u8 vnic_wait_limit; u8 quiesce_req; + u8 delay_reset; char **name; }; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 5e7fb1dfb97b..aa26250d7374 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -649,6 +649,7 @@ static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) ahw->idc.collect_dump = 0; ahw->reset_context = 0; adapter->tx_timeo_cnt = 0; + ahw->idc.delay_reset = 0; clear_bit(__QLCNIC_RESETTING, &adapter->state); } @@ -883,21 +884,41 @@ 
static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter) int ret = 0; if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) { - qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1); qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); set_bit(__QLCNIC_RESETTING, &adapter->state); clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) qlcnic_83xx_disable_vnic_mode(adapter, 1); - qlcnic_83xx_idc_detach_driver(adapter); + + if (qlcnic_check_diag_status(adapter)) { + dev_info(&adapter->pdev->dev, + "%s: Wait for diag completion\n", __func__); + adapter->ahw->idc.delay_reset = 1; + return 0; + } else { + qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1); + qlcnic_83xx_idc_detach_driver(adapter); + } } - /* Check ACK from other functions */ - ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter); - if (ret) { + if (qlcnic_check_diag_status(adapter)) { dev_info(&adapter->pdev->dev, - "%s: Waiting for reset ACK\n", __func__); - return 0; + "%s: Wait for diag completion\n", __func__); + return -1; + } else { + if (adapter->ahw->idc.delay_reset) { + qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1); + qlcnic_83xx_idc_detach_driver(adapter); + adapter->ahw->idc.delay_reset = 0; + } + + /* Check for ACK from other functions */ + ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter); + if (ret) { + dev_info(&adapter->pdev->dev, + "%s: Waiting for reset ACK\n", __func__); + return -1; + } } /* Transit to INIT state and restart the HW */ -- cgit v1.2.3 From 487042af928c16d2a5b4dd52a9639a03b8b40761 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 23 May 2013 21:04:33 +0000 Subject: qlcnic: Implement GET_LED_STATUS command for 82xx adapter. o GET_LED_STATUS command will be used by driver to get current beacon state from 82xx adapter. o Refactored qlcnic_store_beacon() to split 8200 and 8300 specific calls. Signed-off-by: Himanshu Madhani Signed-off-by: Shahed Shaikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 5 + drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 1 + drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 15 +++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 1 + drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 122 ++++++++++++++-------- 5 files changed, 98 insertions(+), 46 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 4ff0625a0cef..e6f8beccbf3e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -815,6 +815,7 @@ struct qlcnic_mac_list_s { #define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2 #define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3 #define QLCNIC_FW_CAPABILITY_2_OCBB BIT_5 +#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7 /* module types */ #define LINKEVENT_MODULE_NOT_PRESENT 1 @@ -912,6 +913,9 @@ struct qlcnic_ipaddr { #define QLCNIC_IS_TSO_CAPABLE(adapter) \ ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) +#define QLCNIC_BEACON_EANBLE 0xC +#define QLCNIC_BEACON_DISABLE 0xD + #define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 #define QLCNIC_MSIX_TBL_SPACE 8192 #define QLCNIC_PCI_REG_MSIX_TBL 0x44 @@ -1543,6 +1547,7 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *); int qlcnic_reset_npar_config(struct qlcnic_adapter *); int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *); void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16); +int qlcnic_get_beacon_state(struct qlcnic_adapter *, u8 *); int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); int qlcnic_read_mac_addr(struct qlcnic_adapter *); int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index 43562c256379..d63b5f728d75 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -37,6 +37,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = { {QLCNIC_CMD_TEMP_SIZE, 4, 4}, {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, {QLCNIC_CMD_SET_DRV_VER, 4, 1}, + {QLCNIC_CMD_GET_LED_STATUS, 4, 2}, }; static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 106a12f2a02f..218978db2963 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -1503,6 +1503,21 @@ int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) return rv; } +int qlcnic_get_beacon_state(struct qlcnic_adapter *adapter, u8 *h_state) +{ + struct qlcnic_cmd_args cmd; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_STATUS); + if (!err) { + err = qlcnic_issue_cmd(adapter, &cmd); + if (!err) + *h_state = cmd.rsp.arg[1]; + } + qlcnic_free_mbx_args(&cmd); + return err; +} + void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter) { void __iomem *msix_base_addr; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index b6818f4356b9..812fd07baef3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -87,6 +87,7 @@ enum qlcnic_regs { #define QLCNIC_CMD_CONFIG_VPORT 0x32 #define QLCNIC_CMD_GET_MAC_STATS 0x37 #define QLCNIC_CMD_SET_DRV_VER 0x38 
+#define QLCNIC_CMD_GET_LED_STATUS 0x3C #define QLCNIC_CMD_CONFIGURE_RSS 0x41 #define QLCNIC_CMD_CONFIG_INTR_COAL 0x43 #define QLCNIC_CMD_CONFIGURE_LED 0x44 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index e7a2fe21b649..025a7192289d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -114,57 +114,51 @@ static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, return 0; } -static ssize_t qlcnic_store_beacon(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) +static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter, + const char *buf, size_t len) { - struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_hardware_context *ahw = adapter->ahw; - int err, max_sds_rings = adapter->max_sds_rings; - u16 beacon; - u8 b_state, b_rate; unsigned long h_beacon; + int err; - if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { - dev_warn(dev, - "LED test not supported in non privileged mode\n"); - return -EOPNOTSUPP; - } + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EIO; - if (qlcnic_83xx_check(adapter) && - !test_bit(__QLCNIC_RESETTING, &adapter->state)) { - if (kstrtoul(buf, 2, &h_beacon)) - return -EINVAL; + if (kstrtoul(buf, 2, &h_beacon)) + return -EINVAL; - if (ahw->beacon_state == h_beacon) - return len; + if (ahw->beacon_state == h_beacon) + return len; - rtnl_lock(); - if (!ahw->beacon_state) { - if (test_and_set_bit(__QLCNIC_LED_ENABLE, - &adapter->state)) { - rtnl_unlock(); - return -EBUSY; - } - } - if (h_beacon) { - err = qlcnic_83xx_config_led(adapter, 1, h_beacon); - if (err) - goto beacon_err; - } else { - err = qlcnic_83xx_config_led(adapter, 0, !h_beacon); - if (err) - goto beacon_err; + rtnl_lock(); + if (!ahw->beacon_state) { + if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) { + rtnl_unlock(); + return -EBUSY; } - /* set the current beacon state */ + } + + if (h_beacon) + err = qlcnic_83xx_config_led(adapter, 1, h_beacon); + else + err = qlcnic_83xx_config_led(adapter, 0, !h_beacon); + if (!err) ahw->beacon_state = h_beacon; -beacon_err: - if (!ahw->beacon_state) - clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); - rtnl_unlock(); - return len; - } + if (!ahw->beacon_state) + clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); + + rtnl_unlock(); + return len; +} + +static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, + const char *buf, size_t len) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int err, max_sds_rings = adapter->max_sds_rings; + u16 beacon; + u8 h_beacon_state, b_state, b_rate; if (len != sizeof(u16)) return QL_STATUS_INVALID_PARAM; @@ -174,16 +168,29 @@ beacon_err: if (err) return err; - if (adapter->ahw->beacon_state == b_state) + if ((ahw->capabilities2 & QLCNIC_FW_CAPABILITY_2_BEACON)) { + err = qlcnic_get_beacon_state(adapter, &h_beacon_state); + if (!err) { + dev_info(&adapter->pdev->dev, + "Failed to get current beacon state\n"); + } else { + if (h_beacon_state == QLCNIC_BEACON_DISABLE) + ahw->beacon_state = 0; + else if (h_beacon_state == QLCNIC_BEACON_EANBLE) + ahw->beacon_state = 2; + } + } + + if (ahw->beacon_state == b_state) return len; rtnl_lock(); - - if (!adapter->ahw->beacon_state) + if (!ahw->beacon_state) { if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) { rtnl_unlock(); return -EBUSY; } + } if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { err = -EIO; @@ -206,14 
+213,37 @@ beacon_err: if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) qlcnic_diag_free_res(adapter->netdev, max_sds_rings); - out: - if (!adapter->ahw->beacon_state) +out: + if (!ahw->beacon_state) clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); rtnl_unlock(); return err; } +static ssize_t qlcnic_store_beacon(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int err = 0; + + if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { + dev_warn(dev, + "LED test not supported in non privileged mode\n"); + return -EOPNOTSUPP; + } + + if (qlcnic_82xx_check(adapter)) + err = qlcnic_82xx_store_beacon(adapter, buf, len); + else if (qlcnic_83xx_check(adapter)) + err = qlcnic_83xx_store_beacon(adapter, buf, len); + else + return -EIO; + + return err; +} + static ssize_t qlcnic_show_beacon(struct device *dev, struct device_attribute *attr, char *buf) { -- cgit v1.2.3 From 2343f06a8c9549af27b885d36e26c59771d697f1 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 23 May 2013 21:04:34 +0000 Subject: qlcnic: qlcnic_get_board_name() function cleanup Signed-off-by: Manish Chopra Signed-off-by: Shahed Shaikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 37 ++++++++++++++++++------ 1 file changed, 28 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 5b18b0accb06..da82f2eb73b4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -862,6 +862,27 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev, return 0; } +static inline bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter, + int index) +{ + struct pci_dev *pdev = adapter->pdev; + unsigned short subsystem_vendor; + bool ret = true; + + subsystem_vendor = pdev->subsystem_vendor; + + if (pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X || + pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) { + if (qlcnic_boards[index].sub_vendor == subsystem_vendor && + qlcnic_boards[index].sub_device == pdev->subsystem_device) + ret = true; + else + ret = false; + } + + return ret; +} + static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name) { struct pci_dev *pdev = adapter->pdev; @@ -869,20 +890,18 @@ static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name) for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { if (qlcnic_boards[i].vendor == pdev->vendor && - qlcnic_boards[i].device == pdev->device && - qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor && - qlcnic_boards[i].sub_device == pdev->subsystem_device) { - sprintf(name, "%pM: %s" , - adapter->mac_addr, - qlcnic_boards[i].short_name); - found = 1; - break; + qlcnic_boards[i].device == pdev->device && + qlcnic_validate_subsystem_id(adapter, i)) { + found = 1; + break; } - } if (!found) sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr); + else + sprintf(name, "%pM: %s" , adapter->mac_addr, + qlcnic_boards[i].short_name); } static void -- cgit v1.2.3 From ee9e8b6c6e167e2400efd26b92b300ff1d3f8e8b Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 23 May 2013 21:04:35 +0000 Subject: qlcnic: Enhance virtual NIC logging Signed-off-by: Manish Chopra Signed-off-by: Shahed Shaikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 1 + .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 36 +++++++++--------- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 44 +++++++++++----------- 3 files changed, 41 insertions(+), 40 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index e6f8beccbf3e..00a09573faab 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -442,6 +442,7 @@ struct qlcnic_hardware_context { u16 max_mtu; u32 msg_enable; u16 act_pci_func; + u16 max_pci_func; u32 capabilities; u32 capabilities2; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 1e699e6f29c8..f63a69570256 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -2134,26 +2134,25 @@ out: int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter, struct qlcnic_pci_info *pci_info) { + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct device *dev = &adapter->pdev->dev; + struct qlcnic_cmd_args cmd; int i, err = 0, j = 0; u32 temp; - struct qlcnic_cmd_args cmd; qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); err = qlcnic_issue_cmd(adapter, &cmd); - adapter->ahw->act_pci_func = 0; + ahw->act_pci_func = 0; if (err == QLCNIC_RCODE_SUCCESS) { - pci_info->func_count = cmd.rsp.arg[1] & 0xFF; - dev_info(&adapter->pdev->dev, - "%s: total functions = %d\n", - __func__, pci_info->func_count); + ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF; for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) { pci_info->id = cmd.rsp.arg[i] & 0xFFFF; pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; i++; pci_info->type = cmd.rsp.arg[i] & 0xFFFF; if (pci_info->type == QLCNIC_TYPE_NIC) - adapter->ahw->act_pci_func++; + ahw->act_pci_func++; temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; pci_info->default_port = temp; i++; @@ -2165,18 +2164,21 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter, i++; memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2); i = i + 3; - - dev_info(&adapter->pdev->dev, "%s:\n" - "\tid = %d active = %d type = %d\n" - "\tport = %d min bw = %d max bw = %d\n" - "\tmac_addr = %pM\n", __func__, - pci_info->id, pci_info->active, pci_info->type, - pci_info->default_port, pci_info->tx_min_bw, - pci_info->tx_max_bw, pci_info->mac); + if (ahw->op_mode == QLCNIC_MGMT_FUNC) + dev_info(dev, "id = %d active = %d type = %d\n" + "\tport = %d min bw = %d max bw = %d\n" + "\tmac_addr = %pM\n", pci_info->id, + pci_info->active, pci_info->type, + pci_info->default_port, + pci_info->tx_min_bw, + pci_info->tx_max_bw, pci_info->mac); } + if (ahw->op_mode == QLCNIC_MGMT_FUNC) + dev_info(dev, "Max vNIC functions = %d, active vNIC functions = %d\n", + ahw->max_pci_func, ahw->act_pci_func); + } else { - dev_err(&adapter->pdev->dev, "Failed to get PCI Info%d\n", - err); + dev_err(dev, "Failed to get PCI Info, error = %d\n", err); err = -EIO; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index d63b5f728d75..9d0ae11589ce 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -883,11 +883,12 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, /* Configure eSwitch for port mirroring */ int qlcnic_config_port_mirroring(struct qlcnic_adapter 
*adapter, u8 id, - u8 enable_mirroring, u8 pci_func) + u8 enable_mirroring, u8 pci_func) { + struct device *dev = &adapter->pdev->dev; + struct qlcnic_cmd_args cmd; int err = -EIO; u32 arg1; - struct qlcnic_cmd_args cmd; if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC || !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) @@ -901,13 +902,11 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, err = qlcnic_issue_cmd(adapter, &cmd); if (err != QLCNIC_RCODE_SUCCESS) - dev_err(&adapter->pdev->dev, - "Failed to configure port mirroring%d on eswitch:%d\n", + dev_err(dev, "Failed to configure port mirroring for vNIC function %d on eSwitch %d\n", pci_func, id); else - dev_info(&adapter->pdev->dev, - "Configured eSwitch %d for port mirroring:%d\n", - id, pci_func); + dev_info(dev, "Configured port mirroring for vNIC function %d on eSwitch %d\n", + pci_func, id); qlcnic_free_mbx_args(&cmd); return err; @@ -1122,14 +1121,13 @@ err_ret: return -EIO; } -static int -__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, - u32 *arg1, u32 *arg2) +static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, + u32 *arg1, u32 *arg2) { - int err = -EIO; + struct device *dev = &adapter->pdev->dev; struct qlcnic_cmd_args cmd; - u8 pci_func; - pci_func = (*arg1 >> 8); + u8 pci_func = *arg1 >> 8; + int err = -EIO; qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG); @@ -1140,12 +1138,11 @@ __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, qlcnic_free_mbx_args(&cmd); if (err == QLCNIC_RCODE_SUCCESS) - dev_info(&adapter->pdev->dev, - "eSwitch port config for pci func %d\n", pci_func); + dev_info(dev, "Get eSwitch port config for vNIC function %d\n", + pci_func); else - dev_err(&adapter->pdev->dev, - "Failed to get eswitch port config for pci func %d\n", - pci_func); + dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n", + pci_func); return err; } /* Configure eSwitch port @@ -1158,9 +1155,10 @@ op_type = 1 for port vlan_id int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, struct qlcnic_esw_func_cfg *esw_cfg) { + struct device *dev = &adapter->pdev->dev; + struct qlcnic_cmd_args cmd; int err = -EIO, index; u32 arg1, arg2 = 0; - struct qlcnic_cmd_args cmd; u8 pci_func; if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) @@ -1217,11 +1215,11 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, qlcnic_free_mbx_args(&cmd); if (err != QLCNIC_RCODE_SUCCESS) - dev_err(&adapter->pdev->dev, - "Failed to configure eswitch pci func %d\n", pci_func); + dev_err(dev, "Failed to configure eswitch for vNIC function %d\n", + pci_func); else - dev_info(&adapter->pdev->dev, - "Configured eSwitch for pci func %d\n", pci_func); + dev_info(dev, "Configured eSwitch for vNIC function %d\n", + pci_func); return err; } -- cgit v1.2.3 From 6a531b7b1989ddef5ee45c619970037eb2374c0d Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Thu, 23 May 2013 21:04:36 +0000 Subject: qlcnic: Update version to 5.2.43 Signed-off-by: Shahed Shaikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 00a09573faab..534e36ed855a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -38,8 +38,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 2 -#define _QLCNIC_LINUX_SUBVERSION 42 -#define QLCNIC_LINUX_VERSIONID "5.2.42" +#define _QLCNIC_LINUX_SUBVERSION 43 +#define QLCNIC_LINUX_VERSIONID "5.2.43" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) -- cgit v1.2.3 From 8513fbd880093f00a47e85a552f14ca2de8d84d6 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Thu, 23 May 2013 00:52:31 +0000 Subject: net: ethernet: use platform_{get,set}_drvdata() Use the wrapper functions for getting and setting the driver data using platform_device instead of using dev_{get,set}_drvdata() with &pdev->dev, so we can directly pass a struct platform_device. Also, unnecessary dev_set_drvdata() is removed, because the driver core clears the driver data to NULL after device_release or on probe failure. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller --- drivers/net/ethernet/aeroflex/greth.c | 4 +--- drivers/net/ethernet/amd/sunlance.c | 6 ++---- drivers/net/ethernet/broadcom/sb1250-mac.c | 9 ++++----- drivers/net/ethernet/freescale/fec_mpc52xx.c | 9 ++++----- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 5 ++--- drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 6 ++---- drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 6 ++---- drivers/net/ethernet/freescale/gianfar.c | 6 ++---- drivers/net/ethernet/freescale/gianfar_ptp.c | 4 ++-- drivers/net/ethernet/freescale/ucc_geth.c | 4 ++-- drivers/net/ethernet/freescale/xgmac_mdio.c | 4 ++-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 4 ++-- drivers/net/ethernet/ibm/emac/mal.c | 6 ++---- drivers/net/ethernet/ibm/emac/rgmii.c | 18 ++++++++---------- drivers/net/ethernet/ibm/emac/tah.c | 14 ++++++-------- drivers/net/ethernet/ibm/emac/zmii.c | 18 ++++++++---------- drivers/net/ethernet/netx-eth.c | 4 +--- drivers/net/ethernet/octeon/octeon_mgmt.c | 4 ++-- drivers/net/ethernet/sun/niu.c | 5 ++--- drivers/net/ethernet/sun/sunhme.c | 4 ++-- drivers/net/ethernet/sun/sunqe.c | 10 ++++------ drivers/net/ethernet/xilinx/ll_temac_main.c | 5 ++--- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 6 ++---- 23 files changed, 66 insertions(+), 95 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 269295403fc4..7ff4b30d55ea 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1565,7 +1565,7 @@ error1: static int greth_of_remove(struct platform_device *of_dev) { - struct net_device *ndev = dev_get_drvdata(&of_dev->dev); + struct net_device *ndev = platform_get_drvdata(of_dev); struct greth_private *greth = netdev_priv(ndev); /* Free descriptor areas */ @@ -1573,8 +1573,6 @@ static int greth_of_remove(struct platform_device *of_dev) dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys); - dev_set_drvdata(&of_dev->dev, NULL); - if (greth->phy) phy_stop(greth->phy); mdiobus_unregister(greth->mdio); diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c 
index f47b780892e9..ece56831a647 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1470,7 +1470,7 @@ no_link_test: goto fail; } - dev_set_drvdata(&op->dev, lp); + platform_set_drvdata(op, lp); printk(KERN_INFO "%s: LANCE %pM\n", dev->name, dev->dev_addr); @@ -1501,7 +1501,7 @@ static int sunlance_sbus_probe(struct platform_device *op) static int sunlance_sbus_remove(struct platform_device *op) { - struct lance_private *lp = dev_get_drvdata(&op->dev); + struct lance_private *lp = platform_get_drvdata(op); struct net_device *net_dev = lp->dev; unregister_netdev(net_dev); @@ -1510,8 +1510,6 @@ static int sunlance_sbus_remove(struct platform_device *op) free_netdev(net_dev); - dev_set_drvdata(&op->dev, NULL); - return 0; } diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index e80bfb60c3ef..c2777712da99 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2197,7 +2197,7 @@ static const struct net_device_ops sbmac_netdev_ops = { static int sbmac_init(struct platform_device *pldev, long long base) { - struct net_device *dev = dev_get_drvdata(&pldev->dev); + struct net_device *dev = platform_get_drvdata(pldev); int idx = pldev->id; struct sbmac_softc *sc = netdev_priv(dev); unsigned char *eaddr; @@ -2275,7 +2275,7 @@ static int sbmac_init(struct platform_device *pldev, long long base) dev->name); goto free_mdio; } - dev_set_drvdata(&pldev->dev, sc->mii_bus); + platform_set_drvdata(pldev, sc->mii_bus); err = register_netdev(dev); if (err) { @@ -2300,7 +2300,6 @@ static int sbmac_init(struct platform_device *pldev, long long base) return 0; unreg_mdio: mdiobus_unregister(sc->mii_bus); - dev_set_drvdata(&pldev->dev, NULL); free_mdio: mdiobus_free(sc->mii_bus); uninit_ctx: @@ -2624,7 +2623,7 @@ static int sbmac_probe(struct platform_device *pldev) goto out_unmap; } - dev_set_drvdata(&pldev->dev, dev); + platform_set_drvdata(pldev, dev); SET_NETDEV_DEV(dev, &pldev->dev); sc = netdev_priv(dev); @@ -2649,7 +2648,7 @@ out_out: static int __exit sbmac_remove(struct platform_device *pldev) { - struct net_device *dev = dev_get_drvdata(&pldev->dev); + struct net_device *dev = platform_get_drvdata(pldev); struct sbmac_softc *sc = netdev_priv(dev); unregister_netdev(dev); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 9bc15e2365bb..9947765e90c5 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -981,7 +981,7 @@ static int mpc52xx_fec_probe(struct platform_device *op) goto err_node; /* We're done ! 
*/ - dev_set_drvdata(&op->dev, ndev); + platform_set_drvdata(op, ndev); netdev_info(ndev, "%s MAC %pM\n", op->dev.of_node->full_name, ndev->dev_addr); @@ -1010,7 +1010,7 @@ mpc52xx_fec_remove(struct platform_device *op) struct net_device *ndev; struct mpc52xx_fec_priv *priv; - ndev = dev_get_drvdata(&op->dev); + ndev = platform_get_drvdata(op); priv = netdev_priv(ndev); unregister_netdev(ndev); @@ -1030,14 +1030,13 @@ mpc52xx_fec_remove(struct platform_device *op) free_netdev(ndev); - dev_set_drvdata(&op->dev, NULL); return 0; } #ifdef CONFIG_PM static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state) { - struct net_device *dev = dev_get_drvdata(&op->dev); + struct net_device *dev = platform_get_drvdata(op); if (netif_running(dev)) mpc52xx_fec_close(dev); @@ -1047,7 +1046,7 @@ static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state static int mpc52xx_fec_of_resume(struct platform_device *op) { - struct net_device *dev = dev_get_drvdata(&op->dev); + struct net_device *dev = platform_get_drvdata(op); mpc52xx_fec_hw_init(dev); mpc52xx_fec_reset_stats(dev); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index edc120094c34..8de53a14a6f4 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1048,7 +1048,7 @@ static int fs_enet_probe(struct platform_device *ofdev) } SET_NETDEV_DEV(ndev, &ofdev->dev); - dev_set_drvdata(&ofdev->dev, ndev); + platform_set_drvdata(ofdev, ndev); fep = netdev_priv(ndev); fep->dev = &ofdev->dev; @@ -1106,7 +1106,6 @@ out_cleanup_data: fep->ops->cleanup_data(ndev); out_free_dev: free_netdev(ndev); - dev_set_drvdata(&ofdev->dev, NULL); out_put: of_node_put(fpi->phy_node); out_free_fpi: @@ -1116,7 +1115,7 @@ out_free_fpi: static int fs_enet_remove(struct platform_device *ofdev) { - struct net_device *ndev = dev_get_drvdata(&ofdev->dev); + struct net_device *ndev = platform_get_drvdata(ofdev); struct fs_enet_private *fep = netdev_priv(ndev); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 2bafbd37c247..844ecfa84d17 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -179,7 +179,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev) } new_bus->parent = &ofdev->dev; - dev_set_drvdata(&ofdev->dev, new_bus); + platform_set_drvdata(ofdev, new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) @@ -188,7 +188,6 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev) return 0; out_free_irqs: - dev_set_drvdata(&ofdev->dev, NULL); kfree(new_bus->irq); out_unmap_regs: iounmap(bitbang->dir); @@ -202,11 +201,10 @@ out: static int fs_enet_mdio_remove(struct platform_device *ofdev) { - struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); + struct mii_bus *bus = platform_get_drvdata(ofdev); struct bb_info *bitbang = bus->priv; mdiobus_unregister(bus); - dev_set_drvdata(&ofdev->dev, NULL); kfree(bus->irq); free_mdio_bitbang(bus); iounmap(bitbang->dir); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 18e8ef203736..2f1c46a12f05 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -180,7 +180,7 @@ static int fs_enet_mdio_probe(struct 
platform_device *ofdev) } new_bus->parent = &ofdev->dev; - dev_set_drvdata(&ofdev->dev, new_bus); + platform_set_drvdata(ofdev, new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) @@ -189,7 +189,6 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev) return 0; out_free_irqs: - dev_set_drvdata(&ofdev->dev, NULL); kfree(new_bus->irq); out_unmap_regs: iounmap(fec->fecp); @@ -204,11 +203,10 @@ out: static int fs_enet_mdio_remove(struct platform_device *ofdev) { - struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); + struct mii_bus *bus = platform_get_drvdata(ofdev); struct fec_info *fec = bus->priv; mdiobus_unregister(bus); - dev_set_drvdata(&ofdev->dev, NULL); kfree(bus->irq); iounmap(fec->fecp); kfree(fec); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 2375a01715a0..14f06947e70d 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1000,7 +1000,7 @@ static int gfar_probe(struct platform_device *ofdev) spin_lock_init(&priv->bflock); INIT_WORK(&priv->reset_task, gfar_reset_task); - dev_set_drvdata(&ofdev->dev, priv); + platform_set_drvdata(ofdev, priv); regs = priv->gfargrp[0].regs; gfar_detect_errata(priv); @@ -1240,15 +1240,13 @@ register_fail: static int gfar_remove(struct platform_device *ofdev) { - struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); + struct gfar_private *priv = platform_get_drvdata(ofdev); if (priv->phy_node) of_node_put(priv->phy_node); if (priv->tbi_node) of_node_put(priv->tbi_node); - dev_set_drvdata(&ofdev->dev, NULL); - unregister_netdev(priv->ndev); unmap_group_regs(priv); free_gfar_dev(priv); diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 083ea2b4d20a..098f133908ae 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -519,7 +519,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) } gfar_phc_index = ptp_clock_index(etsects->clock); - dev_set_drvdata(&dev->dev, etsects); + platform_set_drvdata(dev, etsects); return 0; @@ -537,7 +537,7 @@ no_memory: static int gianfar_ptp_remove(struct platform_device *dev) { - struct etsects *etsects = dev_get_drvdata(&dev->dev); + struct etsects *etsects = platform_get_drvdata(dev); gfar_write(&etsects->regs->tmr_temask, 0); gfar_write(&etsects->regs->tmr_ctrl, 0); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index e04c59818f60..3c43dac894ec 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3564,7 +3564,7 @@ static void ucc_geth_timeout(struct net_device *dev) static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) { - struct net_device *ndev = dev_get_drvdata(&ofdev->dev); + struct net_device *ndev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(ndev); if (!netif_running(ndev)) @@ -3592,7 +3592,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) static int ucc_geth_resume(struct platform_device *ofdev) { - struct net_device *ndev = dev_get_drvdata(&ofdev->dev); + struct net_device *ndev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(ndev); int err; diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index 418068b941b1..c1b6e7e31aac 100644 --- 
a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -227,7 +227,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev) goto err_registration; } - dev_set_drvdata(&pdev->dev, bus); + platform_set_drvdata(pdev, bus); return 0; @@ -242,7 +242,7 @@ err_ioremap: static int xgmac_mdio_remove(struct platform_device *pdev) { - struct mii_bus *bus = dev_get_drvdata(&pdev->dev); + struct mii_bus *bus = platform_get_drvdata(pdev); mdiobus_unregister(bus); iounmap(bus->priv); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 90ea0b1673ca..0605e76c7edd 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3289,7 +3289,7 @@ static int ehea_probe_adapter(struct platform_device *dev, adapter->pd = EHEA_PD_ID; - dev_set_drvdata(&dev->dev, adapter); + platform_set_drvdata(dev, adapter); /* initialize adapter and ports */ @@ -3360,7 +3360,7 @@ out: static int ehea_remove(struct platform_device *dev) { - struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev); + struct ehea_adapter *adapter = platform_get_drvdata(dev); int i; for (i = 0; i < EHEA_MAX_PORTS; i++) diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 610ed223d1db..856ea66c9223 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -696,7 +696,7 @@ static int mal_probe(struct platform_device *ofdev) /* Advertise this instance to the rest of the world */ wmb(); - dev_set_drvdata(&ofdev->dev, mal); + platform_set_drvdata(ofdev, mal); mal_dbg_register(mal); @@ -722,7 +722,7 @@ static int mal_probe(struct platform_device *ofdev) static int mal_remove(struct platform_device *ofdev) { - struct mal_instance *mal = dev_get_drvdata(&ofdev->dev); + struct mal_instance *mal = platform_get_drvdata(ofdev); MAL_DBG(mal, "remove" NL); @@ -735,8 +735,6 @@ static int mal_remove(struct platform_device *ofdev) "mal%d: commac list is not empty on remove!\n", mal->index); - dev_set_drvdata(&ofdev->dev, NULL); - free_irq(mal->serr_irq, mal); free_irq(mal->txde_irq, mal); free_irq(mal->txeob_irq, mal); diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index 39251765b55d..c47e23d6eeaa 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -95,7 +95,7 @@ static inline u32 rgmii_mode_mask(int mode, int input) int rgmii_attach(struct platform_device *ofdev, int input, int mode) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); struct rgmii_regs __iomem *p = dev->base; RGMII_DBG(dev, "attach(%d)" NL, input); @@ -124,7 +124,7 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode) void rgmii_set_speed(struct platform_device *ofdev, int input, int speed) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); struct rgmii_regs __iomem *p = dev->base; u32 ssr; @@ -146,7 +146,7 @@ void rgmii_set_speed(struct platform_device *ofdev, int input, int speed) void rgmii_get_mdio(struct platform_device *ofdev, int input) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); struct rgmii_regs __iomem *p = dev->base; u32 fer; @@ -167,7 +167,7 @@ void rgmii_get_mdio(struct platform_device *ofdev, int input) void rgmii_put_mdio(struct 
platform_device *ofdev, int input) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); struct rgmii_regs __iomem *p = dev->base; u32 fer; @@ -188,7 +188,7 @@ void rgmii_put_mdio(struct platform_device *ofdev, int input) void rgmii_detach(struct platform_device *ofdev, int input) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); struct rgmii_regs __iomem *p; BUG_ON(!dev || dev->users == 0); @@ -214,7 +214,7 @@ int rgmii_get_regs_len(struct platform_device *ofdev) void *rgmii_dump_regs(struct platform_device *ofdev, void *buf) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); struct emac_ethtool_regs_subhdr *hdr = buf; struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1); @@ -279,7 +279,7 @@ static int rgmii_probe(struct platform_device *ofdev) (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out"); wmb(); - dev_set_drvdata(&ofdev->dev, dev); + platform_set_drvdata(ofdev, dev); return 0; @@ -291,9 +291,7 @@ static int rgmii_probe(struct platform_device *ofdev) static int rgmii_remove(struct platform_device *ofdev) { - struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); - - dev_set_drvdata(&ofdev->dev, NULL); + struct rgmii_instance *dev = platform_get_drvdata(ofdev); WARN_ON(dev->users != 0); diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c index 795f1393e2b6..c231a4a32c4d 100644 --- a/drivers/net/ethernet/ibm/emac/tah.c +++ b/drivers/net/ethernet/ibm/emac/tah.c @@ -25,7 +25,7 @@ int tah_attach(struct platform_device *ofdev, int channel) { - struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + struct tah_instance *dev = platform_get_drvdata(ofdev); mutex_lock(&dev->lock); /* Reset has been done at probe() time... 
nothing else to do for now */ @@ -37,7 +37,7 @@ int tah_attach(struct platform_device *ofdev, int channel) void tah_detach(struct platform_device *ofdev, int channel) { - struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + struct tah_instance *dev = platform_get_drvdata(ofdev); mutex_lock(&dev->lock); --dev->users; @@ -46,7 +46,7 @@ void tah_detach(struct platform_device *ofdev, int channel) void tah_reset(struct platform_device *ofdev) { - struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + struct tah_instance *dev = platform_get_drvdata(ofdev); struct tah_regs __iomem *p = dev->base; int n; @@ -74,7 +74,7 @@ int tah_get_regs_len(struct platform_device *ofdev) void *tah_dump_regs(struct platform_device *ofdev, void *buf) { - struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + struct tah_instance *dev = platform_get_drvdata(ofdev); struct emac_ethtool_regs_subhdr *hdr = buf; struct tah_regs *regs = (struct tah_regs *)(hdr + 1); @@ -118,7 +118,7 @@ static int tah_probe(struct platform_device *ofdev) goto err_free; } - dev_set_drvdata(&ofdev->dev, dev); + platform_set_drvdata(ofdev, dev); /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */ tah_reset(ofdev); @@ -137,9 +137,7 @@ static int tah_probe(struct platform_device *ofdev) static int tah_remove(struct platform_device *ofdev) { - struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); - - dev_set_drvdata(&ofdev->dev, NULL); + struct tah_instance *dev = platform_get_drvdata(ofdev); WARN_ON(dev->users != 0); diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c index f91202f42125..4cdf286f7ee3 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.c +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -84,7 +84,7 @@ static inline u32 zmii_mode_mask(int mode, int input) int zmii_attach(struct platform_device *ofdev, int input, int *mode) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct zmii_instance *dev = platform_get_drvdata(ofdev); struct zmii_regs __iomem *p = dev->base; ZMII_DBG(dev, "init(%d, %d)" NL, input, *mode); @@ -150,7 +150,7 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode) void zmii_get_mdio(struct platform_device *ofdev, int input) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct zmii_instance *dev = platform_get_drvdata(ofdev); u32 fer; ZMII_DBG2(dev, "get_mdio(%d)" NL, input); @@ -163,7 +163,7 @@ void zmii_get_mdio(struct platform_device *ofdev, int input) void zmii_put_mdio(struct platform_device *ofdev, int input) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct zmii_instance *dev = platform_get_drvdata(ofdev); ZMII_DBG2(dev, "put_mdio(%d)" NL, input); mutex_unlock(&dev->lock); @@ -172,7 +172,7 @@ void zmii_put_mdio(struct platform_device *ofdev, int input) void zmii_set_speed(struct platform_device *ofdev, int input, int speed) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct zmii_instance *dev = platform_get_drvdata(ofdev); u32 ssr; mutex_lock(&dev->lock); @@ -193,7 +193,7 @@ void zmii_set_speed(struct platform_device *ofdev, int input, int speed) void zmii_detach(struct platform_device *ofdev, int input) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct zmii_instance *dev = platform_get_drvdata(ofdev); BUG_ON(!dev || dev->users == 0); @@ -218,7 +218,7 @@ int zmii_get_regs_len(struct platform_device *ofdev) void *zmii_dump_regs(struct platform_device *ofdev, void *buf) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); 
+ struct zmii_instance *dev = platform_get_drvdata(ofdev); struct emac_ethtool_regs_subhdr *hdr = buf; struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1); @@ -272,7 +272,7 @@ static int zmii_probe(struct platform_device *ofdev) printk(KERN_INFO "ZMII %s initialized\n", ofdev->dev.of_node->full_name); wmb(); - dev_set_drvdata(&ofdev->dev, dev); + platform_set_drvdata(ofdev, dev); return 0; @@ -284,9 +284,7 @@ static int zmii_probe(struct platform_device *ofdev) static int zmii_remove(struct platform_device *ofdev) { - struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); - - dev_set_drvdata(&ofdev->dev, NULL); + struct zmii_instance *dev = platform_get_drvdata(ofdev); WARN_ON(dev->users != 0); diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c index cb9e63831500..5da3ffb61062 100644 --- a/drivers/net/ethernet/netx-eth.c +++ b/drivers/net/ethernet/netx-eth.c @@ -430,11 +430,9 @@ exit: static int netx_eth_drv_remove(struct platform_device *pdev) { - struct net_device *ndev = dev_get_drvdata(&pdev->dev); + struct net_device *ndev = platform_get_drvdata(pdev); struct netx_eth_priv *priv = netdev_priv(ndev); - platform_set_drvdata(pdev, NULL); - unregister_netdev(ndev); xc_stop(priv->xc); free_xc(priv->xc); diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index 921729f9c85c..e6e029237a63 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -1437,7 +1437,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev) SET_NETDEV_DEV(netdev, &pdev->dev); - dev_set_drvdata(&pdev->dev, netdev); + platform_set_drvdata(pdev, netdev); p = netdev_priv(netdev); netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, OCTEON_MGMT_NAPI_WEIGHT); @@ -1559,7 +1559,7 @@ err: static int octeon_mgmt_remove(struct platform_device *pdev) { - struct net_device *netdev = dev_get_drvdata(&pdev->dev); + struct net_device *netdev = platform_get_drvdata(pdev); unregister_netdev(netdev); free_netdev(netdev); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 95cff98d8a34..fa322409bff3 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -10108,7 +10108,7 @@ static int niu_of_probe(struct platform_device *op) goto err_out_iounmap; } - dev_set_drvdata(&op->dev, dev); + platform_set_drvdata(op, dev); niu_device_announce(np); @@ -10145,7 +10145,7 @@ err_out: static int niu_of_remove(struct platform_device *op) { - struct net_device *dev = dev_get_drvdata(&op->dev); + struct net_device *dev = platform_get_drvdata(op); if (dev) { struct niu *np = netdev_priv(dev); @@ -10175,7 +10175,6 @@ static int niu_of_remove(struct platform_device *op) niu_put_parent(np); free_netdev(dev); - dev_set_drvdata(&op->dev, NULL); } return 0; } diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 436fa9d5a071..171f5b0809c4 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2506,7 +2506,7 @@ static struct quattro *quattro_sbus_find(struct platform_device *child) struct quattro *qp; op = to_platform_device(parent); - qp = dev_get_drvdata(&op->dev); + qp = platform_get_drvdata(op); if (qp) return qp; @@ -2521,7 +2521,7 @@ static struct quattro *quattro_sbus_find(struct platform_device *child) qp->next = qfe_sbus_list; qfe_sbus_list = qp; - dev_set_drvdata(&op->dev, qp); + platform_set_drvdata(op, qp); } return qp; } diff --git a/drivers/net/ethernet/sun/sunqe.c 
b/drivers/net/ethernet/sun/sunqe.c index 8182591bc187..b072f4dba033 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -767,7 +767,7 @@ static struct sunqec *get_qec(struct platform_device *child) struct platform_device *op = to_platform_device(child->dev.parent); struct sunqec *qecp; - qecp = dev_get_drvdata(&op->dev); + qecp = platform_get_drvdata(op); if (!qecp) { qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL); if (qecp) { @@ -801,7 +801,7 @@ static struct sunqec *get_qec(struct platform_device *child) goto fail; } - dev_set_drvdata(&op->dev, qecp); + platform_set_drvdata(op, qecp); qecp->next_module = root_qec_dev; root_qec_dev = qecp; @@ -902,7 +902,7 @@ static int qec_ether_init(struct platform_device *op) if (res) goto fail; - dev_set_drvdata(&op->dev, qe); + platform_set_drvdata(op, qe); printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel, dev->dev_addr); @@ -934,7 +934,7 @@ static int qec_sbus_probe(struct platform_device *op) static int qec_sbus_remove(struct platform_device *op) { - struct sunqe *qp = dev_get_drvdata(&op->dev); + struct sunqe *qp = platform_get_drvdata(op); struct net_device *net_dev = qp->dev; unregister_netdev(net_dev); @@ -948,8 +948,6 @@ static int qec_sbus_remove(struct platform_device *op) free_netdev(net_dev); - dev_set_drvdata(&op->dev, NULL); - return 0; } diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 57c2e5ef2804..58eb4488beff 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1007,7 +1007,7 @@ static int temac_of_probe(struct platform_device *op) return -ENOMEM; ether_setup(ndev); - dev_set_drvdata(&op->dev, ndev); + platform_set_drvdata(op, ndev); SET_NETDEV_DEV(ndev, &op->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; @@ -1136,7 +1136,7 @@ static int temac_of_probe(struct platform_device *op) static int temac_of_remove(struct platform_device *op) { - struct net_device *ndev = dev_get_drvdata(&op->dev); + struct net_device *ndev = platform_get_drvdata(op); struct temac_local *lp = netdev_priv(ndev); temac_mdio_teardown(lp); @@ -1145,7 +1145,6 @@ static int temac_of_remove(struct platform_device *op) if (lp->phy_node) of_node_put(lp->phy_node); lp->phy_node = NULL; - dev_set_drvdata(&op->dev, NULL); iounmap(lp->regs); if (lp->sdma_regs) iounmap(lp->sdma_regs); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 24748e8367a1..fb7d1c28a2ea 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1484,7 +1484,7 @@ static int axienet_of_probe(struct platform_device *op) return -ENOMEM; ether_setup(ndev); - dev_set_drvdata(&op->dev, ndev); + platform_set_drvdata(op, ndev); SET_NETDEV_DEV(ndev, &op->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ @@ -1622,7 +1622,7 @@ nodev: static int axienet_of_remove(struct platform_device *op) { - struct net_device *ndev = dev_get_drvdata(&op->dev); + struct net_device *ndev = platform_get_drvdata(op); struct axienet_local *lp = netdev_priv(ndev); axienet_mdio_teardown(lp); @@ -1632,8 +1632,6 @@ static int axienet_of_remove(struct platform_device *op) of_node_put(lp->phy_node); lp->phy_node = NULL; - dev_set_drvdata(&op->dev, NULL); - iounmap(lp->regs); if (lp->dma_regs) iounmap(lp->dma_regs); -- cgit v1.2.3 From 
e09b74d0195496fdc2dbe106eaa55da48873876e Mon Sep 17 00:00:00 2001 From: Ariel Elior Date: Mon, 27 May 2013 04:08:26 +0000 Subject: bnx2x: Zero VFs starting MACs Hypervisor/Supervisor should set the VF's MAC prior to its load; Using a randomly generated MAC as a default is a bad practice. Signed-off-by: Ariel Elior Signed-off-by: Yuval Mintz Signed-off-by: Eilon Greenstein Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7ed9cdfa115e..80982c38ae69 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -11406,7 +11406,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) if (rc) return rc; } else { - random_ether_addr(bp->dev->dev_addr); + eth_zero_addr(bp->dev->dev_addr); } bnx2x_set_modes_bitmap(bp); @@ -11865,6 +11865,10 @@ static int bnx2x_validate_addr(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); + /* query the bulletin board for mac address configured by the PF */ + if (IS_VF(bp)) + bnx2x_sample_bulletin(bp); + if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) { BNX2X_ERR("Non-valid Ethernet address\n"); return -EADDRNOTAVAIL; -- cgit v1.2.3 From ca1ee4b259ea8c4f91c84ee4c737338658711272 Mon Sep 17 00:00:00 2001 From: Dmitry Kravkov Date: Mon, 27 May 2013 04:08:27 +0000 Subject: bnx2x: Add and correct PCI link speed prints This adds the print of the PCI gen3 link speed (8GHz), as well as correcting the same print for 57712 boards (the print erroneously showed a 2.5GHz speed). Signed-off-by: Dmitry Kravkov Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 5 +++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 40 +++++++++++++++--------- 2 files changed, 31 insertions(+), 14 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 946450d6d988..47b06fc22b97 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -2342,4 +2342,9 @@ enum { #define NUM_MACS 8 +enum bnx2x_pci_bus_speed { + BNX2X_PCI_LINK_SPEED_2500 = 2500, + BNX2X_PCI_LINK_SPEED_5000 = 5000, + BNX2X_PCI_LINK_SPEED_8000 = 8000 +}; #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 80982c38ae69..04fa5105ca52 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12111,15 +12111,26 @@ err_out: return rc; } -static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed) +static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, + enum bnx2x_pci_bus_speed *speed) { - u32 val = 0; + u32 link_speed, val = 0; pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val); *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; - /* return value of 1=2.5GHz 2=5GHz */ - *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; + link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; + + switch (link_speed) { + case 3: + *speed = BNX2X_PCI_LINK_SPEED_8000; + break; + case 2: + *speed = BNX2X_PCI_LINK_SPEED_5000; + break; + default: + *speed = BNX2X_PCI_LINK_SPEED_2500; + } } static int bnx2x_check_firmware(struct bnx2x *bp) @@ -12482,7 +12493,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, { struct net_device *dev = NULL; struct bnx2x *bp; - int pcie_width, pcie_speed; + int pcie_width; + enum bnx2x_pci_bus_speed pcie_speed; int rc, max_non_def_sbs; int rx_count, tx_count, rss_count, doorbell_size; int max_cos_est; @@ -12634,15 +12646,15 @@ static int bnx2x_init_one(struct pci_dev *pdev, BNX2X_DEV_INFO("got pcie width %d and speed %d\n", pcie_width, pcie_speed); - BNX2X_DEV_INFO( - "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", - board_info[ent->driver_data].name, - (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), - pcie_width, - ((!CHIP_IS_E2(bp) && pcie_speed == 2) || - (CHIP_IS_E2(bp) && pcie_speed == 1)) ? - "5GHz (Gen2)" : "2.5GHz", - dev->base_addr, bp->pdev->irq, dev->dev_addr); + BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", + board_info[ent->driver_data].name, + (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), + pcie_width, + pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" : + pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" : + pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" : + "Unknown", + dev->base_addr, bp->pdev->irq, dev->dev_addr); return 0; -- cgit v1.2.3 From 70ca5d746cf0c16576cee43fea6a19116ab49d9c Mon Sep 17 00:00:00 2001 From: Ariel Elior Date: Mon, 27 May 2013 04:08:28 +0000 Subject: bnx2x: Ack unknown VF messages A PF should ack the firmware when receiving unknown messages through the VF's mailbox. This prevents the VF mailbox from being stuck in case of a VF sending a message unknown to the PF (e.g. VF with more advanced version). Signed-off-by: Ariel Elior Signed-off-by: Yuval Mintz Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 928b074d7d80..282606677bca 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -1605,8 +1605,11 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_resp(bp, vf); } else { /* can't send a response since this VF is unknown to us - * just unlock the channel and be done with. + * just ack the FW to release the mailbox and unlock + * the channel. */ + storm_memset_vf_mbx_ack(bp, vf->abs_vfid); + mmiowb(); bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); } -- cgit v1.2.3 From 868001946e39627ec439921eeb0f5fa020e1e31d Mon Sep 17 00:00:00 2001 From: Dmitry Kravkov Date: Mon, 27 May 2013 04:08:29 +0000 Subject: bnx2x: Count number of possible FCoE interfaces Commit 0eb43b4bb081a1a22574daab9c05286a600dd7fe "bnx2x, bnx2fc: Use per port max exchange resources" has changed the number of available FCoE exchanges, even in scenarios when some of the functions has no FCoE support; This needlessly degraded the available resources. Remedy this by calculating the maximal number of functions that may actually utilize said connection. Signed-off-by: Dmitry Kravkov CC: Bhanu Prakash Gollapudi Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 51 ++++++++++++++++++++++-- 1 file changed, 48 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 04fa5105ca52..0a3bb53f3eee 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10795,12 +10795,56 @@ static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) bp->cnic_eth_dev.fcoe_wwn_node_name_lo = MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); } + +static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp) +{ + u8 count = 0; + + if (IS_MF(bp)) { + u8 fid; + + /* iterate over absolute function ids for this path: */ + for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) { + if (IS_MF_SD(bp)) { + u32 cfg = MF_CFG_RD(bp, + func_mf_config[fid].config); + + if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) && + ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) == + FUNC_MF_CFG_PROTOCOL_FCOE)) + count++; + } else { + u32 cfg = MF_CFG_RD(bp, + func_ext_config[fid]. + func_cfg); + + if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) && + (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)) + count++; + } + } + } else { /* SF */ + int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 
2 : 1; + + for (port = 0; port < port_cnt; port++) { + u32 lic = SHMEM_RD(bp, + drv_lic_key[port].max_fcoe_conn) ^ + FW_ENCODE_32BIT_PATTERN; + if (lic) + count++; + } + } + + return count; +} + static void bnx2x_get_fcoe_info(struct bnx2x *bp) { int port = BP_PORT(bp); int func = BP_ABS_FUNC(bp); u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, drv_lic_key[port].max_fcoe_conn); + u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp); if (!CNIC_SUPPORT(bp)) { bp->flags |= NO_FCOE_FLAG; @@ -10814,9 +10858,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp) /* Calculate the number of maximum allowed FCoE tasks */ bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; - if (IS_MF(bp) || CHIP_MODE_IS_4_PORT(bp)) - bp->cnic_eth_dev.max_fcoe_exchanges /= - MAX_FCOE_FUNCS_PER_ENGINE; + + /* check if FCoE resources must be shared between different functions */ + if (num_fcoe_func) + bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; /* Read the WWN: */ if (!IS_MF(bp)) { -- cgit v1.2.3 From b030ed2fdc8a396dba71e4d550236a0f1bb38b40 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Mon, 27 May 2013 04:08:30 +0000 Subject: bnx2x: Implement PCI shutdown Implement the PCI shutdown callback to support un-orderly shutdown, i.e., shutdowns in which the remove callback will not be called. Due to the lack of this functionality, when an un-orderly shutdown occurred, it was possible for wake-on-lan to remain disabled. This is now fixed as the callback introduces the correct place in which `system_state' can be queried and wake-on-lan be configured accordingly. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 71 ++++++++++++++++++++---- 1 file changed, 59 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 0a3bb53f3eee..13f814485a36 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12721,17 +12721,11 @@ init_one_exit: return rc; } -static void bnx2x_remove_one(struct pci_dev *pdev) +static void __bnx2x_remove(struct pci_dev *pdev, + struct net_device *dev, + struct bnx2x *bp, + bool remove_netdev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct bnx2x *bp; - - if (!dev) { - dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); - return; - } - bp = netdev_priv(dev); - /* Delete storage MAC address */ if (!NO_FCOE(bp)) { rtnl_lock(); @@ -12744,7 +12738,15 @@ static void bnx2x_remove_one(struct pci_dev *pdev) bnx2x_dcbnl_update_applist(bp, true); #endif - unregister_netdev(dev); + /* Close the interface - either directly or implicitly */ + if (remove_netdev) { + unregister_netdev(dev); + } else { + rtnl_lock(); + if (netif_running(dev)) + bnx2x_close(dev); + rtnl_unlock(); + } /* Power on: we can't let PCI layer write to us while we are in D3 */ if (IS_PF(bp)) @@ -12766,6 +12768,12 @@ static void bnx2x_remove_one(struct pci_dev *pdev) if (IS_VF(bp)) bnx2x_vfpf_release(bp); + /* Assumes no further PCIe PM changes will occur */ + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, bp->wol); + pci_set_power_state(pdev, PCI_D3hot); + } + if (bp->regview) iounmap(bp->regview); @@ -12780,7 +12788,8 @@ static void bnx2x_remove_one(struct pci_dev *pdev) } bnx2x_free_mem_bp(bp); - free_netdev(dev); + if (remove_netdev) + free_netdev(dev); if 
(atomic_read(&pdev->enable_cnt) == 1) pci_release_regions(pdev); @@ -12789,6 +12798,20 @@ static void bnx2x_remove_one(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } +static void bnx2x_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return; + } + bp = netdev_priv(dev); + + __bnx2x_remove(pdev, dev, bp, true); +} + static int bnx2x_eeh_nic_unload(struct bnx2x *bp) { bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; @@ -12968,6 +12991,29 @@ static const struct pci_error_handlers bnx2x_err_handler = { .resume = bnx2x_io_resume, }; +static void bnx2x_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) + return; + + bp = netdev_priv(dev); + if (!bp) + return; + + rtnl_lock(); + netif_device_detach(dev); + rtnl_unlock(); + + /* Don't remove the netdevice, as there are scenarios which will cause + * the kernel to hang, e.g., when trying to remove bnx2i while the + * rootfs is mounted from SAN. + */ + __bnx2x_remove(pdev, dev, bp, false); +} + static struct pci_driver bnx2x_pci_driver = { .name = DRV_MODULE_NAME, .id_table = bnx2x_pci_tbl, @@ -12979,6 +13025,7 @@ static struct pci_driver bnx2x_pci_driver = { #ifdef CONFIG_BNX2X_SRIOV .sriov_configure = bnx2x_sriov_configure, #endif + .shutdown = bnx2x_shutdown, }; static int __init bnx2x_init(void) -- cgit v1.2.3 From 3fb43eb20aec3f5ceed5813a035f200dafb51257 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Mon, 27 May 2013 04:08:31 +0000 Subject: bnx2x: Change to D3hot only on removal This changes the PCI power management scheme of the bnx2x driver to be similar to those of most network drivers - the driver will now change the power state into D3hot whenever the driver is removed, instead of whenever an interface is unloaded. This change enables the driver to access its eeprom via ethtool callbacks even when interfaces are unloaded (such access requires the function to be in D0active). Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S.
Miller --- .../net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 31 +++++++++++++++++----- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 3 --- 2 files changed, 24 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 32aa88f520dc..263950cddfe2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1381,12 +1381,28 @@ static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf, return rc; } +static bool bnx2x_is_nvm_accessible(struct bnx2x *bp) +{ + int rc = 1; + u16 pm = 0; + struct net_device *dev = pci_get_drvdata(bp->pdev); + + if (bp->pm_cap) + rc = pci_read_config_word(bp->pdev, + bp->pm_cap + PCI_PM_CTRL, &pm); + + if ((rc && !netif_running(dev)) || (!rc && ((pm & PCI_D0) != PCI_D0))) + return false; + + return true; +} + static int bnx2x_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *eebuf) { struct bnx2x *bp = netdev_priv(dev); - if (!netif_running(dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; @@ -1411,7 +1427,7 @@ static int bnx2x_get_module_eeprom(struct net_device *dev, u8 *user_data = data; unsigned int start_addr = ee->offset, xfer_size = 0; - if (!netif_running(dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; @@ -1474,7 +1490,7 @@ static int bnx2x_get_module_info(struct net_device *dev, int phy_idx, rc; u8 sff8472_comp, diag_type; - if (!netif_running(dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; @@ -1676,7 +1692,8 @@ static int bnx2x_set_eeprom(struct net_device *dev, int port = BP_PORT(bp); int rc = 0; u32 ext_phy_config; - if (!netif_running(dev)) { + + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; @@ -2173,7 +2190,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 } }; - if (!netif_running(bp->dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return rc; @@ -2277,7 +2294,7 @@ static int bnx2x_test_memory(struct bnx2x *bp) { NULL, 0xffffffff, {0, 0, 0, 0} } }; - if (!netif_running(bp->dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return rc; @@ -3140,7 +3157,7 @@ static int bnx2x_set_phys_id(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); - if (!netif_running(dev)) { + if (!bnx2x_is_nvm_accessible(bp)) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot access eeprom when the interface is down\n"); return -EAGAIN; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 13f814485a36..f6d0e209c44e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -11643,9 +11643,6 @@ static int bnx2x_close(struct net_device *dev) /* Unload the driver, release IRQs */ bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); - /* Power off */ - bnx2x_set_power_state(bp, PCI_D3hot); - return 
0; } -- cgit v1.2.3 From eeb65cedd78ebd375608d71d703e6b0b9296efbd Mon Sep 17 00:00:00 2001 From: Somnath Kotur Date: Sun, 26 May 2013 21:08:36 +0000 Subject: be2net: cleanup be_get_drvinfo() Removing the be_cmd_get_fw_ver() query from be_get_drvinfo() and invoking the same from be_setup() and/or post firmware download. Signed-off-by: Kalesh AP Signed-off-by: Somnath Kotur Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be.h | 1 + drivers/net/ethernet/emulex/benet/be_ethtool.c | 8 ++------ drivers/net/ethernet/emulex/benet/be_main.c | 6 +++++- 3 files changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index f544b297c9ab..edce38bf83c9 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -397,6 +397,7 @@ struct be_adapter { u32 cmd_privileges; /* Ethtool knobs and info */ char fw_ver[FW_VER_LEN]; + char fw_on_flash[FW_VER_LEN]; int if_handle; /* Used to configure filtering */ u32 *pmac_id; /* MAC addr handle used by BE card */ u32 beacon_state; /* for set_phys_id */ diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 3d4461adb3b4..f3ee07758198 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -177,19 +177,15 @@ static void be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct be_adapter *adapter = netdev_priv(netdev); - char fw_on_flash[FW_VER_LEN]; - - memset(fw_on_flash, 0 , sizeof(fw_on_flash)); - be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash); strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version)); - if (!memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN)) + if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN)) strlcpy(drvinfo->fw_version, adapter->fw_ver, sizeof(drvinfo->fw_version)); else snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%s [%s]", adapter->fw_ver, fw_on_flash); + "%s [%s]", adapter->fw_ver, adapter->fw_on_flash); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 3d5e1a8929ae..d1580823c286 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3193,7 +3193,7 @@ static int be_setup(struct be_adapter *adapter) if (status) goto err; - be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL); + be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash); if (adapter->vlans_added) be_vid_config(adapter); @@ -3785,6 +3785,10 @@ int be_load_fw(struct be_adapter *adapter, u8 *fw_file) else status = be_fw_download(adapter, fw); + if (!status) + be_cmd_get_fw_ver(adapter, adapter->fw_ver, + adapter->fw_on_flash); + fw_exit: release_firmware(fw); return status; -- cgit v1.2.3 From 48265667a756ab1c2750048caee3da719a86ebb2 Mon Sep 17 00:00:00 2001 From: Somnath Kotur Date: Sun, 26 May 2013 21:08:47 +0000 Subject: be2net: Pad skb to meet min Tx pkt size in lancer In Lancer, packets that are 32 bytes or less may cause a transmit stall. The work-around is to pad such packets to a 36 byte length. Signed-off-by: Kalesh AP Signed-off-by: Somnath Kotur Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index d1580823c286..26e222f81433 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -848,6 +848,16 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, unsigned int eth_hdr_len; struct iphdr *ip; + /* Lancer ASIC has a bug wherein packets that are 32 bytes or less + * may cause a transmit stall on that port. So the work-around is to + * pad such packets to a 36-byte length. + */ + if (unlikely(lancer_chip(adapter) && skb->len <= 32)) { + if (skb_padto(skb, 36)) + goto tx_drop; + skb->len = 36; + } + /* For padded packets, BE HW modifies tot_len field in IP header * incorrecly when VLAN tag is inserted by HW. */ -- cgit v1.2.3 From 3904dcc4a7af8e28e45a8d123cef3f7653847a51 Mon Sep 17 00:00:00 2001 From: Somnath Kotur Date: Sun, 26 May 2013 21:09:06 +0000 Subject: be2net: Trim padded packets for Lancer For padded packets, Lancer computes incorrect checksum. The workaround is to trim such packets. This workaround is mainly for IPv4 packets. Signed-off-by: Kalesh AP Signed-off-by: Somnath Kotur Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 26e222f81433..1140a86ff5ea 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -860,10 +860,12 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, /* For padded packets, BE HW modifies tot_len field in IP header * incorrecly when VLAN tag is inserted by HW. + * For padded packets, Lancer computes incorrect checksum. */ eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? VLAN_ETH_HLEN : ETH_HLEN; - if (skb->len <= 60 && vlan_tx_tag_present(skb) && + if (skb->len <= 60 && + (lancer_chip(adapter) || vlan_tx_tag_present(skb)) && is_ipv4_pkt(skb)) { ip = (struct iphdr *)ip_hdr(skb); pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); -- cgit v1.2.3 From f4e9f3d2fdb141d920c9fd7bd5ea7db348f6d3be Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 27 May 2013 03:48:29 +0000 Subject: fec: Place the phy regulator in the private structure Instead of using a local reg_phy structure, let's put it inside the private structure, so that we are able to have access to the regulator structure even when we are outside fec_probe(). This is in preparation for controlling the FEC PHY regulator in the suspend and resume functions. Signed-off-by: Fabio Estevam Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fec.h | 1 + drivers/net/ethernet/freescale/fec_main.c | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 9ce5b7185fda..b11cdbcc04ff 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -272,6 +272,7 @@ struct fec_enet_private { int hwts_tx_en; struct timer_list time_keep; struct fec_enet_delayed_work delay_work; + struct regulator *reg_phy; }; void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 0936b26b5e1e..57ac7c3453d6 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1840,7 +1840,6 @@ fec_probe(struct platform_device *pdev) struct resource *r; const struct of_device_id *of_id; static int dev_id; - struct regulator *reg_phy; of_id = of_match_device(fec_dt_ids, &pdev->dev); if (of_id) @@ -1919,9 +1918,9 @@ fec_probe(struct platform_device *pdev) clk_prepare_enable(fep->clk_enet_out); clk_prepare_enable(fep->clk_ptp); - reg_phy = devm_regulator_get(&pdev->dev, "phy"); - if (!IS_ERR(reg_phy)) { - ret = regulator_enable(reg_phy); + fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); + if (!IS_ERR(fep->reg_phy)) { + ret = regulator_enable(fep->reg_phy); if (ret) { dev_err(&pdev->dev, "Failed to enable phy regulator: %d\n", ret); -- cgit v1.2.3 From 7a2bbd8d8e36c4fae7610bb1be96c2ba5ff7553a Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 27 May 2013 03:48:30 +0000 Subject: fec: Invert the order of error path sequence Currently when fec_enet_init fails it jumps to the 'failed_init' error path, which will attempt to free the interrupts. This is wrong because at this point the interrupts have not even been acquired. Swap failed_init/failed_irq to fix the error path. Signed-off-by: Fabio Estevam Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 57ac7c3453d6..dc7388ed40e7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1975,13 +1975,13 @@ fec_probe(struct platform_device *pdev) failed_register: fec_enet_mii_remove(fep); failed_mii_init: -failed_init: +failed_irq: for (i = 0; i < FEC_IRQ_NUM; i++) { irq = platform_get_irq(pdev, i); if (irq > 0) free_irq(irq, ndev); } -failed_irq: +failed_init: failed_regulator: clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); -- cgit v1.2.3 From f6a4d607b3fdfaf305f3e78837cb8969d62c249a Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 27 May 2013 03:48:31 +0000 Subject: fec: Disable the PHY regulator on error and removal In the case of an error during probe, disable the PHY regulator. Do the same in fec_drv_remove(). Signed-off-by: Fabio Estevam Signed-off-by: David S.
Miller --- drivers/net/ethernet/freescale/fec_main.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index dc7388ed40e7..58ac0c76441f 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1926,6 +1926,8 @@ fec_probe(struct platform_device *pdev) "Failed to enable phy regulator: %d\n", ret); goto failed_regulator; } + } else { + fep->reg_phy = NULL; } fec_reset_phy(pdev); @@ -1982,6 +1984,8 @@ failed_irq: free_irq(irq, ndev); } failed_init: + if (fep->reg_phy) + regulator_disable(fep->reg_phy); failed_regulator: clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); @@ -2005,6 +2009,8 @@ fec_drv_remove(struct platform_device *pdev) unregister_netdev(ndev); fec_enet_mii_remove(fep); del_timer_sync(&fep->time_keep); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); clk_disable_unprepare(fep->clk_ptp); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); -- cgit v1.2.3 From c55284e4ddf3779f52d730bedfa264d35d42e539 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 27 May 2013 03:48:32 +0000 Subject: fec: Remove irqs first During probe the clocks are enabled prior to acquiring the interrupts. In the remove function we need to do the opposite: first remove the interrupts and then disable the clocks. Signed-off-by: Fabio Estevam Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 58ac0c76441f..cac667db61be 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2009,6 +2009,11 @@ fec_drv_remove(struct platform_device *pdev) unregister_netdev(ndev); fec_enet_mii_remove(fep); del_timer_sync(&fep->time_keep); + for (i = 0; i < FEC_IRQ_NUM; i++) { + int irq = platform_get_irq(pdev, i); + if (irq > 0) + free_irq(irq, ndev); + } if (fep->reg_phy) regulator_disable(fep->reg_phy); clk_disable_unprepare(fep->clk_ptp); @@ -2017,11 +2022,6 @@ fec_drv_remove(struct platform_device *pdev) clk_disable_unprepare(fep->clk_enet_out); clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); - for (i = 0; i < FEC_IRQ_NUM; i++) { - int irq = platform_get_irq(pdev, i); - if (irq > 0) - free_irq(irq, ndev); - } free_netdev(ndev); platform_set_drvdata(pdev, NULL); -- cgit v1.2.3 From 238f7bc74cde96e285e59a84e0c7bbd37faa8c07 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 27 May 2013 03:48:33 +0000 Subject: fec: Handle the regulator in suspend/resume In order to save power, let's disable the regulator in the suspend function and enable it in resume. Tested on a mx28evk board. Signed-off-by: Fabio Estevam Signed-off-by: David S.
Miller --- drivers/net/ethernet/freescale/fec_main.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index cac667db61be..866b92220c15 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2044,6 +2044,9 @@ fec_suspend(struct device *dev) clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); + return 0; } @@ -2052,6 +2055,13 @@ fec_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + int ret; + + if (fep->reg_phy) { + ret = regulator_enable(fep->reg_phy); + if (ret) + return ret; + } clk_prepare_enable(fep->clk_enet_out); clk_prepare_enable(fep->clk_ahb); -- cgit v1.2.3 From dfd93c977d84fef77404b689ef95bc716b313533 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Mon, 27 May 2013 19:01:12 +0000 Subject: net: ethernet: remove unnecessary platform_set_drvdata() The driver core clears the driver data to NULL after device_release or on probe failure, since commit 0998d0631001288a5974afc0b2a5f568bcdecb4d (device-core: Ensure drvdata = NULL when no driver is bound). Thus, it is not needed to manually clear the device driver data to NULL. Signed-off-by: Jingoo Han Acked-by: Nicolas Ferre Acked-by: Rob Herring Acked-by: Roland Stigge Acked-by: Mugunthan V N Reviewed-by: H Hartley Sweeten Tested-by: Roland Stigge Signed-off-by: David S. Miller --- drivers/net/ethernet/8390/ne.c | 1 - drivers/net/ethernet/adi/bfin_mac.c | 4 ---- drivers/net/ethernet/amd/au1000_eth.c | 2 -- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 1 - drivers/net/ethernet/cadence/at91_ether.c | 1 - drivers/net/ethernet/cadence/macb.c | 2 -- drivers/net/ethernet/calxeda/xgmac.c | 2 -- drivers/net/ethernet/cirrus/ep93xx_eth.c | 1 - drivers/net/ethernet/davicom/dm9000.c | 2 -- drivers/net/ethernet/ethoc.c | 2 -- drivers/net/ethernet/faraday/ftgmac100.c | 2 -- drivers/net/ethernet/faraday/ftmac100.c | 2 -- drivers/net/ethernet/freescale/fec_main.c | 2 -- drivers/net/ethernet/korina.c | 1 - drivers/net/ethernet/marvell/mv643xx_eth.c | 2 -- drivers/net/ethernet/marvell/mvneta.c | 2 -- drivers/net/ethernet/marvell/pxa168_eth.c | 1 - drivers/net/ethernet/micrel/ks8695net.c | 1 - drivers/net/ethernet/micrel/ks8842.c | 1 - drivers/net/ethernet/micrel/ks8851_mll.c | 1 - drivers/net/ethernet/netx-eth.c | 1 - drivers/net/ethernet/nuvoton/w90p910_ether.c | 2 -- drivers/net/ethernet/nxp/lpc_eth.c | 2 -- drivers/net/ethernet/renesas/sh_eth.c | 1 - drivers/net/ethernet/s6gmac.c | 1 - drivers/net/ethernet/seeq/sgiseeq.c | 1 - drivers/net/ethernet/sgi/meth.c | 1 - drivers/net/ethernet/smsc/smc911x.c | 2 -- drivers/net/ethernet/smsc/smc91x.c | 3 --- drivers/net/ethernet/smsc/smsc911x.c | 2 -- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 2 -- drivers/net/ethernet/ti/cpsw.c | 1 - drivers/net/ethernet/ti/davinci_emac.c | 2 -- drivers/net/ethernet/tundra/tsi108_eth.c | 1 - drivers/net/ethernet/wiznet/w5100.c | 2 -- drivers/net/ethernet/wiznet/w5300.c | 2 -- drivers/net/ethernet/xscale/ixp4xx_eth.c | 2 -- 37 files changed, 61 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 47618e505355..b2e840513735 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -849,7 +849,6 @@ static int 
ne_drv_remove(struct platform_device *pdev) free_irq(dev->irq, dev); release_region(dev->base_addr, NE_IO_EXTENT); free_netdev(dev); - platform_set_drvdata(pdev, NULL); } return 0; } diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index dada66bfe0d6..e904b3838dcc 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1719,7 +1719,6 @@ out_err_mii_probe: mdiobus_unregister(lp->mii_bus); mdiobus_free(lp->mii_bus); out_err_probe_mac: - platform_set_drvdata(pdev, NULL); free_netdev(ndev); return rc; @@ -1732,8 +1731,6 @@ static int bfin_mac_remove(struct platform_device *pdev) bfin_phc_release(lp); - platform_set_drvdata(pdev, NULL); - lp->mii_bus->priv = NULL; unregister_netdev(ndev); @@ -1868,7 +1865,6 @@ static int bfin_mii_bus_remove(struct platform_device *pdev) struct bfin_mii_bus_platform_data *mii_bus_pd = dev_get_platdata(&pdev->dev); - platform_set_drvdata(pdev, NULL); mdiobus_unregister(miibus); kfree(miibus->irq); mdiobus_free(miibus); diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 688aede742c7..ceb45bc963a9 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1301,8 +1301,6 @@ static int au1000_remove(struct platform_device *pdev) int i; struct resource *base, *macen; - platform_set_drvdata(pdev, NULL); - unregister_netdev(dev); mdiobus_unregister(aup->mii_bus); mdiobus_free(aup->mii_bus); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 0b3e23ec37f7..e46466cb3627 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1847,7 +1847,6 @@ static int bcm_enet_remove(struct platform_device *pdev) clk_disable_unprepare(priv->mac_clk); clk_put(priv->mac_clk); - platform_set_drvdata(pdev, NULL); free_netdev(dev); return 0; } diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c index cc9a185f0abb..3f1957158a3b 100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c @@ -435,7 +435,6 @@ static int at91ether_remove(struct platform_device *pdev) unregister_netdev(dev); clk_disable(lp->pclk); free_netdev(dev); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index c89aa41dd448..4465e27ba9d1 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1649,7 +1649,6 @@ err_out_put_pclk: err_out_free_dev: free_netdev(dev); err_out: - platform_set_drvdata(pdev, NULL); return err; } @@ -1675,7 +1674,6 @@ static int __exit macb_remove(struct platform_device *pdev) clk_disable_unprepare(bp->pclk); clk_put(bp->pclk); free_netdev(dev); - platform_set_drvdata(pdev, NULL); } return 0; diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 4a1f2fa812ab..7cb148c495c9 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1790,7 +1790,6 @@ err_io: free_netdev(ndev); err_alloc: release_mem_region(res->start, resource_size(res)); - platform_set_drvdata(pdev, NULL); return ret; } @@ -1813,7 +1812,6 @@ static int xgmac_remove(struct platform_device *pdev) free_irq(ndev->irq, ndev); free_irq(priv->pmt_irq, ndev); - platform_set_drvdata(pdev, NULL); unregister_netdev(ndev); netif_napi_del(&priv->napi); diff --git 
a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 67b0388b6e68..e3d4ec836f8b 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -783,7 +783,6 @@ static int ep93xx_eth_remove(struct platform_device *pdev) dev = platform_get_drvdata(pdev); if (dev == NULL) return 0; - platform_set_drvdata(pdev, NULL); ep = netdev_priv(dev); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index dd243a1b03e0..a13b312b50f2 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1699,8 +1699,6 @@ dm9000_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); - platform_set_drvdata(pdev, NULL); - unregister_netdev(ndev); dm9000_release_board(pdev, netdev_priv(ndev)); free_netdev(ndev); /* free device structure */ diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 5722bc61fa58..cf579fb39bc5 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1147,8 +1147,6 @@ static int ethoc_remove(struct platform_device *pdev) struct net_device *netdev = platform_get_drvdata(pdev); struct ethoc *priv = netdev_priv(netdev); - platform_set_drvdata(pdev, NULL); - if (netdev) { netif_napi_del(&priv->napi); phy_disconnect(priv->phy); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 21b85fb7d05f..934e1ae279f0 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1311,7 +1311,6 @@ err_ioremap: release_resource(priv->res); err_req_mem: netif_napi_del(&priv->napi); - platform_set_drvdata(pdev, NULL); free_netdev(netdev); err_alloc_etherdev: return err; @@ -1335,7 +1334,6 @@ static int __exit ftgmac100_remove(struct platform_device *pdev) release_resource(priv->res); netif_napi_del(&priv->napi); - platform_set_drvdata(pdev, NULL); free_netdev(netdev); return 0; } diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index a6eda8d83138..4658f4cc1969 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -1149,7 +1149,6 @@ err_ioremap: release_resource(priv->res); err_req_mem: netif_napi_del(&priv->napi); - platform_set_drvdata(pdev, NULL); free_netdev(netdev); err_alloc_etherdev: return err; @@ -1169,7 +1168,6 @@ static int __exit ftmac100_remove(struct platform_device *pdev) release_resource(priv->res); netif_napi_del(&priv->napi); - platform_set_drvdata(pdev, NULL); free_netdev(netdev); return 0; } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 866b92220c15..9b0c6472bf04 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2024,8 +2024,6 @@ fec_drv_remove(struct platform_device *pdev) clk_disable_unprepare(fep->clk_ipg); free_netdev(ndev); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 0b5708520121..64646eb39e8b 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1211,7 +1211,6 @@ static int korina_remove(struct platform_device *pdev) iounmap(lp->rx_dma_regs); iounmap(lp->tx_dma_regs); - platform_set_drvdata(pdev, NULL); unregister_netdev(bif->dev); free_netdev(bif->dev); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c 
b/drivers/net/ethernet/marvell/mv643xx_eth.c index 2ad1494efbb3..afb8bcbe29f8 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2813,8 +2813,6 @@ static int mv643xx_eth_remove(struct platform_device *pdev) free_netdev(mp->dev); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index c96678555233..cd0ea204ac24 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2804,8 +2804,6 @@ static int mvneta_remove(struct platform_device *pdev) irq_dispose_mapping(dev->irq); free_netdev(dev); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 339bb323cb0c..2602cf7ba642 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1602,7 +1602,6 @@ static int pxa168_eth_remove(struct platform_device *pdev) unregister_netdev(dev); cancel_work_sync(&pep->tx_timeout_task); free_netdev(dev); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index b6c60fdef4ff..106eb972f2ac 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -1600,7 +1600,6 @@ ks8695_drv_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct ks8695_priv *ksp = netdev_priv(ndev); - platform_set_drvdata(pdev, NULL); netif_napi_del(&ksp->napi); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index fbcb9e74d7fc..e393d998be89 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -1250,7 +1250,6 @@ static int ks8842_remove(struct platform_device *pdev) iounmap(adapter->hw_addr); free_netdev(netdev); release_mem_region(iomem->start, resource_size(iomem)); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index ddaf138ce0d4..e9b1a830d582 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1671,7 +1671,6 @@ static int ks8851_remove(struct platform_device *pdev) iounmap(ks->hw_addr); free_netdev(netdev); release_mem_region(iomem->start, resource_size(iomem)); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c index 5da3ffb61062..dc2c6f561e9a 100644 --- a/drivers/net/ethernet/netx-eth.c +++ b/drivers/net/ethernet/netx-eth.c @@ -422,7 +422,6 @@ exit_free_pfifo: exit_free_xc: free_xc(priv->xc); exit_free_netdev: - platform_set_drvdata(pdev, NULL); free_netdev(ndev); exit: return ret; diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 3df8287b7452..e88bdb1aa669 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -1051,7 +1051,6 @@ failed_put_clk: clk_put(ether->clk); failed_free_rxirq: free_irq(ether->rxirq, pdev); - platform_set_drvdata(pdev, NULL); failed_free_txirq: free_irq(ether->txirq, pdev); failed_free_io: @@ -1080,7 +1079,6 @@ static int w90p910_ether_remove(struct platform_device *pdev) free_irq(ether->rxirq, dev); del_timer_sync(ðer->check_timer); - platform_set_drvdata(pdev, 
NULL); free_netdev(dev); return 0; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 55a5548d6add..a061b93efe66 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1483,7 +1483,6 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) return 0; err_out_unregister_netdev: - platform_set_drvdata(pdev, NULL); unregister_netdev(ndev); err_out_dma_unmap: if (!use_iram_for_net(&pldat->pdev->dev) || @@ -1511,7 +1510,6 @@ static int lpc_eth_drv_remove(struct platform_device *pdev) struct netdata_local *pldat = netdev_priv(ndev); unregister_netdev(ndev); - platform_set_drvdata(pdev, NULL); if (!use_iram_for_net(&pldat->pdev->dev) || pldat->dma_buff_size > lpc32xx_return_iram_size()) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 42e9dd05c936..399a7c968e0d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2803,7 +2803,6 @@ static int sh_eth_drv_remove(struct platform_device *pdev) unregister_netdev(ndev); pm_runtime_disable(&pdev->dev); free_netdev(ndev); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c index b6739afeaca1..a99739c5142c 100644 --- a/drivers/net/ethernet/s6gmac.c +++ b/drivers/net/ethernet/s6gmac.c @@ -1040,7 +1040,6 @@ static int s6gmac_remove(struct platform_device *pdev) unregister_netdev(dev); free_irq(dev->irq, dev); free_netdev(dev); - platform_set_drvdata(pdev, NULL); } return 0; } diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 0ad5694b41f8..856e523ac936 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -818,7 +818,6 @@ static int __exit sgiseeq_remove(struct platform_device *pdev) dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, sp->srings_dma); free_netdev(dev); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 4bdbaad9932d..9f5f35e041ac 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -863,7 +863,6 @@ static int __exit meth_remove(struct platform_device *pdev) unregister_netdev(dev); free_netdev(dev); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 9dd842dbb859..345558fe7367 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -2087,7 +2087,6 @@ static int smc911x_drv_probe(struct platform_device *pdev) ndev->base_addr = res->start; ret = smc911x_probe(ndev); if (ret != 0) { - platform_set_drvdata(pdev, NULL); iounmap(addr); release_both: free_netdev(ndev); @@ -2113,7 +2112,6 @@ static int smc911x_drv_remove(struct platform_device *pdev) struct resource *res; DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); - platform_set_drvdata(pdev, NULL); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index dfbf978315df..cde13be7c7de 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2299,7 +2299,6 @@ static int smc_drv_probe(struct platform_device *pdev) return 0; out_iounmap: - platform_set_drvdata(pdev, NULL); iounmap(addr); out_release_attrib: smc_release_attrib(pdev, ndev); @@ -2319,8 +2318,6 @@ static int smc_drv_remove(struct platform_device *pdev) struct smc_local *lp = 
netdev_priv(ndev); struct resource *res; - platform_set_drvdata(pdev, NULL); - unregister_netdev(ndev); free_irq(ndev->irq, ndev); diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 3663b9e04a31..a1419211585b 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2284,7 +2284,6 @@ static int smsc911x_drv_remove(struct platform_device *pdev) mdiobus_unregister(pdata->mii_bus); mdiobus_free(pdata->mii_bus); - platform_set_drvdata(pdev, NULL); unregister_netdev(dev); free_irq(dev->irq, dev); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, @@ -2539,7 +2538,6 @@ out_disable_resources: out_enable_resources_fail: smsc911x_free_resources(pdev); out_request_resources_fail: - platform_set_drvdata(pdev, NULL); iounmap(pdata->ioaddr); free_netdev(dev); out_release_io_1: diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 1d3780f55ba2..17bc7827e7ca 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -171,8 +171,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev) if (priv->plat->exit) priv->plat->exit(pdev); - platform_set_drvdata(pdev, NULL); - return ret; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 21a5b291b4b3..89a4c40d6d83 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1940,7 +1940,6 @@ static int cpsw_remove(struct platform_device *pdev) struct cpsw_priv *priv = netdev_priv(ndev); int i; - platform_set_drvdata(pdev, NULL); if (priv->data.dual_emac) unregister_netdev(cpsw_get_slave_ndev(priv, 1)); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 860e15ddfbcb..efb6f65a8be3 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -2037,8 +2037,6 @@ static int davinci_emac_remove(struct platform_device *pdev) dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); - platform_set_drvdata(pdev, NULL); - if (priv->txchan) cpdma_chan_destroy(priv->txchan); if (priv->rxchan) diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 3c69a0460832..01bdc6ca0755 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1682,7 +1682,6 @@ static int tsi108_ether_remove(struct platform_device *pdev) unregister_netdev(dev); tsi108_stop_ethernet(dev); - platform_set_drvdata(pdev, NULL); iounmap(priv->regs); iounmap(priv->phyregs); free_netdev(dev); diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index a518dcab396e..30fed08d1674 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -734,7 +734,6 @@ err_hw_probe: unregister_netdev(ndev); err_register: free_netdev(ndev); - platform_set_drvdata(pdev, NULL); return err; } @@ -750,7 +749,6 @@ static int w5100_remove(struct platform_device *pdev) unregister_netdev(ndev); free_netdev(ndev); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 6e00e3f94ce4..e92884564e1e 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -646,7 +646,6 @@ err_hw_probe: unregister_netdev(ndev); err_register: free_netdev(ndev); - 
platform_set_drvdata(pdev, NULL); return err; } @@ -662,7 +661,6 @@ static int w5300_remove(struct platform_device *pdev) unregister_netdev(ndev); free_netdev(ndev); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 6958a5e87703..3d689fcb7917 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1472,7 +1472,6 @@ err_phy_dis: phy_disconnect(port->phydev); err_free_mem: npe_port_tab[NPE_ID(port->id)] = NULL; - platform_set_drvdata(pdev, NULL); release_resource(port->mem_res); err_npe_rel: npe_release(port->npe); @@ -1489,7 +1488,6 @@ static int eth_remove_one(struct platform_device *pdev) unregister_netdev(dev); phy_disconnect(port->phydev); npe_port_tab[NPE_ID(port->id)] = NULL; - platform_set_drvdata(pdev, NULL); npe_release(port->npe); release_resource(port->mem_res); free_netdev(dev); -- cgit v1.2.3 From 87227b8b2d4d556a6924ad9af87450fdc3fcd7e3 Mon Sep 17 00:00:00 2001 From: Jean-Christophe PLAGNIOL-VILLARD Date: Thu, 23 May 2013 23:01:22 +0000 Subject: net: micrel : ks8851-ml: add dt support Signed-off-by: Jean-Christophe PLAGNIOL-VILLARD Cc: netdev@vger.kernel.org Signed-off-by: David S. Miller --- .../devicetree/bindings/net/micrel-ks8851.txt | 9 ++++++ drivers/net/ethernet/micrel/ks8851_mll.c | 33 +++++++++++++++++----- 2 files changed, 35 insertions(+), 7 deletions(-) create mode 100644 Documentation/devicetree/bindings/net/micrel-ks8851.txt (limited to 'drivers/net/ethernet') diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt new file mode 100644 index 000000000000..11ace3c3d805 --- /dev/null +++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt @@ -0,0 +1,9 @@ +Micrel KS8851 Ethernet mac + +Required properties: +- compatible = "micrel,ks8851-ml" of parallel interface +- reg : 2 physical address and size of registers for data and command +- interrupts : interrupt connection + +Optional properties: +- local-mac-address : Ethernet mac address to use diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index e9b1a830d582..ac20098b542a 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -35,6 +35,9 @@ #include #include #include +#include +#include +#include #define DRV_NAME "ks8851_mll" @@ -1524,6 +1527,13 @@ static int ks_hw_init(struct ks_net *ks) return true; } +#if defined(CONFIG_OF) +static const struct of_device_id ks8851_ml_dt_ids[] = { + { .compatible = "micrel,ks8851-mll" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids); +#endif static int ks8851_probe(struct platform_device *pdev) { @@ -1532,7 +1542,7 @@ static int ks8851_probe(struct platform_device *pdev) struct net_device *netdev; struct ks_net *ks; u16 id, data; - struct ks8851_mll_platform_data *pdata; + const char *mac; io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0); io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1); @@ -1619,13 +1629,21 @@ static int ks8851_probe(struct platform_device *pdev) ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA); /* overwriting the default MAC address */ - pdata = pdev->dev.platform_data; - if (!pdata) { - netdev_err(netdev, "No platform data\n"); - err = -ENODEV; - goto err_pdata; + if (pdev->dev.of_node) { + mac = of_get_mac_address(pdev->dev.of_node); + if (mac) + memcpy(ks->mac_addr, mac, ETH_ALEN); + } else { + struct 
ks8851_mll_platform_data *pdata; + + pdata = pdev->dev.platform_data; + if (!pdata) { + netdev_err(netdev, "No platform data\n"); + err = -ENODEV; + goto err_pdata; + } + memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN); } - memcpy(ks->mac_addr, pdata->mac_addr, 6); if (!is_valid_ether_addr(ks->mac_addr)) { /* Use random MAC address if none passed */ eth_random_addr(ks->mac_addr); @@ -1679,6 +1697,7 @@ static struct platform_driver ks8851_platform_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, + .of_match_table = of_match_ptr(ks8851_ml_dt_ids), }, .probe = ks8851_probe, .remove = ks8851_remove, -- cgit v1.2.3 From 8b513d0cf603c0a9ccf86a92cb22931f05a7bc86 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Tue, 21 May 2013 23:13:12 +0900 Subject: treewide: Fix typo in printk Correct spelling typo in various part of drivers Signed-off-by: Masanari Iida Signed-off-by: Jiri Kosina --- drivers/gpu/drm/drm_fb_cma_helper.c | 4 ++-- drivers/gpu/drm/radeon/radeon_fb.c | 2 +- drivers/infiniband/ulp/isert/ib_isert.c | 2 +- drivers/media/platform/s5p-mfc/s5p_mfc_dec.c | 2 +- drivers/misc/dummy-irq.c | 2 +- drivers/misc/lattice-ecp3-config.c | 2 +- drivers/misc/mei/hbm.c | 2 +- drivers/misc/mei/init.c | 2 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 2 +- drivers/nfc/pn533.c | 2 +- drivers/scsi/pm8001/pm80xx_hwi.c | 2 +- drivers/tty/serial/serial-tegra.c | 2 +- drivers/usb/musb/musb_dsps.c | 2 +- drivers/usb/musb/omap2430.c | 2 +- drivers/video/omap2/displays/panel-n8x0.c | 2 +- fs/btrfs/free-space-cache.c | 2 +- tools/testing/ktest/ktest.pl | 2 +- 17 files changed, 18 insertions(+), 18 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 0b5af7d0edb1..c385cc5e730e 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -92,7 +92,7 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev, ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs); if (ret) { - dev_err(dev->dev, "Failed to initalize framebuffer: %d\n", ret); + dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret); kfree(fb_cma); return ERR_PTR(ret); } @@ -376,7 +376,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev, ret = drm_fb_helper_initial_config(helper, preferred_bpp); if (ret < 0) { - dev_err(dev->dev, "Failed to set inital hw configuration.\n"); + dev_err(dev->dev, "Failed to set initial hw configuration.\n"); goto err_drm_fb_helper_fini; } diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index b1746741bc59..665ced3b7313 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -230,7 +230,7 @@ static int radeonfb_create(struct drm_fb_helper *helper, ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { - DRM_ERROR("failed to initalise framebuffer %d\n", ret); + DRM_ERROR("failed to initialize framebuffer %d\n", ret); goto out_unref; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 41712f096515..2693129055c1 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1587,7 +1587,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); isert_init_send_wr(isert_cmd, send_wr); - pr_debug("Posting NOPIN Reponse IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + pr_debug("Posting NOPIN 
Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); return isert_post_response(isert_conn, isert_cmd); } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index 4af53bd2f182..0fa1e9b38fc7 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c @@ -954,7 +954,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq, psize[0] = ctx->dec_src_buf_size; allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX]; } else { - mfc_err("This video node is dedicated to decoding. Decoding not initalised\n"); + mfc_err("This video node is dedicated to decoding. Decoding not initialized\n"); return -EINVAL; } return 0; diff --git a/drivers/misc/dummy-irq.c b/drivers/misc/dummy-irq.c index c37eeedfe215..4d0db15df115 100644 --- a/drivers/misc/dummy-irq.c +++ b/drivers/misc/dummy-irq.c @@ -26,7 +26,7 @@ static irqreturn_t dummy_interrupt(int irq, void *dev_id) static int count = 0; if (count == 0) { - printk(KERN_INFO "dummy-irq: interrupt occured on IRQ %d\n", + printk(KERN_INFO "dummy-irq: interrupt occurred on IRQ %d\n", irq); count++; } diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c index bb26f086bd8b..61fbe6acabef 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c @@ -170,7 +170,7 @@ static void firmware_load(const struct firmware *fw, void *context) /* Check result */ if (status & FPGA_STATUS_DONE) - dev_info(&spi->dev, "FPGA succesfully configured!\n"); + dev_info(&spi->dev, "FPGA successfully configured!\n"); else dev_info(&spi->dev, "FPGA not configured (DONE not set)\n"); diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 6916045166eb..54ffcbc24c70 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -139,7 +139,7 @@ int mei_hbm_start_wait(struct mei_device *dev) if (ret <= 0 && (dev->hbm_state <= MEI_HBM_START)) { dev->hbm_state = MEI_HBM_IDLE; - dev_err(&dev->pdev->dev, "wating for mei start failed\n"); + dev_err(&dev->pdev->dev, "waiting for mei start failed\n"); return -ETIMEDOUT; } return 0; diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 713d89fedc46..422d9c37dec9 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -38,7 +38,7 @@ const char *mei_dev_state_str(int state) MEI_DEV_STATE(POWER_DOWN); MEI_DEV_STATE(POWER_UP); default: - return "unkown"; + return "unknown"; } #undef MEI_DEV_STATE } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 196b2d100407..8b59a710a4a5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -1675,7 +1675,7 @@ static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter) qlcnic_sriov_vf_attach(adapter); adapter->fw_fail_cnt = 0; dev_info(dev, - "%s: Reinitalization of VF 0x%x done after FW reset\n", + "%s: Reinitialization of VF 0x%x done after FW reset\n", __func__, func); } else { dev_err(dev, diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index 8f6f2baa930d..ec269e6f0375 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -1697,7 +1697,7 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg, goto done; if (!dev->poll_mod_count) { - nfc_dev_dbg(&dev->interface->dev, "Polling has been stoped."); + nfc_dev_dbg(&dev->interface->dev, "Polling has been stopped."); goto done; } diff --git 
a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 302514d8157b..1c5e7d233e78 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -3204,7 +3204,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) break; case OPC_OUB_DEREG_DEV: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("unresgister the deviece\n")); + pm8001_printk("unregister the device\n")); pm8001_mpi_dereg_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEV_HANDLE: diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index 9799d043a9bd..ee7c8123c374 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -1377,7 +1377,7 @@ static int __init tegra_uart_init(void) ret = platform_driver_register(&tegra_uart_platform_driver); if (ret < 0) { - pr_err("Uart platfrom driver register failed, e = %d\n", ret); + pr_err("Uart platform driver register failed, e = %d\n", ret); uart_unregister_driver(&tegra_uart_driver); return ret; } diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index e1b661d04021..5233804d66b1 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -551,7 +551,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, u8 id) pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { dev_err(&pdev->dev, - "failed to allocate musb platfrom data\n"); + "failed to allocate musb platform data\n"); ret = -ENOMEM; goto err2; } diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 628b93fe5ccc..2325ef6bdeed 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -513,7 +513,7 @@ static int omap2430_probe(struct platform_device *pdev) pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { dev_err(&pdev->dev, - "failed to allocate musb platfrom data\n"); + "failed to allocate musb platform data\n"); goto err2; } diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c index f94ead6a3183..860b18014ad7 100644 --- a/drivers/video/omap2/displays/panel-n8x0.c +++ b/drivers/video/omap2/displays/panel-n8x0.c @@ -527,7 +527,7 @@ static int n8x0_panel_update(struct omap_dss_device *dssdev, dh = dssdev->panel.timings.y_res; if (x != 0 || y != 0 || w != dw || h != dh) { - dev_err(&dssdev->dev, "invaid update region %d, %d, %d, %d\n", + dev_err(&dssdev->dev, "invalid update region %d, %d, %d, %d\n", x, y, w, h); return -EINVAL; } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index e53009657f0e..2750b5023526 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -3200,7 +3200,7 @@ static int test_extents(struct btrfs_block_group_cache *cache) ret = btrfs_remove_free_space(cache, 2 * 1024 * 1024, 4096); if (ret) { - printk(KERN_ERR "Error removing middle peice %d\n", ret); + printk(KERN_ERR "Error removing middle piece %d\n", ret); return ret; } diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 0d7fd8b51544..999eab1bc64f 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -1796,7 +1796,7 @@ sub monitor { # We already booted into the kernel we are testing, # but now we booted into another kernel? # Consider this a triple fault. 
- doprint "Aleady booted in Linux kernel $version, but now\n"; + doprint "Already booted in Linux kernel $version, but now\n"; doprint "we booted into Linux kernel $1.\n"; doprint "Assuming that this is a triple fault.\n"; doprint "To disable this: set DETECT_TRIPLE_FAULT to 0\n"; -- cgit v1.2.3 From 351638e7deeed2ec8ce451b53d33921b3da68f83 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 28 May 2013 01:30:21 +0000 Subject: net: pass info struct via netdevice notifier So far, only net_device * could be passed along with netdevice notifier event. This patch provides a possibility to pass custom structure able to provide info that event listener needs to know. Signed-off-by: Jiri Pirko v2->v3: fix typo on simeth shortened dev_getter shortened notifier_info struct name v1->v2: fix notifier_call parameter in call_netdevice_notifier() Signed-off-by: David S. Miller --- arch/ia64/hp/sim/simeth.c | 2 +- arch/mips/txx9/generic/setup_tx4939.c | 3 +- drivers/infiniband/core/cma.c | 4 +- drivers/infiniband/hw/mlx4/main.c | 2 +- drivers/net/bonding/bond_main.c | 2 +- drivers/net/can/led.c | 4 +- drivers/net/ethernet/broadcom/cnic.c | 2 +- drivers/net/ethernet/marvell/skge.c | 2 +- drivers/net/ethernet/marvell/sky2.c | 2 +- .../net/ethernet/qlogic/netxen/netxen_nic_main.c | 2 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2 +- drivers/net/ethernet/sfc/efx.c | 2 +- drivers/net/hamradio/bpqether.c | 7 +-- drivers/net/macvlan.c | 2 +- drivers/net/macvtap.c | 2 +- drivers/net/netconsole.c | 5 +- drivers/net/ppp/pppoe.c | 2 +- drivers/net/team/team.c | 2 +- drivers/net/wan/dlci.c | 2 +- drivers/net/wan/hdlc.c | 2 +- drivers/net/wan/lapbether.c | 2 +- drivers/scsi/fcoe/fcoe.c | 2 +- drivers/scsi/fcoe/fcoe_transport.c | 2 +- drivers/staging/csr/netdev.c | 2 +- drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c | 2 +- drivers/staging/ft1000/ft1000-usb/ft1000_proc.c | 2 +- drivers/staging/silicom/bpctl_mod.c | 2 +- include/linux/netdevice.h | 13 +++++ net/8021q/vlan.c | 2 +- net/appletalk/aarp.c | 2 +- net/appletalk/ddp.c | 2 +- net/atm/clip.c | 4 +- net/atm/mpc.c | 6 +-- net/ax25/af_ax25.c | 6 +-- net/batman-adv/hard-interface.c | 2 +- net/bridge/br_notify.c | 2 +- net/caif/caif_dev.c | 4 +- net/caif/caif_usb.c | 4 +- net/can/af_can.c | 4 +- net/can/bcm.c | 4 +- net/can/gw.c | 4 +- net/can/raw.c | 4 +- net/core/dev.c | 56 ++++++++++++++++++---- net/core/drop_monitor.c | 4 +- net/core/dst.c | 2 +- net/core/fib_rules.c | 4 +- net/core/netprio_cgroup.c | 2 +- net/core/pktgen.c | 2 +- net/core/rtnetlink.c | 2 +- net/decnet/af_decnet.c | 4 +- net/ieee802154/6lowpan.c | 5 +- net/ipv4/arp.c | 2 +- net/ipv4/devinet.c | 2 +- net/ipv4/fib_frontend.c | 2 +- net/ipv4/ipmr.c | 2 +- net/ipv4/netfilter/ipt_MASQUERADE.c | 2 +- net/ipv6/addrconf.c | 4 +- net/ipv6/ip6mr.c | 2 +- net/ipv6/ndisc.c | 2 +- net/ipv6/netfilter/ip6t_MASQUERADE.c | 2 +- net/ipv6/route.c | 4 +- net/ipx/af_ipx.c | 2 +- net/iucv/af_iucv.c | 2 +- net/mac80211/iface.c | 5 +- net/netfilter/ipvs/ip_vs_ctl.c | 4 +- net/netfilter/nfnetlink_queue_core.c | 2 +- net/netfilter/xt_TEE.c | 2 +- net/netlabel/netlabel_unlabeled.c | 7 ++- net/netrom/af_netrom.c | 2 +- net/openvswitch/dp_notify.c | 2 +- net/packet/af_packet.c | 5 +- net/phonet/pn_dev.c | 4 +- net/rose/af_rose.c | 6 +-- net/sched/act_mirred.c | 2 +- net/tipc/eth_media.c | 4 +- net/tipc/ib_media.c | 4 +- net/wireless/core.c | 5 +- net/x25/af_x25.c | 2 +- net/xfrm/xfrm_policy.c | 2 +- security/selinux/netif.c | 2 +- 80 files changed, 172 insertions(+), 127 deletions(-) (limited to 
'drivers/net/ethernet') diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c index c13064e422df..d1b04c4c95e3 100644 --- a/arch/ia64/hp/sim/simeth.c +++ b/arch/ia64/hp/sim/simeth.c @@ -268,7 +268,7 @@ static __inline__ int dev_is_ethdev(struct net_device *dev) static int simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct simeth_local *local; struct in_device *in_dev; struct in_ifaddr **ifap = NULL; diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c index 729a50991780..b7eccbd17bf7 100644 --- a/arch/mips/txx9/generic/setup_tx4939.c +++ b/arch/mips/txx9/generic/setup_tx4939.c @@ -331,7 +331,8 @@ static int tx4939_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + if (event == NETDEV_CHANGE && netif_carrier_ok(dev)) { __u64 bit = 0; if (dev->irq == TXX9_IRQ_BASE + TX4939_IR_ETH(0)) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 71c2c7116802..34fbc2f60a09 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -3269,9 +3269,9 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id } static int cma_netdev_callback(struct notifier_block *self, unsigned long event, - void *ctx) + void *ptr) { - struct net_device *ndev = (struct net_device *)ctx; + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct cma_device *cma_dev; struct rdma_id_private *id_priv; int ret = NOTIFY_DONE; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 23d734349d8e..a188d3178559 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1161,7 +1161,7 @@ static void netdev_removed(struct mlx4_ib_dev *dev, int port) static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct mlx4_ib_dev *ibdev; struct net_device *oldnd; struct mlx4_ib_iboe *iboe; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 29b846cbfb48..f4489d65bf33 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3277,7 +3277,7 @@ static int bond_slave_netdev_event(unsigned long event, static int bond_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *event_dev = (struct net_device *)ptr; + struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); pr_debug("event_dev: %s, event: %lx\n", event_dev ? 
event_dev->name : "None", diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c index f27fca65dc4a..a3d99a8fd2d1 100644 --- a/drivers/net/can/led.c +++ b/drivers/net/can/led.c @@ -88,9 +88,9 @@ EXPORT_SYMBOL_GPL(devm_can_led_init); /* NETDEV rename notifier to rename the associated led triggers too */ static int can_led_notifier(struct notifier_block *nb, unsigned long msg, - void *data) + void *ptr) { - struct net_device *netdev = data; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct can_priv *priv = safe_candev_priv(netdev); char name[CAN_LED_NAME_SZ]; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 6b0dc131b20e..d78d4cf140ed 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -5622,7 +5622,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event, static int cnic_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *netdev = ptr; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct cnic_dev *dev; int new_dev = 0; diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 171f4b3dda07..c896079728e1 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3706,7 +3706,7 @@ static const struct file_operations skge_debug_fops = { static int skge_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct skge_port *skge; struct dentry *d; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index d175bbd3ffd3..e09a8c6f8536 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4642,7 +4642,7 @@ static const struct file_operations sky2_debug_fops = { static int sky2_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct sky2_port *sky2 = netdev_priv(dev); if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index af951f343ff6..51e13d92761e 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -3311,7 +3311,7 @@ static int netxen_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netxen_adapter *adapter; - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net_device *orig_dev = dev; struct net_device *slave; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index da82f2eb73b4..6bb56d43614b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -3530,7 +3530,7 @@ static int qlcnic_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct qlcnic_adapter *adapter; - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); recheck: if (dev == NULL) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 39e4cb39de29..46cc11d5e205 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ 
b/drivers/net/ethernet/sfc/efx.c @@ -2120,7 +2120,7 @@ static void efx_update_name(struct efx_nic *efx) static int efx_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *net_dev = ptr; + struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); if (net_dev->netdev_ops == &efx_netdev_ops && event == NETDEV_CHANGENAME) diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 02de6c891670..f91bf0ddf031 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -103,7 +103,7 @@ static struct packet_type bpq_packet_type __read_mostly = { }; static struct notifier_block bpq_dev_notifier = { - .notifier_call =bpq_device_event, + .notifier_call = bpq_device_event, }; @@ -544,9 +544,10 @@ static void bpq_free_device(struct net_device *ndev) /* * Handle device status changes. */ -static int bpq_device_event(struct notifier_block *this,unsigned long event, void *ptr) +static int bpq_device_event(struct notifier_block *this, + unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 1c502bb0c916..edfddc5f61b4 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -921,7 +921,7 @@ static struct rtnl_link_ops macvlan_link_ops = { static int macvlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct macvlan_dev *vlan, *next; struct macvlan_port *port; LIST_HEAD(list_kill); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 59e9605de316..68efb91a5633 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -1053,7 +1053,7 @@ EXPORT_SYMBOL_GPL(macvtap_get_socket); static int macvtap_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct macvlan_dev *vlan; struct device *classdev; dev_t devt; diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 59ac143dec25..1d1d0a12765c 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -653,12 +653,11 @@ static struct configfs_subsystem netconsole_subsys = { /* Handle network interface device notifications */ static int netconsole_netdev_event(struct notifier_block *this, - unsigned long event, - void *ptr) + unsigned long event, void *ptr) { unsigned long flags; struct netconsole_target *nt; - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); bool stopped = false; if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER || diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index bb07ba94c3aa..5f66e30d9823 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -338,7 +338,7 @@ static void pppoe_flush_dev(struct net_device *dev) static int pppoe_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); /* Only look at sockets that are using this specific device. 
*/ switch (event) { diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 7c43261975bd..9273f48a512b 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2647,7 +2647,7 @@ static void team_port_change_check(struct team_port *port, bool linkup) static int team_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *) ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct team_port *port; port = team_port_get_rtnl(dev); diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 147614ed86aa..70ac59929f80 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -477,7 +477,7 @@ static void dlci_setup(struct net_device *dev) static int dlci_dev_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *) ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (dev_net(dev) != &init_net) return NOTIFY_DONE; diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index a0a932c63d0a..9c33ca918e19 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c @@ -99,7 +99,7 @@ static inline void hdlc_proto_stop(struct net_device *dev) static int hdlc_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); hdlc_device *hdlc; unsigned long flags; int on; diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index a73b49eb87e3..a33a46fa88dd 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -370,7 +370,7 @@ static int lapbeth_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct lapbethdev *lapbeth; - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (dev_net(dev) != &init_net) return NOTIFY_DONE; diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 292b24f9bf93..ee721b6cbcdf 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1975,7 +1975,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, { struct fcoe_ctlr_device *cdev; struct fc_lport *lport = NULL; - struct net_device *netdev = ptr; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct fcoe_ctlr *ctlr; struct fcoe_interface *fcoe; struct fcoe_port *port; diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index f3a5a53e8631..01adbe0ec53b 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -704,7 +704,7 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer) static int libfcoe_device_notification(struct notifier_block *notifier, ulong event, void *ptr) { - struct net_device *netdev = ptr; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_UNREGISTER: diff --git a/drivers/staging/csr/netdev.c b/drivers/staging/csr/netdev.c index a0177d998978..d49cdf84a496 100644 --- a/drivers/staging/csr/netdev.c +++ b/drivers/staging/csr/netdev.c @@ -2891,7 +2891,7 @@ void uf_net_get_name(struct net_device *dev, char *name, int len) */ static int uf_netdev_event(struct notifier_block *notif, unsigned long event, void* ptr) { - struct net_device *netdev = ptr; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(netdev); unifi_priv_t *priv = 
NULL; static const CsrWifiMacAddress broadcast_address = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}}; diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c index 94e426e4d98b..b2330f1df7e7 100644 --- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c +++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c @@ -164,7 +164,7 @@ static const struct file_operations ft1000_proc_fops = { static int ft1000NotifyProc(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ft1000_info *info; info = netdev_priv(dev); diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c index eca6f0292b4b..5ead942be680 100644 --- a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c +++ b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c @@ -166,7 +166,7 @@ static const struct file_operations ft1000_proc_fops = { static int ft1000NotifyProc(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ft1000_info *info; struct proc_dir_entry *ft1000_proc_file; diff --git a/drivers/staging/silicom/bpctl_mod.c b/drivers/staging/silicom/bpctl_mod.c index b7e570ccb759..c8ddb99e8526 100644 --- a/drivers/staging/silicom/bpctl_mod.c +++ b/drivers/staging/silicom/bpctl_mod.c @@ -133,7 +133,7 @@ static unsigned long str_to_hex(char *p); static int bp_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); static bpctl_dev_t *pbpctl_dev = NULL, *pbpctl_dev_m = NULL; int dev_num = 0, ret = 0, ret_d = 0, time_left = 0; /* printk("BP_PROC_SUPPORT event =%d %s %d\n", event,dev->name, dev->ifindex ); */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6b2bb460d1d7..13a34848b5e1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1599,6 +1599,19 @@ struct packet_offload { extern int register_netdevice_notifier(struct notifier_block *nb); extern int unregister_netdevice_notifier(struct notifier_block *nb); + +struct netdev_notifier_info { + struct net_device *dev; +}; + +static inline struct net_device * +netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) +{ + return info->dev; +} + +extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev, + struct netdev_notifier_info *info); extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 9424f3718ea7..2fb2d88e8c2e 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -341,7 +341,7 @@ static void __vlan_device_event(struct net_device *dev, unsigned long event) static int vlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct vlan_group *grp; struct vlan_info *vlan_info; int i, flgs; diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c index 173a2e82f486..690356fa52b9 100644 --- a/net/appletalk/aarp.c +++ b/net/appletalk/aarp.c @@ -332,7 +332,7 @@ static void aarp_expire_timeout(unsigned long unused) static int aarp_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = 
netdev_notifier_info_to_dev(ptr); int ct; if (!net_eq(dev_net(dev), &init_net)) diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index ef12839a7cfe..7fee50d637f9 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -644,7 +644,7 @@ static inline void atalk_dev_down(struct net_device *dev) static int ddp_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/atm/clip.c b/net/atm/clip.c index 8ae3a7879335..cce241eb01d9 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -539,9 +539,9 @@ static int clip_create(int number) } static int clip_device_event(struct notifier_block *this, unsigned long event, - void *arg) + void *ptr) { - struct net_device *dev = arg; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/atm/mpc.c b/net/atm/mpc.c index d4cc1be5c364..3af12755cd04 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -998,14 +998,12 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc) } static int mpoa_event_listener(struct notifier_block *mpoa_notifier, - unsigned long event, void *dev_ptr) + unsigned long event, void *ptr) { - struct net_device *dev; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct mpoa_client *mpc; struct lec_priv *priv; - dev = dev_ptr; - if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index e277e38f736b..4b4d2b779ec1 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -111,9 +111,9 @@ again: * Handle device status changes. */ static int ax25_device_event(struct notifier_block *this, unsigned long event, - void *ptr) + void *ptr) { - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; @@ -1974,7 +1974,7 @@ static struct packet_type ax25_packet_type __read_mostly = { }; static struct notifier_block ax25_dev_notifier = { - .notifier_call =ax25_device_event, + .notifier_call = ax25_device_event, }; static int __init ax25_init(void) diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 522243aff2f3..b6504eac0ed8 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -595,7 +595,7 @@ void batadv_hardif_remove_interfaces(void) static int batadv_hard_if_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *net_dev = ptr; + struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); struct batadv_hard_iface *hard_iface; struct batadv_hard_iface *primary_if = NULL; struct batadv_priv *bat_priv; diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 1644b3e1f947..3a3f371b2841 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c @@ -31,7 +31,7 @@ struct notifier_block br_device_notifier = { */ static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net_bridge_port *p; struct net_bridge *br; bool changed_addr; diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index 1f9ece1a9c34..4dca159435cf 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -352,9 +352,9 @@ EXPORT_SYMBOL(caif_enroll_dev); /* notify Caif of 
device events */ static int caif_device_notify(struct notifier_block *me, unsigned long what, - void *arg) + void *ptr) { - struct net_device *dev = arg; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct caif_device_entry *caifd = NULL; struct caif_dev_common *caifdev; struct cfcnfg *cfg; diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c index 942e00a425fd..75ed04b78fa4 100644 --- a/net/caif/caif_usb.c +++ b/net/caif/caif_usb.c @@ -121,9 +121,9 @@ static struct packet_type caif_usb_type __read_mostly = { }; static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, - void *arg) + void *ptr) { - struct net_device *dev = arg; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct caif_dev_common common; struct cflayer *layer, *link_support; struct usbnet *usbnet; diff --git a/net/can/af_can.c b/net/can/af_can.c index c4e50852c9f4..3ab8dd2e1282 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -794,9 +794,9 @@ EXPORT_SYMBOL(can_proto_unregister); * af_can notifier to create/remove CAN netdevice specific structs */ static int can_notifier(struct notifier_block *nb, unsigned long msg, - void *data) + void *ptr) { - struct net_device *dev = (struct net_device *)data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct dev_rcv_lists *d; if (!net_eq(dev_net(dev), &init_net)) diff --git a/net/can/bcm.c b/net/can/bcm.c index 8f113e6ff327..46f20bfafc0e 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1350,9 +1350,9 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock, * notification handler for netdevice status changes */ static int bcm_notifier(struct notifier_block *nb, unsigned long msg, - void *data) + void *ptr) { - struct net_device *dev = (struct net_device *)data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier); struct sock *sk = &bo->sk; struct bcm_op *op; diff --git a/net/can/gw.c b/net/can/gw.c index 3ee690e8c7d3..2f291f961a17 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -445,9 +445,9 @@ static inline void cgw_unregister_filter(struct cgw_job *gwj) } static int cgw_notifier(struct notifier_block *nb, - unsigned long msg, void *data) + unsigned long msg, void *ptr) { - struct net_device *dev = (struct net_device *)data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/can/raw.c b/net/can/raw.c index 1085e65f848e..641e1c895123 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -239,9 +239,9 @@ static int raw_enable_allfilters(struct net_device *dev, struct sock *sk) } static int raw_notifier(struct notifier_block *nb, - unsigned long msg, void *data) + unsigned long msg, void *ptr) { - struct net_device *dev = (struct net_device *)data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct raw_sock *ro = container_of(nb, struct raw_sock, notifier); struct sock *sk = &ro->sk; diff --git a/net/core/dev.c b/net/core/dev.c index 5f747974ac58..54fce6006a83 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1391,6 +1391,20 @@ void dev_disable_lro(struct net_device *dev) } EXPORT_SYMBOL(dev_disable_lro); +static void netdev_notifier_info_init(struct netdev_notifier_info *info, + struct net_device *dev) +{ + info->dev = dev; +} + +static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, + struct net_device *dev) +{ + struct netdev_notifier_info info; + + netdev_notifier_info_init(&info, dev); + return 
nb->notifier_call(nb, val, &info); +} static int dev_boot_phase = 1; @@ -1423,7 +1437,7 @@ int register_netdevice_notifier(struct notifier_block *nb) goto unlock; for_each_net(net) { for_each_netdev(net, dev) { - err = nb->notifier_call(nb, NETDEV_REGISTER, dev); + err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); err = notifier_to_errno(err); if (err) goto rollback; @@ -1431,7 +1445,7 @@ int register_netdevice_notifier(struct notifier_block *nb) if (!(dev->flags & IFF_UP)) continue; - nb->notifier_call(nb, NETDEV_UP, dev); + call_netdevice_notifier(nb, NETDEV_UP, dev); } } @@ -1447,10 +1461,11 @@ rollback: goto outroll; if (dev->flags & IFF_UP) { - nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); - nb->notifier_call(nb, NETDEV_DOWN, dev); + call_netdevice_notifier(nb, NETDEV_GOING_DOWN, + dev); + call_netdevice_notifier(nb, NETDEV_DOWN, dev); } - nb->notifier_call(nb, NETDEV_UNREGISTER, dev); + call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); } } @@ -1488,10 +1503,11 @@ int unregister_netdevice_notifier(struct notifier_block *nb) for_each_net(net) { for_each_netdev(net, dev) { if (dev->flags & IFF_UP) { - nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); - nb->notifier_call(nb, NETDEV_DOWN, dev); + call_netdevice_notifier(nb, NETDEV_GOING_DOWN, + dev); + call_netdevice_notifier(nb, NETDEV_DOWN, dev); } - nb->notifier_call(nb, NETDEV_UNREGISTER, dev); + call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); } } unlock: @@ -1500,6 +1516,25 @@ unlock: } EXPORT_SYMBOL(unregister_netdevice_notifier); +/** + * call_netdevice_notifiers_info - call all network notifier blocks + * @val: value passed unmodified to notifier function + * @dev: net_device pointer passed unmodified to notifier function + * @info: notifier information data + * + * Call all network notifier blocks. Parameters and return value + * are as for raw_notifier_call_chain(). 
+ */ + +int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev, + struct netdev_notifier_info *info) +{ + ASSERT_RTNL(); + netdev_notifier_info_init(info, dev); + return raw_notifier_call_chain(&netdev_chain, val, info); +} +EXPORT_SYMBOL(call_netdevice_notifiers_info); + /** * call_netdevice_notifiers - call all network notifier blocks * @val: value passed unmodified to notifier function @@ -1511,8 +1546,9 @@ EXPORT_SYMBOL(unregister_netdevice_notifier); int call_netdevice_notifiers(unsigned long val, struct net_device *dev) { - ASSERT_RTNL(); - return raw_notifier_call_chain(&netdev_chain, val, dev); + struct netdev_notifier_info info; + + return call_netdevice_notifiers_info(val, dev, &info); } EXPORT_SYMBOL(call_netdevice_notifiers); diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index d23b6682f4e9..5e78d44333b9 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -295,9 +295,9 @@ static int net_dm_cmd_trace(struct sk_buff *skb, } static int dropmon_net_event(struct notifier_block *ev_block, - unsigned long event, void *ptr) + unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct dm_hw_stat_delta *new_stat = NULL; struct dm_hw_stat_delta *tmp; diff --git a/net/core/dst.c b/net/core/dst.c index df9cc810ec8e..ca4231ec7347 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -372,7 +372,7 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev, static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct dst_entry *dst, *last = NULL; switch (event) { diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index d5a9f8ead0d8..21735440c44a 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -705,9 +705,9 @@ static void detach_rules(struct list_head *rules, struct net_device *dev) static int fib_rules_event(struct notifier_block *this, unsigned long event, - void *ptr) + void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct fib_rules_ops *ops; diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 0777d0aa18c3..e533259dce3c 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -261,7 +261,7 @@ struct cgroup_subsys net_prio_subsys = { static int netprio_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct netprio_map *old; /* diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 11f2704c3810..795498fd4587 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1921,7 +1921,7 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d static int pktgen_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct pktgen_net *pn = net_generic(dev_net(dev), pg_net_id); if (pn->pktgen_exiting) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a08bd2b7fe3f..49c14451d8ab 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2667,7 +2667,7 @@ static void rtnetlink_rcv(struct sk_buff *skb) static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device 
*dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_UP: diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index c21f200eed93..dd4d506ef923 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -2078,9 +2078,9 @@ out_err: } static int dn_device_event(struct notifier_block *this, unsigned long event, - void *ptr) + void *ptr) { - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 55e1fd5b3e56..3b9d5f20bd1c 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -1352,10 +1352,9 @@ static inline void lowpan_netlink_fini(void) } static int lowpan_device_event(struct notifier_block *unused, - unsigned long event, - void *ptr) + unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); LIST_HEAD(del_list); struct lowpan_dev_record *entry, *tmp; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 247ec1951c35..bf574029a183 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1234,7 +1234,7 @@ out: static int arp_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_CHANGEADDR: diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index dfc39d4d48b7..b047e2d8a614 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1333,7 +1333,7 @@ static void inetdev_send_gratuitous_arp(struct net_device *dev, static int inetdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct in_device *in_dev = __in_dev_get_rtnl(dev); ASSERT_RTNL(); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index c7629a209f9d..05a4888dede9 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1038,7 +1038,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct in_device *in_dev; struct net *net = dev_net(dev); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9d9610ae7855..f975399f3522 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1609,7 +1609,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct mr_table *mrt; struct vif_device *v; diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 5d5d4d1be9c2..dd5508bde799 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c @@ -108,7 +108,7 @@ static int masq_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - const struct net_device *dev = ptr; + const struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); if (event == NETDEV_DOWN) { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 432e084b6b62..bce073b4bbd4 100644 --- 
a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2826,9 +2826,9 @@ static void addrconf_ip6_tnl_config(struct net_device *dev) } static int addrconf_notify(struct notifier_block *this, unsigned long event, - void *data) + void *ptr) { - struct net_device *dev = (struct net_device *) data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct inet6_dev *idev = __in6_dev_get(dev); int run_pending = 0; int err; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 241fb8ad9fcf..583e8d435f9a 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1319,7 +1319,7 @@ static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc, static int ip6mr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct mr6_table *mrt; struct mif_device *v; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 2712ab22a174..a0962697a257 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1568,7 +1568,7 @@ int ndisc_rcv(struct sk_buff *skb) static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct inet6_dev *idev; diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c index 60e9053bab05..b76257cd7e1e 100644 --- a/net/ipv6/netfilter/ip6t_MASQUERADE.c +++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c @@ -71,7 +71,7 @@ static int device_cmp(struct nf_conn *ct, void *ifindex) static int masq_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - const struct net_device *dev = ptr; + const struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); if (event == NETDEV_DOWN) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index ad0aa6b0b86a..194c3cde1536 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2681,9 +2681,9 @@ errout: } static int ip6_route_dev_notify(struct notifier_block *this, - unsigned long event, void *data) + unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *)data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) { diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index f547a47d381c..7a1e0fc1bd4d 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -330,7 +330,7 @@ static __inline__ void __ipxitf_put(struct ipx_interface *intrfc) static int ipxitf_device_event(struct notifier_block *notifier, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ipx_interface *i, *tmp; if (!net_eq(dev_net(dev), &init_net)) diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index ae691651b721..168aff5e60de 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -2293,7 +2293,7 @@ out_unlock: static int afiucv_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *event_dev = (struct net_device *)ptr; + struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); struct sock *sk; struct iucv_sock *iucv; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 60f1ce5e5e52..d2c3fd178dbe 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1717,10 +1717,9 @@ void ieee80211_remove_interfaces(struct 
ieee80211_local *local) } static int netdev_notify(struct notifier_block *nb, - unsigned long state, - void *ndev) + unsigned long state, void *ptr) { - struct net_device *dev = ndev; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ieee80211_sub_if_data *sdata; if (state != NETDEV_CHANGENAME) diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 5b142fb16480..7c3ed429789e 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1487,9 +1487,9 @@ ip_vs_forget_dev(struct ip_vs_dest *dest, struct net_device *dev) * Currently only NETDEV_DOWN is handled to release refs to cached dsts */ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, - void *ptr) + void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_service *svc; diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 4e27fa035814..0f2ac8f2e7b7 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -800,7 +800,7 @@ static int nfqnl_rcv_dev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); /* Drop any packets associated with the downed device */ if (event == NETDEV_DOWN) diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c index bd93e51d30ac..292934d23482 100644 --- a/net/netfilter/xt_TEE.c +++ b/net/netfilter/xt_TEE.c @@ -200,7 +200,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par) static int tee_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct xt_tee_priv *priv; priv = container_of(this, struct xt_tee_priv, notifier); diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 8a6c6ea466d8..af3531926ee0 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -708,7 +708,7 @@ unlhsh_remove_return: * netlbl_unlhsh_netdev_handler - Network device notification handler * @this: notifier block * @event: the event - * @ptr: the network device (cast to void) + * @ptr: the netdevice notifier info (cast to void) * * Description: * Handle network device events, although at present all we care about is a @@ -717,10 +717,9 @@ unlhsh_remove_return: * */ static int netlbl_unlhsh_netdev_handler(struct notifier_block *this, - unsigned long event, - void *ptr) + unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct netlbl_unlhsh_iface *iface = NULL; if (!net_eq(dev_net(dev), &init_net)) diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index ec0c80fde69f..698814bfa7ad 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -117,7 +117,7 @@ static void nr_kill_by_device(struct net_device *dev) */ static int nr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c index ef4feec6cd84..c3235675f359 100644 --- a/net/openvswitch/dp_notify.c +++ 
b/net/openvswitch/dp_notify.c @@ -78,7 +78,7 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct ovs_net *ovs_net; - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct vport *vport = NULL; if (!ovs_is_internal_dev(dev)) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 8ec1bca7f859..79fe63246b27 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3331,10 +3331,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, } -static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data) +static int packet_notifier(struct notifier_block *this, + unsigned long msg, void *ptr) { struct sock *sk; - struct net_device *dev = data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); rcu_read_lock(); diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index 45a7df6575de..56a6146ac94b 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c @@ -292,9 +292,9 @@ static void phonet_route_autodel(struct net_device *dev) /* notify Phonet of device events */ static int phonet_device_notify(struct notifier_block *me, unsigned long what, - void *arg) + void *ptr) { - struct net_device *dev = arg; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (what) { case NETDEV_REGISTER: diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 9c8347451597..e98fcfbe6007 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -202,10 +202,10 @@ static void rose_kill_by_device(struct net_device *dev) /* * Handle device status changes. */ -static int rose_device_event(struct notifier_block *this, unsigned long event, - void *ptr) +static int rose_device_event(struct notifier_block *this, + unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *)ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 5d676edc22a6..977c10e0631b 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -243,7 +243,7 @@ nla_put_failure: static int mirred_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct tcf_mirred *m; if (event == NETDEV_UNREGISTER) diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index 120a676a3360..fc60bea63169 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c @@ -251,9 +251,9 @@ static void disable_bearer(struct tipc_bearer *tb_ptr) * specified device. */ static int recv_notification(struct notifier_block *nb, unsigned long evt, - void *dv) + void *ptr) { - struct net_device *dev = (struct net_device *)dv; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct eth_bearer *eb_ptr = ð_bearers[0]; struct eth_bearer *stop = ð_bearers[MAX_ETH_BEARERS]; diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c index 2a2864c25e15..baa9df4327d9 100644 --- a/net/tipc/ib_media.c +++ b/net/tipc/ib_media.c @@ -244,9 +244,9 @@ static void disable_bearer(struct tipc_bearer *tb_ptr) * specified device. 
*/ static int recv_notification(struct notifier_block *nb, unsigned long evt, - void *dv) + void *ptr) { - struct net_device *dev = (struct net_device *)dv; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ib_bearer *ib_ptr = &ib_bearers[0]; struct ib_bearer *stop = &ib_bearers[MAX_IB_BEARERS]; diff --git a/net/wireless/core.c b/net/wireless/core.c index 73405e00c800..01e41191f1bf 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -886,10 +886,9 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev, } static int cfg80211_netdev_notifier_call(struct notifier_block *nb, - unsigned long state, - void *ndev) + unsigned long state, void *ptr) { - struct net_device *dev = ndev; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev; int ret; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 37ca9694aabe..1d964e23853f 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -224,7 +224,7 @@ static void x25_kill_by_device(struct net_device *dev) static int x25_device_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct x25_neigh *nb; if (!net_eq(dev_net(dev), &init_net)) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 23cea0f74336..536ccc95de89 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2784,7 +2784,7 @@ static void __net_init xfrm_dst_ops_init(struct net *net) static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_DOWN: diff --git a/security/selinux/netif.c b/security/selinux/netif.c index 47a49d1a6f6a..694e9e43855f 100644 --- a/security/selinux/netif.c +++ b/security/selinux/netif.c @@ -264,7 +264,7 @@ static int sel_netif_avc_callback(u32 event) static int sel_netif_netdev_notifier_handler(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (dev_net(dev) != &init_net) return NOTIFY_DONE; -- cgit v1.2.3 From be8b678c6c63010cd2b0f4f602bd3066f7827fb2 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Tue, 28 May 2013 20:34:24 +0000 Subject: cxgb3: Correct comparisons and calculations using skb->tail and skb->transport_header This corrects a regression introduced by "net: Use 16bits for *_headers fields of struct skbuff" when NET_SKBUFF_DATA_USES_OFFSET is not set. In that case skb->tail will be a pointer whereas skb->transport_header will be an offset from head. This is corrected by using wrappers that ensure that comparisons and calculations are always made using pointers. Signed-off-by: Simon Horman Signed-off-by: David S.
Miller --- drivers/net/ethernet/chelsio/cxgb3/sge.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 2fd773e267dc..46d1efc17527 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -1584,9 +1584,8 @@ static void deferred_unmap_destructor(struct sk_buff *skb) p = dui->addr; if (skb->tail - skb->transport_header) - pci_unmap_single(dui->pdev, *p++, - skb->tail - skb->transport_header, - PCI_DMA_TODEVICE); + pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) - + skb_transport_header(skb), PCI_DMA_TODEVICE); si = skb_shinfo(skb); for (i = 0; i < si->nr_frags; i++) @@ -1647,8 +1646,8 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, flits = skb_transport_offset(skb) / 8; sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), - skb->tail - skb->transport_header, - addr); + skb_tail_pointer(skb) - + skb_transport_header(skb), addr); if (need_skb_unmap()) { setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); skb->destructor = deferred_unmap_destructor; @@ -1674,7 +1673,7 @@ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb) flits = skb_transport_offset(skb) / 8; /* headers */ cnt = skb_shinfo(skb)->nr_frags; - if (skb->tail != skb->transport_header) + if (skb_tail_pointer(skb) != skb_transport_header(skb)) cnt++; return flits_to_desc(flits + sgl_len(cnt)); } -- cgit v1.2.3 From dc7551cbf7744ef7374c3f3c46817413ad4bcc40 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 30 May 2013 09:51:34 +0000 Subject: netxen_nic: Log driver version with firmware version Signed-off-by: Manish Chopra Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 51e13d92761e..2294cd2293b1 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -860,9 +860,9 @@ netxen_check_options(struct netxen_adapter *adapter) adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0; } - dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n", - fw_major, fw_minor, fw_build, - adapter->ahw.cut_through ? "cut-through" : "legacy"); + dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d [%s]\n", + NETXEN_NIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build, + adapter->ahw.cut_through ? "cut-through" : "legacy"); if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222)) adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1); -- cgit v1.2.3 From b08a92bb7381ff03d4101bcc61e65120bc690a4f Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 30 May 2013 09:51:35 +0000 Subject: netxen_nic: Log proper error message in case of mismatched adapter type o log "Unknown" board name and "Unknown" serial number in case of mismatched adapter type found. This will avoid weird characters logs when an adapter is in bad state or corrupted. Signed-off-by: Manish Chopra Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 10 +++++++--- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4 +++- 2 files changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 322a36b76727..e5ab1ecb4bb2 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -1855,7 +1855,7 @@ static const struct netxen_brdinfo netxen_boards[] = { #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) -static inline void get_brd_name_by_type(u32 type, char *name) +static inline int netxen_nic_get_brd_name_by_type(u32 type, char *name) { int i, found = 0; for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { @@ -1864,10 +1864,14 @@ static inline void get_brd_name_by_type(u32 type, char *name) found = 1; break; } + } + if (!found) { + strcpy(name, "Unknown"); + return -EINVAL; } - if (!found) - name = "Unknown"; + + return 0; } static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 2294cd2293b1..558df468f2e4 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -841,7 +841,9 @@ netxen_check_options(struct netxen_adapter *adapter) } if (adapter->portnum == 0) { - get_brd_name_by_type(adapter->ahw.board_type, brd_name); + if (netxen_nic_get_brd_name_by_type(adapter->ahw.board_type, + brd_name)) + strcpy(serial_num, "Unknown"); pr_info("%s: %s Board S/N %s Chip rev 0x%x\n", module_name(THIS_MODULE), -- cgit v1.2.3 From a120e864676548454f8d061e7d8a2e68c908a794 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 30 May 2013 09:51:36 +0000 Subject: netxen_nic: netxen_setup_intr() function code cleanup Signed-off-by: Manish Chopra Signed-off-by: David S. Miller --- .../net/ethernet/qlogic/netxen/netxen_nic_main.c | 77 +++++++++++++--------- 1 file changed, 46 insertions(+), 31 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 558df468f2e4..927ae9f801fb 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -593,47 +593,43 @@ static const struct net_device_ops netxen_netdev_ops = { }; static void -netxen_setup_intr(struct netxen_adapter *adapter) +netxen_initialize_interrupt_registers(struct netxen_adapter *adapter) { struct netxen_legacy_intr_set *legacy_intrp; - struct pci_dev *pdev = adapter->pdev; - int err, num_msix; - - if (adapter->rss_supported) { - num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? 
- MSIX_ENTRIES_PER_ADAPTER : 2; - } else - num_msix = 1; - - adapter->max_sds_rings = 1; - - adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); + u32 tgt_status_reg, int_state_reg; if (adapter->ahw.revision_id >= NX_P3_B0) legacy_intrp = &legacy_intr[adapter->ahw.pci_func]; else legacy_intrp = &legacy_intr[0]; + tgt_status_reg = legacy_intrp->tgt_status_reg; + int_state_reg = ISR_INT_STATE_REG; + adapter->int_vec_bit = legacy_intrp->int_vec_bit; - adapter->tgt_status_reg = netxen_get_ioaddr(adapter, - legacy_intrp->tgt_status_reg); + adapter->tgt_status_reg = netxen_get_ioaddr(adapter, tgt_status_reg); adapter->tgt_mask_reg = netxen_get_ioaddr(adapter, - legacy_intrp->tgt_mask_reg); + legacy_intrp->tgt_mask_reg); adapter->pci_int_reg = netxen_get_ioaddr(adapter, - legacy_intrp->pci_int_reg); + legacy_intrp->pci_int_reg); adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR); if (adapter->ahw.revision_id >= NX_P3_B1) adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, - ISR_INT_STATE_REG); + int_state_reg); else adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, - CRB_INT_VECTOR); + CRB_INT_VECTOR); +} - netxen_set_msix_bit(pdev, 0); +static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter, + int num_msix) +{ + struct pci_dev *pdev = adapter->pdev; + u32 value; + int err; if (adapter->msix_supported) { - netxen_init_msix_entries(adapter, num_msix); err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); if (err == 0) { @@ -644,26 +640,45 @@ netxen_setup_intr(struct netxen_adapter *adapter) adapter->max_sds_rings = num_msix; dev_info(&pdev->dev, "using msi-x interrupts\n"); - return; + return 0; } - - if (err > 0) - pci_disable_msix(pdev); - /* fall through for msi */ } if (use_msi && !pci_enable_msi(pdev)) { + value = msi_tgt_status[adapter->ahw.pci_func]; adapter->flags |= NETXEN_NIC_MSI_ENABLED; - adapter->tgt_status_reg = netxen_get_ioaddr(adapter, - msi_tgt_status[adapter->ahw.pci_func]); - dev_info(&pdev->dev, "using msi interrupts\n"); + adapter->tgt_status_reg = netxen_get_ioaddr(adapter, value); adapter->msix_entries[0].vector = pdev->irq; - return; + dev_info(&pdev->dev, "using msi interrupts\n"); + return 0; } - dev_info(&pdev->dev, "using legacy interrupts\n"); + return -EIO; +} + +static void netxen_setup_intr(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int num_msix; + + if (adapter->rss_supported) + num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? + MSIX_ENTRIES_PER_ADAPTER : 2; + else + num_msix = 1; + + adapter->max_sds_rings = 1; + adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); + + netxen_initialize_interrupt_registers(adapter); + netxen_set_msix_bit(pdev, 0); + + if (!netxen_setup_msi_interrupts(adapter, num_msix)) + return; + adapter->msix_entries[0].vector = pdev->irq; + dev_info(&pdev->dev, "using legacy interrupts\n"); } static void -- cgit v1.2.3 From b37eb210c076b0465fcb8d14694fcce3f267ac63 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 30 May 2013 09:51:37 +0000 Subject: netxen_nic: Avoid mixed mode interrupts o Adapters do not support co-existence of INTx Interrupt with MSI-X or MSI among multiple functions. Prevent attaching of a function during normal load, if adapter gets into mixed mode of interrupts Signed-off-by: Manish Chopra Signed-off-by: David S. 
Miller --- .../net/ethernet/qlogic/netxen/netxen_nic_hdr.h | 3 ++ .../net/ethernet/qlogic/netxen/netxen_nic_main.c | 50 +++++++++++++++++++--- 2 files changed, 46 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h index 28e076960bcb..32c790659f9c 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h @@ -734,6 +734,9 @@ enum { #define NIC_CRB_BASE_2 (NETXEN_CAM_RAM(0x700)) #define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X)) #define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X)) +#define NETXEN_INTR_MODE_REG NETXEN_NIC_REG(0x44) +#define NETXEN_MSI_MODE 0x1 +#define NETXEN_INTX_MODE 0x2 #define NX_CDRP_CRB_OFFSET (NETXEN_NIC_REG(0x18)) #define NX_ARG1_CRB_OFFSET (NETXEN_NIC_REG(0x1c)) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 927ae9f801fb..20c7b3c12b56 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -592,6 +592,22 @@ static const struct net_device_ops netxen_netdev_ops = { #endif }; +static inline bool netxen_function_zero(struct pci_dev *pdev) +{ + return (PCI_FUNC(pdev->devfn) == 0) ? true : false; +} + +static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter, + u32 mode) +{ + NXWR32(adapter, NETXEN_INTR_MODE_REG, mode); +} + +static inline u32 netxen_get_interrupt_mode(struct netxen_adapter *adapter) +{ + return NXRD32(adapter, NETXEN_INTR_MODE_REG); +} + static void netxen_initialize_interrupt_registers(struct netxen_adapter *adapter) { @@ -654,10 +670,11 @@ static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter, return 0; } + dev_err(&pdev->dev, "Failed to acquire MSI-X/MSI interrupt vector\n"); return -EIO; } -static void netxen_setup_intr(struct netxen_adapter *adapter) +static int netxen_setup_intr(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int num_msix; @@ -674,11 +691,24 @@ static void netxen_setup_intr(struct netxen_adapter *adapter) netxen_initialize_interrupt_registers(adapter); netxen_set_msix_bit(pdev, 0); - if (!netxen_setup_msi_interrupts(adapter, num_msix)) - return; + if (netxen_function_zero(pdev)) { + if (!netxen_setup_msi_interrupts(adapter, num_msix)) + netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE); + else + netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE); + } else { + if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE && + netxen_setup_msi_interrupts(adapter, num_msix)) { + dev_err(&pdev->dev, "Co-existence of MSI-X/MSI and INTx interrupts is not supported\n"); + return -EIO; + } + } - adapter->msix_entries[0].vector = pdev->irq; - dev_info(&pdev->dev, "using legacy interrupts\n"); + if (!NETXEN_IS_MSI_FAMILY(adapter)) { + adapter->msix_entries[0].vector = pdev->irq; + dev_info(&pdev->dev, "using legacy interrupts\n"); + } + return 0; } static void @@ -1525,7 +1555,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netxen_nic_clear_stats(adapter); - netxen_setup_intr(adapter); + err = netxen_setup_intr(adapter); + + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to setup interrupts, error = %d\n", err); + goto err_out_disable_msi; + } err = netxen_setup_netdev(adapter, netdev); if (err) @@ -1613,7 +1649,7 @@ static void netxen_nic_remove(struct pci_dev *pdev) clear_bit(__NX_RESETTING, &adapter->state); 
netxen_teardown_intr(adapter); - + netxen_set_interrupt_mode(adapter, 0); netxen_remove_diag_entries(adapter); netxen_cleanup_pci_map(adapter); -- cgit v1.2.3 From 90648153754cc1ca2eb08cd19c7b13354a795183 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 30 May 2013 09:51:38 +0000 Subject: netxen_nic: Update version to 4.0.81 Signed-off-by: Manish Chopra Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index e5ab1ecb4bb2..3fe09ab2d7c9 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -53,8 +53,8 @@ #define _NETXEN_NIC_LINUX_MAJOR 4 #define _NETXEN_NIC_LINUX_MINOR 0 -#define _NETXEN_NIC_LINUX_SUBVERSION 80 -#define NETXEN_NIC_LINUX_VERSIONID "4.0.80" +#define _NETXEN_NIC_LINUX_SUBVERSION 81 +#define NETXEN_NIC_LINUX_VERSIONID "4.0.81" #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) #define _major(v) (((v) >> 24) & 0xff) -- cgit v1.2.3 From ccfecdfe16a872ed3e8322ea48e34502568eb849 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 30 May 2013 00:28:03 +0000 Subject: net: emaclite: Report failures in mdio setup Be more verbose when any problem happens. Signed-off-by: Michal Simek Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 919b983114e9..a16dc3508998 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -852,8 +852,10 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) /* Don't register the MDIO bus if the phy_node or its parent node * can't be found. */ - if (!np) + if (!np) { + dev_err(dev, "Failed to register mdio bus.\n"); return -ENODEV; + } /* Enable the MDIO bus by asserting the enable bit in MDIO Control * register. @@ -862,8 +864,10 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) XEL_MDIOCTRL_MDIOEN_MASK); bus = mdiobus_alloc(); - if (!bus) + if (!bus) { + dev_err(dev, "Failed to allocal mdiobus\n"); return -ENOMEM; + } of_address_to_resource(np, 0, &res); snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", @@ -879,8 +883,10 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) lp->mii_bus = bus; rc = of_mdiobus_register(bus, np); - if (rc) + if (rc) { + dev_err(dev, "Failed to register mdio bus.\n"); goto err_register; + } return 0; -- cgit v1.2.3 From e0a3bc65448c01289a74329ddf6c84d8c0594e59 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 30 May 2013 00:28:04 +0000 Subject: net: emaclite: Support multiple phys connected to one MDIO bus For system which contains at least two ethernet IP where one IP manage MDIO bus with several PHYs. 
Example dts node: ethernet_mac0: ethernet@81000000 { compatible = "xlnx,xps-ethernetlite-1.00.a"; device_type = "network"; interrupt-parent = <&xps_intc_0>; interrupts = < 1 0 >; local-mac-address = [ 00 0a 35 00 db bb ]; phy-handle = <ðernet_mac0_phy0>; reg = < 0x81000000 0x10000 >; xlnx,duplex = <0x1>; xlnx,family = "spartan3e"; xlnx,include-internal-loopback = <0x0>; xlnx,include-mdio = <0x1>; xlnx,rx-ping-pong = <0x0>; xlnx,tx-ping-pong = <0x0>; ethernet_mac0_mdio { #address-cells = <1>; #size-cells = <0>; ethernet_mac0_phy0: phy@1 { reg = <0x1>; } ; ethernet_mac0_phy1: phy@3 { reg = <0x3>; } ; } ; } ; ethernet_mac2: ethernet@81040000 { compatible = "xlnx,xps-ethernetlite-1.00.a"; device_type = "network"; interrupt-parent = <&xps_intc_0>; interrupts = < 11 0 >; local-mac-address = [ 00 0a 35 00 db bb ]; phy-handle = <ðernet_mac0_phy1>; reg = < 0x81040000 0x10000 >; xlnx,duplex = <0x1>; xlnx,family = "spartan3e"; xlnx,include-internal-loopback = <0x0>; xlnx,include-mdio = <0x0>; xlnx,rx-ping-pong = <0x0>; xlnx,tx-ping-pong = <0x0>; } ; Signed-off-by: Michal Simek Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index a16dc3508998..fcd1e0b46103 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -848,6 +848,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) int rc; struct resource res; struct device_node *np = of_get_parent(lp->phy_node); + struct device_node *npp; /* Don't register the MDIO bus if the phy_node or its parent node * can't be found. @@ -856,6 +857,17 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) dev_err(dev, "Failed to register mdio bus.\n"); return -ENODEV; } + npp = of_get_parent(np); + + of_address_to_resource(npp, 0, &res); + if (lp->ndev->mem_start != res.start) { + struct phy_device *phydev; + phydev = of_phy_find_device(lp->phy_node); + if (!phydev) + dev_info(dev, + "MDIO of the phy is not registered yet\n"); + return 0; + } /* Enable the MDIO bus by asserting the enable bit in MDIO Control * register. @@ -869,7 +881,6 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) return -ENOMEM; } - of_address_to_resource(np, 0, &res); snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", (unsigned long long)res.start); bus->priv = lp; -- cgit v1.2.3 From 3fb99fa7c7d2310791bf39929285da44b201fd40 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 30 May 2013 00:28:05 +0000 Subject: net: emaclite: Let's make xemaclite_adjust_link static xemaclite_adjust_link is used locally. It removes sparse warning: drivers/net/ethernet/xilinx/xilinx_emaclite.c:916:6: warning: symbol 'xemaclite_adjust_link' was not declared. Should it be static? Signed-off-by: Michal Simek Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index fcd1e0b46103..93bb14e04d84 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -913,7 +913,7 @@ err_register: * There's nothing in the Emaclite device to be configured when the link * state changes. 
We just print the status. */ -void xemaclite_adjust_link(struct net_device *ndev) +static void xemaclite_adjust_link(struct net_device *ndev) { struct net_local *lp = netdev_priv(ndev); struct phy_device *phy = lp->phy_dev; -- cgit v1.2.3 From 123c1407af877b9b036b852b77013a6f9f6049b0 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 30 May 2013 00:28:06 +0000 Subject: net: emaclite: Do not use microblaze and ppc IO functions Emaclite can be used on ARM zynq where in_be32/out_be32 IO functions are not present. Use standard __raw_readl/__raw_writel IO functions instead. Signed-off-by: Michal Simek Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 138 +++++++++++++------------- 1 file changed, 69 insertions(+), 69 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 93bb14e04d84..9227ed7bce10 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -159,34 +159,32 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata) u32 reg_data; /* Enable the Tx interrupts for the first Buffer */ - reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET); - out_be32(drvdata->base_addr + XEL_TSR_OFFSET, - reg_data | XEL_TSR_XMIT_IE_MASK); + reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); + __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, + drvdata->base_addr + XEL_TSR_OFFSET); /* Enable the Tx interrupts for the second Buffer if * configured in HW */ if (drvdata->tx_ping_pong != 0) { - reg_data = in_be32(drvdata->base_addr + + reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); - out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET, - reg_data | XEL_TSR_XMIT_IE_MASK); + __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, + drvdata->base_addr + XEL_BUFFER_OFFSET + + XEL_TSR_OFFSET); } /* Enable the Rx interrupts for the first buffer */ - out_be32(drvdata->base_addr + XEL_RSR_OFFSET, - XEL_RSR_RECV_IE_MASK); + __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); /* Enable the Rx interrupts for the second Buffer if * configured in HW */ if (drvdata->rx_ping_pong != 0) { - out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_RSR_OFFSET, - XEL_RSR_RECV_IE_MASK); + __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET); } /* Enable the Global Interrupt Enable */ - out_be32(drvdata->base_addr + XEL_GIER_OFFSET, XEL_GIER_GIE_MASK); + __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); } /** @@ -201,37 +199,37 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) u32 reg_data; /* Disable the Global Interrupt Enable */ - out_be32(drvdata->base_addr + XEL_GIER_OFFSET, XEL_GIER_GIE_MASK); + __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); /* Disable the Tx interrupts for the first buffer */ - reg_data = in_be32(drvdata->base_addr + XEL_TSR_OFFSET); - out_be32(drvdata->base_addr + XEL_TSR_OFFSET, - reg_data & (~XEL_TSR_XMIT_IE_MASK)); + reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); + __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), + drvdata->base_addr + XEL_TSR_OFFSET); /* Disable the Tx interrupts for the second Buffer * if configured in HW */ if (drvdata->tx_ping_pong != 0) { - reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + + reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); - 
out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET, - reg_data & (~XEL_TSR_XMIT_IE_MASK)); + __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), + drvdata->base_addr + XEL_BUFFER_OFFSET + + XEL_TSR_OFFSET); } /* Disable the Rx interrupts for the first buffer */ - reg_data = in_be32(drvdata->base_addr + XEL_RSR_OFFSET); - out_be32(drvdata->base_addr + XEL_RSR_OFFSET, - reg_data & (~XEL_RSR_RECV_IE_MASK)); + reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); + __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), + drvdata->base_addr + XEL_RSR_OFFSET); /* Disable the Rx interrupts for the second buffer * if configured in HW */ if (drvdata->rx_ping_pong != 0) { - reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + + reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET); - out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET + - XEL_RSR_OFFSET, - reg_data & (~XEL_RSR_RECV_IE_MASK)); + __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), + drvdata->base_addr + XEL_BUFFER_OFFSET + + XEL_RSR_OFFSET); } } @@ -351,7 +349,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, byte_count = ETH_FRAME_LEN; /* Check if the expected buffer is available */ - reg_data = in_be32(addr + XEL_TSR_OFFSET); + reg_data = __raw_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { @@ -364,7 +362,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); - reg_data = in_be32(addr + XEL_TSR_OFFSET); + reg_data = __raw_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) != 0) @@ -375,15 +373,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, /* Write the frame to the buffer */ xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); - out_be32(addr + XEL_TPLR_OFFSET, (byte_count & XEL_TPLR_LENGTH_MASK)); + __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK), + addr + XEL_TPLR_OFFSET); /* Update the Tx Status Register to indicate that there is a * frame to send. 
Set the XEL_TSR_XMIT_ACTIVE_MASK flag which * is used by the interrupt handler to check whether a frame * has been transmitted */ - reg_data = in_be32(addr + XEL_TSR_OFFSET); + reg_data = __raw_readl(addr + XEL_TSR_OFFSET); reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); - out_be32(addr + XEL_TSR_OFFSET, reg_data); + __raw_writel(reg_data, addr + XEL_TSR_OFFSET); return 0; } @@ -408,7 +407,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use); /* Verify which buffer has valid data */ - reg_data = in_be32(addr + XEL_RSR_OFFSET); + reg_data = __raw_readl(addr + XEL_RSR_OFFSET); if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) { if (drvdata->rx_ping_pong != 0) @@ -425,14 +424,14 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) return 0; /* No data was available */ /* Verify that buffer has valid data */ - reg_data = in_be32(addr + XEL_RSR_OFFSET); + reg_data = __raw_readl(addr + XEL_RSR_OFFSET); if ((reg_data & XEL_RSR_RECV_DONE_MASK) != XEL_RSR_RECV_DONE_MASK) return 0; /* No data was available */ } /* Get the protocol type of the ethernet frame that arrived */ - proto_type = ((ntohl(in_be32(addr + XEL_HEADER_OFFSET + + proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); @@ -441,7 +440,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) { if (proto_type == ETH_P_IP) { - length = ((ntohl(in_be32(addr + + length = ((ntohl(__raw_readl(addr + XEL_HEADER_IP_LENGTH_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & @@ -463,9 +462,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) data, length); /* Acknowledge the frame */ - reg_data = in_be32(addr + XEL_RSR_OFFSET); + reg_data = __raw_readl(addr + XEL_RSR_OFFSET); reg_data &= ~XEL_RSR_RECV_DONE_MASK; - out_be32(addr + XEL_RSR_OFFSET, reg_data); + __raw_writel(reg_data, addr + XEL_RSR_OFFSET); return length; } @@ -492,14 +491,14 @@ static void xemaclite_update_address(struct net_local *drvdata, xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); - out_be32(addr + XEL_TPLR_OFFSET, ETH_ALEN); + __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); /* Update the MAC address in the EmacLite */ - reg_data = in_be32(addr + XEL_TSR_OFFSET); - out_be32(addr + XEL_TSR_OFFSET, reg_data | XEL_TSR_PROG_MAC_ADDR); + reg_data = __raw_readl(addr + XEL_TSR_OFFSET); + __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); /* Wait for EmacLite to finish with the MAC address update */ - while ((in_be32(addr + XEL_TSR_OFFSET) & + while ((__raw_readl(addr + XEL_TSR_OFFSET) & XEL_TSR_PROG_MAC_ADDR) != 0) ; } @@ -669,31 +668,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) u32 tx_status; /* Check if there is Rx Data available */ - if ((in_be32(base_addr + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK) || - (in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) + if ((__raw_readl(base_addr + XEL_RSR_OFFSET) & + XEL_RSR_RECV_DONE_MASK) || + (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK)) xemaclite_rx_handler(dev); /* Check if the Transmission for the first buffer is completed */ - tx_status = in_be32(base_addr + XEL_TSR_OFFSET); + tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= 
~XEL_TSR_XMIT_ACTIVE_MASK; - out_be32(base_addr + XEL_TSR_OFFSET, tx_status); + __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET); tx_complete = true; } /* Check if the Transmission for the second buffer is completed */ - tx_status = in_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); + tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; - out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, - tx_status); + __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + + XEL_TSR_OFFSET); tx_complete = true; } @@ -726,7 +726,7 @@ static int xemaclite_mdio_wait(struct net_local *lp) /* wait for the MDIO interface to not be busy or timeout after some time. */ - while (in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET) & + while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & XEL_MDIOCTRL_MDIOSTS_MASK) { if (end - jiffies <= 0) { WARN_ON(1); @@ -762,17 +762,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) * MDIO Address register. Set the Status bit in the MDIO Control * register to start a MDIO read transaction. */ - ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET); - out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET, - XEL_MDIOADDR_OP_MASK | - ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg)); - out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET, - ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK); + ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); + __raw_writel(XEL_MDIOADDR_OP_MASK | + ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), + lp->base_addr + XEL_MDIOADDR_OFFSET); + __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); if (xemaclite_mdio_wait(lp)) return -ETIMEDOUT; - rc = in_be32(lp->base_addr + XEL_MDIORD_OFFSET); + rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET); dev_dbg(&lp->ndev->dev, "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", @@ -809,13 +809,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, * Data register. Finally, set the Status bit in the MDIO Control * register to start a MDIO write transaction. */ - ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET); - out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET, - ~XEL_MDIOADDR_OP_MASK & - ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg)); - out_be32(lp->base_addr + XEL_MDIOWR_OFFSET, val); - out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET, - ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK); + ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); + __raw_writel(~XEL_MDIOADDR_OP_MASK & + ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), + lp->base_addr + XEL_MDIOADDR_OFFSET); + __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); + __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); return 0; } @@ -872,8 +872,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) /* Enable the MDIO bus by asserting the enable bit in MDIO Control * register. 
*/ - out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET, - XEL_MDIOCTRL_MDIOEN_MASK); + __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); bus = mdiobus_alloc(); if (!bus) { @@ -1197,8 +1197,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev) dev_warn(dev, "No MAC address found\n"); /* Clear the Tx CSR's in case this is a restart */ - out_be32(lp->base_addr + XEL_TSR_OFFSET, 0); - out_be32(lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 0); + __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); + __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); /* Set the MAC address in the EmacLite device */ xemaclite_update_address(lp, ndev->dev_addr); -- cgit v1.2.3 From 1156ee88dd3992bbacd5e4c659530f9d18c07378 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 30 May 2013 00:28:07 +0000 Subject: net: emaclite: Enable emaclite for Xilinx Arm Zynq platform Enable emaclite for Xilinx ARM Zynq platform. Signed-off-by: Michal Simek Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index 122d60c0481b..7b90a5eba099 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_XILINX bool "Xilinx devices" default y - depends on PPC || PPC32 || MICROBLAZE + depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from @@ -20,7 +20,7 @@ if NET_VENDOR_XILINX config XILINX_EMACLITE tristate "Xilinx 10/100 Ethernet Lite support" - depends on (PPC32 || MICROBLAZE) + depends on (PPC32 || MICROBLAZE || ARCH_ZYNQ) select PHYLIB ---help--- This driver supports the 10/100 Ethernet Lite from Xilinx. -- cgit v1.2.3 From 9e7c414350a6fa4e5f9b7e5f7e074fa75ba850c3 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 30 May 2013 00:28:08 +0000 Subject: net: emaclite: Update driver header Correct email address and years. Signed-off-by: Michal Simek Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 9227ed7bce10..aa14d8adcecb 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -2,9 +2,9 @@ * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device. * * This is a new flat driver which is based on the original emac_lite - * driver from John Williams . + * driver from John Williams . * - * 2007-2009 (c) Xilinx, Inc. + * 2007 - 2013 (c) Xilinx, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the -- cgit v1.2.3 From cec753f5dd103fcfef28c1d1c942395000abe15e Mon Sep 17 00:00:00 2001 From: Sebastian Hesselbarth Date: Wed, 29 May 2013 09:32:43 +0000 Subject: net: mv643xx_eth: use phy_disconnect instead of phy_detach Using a separated mdio bus driver with mvmdio, phy_detach on network device removal will not stop the phy and finally lead to NULL pointer dereference in mvmdio due to non-existent network device. 
Use phy_disconnect instead to properly stop phy device from accessing network device prior removal of the network device. Signed-off-by: Sebastian Hesselbarth Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index afb8bcbe29f8..8124464af83c 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2805,7 +2805,7 @@ static int mv643xx_eth_remove(struct platform_device *pdev) unregister_netdev(mp->dev); if (mp->phy != NULL) - phy_detach(mp->phy); + phy_disconnect(mp->phy); cancel_work_sync(&mp->tx_timeout_task); if (!IS_ERR(mp->clk)) -- cgit v1.2.3 From 65a6f969fd1af549111b1667d9ca857e3cc649eb Mon Sep 17 00:00:00 2001 From: Sebastian Hesselbarth Date: Wed, 29 May 2013 09:32:44 +0000 Subject: net: mv643xx_eth: use managed devm_ioremap for port registers Make use of managed devm_ioremap and remove corresponding iounmap. Signed-off-by: Sebastian Hesselbarth Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 8124464af83c..3417847b8485 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2470,7 +2470,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) if (msp == NULL) return -ENOMEM; - msp->base = ioremap(res->start, resource_size(res)); + msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (msp->base == NULL) return -ENOMEM; @@ -2498,7 +2498,6 @@ static int mv643xx_eth_shared_remove(struct platform_device *pdev) { struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); - iounmap(msp->base); if (!IS_ERR(msp->clk)) clk_disable_unprepare(msp->clk); -- cgit v1.2.3 From cc9d459894b0927d3f9280b07b308cec2576ad80 Mon Sep 17 00:00:00 2001 From: Sebastian Hesselbarth Date: Wed, 29 May 2013 09:32:46 +0000 Subject: net: mv643xx_eth: use of_phy_connect if phy_node present This connects to a phy node passed to the port device instead of probing the phy by phy_addr. Signed-off-by: Sebastian Hesselbarth Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 3417847b8485..4f17d398a8ae 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -60,6 +60,7 @@ #include #include #include +#include static char mv643xx_eth_driver_name[] = "mv643xx_eth"; static char mv643xx_eth_driver_version[] = "1.4"; @@ -2715,17 +2716,27 @@ static int mv643xx_eth_probe(struct platform_device *pdev) netif_set_real_num_tx_queues(dev, mp->txq_count); netif_set_real_num_rx_queues(dev, mp->rxq_count); - if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { + err = 0; + if (pd->phy_node) { + mp->phy = of_phy_connect(mp->dev, pd->phy_node, + mv643xx_eth_adjust_link, 0, + PHY_INTERFACE_MODE_GMII); + if (!mp->phy) + err = -ENODEV; + } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { mp->phy = phy_scan(mp, pd->phy_addr); - if (IS_ERR(mp->phy)) { + if (IS_ERR(mp->phy)) err = PTR_ERR(mp->phy); - if (err == -ENODEV) - err = -EPROBE_DEFER; - goto out; - } - phy_init(mp, pd->speed, pd->duplex); + else + phy_init(mp, pd->speed, pd->duplex); } + if (err == -ENODEV) { + err = -EPROBE_DEFER; + goto out; + } + if (err) + goto out; SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); -- cgit v1.2.3 From cb85215ffc34479188d0ec25a56352f05ad672ce Mon Sep 17 00:00:00 2001 From: Sebastian Hesselbarth Date: Wed, 29 May 2013 09:32:47 +0000 Subject: net: mv643xx_eth: proper initialization for Kirkwood SoCs Ethernet controllers found on Kirkwood SoCs not only suffer from loosing MAC address register contents on clock gating but also some important registers are reset to values that would break ethernet. This patch clears the CLK125_BYPASS_EN bit for DT enabled Kirkwood only by using of_device_is_compatible() instead of #ifdefs. Non-DT Kirkwood is not affected as it installs a clock gating workaround because of the MAC address issue above. Other Orion SoCs do not suffer from register reset, do not have the bit in question, or do not have the register at all. Moreover, system controllers on PPC using this driver should also be protected from clearing that bit. Signed-off-by: Sebastian Hesselbarth Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 4f17d398a8ae..1b31d752ff90 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -116,6 +116,8 @@ static char mv643xx_eth_driver_version[] = "1.4"; #define LINK_UP 0x00000002 #define TXQ_COMMAND 0x0048 #define TXQ_FIX_PRIO_CONF 0x004c +#define PORT_SERIAL_CONTROL1 0x004c +#define CLK125_BYPASS_EN 0x00000010 #define TX_BW_RATE 0x0050 #define TX_BW_MTU 0x0058 #define TX_BW_BURST 0x005c @@ -2701,6 +2703,15 @@ static int mv643xx_eth_probe(struct platform_device *pdev) mp->dev = dev; + /* Kirkwood resets some registers on gated clocks. Especially + * CLK125_BYPASS_EN must be cleared but is not available on + * all other SoCs/System Controllers using this driver. 
+ */ + if (of_device_is_compatible(pdev->dev.of_node, + "marvell,kirkwood-eth-port")) + wrlp(mp, PORT_SERIAL_CONTROL1, + rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN); + /* * Start with a default rate, and if there is a clock, allow * it to override the default. -- cgit v1.2.3 From 76723bca2802eb80990a5fefa179662e2e561d66 Mon Sep 17 00:00:00 2001 From: Sebastian Hesselbarth Date: Wed, 29 May 2013 09:32:48 +0000 Subject: net: mv643xx_eth: add DT parsing support This adds device tree parsing support for the shared driver of mv643xx_eth. As the bindings are slightly different from current PPC bindings new binding documentation is also added. Following PPC-style device setup, the shared driver now also adds port platform_devices and sets up port platform_data. Signed-off-by: Sebastian Hesselbarth Signed-off-by: David S. Miller --- .../devicetree/bindings/net/marvell-orion-net.txt | 85 ++++++++++++ drivers/net/ethernet/marvell/mv643xx_eth.c | 153 ++++++++++++++++++++- 2 files changed, 234 insertions(+), 4 deletions(-) create mode 100644 Documentation/devicetree/bindings/net/marvell-orion-net.txt (limited to 'drivers/net/ethernet') diff --git a/Documentation/devicetree/bindings/net/marvell-orion-net.txt b/Documentation/devicetree/bindings/net/marvell-orion-net.txt new file mode 100644 index 000000000000..a73b79f227e1 --- /dev/null +++ b/Documentation/devicetree/bindings/net/marvell-orion-net.txt @@ -0,0 +1,85 @@ +Marvell Orion/Discovery ethernet controller +============================================= + +The Marvell Discovery ethernet controller can be found on Marvell Orion SoCs +(Kirkwood, Dove, Orion5x, and Discovery Innovation) and as part of Marvell +Discovery system controller chips (mv64[345]60). + +The Discovery ethernet controller is described with two levels of nodes. The +first level describes the ethernet controller itself and the second level +describes up to 3 ethernet port nodes within that controller. The reason for +the multiple levels is that the port registers are interleaved within a single +set of controller registers. Each port node describes port-specific properties. + +Note: The above separation is only true for Discovery system controllers. +For Orion SoCs we stick to the separation, although there each controller has +only one port associated. Multiple ports are implemented as multiple single-port +controllers. As Kirkwood has some issues with proper initialization after reset, +an extra compatible string is added for it. + +* Ethernet controller node + +Required controller properties: + - #address-cells: shall be 1. + - #size-cells: shall be 0. + - compatible: shall be one of "marvell,orion-eth", "marvell,kirkwood-eth". + - reg: address and length of the controller registers. + +Optional controller properties: + - clocks: phandle reference to the controller clock. + - marvell,tx-checksum-limit: max tx packet size for hardware checksum. + +* Ethernet port node + +Required port properties: + - device_type: shall be "network". + - compatible: shall be one of "marvell,orion-eth-port", + "marvell,kirkwood-eth-port". + - reg: port number relative to ethernet controller, shall be 0, 1, or 2. + - interrupts: port interrupt. + - local-mac-address: 6 bytes MAC address. + +Optional port properties: + - marvell,tx-queue-size: size of the transmit ring buffer. + - marvell,tx-sram-addr: address of transmit descriptor buffer located in SRAM. + - marvell,tx-sram-size: size of transmit descriptor buffer located in SRAM. + - marvell,rx-queue-size: size of the receive ring buffer. 
+ - marvell,rx-sram-addr: address of receive descriptor buffer located in SRAM. + - marvell,rx-sram-size: size of receive descriptor buffer located in SRAM. + +and + + - phy-handle: phandle reference to ethernet PHY. + +or + + - speed: port speed if no PHY connected. + - duplex: port mode if no PHY connected. + +* Node example: + +mdio-bus { + ... + ethphy: ethernet-phy@8 { + device_type = "ethernet-phy"; + ... + }; +}; + +eth: ethernet-controller@72000 { + compatible = "marvell,orion-eth"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x72000 0x2000>; + clocks = <&gate_clk 2>; + marvell,tx-checksum-limit = <1600>; + + ethernet@0 { + device_type = "network"; + compatible = "marvell,orion-eth-port"; + reg = <0>; + interrupts = <29>; + phy-handle = <ðphy>; + local-mac-address = [00 00 00 00 00 00]; + }; +}; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 1b31d752ff90..23ea7b6e23f1 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -60,6 +60,9 @@ #include #include #include +#include +#include +#include #include static char mv643xx_eth_driver_name[] = "mv643xx_eth"; @@ -2453,13 +2456,148 @@ static void infer_hw_params(struct mv643xx_eth_shared_private *msp) } } +#if defined(CONFIG_OF) +static const struct of_device_id mv643xx_eth_shared_ids[] = { + { .compatible = "marvell,orion-eth", }, + { .compatible = "marvell,kirkwood-eth", }, + { } +}; +MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); +#endif + +#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60) +#define mv643xx_eth_property(_np, _name, _v) \ + do { \ + u32 tmp; \ + if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \ + _v = tmp; \ + } while (0) + +static struct platform_device *port_platdev[3]; + +static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, + struct device_node *pnp) +{ + struct platform_device *ppdev; + struct mv643xx_eth_platform_data ppd; + struct resource res; + const char *mac_addr; + int ret; + + memset(&ppd, 0, sizeof(ppd)); + ppd.shared = pdev; + + memset(&res, 0, sizeof(res)); + if (!of_irq_to_resource(pnp, 0, &res)) { + dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name); + return -EINVAL; + } + + if (of_property_read_u32(pnp, "reg", &ppd.port_number)) { + dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name); + return -EINVAL; + } + + if (ppd.port_number >= 3) { + dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name); + return -EINVAL; + } + + mac_addr = of_get_mac_address(pnp); + if (mac_addr) + memcpy(ppd.mac_addr, mac_addr, 6); + + mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); + mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); + mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size); + mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size); + mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr); + mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size); + + ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0); + if (!ppd.phy_node) { + ppd.phy_addr = MV643XX_ETH_PHY_NONE; + of_property_read_u32(pnp, "speed", &ppd.speed); + of_property_read_u32(pnp, "duplex", &ppd.duplex); + } + + ppdev = platform_device_alloc(MV643XX_ETH_NAME, ppd.port_number); + if (!ppdev) + return -ENOMEM; + ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto port_err; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto 
port_err; + + ret = platform_device_add(ppdev); + if (ret) + goto port_err; + + port_platdev[ppd.port_number] = ppdev; + + return 0; + +port_err: + platform_device_put(ppdev); + return ret; +} + +static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) +{ + struct mv643xx_eth_shared_platform_data *pd; + struct device_node *pnp, *np = pdev->dev.of_node; + int ret; + + /* bail out if not registered from DT */ + if (!np) + return 0; + + pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + pdev->dev.platform_data = pd; + + mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit); + + for_each_available_child_of_node(np, pnp) { + ret = mv643xx_eth_shared_of_add_port(pdev, pnp); + if (ret) + return ret; + } + return 0; +} + +static void mv643xx_eth_shared_of_remove(void) +{ + int n; + + for (n = 0; n < 3; n++) { + platform_device_del(port_platdev[n]); + port_platdev[n] = NULL; + } +} +#else +static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) +{ + return 0 +} + +#define mv643xx_eth_shared_of_remove() +#endif + static int mv643xx_eth_shared_probe(struct platform_device *pdev) { static int mv643xx_eth_version_printed; - struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; + struct mv643xx_eth_shared_platform_data *pd; struct mv643xx_eth_shared_private *msp; const struct mbus_dram_target_info *dram; struct resource *res; + int ret; if (!mv643xx_eth_version_printed++) pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", @@ -2472,6 +2610,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); if (msp == NULL) return -ENOMEM; + platform_set_drvdata(pdev, msp); msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (msp->base == NULL) @@ -2488,12 +2627,15 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) if (dram) mv643xx_eth_conf_mbus_windows(msp, dram); + ret = mv643xx_eth_shared_of_probe(pdev); + if (ret) + return ret; + pd = pdev->dev.platform_data; + msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? pd->tx_csum_limit : 9 * 1024; infer_hw_params(msp); - platform_set_drvdata(pdev, msp); - return 0; } @@ -2501,9 +2643,9 @@ static int mv643xx_eth_shared_remove(struct platform_device *pdev) { struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); + mv643xx_eth_shared_of_remove(); if (!IS_ERR(msp->clk)) clk_disable_unprepare(msp->clk); - return 0; } @@ -2513,6 +2655,7 @@ static struct platform_driver mv643xx_eth_shared_driver = { .driver = { .name = MV643XX_ETH_SHARED_NAME, .owner = THIS_MODULE, + .of_match_table = of_match_ptr(mv643xx_eth_shared_ids), }, }; @@ -2721,6 +2864,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) if (!IS_ERR(mp->clk)) { clk_prepare_enable(mp->clk); mp->t_clk = clk_get_rate(mp->clk); + } else if (!IS_ERR(mp->shared->clk)) { + mp->t_clk = clk_get_rate(mp->shared->clk); } set_params(mp, pd); -- cgit v1.2.3 From b431becc35a9d25c4a0220fa2c4cf9b9f1dc8bac Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Thu, 30 May 2013 00:22:43 +0000 Subject: net, jme: remove redundant D0 power state set pci_enable_device() will set device power state to D0, so it's no need to do it again in jme_init_one(). Signed-off-by: Yijing Wang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/jme.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 070a6f1a0577..7fbe6abf6054 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -3148,7 +3148,6 @@ jme_init_one(struct pci_dev *pdev, jme->mii_if.mdio_write = jme_mdio_write; jme_clear_pm(jme); - pci_set_power_state(jme->pdev, PCI_D0); device_set_wakeup_enable(&pdev->dev, true); jme_set_phyfifo_5level(jme); -- cgit v1.2.3 From e4afed373eefb91a80e69075fbcd5438c2d36283 Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Thu, 30 May 2013 00:26:09 +0000 Subject: qlcnic: remove redundant D0 power state set pci_enable_device() will set device power state to D0, so it's no need to do it again in qlcnic_attach_func(). Signed-off-by: Yijing Wang Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 6bb56d43614b..0ae88355ad51 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -3232,7 +3232,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev) if (err) return err; - pci_set_power_state(pdev, PCI_D0); pci_set_master(pdev); pci_restore_state(pdev); -- cgit v1.2.3 From 311503bb2119788ac88c406e0893351a2bb0705e Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Thu, 30 May 2013 00:27:06 +0000 Subject: tulip: remove redundant D0 power state set pci_enable_device() will set device power state to D0, so it's no need to do it again in tulip_init_one(). Signed-off-by: Yijing Wang Signed-off-by: David S. Miller --- drivers/net/ethernet/dec/tulip/tulip_core.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 1e9443d9fb57..c94152f1c6be 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1410,12 +1410,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return i; } - /* The chip will fail to enter a low-power state later unless - * first explicitly commanded into D0 */ - if (pci_set_power_state(pdev, PCI_D0)) { - pr_notice("Failed to set power state to D0\n"); - } - irq = pdev->irq; /* alloc_etherdev ensures aligned and zeroed private structures */ -- cgit v1.2.3 From 492205050d77bcc4f85f6dc0da6b6fdbca1d6ff7 Mon Sep 17 00:00:00 2001 From: Stefan Roese Date: Thu, 30 May 2013 03:49:20 +0000 Subject: net: Add EMAC ethernet driver found on Allwinner A10 SoC's The Allwinner A10 has an ethernet controller that seem to be developped internally by them. The exact feature set of this controller is unknown, since there is no public documentation for this IP, and this driver is mostly the one published by Allwinner that has been heavily cleaned up. Signed-off-by: Stefan Roese Signed-off-by: Maxime Ripard Tested-by: Richard Genoud Signed-off-by: David S. 
Miller --- .../bindings/net/allwinner,sun4i-emac.txt | 22 + drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/allwinner/Kconfig | 36 + drivers/net/ethernet/allwinner/Makefile | 5 + drivers/net/ethernet/allwinner/sun4i-emac.c | 960 +++++++++++++++++++++ drivers/net/ethernet/allwinner/sun4i-emac.h | 108 +++ 7 files changed, 1133 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt create mode 100644 drivers/net/ethernet/allwinner/Kconfig create mode 100644 drivers/net/ethernet/allwinner/Makefile create mode 100644 drivers/net/ethernet/allwinner/sun4i-emac.c create mode 100644 drivers/net/ethernet/allwinner/sun4i-emac.h (limited to 'drivers/net/ethernet') diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt new file mode 100644 index 000000000000..b90bfcd138ff --- /dev/null +++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt @@ -0,0 +1,22 @@ +* Allwinner EMAC ethernet controller + +Required properties: +- compatible: should be "allwinner,sun4i-emac". +- reg: address and length of the register set for the device. +- interrupts: interrupt for the device +- phy: A phandle to a phy node defining the PHY address (as the reg + property, a single integer). +- clocks: A phandle to the reference clock for this device + +Optional properties: +- (local-)mac-address: mac address to be used by this driver + +Example: + +emac: ethernet@01c0b000 { + compatible = "allwinner,sun4i-emac"; + reg = <0x01c0b000 0x1000>; + interrupts = <55>; + clocks = <&ahb_gates 17>; + phy = <&phy0>; +}; diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index ed956e08d38b..18fd6fbeb109 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -20,6 +20,7 @@ config SUNGEM_PHY source "drivers/net/ethernet/3com/Kconfig" source "drivers/net/ethernet/adaptec/Kconfig" source "drivers/net/ethernet/aeroflex/Kconfig" +source "drivers/net/ethernet/allwinner/Kconfig" source "drivers/net/ethernet/alteon/Kconfig" source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/apple/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 8268d85f9448..009da27b6e26 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ obj-$(CONFIG_NET_VENDOR_8390) += 8390/ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/ obj-$(CONFIG_GRETH) += aeroflex/ +obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig new file mode 100644 index 000000000000..66d35324f31e --- /dev/null +++ b/drivers/net/ethernet/allwinner/Kconfig @@ -0,0 +1,36 @@ +# +# Allwinner device configuration +# + +config NET_VENDOR_ALLWINNER + bool "Allwinner devices" + default y + depends on ARCH_SUNXI + ---help--- + If you have a network (Ethernet) card belonging to this + class, say Y and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly + affect the kernel: saying N will just cause the configurator + to skip all the questions about Allwinner cards. If you say Y, + you will be asked for your specific card in the following + questions. 
+ +if NET_VENDOR_ALLWINNER + +config SUN4I_EMAC + tristate "Allwinner A10 EMAC support" + depends on ARCH_SUNXI + depends on OF + select CRC32 + select NET_CORE + select MII + select PHYLIB + ---help--- + Support for Allwinner A10 EMAC ethernet driver. + + To compile this driver as a module, choose M here. The module + will be called sun4i-emac. + +endif # NET_VENDOR_ALLWINNER diff --git a/drivers/net/ethernet/allwinner/Makefile b/drivers/net/ethernet/allwinner/Makefile new file mode 100644 index 000000000000..03129f796514 --- /dev/null +++ b/drivers/net/ethernet/allwinner/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Allwinner device drivers. +# + +obj-$(CONFIG_SUN4I_EMAC) += sun4i-emac.o diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c new file mode 100644 index 000000000000..b411344e719e --- /dev/null +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -0,0 +1,960 @@ +/* + * Allwinner EMAC Fast Ethernet driver for Linux. + * + * Copyright 2012-2013 Stefan Roese + * Copyright 2013 Maxime Ripard + * + * Based on the Linux driver provided by Allwinner: + * Copyright (C) 1997 Sten Wang + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sun4i-emac.h" + +#define DRV_NAME "sun4i-emac" +#define DRV_VERSION "1.02" + +#define EMAC_MAX_FRAME_LEN 0x0600 + +/* Transmit timeout, default 5 seconds. */ +static int watchdog = 5000; +module_param(watchdog, int, 0400); +MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); + +/* EMAC register address locking. + * + * The EMAC uses an address register to control where data written + * to the data register goes. This means that the address register + * must be preserved over interrupts or similar calls. + * + * During interrupt and other critical calls, a spinlock is used to + * protect the system, but the calls themselves save the address + * in the address register in case they are interrupting another + * access to the device. + * + * For general accesses a lock is provided so that calls which are + * allowed to sleep are serialised so that the address register does + * not need to be saved. This lock also serves to serialise access + * to the EEPROM and PHY access registers which are shared between + * these two devices. + */ + +/* The driver supports the original EMACE, and now the two newer + * devices, EMACA and EMACB. 
+ */ + +struct emac_board_info { + struct clk *clk; + struct device *dev; + struct platform_device *pdev; + spinlock_t lock; + void __iomem *membase; + u32 msg_enable; + struct net_device *ndev; + struct sk_buff *skb_last; + u16 tx_fifo_stat; + + int emacrx_completed_flag; + + struct phy_device *phy_dev; + struct device_node *phy_node; + unsigned int link; + unsigned int speed; + unsigned int duplex; + + phy_interface_t phy_interface; +}; + +static void emac_update_speed(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned int reg_val; + + /* set EMAC SPEED, depend on PHY */ + reg_val = readl(db->membase + EMAC_MAC_SUPP_REG); + reg_val &= ~(0x1 << 8); + if (db->speed == SPEED_100) + reg_val |= 1 << 8; + writel(reg_val, db->membase + EMAC_MAC_SUPP_REG); +} + +static void emac_update_duplex(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned int reg_val; + + /* set duplex depend on phy */ + reg_val = readl(db->membase + EMAC_MAC_CTL1_REG); + reg_val &= ~EMAC_MAC_CTL1_DUPLEX_EN; + if (db->duplex) + reg_val |= EMAC_MAC_CTL1_DUPLEX_EN; + writel(reg_val, db->membase + EMAC_MAC_CTL1_REG); +} + +static void emac_handle_link_change(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + struct phy_device *phydev = db->phy_dev; + unsigned long flags; + int status_change = 0; + + if (phydev->link) { + if (db->speed != phydev->speed) { + spin_lock_irqsave(&db->lock, flags); + db->speed = phydev->speed; + emac_update_speed(dev); + spin_unlock_irqrestore(&db->lock, flags); + status_change = 1; + } + + if (db->duplex != phydev->duplex) { + spin_lock_irqsave(&db->lock, flags); + db->duplex = phydev->duplex; + emac_update_duplex(dev); + spin_unlock_irqrestore(&db->lock, flags); + status_change = 1; + } + } + + if (phydev->link != db->link) { + if (!phydev->link) { + db->speed = 0; + db->duplex = -1; + } + db->link = phydev->link; + + status_change = 1; + } + + if (status_change) + phy_print_status(phydev); +} + +static int emac_mdio_probe(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + + /* to-do: PHY interrupts are currently not supported */ + + /* attach the mac to the phy */ + db->phy_dev = of_phy_connect(db->ndev, db->phy_node, + &emac_handle_link_change, 0, + db->phy_interface); + if (!db->phy_dev) { + netdev_err(db->ndev, "could not find the PHY\n"); + return -ENODEV; + } + + /* mask with MAC supported features */ + db->phy_dev->supported &= PHY_BASIC_FEATURES; + db->phy_dev->advertising = db->phy_dev->supported; + + db->link = 0; + db->speed = 0; + db->duplex = -1; + + return 0; +} + +static void emac_mdio_remove(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + + phy_disconnect(db->phy_dev); + db->phy_dev = NULL; +} + +static void emac_reset(struct emac_board_info *db) +{ + dev_dbg(db->dev, "resetting device\n"); + + /* RESET device */ + writel(0, db->membase + EMAC_CTL_REG); + udelay(200); + writel(EMAC_CTL_RESET, db->membase + EMAC_CTL_REG); + udelay(200); +} + +static void emac_outblk_32bit(void __iomem *reg, void *data, int count) +{ + writesl(reg, data, round_up(count, 4) / 4); +} + +static void emac_inblk_32bit(void __iomem *reg, void *data, int count) +{ + readsl(reg, data, round_up(count, 4) / 4); +} + +static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct emac_board_info *dm = netdev_priv(dev); + struct phy_device *phydev = dm->phy_dev; + + if (!netif_running(dev)) + return -EINVAL; + + if (!phydev) + return 
-ENODEV; + + return phy_mii_ioctl(phydev, rq, cmd); +} + +/* ethtool ops */ +static void emac_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(DRV_NAME)); + strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION)); + strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info)); +} + +static int emac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct emac_board_info *dm = netdev_priv(dev); + struct phy_device *phydev = dm->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); +} + +static int emac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct emac_board_info *dm = netdev_priv(dev); + struct phy_device *phydev = dm->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + +static const struct ethtool_ops emac_ethtool_ops = { + .get_drvinfo = emac_get_drvinfo, + .get_settings = emac_get_settings, + .set_settings = emac_set_settings, + .get_link = ethtool_op_get_link, +}; + +unsigned int emac_setup(struct net_device *ndev) +{ + struct emac_board_info *db = netdev_priv(ndev); + unsigned int reg_val; + + /* set up TX */ + reg_val = readl(db->membase + EMAC_TX_MODE_REG); + + writel(reg_val | EMAC_TX_MODE_ABORTED_FRAME_EN, + db->membase + EMAC_TX_MODE_REG); + + /* set up RX */ + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + + writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN | + EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN | + EMAC_RX_CTL_ACCEPT_MULTICAST_EN | + EMAC_RX_CTL_ACCEPT_BROADCAST_EN, + db->membase + EMAC_RX_CTL_REG); + + /* set MAC */ + /* set MAC CTL0 */ + reg_val = readl(db->membase + EMAC_MAC_CTL0_REG); + writel(reg_val | EMAC_MAC_CTL0_RX_FLOW_CTL_EN | + EMAC_MAC_CTL0_TX_FLOW_CTL_EN, + db->membase + EMAC_MAC_CTL0_REG); + + /* set MAC CTL1 */ + reg_val = readl(db->membase + EMAC_MAC_CTL1_REG); + reg_val |= EMAC_MAC_CTL1_LEN_CHECK_EN; + reg_val |= EMAC_MAC_CTL1_CRC_EN; + reg_val |= EMAC_MAC_CTL1_PAD_EN; + writel(reg_val, db->membase + EMAC_MAC_CTL1_REG); + + /* set up IPGT */ + writel(EMAC_MAC_IPGT_FULL_DUPLEX, db->membase + EMAC_MAC_IPGT_REG); + + /* set up IPGR */ + writel((EMAC_MAC_IPGR_IPG1 << 8) | EMAC_MAC_IPGR_IPG2, + db->membase + EMAC_MAC_IPGR_REG); + + /* set up Collison window */ + writel((EMAC_MAC_CLRT_COLLISION_WINDOW << 8) | EMAC_MAC_CLRT_RM, + db->membase + EMAC_MAC_CLRT_REG); + + /* set up Max Frame Length */ + writel(EMAC_MAX_FRAME_LEN, + db->membase + EMAC_MAC_MAXF_REG); + + return 0; +} + +unsigned int emac_powerup(struct net_device *ndev) +{ + struct emac_board_info *db = netdev_priv(ndev); + unsigned int reg_val; + + /* initial EMAC */ + /* flush RX FIFO */ + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + reg_val |= 0x8; + writel(reg_val, db->membase + EMAC_RX_CTL_REG); + udelay(1); + + /* initial MAC */ + /* soft reset MAC */ + reg_val = readl(db->membase + EMAC_MAC_CTL0_REG); + reg_val &= ~EMAC_MAC_CTL0_SOFT_RESET; + writel(reg_val, db->membase + EMAC_MAC_CTL0_REG); + + /* set MII clock */ + reg_val = readl(db->membase + EMAC_MAC_MCFG_REG); + reg_val &= (~(0xf << 2)); + reg_val |= (0xD << 2); + writel(reg_val, db->membase + EMAC_MAC_MCFG_REG); + + /* clear RX counter */ + writel(0x0, db->membase + EMAC_RX_FBC_REG); + + /* disable all interrupt and clear interrupt status */ + writel(0, db->membase + EMAC_INT_CTL_REG); + reg_val = readl(db->membase + EMAC_INT_STA_REG); + writel(reg_val, db->membase + EMAC_INT_STA_REG); + + udelay(1); + + /* set up EMAC */ + 
emac_setup(ndev); + + /* set mac_address to chip */ + writel(ndev->dev_addr[0] << 16 | ndev->dev_addr[1] << 8 | ndev-> + dev_addr[2], db->membase + EMAC_MAC_A1_REG); + writel(ndev->dev_addr[3] << 16 | ndev->dev_addr[4] << 8 | ndev-> + dev_addr[5], db->membase + EMAC_MAC_A0_REG); + + mdelay(1); + + return 0; +} + +static int emac_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct emac_board_info *db = netdev_priv(dev); + + if (netif_running(dev)) + return -EBUSY; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + writel(dev->dev_addr[0] << 16 | dev->dev_addr[1] << 8 | dev-> + dev_addr[2], db->membase + EMAC_MAC_A1_REG); + writel(dev->dev_addr[3] << 16 | dev->dev_addr[4] << 8 | dev-> + dev_addr[5], db->membase + EMAC_MAC_A0_REG); + + return 0; +} + +/* Initialize emac board */ +static void emac_init_device(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned long flags; + unsigned int reg_val; + + spin_lock_irqsave(&db->lock, flags); + + emac_update_speed(dev); + emac_update_duplex(dev); + + /* enable RX/TX */ + reg_val = readl(db->membase + EMAC_CTL_REG); + writel(reg_val | EMAC_CTL_RESET | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN, + db->membase + EMAC_CTL_REG); + + /* enable RX/TX0/RX Hlevel interrup */ + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0xf << 0) | (0x01 << 8); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); + + spin_unlock_irqrestore(&db->lock, flags); +} + +/* Our watchdog timed out. Called by the networking layer */ +static void emac_timeout(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned long flags; + + if (netif_msg_timer(db)) + dev_err(db->dev, "tx time out.\n"); + + /* Save previous register address */ + spin_lock_irqsave(&db->lock, flags); + + netif_stop_queue(dev); + emac_reset(db); + emac_init_device(dev); + /* We can accept TX packets again */ + dev->trans_start = jiffies; + netif_wake_queue(dev); + + /* Restore previous register address */ + spin_unlock_irqrestore(&db->lock, flags); +} + +/* Hardware start transmission. + * Send a packet to media from the upper layer. + */ +static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned long channel; + unsigned long flags; + + channel = db->tx_fifo_stat & 3; + if (channel == 3) + return 1; + + channel = (channel == 1 ? 
1 : 0); + + spin_lock_irqsave(&db->lock, flags); + + writel(channel, db->membase + EMAC_TX_INS_REG); + + emac_outblk_32bit(db->membase + EMAC_TX_IO_DATA_REG, + skb->data, skb->len); + dev->stats.tx_bytes += skb->len; + + db->tx_fifo_stat |= 1 << channel; + /* TX control: First packet immediately send, second packet queue */ + if (channel == 0) { + /* set TX len */ + writel(skb->len, db->membase + EMAC_TX_PL0_REG); + /* start translate from fifo to phy */ + writel(readl(db->membase + EMAC_TX_CTL0_REG) | 1, + db->membase + EMAC_TX_CTL0_REG); + + /* save the time stamp */ + dev->trans_start = jiffies; + } else if (channel == 1) { + /* set TX len */ + writel(skb->len, db->membase + EMAC_TX_PL1_REG); + /* start translate from fifo to phy */ + writel(readl(db->membase + EMAC_TX_CTL1_REG) | 1, + db->membase + EMAC_TX_CTL1_REG); + + /* save the time stamp */ + dev->trans_start = jiffies; + } + + if ((db->tx_fifo_stat & 3) == 3) { + /* Second packet */ + netif_stop_queue(dev); + } + + spin_unlock_irqrestore(&db->lock, flags); + + /* free this SKB */ + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/* EMAC interrupt handler + * receive the packet to upper layer, free the transmitted packet + */ +static void emac_tx_done(struct net_device *dev, struct emac_board_info *db, + unsigned int tx_status) +{ + /* One packet sent complete */ + db->tx_fifo_stat &= ~(tx_status & 3); + if (3 == (tx_status & 3)) + dev->stats.tx_packets += 2; + else + dev->stats.tx_packets++; + + if (netif_msg_tx_done(db)) + dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status); + + netif_wake_queue(dev); +} + +/* Received a packet and pass to upper layer + */ +static void emac_rx(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + struct sk_buff *skb; + u8 *rdptr; + bool good_packet; + static int rxlen_last; + unsigned int reg_val; + u32 rxhdr, rxstatus, rxcount, rxlen; + + /* Check packet ready or not */ + while (1) { + /* race warning: the first packet might arrive with + * the interrupts disabled, but the second will fix + * it + */ + rxcount = readl(db->membase + EMAC_RX_FBC_REG); + + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "RXCount: %x\n", rxcount); + + if ((db->skb_last != NULL) && (rxlen_last > 0)) { + dev->stats.rx_bytes += rxlen_last; + + /* Pass to upper layer */ + db->skb_last->protocol = eth_type_trans(db->skb_last, + dev); + netif_rx(db->skb_last); + dev->stats.rx_packets++; + db->skb_last = NULL; + rxlen_last = 0; + + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + reg_val &= ~EMAC_RX_CTL_DMA_EN; + writel(reg_val, db->membase + EMAC_RX_CTL_REG); + } + + if (!rxcount) { + db->emacrx_completed_flag = 1; + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0xf << 0) | (0x01 << 8); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); + + /* had one stuck? 
*/ + rxcount = readl(db->membase + EMAC_RX_FBC_REG); + if (!rxcount) + return; + } + + reg_val = readl(db->membase + EMAC_RX_IO_DATA_REG); + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "receive header: %x\n", reg_val); + if (reg_val != EMAC_UNDOCUMENTED_MAGIC) { + /* disable RX */ + reg_val = readl(db->membase + EMAC_CTL_REG); + writel(reg_val & ~EMAC_CTL_RX_EN, + db->membase + EMAC_CTL_REG); + + /* Flush RX FIFO */ + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + writel(reg_val | (1 << 3), + db->membase + EMAC_RX_CTL_REG); + + do { + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + } while (reg_val & (1 << 3)); + + /* enable RX */ + reg_val = readl(db->membase + EMAC_CTL_REG); + writel(reg_val | EMAC_CTL_RX_EN, + db->membase + EMAC_CTL_REG); + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0xf << 0) | (0x01 << 8); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); + + db->emacrx_completed_flag = 1; + + return; + } + + /* A packet ready now & Get status/length */ + good_packet = true; + + emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG, + &rxhdr, sizeof(rxhdr)); + + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "rxhdr: %x\n", *((int *)(&rxhdr))); + + rxlen = EMAC_RX_IO_DATA_LEN(rxhdr); + rxstatus = EMAC_RX_IO_DATA_STATUS(rxhdr); + + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "RX: status %02x, length %04x\n", + rxstatus, rxlen); + + /* Packet Status check */ + if (rxlen < 0x40) { + good_packet = false; + if (netif_msg_rx_err(db)) + dev_dbg(db->dev, "RX: Bad Packet (runt)\n"); + } + + if (unlikely(!(rxstatus & EMAC_RX_IO_DATA_STATUS_OK))) { + good_packet = false; + + if (rxstatus & EMAC_RX_IO_DATA_STATUS_CRC_ERR) { + if (netif_msg_rx_err(db)) + dev_dbg(db->dev, "crc error\n"); + dev->stats.rx_crc_errors++; + } + + if (rxstatus & EMAC_RX_IO_DATA_STATUS_LEN_ERR) { + if (netif_msg_rx_err(db)) + dev_dbg(db->dev, "length error\n"); + dev->stats.rx_length_errors++; + } + } + + /* Move data from EMAC */ + skb = dev_alloc_skb(rxlen + 4); + if (good_packet && skb) { + skb_reserve(skb, 2); + rdptr = (u8 *) skb_put(skb, rxlen - 4); + + /* Read received packet from RX SRAM */ + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "RxLen %x\n", rxlen); + + emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG, + rdptr, rxlen); + dev->stats.rx_bytes += rxlen; + + /* Pass to upper layer */ + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + } +} + +static irqreturn_t emac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct emac_board_info *db = netdev_priv(dev); + int int_status; + unsigned long flags; + unsigned int reg_val; + + /* A real interrupt coming */ + + /* holders of db->lock must always block IRQs */ + spin_lock_irqsave(&db->lock, flags); + + /* Disable all interrupts */ + writel(0, db->membase + EMAC_INT_CTL_REG); + + /* Got EMAC interrupt status */ + /* Got ISR */ + int_status = readl(db->membase + EMAC_INT_STA_REG); + /* Clear ISR status */ + writel(int_status, db->membase + EMAC_INT_STA_REG); + + if (netif_msg_intr(db)) + dev_dbg(db->dev, "emac interrupt %02x\n", int_status); + + /* Received the coming packet */ + if ((int_status & 0x100) && (db->emacrx_completed_flag == 1)) { + /* carrier lost */ + db->emacrx_completed_flag = 0; + emac_rx(dev); + } + + /* Transmit Interrupt check */ + if (int_status & (0x01 | 0x02)) + emac_tx_done(dev, db, int_status); + + if (int_status & (0x04 | 0x08)) + netdev_info(dev, " ab : %x\n", int_status); + + /* Re-enable interrupt mask */ + if 
(db->emacrx_completed_flag == 1) { + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0xf << 0) | (0x01 << 8); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); + } + spin_unlock_irqrestore(&db->lock, flags); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Used by netconsole + */ +static void emac_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + emac_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/* Open the interface. + * The interface is opened whenever "ifconfig" actives it. + */ +static int emac_open(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + int ret; + + if (netif_msg_ifup(db)) + dev_dbg(db->dev, "enabling %s\n", dev->name); + + if (devm_request_irq(db->dev, dev->irq, &emac_interrupt, + 0, dev->name, dev)) + return -EAGAIN; + + /* Initialize EMAC board */ + emac_reset(db); + emac_init_device(dev); + + ret = emac_mdio_probe(dev); + if (ret < 0) { + netdev_err(dev, "cannot probe MDIO bus\n"); + return ret; + } + + phy_start(db->phy_dev); + netif_start_queue(dev); + + return 0; +} + +static void emac_shutdown(struct net_device *dev) +{ + unsigned int reg_val; + struct emac_board_info *db = netdev_priv(dev); + + /* Disable all interrupt */ + writel(0, db->membase + EMAC_INT_CTL_REG); + + /* clear interupt status */ + reg_val = readl(db->membase + EMAC_INT_STA_REG); + writel(reg_val, db->membase + EMAC_INT_STA_REG); + + /* Disable RX/TX */ + reg_val = readl(db->membase + EMAC_CTL_REG); + reg_val &= ~(EMAC_CTL_TX_EN | EMAC_CTL_RX_EN | EMAC_CTL_RESET); + writel(reg_val, db->membase + EMAC_CTL_REG); +} + +/* Stop the interface. + * The interface is stopped when it is brought. + */ +static int emac_stop(struct net_device *ndev) +{ + struct emac_board_info *db = netdev_priv(ndev); + + if (netif_msg_ifdown(db)) + dev_dbg(db->dev, "shutting down %s\n", ndev->name); + + netif_stop_queue(ndev); + netif_carrier_off(ndev); + + phy_stop(db->phy_dev); + + emac_mdio_remove(ndev); + + emac_shutdown(ndev); + + return 0; +} + +static const struct net_device_ops emac_netdev_ops = { + .ndo_open = emac_open, + .ndo_stop = emac_stop, + .ndo_start_xmit = emac_start_xmit, + .ndo_tx_timeout = emac_timeout, + .ndo_do_ioctl = emac_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = emac_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = emac_poll_controller, +#endif +}; + +/* Search EMAC board, allocate space and register it + */ +static int emac_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct emac_board_info *db; + struct net_device *ndev; + int ret = 0; + const char *mac_addr; + + ndev = alloc_etherdev(sizeof(struct emac_board_info)); + if (!ndev) { + dev_err(&pdev->dev, "could not allocate device.\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + db = netdev_priv(ndev); + memset(db, 0, sizeof(*db)); + + db->dev = &pdev->dev; + db->ndev = ndev; + db->pdev = pdev; + + spin_lock_init(&db->lock); + + db->membase = of_iomap(np, 0); + if (!db->membase) { + dev_err(&pdev->dev, "failed to remap registers\n"); + return -ENOMEM; + goto out; + } + + /* fill in parameters for net-dev structure */ + ndev->base_addr = (unsigned long)db->membase; + ndev->irq = irq_of_parse_and_map(np, 0); + if (ndev->irq == -ENXIO) { + netdev_err(ndev, "No irq resource\n"); + ret = ndev->irq; + goto out; + } + + db->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(db->clk)) + goto out; 
+ + clk_prepare_enable(db->clk); + + db->phy_node = of_parse_phandle(np, "phy", 0); + if (!db->phy_node) { + dev_err(&pdev->dev, "no associated PHY\n"); + ret = -ENODEV; + goto out; + } + + /* Read MAC-address from DT */ + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + + /* Check if the MAC address is valid, if not get a random one */ + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + dev_warn(&pdev->dev, "using random MAC address %pM\n", + ndev->dev_addr); + } + + db->emacrx_completed_flag = 1; + emac_powerup(ndev); + emac_reset(db); + + ether_setup(ndev); + + ndev->netdev_ops = &emac_netdev_ops; + ndev->watchdog_timeo = msecs_to_jiffies(watchdog); + ndev->ethtool_ops = &emac_ethtool_ops; + +#ifdef CONFIG_NET_POLL_CONTROLLER + ndev->poll_controller = &emac_poll_controller; +#endif + + platform_set_drvdata(pdev, ndev); + + /* Carrier starts down, phylib will bring it up */ + netif_carrier_off(ndev); + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "Registering netdev failed!\n"); + ret = -ENODEV; + goto out; + } + + dev_info(&pdev->dev, "%s: at %p, IRQ %d MAC: %pM\n", + ndev->name, db->membase, ndev->irq, ndev->dev_addr); + + return 0; + +out: + dev_err(db->dev, "not found (%d).\n", ret); + + free_netdev(ndev); + + return ret; +} + +static int emac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + unregister_netdev(ndev); + free_netdev(ndev); + + dev_dbg(&pdev->dev, "released and freed device\n"); + return 0; +} + +static int emac_suspend(struct platform_device *dev, pm_message_t state) +{ + struct net_device *ndev = platform_get_drvdata(dev); + + netif_carrier_off(ndev); + netif_device_detach(ndev); + emac_shutdown(ndev); + + return 0; +} + +static int emac_resume(struct platform_device *dev) +{ + struct net_device *ndev = platform_get_drvdata(dev); + struct emac_board_info *db = netdev_priv(ndev); + + emac_reset(db); + emac_init_device(ndev); + netif_device_attach(ndev); + + return 0; +} + +static const struct of_device_id emac_of_match[] = { + {.compatible = "allwinner,sun4i-emac",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, emac_of_match); + +static struct platform_driver emac_driver = { + .driver = { + .name = "sun4i-emac", + .of_match_table = emac_of_match, + }, + .probe = emac_probe, + .remove = emac_remove, + .suspend = emac_suspend, + .resume = emac_resume, +}; + +module_platform_driver(emac_driver); + +MODULE_AUTHOR("Stefan Roese "); +MODULE_AUTHOR("Maxime Ripard "); +MODULE_DESCRIPTION("Allwinner A10 emac network driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.h b/drivers/net/ethernet/allwinner/sun4i-emac.h new file mode 100644 index 000000000000..38c72d9ec600 --- /dev/null +++ b/drivers/net/ethernet/allwinner/sun4i-emac.h @@ -0,0 +1,108 @@ +/* + * Allwinner EMAC Fast Ethernet driver for Linux. + * + * Copyright 2012 Stefan Roese + * Copyright 2013 Maxime Ripard + * + * Based on the Linux driver provided by Allwinner: + * Copyright (C) 1997 Sten Wang + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef _SUN4I_EMAC_H_ +#define _SUN4I_EMAC_H_ + +#define EMAC_CTL_REG (0x00) +#define EMAC_CTL_RESET (1 << 0) +#define EMAC_CTL_TX_EN (1 << 1) +#define EMAC_CTL_RX_EN (1 << 2) +#define EMAC_TX_MODE_REG (0x04) +#define EMAC_TX_MODE_ABORTED_FRAME_EN (1 << 0) +#define EMAC_TX_MODE_DMA_EN (1 << 1) +#define EMAC_TX_FLOW_REG (0x08) +#define EMAC_TX_CTL0_REG (0x0c) +#define EMAC_TX_CTL1_REG (0x10) +#define EMAC_TX_INS_REG (0x14) +#define EMAC_TX_PL0_REG (0x18) +#define EMAC_TX_PL1_REG (0x1c) +#define EMAC_TX_STA_REG (0x20) +#define EMAC_TX_IO_DATA_REG (0x24) +#define EMAC_TX_IO_DATA1_REG (0x28) +#define EMAC_TX_TSVL0_REG (0x2c) +#define EMAC_TX_TSVH0_REG (0x30) +#define EMAC_TX_TSVL1_REG (0x34) +#define EMAC_TX_TSVH1_REG (0x38) +#define EMAC_RX_CTL_REG (0x3c) +#define EMAC_RX_CTL_AUTO_DRQ_EN (1 << 1) +#define EMAC_RX_CTL_DMA_EN (1 << 2) +#define EMAC_RX_CTL_PASS_ALL_EN (1 << 4) +#define EMAC_RX_CTL_PASS_CTL_EN (1 << 5) +#define EMAC_RX_CTL_PASS_CRC_ERR_EN (1 << 6) +#define EMAC_RX_CTL_PASS_LEN_ERR_EN (1 << 7) +#define EMAC_RX_CTL_PASS_LEN_OOR_EN (1 << 8) +#define EMAC_RX_CTL_ACCEPT_UNICAST_EN (1 << 16) +#define EMAC_RX_CTL_DA_FILTER_EN (1 << 17) +#define EMAC_RX_CTL_ACCEPT_MULTICAST_EN (1 << 20) +#define EMAC_RX_CTL_HASH_FILTER_EN (1 << 21) +#define EMAC_RX_CTL_ACCEPT_BROADCAST_EN (1 << 22) +#define EMAC_RX_CTL_SA_FILTER_EN (1 << 24) +#define EMAC_RX_CTL_SA_FILTER_INVERT_EN (1 << 25) +#define EMAC_RX_HASH0_REG (0x40) +#define EMAC_RX_HASH1_REG (0x44) +#define EMAC_RX_STA_REG (0x48) +#define EMAC_RX_IO_DATA_REG (0x4c) +#define EMAC_RX_IO_DATA_LEN(x) (x & 0xffff) +#define EMAC_RX_IO_DATA_STATUS(x) ((x >> 16) & 0xffff) +#define EMAC_RX_IO_DATA_STATUS_CRC_ERR (1 << 4) +#define EMAC_RX_IO_DATA_STATUS_LEN_ERR (3 << 5) +#define EMAC_RX_IO_DATA_STATUS_OK (1 << 7) +#define EMAC_RX_FBC_REG (0x50) +#define EMAC_INT_CTL_REG (0x54) +#define EMAC_INT_STA_REG (0x58) +#define EMAC_MAC_CTL0_REG (0x5c) +#define EMAC_MAC_CTL0_RX_FLOW_CTL_EN (1 << 2) +#define EMAC_MAC_CTL0_TX_FLOW_CTL_EN (1 << 3) +#define EMAC_MAC_CTL0_SOFT_RESET (1 << 15) +#define EMAC_MAC_CTL1_REG (0x60) +#define EMAC_MAC_CTL1_DUPLEX_EN (1 << 0) +#define EMAC_MAC_CTL1_LEN_CHECK_EN (1 << 1) +#define EMAC_MAC_CTL1_HUGE_FRAME_EN (1 << 2) +#define EMAC_MAC_CTL1_DELAYED_CRC_EN (1 << 3) +#define EMAC_MAC_CTL1_CRC_EN (1 << 4) +#define EMAC_MAC_CTL1_PAD_EN (1 << 5) +#define EMAC_MAC_CTL1_PAD_CRC_EN (1 << 6) +#define EMAC_MAC_CTL1_AD_SHORT_FRAME_EN (1 << 7) +#define EMAC_MAC_CTL1_BACKOFF_DIS (1 << 12) +#define EMAC_MAC_IPGT_REG (0x64) +#define EMAC_MAC_IPGT_HALF_DUPLEX (0x12) +#define EMAC_MAC_IPGT_FULL_DUPLEX (0x15) +#define EMAC_MAC_IPGR_REG (0x68) +#define EMAC_MAC_IPGR_IPG1 (0x0c) +#define EMAC_MAC_IPGR_IPG2 (0x12) +#define EMAC_MAC_CLRT_REG (0x6c) +#define EMAC_MAC_CLRT_COLLISION_WINDOW (0x37) +#define EMAC_MAC_CLRT_RM (0x0f) +#define EMAC_MAC_MAXF_REG (0x70) +#define EMAC_MAC_SUPP_REG (0x74) +#define EMAC_MAC_TEST_REG (0x78) +#define EMAC_MAC_MCFG_REG (0x7c) +#define EMAC_MAC_A0_REG (0x98) +#define EMAC_MAC_A1_REG (0x9c) +#define EMAC_MAC_A2_REG (0xa0) +#define EMAC_SAFX_L_REG0 (0xa4) +#define EMAC_SAFX_H_REG0 (0xa8) +#define EMAC_SAFX_L_REG1 (0xac) +#define EMAC_SAFX_H_REG1 (0xb0) +#define EMAC_SAFX_L_REG2 (0xb4) +#define EMAC_SAFX_H_REG2 (0xb8) +#define EMAC_SAFX_L_REG3 (0xbc) +#define EMAC_SAFX_H_REG3 (0xc0) + +#define EMAC_PHY_DUPLEX (1 << 8) + +#define EMAC_EEPROM_MAGIC (0x444d394b) +#define EMAC_UNDOCUMENTED_MAGIC (0x0143414d) +#endif /* _SUN4I_EMAC_H_ */ -- cgit v1.2.3 From 28a19fe60f4f31cbd349f8473f0483aa7500acf5 Mon Sep 17 00:00:00 
2001 From: Mugunthan V N Date: Wed, 29 May 2013 20:22:01 +0000 Subject: drivers: net: davinci_cpdma: remove CRC bytes from skb added by CPDMA Additional 4 bytes found in the skb is the CRC calculated by the CPDMA hardware, check the CRC bit in CPDMA status field of Descriptor and remove the CRC length from the skb. This extra 4 byte can be seen when capturing packets using tcpdump. This has been tested in TI816x platform. Signed-off-by: Mugunthan V N Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_cpdma.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 49dfd592ac1e..a377bc727740 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -64,6 +64,7 @@ #define CPDMA_DESC_TO_PORT_EN BIT(20) #define CPDMA_TO_PORT_SHIFT 16 #define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16)) +#define CPDMA_DESC_CRC_LEN 4 #define CPDMA_TEARDOWN_VALUE 0xfffffffc @@ -798,6 +799,10 @@ static int __cpdma_chan_process(struct cpdma_chan *chan) status = -EBUSY; goto unlock_ret; } + + if (status & CPDMA_DESC_PASS_CRC) + outlen -= CPDMA_DESC_CRC_LEN; + status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE | CPDMA_DESC_PORT_MASK); -- cgit v1.2.3 From 5c510811547f88522b00623417b97d9fba85a06b Mon Sep 17 00:00:00 2001 From: Somnath Kotur Date: Thu, 30 May 2013 02:52:23 +0000 Subject: be2net: Implement initiate FW dump feature for Lancer Added code to initiate FW dump via ethtool. Driver checks if the previous dump has been cleared before initiating the dump. It doesn't initiate the dump if it is not cleared. Signed-off-by: Kalesh AP Signed-off-by: Somnath Kotur Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be.h | 3 ++ drivers/net/ethernet/emulex/benet/be_cmds.c | 66 ++++++++++++++++++++++++++ drivers/net/ethernet/emulex/benet/be_cmds.h | 3 ++ drivers/net/ethernet/emulex/benet/be_ethtool.c | 29 +++++++++++ drivers/net/ethernet/emulex/benet/be_hw.h | 2 + drivers/net/ethernet/emulex/benet/be_main.c | 37 +-------------- 6 files changed, 105 insertions(+), 35 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index edce38bf83c9..fcc30a2d831b 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -332,6 +332,9 @@ enum vf_state { #define BE_VF_UC_PMAC_COUNT 2 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11) +/* Ethtool set_dump flags */ +#define LANCER_INITIATE_FW_DUMP 0x1 + struct phy_info { u8 transceiver; u8 autoneg; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index a236ecd27cf3..4d0ec3649a63 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -3255,6 +3255,72 @@ err: return status; } +static int lancer_wait_idle(struct be_adapter *adapter) +{ +#define SLIPORT_IDLE_TIMEOUT 30 + u32 reg_val; + int status = 0, i; + + for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) { + reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET); + if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0) + break; + + ssleep(1); + } + + if (i == SLIPORT_IDLE_TIMEOUT) + status = -1; + + return status; +} + +int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask) +{ + int status = 0; + + status = lancer_wait_idle(adapter); + if (status) + return status; + + iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET); + + return status; +} + +/* Routine to check whether dump image is present or not */ +bool dump_present(struct be_adapter *adapter) +{ + u32 sliport_status = 0; + + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); + return !!(sliport_status & SLIPORT_STATUS_DIP_MASK); +} + +int lancer_initiate_dump(struct be_adapter *adapter) +{ + int status; + + /* give firmware reset and diagnostic dump */ + status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK | + PHYSDEV_CONTROL_DD_MASK); + if (status < 0) { + dev_err(&adapter->pdev->dev, "Firmware reset failed\n"); + return status; + } + + status = lancer_wait_idle(adapter); + if (status) + return status; + + if (!dump_present(adapter)) { + dev_err(&adapter->pdev->dev, "Dump image not present\n"); + return -1; + } + + return 0; +} + /* Uses sync mcc */ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 025bdb0d1764..5228d88c5a02 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1937,6 +1937,9 @@ extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, struct be_dma_mem *cmd, struct be_fat_conf_params *cfgs); extern int lancer_wait_ready(struct be_adapter *adapter); +extern int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask); +extern int lancer_initiate_dump(struct be_adapter *adapter); +extern bool dump_present(struct be_adapter *adapter); extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter); extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name); extern int be_cmd_get_func_config(struct be_adapter *adapter); diff --git 
a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index f3ee07758198..4f8c941217cc 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -669,6 +669,34 @@ be_set_phys_id(struct net_device *netdev, return 0; } +static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; + int status; + + if (!lancer_chip(adapter)) { + dev_err(dev, "FW dump not supported\n"); + return -EOPNOTSUPP; + } + + if (dump_present(adapter)) { + dev_err(dev, "Previous dump not cleared, not forcing dump\n"); + return 0; + } + + switch (dump->flag) { + case LANCER_INITIATE_FW_DUMP: + status = lancer_initiate_dump(adapter); + if (!status) + dev_info(dev, "F/w dump initiated successfully\n"); + break; + default: + dev_err(dev, "Invalid dump level: 0x%x\n", dump->flag); + return -EINVAL; + } + return status; +} static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -1106,6 +1134,7 @@ const struct ethtool_ops be_ethtool_ops = { .set_pauseparam = be_set_pauseparam, .get_strings = be_get_stat_strings, .set_phys_id = be_set_phys_id, + .set_dump = be_set_dump, .get_msglevel = be_get_msg_level, .set_msglevel = be_set_msg_level, .get_sset_count = be_get_sset_count, diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 3c1099b47f2a..0311d9bfe858 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -53,10 +53,12 @@ #define PHYSDEV_CONTROL_OFFSET 0x414 #define SLIPORT_STATUS_ERR_MASK 0x80000000 +#define SLIPORT_STATUS_DIP_MASK 0x02000000 #define SLIPORT_STATUS_RN_MASK 0x01000000 #define SLIPORT_STATUS_RDY_MASK 0x00800000 #define SLI_PORT_CONTROL_IP_MASK 0x08000000 #define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002 +#define PHYSDEV_CONTROL_DD_MASK 0x00000004 #define PHYSDEV_CONTROL_INP_MASK 0x40000000 #define SLIPORT_ERROR_NO_RESOURCE1 0x2 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 1140a86ff5ea..5d9044e4afe2 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3551,40 +3551,6 @@ static int be_flash_skyhawk(struct be_adapter *adapter, return 0; } -static int lancer_wait_idle(struct be_adapter *adapter) -{ -#define SLIPORT_IDLE_TIMEOUT 30 - u32 reg_val; - int status = 0, i; - - for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) { - reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET); - if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0) - break; - - ssleep(1); - } - - if (i == SLIPORT_IDLE_TIMEOUT) - status = -1; - - return status; -} - -static int lancer_fw_reset(struct be_adapter *adapter) -{ - int status = 0; - - status = lancer_wait_idle(adapter); - if (status) - return status; - - iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db + - PHYSDEV_CONTROL_OFFSET); - - return status; -} - static int lancer_fw_download(struct be_adapter *adapter, const struct firmware *fw) { @@ -3662,7 +3628,8 @@ static int lancer_fw_download(struct be_adapter *adapter, } if (change_status == LANCER_FW_RESET_NEEDED) { - status = lancer_fw_reset(adapter); + status = lancer_physdev_ctrl(adapter, + PHYSDEV_CONTROL_FW_RESET_MASK); if (status) { dev_err(&adapter->pdev->dev, "Adapter busy for FW reset.\n" -- cgit v1.2.3 From e69972f52bdc4bc384a21a6561d5673b4a94e989 Mon Sep 17 00:00:00 2001 From: 
Jay Hernandez Date: Thu, 30 May 2013 03:24:14 +0000 Subject: cxgb4: Force uninitialized state if FW_ON_ADAPTER is < FW_VERSION and we're the MASTER_PF Forcing uninitialized state allows us to upgrade and reinitialize the adapter. FW_VERSION_T4 = 1.4.0.0 FW_VERSION_T5 = 0.0.0.0 At this point driver supports above and greater than above version of firmware. If it doesn't find the required firmware version than it forces the adapter to be reinitialized as shown below. 1) If FW_ON_ADAPTER < FW_VERSION and we're the MASTER_PF force uninitialized state and a FW upgrade if available. - If FW_ON_ADAPTER < /lib/firmware/cxgb4/t*fw.bin we will update the adapters FW. - If FW_ON_ADAPTER >= /lib/firmware/cxgb4/t*fw.bin don't upgrade FW. - If upgrade_fw() fails force reinitialization of the adapter anyways, it might still work. Either way forcing the uninitialized state allows cxgb4 reinitialize FW. 2) If FW_ON_ADAPTER >= FW_VERSION driver follows normal path. Signed-off-by: Jay Hernandez Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 27 +++++++++++++++++++++++-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 9 +++++++++ 3 files changed, 35 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 681804b30a3f..2aafb809e067 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -51,7 +51,7 @@ #include "t4_hw.h" #define FW_VERSION_MAJOR 1 -#define FW_VERSION_MINOR 1 +#define FW_VERSION_MINOR 4 #define FW_VERSION_MICRO 0 #define FW_VERSION_MAJOR_T5 0 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 3cd397d60434..5a3256b083f2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4842,8 +4842,17 @@ static int adap_init0(struct adapter *adap) * is excessively mismatched relative to the driver.) */ ret = t4_check_fw_version(adap); + + /* The error code -EFAULT is returned by t4_check_fw_version() if + * firmware on adapter < supported firmware. If firmware on adapter + * is too old (not supported by driver) and we're the MASTER_PF set + * adapter state to DEV_STATE_UNINIT to force firmware upgrade + * and reinitialization. + */ + if ((adap->flags & MASTER_PF) && ret == -EFAULT) + state = DEV_STATE_UNINIT; if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { - if (ret == -EINVAL || ret > 0) { + if (ret == -EINVAL || ret == -EFAULT || ret > 0) { if (upgrade_fw(adap) >= 0) { /* * Note that the chip was reset as part of the @@ -4852,7 +4861,21 @@ static int adap_init0(struct adapter *adap) */ reset = 0; ret = t4_check_fw_version(adap); - } + } else + if (ret == -EFAULT) { + /* + * Firmware is old but still might + * work if we force reinitialization + * of the adapter. Ignoring FW upgrade + * failure. 
+ */ + dev_warn(adap->pdev_dev, + "Ignoring firmware upgrade " + "failure, and forcing driver " + "to reinitialize the " + "adapter.\n"); + ret = 0; + } } if (ret < 0) return ret; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index d02d4e8c4417..bff89a437b76 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -938,6 +938,15 @@ int t4_check_fw_version(struct adapter *adapter) memcpy(adapter->params.api_vers, api_vers, sizeof(adapter->params.api_vers)); + if (major < exp_major || (major == exp_major && minor < exp_minor) || + (major == exp_major && minor == exp_minor && micro < exp_micro)) { + dev_err(adapter->pdev_dev, + "Card has firmware version %u.%u.%u, minimum " + "supported firmware is %u.%u.%u.\n", major, minor, + micro, exp_major, exp_minor, exp_micro); + return -EFAULT; + } + if (major != exp_major) { /* major mismatch - fail */ dev_err(adapter->pdev_dev, "card FW has major version %u, driver wants %u\n", -- cgit v1.2.3 From ada7c19e6d27bf4bb6a242960ac6e76f9f21cdd7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 31 May 2013 14:32:55 +0000 Subject: bnx2x: use XPS if possible for bnx2x_select_queue instead of pure hash The bnx2x_select_queue() was using __skb_tx_hash() to select the transmit queue, totally ignoring XPS settings, while XPS can help performance quite significantly, so change the bnx2x_select_queue() to use __dev_pick_tx() instead which will use XPS if configured. Based on patches from Ying Cai and Havard Skinnemoen Reported-by: govindarajulu.v Signed-off-by: Eric Dumazet Cc: Havard Skinnemoen Cc: Ying Cai Cc: Eilon Greenstein Acked-by: Dmitry Kravkov Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index c80f1d26f40d..3651f6d449eb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1829,7 +1829,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) } /* select a non-FCoE queue */ - return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); + return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); } void bnx2x_set_num_queues(struct bnx2x *bp) -- cgit v1.2.3 From d76a611187c4840a4a45fb3f493f9b63c19df4ca Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 2 Jun 2013 00:06:17 +0000 Subject: bnx2x: Semantic change of empty lines This patch removes unnecessary blank lines and adds a few where such are needed (between variable declarations and code) Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 23 -------- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 12 +--- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3 - drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 11 ---- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | 3 - drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h | 1 - .../net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 4 -- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 68 +--------------------- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 30 ---------- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 1 - drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 1 - drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 1 - drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 1 - drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 4 -- 14 files changed, 3 insertions(+), 160 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 47b06fc22b97..0c1104f30de6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -34,12 +34,10 @@ #define BCM_DCBNL #endif - #include "bnx2x_hsi.h" #include "../cnic_if.h" - #define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt) #include @@ -114,7 +112,6 @@ do { \ #define BNX2X_ERROR(fmt, ...) \ pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__) - /* before we have a dev->name use dev_info() */ #define BNX2X_DEV_INFO(fmt, ...) \ do { \ @@ -147,7 +144,6 @@ do { \ #define U64_HI(x) ((u32)(((u64)(x)) >> 32)) #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) - #define REG_ADDR(bp, offset) ((bp->regview) + (offset)) #define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) @@ -387,7 +383,6 @@ union db_prod { #define BIT_VEC64_ELEM_SHIFT 6 #define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) - #define __BIT_VEC64_SET_BIT(el, bit) \ do { \ el = ((el) | ((u64)0x1 << (bit))); \ @@ -398,7 +393,6 @@ union db_prod { el = ((el) & (~((u64)0x1 << (bit)))); \ } while (0) - #define BIT_VEC64_SET_BIT(vec64, idx) \ __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ (idx) & BIT_VEC64_ELEM_MASK) @@ -419,8 +413,6 @@ union db_prod { /*******************************************************/ - - /* Number of u64 elements in SGE mask array */ #define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ) #define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) @@ -580,12 +572,10 @@ struct bnx2x_fastpath { txdata_ptr[FIRST_TX_COS_INDEX] \ ->var) - #define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp)) #define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp)) #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp)) - /* MC hsi */ #define MAX_FETCH_BD 13 /* HW max BDs per packet */ #define RX_COPY_THRESH 92 @@ -693,12 +683,10 @@ struct bnx2x_fastpath { FW_DROP_LEVEL(bp)) #define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) - /* This is needed for determining of last_max */ #define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) #define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) - #define BNX2X_SWCID_SHIFT 17 #define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) @@ -723,7 +711,6 @@ struct bnx2x_fastpath { DPM_TRIGER_TYPE); \ } while (0) - /* TX CSUM helpers */ #define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \ skb->csum_offset) @@ -766,7 +753,6 @@ struct bnx2x_fastpath { #define BNX2X_RX_SUM_FIX(cqe) \ BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) - #define FP_USB_FUNC_OFF \ offsetof(struct cstorm_status_block_u, 
func) #define FP_CSB_FUNC_OFF \ @@ -1068,7 +1054,6 @@ struct bnx2x_slowpath { struct eth_classify_rules_ramrod_data e2; } mac_rdata; - union { struct tstorm_eth_mac_filter_config e1x; struct eth_filter_rules_ramrod_data e2; @@ -1119,7 +1104,6 @@ struct bnx2x_slowpath { #define bnx2x_sp_mapping(bp, var) \ (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) - /* attn group wiring */ #define MAX_DYNAMIC_ATTN_GRPS 8 @@ -1225,7 +1209,6 @@ enum { BNX2X_SP_RTNL_HYPERVISOR_VLAN, }; - struct bnx2x_prev_path_list { struct list_head list; u8 bus; @@ -1755,7 +1738,6 @@ extern int num_queues; #define FUNC_FLG_SPQ 0x0010 #define FUNC_FLG_LEADING 0x0020 /* PF only */ - struct bnx2x_func_init_params { /* dma */ dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ @@ -1853,9 +1835,6 @@ struct bnx2x_func_init_params { #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) - - - /** * bnx2x_set_mac_one - configure a single MAC address * @@ -2000,7 +1979,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define UNLOAD_CLOSE 1 #define UNLOAD_RECOVERY 2 - /* DMAE command defines */ #define DMAE_TIMEOUT -1 #define DMAE_PCI_ERROR -2 /* E2 and onward */ @@ -2102,7 +2080,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) #define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) - #define BNX2X_BTR 4 #define MAX_SPQ_PENDING 8 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 3651f6d449eb..b28aaf170755 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -165,7 +165,6 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); - nbd = le16_to_cpu(tx_start_bd->nbd) - 1; #ifdef BNX2X_STOP_ON_ERROR if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { @@ -733,7 +732,6 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, dev_kfree_skb_any(skb); } - /* put new data in bin */ rx_buf->data = new_data; @@ -899,7 +897,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) cqe_fp); goto next_rx; - } queue = cqe->end_agg_cqe.queue_index; tpa_info = &fp->tpa_info[queue]; @@ -1004,7 +1001,6 @@ reuse_rx: le16_to_cpu(cqe_fp->vlan_tag)); napi_gro_receive(&fp->napi, skb); - next_rx: rx_buf->data = NULL; @@ -2322,10 +2318,10 @@ static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) static void bnx2x_bz_fp(struct bnx2x *bp, int index) { struct bnx2x_fastpath *fp = &bp->fp[index]; - int cos; struct napi_struct orig_napi = fp->napi; struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; + /* bzero bnx2x_fastpath contents */ if (fp->tpa_info) memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * @@ -2435,7 +2431,6 @@ int bnx2x_load_cnic(struct bnx2x *bp) if (bp->state == BNX2X_STATE_OPEN) bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); - DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n"); return 0; @@ -2961,7 +2956,6 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bnx2x_set_reset_global(bp); } - /* The last driver must disable a "close the gate" if there is no * parity attention or "process kill" pending. 
*/ @@ -4480,7 +4474,6 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp) alloc_err: bnx2x_free_mem_bp(bp); return -ENOMEM; - } int bnx2x_reload_if_running(struct net_device *dev) @@ -4522,7 +4515,6 @@ int bnx2x_get_cur_phy_idx(struct bnx2x *bp) } return sel_phy_idx; - } int bnx2x_get_link_cfg_idx(struct bnx2x *bp) { @@ -4739,7 +4731,6 @@ int bnx2x_resume(struct pci_dev *pdev) return rc; } - void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, u32 cid) { @@ -4757,7 +4748,6 @@ static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, u8 fw_sb_id, u8 sb_index, u8 ticks) { - u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); REG_WR8(bp, addr, ticks); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 151675d66b0d..f62ffc11ac54 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -22,7 +22,6 @@ #include #include - #include "bnx2x.h" #include "bnx2x_sriov.h" @@ -1171,7 +1170,6 @@ static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) { - /* the 'first' id is allocated for the cnic */ return bp->base_fw_ndsb; } @@ -1181,7 +1179,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) return bp->igu_base_sb; } - static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) { struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 4b077a7f16af..2689f28010fd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -253,7 +253,6 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, memset(&pg_help_data, 0, sizeof(struct pg_help_data)); - if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR)) DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n"); @@ -298,7 +297,6 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp, struct dcbx_pfc_feature *pfc, u32 error) { - if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR)) DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n"); @@ -367,7 +365,6 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp, struct lldp_remote_mib *remote_mib ; struct lldp_local_mib *local_mib; - switch (read_mib_type) { case DCBX_READ_LOCAL_MIB: mib_size = sizeof(struct lldp_local_mib); @@ -629,7 +626,6 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) return 0; } - #ifdef BCM_DCBNL static inline u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent) @@ -896,13 +892,11 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, } af->app.default_pri = (u8)dp->admin_default_priority; - } /* Write the data. 
*/ bnx2x_write_data(bp, (u32 *)&admin_mib, offset, sizeof(struct lldp_admin_mib)); - } void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) @@ -1181,7 +1175,6 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, BNX2X_ERR("dcbx error: Both groups must have priorities\n"); } - #ifndef POWER_OF_2 #define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1)))) #endif @@ -1524,7 +1517,6 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( } } - static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, struct pg_help_data *help_data, struct dcbx_ets_feature *ets, @@ -1533,7 +1525,6 @@ static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, u32 pri_join_mask, u8 num_of_dif_pri) { - /* default E2 settings */ cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2; @@ -1629,7 +1620,6 @@ static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp, u8 num_spread_of_entries, u8 strict_app_pris) { - if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry, num_spread_of_entries, strict_app_pris)) { @@ -1876,7 +1866,6 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp) * dcbx negotiation. */ bnx2x_dcbx_update_tc_mapping(bp); - } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index d153f44cf8f9..125bd1b6586f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h @@ -134,8 +134,6 @@ enum { #define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130 #define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170 - - struct cos_entry_help_data { u32 pri_join_mask; u32 cos_bw; @@ -170,7 +168,6 @@ struct cos_help_data { (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \ IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri)))) - struct pg_entry_help_data { u8 num_of_dif_pri; u8 pg; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index bff5e33eaa14..219dd84d4bf5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -28,7 +28,6 @@ #define DRV_DUMP_USTORM_WAITP_ADDRESS 0x338a80 #define DRV_DUMP_CSTORM_WAITP_ADDRESS 0x238a80 - /* Possible Chips */ #define DUMP_CHIP_E1 1 #define DUMP_CHIP_E1H 2 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 263950cddfe2..91978eb166ea 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -733,7 +733,6 @@ static bool bnx2x_is_reg_in_chip(struct bnx2x *bp, return false; } - static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp, const struct wreg_addr *wreg_info) { @@ -3071,7 +3070,6 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) } } - for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) continue; @@ -3194,7 +3192,6 @@ static int bnx2x_set_phys_id(struct net_device *dev, static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) { - switch (info->flow_type) { case TCP_V4_FLOW: case TCP_V6_FLOW: @@ -3430,7 +3427,6 @@ static int bnx2x_set_channels(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); - DP(BNX2X_MSG_ETHTOOL, "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", channels->rx_count, channels->tx_count, channels->other_count, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index f6d0e209c44e..5081990daea0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -93,7 +93,6 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1); MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); - int num_queues; module_param(num_queues, int, 0); MODULE_PARM_DESC(num_queues, @@ -122,8 +121,6 @@ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, " Default debug msglevel"); - - struct workqueue_struct *bnx2x_wq; struct bnx2x_mac_vals { @@ -917,7 +914,6 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) sp_sb_data.p_func.vf_valid, sp_sb_data.state); - for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; int loop; @@ -1290,7 +1286,6 @@ void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); - /* Verify the transmission buffers are flushed P0, P1, P4 */ for (i = 0; i < ARRAY_SIZE(buf_regs); i++) bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); @@ -1305,11 +1300,9 @@ void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) #define OP_GEN_AGG_VECT(index) \ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) - int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) { u32 op_gen_command = 0; - u32 comp_addr = BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); int ret = 0; @@ -1352,7 +1345,6 @@ u8 bnx2x_is_pcie_pending(struct pci_dev *dev) */ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) { - /* wait for CFC PF usage-counter to zero (includes all the VFs) */ if (bnx2x_flr_clnup_poll_hw_counter(bp, CFC_REG_NUM_LCIDS_INSIDE_PF, @@ -1360,7 +1352,6 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) poll_cnt)) return 1; - /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ if (bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_PF_USAGE_CNT, @@ -1991,7 +1982,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) return 0; } - int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) { /* The GPIO should be swapped if swap register is set and active */ @@ -2347,7 +2337,6 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) return rc; } - /* Calculates the sum of vn_min_rates. It's needed for further normalizing of the min_rates. 
Returns: @@ -2419,7 +2408,6 @@ static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn, input->vnic_max_rate[vn] = vn_max_rate; } - static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) { if (CHIP_REV_IS_SLOW(bp)) @@ -2901,7 +2889,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) return rc; } - static void storm_memset_func_cfg(struct bnx2x *bp, struct tstorm_eth_function_common_config *tcfg, u16 abs_fid) @@ -3006,7 +2993,6 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, if (IS_MF_AFEX(bp)) __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags); - return flags | bnx2x_get_common_flags(bp, fp, true); } @@ -3196,7 +3182,6 @@ static void bnx2x_pf_init(struct bnx2x *bp) storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); } - static void bnx2x_e1h_disable(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -3540,10 +3525,8 @@ static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) return true; else return false; - } - /** * bnx2x_sp_post - place a single command on an SP ring * @@ -3615,7 +3598,6 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, else atomic_dec(&bp->cq_spq_left); - DP(BNX2X_MSG_SP, "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n", bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), @@ -3836,7 +3818,6 @@ static void bnx2x_fan_failure(struct bnx2x *bp) set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state); smp_mb__after_clear_bit(); schedule_delayed_work(&bp->sp_rtnl_task, 0); - } static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) @@ -4591,7 +4572,6 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) return bnx2x_parity_attn(bp, global, print, attn.sig); } - static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) { u32 val; @@ -4643,7 +4623,6 @@ static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); } - } static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) @@ -4878,7 +4857,6 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, BNX2X_ERR("Failed to schedule new commands: %d\n", rc); else if (rc > 0) DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); - } static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); @@ -5075,8 +5053,6 @@ static void bnx2x_eq_int(struct bnx2x *bp) if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) break; - - goto next_spqe; case EVENT_RING_OPCODE_STOP_TRAFFIC: @@ -5265,7 +5241,6 @@ static void bnx2x_sp_task(struct work_struct *work) /* ack status block only if something was actually handled */ bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); - } /* must be called after the EQ processing (since eq leads to sriov @@ -5316,7 +5291,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) /* end of slow path */ - void bnx2x_drv_pulse(struct bnx2x *bp) { SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, @@ -5382,7 +5356,6 @@ static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) else for (i = 0; i < len; i++) REG_WR8(bp, addr + i, fill); - } /* helper: writes FP SP data to FW - data_size in dwords */ @@ -5461,10 +5434,8 @@ static void bnx2x_zero_sp_sb(struct bnx2x *bp) bnx2x_fill(bp, BAR_CSTRORM_INTMEM + CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, CSTORM_SP_SYNC_BLOCK_SIZE); - } - static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, int igu_seg_id) { @@ -5474,7 
+5445,6 @@ static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, hc_sm->time_to_expire = 0xFFFFFFFF; } - /* allocates state machine ids. */ static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) { @@ -6001,6 +5971,7 @@ static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp) for_each_tx_queue_cnic(bp, i) bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); } + static void bnx2x_init_tx_rings(struct bnx2x *bp) { int i; @@ -6772,7 +6743,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); - /* QM queues pointers table */ bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); @@ -7013,7 +6983,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) u32 low, high; u32 val; - DP(NETIF_MSG_HW, "starting port init port %d\n", port); REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); @@ -7078,7 +7047,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) BRB1_REG_MAC_GUARANTIED_1 : BRB1_REG_MAC_GUARANTIED_0), 40); - bnx2x_init_block(bp, BLOCK_PRS, init_phase); if (CHIP_IS_E3B0(bp)) { if (IS_MF_AFEX(bp)) { @@ -7275,7 +7243,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) msleep(20); - if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", @@ -7295,7 +7262,6 @@ static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) bnx2x_ilt_wr(bp, i, 0); } - static void bnx2x_init_searcher(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -7331,7 +7297,6 @@ static int bnx2x_reset_nic_mode(struct bnx2x *bp) int rc, i, port = BP_PORT(bp); int vlan_en = 0, mac_en[NUM_MACS]; - /* Close input from network */ if (bp->mf_mode == SINGLE_FUNCTION) { bnx2x_set_rx_filter(&bp->link_params, 0); @@ -7480,7 +7445,6 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) /* Set NIC mode */ REG_WR(bp, PRS_REG_NIC_MODE, 1); DP(NETIF_MSG_IFUP, "NIC MODE configrued\n"); - } if (!CHIP_IS_E1x(bp)) { @@ -7734,7 +7698,6 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) return 0; } - void bnx2x_free_mem_cnic(struct bnx2x *bp) { bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE); @@ -7779,7 +7742,6 @@ void bnx2x_free_mem(struct bnx2x *bp) bnx2x_iov_free_mem(bp); } - int bnx2x_alloc_mem_cnic(struct bnx2x *bp) { if (!CHIP_IS_E1x(bp)) @@ -8068,7 +8030,6 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); - } if (CNIC_SUPPORT(bp)) { @@ -8124,7 +8085,6 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) static void bnx2x_pf_q_prep_init(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) { - u8 cos; int cxt_index, cxt_offset; @@ -8205,7 +8165,6 @@ static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, return bnx2x_queue_state_change(bp, q_params); } - /** * bnx2x_setup_queue - setup queue * @@ -8254,7 +8213,6 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, DP(NETIF_MSG_IFUP, "init complete\n"); - /* Now move the Queue to the SETUP state... 
*/ memset(setup_params, 0, sizeof(*setup_params)); @@ -8315,7 +8273,6 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) /* We want to wait for completion in this context */ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); - /* close tx-only connections */ for (tx_index = FIRST_TX_ONLY_COS_INDEX; tx_index < fp->max_cos; @@ -8369,7 +8326,6 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) return bnx2x_queue_state_change(bp, &q_params); } - static void bnx2x_reset_func(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -8740,7 +8696,6 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) bnx2x_iov_chip_cleanup(bp); - /* * Send the UNLOAD_REQUEST to the MCP. This will return if * this function should perform FUNC, PORT or COMMON HW @@ -8813,7 +8768,6 @@ unload_error: if (rc) BNX2X_ERR("HW_RESET failed\n"); - /* Report UNLOAD_DONE to MCP */ bnx2x_send_unload_done(bp, keep_link); } @@ -9179,7 +9133,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global) if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) return -EAGAIN; - /* TBD: Indicate that "process kill" is in progress to MCP */ /* Clear "unprepared" bit */ @@ -9647,7 +9600,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; REG_WR(bp, vals->bmac_addr, wb_data[0]); REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); - } BNX2X_DEV_INFO("Disable emac Rx\n"); vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; @@ -9681,7 +9633,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, if (mac_stopped) msleep(20); - } #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) @@ -9854,7 +9805,6 @@ static int bnx2x_do_flr(struct bnx2x *bp) u16 status; struct pci_dev *dev = bp->pdev; - if (CHIP_IS_E1x(bp)) { BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); return -EINVAL; @@ -10001,7 +9951,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) if (!timer_count) BNX2X_ERR("Failed to empty BRB, hope for the best\n"); - } /* No packets are in the pipeline, path is ready for reset */ @@ -10205,8 +10154,6 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp) bnx2x_init_shmem(bp); - - bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 
MISC_REG_GENERIC_CR_1 : MISC_REG_GENERIC_CR_0)); @@ -10467,7 +10414,6 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) if (!(bp->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; - } BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], @@ -10778,7 +10724,6 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp) */ if (!bp->cnic_eth_dev.max_iscsi_conn) bp->flags |= no_flags; - } static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) @@ -11442,7 +11387,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) mutex_init(&bp->fw_mb_mutex); spin_lock_init(&bp->stats_lock); - INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); @@ -11475,7 +11419,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) bnx2x_prev_unload(bp); } - if (CHIP_REV_IS_FPGA(bp)) dev_err(&bp->pdev->dev, "FPGA detected\n"); @@ -11558,7 +11501,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) return rc; } - /**************************************************************************** * General service functions ****************************************************************************/ @@ -12397,7 +12339,6 @@ static void bnx2x_release_firmware(struct bnx2x *bp) bp->firmware = NULL; } - static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { .init_hw_cmn_chip = bnx2x_init_hw_common_chip, .init_hw_cmn = bnx2x_init_hw_common, @@ -12676,7 +12617,6 @@ static int bnx2x_init_one(struct pci_dev *pdev, } BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); - if (!NO_FCOE(bp)) { /* Add storage MAC address */ rtnl_lock(); @@ -13048,6 +12988,7 @@ static int __init bnx2x_init(void) static void __exit bnx2x_cleanup(void) { struct list_head *pos, *q; + pci_unregister_driver(&bnx2x_pci_driver); destroy_workqueue(bnx2x_wq); @@ -13103,7 +13044,6 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) BUG_ON(bp->cnic_spq_pending < count); bp->cnic_spq_pending -= count; - for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) & SPE_HDR_CONN_TYPE) >> @@ -13276,7 +13216,6 @@ static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) bnx2x_cnic_sp_post(bp, 0); } - /* Called with netif_addr_lock_bh() taken. * Sets an rx_mode config for an iSCSI ETH client. * Doesn't block. 
@@ -13317,7 +13256,6 @@ static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) } } - static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) { struct bnx2x *bp = netdev_priv(dev); @@ -13505,7 +13443,6 @@ void bnx2x_setup_cnic_info(struct bnx2x *bp) { struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + bnx2x_cid_ilt_lines(bp); cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; @@ -13541,7 +13478,6 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, BNX2X_ERR("CNIC-related load failed\n"); return rc; } - } bp->cnic_enabled = true; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 32a9609cc98b..1f9dbb2f1756 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -142,7 +142,6 @@ free_and_exit: spin_unlock_bh(&o->lock); return rc; - } static inline void __bnx2x_exe_queue_reset_pending( @@ -163,13 +162,11 @@ static inline void __bnx2x_exe_queue_reset_pending( static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, struct bnx2x_exe_queue_obj *o) { - spin_lock_bh(&o->lock); __bnx2x_exe_queue_reset_pending(bp, o); spin_unlock_bh(&o->lock); - } /** @@ -308,7 +305,6 @@ static inline int bnx2x_state_wait(struct bnx2x *bp, int state, /* can take a while if any port is running */ int cnt = 5000; - if (CHIP_REV_IS_EMUL(bp)) cnt *= 20; @@ -456,7 +452,6 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n", counter, next); next += stride + size; - } } return counter * ETH_ALEN; @@ -518,7 +513,6 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, return 0; } - /* check_del() callbacks */ static struct bnx2x_vlan_mac_registry_elem * bnx2x_check_mac_del(struct bnx2x *bp, @@ -609,7 +603,6 @@ static bool bnx2x_check_move_always_err( return false; } - static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) { struct bnx2x_raw_obj *raw = &o->raw; @@ -626,7 +619,6 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) return rx_tx_flag; } - void bnx2x_set_mac_in_nig(struct bnx2x *bp, bool add, unsigned char *dev_addr, int index) { @@ -707,7 +699,6 @@ static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, hdr->rule_cnt = (u8)rule_cnt; } - /* hw_config() callbacks */ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, @@ -960,7 +951,6 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; - /* Reset the ramrod data buffer for the first rule */ if (rule_idx == 0) memset(data, 0, sizeof(*data)); @@ -1818,8 +1808,6 @@ int bnx2x_config_vlan_mac( return rc; } - - /** * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec * @@ -1934,7 +1922,6 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, state, pstate, type); } - void bnx2x_init_mac_obj(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *mac_obj, u8 cl_id, u32 cid, u8 func_id, void *rdata, @@ -2092,7 +2079,6 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, bnx2x_execute_vlan_mac, bnx2x_exeq_get_vlan_mac); } - } /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ @@ -2245,7 +2231,6 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, } 
cmd->state = cpu_to_le16(state); - } static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, @@ -2286,7 +2271,6 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, false); } - /* * If FCoE Queue configuration has been requested configure the Rx and * internal switching modes for this queue in separate rules. @@ -2909,7 +2893,6 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, default: BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; - } /* Increase the total number of MACs pending to be configured */ @@ -3223,7 +3206,6 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, default: BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; - } /* We want to ensure that commands are executed one by one for 57710. @@ -3342,7 +3324,6 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1( return -1; } - static inline int bnx2x_mcast_handle_pending_cmds_e1( struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) { @@ -3352,7 +3333,6 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1( union bnx2x_mcast_config_data cfg_data = {NULL}; int cnt = 0; - /* If nothing to be done - return */ if (list_empty(&o->pending_cmds_head)) return 0; @@ -3550,7 +3530,6 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp, /* Ramrod completion is pending */ return 1; } - } static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) @@ -3848,7 +3827,6 @@ static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o, return true; } - static bool bnx2x_credit_pool_get_entry( struct bnx2x_credit_pool_obj *o, int *offset) @@ -4018,7 +3996,6 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, /* this should never happen! Block MAC operations. */ bnx2x_init_credit_pool(p, 0, 0); } - } } @@ -4215,7 +4192,6 @@ int bnx2x_config_rss(struct bnx2x *bp, return rc; } - void bnx2x_init_rss_config_obj(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, u8 cl_id, u32 cid, u8 func_id, u8 engine_id, @@ -4288,7 +4264,6 @@ int bnx2x_queue_state_change(struct bnx2x *bp, return !!test_bit(pending_bit, pending); } - static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, struct bnx2x_queue_state_params *params) { @@ -4403,7 +4378,6 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, gen_data->mtu = cpu_to_le16(params->mtu); gen_data->func_id = o->func_id; - gen_data->cos = params->cos; gen_data->traffic_type = @@ -4530,7 +4504,6 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o, cpu_to_le16(params->silent_removal_value); rx_data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); - } /* initialize the general, tx and rx parts of a queue object */ @@ -4706,7 +4679,6 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, ¶ms->params.tx_only; u8 cid_index = tx_only_params->cid_index; - if (cid_index >= o->max_cos) { BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", o->cl_id, cid_index); @@ -4816,7 +4788,6 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp, return -EINVAL; } - /* Clear the ramrod data */ memset(rdata, 0, sizeof(*rdata)); @@ -5636,7 +5607,6 @@ static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, drv->reset_hw_cmn(bp); } - static inline int bnx2x_func_hw_reset(struct bnx2x *bp, struct bnx2x_func_state_params *params) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 43c00bc84a08..c4fffe0faba0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -1377,7 +1377,6 @@ void 
bnx2x_init_vlan_credit_pool(struct bnx2x *bp, struct bnx2x_credit_pool_obj *p, u8 func_id, u8 func_num); - /****************** RSS CONFIGURATION ****************/ void bnx2x_init_rss_config_obj(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 2ce7c7471367..ea492c7a96a1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -3024,7 +3024,6 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) { - struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index d67ddc554c0f..039792299c3b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -722,7 +722,6 @@ u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, struct pf_vf_bulletin_content *bulletin); int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf); - enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); /* VF side vfpf channel functions */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 2ca3d94fcec2..98366abd02bd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1002,7 +1002,6 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo; - UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received); UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index d117f472816c..853824d258e8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -40,7 +40,6 @@ struct nig_stats { u32 egress_mac_pkt1_hi; }; - enum bnx2x_stats_event { STATS_EVENT_PMF = 0, STATS_EVENT_LINK_UP, @@ -208,7 +207,6 @@ struct bnx2x_eth_stats { u32 eee_tx_lpi; }; - struct bnx2x_eth_q_stats { u32 total_unicast_bytes_received_hi; u32 total_unicast_bytes_received_lo; @@ -331,7 +329,6 @@ struct bnx2x_fw_port_stats_old { u32 mac_discard; }; - /**************************************************************************** * Macros ****************************************************************************/ @@ -536,7 +533,6 @@ struct bnx2x_fw_port_stats_old { SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ } while (0) - /* forward */ struct bnx2x; -- cgit v1.2.3 From 16a5fd9265e757121bb5f1b9ad757836f370e76c Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 2 Jun 2013 00:06:18 +0000 Subject: bnx2x: Revise comments and alignment This patch correct various typos, fix comments conventions and adds/removes a few comments. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 25 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 48 ++-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 4 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 37 +-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h | 6 - .../net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 10 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 81 ++++--- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 254 +++++++++------------ drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 54 ++--- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 29 +-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 2 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 8 +- 12 files changed, 251 insertions(+), 307 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 0c1104f30de6..8f551cf7bb30 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -362,7 +362,7 @@ union db_prod { /* * Number of required SGEs is the sum of two: * 1. Number of possible opened aggregations (next packet for - * these aggregations will probably consume SGE immidiatelly) + * these aggregations will probably consume SGE immediately) * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only * after placement on BD for new TPA aggregation) * @@ -486,10 +486,10 @@ struct bnx2x_fastpath { struct napi_struct napi; union host_hc_status_block status_blk; - /* chip independed shortcuts into sb structure */ + /* chip independent shortcuts into sb structure */ __le16 *sb_index_values; __le16 *sb_running_index; - /* chip independed shortcut into rx_prods_offset memory */ + /* chip independent shortcut into rx_prods_offset memory */ u32 ustorm_rx_prods_offset; u32 rx_buf_size; @@ -603,7 +603,7 @@ struct bnx2x_fastpath { * START_BD(splitted) - includes unpaged data segment for GSO * PARSING_BD - for TSO and CSUM data * PARSING_BD2 - for encapsulation data - * Frag BDs - decribes pages for frags + * Frag BDs - describes pages for frags */ #define BDS_PER_TX_PKT 4 #define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT) @@ -886,14 +886,14 @@ struct bnx2x_common { #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ (CHIP_REV(bp) == CHIP_REV_Ax)) /* This define is used in two main places: - * 1. In the early stages of nic_load, to know if to configrue Parser / Searcher + * 1. In the early stages of nic_load, to know if to configure Parser / Searcher * to nic-only mode or to offload mode. Offload mode is configured if either the * chip is E1x (where MIC_MODE register is not applicable), or if cnic already * registered for this port (which means that the user wants storage services). * 2. During cnic-related load, to know if offload mode is already configured in - * the HW or needs to be configrued. + * the HW or needs to be configured. * Since the transition from nic-mode to offload-mode in HW causes traffic - * coruption, nic-mode is configured only in ports on which storage services + * corruption, nic-mode is configured only in ports on which storage services * where never requested. */ #define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp)) @@ -994,14 +994,14 @@ extern struct workqueue_struct *bnx2x_wq; * If the maximum number of FP-SB available is X then: * a. If CNIC is supported it consumes 1 FP-SB thus the max number of * regular L2 queues is Y=X-1 - * b. 
in MF mode the actual number of L2 queues is Y= (X-1/MF_factor) + * b. In MF mode the actual number of L2 queues is Y= (X-1/MF_factor) * c. If the FCoE L2 queue is supported the actual number of L2 queues * is Y+1 * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for * slow-path interrupts) or Y+2 if CNIC is supported (one additional * FP interrupt context for the CNIC). * e. The number of HW context (CID count) is always X or X+1 if FCoE - * L2 queue is supported. the cid for the FCoE L2 queue is always X. + * L2 queue is supported. The cid for the FCoE L2 queue is always X. */ /* fast-path interrupt contexts E1x */ @@ -1568,7 +1568,7 @@ struct bnx2x { struct mutex cnic_mutex; struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; - /* Start index of the "special" (CNIC related) L2 cleints */ + /* Start index of the "special" (CNIC related) L2 clients */ u8 cnic_base_cl_id; int dmae_ready; @@ -1682,7 +1682,7 @@ struct bnx2x { /* operation indication for the sp_rtnl task */ unsigned long sp_rtnl_state; - /* DCBX Negotation results */ + /* DCBX Negotiation results */ struct dcbx_features dcbx_local_feat; u32 dcbx_error; @@ -2042,7 +2042,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) #define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit - indicates eror */ + * indicates error + */ #define MAX_DMAE_C_PER_PORT 8 #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index b28aaf170755..ef7fed52891a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -124,7 +124,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta) int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); /* Queue pointer cannot be re-set on an fp-basis, as moving pointer - * backward along the array could cause memory to be overriden + * backward along the array could cause memory to be overridden */ for (cos = 1; cos < bp->max_cos; cos++) { for (i = 0; i < old_eth_num - delta; i++) { @@ -258,7 +258,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) smp_mb(); if (unlikely(netif_tx_queue_stopped(txq))) { - /* Taking tx_lock() is needed to prevent reenabling the queue + /* Taking tx_lock() is needed to prevent re-enabling the queue * while it's empty. This could have happen if rx_action() gets * suspended in bnx2x_tx_int() after the condition before * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): @@ -571,7 +571,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, return err; } - /* Unmap the page as we r going to pass it to the stack */ + /* Unmap the page as we're going to pass it to the stack */ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(&old_rx_pg, mapping), SGE_PAGES, DMA_FROM_DEVICE); @@ -1114,7 +1114,7 @@ static void bnx2x_fill_report_data(struct bnx2x *bp, memset(data, 0, sizeof(*data)); - /* Fill the report data: efective line speed */ + /* Fill the report data: effective line speed */ data->line_speed = line_speed; /* Link is down */ @@ -1157,7 +1157,7 @@ void bnx2x_link_report(struct bnx2x *bp) * * @bp: driver handle * - * None atomic inmlementation. + * None atomic implementation. * Should be called under the phy_lock. 
*/ void __bnx2x_link_report(struct bnx2x *bp) @@ -1300,7 +1300,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); if (!fp->disable_tpa) { - /* Fill the per-aggregtion pool */ + /* Fill the per-aggregation pool */ for (i = 0; i < MAX_AGG_QS(bp); i++) { struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; @@ -1858,7 +1858,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp) * * If the actual number of Tx queues (for each CoS) is less than 16 then there * will be the holes at the end of each group of 16 ETh L2 indices (0..15, - * 16..31,...) with indicies that are not coupled with any real Tx queue. + * 16..31,...) with indices that are not coupled with any real Tx queue. * * The proper configuration of skb->queue_mapping is handled by * bnx2x_select_queue() and __skb_tx_hash(). @@ -1920,7 +1920,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp) ETH_OVREHEAD + mtu + BNX2X_FW_RX_ALIGN_END; - /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */ + /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */ if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; else @@ -1933,7 +1933,7 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp) int i; u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); - /* Prepare the initial contents fo the indirection table if RSS is + /* Prepare the initial contents for the indirection table if RSS is * enabled */ for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) @@ -2011,7 +2011,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) /* * Cleans the object that have internal lists without sending - * ramrods. Should be run when interrutps are disabled. + * ramrods. Should be run when interrupts are disabled. */ void bnx2x_squeeze_objects(struct bnx2x *bp) { @@ -2347,8 +2347,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * BNX2X_NUM_ETH_QUEUES(bp) + index]; - /* - * set the tpa flag for each queue. The tpa flag determines the queue + /* set the tpa flag for each queue. The tpa flag determines the queue * minimal size so it must be set prior to queue memory allocation */ fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || @@ -2473,6 +2472,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; + /* zero the structure w/o any lock, before SP handler is initialized */ memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, &bp->last_reported_link.link_report_flags); @@ -2537,8 +2537,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) } /* configure multi cos mappings in kernel. - * this configuration may be overriden by a multi class queue discipline - * or by a dcbx negotiation result. + * this configuration may be overridden by a multi class queue + * discipline or by a dcbx negotiation result. */ bnx2x_setup_tc(bp->dev, bp->max_cos); @@ -2697,7 +2697,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Start the Tx */ switch (load_mode) { case LOAD_NORMAL: - /* Tx queue should be only reenabled */ + /* Tx queue should be only re-enabled */ netif_tx_wake_all_queues(bp->dev); break; @@ -2842,7 +2842,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) } /* Nothing to do during unload if previous bnx2x_nic_load() - * have not completed succesfully - all resourses are released. + * have not completed successfully - all resources are released. 
* * we can get here only after unsuccessful ndo_* callback, during which * dev->IFF_UP flag is still on. @@ -2891,10 +2891,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) /* Send the UNLOAD_REQUEST to the MCP */ bnx2x_send_unload_req(bp, unload_mode); - /* - * Prevent transactions to host from the functions on the + /* Prevent transactions to host from the functions on the * engine that doesn't reset global blocks in case of global - * attention once gloabl blocks are reset and gates are opened + * attention once global blocks are reset and gates are opened * (the engine which leader will perform the recovery * last). */ @@ -2915,7 +2914,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) } /* - * At this stage no more interrupts will arrive so we may safly clean + * At this stage no more interrupts will arrive so we may safely clean * the queueable objects here in case they failed to get cleaned so far. */ if (IS_PF(bp)) @@ -3587,7 +3586,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", txq_index, fp_index, txdata_index); */ - /* enable this debug print to view the tranmission details + /* enable this debug print to view the transmission details DP(NETIF_MSG_TX_QUEUED, "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", txdata->cid, fp_index, txdata_index, txdata, fp); */ @@ -3970,7 +3969,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); - /* no traffic classes requested. aborting */ + /* no traffic classes requested. Aborting */ if (!num_tc) { netdev_reset_tc(dev); return 0; @@ -3997,8 +3996,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) prio, bp->prio_to_cos[prio]); } - - /* Use this configuration to diffrentiate tc0 from other COSes + /* Use this configuration to differentiate tc0 from other COSes This can be used for ets or pfc, and save the effort of setting up a multio class queue disc or negotiating DCBX with a switch netdev_set_prio_tc_map(dev, 0, 0); @@ -4629,7 +4627,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) changes = flags ^ bp->flags; - /* if GRO is changed while LRO is enabled, dont force a reload */ + /* if GRO is changed while LRO is enabled, don't force a reload */ if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG)) changes &= ~GRO_ENABLE_FLAG; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index f62ffc11ac54..4a643d7b9b22 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -1331,8 +1331,8 @@ static inline bool bnx2x_mtu_allows_gro(int mtu) int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE); /* - * 1. number of frags should not grow above MAX_SKB_FRAGS - * 2. frag must fit the page + * 1. Number of frags should not grow above MAX_SKB_FRAGS + * 2. 
Frag must fit the page */ return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 2689f28010fd..210614e37b9b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -687,7 +687,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) } /* setup tc must be called under rtnl lock, but we can't take it here - * as we are handling an attetntion on a work queue which must be + * as we are handling an attention on a work queue which must be * flushed at some rtnl-locked contexts (e.g. if down) */ if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) @@ -707,7 +707,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) */ bnx2x_dcbnl_update_applist(bp, true); - /* Read rmeote mib if dcbx is in the FW */ + /* Read remote mib if dcbx is in the FW */ if (bnx2x_dcbx_read_shmem_remote_mib(bp)) return; #endif @@ -738,7 +738,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bnx2x_dcbx_update_tc_mapping(bp); /* - * allow other funtions to update their netdevices + * allow other functions to update their netdevices * accordingly */ if (IS_MF(bp)) @@ -860,7 +860,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i)); } - /*For IEEE admin_recommendation_bw_precentage + /*For IEEE admin_recommendation_bw_percentage *For IEEE admin_recommendation_ets_pg */ af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap; for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { @@ -1070,7 +1070,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, bool pg_found = false; u32 i, traf_type, add_traf_type, add_pg; u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; - struct pg_entry_help_data *data = help_data->data; /*shotcut*/ + struct pg_entry_help_data *data = help_data->data; /*shortcut*/ /* Set to invalid */ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) @@ -1166,7 +1166,8 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry)); else /* If we join a group and one is strict - * than the bw rulls */ + * than the bw rules + */ cos_data->data[entry].strict = BNX2X_DCBX_STRICT_COS_HIGHEST; } @@ -1277,7 +1278,7 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, } else { /* If there are only pauseable priorities or * only non-pauseable,* the lower priorities go - * to the first queue and the higherpriorities go + * to the first queue and the higher priorities go * to the second queue. */ cos_data->data[0].pausable = @@ -1477,7 +1478,7 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( * queue and one priority goes to the second queue. * * We will join this two cases: - * if one is BW limited it will go to the secoend queue + * if one is BW limited it will go to the second queue * otherwise the last priority will get it */ @@ -1497,7 +1498,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( false == b_found_strict) /* last entry will be handled separately * If no priority is strict than last - * enty goes to last queue.*/ + * entry goes to last queue. 
+ */ entry = 1; cos_data->data[entry].pri_join_mask |= pri_tested; @@ -1509,7 +1511,8 @@ static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( b_found_strict = true; cos_data->data[1].pri_join_mask |= pri_tested; /* If we join a group and one is strict - * than the bw rulls */ + * than the bw rules + */ cos_data->data[1].strict = BNX2X_DCBX_STRICT_COS_HIGHEST; } @@ -1838,7 +1841,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, void bnx2x_dcbx_pmf_update(struct bnx2x *bp) { - /* if we need to syncronize DCBX result from prev PMF + /* if we need to synchronize DCBX result from prev PMF * read it from shmem and update bp and netdev accordingly */ if (SHMEM2_HAS(bp, drv_flags) && @@ -1932,14 +1935,14 @@ static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, return; /** - * bw_pct ingnored - band-width percentage devision between user + * bw_pct ignored - band-width percentage devision between user * priorities within the same group is not * standard and hence not supported * - * prio_type igonred - priority levels within the same group are not + * prio_type ignored - priority levels within the same group are not * standard and hence are not supported. According * to the standard pgid 15 is dedicated to strict - * prioirty traffic (on the port level). + * priority traffic (on the port level). * * up_map ignored */ @@ -1984,14 +1987,14 @@ static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio, DP(BNX2X_MSG_DCB, "prio = %d\n", prio); /** - * bw_pct ingnored - band-width percentage devision between user + * bw_pct ignored - band-width percentage devision between user * priorities within the same group is not * standard and hence not supported * - * prio_type igonred - priority levels within the same group are not + * prio_type ignored - priority levels within the same group are not * standard and hence are not supported. According * to the standard pgid 15 is dedicated to strict - * prioirty traffic (on the port level). + * priority traffic (on the port level). * * up_map ignored */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index 219dd84d4bf5..12eb4baee9f6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -13,12 +13,6 @@ * consent. 
*/ - -/* This struct holds a signature to ensure the dump returned from the driver - * match the meta data file inserted to grc_dump.tcl - * The signature is time stamp, diag version and grc_dump version - */ - #ifndef BNX2X_DUMP_H #define BNX2X_DUMP_H diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 91978eb166ea..72b1b59294b4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -320,7 +320,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) speed = ethtool_cmd_speed(cmd); - /* If recieved a request for an unknown duplex, assume full*/ + /* If received a request for an unknown duplex, assume full*/ if (cmd->duplex == DUPLEX_UNKNOWN) cmd->duplex = DUPLEX_FULL; @@ -849,7 +849,7 @@ static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset) /* Paged registers are supported in E2 & E3 only */ if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) { - /* Read "paged" registes */ + /* Read "paged" registers */ bnx2x_read_pages_regs(bp, p, preset); } @@ -1154,8 +1154,8 @@ static int bnx2x_get_eeprom_len(struct net_device *dev) return bp->common.flash_size; } -/* Per pf misc lock must be aquired before the per port mcp lock. Otherwise, had - * we done things the other way around, if two pfs from the same port would +/* Per pf misc lock must be acquired before the per port mcp lock. Otherwise, + * had we done things the other way around, if two pfs from the same port would * attempt to access nvram at the same time, we could run into a scenario such * as: * pf A takes the port lock. @@ -2070,7 +2070,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) EEE_MODE_OVERRIDE_NVRAM | EEE_MODE_OUTPUT_TIME; - /* Restart link to propogate changes */ + /* Restart link to propagate changes */ if (netif_running(dev)) { bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_force_link_reset(bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 5081990daea0..82f3696437c2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -808,8 +808,8 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp) u32 val = REG_RD(bp, addr); /* in E1 we must use only PCI configuration space to disable - * MSI/MSIX capablility - * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block + * MSI/MSIX capability + * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block */ if (CHIP_IS_E1(bp)) { /* Since IGU_PF_CONF_MSI_MSIX_EN still always on @@ -1012,7 +1012,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) hc_sm_p[j].timer_value); } - /* Indecies data */ + /* Indices data */ for (j = 0; j < loop; j++) { pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j, hc_index_p[j].flags, @@ -1107,7 +1107,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW * initialization. 
*/ -#define FLR_WAIT_USEC 10000 /* 10 miliseconds */ +#define FLR_WAIT_USEC 10000 /* 10 milliseconds */ #define FLR_WAIT_INTERVAL 50 /* usec */ #define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */ @@ -1327,7 +1327,7 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) bnx2x_panic(); return 1; } - /* Zero completion for nxt FLR */ + /* Zero completion for next FLR */ REG_WR(bp, comp_addr, 0); return ret; @@ -2343,7 +2343,7 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) sum of vn_min_rates. or 0 - if all the min_rates are 0. - In the later case fainess algorithm should be deactivated. + In the later case fairness algorithm should be deactivated. If not all min_rates are zero then those that are zeroes will be set to 1. */ static void bnx2x_calc_vn_min(struct bnx2x *bp, @@ -2423,7 +2423,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp) int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); if (BP_NOMCP(bp)) - return; /* what should be the default bvalue in this case */ + return; /* what should be the default value in this case */ /* For 2 port configuration the absolute function number formula * is: @@ -2922,7 +2922,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) } /** - * bnx2x_get_tx_only_flags - Return common flags + * bnx2x_get_common_flags - Return common flags * * @bp device handle * @fp queue handle @@ -3110,7 +3110,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, txq_init->fw_sb_id = fp->fw_sb_id; /* - * set the tss leading client id for TX classfication == + * set the tss leading client id for TX classification == * leading RSS client id */ txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); @@ -3197,7 +3197,7 @@ static void bnx2x_e1h_enable(struct bnx2x *bp) REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); - /* Tx queue should be only reenabled */ + /* Tx queue should be only re-enabled */ netif_tx_wake_all_queues(bp->dev); /* @@ -3591,7 +3591,7 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, /* * It's ok if the actual decrement is issued towards the memory * somewhere between the spin_lock and spin_unlock. Thus no - * more explict memory barrier is needed. + * more explicit memory barrier is needed. */ if (common) atomic_dec(&bp->eq_spq_left); @@ -3660,7 +3660,7 @@ static u16 bnx2x_update_dsb_idx(struct bnx2x *bp) rc |= BNX2X_DEF_SB_IDX; } - /* Do not reorder: indecies reading should complete before handling */ + /* Do not reorder: indices reading should complete before handling */ barrier(); return rc; } @@ -3809,8 +3809,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp) netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" "Please contact OEM Support for assistance\n"); - /* - * Schedule device reset (unload) + /* Schedule device reset (unload) * This is due to some boards consuming sufficient power when driver is * up to overheat if fan fails. */ @@ -4987,7 +4986,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) hw_cons = le16_to_cpu(*bp->eq_cons_sb); /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256. - * when we get the the next-page we nned to adjust so the loop + * when we get the next-page we need to adjust so the loop * condition below will be met. 
The next element is the size of a * regular element and hence incrementing by 1 */ @@ -5194,7 +5193,7 @@ static void bnx2x_sp_task(struct work_struct *work) DP(BNX2X_MSG_SP, "sp task invoked\n"); - /* make sure the atomic interupt_occurred has been written */ + /* make sure the atomic interrupt_occurred has been written */ smp_rmb(); if (atomic_read(&bp->interrupt_occurred)) { @@ -5670,7 +5669,7 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp) bp->eq_cons = 0; bp->eq_prod = NUM_EQ_DESC; bp->eq_cons_sb = BNX2X_EQ_INDEX; - /* we want a warning message before it gets rought... */ + /* we want a warning message before it gets wrought... */ atomic_set(&bp->eq_spq_left, min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); } @@ -5754,7 +5753,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, break; case BNX2X_RX_MODE_PROMISC: - /* According to deffinition of SI mode, iface in promisc mode + /* According to definition of SI mode, iface in promisc mode * should receive matched and unmatched (in resolution of port) * unicast packets. */ @@ -5897,7 +5896,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) /* init shortcut */ fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); - /* Setup SB indicies */ + /* Setup SB indices */ fp->rx_cons_sb = BNX2X_RX_SB_INDEX; /* Configure Queue State object */ @@ -6652,7 +6651,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) * stay set) * f. If this is VNIC 3 of a port then also init * first_timers_ilt_entry to zero and last_timers_ilt_entry - * to the last enrty in the ILT. + * to the last entry in the ILT. * * Notes: * Currently the PF error in the PGLC is non recoverable. @@ -7118,8 +7117,8 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); /* init aeu_mask_attn_func_0/1: - * - SF mode: bits 3-7 are masked. only bits 0-2 are in use - * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF + * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use + * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF * bits 4-7 are used for "per vn group attention" */ val = IS_MF(bp) ? 0xF7 : 0x7; /* Enable DCBX attention for all but E1 */ @@ -7371,7 +7370,7 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp) bnx2x_ilt_init_op_cnic(bp, INITOP_SET); if (CONFIGURE_NIC_MODE(bp)) { - /* Configrue searcher as part of function hw init */ + /* Configure searcher as part of function hw init */ bnx2x_init_searcher(bp); /* Reset NIC mode */ @@ -7641,7 +7640,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) } bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); - /* !!! these should become driver const once + /* !!! These should become driver const once rf-tool supports split-68 const */ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); @@ -7755,7 +7754,7 @@ int bnx2x_alloc_mem_cnic(struct bnx2x *bp) host_hc_status_block_e1x)); if (CONFIGURE_NIC_MODE(bp) && !bp->t2) - /* allocate searcher T2 table, as it wan't allocated before */ + /* allocate searcher T2 table, as it wasn't allocated before */ BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); /* write address to which L5 should insert its values */ @@ -8093,7 +8092,7 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp, __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); - /* If HC is supporterd, enable host coalescing in the transition + /* If HC is supported, enable host coalescing in the transition * to INIT state. 
*/ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); @@ -8579,14 +8578,14 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) /* * (assumption: No Attention from MCP at this stage) - * PMF probably in the middle of TXdisable/enable transaction + * PMF probably in the middle of TX disable/enable transaction * 1. Sync IRS for default SB - * 2. Sync SP queue - this guarantes us that attention handling started - * 3. Wait, that TXdisable/enable transaction completes + * 2. Sync SP queue - this guarantees us that attention handling started + * 3. Wait, that TX disable/enable transaction completes * - * 1+2 guranty that if DCBx attention was scheduled it already changed - * pending bit of transaction from STARTED-->TX_STOPPED, if we alredy - * received complettion for the transaction the state is TX_STOPPED. + * 1+2 guarantee that if DCBx attention was scheduled it already changed + * pending bit of transaction from STARTED-->TX_STOPPED, if we already + * received completion for the transaction the state is TX_STOPPED. * State will return to STARTED after completion of TX_STOPPED-->STARTED * transaction. */ @@ -8705,7 +8704,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) /* * (assumption: No Attention from MCP at this stage) - * PMF probably in the middle of TXdisable/enable transaction + * PMF probably in the middle of TX disable/enable transaction */ rc = bnx2x_func_wait_started(bp); if (rc) { @@ -9320,7 +9319,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp) * the first leader that performs a * leader_reset() reset the global blocks in * order to clear global attentions. Otherwise - * the the gates will remain closed for that + * the gates will remain closed for that * engine. */ if (load_status || @@ -10056,7 +10055,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) break; } - /* non-common reply from MCP night require looping */ + /* non-common reply from MCP might require looping */ rc = bnx2x_prev_unload_uncommon(bp); if (rc != BNX2X_PREV_WAIT_NEEDED) break; @@ -11034,7 +11033,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) } else { bp->common.int_block = INT_BLOCK_IGU; - /* do not allow device reset during IGU info preocessing */ + /* do not allow device reset during IGU info processing */ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); @@ -11113,7 +11112,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) E1H_FUNC_MAX * sizeof(struct drv_func_mb); /* * get mf configuration: - * 1. existence of MF configuration + * 1. Existence of MF configuration * 2. MAC address must be legal (check only upper bytes) * for Switch-Independent mode; * OVLAN must be legal for Switch-Dependent mode @@ -11490,7 +11489,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) /* We need at least one default status block for slow-path events, * second status block for the L2 queue, and a third status block for - * CNIC if supproted. + * CNIC if supported. */ if (CNIC_SUPPORT(bp)) bp->min_msix_vec_cnt = 3; @@ -12874,7 +12873,7 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) bnx2x_prev_unload(bp); - /* We should have resetted the engine, so It's fair to + /* We should have reseted the engine, so It's fair to * assume the FW will no longer write to the bnx2x driver. 
*/ bnx2x_squeeze_objects(bp); @@ -12993,7 +12992,7 @@ static void __exit bnx2x_cleanup(void) destroy_workqueue(bnx2x_wq); - /* Free globablly allocated resources */ + /* Free globally allocated resources */ list_for_each_safe(pos, q, &bnx2x_prev_list) { struct bnx2x_prev_path_list *tmp = list_entry(pos, struct bnx2x_prev_path_list, list); @@ -13016,7 +13015,7 @@ module_exit(bnx2x_cleanup); * @bp: driver handle * @set: set or clear the CAM entry * - * This function will wait until the ramdord completion returns. + * This function will wait until the ramrod completion returns. * Return 0 if success, -ENODEV if ramrod doesn't return. */ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 1f9dbb2f1756..8f03c984550f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -35,9 +35,9 @@ /** * bnx2x_exe_queue_init - init the Exe Queue object * - * @o: poiter to the object + * @o: pointer to the object * @exe_len: length - * @owner: poiter to the owner + * @owner: pointer to the owner * @validate: validate function pointer * @optimize: optimize function pointer * @exec: execute function pointer @@ -176,7 +176,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, * @o: queue * @ramrod_flags: flags * - * (Atomicy is ensured using the exe_queue->lock). + * (Atomicity is ensured using the exe_queue->lock). */ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, struct bnx2x_exe_queue_obj *o, @@ -189,8 +189,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, spin_lock_bh(&o->lock); - /* - * Next step should not be performed until the current is finished, + /* Next step should not be performed until the current is finished, * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to * properly clear object internals without sending any command to the FW * which also implies there won't be any completion to clear the @@ -206,8 +205,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, } } - /* - * Run through the pending commands list and create a next + /* Run through the pending commands list and create a next * execution chunk. */ while (!list_empty(&o->exe_queue)) { @@ -217,8 +215,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, if (cur_len + elem->cmd_len <= o->exe_chunk_len) { cur_len += elem->cmd_len; - /* - * Prevent from both lists being empty when moving an + /* Prevent from both lists being empty when moving an * element. This will allow the call of * bnx2x_exe_queue_empty() without locking. */ @@ -238,14 +235,12 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); if (rc < 0) - /* - * In case of an error return the commands back to the queue - * and reset the pending_comp. + /* In case of an error return the commands back to the queue + * and reset the pending_comp. */ list_splice_init(&o->pending_comp, &o->exe_queue); else if (!rc) - /* - * If zero is returned, means there are no outstanding pending + /* If zero is returned, means there are no outstanding pending * completions and we may dismiss the pending list. 
*/ __bnx2x_exe_queue_reset_pending(bp, o); @@ -685,7 +680,7 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, * * @cid: connection id * @type: BNX2X_FILTER_XXX_PENDING - * @hdr: poiter to header to setup + * @hdr: pointer to header to setup * @rule_cnt: * * currently we always configure one rule and echo field to contain a CID and an @@ -714,8 +709,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; - /* - * Set LLH CAM entry: currently only iSCSI and ETH macs are + /* Set LLH CAM entry: currently only iSCSI and ETH macs are * relevant. In addition, current implementation is tuned for a * single ETH MAC. * @@ -870,8 +864,7 @@ static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, struct bnx2x_raw_obj *raw = &o->raw; struct mac_configuration_cmd *config = (struct mac_configuration_cmd *)(raw->rdata); - /* - * 57710 and 57711 do not support MOVE command, + /* 57710 and 57711 do not support MOVE command, * so it's either ADD or DEL */ bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? @@ -959,7 +952,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, &rule_entry->pair.header); - /* Set VLAN and MAC themselvs */ + /* Set VLAN and MAC themselves */ rule_entry->pair.vlan = cpu_to_le16(vlan); bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, &rule_entry->pair.mac_mid, @@ -1011,8 +1004,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, struct bnx2x_raw_obj *raw = &o->raw; struct mac_configuration_cmd *config = (struct mac_configuration_cmd *)(raw->rdata); - /* - * 57710 and 57711 do not support MOVE command, + /* 57710 and 57711 do not support MOVE command, * so it's either ADD or DEL */ bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? @@ -1036,7 +1028,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, * * @bp: device handle * @p: command parameters - * @ppos: pointer to the cooky + * @ppos: pointer to the cookie * * reconfigure next MAC/VLAN/VLAN-MAC element from the * previously configured elements list. @@ -1044,7 +1036,7 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken * into an account * - * pointer to the cooky - that should be given back in the next call to make + * pointer to the cookie - that should be given back in the next call to make * function handle the next element. If *ppos is set to NULL it will restart the * iterator. If returned *ppos == NULL this means that the last element has been * handled. @@ -1092,8 +1084,7 @@ static int bnx2x_vlan_mac_restore(struct bnx2x *bp, return bnx2x_config_vlan_mac(bp, p); } -/* - * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a +/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a * pointer to an element with a specific criteria and NULL if such an element * hasn't been found. */ @@ -1177,8 +1168,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, return rc; } - /* - * Check if there is a pending ADD command for this + /* Check if there is a pending ADD command for this * MAC/VLAN/VLAN-MAC. Return an error if there is. 
*/ if (exeq->get(exeq, elem)) { @@ -1186,8 +1176,7 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, return -EEXIST; } - /* - * TODO: Check the pending MOVE from other objects where this + /* TODO: Check the pending MOVE from other objects where this * object is a destination object. */ @@ -1230,8 +1219,7 @@ static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, return -EEXIST; } - /* - * Check if there are pending DEL or MOVE commands for this + /* Check if there are pending DEL or MOVE commands for this * MAC/VLAN/VLAN-MAC. Return an error if so. */ memcpy(&query_elem, elem, sizeof(query_elem)); @@ -1282,8 +1270,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; - /* - * Check if we can perform this operation based on the current registry + /* Check if we can perform this operation based on the current registry * state. */ if (!src_o->check_move(bp, src_o, dest_o, @@ -1292,8 +1279,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, return -EINVAL; } - /* - * Check if there is an already pending DEL or MOVE command for the + /* Check if there is an already pending DEL or MOVE command for the * source object or ADD command for a destination object. Return an * error if so. */ @@ -1382,7 +1368,7 @@ static int bnx2x_remove_vlan_mac(struct bnx2x *bp, } /** - * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes. + * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes. * * @bp: device handle * @o: bnx2x_vlan_mac_obj @@ -1540,9 +1526,8 @@ static inline int bnx2x_vlan_mac_get_registry_elem( /* Get a new CAM offset */ if (!o->get_cam_offset(o, ®_elem->cam_offset)) { - /* - * This shell never happen, because we have checked the - * CAM availiability in the 'validate'. + /* This shall never happen, because we have checked the + * CAM availability in the 'validate'. */ WARN_ON(1); kfree(reg_elem); @@ -1589,8 +1574,7 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, struct bnx2x_vlan_mac_registry_elem *reg_elem; enum bnx2x_vlan_mac_cmd cmd; - /* - * If DRIVER_ONLY execution is requested, cleanup a registry + /* If DRIVER_ONLY execution is requested, cleanup a registry * and exit. Otherwise send a ramrod to FW. */ if (!drv_only) { @@ -1599,11 +1583,10 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, /* Set pending */ r->set_pending(r); - /* Fill tha ramrod data */ + /* Fill the ramrod data */ list_for_each_entry(elem, exe_chunk, link) { cmd = elem->cmd_data.vlan_mac.cmd; - /* - * We will add to the target object in MOVE command, so + /* We will add to the target object in MOVE command, so * change the object for a CAM search. */ if (cmd == BNX2X_VLAN_MAC_MOVE) @@ -1636,12 +1619,11 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp, idx++; } - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
*/ rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, @@ -1756,8 +1738,7 @@ int bnx2x_config_vlan_mac( return rc; } - /* - * If nothing will be executed further in this iteration we want to + /* If nothing will be executed further in this iteration we want to * return PENDING if there are pending commands */ if (!bnx2x_exe_queue_empty(&o->exe_queue)) @@ -1776,13 +1757,11 @@ int bnx2x_config_vlan_mac( return rc; } - /* - * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set + /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set * then user want to wait until the last command is done. */ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { - /* - * Wait maximum for the current exe_queue length iterations plus + /* Wait maximum for the current exe_queue length iterations plus * one (for the current pending command). */ int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; @@ -1817,7 +1796,7 @@ int bnx2x_config_vlan_mac( * @ramrod_flags: execution flags to be used for this deletion * * if the last operation has completed successfully and there are no - * moreelements left, positive value if the last operation has completed + * more elements left, positive value if the last operation has completed * successfully and there are more previously configured elements, negative * value is current operation has failed. */ @@ -1858,8 +1837,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, p.ramrod_flags = *ramrod_flags; p.user_req.cmd = BNX2X_VLAN_MAC_DEL; - /* - * Add all but the last VLAN-MAC to the execution queue without actually + /* Add all but the last VLAN-MAC to the execution queue without actually * execution anything. */ __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); @@ -2035,8 +2013,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, /* CAM pool handling */ vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; - /* - * CAM offset is relevant for 57710 and 57711 chips only which have a + /* CAM offset is relevant for 57710 and 57711 chips only which have a * single CAM for both MACs and VLAN-MAC pairs. So the offset * will be taken from MACs' pool object only. */ @@ -2103,12 +2080,12 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, struct tstorm_eth_mac_filter_config *mac_filters = (struct tstorm_eth_mac_filter_config *)p->rdata; - /* initial seeting is drop-all */ + /* initial setting is drop-all */ u8 drop_all_ucast = 1, drop_all_mcast = 1; u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; u8 unmatched_unicast = 0; - /* In e1x there we only take into account rx acceot flag since tx switching + /* In e1x there we only take into account rx accept flag since tx switching * isn't enabled. */ if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) /* accept matched ucast */ @@ -2271,8 +2248,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, false); } - /* - * If FCoE Queue configuration has been requested configure the Rx and + /* If FCoE Queue configuration has been requested configure the Rx and * internal switching modes for this queue in separate rules. * * FCoE queue shell never be set to ACCEPT_ALL packets of any sort: @@ -2308,8 +2284,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, } } - /* - * Set the ramrod header (most importantly - number of rules to + /* Set the ramrod header (most importantly - number of rules to * configure). 
*/ bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx); @@ -2318,12 +2293,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -2460,7 +2434,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, cur_mac = (struct bnx2x_mcast_mac_elem *) ((u8 *)new_cmd + sizeof(*new_cmd)); - /* Push the MACs of the current command into the pendig command + /* Push the MACs of the current command into the pending command * MACs list: FIFO */ list_for_each_entry(pos, &p->mcast_list, link) { @@ -3017,20 +2991,18 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp, if (!o->total_pending_num) bnx2x_mcast_refresh_registry_e2(bp, o); - /* - * If CLEAR_ONLY was requested - don't send a ramrod and clear + /* If CLEAR_ONLY was requested - don't send a ramrod and clear * RAMROD_PENDING status immediately. */ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { raw->clear_pending(raw); return 0; } else { - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -3104,7 +3076,7 @@ static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, } } -/* On 57711 we write the multicast MACs' aproximate match +/* On 57711 we write the multicast MACs' approximate match * table by directly into the TSTORM's internal RAM. So we don't * really need to handle any tricks to make it work. */ @@ -3227,7 +3199,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp, /* If current command hasn't been handled yet and we are * here means that it's meant to be dropped and we have to - * update the number of outstandling MACs accordingly. + * update the number of outstanding MACs accordingly. */ if (p->mcast_list_len) o->total_pending_num -= o->max_cmd_len; @@ -3503,20 +3475,18 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp, if (rc) return rc; - /* - * If CLEAR_ONLY was requested - don't send a ramrod and clear + /* If CLEAR_ONLY was requested - don't send a ramrod and clear * RAMROD_PENDING status immediately. */ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { raw->clear_pending(raw); return 0; } else { - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). 
+ /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -3977,8 +3947,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, } else { - /* - * CAM credit is equaly divided between all active functions + /* CAM credit is equaly divided between all active functions * on the PATH. */ if ((func_num > 0)) { @@ -3987,8 +3956,7 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, else cam_sz = BNX2X_CAM_SIZE_EMUL; - /* - * No need for CAM entries handling for 57712 and + /* No need for CAM entries handling for 57712 and * newer. */ bnx2x_init_credit_pool(p, -1, cam_sz); @@ -4005,14 +3973,12 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, u8 func_num) { if (CHIP_IS_E1x(bp)) { - /* - * There is no VLAN credit in HW on 57710 and 57711 only + /* There is no VLAN credit in HW on 57710 and 57711 only * MAC / MAC-VLAN can be set */ bnx2x_init_credit_pool(p, 0, -1); } else { - /* - * CAM credit is equaly divided between all active functions + /* CAM credit is equally divided between all active functions * on the PATH. */ if (func_num > 0) { @@ -4028,7 +3994,7 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, /** * bnx2x_debug_print_ind_table - prints the indirection table configuration. * - * @bp: driver hanlde + * @bp: driver handle * @p: pointer to rss configuration * * Prints it when NETIF_MSG_IFUP debug level is configured. @@ -4141,12 +4107,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp, data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; } - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ /* Send a ramrod */ @@ -4312,7 +4277,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp, } if (o->next_tx_only >= o->max_cos) - /* >= becuase tx only must always be smaller than cos since the + /* >= because tx only must always be smaller than cos since the * primary connection supports COS 0 */ BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", @@ -4625,12 +4590,11 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, /* Fill the ramrod data */ bnx2x_q_fill_setup_data_cmn(bp, params, rdata); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
*/ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], @@ -4654,12 +4618,11 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, bnx2x_q_fill_setup_data_cmn(bp, params, rdata); bnx2x_q_fill_setup_data_e2(bp, params, rdata); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], @@ -4699,12 +4662,11 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, o->cids[cid_index], rdata->general.client_id, rdata->general.sp_client_id, rdata->general.cos); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], @@ -4733,7 +4695,7 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp, test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, ¶ms->update_flags); - /* Outer VLAN sripping */ + /* Outer VLAN stripping */ data->outer_vlan_removal_enable_flg = test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags); data->outer_vlan_removal_change_flg = @@ -4794,12 +4756,11 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp, /* Fill the ramrod data */ bnx2x_q_fill_update_data(bp, o, update_params, rdata); - /* - * No need for an explicit memory barrier here as long we would - * need to ensure the ordering of writing to the SPQ element - * and updating of the SPQ producer which involves a memory - * read and we will have to put a full memory barrier there - * (inside bnx2x_sp_post()). + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). */ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, @@ -5009,8 +4970,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp, ¶ms->params.update; u8 next_tx_only = o->num_tx_only; - /* - * Forget all pending for completion commands if a driver only state + /* Forget all pending for completion commands if a driver only state * transition has been requested. */ if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { @@ -5018,8 +4978,7 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp, o->next_state = BNX2X_Q_STATE_MAX; } - /* - * Don't allow a next state transition if we are in the middle of + /* Don't allow a next state transition if we are in the middle of * the previous one. 
*/ if (o->pending) { @@ -5228,8 +5187,7 @@ enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, if (o->pending) return BNX2X_F_STATE_MAX; - /* - * unsure the order of reading of o->pending and o->state + /* unsure the order of reading of o->pending and o->state * o->pending should be read first */ rmb(); @@ -5327,8 +5285,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; enum bnx2x_func_cmd cmd = params->cmd; - /* - * Forget all pending for completion commands if a driver only state + /* Forget all pending for completion commands if a driver only state * transition has been requested. */ if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { @@ -5336,8 +5293,7 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, o->next_state = BNX2X_F_STATE_MAX; } - /* - * Don't allow a next state transition if we are in the middle of + /* Don't allow a next state transition if we are in the middle of * the previous one. */ if (o->pending) @@ -5510,7 +5466,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp, goto init_err; } - /* Handle the beginning of COMMON_XXX pases separatelly... */ + /* Handle the beginning of COMMON_XXX pases separately... */ switch (load_code) { case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: rc = bnx2x_func_init_cmn_chip(bp, drv); @@ -5544,7 +5500,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp, init_err: drv->gunzip_end(bp); - /* In case of success, complete the comand immediatelly: no ramrods + /* In case of success, complete the command immediately: no ramrods * have been sent. */ if (!rc) @@ -5569,7 +5525,7 @@ static inline void bnx2x_func_reset_func(struct bnx2x *bp, } /** - * bnx2x_func_reset_port - reser HW at port stage + * bnx2x_func_reset_port - reset HW at port stage * * @bp: device handle * @drv: @@ -5591,7 +5547,7 @@ static inline void bnx2x_func_reset_port(struct bnx2x *bp, } /** - * bnx2x_func_reset_cmn - reser HW at common stage + * bnx2x_func_reset_cmn - reset HW at common stage * * @bp: device handle * @drv: @@ -5633,7 +5589,7 @@ static inline int bnx2x_func_hw_reset(struct bnx2x *bp, break; } - /* Complete the comand immediatelly: no ramrods have been sent. */ + /* Complete the command immediately: no ramrods have been sent. */ o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index c4fffe0faba0..798dfe996733 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -34,8 +34,7 @@ enum { RAMROD_RESTORE, /* Execute the next command now */ RAMROD_EXEC, - /* - * Don't add a new command and continue execution of posponed + /* Don't add a new command and continue execution of postponed * commands. If not set a new command will be added to the * pending commands list. */ @@ -129,8 +128,7 @@ enum bnx2x_vlan_mac_cmd { struct bnx2x_vlan_mac_data { /* Requested command: BNX2X_VLAN_MAC_XX */ enum bnx2x_vlan_mac_cmd cmd; - /* - * used to contain the data related vlan_mac_flags bits from + /* used to contain the data related vlan_mac_flags bits from * ramrod parameters. */ unsigned long vlan_mac_flags; @@ -190,14 +188,10 @@ typedef struct bnx2x_exeq_elem * struct bnx2x_exeq_elem *elem); struct bnx2x_exe_queue_obj { - /* - * Commands pending for an execution. - */ + /* Commands pending for an execution. */ struct list_head exe_queue; - /* - * Commands pending for an completion. 
- */ + /* Commands pending for an completion. */ struct list_head pending_comp; spinlock_t lock; @@ -245,14 +239,13 @@ struct bnx2x_exe_queue_obj { }; /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ /* - * Element in the VLAN_MAC registry list having all currenty configured + * Element in the VLAN_MAC registry list having all currently configured * rules. */ struct bnx2x_vlan_mac_registry_elem { struct list_head link; - /* - * Used to store the cam offset used for the mac/vlan/vlan-mac. + /* Used to store the cam offset used for the mac/vlan/vlan-mac. * Relevant for 57710 and 57711 only. VLANs and MACs share the * same CAM for these chips. */ @@ -310,7 +303,7 @@ struct bnx2x_vlan_mac_obj { * @param n number of elements to get * @param buf buffer preallocated by caller into which elements * will be copied. Note elements are 4-byte aligned - * so buffer size must be able to accomodate the + * so buffer size must be able to accommodate the * aligned elements. * * @return number of copied bytes @@ -395,7 +388,7 @@ struct bnx2x_vlan_mac_obj { * @param bp * @param p Command parameters (RAMROD_COMP_WAIT bit in * ramrod_flags is only taken into an account) - * @param ppos a pointer to the cooky that should be given back in the + * @param ppos a pointer to the cookie that should be given back in the * next call to make function handle the next element. If * *ppos is set to NULL it will restart the iterator. * If returned *ppos == NULL this means that the last @@ -408,7 +401,7 @@ struct bnx2x_vlan_mac_obj { struct bnx2x_vlan_mac_registry_elem **ppos); /** - * Should be called on a completion arival. + * Should be called on a completion arrival. * * @param bp * @param o @@ -447,7 +440,7 @@ void bnx2x_set_mac_in_nig(struct bnx2x *bp, /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ -/* RX_MODE ramrod spesial flags: set in rx_mode_flags field in +/* RX_MODE ramrod special flags: set in rx_mode_flags field in * a bnx2x_rx_mode_ramrod_params. */ enum { @@ -475,8 +468,7 @@ struct bnx2x_rx_mode_ramrod_params { unsigned long ramrod_flags; unsigned long rx_mode_flags; - /* - * rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to + /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to * a tstorm_eth_mac_filter_config (e1x). */ void *rdata; @@ -646,12 +638,11 @@ struct bnx2x_credit_pool_obj { /* Maximum allowed credit. put() will check against it. */ int pool_sz; - /* - * Allocate a pool table statically. + /* Allocate a pool table statically. * - * Currently the mamimum allowed size is MAX_MAC_CREDIT_E2(272) + * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272) * - * The set bit in the table will mean that the entry is available. + * The set bit in the table will mean that the entry is available. */ #define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) u64 pool_mirror[BNX2X_POOL_VEC_SIZE]; @@ -832,7 +823,7 @@ enum { BNX2X_Q_FLG_TUN_INC_INNER_IP_ID }; -/* Queue type options: queue type may be a compination of below. */ +/* Queue type options: queue type may be a combination of below. */ enum bnx2x_q_type { /** TODO: Consider moving both these flags into the init() * ramrod params. @@ -1002,10 +993,9 @@ struct bnx2x_queue_sp_obj { u8 cl_id; u8 func_id; - /* - * number of traffic classes supported by queue. - * The primary connection of the queue suppotrs the first traffic - * class. Any further traffic class is suppoted by a tx-only + /* number of traffic classes supported by queue. 
+ * The primary connection of the queue supports the first traffic + * class. Any further traffic class is supported by a tx-only * connection. * * Therefore max_cos is also a number of valid entries in the cids @@ -1021,7 +1011,7 @@ struct bnx2x_queue_sp_obj { /* BNX2X_Q_CMD_XX bits. This object implements "one * pending" paradigm but for debug and tracing purposes it's - * more convinient to have different bits for different + * more convenient to have different bits for different * commands. */ unsigned long pending; @@ -1210,7 +1200,7 @@ struct bnx2x_func_sp_obj { /* BNX2X_FUNC_CMD_XX bits. This object implements "one * pending" paradigm but for debug and tracing purposes it's - * more convinient to have different bits for different + * more convenient to have different bits for different * commands. */ unsigned long pending; @@ -1329,7 +1319,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp, * * @p: Command parameters * - * Return: 0 - if operation was successfull and there is no pending completions, + * Return: 0 - if operation was successful and there is no pending completions, * positive number - if there are pending completions, * negative - if there were errors */ @@ -1361,7 +1351,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp, * the current command will be enqueued to the tail of the * pending commands list. * - * Return: 0 is operation was successfull and there are no pending completions, + * Return: 0 is operation was successful and there are no pending completions, * negative if there were errors, positive if there are pending * completions. */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index ea492c7a96a1..b2ab288aaf76 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1341,7 +1341,7 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, */ /* internal vf enable - until vf is enabled internally all transactions - * are blocked. this routine should always be called last with pretend. + * are blocked. This routine should always be called last with pretend. */ static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) { @@ -1743,7 +1743,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp) REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); - /* set the number of VF alllowed doorbells to the full DQ range */ + /* set the number of VF allowed doorbells to the full DQ range */ REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); /* set the VF doorbell threshold */ @@ -2403,7 +2403,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) /* extract vf and rxq index from vf_cid - relies on the following: * 1. vfid on cid reflects the true abs_vfid - * 2. the max number of VFs (per path) is 64 + * 2. The max number of VFs (per path) is 64 */ qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); @@ -2461,7 +2461,7 @@ static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) { /* extract the vf from vf_cid - relies on the following: * 1. vfid on cid reflects the true abs_vfid - * 2. the max number of VFs (per path) is 64 + * 2. 
The max number of VFs (per path) is 64 */ int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); return bnx2x_vf_by_abs_fid(bp, abs_vfid); @@ -2480,7 +2480,7 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, if (vf) { /* extract queue index from vf_cid - relies on the following: * 1. vfid on cid reflects the true abs_vfid - * 2. the max number of VFs (per path) is 64 + * 2. The max number of VFs (per path) is 64 */ int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); @@ -2705,7 +2705,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, } /* static allocation: - * the global maximum number are fixed per VF. fail the request if + * the global maximum number are fixed per VF. Fail the request if * requested number exceed these globals */ if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { @@ -2890,7 +2890,7 @@ int bnx2x_vfop_close_cmd(struct bnx2x *bp, return -ENOMEM; } -/* VF release can be called either: 1. the VF was acquired but +/* VF release can be called either: 1. The VF was acquired but * not enabled 2. the vf was enabled or in the process of being * enabled */ @@ -3140,7 +3140,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, /* mac configured by ndo so its in bulletin board */ memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); else - /* funtion has not been loaded yet. Show mac as 0s */ + /* function has not been loaded yet. Show mac as 0s */ memset(&ivi->mac, 0, ETH_ALEN); /* vlan */ @@ -3148,7 +3148,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, /* vlan configured by ndo so its in bulletin board */ memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); else - /* funtion has not been loaded yet. Show vlans as 0s */ + /* function has not been loaded yet. Show vlans as 0s */ memset(&ivi->vlan, 0, VLAN_HLEN); } @@ -3188,7 +3188,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) return -EINVAL; } - /* update PF's copy of the VF's bulletin. will no longer accept mac + /* update PF's copy of the VF's bulletin. Will no longer accept mac * configuration requests from vf unless match this mac */ bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; @@ -3357,8 +3357,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) return 0; } -/* crc is the first field in the bulletin board. compute the crc over the - * entire bulletin board excluding the crc field itself +/* crc is the first field in the bulletin board. Compute the crc over the + * entire bulletin board excluding the crc field itself. Use the length field + * as the Bulletin Board was posted by a PF with possibly a different version + * from the vf which will sample it. Therefore, the length is computed by the + * PF and the used blindly by the VF. */ u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, struct pf_vf_bulletin_content *bulletin) @@ -3451,7 +3454,7 @@ int bnx2x_open_epilog(struct bnx2x *bp) * register_netdevice which must have rtnl lock taken. As we are holding * the lock right now, that could only work if the probe would not take * the lock. However, as the probe of the vf may be called from other - * contexts as well (such as passthrough to vm failes) it can't assume + * contexts as well (such as passthrough to vm fails) it can't assume * the lock is being held for it. Using delayed work here allows the * probe code to simply take the lock (i.e. wait for it to be released * if it is being held). 
We only want to do this if the number of VFs diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 039792299c3b..3e7cb8341841 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -197,7 +197,7 @@ struct bnx2x_virtf { u8 state; #define VF_FREE 0 /* VF ready to be acquired holds no resc */ -#define VF_ACQUIRED 1 /* VF aquired, but not initalized */ +#define VF_ACQUIRED 1 /* VF acquired, but not initialized */ #define VF_ENABLED 2 /* VF Enabled */ #define VF_RESET 3 /* VF FLR'd, pending cleanup */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 282606677bca..f55fa0725680 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -233,7 +233,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) attempts++; - /* test whether the PF accepted our request. If not, humble the + /* test whether the PF accepted our request. If not, humble * the request and try again. */ if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) { @@ -787,7 +787,7 @@ static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp) storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid)); } -/* enable vf_pf mailbox (aka vf-pf-chanell) */ +/* enable vf_pf mailbox (aka vf-pf-channel) */ void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) { bnx2x_vf_flr_clnup_epilog(bp, abs_vfid); @@ -1072,7 +1072,7 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags, if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); - /* outer vlan removal is set according to the PF's multi fuction mode */ + /* outer vlan removal is set according to PF's multi function mode */ if (IS_MF_SD(bp)) __set_bit(BNX2X_Q_FLG_OV, sp_q_flags); } @@ -1104,7 +1104,7 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_queue_init_params *init_p; struct bnx2x_queue_setup_params *setup_p; - /* reinit the VF operation context */ + /* re-init the VF operation context */ memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); setup_p = &vf->op_params.qctor.prep_qsetup; init_p = &vf->op_params.qctor.qstate.params.init; -- cgit v1.2.3 From 3cdeec22e40264e40d34d8242b4ce7461329a80a Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 2 Jun 2013 00:06:19 +0000 Subject: bnx2x: Semantic removal and beautification This patch introduces several small changes to the driver, none which actually change any flow: 1. Removes prototypes of unexisting functions and unused defines. 2. Fixes alignment and spacing issues. 3. Changes numeric usage into constants. 4. Remove unnecessary parenthesis. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3 --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 32 ++++++++++-------------- 2 files changed, 13 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 4a643d7b9b22..68fefa8d97a5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -493,9 +493,6 @@ void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); /* Error handling */ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); -/* validate currect fw is loaded */ -bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err); - /* dev_close main block */ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 82f3696437c2..28b8fd5460d7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -102,8 +102,6 @@ static int disable_tpa; module_param(disable_tpa, int, 0); MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); -#define INT_MODE_INTx 1 -#define INT_MODE_MSI 2 int int_mode; module_param(int_mode, int, 0); MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " @@ -3068,7 +3066,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, * placed on the BD (not including paddings). */ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - - BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; + BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; rxq_init->cl_qzone_id = fp->cl_qzone_id; rxq_init->tpa_agg_sz = tpa_agg_size; @@ -3619,15 +3617,14 @@ static int bnx2x_acquire_alr(struct bnx2x *bp) might_sleep(); for (j = 0; j < 1000; j++) { - val = (1UL << 31); - REG_WR(bp, GRCBASE_MCP + 0x9c, val); - val = REG_RD(bp, GRCBASE_MCP + 0x9c); - if (val & (1L << 31)) + REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK); + val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK); + if (val & MCPR_ACCESS_LOCK_LOCK) break; msleep(5); } - if (!(val & (1L << 31))) { + if (!(val & MCPR_ACCESS_LOCK_LOCK)) { BNX2X_ERR("Cannot acquire MCP access lock register\n"); rc = -EBUSY; } @@ -3638,7 +3635,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp) /* release split MCP access lock register */ static void bnx2x_release_alr(struct bnx2x *bp) { - REG_WR(bp, GRCBASE_MCP + 0x9c, 0); + REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); } #define BNX2X_DEF_SB_ATT_IDX 0x0001 @@ -4086,7 +4083,7 @@ static void bnx2x_clear_reset_global(struct bnx2x *bp) */ static bool bnx2x_reset_is_global(struct bnx2x *bp) { - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; @@ -4137,7 +4134,7 @@ void bnx2x_set_reset_in_progress(struct bnx2x *bp) */ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) { - u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); u32 bit = engine ? 
BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; @@ -6013,11 +6010,6 @@ void bnx2x_pre_irq_nic_init(struct bnx2x *bp) bnx2x_init_rx_rings(bp); bnx2x_init_tx_rings(bp); - if (IS_VF(bp)) { - bnx2x_memset_stats(bp); - return; - } - if (IS_PF(bp)) { /* Initialize MOD_ABS interrupts */ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, @@ -6028,6 +6020,8 @@ void bnx2x_pre_irq_nic_init(struct bnx2x *bp) bnx2x_init_def_sb(bp); bnx2x_update_dsb_idx(bp); bnx2x_init_sp_ring(bp); + } else { + bnx2x_memset_stats(bp); } } @@ -10011,7 +10005,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); - hw_lock_val = (REG_RD(bp, hw_lock_reg)); + hw_lock_val = REG_RD(bp, hw_lock_reg); if (hw_lock_val) { if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { BNX2X_DEV_INFO("Release Previously held NVRAM lock\n"); @@ -10026,7 +10020,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp) if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { BNX2X_DEV_INFO("Release previously held alr\n"); - REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); + bnx2x_release_alr(bp); } do { @@ -11878,7 +11872,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_setup_tc = bnx2x_setup_tc, #ifdef CONFIG_BNX2X_SRIOV .ndo_set_vf_mac = bnx2x_set_vf_mac, - .ndo_set_vf_vlan = bnx2x_set_vf_vlan, + .ndo_set_vf_vlan = bnx2x_set_vf_vlan, .ndo_get_vf_config = bnx2x_get_vf_config, #endif #ifdef NETDEV_FCOE_WWNN -- cgit v1.2.3 From 6bf07b8e3666257c5172ab1681b377e547747d9b Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 2 Jun 2013 00:06:20 +0000 Subject: bnx2x: Revise prints This patch revises many bnx2x prints - mainly fixing print typos and adding some new debug prints (mostly for parity issues). Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 1 - drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 8 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 16 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 4 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 257 ++++++++++++++++------ drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 6 + drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 6 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 2 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 8 +- 9 files changed, 224 insertions(+), 84 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 8f551cf7bb30..f76597e5fa55 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1900,7 +1900,6 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, u8 src_type, u8 dst_type); int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae); -void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl); /* FLR related routines */ u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ef7fed52891a..14b5acd8eafc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2162,10 +2162,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) bp->fw_stats_data_mapping = bp->fw_stats_mapping + bp->fw_stats_req_sz; - DP(BNX2X_MSG_SP, "statistics request base address set to %x %x", + DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n", U64_HI(bp->fw_stats_req_mapping), U64_LO(bp->fw_stats_req_mapping)); - DP(BNX2X_MSG_SP, "statistics data base address set to %x %x", + DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n", U64_HI(bp->fw_stats_data_mapping), U64_LO(bp->fw_stats_data_mapping)); return 0; @@ -2240,7 +2240,7 @@ int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code) /* abort nic load if version mismatch */ if (my_fw != loaded_fw) { - BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n", + BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n", loaded_fw, my_fw); return -EBUSY; } @@ -3977,7 +3977,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) /* requested to support too many traffic classes */ if (num_tc > bp->max_cos) { - BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n", + BNX2X_ERR("support for too many traffic classes requested: %d. 
Max supported is %d\n", num_tc, bp->max_cos); return -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 68fefa8d97a5..650bb52155a8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -49,13 +49,15 @@ extern int int_mode; } \ } while (0) -#define BNX2X_PCI_ALLOC(x, y, size) \ -do { \ - x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ - GFP_KERNEL | __GFP_ZERO); \ - if (x == NULL) \ - goto alloc_mem_err; \ -} while (0) +#define BNX2X_PCI_ALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, \ + GFP_KERNEL | __GFP_ZERO); \ + if (x == NULL) \ + goto alloc_mem_err; \ + DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ + (unsigned long long)(*y), x); \ + } while (0) #define BNX2X_ALLOC(x, size) \ do { \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 210614e37b9b..0c94df47e0e8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -2381,7 +2381,7 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, *flags |= DCB_FEATCFG_ERROR; break; default: - BNX2X_ERR("Non valid featrue-ID\n"); + BNX2X_ERR("Non valid feature-ID\n"); rval = 1; break; } @@ -2422,7 +2422,7 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, flags & DCB_FEATCFG_WILLING ? 1 : 0; break; default: - BNX2X_ERR("Non valid featrue-ID\n"); + BNX2X_ERR("Non valid feature-ID\n"); rval = 1; break; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 28b8fd5460d7..60ff1aaba6dc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -371,9 +371,11 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" #define DMAE_DP_DST_NONE "dst_addr [none]" -void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl) +static void bnx2x_dp_dmae(struct bnx2x *bp, + struct dmae_command *dmae, int msglvl) { u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; + int i; switch (dmae->opcode & DMAE_COMMAND_DST) { case DMAE_CMD_DST_PCI: @@ -429,6 +431,10 @@ void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl) dmae->comp_val); break; } + + for (i = 0; i < (sizeof(struct dmae_command)/4); i++) + DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n", + i, *(((u32 *)dmae) + i)); } /* copy command into DMAE command memory and set DMAE command go */ @@ -503,8 +509,9 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; int rc = 0; - /* - * Lock the dmae channel. Disable BHs to prevent a dead-lock + bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE); + + /* Lock the dmae channel. Disable BHs to prevent a dead-lock * as long as this code is called both from syscall context and * from ndo_set_rx_mode() flow that may be called from BH. 
*/ @@ -543,6 +550,7 @@ unlock: void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, u32 len32) { + int rc; struct dmae_command dmae; if (!bp->dmae_ready) { @@ -566,11 +574,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, dmae.len = len32; /* issue the command and wait for completion */ - bnx2x_issue_dmae_with_comp(bp, &dmae); + rc = bnx2x_issue_dmae_with_comp(bp, &dmae); + if (rc) { + BNX2X_ERR("DMAE returned failure %d\n", rc); + bnx2x_panic(); + } } void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) { + int rc; struct dmae_command dmae; if (!bp->dmae_ready) { @@ -598,7 +611,11 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) dmae.len = len32; /* issue the command and wait for completion */ - bnx2x_issue_dmae_with_comp(bp, &dmae); + rc = bnx2x_issue_dmae_with_comp(bp, &dmae); + if (rc) { + BNX2X_ERR("DMAE returned failure %d\n", rc); + bnx2x_panic(); + }; } static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, @@ -834,7 +851,7 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp) REG_WR(bp, addr, val); if (REG_RD(bp, addr) != val) - BNX2X_ERR("BUG! proper val not read from IGU!\n"); + BNX2X_ERR("BUG! Proper val not read from IGU!\n"); } static void bnx2x_igu_int_disable(struct bnx2x *bp) @@ -852,7 +869,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp) REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) - BNX2X_ERR("BUG! proper val not read from IGU!\n"); + BNX2X_ERR("BUG! Proper val not read from IGU!\n"); } static void bnx2x_int_disable(struct bnx2x *bp) @@ -1021,6 +1038,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) #ifdef BNX2X_STOP_ON_ERROR /* event queue */ + BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); for (i = 0; i < NUM_EQ_DESC; i++) { u32 *data = (u32 *)&bp->eq_ring[i].message.data; @@ -1379,7 +1397,7 @@ static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) /* Wait DMAE PF usage counter to zero */ if (bnx2x_flr_clnup_poll_hw_counter(bp, dmae_reg_go_c[INIT_DMAE_C(bp)], - "DMAE dommand register timed out", + "DMAE command register timed out", poll_cnt)) return 1; @@ -1759,7 +1777,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) break; case (RAMROD_CMD_ID_ETH_TERMINATE): - DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid); + DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid); drv_cmd = BNX2X_Q_CMD_TERMINATE; break; @@ -1971,8 +1989,8 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) /* Validating that the resource is currently taken */ lock_status = REG_RD(bp, hw_lock_control_reg); if (!(lock_status & resource_bit)) { - BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n", - lock_status, resource_bit); + BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n", + lock_status, resource_bit); return -EFAULT; } @@ -4237,13 +4255,18 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) return val != 0; } +static void _print_parity(struct bnx2x *bp, u32 reg) +{ + pr_cont(" [0x%08x] ", REG_RD(bp, reg)); +} + static void _print_next_block(int idx, const char *blk) { pr_cont("%s%s", idx ? 
", " : "", blk); } -static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, - bool print) +static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, + int par_num, bool print) { int i = 0; u32 cur_bit = 0; @@ -4252,33 +4275,54 @@ static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "BRB"); + _print_parity(bp, + BRB1_REG_BRB1_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "PARSER"); + _print_parity(bp, PRS_REG_PRS_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "TSDM"); + _print_parity(bp, + TSDM_REG_TSDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "SEARCHER"); + _print_parity(bp, SRC_REG_SRC_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "TCM"); + _print_parity(bp, + TCM_REG_TCM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "TSEMI"); + _print_parity(bp, + TSEM_REG_TSEM_PRTY_STS_0); + _print_parity(bp, + TSEM_REG_TSEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "XPB"); + _print_parity(bp, GRCBASE_XPB + + PB_REG_PB_PRTY_STS); + } break; } @@ -4290,8 +4334,9 @@ static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, return par_num; } -static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, - bool *global, bool print) +static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, + int par_num, bool *global, + bool print) { int i = 0; u32 cur_bit = 0; @@ -4300,37 +4345,66 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "PBF"); + _print_parity(bp, PBF_REG_PBF_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "QM"); + _print_parity(bp, QM_REG_QM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "TM"); + _print_parity(bp, TM_REG_TM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "XSDM"); + _print_parity(bp, + XSDM_REG_XSDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "XCM"); + _print_parity(bp, XCM_REG_XCM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "XSEMI"); + _print_parity(bp, + XSEM_REG_XSEM_PRTY_STS_0); + _print_parity(bp, + XSEM_REG_XSEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "DOORBELLQ"); + _print_parity(bp, + DORQ_REG_DORQ_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "NIG"); + if (CHIP_IS_E1x(bp)) { + _print_parity(bp, + NIG_REG_NIG_PRTY_STS); + } else { + _print_parity(bp, + NIG_REG_NIG_PRTY_STS_0); + _print_parity(bp, + 
NIG_REG_NIG_PRTY_STS_1); + } + } break; case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: if (print) @@ -4339,32 +4413,52 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, *global = true; break; case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "DEBUG"); + _print_parity(bp, DBG_REG_DBG_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "USDM"); + _print_parity(bp, + USDM_REG_USDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "UCM"); + _print_parity(bp, UCM_REG_UCM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "USEMI"); + _print_parity(bp, + USEM_REG_USEM_PRTY_STS_0); + _print_parity(bp, + USEM_REG_USEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "UPB"); + _print_parity(bp, GRCBASE_UPB + + PB_REG_PB_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CSDM"); + _print_parity(bp, + CSDM_REG_CSDM_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CCM"); + _print_parity(bp, CCM_REG_CCM_PRTY_STS); + } break; } @@ -4376,8 +4470,8 @@ static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, return par_num; } -static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, - bool print) +static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, + int par_num, bool print) { int i = 0; u32 cur_bit = 0; @@ -4386,12 +4480,23 @@ static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CSEMI"); + _print_parity(bp, + CSEM_REG_CSEM_PRTY_STS_0); + _print_parity(bp, + CSEM_REG_CSEM_PRTY_STS_1); + } break; case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "PXP"); + _print_parity(bp, PXP_REG_PXP_PRTY_STS); + _print_parity(bp, + PXP2_REG_PXP2_PRTY_STS_0); + _print_parity(bp, + PXP2_REG_PXP2_PRTY_STS_1); + } break; case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: if (print) @@ -4399,24 +4504,42 @@ static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, "PXPPCICLOCKCLIENT"); break; case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CFC"); + _print_parity(bp, + CFC_REG_CFC_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "CDU"); + _print_parity(bp, CDU_REG_CDU_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "DMAE"); + _print_parity(bp, + DMAE_REG_DMAE_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "IGU"); + if (CHIP_IS_E1x(bp)) + _print_parity(bp, + HC_REG_HC_PRTY_STS); + else + _print_parity(bp, + IGU_REG_IGU_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "MISC"); + _print_parity(bp, + MISC_REG_MISC_PRTY_STS); + } break; } @@ -4470,8 +4593,8 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, 
return par_num; } -static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, - bool print) +static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, + int par_num, bool print) { int i = 0; u32 cur_bit = 0; @@ -4480,12 +4603,18 @@ static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "PGLUE_B"); + _print_parity(bp, + PGLUE_B_REG_PGLUE_B_PRTY_STS); + } break; case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: - if (print) + if (print) { _print_next_block(par_num++, "ATC"); + _print_parity(bp, + ATC_REG_ATC_PRTY_STS); + } break; } @@ -4516,15 +4645,15 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, if (print) netdev_err(bp->dev, "Parity errors detected in blocks: "); - par_num = bnx2x_check_blocks_with_parity0( + par_num = bnx2x_check_blocks_with_parity0(bp, sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); - par_num = bnx2x_check_blocks_with_parity1( + par_num = bnx2x_check_blocks_with_parity1(bp, sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity2( + par_num = bnx2x_check_blocks_with_parity2(bp, sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); par_num = bnx2x_check_blocks_with_parity3( sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); - par_num = bnx2x_check_blocks_with_parity4( + par_num = bnx2x_check_blocks_with_parity4(bp, sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); if (print) @@ -7437,7 +7566,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) } else { /* Set NIC mode */ REG_WR(bp, PRS_REG_NIC_MODE, 1); - DP(NETIF_MSG_IFUP, "NIC MODE configrued\n"); + DP(NETIF_MSG_IFUP, "NIC MODE configured\n"); } if (!CHIP_IS_E1x(bp)) { @@ -8609,7 +8738,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) struct bnx2x_func_state_params func_params = {NULL}; DP(NETIF_MSG_IFDOWN, - "Hmmm... unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); + "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); func_params.f_obj = &bp->func_obj; __set_bit(RAMROD_DRV_CLR_ONLY, @@ -9426,14 +9555,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) return; } - /* if stop on error is defined no recovery flows should be executed */ + if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { #ifdef BNX2X_STOP_ON_ERROR - BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" - "you will need to reboot when done\n"); - goto sp_rtnl_not_reset; + BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" + "you will need to reboot when done\n"); + goto sp_rtnl_not_reset; #endif - - if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { /* * Clear all pending SP commands as we are going to reset the * function anyway. @@ -9448,6 +9575,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) } if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { +#ifdef BNX2X_STOP_ON_ERROR + BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" + "you will need to reboot when done\n"); + goto sp_rtnl_not_reset; +#endif + /* * Clear all pending SP commands as we are going to reset the * function anyway. 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index d22bc40091ec..8e627b886d7b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -35,6 +35,8 @@ #define ATC_REG_ATC_INT_STS_CLR 0x1101c0 /* [RW 5] Parity mask register #0 read/write */ #define ATC_REG_ATC_PRTY_MASK 0x1101d8 +/* [R 5] Parity register #0 read */ +#define ATC_REG_ATC_PRTY_STS 0x1101cc /* [RC 5] Parity register #0 read clear */ #define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0 /* [RW 19] Interrupt mask register #0 read/write */ @@ -2750,6 +2752,8 @@ #define PBF_REG_PBF_INT_STS 0x1401c8 /* [RW 20] Parity mask register #0 read/write */ #define PBF_REG_PBF_PRTY_MASK 0x1401e4 +/* [R 28] Parity register #0 read */ +#define PBF_REG_PBF_PRTY_STS 0x1401d8 /* [RC 20] Parity register #0 read clear */ #define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc /* [RW 16] The Ethernet type value for L2 tag 0 */ @@ -4517,6 +4521,8 @@ #define TM_REG_TM_INT_STS 0x1640f0 /* [RW 7] Parity mask register #0 read/write */ #define TM_REG_TM_PRTY_MASK 0x16410c +/* [R 7] Parity register #0 read */ +#define TM_REG_TM_PRTY_STS 0x164100 /* [RC 7] Parity register #0 read clear */ #define TM_REG_TM_PRTY_STS_CLR 0x164104 /* [RW 8] The event id for aggregated interrupt 0 */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index b2ab288aaf76..8a556dd888d5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1620,7 +1620,7 @@ next_vf_to_clean: i++) ; - DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. num of vfs: %d\n", i, + DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i, BNX2X_NR_VIRTFN(bp)); if (i < BNX2X_NR_VIRTFN(bp)) { @@ -3031,7 +3031,7 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) /* HW channel is only operational when PF is up */ if (bp->state != BNX2X_STATE_OPEN) { - BNX2X_ERR("VF num configurtion via sysfs not supported while PF is down"); + BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); return -EINVAL; } @@ -3391,7 +3391,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, &bulletin)) break; - BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n", + BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n", bulletin.crc, bnx2x_crc_vf_bulletin(bp, &bulletin)); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 3e7cb8341841..f08c604a4fbd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -496,7 +496,7 @@ enum { else if ((next) == VFOP_VERIFY_PEND) \ BNX2X_ERR("expected pending\n"); \ else { \ - DP(BNX2X_MSG_IOV, "no ramrod. scheduling\n"); \ + DP(BNX2X_MSG_IOV, "no ramrod. 
Scheduling\n"); \ atomic_set(&vf->op_in_progress, 1); \ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \ return; \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index f55fa0725680..861809d3154b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -333,7 +333,7 @@ int bnx2x_vfpf_release(struct bnx2x *bp) DP(BNX2X_MSG_SP, "vf released\n"); } else { /* PF reports error */ - BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n", + BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n", resp->hdr.status); rc = -EAGAIN; goto out; @@ -844,7 +844,6 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf, dmae.dst_addr_hi = vf_addr_hi; } dmae.len = len32; - bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE); /* issue the command and wait for completion */ return bnx2x_issue_dmae_with_comp(bp, &dmae); @@ -1588,8 +1587,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, * support them. Or this may be because someone wrote a crappy * VF driver and is sending garbage over the channel. */ - BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n", - mbx->first_tlv.tl.type, mbx->first_tlv.tl.length); + BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n", + mbx->first_tlv.tl.type, mbx->first_tlv.tl.length, + vf->state); for (i = 0; i < 20; i++) DP_CONT(BNX2X_MSG_IOV, "%x ", mbx->msg->req.tlv_buf_size.tlv_buffer[i]); -- cgit v1.2.3 From 639d65b85592cf31eca24f33ff00e309ebbf0d2b Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 2 Jun 2013 00:06:21 +0000 Subject: bnx2x: semi-Semantic changes This patch includes a few changes that change the driver's flow without truly changing anything in its functionality - use usleep_range for short sleeps instead of msleep and initialize Tx consumer during initialization for better information during errors. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 60ff1aaba6dc..658b9fd0275f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -1954,7 +1954,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) if (lock_status & resource_bit) return 0; - msleep(5); + usleep_range(5000, 10000); } BNX2X_ERR("Timeout\n"); return -EAGAIN; @@ -3640,7 +3640,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp) if (val & MCPR_ACCESS_LOCK_LOCK) break; - msleep(5); + usleep_range(5000, 10000); } if (!(val & MCPR_ACCESS_LOCK_LOCK)) { BNX2X_ERR("Cannot acquire MCP access lock register\n"); @@ -6078,6 +6078,8 @@ static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); } + *txdata->tx_cons_sb = cpu_to_le16(0); + SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); txdata->tx_db.data.zero_fill1 = 0; txdata->tx_db.data.prod = 0; @@ -6329,7 +6331,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) if (val == 0x10) break; - msleep(10); + usleep_range(10000, 20000); count--; } if (val != 0x10) { @@ -6344,7 +6346,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) if (val == 1) break; - msleep(10); + usleep_range(10000, 20000); count--; } if (val != 0x1) { @@ -6385,7 +6387,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) if (val == 0xb0) break; - msleep(10); + usleep_range(10000, 20000); count--; } if (val != 0xb0) { @@ -8500,7 +8502,7 @@ static void bnx2x_reset_func(struct bnx2x *bp) * scan to complete */ for (i = 0; i < 200; i++) { - msleep(10); + usleep_range(10000, 20000); if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) break; } -- cgit v1.2.3 From 829a5071ccf040b78bd93286a218580c66833b00 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sat, 1 Jun 2013 23:02:26 +0000 Subject: bnx2x: fix a power state test If PCIe supports PM capabilities, bnx2x will always claim eeprom is accessible as PCI_D0 is zero. Reported-by: Dan Carpenter Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 72b1b59294b4..7c6faebb1838 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1390,7 +1390,8 @@ static bool bnx2x_is_nvm_accessible(struct bnx2x *bp) rc = pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pm); - if ((rc && !netif_running(dev)) || (!rc && ((pm & PCI_D0) != PCI_D0))) + if ((rc && !netif_running(dev)) || + (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != PCI_D0))) return false; return true; -- cgit v1.2.3 From bd118b6e9509a19bfb82925152d9ae8fa398d567 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Wed, 29 May 2013 04:13:54 +0000 Subject: ibm-ethernet: delete stale MCA and duplicate PSERIES dependency MCA support has been removed but this dependency escaped removal. Also, there is a duplicate PPC_PSERIES dependency that appeared when the ethernet drivers were separated into vendor/model specific directories, so we remove that here as well. 
Reported-by: Robert de Rooy Signed-off-by: Paul Gortmaker Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig index 6529d31595a7..563a1ac71dbc 100644 --- a/drivers/net/ethernet/ibm/Kconfig +++ b/drivers/net/ethernet/ibm/Kconfig @@ -5,8 +5,7 @@ config NET_VENDOR_IBM bool "IBM devices" default y - depends on MCA || PPC_PSERIES || PPC_PSERIES || PPC_DCR || \ - (IBMEBUS && SPARSEMEM) + depends on PPC_PSERIES || PPC_DCR || (IBMEBUS && SPARSEMEM) ---help--- If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from -- cgit v1.2.3 From 67d6bfa6863c8535445e1dcdb3385e3a05cd32a7 Mon Sep 17 00:00:00 2001 From: Jingoo Han Date: Fri, 31 May 2013 21:21:35 +0000 Subject: net: ethernet: replace strict_strtoul() with kstrtoul() The usage of strict_strtoul() is not preferred, because strict_strtoul() is obsolete. Thus, kstrtoul() should be used. Signed-off-by: Jingoo Han Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4 ++-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 20c7b3c12b56..c401b0b4353d 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2774,7 +2774,7 @@ netxen_store_bridged_mode(struct device *dev, if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) goto err_out; - if (strict_strtoul(buf, 2, &new)) + if (kstrtoul(buf, 2, &new)) goto err_out; if (!netxen_config_bridged_mode(adapter, !!new)) @@ -2813,7 +2813,7 @@ netxen_store_diag_mode(struct device *dev, struct netxen_adapter *adapter = dev_get_drvdata(dev); unsigned long new; - if (strict_strtoul(buf, 2, &new)) + if (kstrtoul(buf, 2, &new)) return -EINVAL; if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 025a7192289d..7ec030abdf07 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -47,7 +47,7 @@ static ssize_t qlcnic_store_bridged_mode(struct device *dev, if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) goto err_out; - if (strict_strtoul(buf, 2, &new)) + if (kstrtoul(buf, 2, &new)) goto err_out; if (!qlcnic_config_bridged_mode(adapter, !!new)) @@ -77,7 +77,7 @@ static ssize_t qlcnic_store_diag_mode(struct device *dev, struct qlcnic_adapter *adapter = dev_get_drvdata(dev); unsigned long new; - if (strict_strtoul(buf, 2, &new)) + if (kstrtoul(buf, 2, &new)) return -EINVAL; if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED)) -- cgit v1.2.3 From 5865fc1b6a99bc7dcf245770c2735577b8596b6e Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Sun, 2 Jun 2013 21:36:21 +0000 Subject: tg3: remove redundant pm init code Pci_enable_device() will set device pm state to D0, so it's no need to do it again in tg3_init_one(). Acked-by: Michael Chan Signed-off-by: Yijing Wang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/tg3.c | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index d7755d849e53..3cfae0e49207 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -17195,7 +17195,7 @@ static int tg3_init_one(struct pci_dev *pdev, { struct net_device *dev; struct tg3 *tp; - int i, err, pm_cap; + int i, err; u32 sndmbx, rcvmbx, intmbx; char str[40]; u64 dma_mask, persist_dma_mask; @@ -17217,25 +17217,10 @@ static int tg3_init_one(struct pci_dev *pdev, pci_set_master(pdev); - /* Find power-management capability. */ - pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); - if (pm_cap == 0) { - dev_err(&pdev->dev, - "Cannot find Power Management capability, aborting\n"); - err = -EIO; - goto err_out_free_res; - } - - err = pci_set_power_state(pdev, PCI_D0); - if (err) { - dev_err(&pdev->dev, "Transition to D0 failed, aborting\n"); - goto err_out_free_res; - } - dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); if (!dev) { err = -ENOMEM; - goto err_out_power_down; + goto err_out_free_res; } SET_NETDEV_DEV(dev, &pdev->dev); @@ -17243,7 +17228,7 @@ static int tg3_init_one(struct pci_dev *pdev, tp = netdev_priv(dev); tp->pdev = pdev; tp->dev = dev; - tp->pm_cap = pm_cap; + tp->pm_cap = pdev->pm_cap; tp->rx_mode = TG3_DEF_RX_MODE; tp->tx_mode = TG3_DEF_TX_MODE; tp->irq_sync = 1; @@ -17581,9 +17566,6 @@ err_out_iounmap: err_out_free_dev: free_netdev(dev); -err_out_power_down: - pci_set_power_state(pdev, PCI_D3hot); - err_out_free_res: pci_release_regions(pdev); -- cgit v1.2.3 From c5ceea7a2813fecded01567383cca4d6855164ab Mon Sep 17 00:00:00 2001 From: Mugunthan V N Date: Mon, 3 Jun 2013 20:10:10 +0000 Subject: drivers: net: ethernet: cpsw: add phy-mode support to cpsw driver Adding phy-mode support to cpsw driver and updating the cpsw binding documentation. Signed-off-by: Mugunthan V N Signed-off-by: David S. 
Miller --- Documentation/devicetree/bindings/net/cpsw.txt | 6 ++++++ drivers/net/ethernet/ti/cpsw.c | 2 ++ 2 files changed, 8 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt index 4f2ca6b4a182..05d660e4ac64 100644 --- a/Documentation/devicetree/bindings/net/cpsw.txt +++ b/Documentation/devicetree/bindings/net/cpsw.txt @@ -28,6 +28,8 @@ Optional properties: Slave Properties: Required properties: - phy_id : Specifies slave phy id +- phy-mode : The interface between the SoC and the PHY (a string + that of_get_phy_mode() can understand) - mac-address : Specifies slave MAC address Optional properties: @@ -58,11 +60,13 @@ Examples: cpts_clock_shift = <29>; cpsw_emac0: slave@0 { phy_id = <&davinci_mdio>, <0>; + phy-mode = "rgmii-txid"; /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; }; cpsw_emac1: slave@1 { phy_id = <&davinci_mdio>, <1>; + phy-mode = "rgmii-txid"; /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; }; @@ -84,11 +88,13 @@ Examples: cpts_clock_shift = <29>; cpsw_emac0: slave@0 { phy_id = <&davinci_mdio>, <0>; + phy-mode = "rgmii-txid"; /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; }; cpsw_emac1: slave@1 { phy_id = <&davinci_mdio>, <1>; + phy-mode = "rgmii-txid"; /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; }; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 89a4c40d6d83..a45f64eef870 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1554,6 +1554,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (mac_addr) memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); + slave_data->phy_if = of_get_phy_mode(slave_node); + if (data->dual_emac) { if (of_property_read_u32(slave_node, "dual_emac_res_vlan", &prop)) { -- cgit v1.2.3 From f1362e378ab27844f968f1f390e33b6c77861ec0 Mon Sep 17 00:00:00 2001 From: "Jens Renner \\(EFE\\)" Date: Sun, 2 Jun 2013 05:19:06 +0000 Subject: net: emaclite: Fix typo in error message s/allocal/allocate/ Signed-off-by: Jens Renner Acked-by: Michal Simek Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index aa14d8adcecb..aaadf7e551df 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -877,7 +877,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) bus = mdiobus_alloc(); if (!bus) { - dev_err(dev, "Failed to allocal mdiobus\n"); + dev_err(dev, "Failed to allocate mdiobus\n"); return -ENOMEM; } -- cgit v1.2.3 From ff20877ab3be42cf9c21a9b0173db690aa3165b0 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 3 Jun 2013 03:11:24 +0000 Subject: net: mv643xx_eth: add missing semicolon 76723bca28 "net: mv643xx_eth: add DT parsing support" added a dummy mv643xx_eth_shared_of_probe() fallback function with a typo. This adds the missing semicolon so we can build without CONFIG_OF again, and changes both dummy functions to the more conventional "static inline" syntax, which can avoid potential problems with the empty macro. Signed-off-by: Arnd Bergmann Cc: Sebastian Hesselbarth Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 23ea7b6e23f1..c7f9fb33ce95 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2582,12 +2582,14 @@ static void mv643xx_eth_shared_of_remove(void) } } #else -static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) +static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev) { - return 0 + return 0; } -#define mv643xx_eth_shared_of_remove() +static inline void mv643xx_eth_shared_of_remove(void) +{ +} #endif static int mv643xx_eth_shared_probe(struct platform_device *pdev) -- cgit v1.2.3 From 93baf4c615584fa02c21aa78f305428fc7060656 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 3 Jun 2013 03:36:52 +0000 Subject: net: sun4i-emac: fix a typo in emac_probe() Just fixed a typo in emac_probe(). Signed-off-by: Wei Yongjun Acked-by: Maxime Ripard Signed-off-by: David S. Miller --- drivers/net/ethernet/allwinner/sun4i-emac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index b411344e719e..26083cdc25ab 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -821,7 +821,7 @@ static int emac_probe(struct platform_device *pdev) db->membase = of_iomap(np, 0); if (!db->membase) { dev_err(&pdev->dev, "failed to remap registers\n"); - return -ENOMEM; + ret = -ENOMEM; goto out; } -- cgit v1.2.3 From 8cc3e439ab920703264a62e1b19559b39e20d77e Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Tue, 4 Jun 2013 04:52:23 +0000 Subject: net: mvneta: read MAC address from hardware when available This patch improves the logic used by the mvneta driver to find a MAC address for a particular interface. Until now, it was only looking at the Device Tree, and if no address was found, was falling back to generating a random MAC address. This patch adds the intermediate solution of reading the MAC address from the hardware registers, in case it has been set by the bootloader. So the order is now: 1) MAC address from the Device Tree 2) MAC address from the hardware registers 3) Random MAC address This requires moving the MAC address initialization a little bit later in the ->probe() code, because it now requires the hardware registers to be remapped. Signed-off-by: Thomas Petazzoni Cc: Sergei Shtylyov Cc: Joe Perches Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 44 ++++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index cd0ea204ac24..712779fb12b7 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2251,6 +2251,21 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) return 0; } +/* Get mac address */ +static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) +{ + u32 mac_addr_l, mac_addr_h; + + mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); + mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); + addr[0] = (mac_addr_h >> 24) & 0xFF; + addr[1] = (mac_addr_h >> 16) & 0xFF; + addr[2] = (mac_addr_h >> 8) & 0xFF; + addr[3] = mac_addr_h & 0xFF; + addr[4] = (mac_addr_l >> 8) & 0xFF; + addr[5] = mac_addr_l & 0xFF; +} + /* Handle setting mac address */ static int mvneta_set_mac_addr(struct net_device *dev, void *addr) { @@ -2667,7 +2682,9 @@ static int mvneta_probe(struct platform_device *pdev) u32 phy_addr; struct mvneta_port *pp; struct net_device *dev; - const char *mac_addr; + const char *dt_mac_addr; + char hw_mac_addr[ETH_ALEN]; + const char *mac_from; int phy_mode; int err; @@ -2703,13 +2720,6 @@ static int mvneta_probe(struct platform_device *pdev) goto err_free_irq; } - mac_addr = of_get_mac_address(dn); - - if (!mac_addr || !is_valid_ether_addr(mac_addr)) - eth_hw_addr_random(dev); - else - memcpy(dev->dev_addr, mac_addr, ETH_ALEN); - dev->tx_queue_len = MVNETA_MAX_TXD; dev->watchdog_timeo = 5 * HZ; dev->netdev_ops = &mvneta_netdev_ops; @@ -2740,6 +2750,21 @@ static int mvneta_probe(struct platform_device *pdev) clk_prepare_enable(pp->clk); + dt_mac_addr = of_get_mac_address(dn); + if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) { + mac_from = "device tree"; + memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN); + } else { + mvneta_get_mac_addr(pp, hw_mac_addr); + if (is_valid_ether_addr(hw_mac_addr)) { + mac_from = "hardware"; + memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN); + } else { + mac_from = "random"; + eth_hw_addr_random(dev); + } + } + pp->tx_done_timer.data = (unsigned long)dev; pp->tx_ring_size = MVNETA_MAX_TXD; @@ -2772,7 +2797,8 @@ static int mvneta_probe(struct platform_device *pdev) goto err_deinit; } - netdev_info(dev, "mac: %pM\n", dev->dev_addr); + netdev_info(dev, "Using %s mac address %pM\n", mac_from, + dev->dev_addr); platform_set_drvdata(pdev, pp->dev); -- cgit v1.2.3 From e6309cff76a6adc0010609d45d34173e524a546d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 3 Jun 2013 07:54:55 +0000 Subject: net/mlx4: use one page fragment per incoming frame mlx4 driver has a suboptimal memory allocation strategy for regular MTU=1500 frames, as it uses two page fragments : One of 512 bytes and one of 1024 bytes. This makes GRO less effective, as each GSO packet contains 8 MSS instead of 16 MSS. Performance of a single TCP flow gains 25 % increase with the following patch. Before patch : A:~# netperf -H 192.168.0.2 -Cc MIGRATED TCP STREAM TEST ... Recv Send Send Utilization Service Demand Socket Socket Message Elapsed Send Recv Send Recv Size Size Size Time Throughput local remote local remote bytes bytes bytes secs. 10^6bits/s % S % S us/KB us/KB 87380 16384 16384 10.00 13798.47 3.06 4.20 0.436 0.598 After patch : A:~# netperf -H 192.68.0.2 -Cc MIGRATED TCP STREAM TEST ... 
Recv Send Send Utilization Service Demand Socket Socket Message Elapsed Send Recv Send Recv Size Size Size Time Throughput local remote local remote bytes bytes bytes secs. 10^6bits/s % S % S us/KB us/KB 87380 16384 16384 10.00 17273.80 3.44 4.19 0.391 0.477 Signed-off-by: Eric Dumazet Cc: Amir Vadai Acked-By: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index b1d7657b2bf5..b1f51c1f635c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -98,11 +98,11 @@ #define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384) #define MLX4_EN_ALLOC_ORDER get_order(MLX4_EN_ALLOC_SIZE) -/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU +/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU * and 4K allocations) */ enum { - FRAG_SZ0 = 512 - NET_IP_ALIGN, - FRAG_SZ1 = 1024, + FRAG_SZ0 = 1536 - NET_IP_ALIGN, + FRAG_SZ1 = 4096, FRAG_SZ2 = 4096, FRAG_SZ3 = MLX4_EN_ALLOC_SIZE }; -- cgit v1.2.3 From c08355fb61fb064d141b8bd156145d326f33022f Mon Sep 17 00:00:00 2001 From: "govindarajulu.v" Date: Mon, 3 Jun 2013 08:07:42 +0000 Subject: mlx4: use __netdev_pick_tx instead of __skb_tx_hash in mlx4_en_select_queue mlx4_en_select_queue() uses __skb_tx_hash to select the transmit queue. XPS settings are ignored by this. Instead, we can use __netdev_pick_tx to select the transmit queue. Compile test only. Signed-off-by: govindarajulu.v Acked-By: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 4e6877a032a8..7c492382da09 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -544,7 +544,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) if (vlan_tx_tag_present(skb)) up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; - return __skb_tx_hash(dev, skb, rings_p_up) + up * rings_p_up; + return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up; } static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) -- cgit v1.2.3 From 10e179e364beafc23d837e81cf98d99720f42551 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 3 Jun 2013 11:36:50 +0000 Subject: net: sun4i-emac: remove erroneous assignment The newly added sun4i-emac driver causes a build error when CONFIG_NET_POLL_CONTROLLER is set, because it attempts to assign a pointer to netdev->poll_controller, which has been replaced with ops->ndo_poll_controller in 2.6.31! The correct assignment is present as well, so we just need to remove the wrong one. Signed-off-by: Arnd Bergmann Cc: Stefan Roese Cc: Maxime Ripard Cc: Richard Genoud Acked-by: Stefan Roese Acked-by: Maxime Ripard Signed-off-by: David S. 
Miller --- drivers/net/ethernet/allwinner/sun4i-emac.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 26083cdc25ab..0bb2f4af9952 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -869,10 +869,6 @@ static int emac_probe(struct platform_device *pdev) ndev->watchdog_timeo = msecs_to_jiffies(watchdog); ndev->ethtool_ops = &emac_ethtool_ops; -#ifdef CONFIG_NET_POLL_CONTROLLER - ndev->poll_controller = &emac_poll_controller; -#endif - platform_set_drvdata(pdev, ndev); /* Carrier starts down, phylib will bring it up */ -- cgit v1.2.3 From 15dd16c233b6d5ceea3a00f18d7c42902d0ac6b6 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Mon, 3 Jun 2013 22:11:16 +0000 Subject: cxgb3: Correct comparisons and calculations using skb->tail and skb-transport_header This corrects an regression introduced by "net: Use 16bits for *_headers fields of struct skbuff" when NET_SKBUFF_DATA_USES_OFFSET is not set. In that case skb->tail will be a pointer whereas skb->transport_header will be an offset from head. This is corrected by using wrappers that ensure that comparisons and calculations are always made using pointers. Cc: Simon Horman Signed-off-by: Li RongQing Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb3/sge.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/sge.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 46d1efc17527..687ec4a8bb48 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -1583,7 +1583,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb) dui = (struct deferred_unmap_info *)skb->head; p = dui->addr; - if (skb->tail - skb->transport_header) + if (skb_tail_pointer(skb) - skb_transport_header(skb)) pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) - skb_transport_header(skb), PCI_DMA_TODEVICE); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 2bfbb206b35a..ac311f5f3eb9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1294,7 +1294,7 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) flits = skb_transport_offset(skb) / 8U; /* headers */ cnt = skb_shinfo(skb)->nr_frags; - if (skb->tail != skb->transport_header) + if (skb_tail_pointer(skb) != skb_transport_header(skb)) cnt++; return flits + sgl_len(cnt); } -- cgit v1.2.3 From 7a3e2585f2fd543ac7284a1f09641628f730f720 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Tue, 4 Jun 2013 00:03:27 +0000 Subject: net: emaclite: Use platform resource table Read data directly from platform recource table and do not use of_irq_to_resource(). Also use devm_request_and_ioremap() for probe functions simplification. Signed-off-by: Michal Simek Acked-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 67 +++++++++------------------ 1 file changed, 22 insertions(+), 45 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index aaadf7e551df..c4d8aeffb4ba 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1074,13 +1074,14 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) * This function un maps the IO region of the Emaclite device and frees the net * device. */ -static void xemaclite_remove_ndev(struct net_device *ndev) +static void xemaclite_remove_ndev(struct net_device *ndev, + struct platform_device *pdev) { if (ndev) { struct net_local *lp = netdev_priv(ndev); if (lp->base_addr) - iounmap((void __iomem __force *) (lp->base_addr)); + devm_iounmap(&pdev->dev, lp->base_addr); free_netdev(ndev); } } @@ -1126,8 +1127,7 @@ static struct net_device_ops xemaclite_netdev_ops; */ static int xemaclite_of_probe(struct platform_device *ofdev) { - struct resource r_irq; /* Interrupt resources */ - struct resource r_mem; /* IO mem resources */ + struct resource *res; struct net_device *ndev = NULL; struct net_local *lp = NULL; struct device *dev = &ofdev->dev; @@ -1137,20 +1137,6 @@ static int xemaclite_of_probe(struct platform_device *ofdev) dev_info(dev, "Device Tree Probing\n"); - /* Get iospace for the device */ - rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem); - if (rc) { - dev_err(dev, "invalid address\n"); - return rc; - } - - /* Get IRQ for the device */ - rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq); - if (!rc) { - dev_err(dev, "no IRQ found\n"); - return rc; - } - /* Create an ethernet device instance */ ndev = alloc_etherdev(sizeof(struct net_local)); if (!ndev) @@ -1159,29 +1145,25 @@ static int xemaclite_of_probe(struct platform_device *ofdev) dev_set_drvdata(dev, ndev); SET_NETDEV_DEV(ndev, &ofdev->dev); - ndev->irq = r_irq.start; - ndev->mem_start = r_mem.start; - ndev->mem_end = r_mem.end; - lp = netdev_priv(ndev); lp->ndev = ndev; - if (!request_mem_region(ndev->mem_start, - ndev->mem_end - ndev->mem_start + 1, - DRIVER_NAME)) { - dev_err(dev, "Couldn't lock memory region at %p\n", - (void *)ndev->mem_start); - rc = -EBUSY; - goto error2; + /* Get IRQ for the device */ + res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); + if (!res) { + dev_err(dev, "no IRQ found\n"); + goto error; } - /* Get the virtual base address for the device */ - lp->base_addr = ioremap(r_mem.start, resource_size(&r_mem)); - if (NULL == lp->base_addr) { - dev_err(dev, "EmacLite: Could not allocate iomem\n"); - rc = -EIO; - goto error1; - } + ndev->irq = res->start; + + res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); + lp->base_addr = devm_request_and_ioremap(&ofdev->dev, res); + if (!lp->base_addr) + goto error; + + ndev->mem_start = res->start; + ndev->mem_end = res->end; spin_lock_init(&lp->reset_lock); lp->next_tx_buf_to_use = 0x0; @@ -1219,7 +1201,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) if (rc) { dev_err(dev, "Cannot register network device, aborting\n"); - goto error1; + goto error; } dev_info(dev, @@ -1228,11 +1210,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev) (unsigned int __force)lp->base_addr, ndev->irq); return 0; -error1: - release_mem_region(ndev->mem_start, resource_size(&r_mem)); - -error2: - xemaclite_remove_ndev(ndev); +error: + xemaclite_remove_ndev(ndev, ofdev); 
return rc; } @@ -1267,9 +1246,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev) of_node_put(lp->phy_node); lp->phy_node = NULL; - release_mem_region(ndev->mem_start, ndev->mem_end-ndev->mem_start + 1); - - xemaclite_remove_ndev(ndev); + xemaclite_remove_ndev(ndev, of_dev); dev_set_drvdata(dev, NULL); return 0; -- cgit v1.2.3 From aff5c3557cf72fcad913d8b4b44b055a4e034bb5 Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Tue, 4 Jun 2013 00:31:19 +0000 Subject: net: sun4i-emac: Remove redundant platform_set_drvdata() Commit 0998d06310 (device-core: Ensure drvdata = NULL when no driver is bound) removes the need to set driver data field to NULL. Signed-off-by: Sachin Kamat Cc: Stefan Roese Cc: Maxime Ripard Signed-off-by: David S. Miller --- drivers/net/ethernet/allwinner/sun4i-emac.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 0bb2f4af9952..e9c8dbe4c88f 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -898,8 +898,6 @@ static int emac_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); - platform_set_drvdata(pdev, NULL); - unregister_netdev(ndev); free_netdev(ndev); -- cgit v1.2.3 From 11a164a04382d735230b01f4cc46ad78a7c4abf6 Mon Sep 17 00:00:00 2001 From: Sachin Kamat Date: Tue, 4 Jun 2013 00:31:20 +0000 Subject: net: sun4i-emac: Staticize local symbols Some symbols referenced only in this file are made static. Signed-off-by: Sachin Kamat Cc: Stefan Roese Cc: Maxime Ripard Signed-off-by: David S. Miller --- drivers/net/ethernet/allwinner/sun4i-emac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index e9c8dbe4c88f..50b853a79d77 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -258,7 +258,7 @@ static const struct ethtool_ops emac_ethtool_ops = { .get_link = ethtool_op_get_link, }; -unsigned int emac_setup(struct net_device *ndev) +static unsigned int emac_setup(struct net_device *ndev) { struct emac_board_info *db = netdev_priv(ndev); unsigned int reg_val; @@ -310,7 +310,7 @@ unsigned int emac_setup(struct net_device *ndev) return 0; } -unsigned int emac_powerup(struct net_device *ndev) +static unsigned int emac_powerup(struct net_device *ndev) { struct emac_board_info *db = netdev_priv(ndev); unsigned int reg_val; -- cgit v1.2.3 From 41d5ffeb9d12f760722f00ba0756389d32435124 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Thu, 6 Jun 2013 09:43:16 +0000 Subject: sh_eth: remove #ifdef around EDSR and GECMR bit definitions Remove #ifdef around 'enum EDSR_BIT' and 'enum GECMR_BIT', replacing it with the comments on which SoCs these registers exist. SH7757 also has EDSR, so add a comment about it to 'enum EDSR_BIT'. Signed-off-by: Nobuhiro Iwamatsu [Sergei: folded in the former patch #2, updated the changelog, reworded the subject, changing the prefix.] Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.h | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 1ddc9f235bcb..8daba1d70539 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -166,19 +166,16 @@ enum { /* * Register's bits */ -#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) ||\ - defined(CONFIG_ARCH_R8A7740) -/* EDSR */ +/* EDSR : sh7734, sh7757, sh7763, and r8a7740 only */ enum EDSR_BIT { EDSR_ENT = 0x01, EDSR_ENR = 0x02, }; #define EDSR_ENALL (EDSR_ENT|EDSR_ENR) -/* GECMR */ +/* GECMR : sh7734, sh7763 and r8a7740 only */ enum GECMR_BIT { GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01, }; -#endif /* EDMR */ enum DMAC_M_BIT { -- cgit v1.2.3 From ddcd91c6b8ec60a6bd5d3691d2cfaf4447215da1 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Thu, 6 Jun 2013 09:44:27 +0000 Subject: sh_eth: use EDSR_ENALL to set EDSR Use now always available EDSR_ENALL instead of the bare number to set EDSR. Signed-off-by: Nobuhiro Iwamatsu [Sergei: added the changelog, reworded the subject, changing the prefix.] Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 399a7c968e0d..8b7ff6a03699 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -523,7 +523,7 @@ static int sh_eth_reset(struct net_device *ndev) int ret = 0; if (sh_eth_is_gether(mdp)) { - sh_eth_write(ndev, 0x03, EDSR); + sh_eth_write(ndev, EDSR_ENALL, EDSR); sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); -- cgit v1.2.3 From 04b0ed2ad49834fe47d3c4171ad189c688a222b9 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Thu, 6 Jun 2013 09:45:25 +0000 Subject: sh_eth: remove duplicate sh_eth_set_duplex() definitions Remove all the duplicate definitions of sh_eth_set_duplex() under different #ifdef's, leaving only one outside the #ifdef's. We have to annotate it with '__maybe_unused' since it's called not from all SoC #ifdef blocks. Signed-off-by: Nobuhiro Iwamatsu [Sergei: annotated sh_eth_set_duplex() as '__maybe_unused', added the changelog, reworded the subject, changing the prefix.] Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 59 +++-------------------------------- 1 file changed, 5 insertions(+), 54 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 8b7ff6a03699..856ea881b45c 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -341,10 +341,7 @@ static void sh_eth_select_mii(struct net_device *ndev) } #endif -/* There is CPU dependent code */ -#if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779) -#define SH_ETH_RESET_DEFAULT 1 -static void sh_eth_set_duplex(struct net_device *ndev) +static void __maybe_unused sh_eth_set_duplex(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -354,6 +351,9 @@ static void sh_eth_set_duplex(struct net_device *ndev) sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); } +/* There is CPU dependent code */ +#if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779) +#define SH_ETH_RESET_DEFAULT 1 static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -391,15 +391,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { }; #elif defined(CONFIG_CPU_SUBTYPE_SH7724) #define SH_ETH_RESET_DEFAULT 1 -static void sh_eth_set_duplex(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -} static void sh_eth_set_rate(struct net_device *ndev) { @@ -443,16 +434,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { #define SH_ETH_HAS_TSU 1 static int sh_eth_check_reset(struct net_device *ndev); -static void sh_eth_set_duplex(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -} - static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -552,16 +533,6 @@ out: return ret; } -static void sh_eth_set_duplex_giga(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -} - static void sh_eth_set_rate_giga(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -584,7 +555,7 @@ static void sh_eth_set_rate_giga(struct net_device *ndev) /* SH7757(GETHERC) */ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { .chip_reset = sh_eth_chip_reset_giga, - .set_duplex = sh_eth_set_duplex_giga, + .set_duplex = sh_eth_set_duplex, .set_rate = sh_eth_set_rate_giga, .ecsr_value = ECSR_ICD | ECSR_MPD, @@ -634,16 +605,6 @@ static void sh_eth_chip_reset(struct net_device *ndev) mdelay(1); } -static void sh_eth_set_duplex(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -} - static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -771,16 +732,6 @@ out: return ret; } -static void sh_eth_set_duplex(struct 
net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -} - static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); -- cgit v1.2.3 From 9f86134155047720a3685cda21467f68695152d2 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Thu, 6 Jun 2013 09:48:09 +0000 Subject: sh_eth: remove SH_ETH_HAS_TSU Remove SH_ETH_HAS_TSU #define's and #ifdef's. Set three 'struct net_device_ops' methods that depend on the presence of TSU basing on the 'tsu' field of 'struct sh_eth_cpu_data'. Signed-off-by: Nobuhiro Iwamatsu [Sergei: made two method assignments one-liners, added the changelog, reworded the subject, changing the prefix.] Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 856ea881b45c..f8d60947fa72 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -431,7 +431,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { }; #elif defined(CONFIG_CPU_SUBTYPE_SH7757) #define SH_ETH_HAS_BOTH_MODULES 1 -#define SH_ETH_HAS_TSU 1 static int sh_eth_check_reset(struct net_device *ndev); static void sh_eth_set_rate(struct net_device *ndev) @@ -592,7 +591,6 @@ static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp) } #elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) -#define SH_ETH_HAS_TSU 1 static int sh_eth_check_reset(struct net_device *ndev); static void sh_eth_reset_hw_crc(struct net_device *ndev); @@ -693,7 +691,6 @@ static void sh_eth_reset_hw_crc(struct net_device *ndev) } #elif defined(CONFIG_ARCH_R8A7740) -#define SH_ETH_HAS_TSU 1 static int sh_eth_check_reset(struct net_device *ndev); static void sh_eth_chip_reset(struct net_device *ndev) @@ -791,7 +788,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { }; #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) #define SH_ETH_RESET_DEFAULT 1 -#define SH_ETH_HAS_TSU 1 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tsu = 1, @@ -2105,7 +2101,6 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, return phy_mii_ioctl(phydev, rq, cmd); } -#if defined(SH_ETH_HAS_TSU) /* For TSU_POSTn. 
Please refer to the manual about this (strange) bitfields */ static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, int entry) @@ -2448,7 +2443,6 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, return 0; } -#endif /* SH_ETH_HAS_TSU */ /* SuperH's TSU register init function */ static void sh_eth_tsu_init(struct sh_eth_private *mdp) @@ -2587,16 +2581,11 @@ static const u16 *sh_eth_get_register_offset(int register_type) return reg_offset; } -static const struct net_device_ops sh_eth_netdev_ops = { +static struct net_device_ops sh_eth_netdev_ops = { .ndo_open = sh_eth_open, .ndo_stop = sh_eth_close, .ndo_start_xmit = sh_eth_start_xmit, .ndo_get_stats = sh_eth_get_stats, -#if defined(SH_ETH_HAS_TSU) - .ndo_set_rx_mode = sh_eth_set_multicast_list, - .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, -#endif .ndo_tx_timeout = sh_eth_tx_timeout, .ndo_do_ioctl = sh_eth_do_ioctl, .ndo_validate_addr = eth_validate_addr, @@ -2677,6 +2666,13 @@ static int sh_eth_drv_probe(struct platform_device *pdev) sh_eth_set_default_cpu_data(mdp->cd); /* set function */ + if (mdp->cd->tsu) { + sh_eth_netdev_ops.ndo_set_rx_mode = sh_eth_set_multicast_list; + sh_eth_netdev_ops.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid; + sh_eth_netdev_ops.ndo_vlan_rx_kill_vid = + sh_eth_vlan_rx_kill_vid; + } + ndev->netdev_ops = &sh_eth_netdev_ops; SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); ndev->watchdog_timeo = TX_TIMEOUT; -- cgit v1.2.3 From 5b3dfd13ae8bdebea67c02612fe282baff850bb0 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Thu, 6 Jun 2013 09:49:30 +0000 Subject: sh_eth: add IRQ flags to 'struct sh_eth_cpu_data' The driver supports some SH and SH-Mobile SOCs. There are SOCs with two or more Ethernet devices, for these we need to pass IRQF_SHARED to request_irq(). Add the 'irq_flags' field to the 'struct sh_eth_cpu_data' instead of #ifdef'fery. Signed-off-by: Nobuhiro Iwamatsu [Sergei: properly aligned request_irq() call continuation line, reworded the changelog, reworded the subject, changing the prefix.] Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 13 +++++-------- drivers/net/ethernet/renesas/sh_eth.h | 1 + 2 files changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f8d60947fa72..d0151d02416f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -462,6 +462,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, + .irq_flags = IRQF_SHARED, .apr = 1, .mpr = 1, .tpauser = 1, @@ -570,6 +571,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { .fdr_value = 0x0000072f, .rmcr_value = 0x00000001, + .irq_flags = IRQF_SHARED, .apr = 1, .mpr = 1, .tpauser = 1, @@ -650,6 +652,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { #if defined(CONFIG_CPU_SUBTYPE_SH7734) .hw_crc = 1, .select_mii = 1, +#else + .irq_flags = IRQF_SHARED, #endif }; @@ -1908,14 +1912,7 @@ static int sh_eth_open(struct net_device *ndev) pm_runtime_get_sync(&mdp->pdev->dev); ret = request_irq(ndev->irq, sh_eth_interrupt, -#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_CPU_SUBTYPE_SH7764) || \ - defined(CONFIG_CPU_SUBTYPE_SH7757) - IRQF_SHARED, -#else - 0, -#endif - ndev->name, ndev); + mdp->cd->irq_flags, ndev->name, ndev); if (ret) { dev_err(&ndev->dev, "Can not assign IRQ number\n"); return ret; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 8daba1d70539..729e77e66f19 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -463,6 +463,7 @@ struct sh_eth_cpu_data { unsigned long tx_error_check; /* hardware features */ + unsigned long irq_flags; /* IRQ configuration flags */ unsigned no_psr:1; /* EtherC DO NOT have PSR */ unsigned apr:1; /* EtherC have APR */ unsigned mpr:1; /* EtherC have MPR */ -- cgit v1.2.3 From b7feacf1ee6944fc571802d58dcffaf13b4fb4cc Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Thu, 6 Jun 2013 09:50:30 +0000 Subject: sh_eth: remove #ifdef around sh_eth_select_mii() We can simply remove #ifdef'fery around sh_eth_select_mii(). We have to annotate it with '__maybe_unused' then. Signed-off-by: Nobuhiro Iwamatsu [Sergei: added the changelog, reworded the subject, changing the prefix.] Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d0151d02416f..2234a945ae5b 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -313,10 +313,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { [TSU_ADRL31] = 0x01fc, }; -#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \ - defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_ARCH_R8A7740) -static void sh_eth_select_mii(struct net_device *ndev) +static void __maybe_unused sh_eth_select_mii(struct net_device *ndev) { u32 value = 0x0; struct sh_eth_private *mdp = netdev_priv(ndev); @@ -339,7 +336,6 @@ static void sh_eth_select_mii(struct net_device *ndev) sh_eth_write(ndev, value, RMII_MII); } -#endif static void __maybe_unused sh_eth_set_duplex(struct net_device *ndev) { -- cgit v1.2.3 From dabdde9ea77d4a77094726c82468a3bd767fb29b Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Thu, 6 Jun 2013 09:51:39 +0000 Subject: sh_eth: consolidate sh_eth_reset() This driver has sh_eth_reset() function for each SoC and this function is almost always the same, except for the several a bit different variations for Gigabit Ethernet. Consolidate every variation into a single function -- which allows us to get rid of some more #ifdef'fery. Signed-off-by: Nobuhiro Iwamatsu [Sergei: moved the new sh_eth_reset() and sh_eth_is_gether() up to decrease the patch size, fixed function call continuation lines' indentation, reworded the changelog, reworded the subject, changing the prefix.] Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 176 ++++++++++------------------------ 1 file changed, 51 insertions(+), 125 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 2234a945ae5b..d190c1e0b0c0 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -313,6 +313,14 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { [TSU_ADRL31] = 0x01fc, }; +static int sh_eth_is_gether(struct sh_eth_private *mdp) +{ + if (mdp->reg_offset == sh_eth_offset_gigabit) + return 1; + else + return 0; +} + static void __maybe_unused sh_eth_select_mii(struct net_device *ndev) { u32 value = 0x0; @@ -349,7 +357,6 @@ static void __maybe_unused sh_eth_set_duplex(struct net_device *ndev) /* There is CPU dependent code */ #if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779) -#define SH_ETH_RESET_DEFAULT 1 static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -386,7 +393,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .hw_swap = 1, }; #elif defined(CONFIG_CPU_SUBTYPE_SH7724) -#define SH_ETH_RESET_DEFAULT 1 static void sh_eth_set_rate(struct net_device *ndev) { @@ -427,7 +433,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { }; #elif defined(CONFIG_CPU_SUBTYPE_SH7757) #define SH_ETH_HAS_BOTH_MODULES 1 -static int sh_eth_check_reset(struct net_device *ndev); static void sh_eth_set_rate(struct net_device *ndev) { @@ -493,42 +498,6 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev) } } -static int sh_eth_is_gether(struct sh_eth_private *mdp); -static int sh_eth_reset(struct net_device *ndev) -{ - struct sh_eth_private *mdp = 
netdev_priv(ndev); - int ret = 0; - - if (sh_eth_is_gether(mdp)) { - sh_eth_write(ndev, EDSR_ENALL, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, - EDMR); - - ret = sh_eth_check_reset(ndev); - if (ret) - goto out; - - /* Table Init */ - sh_eth_write(ndev, 0x0, TDLAR); - sh_eth_write(ndev, 0x0, TDFAR); - sh_eth_write(ndev, 0x0, TDFXR); - sh_eth_write(ndev, 0x0, TDFFR); - sh_eth_write(ndev, 0x0, RDLAR); - sh_eth_write(ndev, 0x0, RDFAR); - sh_eth_write(ndev, 0x0, RDFXR); - sh_eth_write(ndev, 0x0, RDFFR); - } else { - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, - EDMR); - mdelay(3); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, - EDMR); - } - -out: - return ret; -} - static void sh_eth_set_rate_giga(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -589,8 +558,6 @@ static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp) } #elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) -static int sh_eth_check_reset(struct net_device *ndev); -static void sh_eth_reset_hw_crc(struct net_device *ndev); static void sh_eth_chip_reset(struct net_device *ndev) { @@ -653,45 +620,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { #endif }; -static int sh_eth_reset(struct net_device *ndev) -{ - int ret = 0; - - sh_eth_write(ndev, EDSR_ENALL, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); - - ret = sh_eth_check_reset(ndev); - if (ret) - goto out; - - /* Table Init */ - sh_eth_write(ndev, 0x0, TDLAR); - sh_eth_write(ndev, 0x0, TDFAR); - sh_eth_write(ndev, 0x0, TDFXR); - sh_eth_write(ndev, 0x0, TDFFR); - sh_eth_write(ndev, 0x0, RDLAR); - sh_eth_write(ndev, 0x0, RDFAR); - sh_eth_write(ndev, 0x0, RDFXR); - sh_eth_write(ndev, 0x0, RDFFR); - - /* Reset HW CRC register */ - sh_eth_reset_hw_crc(ndev); - - /* Select MII mode */ - if (sh_eth_my_cpu_data.select_mii) - sh_eth_select_mii(ndev); -out: - return ret; -} - -static void sh_eth_reset_hw_crc(struct net_device *ndev) -{ - if (sh_eth_my_cpu_data.hw_crc) - sh_eth_write(ndev, 0x0, CSMR); -} #elif defined(CONFIG_ARCH_R8A7740) -static int sh_eth_check_reset(struct net_device *ndev); static void sh_eth_chip_reset(struct net_device *ndev) { @@ -704,31 +634,6 @@ static void sh_eth_chip_reset(struct net_device *ndev) sh_eth_select_mii(ndev); } -static int sh_eth_reset(struct net_device *ndev) -{ - int ret = 0; - - sh_eth_write(ndev, EDSR_ENALL, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); - - ret = sh_eth_check_reset(ndev); - if (ret) - goto out; - - /* Table Init */ - sh_eth_write(ndev, 0x0, TDLAR); - sh_eth_write(ndev, 0x0, TDFAR); - sh_eth_write(ndev, 0x0, TDFXR); - sh_eth_write(ndev, 0x0, TDFFR); - sh_eth_write(ndev, 0x0, RDLAR); - sh_eth_write(ndev, 0x0, RDFAR); - sh_eth_write(ndev, 0x0, RDFXR); - sh_eth_write(ndev, 0x0, RDFFR); - -out: - return ret; -} - static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -777,7 +682,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { }; #elif defined(CONFIG_CPU_SUBTYPE_SH7619) -#define SH_ETH_RESET_DEFAULT 1 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, @@ -787,7 +691,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .hw_swap = 1, }; #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) -#define SH_ETH_RESET_DEFAULT 1 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tsu = 1, @@ -822,17 +725,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) cd->tx_error_check = DEFAULT_TX_ERROR_CHECK; } -#if defined(SH_ETH_RESET_DEFAULT) -/* Chip Reset */ -static int sh_eth_reset(struct net_device *ndev) -{ - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR); - mdelay(3); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR); - - return 0; -} -#else static int sh_eth_check_reset(struct net_device *ndev) { int ret = 0; @@ -850,7 +742,49 @@ static int sh_eth_check_reset(struct net_device *ndev) } return ret; } -#endif + +static int sh_eth_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret = 0; + + if (sh_eth_is_gether(mdp)) { + sh_eth_write(ndev, EDSR_ENALL, EDSR); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, + EDMR); + + ret = sh_eth_check_reset(ndev); + if (ret) + goto out; + + /* Table Init */ + sh_eth_write(ndev, 0x0, TDLAR); + sh_eth_write(ndev, 0x0, TDFAR); + sh_eth_write(ndev, 0x0, TDFXR); + sh_eth_write(ndev, 0x0, TDFFR); + sh_eth_write(ndev, 0x0, RDLAR); + sh_eth_write(ndev, 0x0, RDFAR); + sh_eth_write(ndev, 0x0, RDFXR); + sh_eth_write(ndev, 0x0, RDFFR); + + /* Reset HW CRC register */ + if (mdp->cd->hw_crc) + sh_eth_write(ndev, 0x0, CSMR); + + /* Select MII mode */ + if (mdp->cd->select_mii) + sh_eth_select_mii(ndev); + } else { + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, + EDMR); + mdelay(3); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, + EDMR); + } + +out: + return ret; +} #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) static void sh_eth_set_receive_align(struct sk_buff *skb) @@ -926,14 +860,6 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac) } } -static int sh_eth_is_gether(struct sh_eth_private *mdp) -{ - if (mdp->reg_offset == sh_eth_offset_gigabit) - return 1; - else - return 0; -} - static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) { if (sh_eth_is_gether(mdp)) -- cgit v1.2.3 From 540ad1b888ad9564520c1c8c48ad675f76ffce62 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Thu, 6 Jun 2013 09:52:37 +0000 Subject: sh_eth: enclose PM code into #ifdef CONFIG_PM Put '#ifdef CONFIG_PM' around sh_eth_runtime_nop() and 'sh_eth_dev_pm_ops'. Add '#define SH_ETH_PM_OPS' to facilitate initialization of driver's 'pm' field depending on whether CONFIG_PM is enabled. Signed-off-by: Nobuhiro Iwamatsu [Sergei: added the changelog, reworded the subject, changing the prefix.] Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d190c1e0b0c0..0699b1ec7e32 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2673,6 +2673,7 @@ static int sh_eth_drv_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM static int sh_eth_runtime_nop(struct device *dev) { /* @@ -2686,17 +2687,21 @@ static int sh_eth_runtime_nop(struct device *dev) return 0; } -static struct dev_pm_ops sh_eth_dev_pm_ops = { +static const struct dev_pm_ops sh_eth_dev_pm_ops = { .runtime_suspend = sh_eth_runtime_nop, .runtime_resume = sh_eth_runtime_nop, }; +#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops) +#else +#define SH_ETH_PM_OPS NULL +#endif static struct platform_driver sh_eth_driver = { .probe = sh_eth_drv_probe, .remove = sh_eth_drv_remove, .driver = { .name = CARDNAME, - .pm = &sh_eth_dev_pm_ops, + .pm = SH_ETH_PM_OPS, }, }; -- cgit v1.2.3 From 1b44791ab4ed27e6fa69f5dfa81b0fd48b1d050d Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Tue, 4 Jun 2013 21:57:11 +0000 Subject: net/macb: increase RX buffer size for GEM Macb Ethernet controller requires a RX buffer of 128 bytes. It is highly sub-optimal for Gigabit-capable GEM that is able to use a bigger DMA buffer. Change this constant and associated macros with data stored in the private structure. RX DMA buffer size has to be multiple of 64 bytes as indicated in DMA Configuration Register specification. Signed-off-by: Nicolas Ferre Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb.c | 45 ++++++++++++++++++++++++++++++------- drivers/net/ethernet/cadence/macb.h | 1 + 2 files changed, 38 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 4465e27ba9d1..aab7bb22b1d0 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -32,7 +32,9 @@ #include "macb.h" -#define RX_BUFFER_SIZE 128 +#define MACB_RX_BUFFER_SIZE 128 +#define GEM_RX_BUFFER_SIZE 2048 +#define RX_BUFFER_MULTIPLE 64 /* bytes */ #define RX_RING_SIZE 512 /* must be power of 2 */ #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) @@ -92,7 +94,7 @@ static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) static void *macb_rx_buffer(struct macb *bp, unsigned int index) { - return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index); + return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); } void macb_set_hwaddr(struct macb *bp) @@ -575,7 +577,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, skb_put(skb, len); for (frag = first_frag; ; frag++) { - unsigned int frag_len = RX_BUFFER_SIZE; + unsigned int frag_len = bp->rx_buffer_size; if (offset + frag_len > len) { BUG_ON(frag != last_frag); @@ -583,7 +585,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, } skb_copy_to_linear_data_offset(skb, offset, macb_rx_buffer(bp, frag), frag_len); - offset += RX_BUFFER_SIZE; + offset += bp->rx_buffer_size; desc = macb_rx_desc(bp, frag); desc->addr &= ~MACB_BIT(RX_USED); @@ -870,6 +872,30 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } +static void macb_init_rx_buffer_size(struct macb *bp) +{ + if (!macb_is_gem(bp)) { + bp->rx_buffer_size = 
MACB_RX_BUFFER_SIZE; + } else { + bp->rx_buffer_size = GEM_RX_BUFFER_SIZE; + + if (bp->rx_buffer_size > PAGE_SIZE) { + netdev_warn(bp->dev, + "RX buffer cannot be bigger than PAGE_SIZE, shrinking\n"); + bp->rx_buffer_size = PAGE_SIZE; + } + if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { + netdev_warn(bp->dev, + "RX buffer must be multiple of %d bytes, shrinking\n", + RX_BUFFER_MULTIPLE); + bp->rx_buffer_size = + rounddown(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); + } + bp->rx_buffer_size = max(RX_BUFFER_MULTIPLE, GEM_RX_BUFFER_SIZE); + } +} + + static void macb_free_consistent(struct macb *bp) { if (bp->tx_skb) { @@ -888,7 +914,7 @@ static void macb_free_consistent(struct macb *bp) } if (bp->rx_buffers) { dma_free_coherent(&bp->pdev->dev, - RX_RING_SIZE * RX_BUFFER_SIZE, + RX_RING_SIZE * bp->rx_buffer_size, bp->rx_buffers, bp->rx_buffers_dma); bp->rx_buffers = NULL; } @@ -921,7 +947,7 @@ static int macb_alloc_consistent(struct macb *bp) "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); - size = RX_RING_SIZE * RX_BUFFER_SIZE; + size = RX_RING_SIZE * bp->rx_buffer_size; bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, &bp->rx_buffers_dma, GFP_KERNEL); if (!bp->rx_buffers) @@ -946,7 +972,7 @@ static void macb_init_rings(struct macb *bp) for (i = 0; i < RX_RING_SIZE; i++) { bp->rx_ring[i].addr = addr; bp->rx_ring[i].ctrl = 0; - addr += RX_BUFFER_SIZE; + addr += bp->rx_buffer_size; } bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); @@ -1056,7 +1082,7 @@ static void macb_configure_dma(struct macb *bp) if (macb_is_gem(bp)) { dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); - dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64); + dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE); dmacfg |= GEM_BF(FBLDO, 16); dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); dmacfg &= ~GEM_BIT(ENDIA); @@ -1244,6 +1270,9 @@ static int macb_open(struct net_device *dev) if (!bp->phy_dev) return -EAGAIN; + /* RX buffers initialization */ + macb_init_rx_buffer_size(bp); + err = macb_alloc_consistent(bp); if (err) { netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 548c0ecae869..9b5e19f8b61d 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -551,6 +551,7 @@ struct macb { unsigned int rx_tail; struct macb_dma_desc *rx_ring; void *rx_buffers; + size_t rx_buffer_size; unsigned int tx_head, tx_tail; struct macb_dma_desc *tx_ring; -- cgit v1.2.3 From 4df95131ea803bcb94f472d465c73ed57015c470 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Tue, 4 Jun 2013 21:57:12 +0000 Subject: net/macb: change RX path for GEM GEM is able to adapt its DMA buffer size, so change the RX path to take advantage of this possibility and remove all kind of memcpy in this path. This modification introduces function pointers for managing differences between MACB and GEM adapter type. Signed-off-by: Nicolas Ferre Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb.c | 308 ++++++++++++++++++++++++++++++------ drivers/net/ethernet/cadence/macb.h | 13 ++ 2 files changed, 272 insertions(+), 49 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index aab7bb22b1d0..f7e21f278e05 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -33,7 +33,6 @@ #include "macb.h" #define MACB_RX_BUFFER_SIZE 128 -#define GEM_RX_BUFFER_SIZE 2048 #define RX_BUFFER_MULTIPLE 64 /* bytes */ #define RX_RING_SIZE 512 /* must be power of 2 */ #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) @@ -530,6 +529,155 @@ static void macb_tx_interrupt(struct macb *bp) netif_wake_queue(bp->dev); } +static void gem_rx_refill(struct macb *bp) +{ + unsigned int entry; + struct sk_buff *skb; + struct macb_dma_desc *desc; + dma_addr_t paddr; + + while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { + u32 addr, ctrl; + + entry = macb_rx_ring_wrap(bp->rx_prepared_head); + desc = &bp->rx_ring[entry]; + + /* Make hw descriptor updates visible to CPU */ + rmb(); + + addr = desc->addr; + ctrl = desc->ctrl; + bp->rx_prepared_head++; + + if ((addr & MACB_BIT(RX_USED))) + continue; + + if (bp->rx_skbuff[entry] == NULL) { + /* allocate sk_buff for this free entry in ring */ + skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); + if (unlikely(skb == NULL)) { + netdev_err(bp->dev, + "Unable to allocate sk_buff\n"); + break; + } + bp->rx_skbuff[entry] = skb; + + /* now fill corresponding descriptor entry */ + paddr = dma_map_single(&bp->pdev->dev, skb->data, + bp->rx_buffer_size, DMA_FROM_DEVICE); + + if (entry == RX_RING_SIZE - 1) + paddr |= MACB_BIT(RX_WRAP); + bp->rx_ring[entry].addr = paddr; + bp->rx_ring[entry].ctrl = 0; + + /* properly align Ethernet header */ + skb_reserve(skb, NET_IP_ALIGN); + } + } + + /* Make descriptor updates visible to hardware */ + wmb(); + + netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", + bp->rx_prepared_head, bp->rx_tail); +} + +/* Mark DMA descriptors from begin up to and not including end as unused */ +static void discard_partial_frame(struct macb *bp, unsigned int begin, + unsigned int end) +{ + unsigned int frag; + + for (frag = begin; frag != end; frag++) { + struct macb_dma_desc *desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); + } + + /* Make descriptor updates visible to hardware */ + wmb(); + + /* + * When this happens, the hardware stats registers for + * whatever caused this is updated, so we don't have to record + * anything. 
+ */ +} + +static int gem_rx(struct macb *bp, int budget) +{ + unsigned int len; + unsigned int entry; + struct sk_buff *skb; + struct macb_dma_desc *desc; + int count = 0; + + while (count < budget) { + u32 addr, ctrl; + + entry = macb_rx_ring_wrap(bp->rx_tail); + desc = &bp->rx_ring[entry]; + + /* Make hw descriptor updates visible to CPU */ + rmb(); + + addr = desc->addr; + ctrl = desc->ctrl; + + if (!(addr & MACB_BIT(RX_USED))) + break; + + desc->addr &= ~MACB_BIT(RX_USED); + bp->rx_tail++; + count++; + + if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { + netdev_err(bp->dev, + "not whole frame pointed by descriptor\n"); + bp->stats.rx_dropped++; + break; + } + skb = bp->rx_skbuff[entry]; + if (unlikely(!skb)) { + netdev_err(bp->dev, + "inconsistent Rx descriptor chain\n"); + bp->stats.rx_dropped++; + break; + } + /* now everything is ready for receiving packet */ + bp->rx_skbuff[entry] = NULL; + len = MACB_BFEXT(RX_FRMLEN, ctrl); + + netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); + + skb_put(skb, len); + addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); + dma_unmap_single(&bp->pdev->dev, addr, + len, DMA_FROM_DEVICE); + + skb->protocol = eth_type_trans(skb, bp->dev); + skb_checksum_none_assert(skb); + + bp->stats.rx_packets++; + bp->stats.rx_bytes += skb->len; + +#if defined(DEBUG) && defined(VERBOSE_DEBUG) + netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", + skb->len, skb->csum); + print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, + skb->mac_header, 16, true); + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, + skb->data, 32, true); +#endif + + netif_receive_skb(skb); + } + + gem_rx_refill(bp); + + return count; +} + static int macb_rx_frame(struct macb *bp, unsigned int first_frag, unsigned int last_frag) { @@ -608,27 +756,6 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, return 0; } -/* Mark DMA descriptors from begin up to and not including end as unused */ -static void discard_partial_frame(struct macb *bp, unsigned int begin, - unsigned int end) -{ - unsigned int frag; - - for (frag = begin; frag != end; frag++) { - struct macb_dma_desc *desc = macb_rx_desc(bp, frag); - desc->addr &= ~MACB_BIT(RX_USED); - } - - /* Make descriptor updates visible to hardware */ - wmb(); - - /* - * When this happens, the hardware stats registers for - * whatever caused this is updated, so we don't have to record - * anything. 
- */ -} - static int macb_rx(struct macb *bp, int budget) { int received = 0; @@ -689,7 +816,7 @@ static int macb_poll(struct napi_struct *napi, int budget) netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", (unsigned long)status, budget); - work_done = macb_rx(bp, budget); + work_done = bp->macbgem_ops.mog_rx(bp, budget); if (work_done < budget) { napi_complete(napi); @@ -872,29 +999,63 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static void macb_init_rx_buffer_size(struct macb *bp) +static void macb_init_rx_buffer_size(struct macb *bp, size_t size) { if (!macb_is_gem(bp)) { bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; } else { - bp->rx_buffer_size = GEM_RX_BUFFER_SIZE; + bp->rx_buffer_size = size; - if (bp->rx_buffer_size > PAGE_SIZE) { - netdev_warn(bp->dev, - "RX buffer cannot be bigger than PAGE_SIZE, shrinking\n"); - bp->rx_buffer_size = PAGE_SIZE; - } if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { - netdev_warn(bp->dev, - "RX buffer must be multiple of %d bytes, shrinking\n", + netdev_dbg(bp->dev, + "RX buffer must be multiple of %d bytes, expanding\n", RX_BUFFER_MULTIPLE); bp->rx_buffer_size = - rounddown(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); + roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); } - bp->rx_buffer_size = max(RX_BUFFER_MULTIPLE, GEM_RX_BUFFER_SIZE); } + + netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n", + bp->dev->mtu, bp->rx_buffer_size); } +static void gem_free_rx_buffers(struct macb *bp) +{ + struct sk_buff *skb; + struct macb_dma_desc *desc; + dma_addr_t addr; + int i; + + if (!bp->rx_skbuff) + return; + + for (i = 0; i < RX_RING_SIZE; i++) { + skb = bp->rx_skbuff[i]; + + if (skb == NULL) + continue; + + desc = &bp->rx_ring[i]; + addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); + dma_unmap_single(&bp->pdev->dev, addr, skb->len, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + skb = NULL; + } + + kfree(bp->rx_skbuff); + bp->rx_skbuff = NULL; +} + +static void macb_free_rx_buffers(struct macb *bp) +{ + if (bp->rx_buffers) { + dma_free_coherent(&bp->pdev->dev, + RX_RING_SIZE * bp->rx_buffer_size, + bp->rx_buffers, bp->rx_buffers_dma); + bp->rx_buffers = NULL; + } +} static void macb_free_consistent(struct macb *bp) { @@ -902,6 +1063,7 @@ static void macb_free_consistent(struct macb *bp) kfree(bp->tx_skb); bp->tx_skb = NULL; } + bp->macbgem_ops.mog_free_rx_buffers(bp); if (bp->rx_ring) { dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, bp->rx_ring, bp->rx_ring_dma); @@ -912,12 +1074,37 @@ static void macb_free_consistent(struct macb *bp) bp->tx_ring, bp->tx_ring_dma); bp->tx_ring = NULL; } - if (bp->rx_buffers) { - dma_free_coherent(&bp->pdev->dev, - RX_RING_SIZE * bp->rx_buffer_size, - bp->rx_buffers, bp->rx_buffers_dma); - bp->rx_buffers = NULL; - } +} + +static int gem_alloc_rx_buffers(struct macb *bp) +{ + int size; + + size = RX_RING_SIZE * sizeof(struct sk_buff *); + bp->rx_skbuff = kzalloc(size, GFP_KERNEL); + if (!bp->rx_skbuff) + return -ENOMEM; + else + netdev_dbg(bp->dev, + "Allocated %d RX struct sk_buff entries at %p\n", + RX_RING_SIZE, bp->rx_skbuff); + return 0; +} + +static int macb_alloc_rx_buffers(struct macb *bp) +{ + int size; + + size = RX_RING_SIZE * bp->rx_buffer_size; + bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, + &bp->rx_buffers_dma, GFP_KERNEL); + if (!bp->rx_buffers) + return -ENOMEM; + else + netdev_dbg(bp->dev, + "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", + size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); + return 0; } 
static int macb_alloc_consistent(struct macb *bp) @@ -947,14 +1134,8 @@ static int macb_alloc_consistent(struct macb *bp) "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); - size = RX_RING_SIZE * bp->rx_buffer_size; - bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, - &bp->rx_buffers_dma, GFP_KERNEL); - if (!bp->rx_buffers) + if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) goto out_err; - netdev_dbg(bp->dev, - "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", - size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); return 0; @@ -963,6 +1144,21 @@ out_err: return -ENOMEM; } +static void gem_init_rings(struct macb *bp) +{ + int i; + + for (i = 0; i < TX_RING_SIZE; i++) { + bp->tx_ring[i].addr = 0; + bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); + } + bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); + + bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0; + + gem_rx_refill(bp); +} + static void macb_init_rings(struct macb *bp) { int i; @@ -1259,6 +1455,7 @@ EXPORT_SYMBOL_GPL(macb_set_rx_mode); static int macb_open(struct net_device *dev) { struct macb *bp = netdev_priv(dev); + size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; int err; netdev_dbg(bp->dev, "open\n"); @@ -1271,7 +1468,7 @@ static int macb_open(struct net_device *dev) return -EAGAIN; /* RX buffers initialization */ - macb_init_rx_buffer_size(bp); + macb_init_rx_buffer_size(bp, bufsz); err = macb_alloc_consistent(bp); if (err) { @@ -1282,7 +1479,7 @@ static int macb_open(struct net_device *dev) napi_enable(&bp->napi); - macb_init_rings(bp); + bp->macbgem_ops.mog_init_rings(bp); macb_init_hw(bp); /* schedule a link state check */ @@ -1601,6 +1798,19 @@ static int __init macb_probe(struct platform_device *pdev) dev->base_addr = regs->start; + /* setup appropriated routines according to adapter type */ + if (macb_is_gem(bp)) { + bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; + bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; + bp->macbgem_ops.mog_init_rings = gem_init_rings; + bp->macbgem_ops.mog_rx = gem_rx; + } else { + bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; + bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; + bp->macbgem_ops.mog_init_rings = macb_init_rings; + bp->macbgem_ops.mog_rx = macb_rx; + } + /* Set MII management clock divider */ config = macb_mdc_clk_div(bp); config |= macb_dbw(bp); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 9b5e19f8b61d..f4076155bed7 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -545,11 +545,22 @@ struct gem_stats { u32 rx_udp_checksum_errors; }; +struct macb; + +struct macb_or_gem_ops { + int (*mog_alloc_rx_buffers)(struct macb *bp); + void (*mog_free_rx_buffers)(struct macb *bp); + void (*mog_init_rings)(struct macb *bp); + int (*mog_rx)(struct macb *bp, int budget); +}; + struct macb { void __iomem *regs; unsigned int rx_tail; + unsigned int rx_prepared_head; struct macb_dma_desc *rx_ring; + struct sk_buff **rx_skbuff; void *rx_buffers; size_t rx_buffer_size; @@ -574,6 +585,8 @@ struct macb { dma_addr_t tx_ring_dma; dma_addr_t rx_buffers_dma; + struct macb_or_gem_ops macbgem_ops; + struct mii_bus *mii_bus; struct phy_device *phy_dev; unsigned int link; -- cgit v1.2.3 From ca162a82f56921442f5db72a3a472010e5a62c4b Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 7 Jun 2013 10:48:00 +0000 Subject: fec: Only pass pdev in fec_ptp_init() 
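The conversion that follows relies on the standard platform drvdata pattern: probe() registers the net_device as the platform device's driver data, so any callee handed the pdev can recover it and the extra ndev argument becomes redundant. A minimal sketch of that pattern, with invented names (example_priv, example_ptp_init) rather than the driver's own code:

#include <linux/platform_device.h>
#include <linux/netdevice.h>

struct example_priv {
	int placeholder;	/* stands in for the driver's private state */
};

/* Assumes probe() has already called platform_set_drvdata(pdev, ndev). */
static void example_ptp_init(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct example_priv *priv = netdev_priv(ndev);

	/* ... timestamping setup would continue using priv ... */
	(void)priv;
}
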
Passing pdev in fec_ptp_init() is enough, since we can get ndev locally. Signed-off-by: Fabio Estevam Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec.h | 2 +- drivers/net/ethernet/freescale/fec_main.c | 2 +- drivers/net/ethernet/freescale/fec_ptp.c | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index b11cdbcc04ff..e3ed6c5ae801 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -275,7 +275,7 @@ struct fec_enet_private { struct regulator *reg_phy; }; -void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev); +void fec_ptp_init(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index c42c42a0cc57..fa4a68fc01cb 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1945,7 +1945,7 @@ fec_probe(struct platform_device *pdev) fec_reset_phy(pdev); if (fep->bufdesc_ex) - fec_ptp_init(ndev, pdev); + fec_ptp_init(pdev); ret = fec_enet_init(ndev); if (ret) diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 25fc960cbf0e..5007e4f9fff9 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -347,8 +347,9 @@ static void fec_time_keep(unsigned long _data) * cyclecounter init routine and exits. */ -void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev) +void fec_ptp_init(struct platform_device *pdev) { + struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); fep->ptp_caps.owner = THIS_MODULE; -- cgit v1.2.3 From afe391ad4b05f8f00ca955e165f3b37b480506b7 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 7 Jun 2013 13:54:02 +0000 Subject: sh_eth: create initial ID table We are trying to get away from the current driver's scheme of identifying a SoC based on #ifdef's and the platform device ID table matching seems to be a good replacement -- we can use the 'driver_data' field of 'struct platform_device_id' as a pointer to a 'struct sh_eth_cpu_data'. Start by creating the initial table with driver's name as the only entry without the driver data. Check the driver data in the probe() method and if it's not NULL override 'mdp->cd' from it. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 0699b1ec7e32..cf062de40b65 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2519,6 +2519,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) struct net_device *ndev = NULL; struct sh_eth_private *mdp = NULL; struct sh_eth_plat_data *pd = pdev->dev.platform_data; + const struct platform_device_id *id = platform_get_device_id(pdev); /* get base addr */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2582,6 +2583,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev) #else mdp->cd = &sh_eth_my_cpu_data; #endif + if (id->driver_data) + mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; sh_eth_set_default_cpu_data(mdp->cd); /* set function */ @@ -2696,9 +2699,16 @@ static const struct dev_pm_ops sh_eth_dev_pm_ops = { #define SH_ETH_PM_OPS NULL #endif +static struct platform_device_id sh_eth_id_table[] = { + { CARDNAME }, + { } +}; +MODULE_DEVICE_TABLE(platform, sh_eth_id_table); + static struct platform_driver sh_eth_driver = { .probe = sh_eth_drv_probe, .remove = sh_eth_drv_remove, + .id_table = sh_eth_id_table, .driver = { .name = CARDNAME, .pm = SH_ETH_PM_OPS, -- cgit v1.2.3 From 7bbe150d8c57c59689c8c50fd7b9915f4a7e10da Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 7 Jun 2013 13:55:08 +0000 Subject: sh_eth: get SH771x support out of #ifdef Get the SH771[02] data in the driver out of #ifdef by adding "sh771x-ether" to the platform driver's ID table. Change the Ether platform device's name in the SH platform code accordingly. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- arch/sh/boards/mach-se/770x/setup.c | 8 ++++---- drivers/net/ethernet/renesas/sh_eth.c | 7 ++++--- 2 files changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c index 9759d6ba7ffb..658326f44df8 100644 --- a/arch/sh/boards/mach-se/770x/setup.c +++ b/arch/sh/boards/mach-se/770x/setup.c @@ -128,8 +128,8 @@ static struct resource sh_eth0_resources[] = { }; static struct platform_device sh_eth0_device = { - .name = "sh-eth", - .id = 0, + .name = "sh771x-ether", + .id = 0, .dev = { .platform_data = PHY_ID, }, @@ -151,8 +151,8 @@ static struct resource sh_eth1_resources[] = { }; static struct platform_device sh_eth1_device = { - .name = "sh-eth", - .id = 1, + .name = "sh771x-ether", + .id = 1, .dev = { .platform_data = PHY_ID, }, diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index cf062de40b65..f26546aa1792 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -690,12 +690,12 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .tpauser = 1, .hw_swap = 1, }; -#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { +#endif + +static struct sh_eth_cpu_data sh771x_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tsu = 1, }; -#endif static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) { @@ -2700,6 +2700,7 @@ static const struct dev_pm_ops sh_eth_dev_pm_ops = { #endif static struct platform_device_id sh_eth_id_table[] = { + { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, { CARDNAME }, { } }; -- cgit v1.2.3 From c18a79abe31f555ec3b363b5b8c1d003230053b6 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 7 Jun 2013 13:56:05 +0000 Subject: sh_eth: get SH7619 support out of #ifdef Get the SH7619 data in the driver out of #ifdef by adding "sh7619-ether" to the platform driver's ID table. Change the Ether platform device's name in the SH platform code accordingly. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- arch/sh/kernel/cpu/sh2/setup-sh7619.c | 4 ++-- drivers/net/ethernet/renesas/sh_eth.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index e0b740c831c7..bb11e1925178 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c @@ -124,8 +124,8 @@ static struct resource eth_resources[] = { }; static struct platform_device eth_device = { - .name = "sh-eth", - .id = -1, + .name = "sh7619-ether", + .id = -1, .dev = { .platform_data = (void *)1, }, diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f26546aa1792..e0568a9352e5 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -680,9 +680,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .tsu = 1, .select_mii = 1, }; +#endif -#elif defined(CONFIG_CPU_SUBTYPE_SH7619) -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { +static struct sh_eth_cpu_data sh7619_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .apr = 1, @@ -690,7 +690,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .tpauser = 1, .hw_swap = 1, }; -#endif static struct sh_eth_cpu_data sh771x_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, @@ -2700,6 +2699,7 @@ static const struct dev_pm_ops sh_eth_dev_pm_ops = { #endif static struct platform_device_id sh_eth_id_table[] = { + { "sh7619-ether", (kernel_ulong_t)&sh7619_data }, { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, { CARDNAME }, { } -- cgit v1.2.3 From e5c9b4cd665106d9b5397114ea81a53059410b6a Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 7 Jun 2013 13:57:12 +0000 Subject: sh_eth: get R8A7740 support out of #ifdef Get the R8A7740 code/data in the driver out of #ifdef by adding "r8a7740-gether" to the platform driver's ID table. Change the GEther platform device's name in the ARM platform code accordingly. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- arch/arm/mach-shmobile/board-armadillo800eva.c | 2 +- arch/arm/mach-shmobile/clock-r8a7740.c | 2 +- drivers/net/ethernet/renesas/sh_eth.c | 16 +++++++--------- 3 files changed, 9 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index b85b2882dbd0..2b04c8011e13 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -377,7 +377,7 @@ static struct resource sh_eth_resources[] = { }; static struct platform_device sh_eth_device = { - .name = "sh-eth", + .name = "r8a7740-gether", .id = -1, .dev = { .platform_data = &sh_eth_platdata, diff --git a/arch/arm/mach-shmobile/clock-r8a7740.c b/arch/arm/mach-shmobile/clock-r8a7740.c index c0d39aa6de50..ae93f94d15bd 100644 --- a/arch/arm/mach-shmobile/clock-r8a7740.c +++ b/arch/arm/mach-shmobile/clock-r8a7740.c @@ -591,7 +591,7 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("e6860000.sdhi", &mstp_clks[MSTP313]), CLKDEV_DEV_ID("sh_mmcif", &mstp_clks[MSTP312]), CLKDEV_DEV_ID("e6bd0000.mmcif", &mstp_clks[MSTP312]), - CLKDEV_DEV_ID("sh-eth", &mstp_clks[MSTP309]), + CLKDEV_DEV_ID("r8a7740-gether", &mstp_clks[MSTP309]), CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP415]), CLKDEV_DEV_ID("e6870000.sdhi", &mstp_clks[MSTP415]), diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e0568a9352e5..da620ecd4151 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -619,11 +619,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .irq_flags = IRQF_SHARED, #endif }; +#endif - -#elif defined(CONFIG_ARCH_R8A7740) - -static void sh_eth_chip_reset(struct net_device *ndev) +static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -634,7 +632,7 @@ static void sh_eth_chip_reset(struct net_device *ndev) sh_eth_select_mii(ndev); } -static void sh_eth_set_rate(struct net_device *ndev) +static void sh_eth_set_rate_gether(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -654,10 +652,10 @@ static void sh_eth_set_rate(struct net_device *ndev) } /* R8A7740 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { - .chip_reset = sh_eth_chip_reset, +static struct sh_eth_cpu_data r8a7740_data = { + .chip_reset = sh_eth_chip_reset_r8a7740, .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_gether, .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, @@ -680,7 +678,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .tsu = 1, .select_mii = 1, }; -#endif static struct sh_eth_cpu_data sh7619_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, @@ -2701,6 +2698,7 @@ static const struct dev_pm_ops sh_eth_dev_pm_ops = { static struct platform_device_id sh_eth_id_table[] = { { "sh7619-ether", (kernel_ulong_t)&sh7619_data }, { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, + { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, { CARDNAME }, { } }; -- cgit v1.2.3 From f5d12767c8fd77e29d3d6771de59fd9ac3e540bb Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 7 Jun 2013 13:58:18 +0000 Subject: sh_eth: get SH77{34|63} support out of #ifdef Get the SH77{34|63} specific code/data in the driver out of #ifdef by adding "sh7734-gether" and "sh7763-gether" to the platform driver's ID table. 
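For readers following this series, the per-SoC data travels through the ID table's driver_data field and is fetched in probe() with platform_get_device_id(); a minimal, hedged sketch of that pattern (my_chip_data, my_id_table and my_probe are invented names, not sh_eth symbols):

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>

struct my_chip_data {
	int has_gigabit;	/* placeholder per-SoC capability flag */
};

static const struct my_chip_data chip_a_data = { .has_gigabit = 1 };
static const struct my_chip_data chip_b_data = { .has_gigabit = 0 };

static const struct platform_device_id my_id_table[] = {
	{ "chip-a-ether", (kernel_ulong_t)&chip_a_data },
	{ "chip-b-ether", (kernel_ulong_t)&chip_b_data },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(platform, my_id_table);

static int my_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	const struct my_chip_data *cd = (const void *)id->driver_data;

	/* cd now selects the per-SoC configuration; no #ifdef needed */
	dev_info(&pdev->dev, "gigabit capable: %d\n", cd->has_gigabit);
	return 0;
}
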
Note that we have to split the 'struct sh_eth_cpu_data' instance into two due to #ifdef inside it; note that we can kill the duplicate sh_eth_set_rate_gether(). Change the GEther platform device's name in the SH platform code accordingly. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- arch/sh/boards/board-espt.c | 2 +- arch/sh/boards/mach-sh7763rdp/setup.c | 2 +- arch/sh/kernel/cpu/sh4a/clock-sh7734.c | 2 +- drivers/net/ethernet/renesas/sh_eth.c | 67 ++++++++++++++++++---------------- 4 files changed, 39 insertions(+), 34 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c index d71a0bcf8145..4d94dff9015c 100644 --- a/arch/sh/boards/board-espt.c +++ b/arch/sh/boards/board-espt.c @@ -85,7 +85,7 @@ static struct sh_eth_plat_data sh7763_eth_pdata = { }; static struct platform_device espt_eth_device = { - .name = "sh-eth", + .name = "sh7763-gether", .resource = sh_eth_resources, .num_resources = ARRAY_SIZE(sh_eth_resources), .dev = { diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c index b7c75298dfb5..50ba481fa240 100644 --- a/arch/sh/boards/mach-sh7763rdp/setup.c +++ b/arch/sh/boards/mach-sh7763rdp/setup.c @@ -93,7 +93,7 @@ static struct sh_eth_plat_data sh7763_eth_pdata = { }; static struct platform_device sh7763rdp_eth_device = { - .name = "sh-eth", + .name = "sh7763-gether", .resource = sh_eth_resources, .num_resources = ARRAY_SIZE(sh_eth_resources), .dev = { diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7734.c b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c index deb683abacf0..ed9501519ab3 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7734.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c @@ -238,7 +238,7 @@ static struct clk_lookup lookups[] = { CLKDEV_CON_ID("adc0", &mstp_clks[MSTP313]), CLKDEV_CON_ID("mtu0", &mstp_clks[MSTP312]), CLKDEV_CON_ID("iebus0", &mstp_clks[MSTP304]), - CLKDEV_DEV_ID("sh-eth.0", &mstp_clks[MSTP114]), + CLKDEV_DEV_ID("sh7734-gether.0", &mstp_clks[MSTP114]), CLKDEV_CON_ID("rtc0", &mstp_clks[MSTP303]), CLKDEV_CON_ID("hif0", &mstp_clks[MSTP302]), CLKDEV_CON_ID("stif0", &mstp_clks[MSTP301]), diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index da620ecd4151..6d44a43e5884 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -556,8 +556,7 @@ static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp) else return &sh_eth_my_cpu_data; } - -#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) +#endif static void sh_eth_chip_reset(struct net_device *ndev) { @@ -568,7 +567,7 @@ static void sh_eth_chip_reset(struct net_device *ndev) mdelay(1); } -static void sh_eth_set_rate(struct net_device *ndev) +static void sh_eth_set_rate_gether(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -587,11 +586,40 @@ static void sh_eth_set_rate(struct net_device *ndev) } } -/* sh7763 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { +/* SH7734 */ +static struct sh_eth_cpu_data sh7734_data = { .chip_reset = sh_eth_chip_reset, .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_gether, + + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ + EESR_RDE | 
EESR_RFRMER | EESR_TFE | EESR_TDE | \ + EESR_ECI, + .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ + EESR_TFE, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .no_trimd = 1, + .no_ade = 1, + .tsu = 1, + .hw_crc = 1, + .select_mii = 1, +}; + +/* SH7763 */ +static struct sh_eth_cpu_data sh7763_data = { + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, @@ -612,14 +640,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .no_trimd = 1, .no_ade = 1, .tsu = 1, -#if defined(CONFIG_CPU_SUBTYPE_SH7734) - .hw_crc = 1, - .select_mii = 1, -#else .irq_flags = IRQF_SHARED, -#endif }; -#endif static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) { @@ -632,25 +654,6 @@ static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) sh_eth_select_mii(ndev); } -static void sh_eth_set_rate_gether(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - switch (mdp->speed) { - case 10: /* 10BASE */ - sh_eth_write(ndev, GECMR_10, GECMR); - break; - case 100:/* 100BASE */ - sh_eth_write(ndev, GECMR_100, GECMR); - break; - case 1000: /* 1000BASE */ - sh_eth_write(ndev, GECMR_1000, GECMR); - break; - default: - break; - } -} - /* R8A7740 */ static struct sh_eth_cpu_data r8a7740_data = { .chip_reset = sh_eth_chip_reset_r8a7740, @@ -2698,6 +2701,8 @@ static const struct dev_pm_ops sh_eth_dev_pm_ops = { static struct platform_device_id sh_eth_id_table[] = { { "sh7619-ether", (kernel_ulong_t)&sh7619_data }, { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, + { "sh7734-gether", (kernel_ulong_t)&sh7734_data }, + { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, { CARDNAME }, { } -- cgit v1.2.3 From 24549e2a0f33628b5160eac16c6aebf1cfaf22f1 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 7 Jun 2013 13:59:21 +0000 Subject: sh_eth: get SH7757 support out of #ifdef Get the SH7757 code/data in the driver out of #ifdef by adding "sh7757-ether" and "sh7757-gether" to the platform driver's ID table. Note that we can remove SH_ETH_HAS_BOTH_MODULES and sh_eth_get_cpu_data(). Change the Ether/GEther platform devices' names in the SH platform code accordingly. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- arch/sh/boards/board-sh7757lcr.c | 8 ++++---- drivers/net/ethernet/renesas/sh_eth.c | 28 ++++++++-------------------- 2 files changed, 12 insertions(+), 24 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c index 41f86702eb9f..4f114d1cd019 100644 --- a/arch/sh/boards/board-sh7757lcr.c +++ b/arch/sh/boards/board-sh7757lcr.c @@ -82,7 +82,7 @@ static struct sh_eth_plat_data sh7757_eth0_pdata = { }; static struct platform_device sh7757_eth0_device = { - .name = "sh-eth", + .name = "sh7757-ether", .resource = sh_eth0_resources, .id = 0, .num_resources = ARRAY_SIZE(sh_eth0_resources), @@ -111,7 +111,7 @@ static struct sh_eth_plat_data sh7757_eth1_pdata = { }; static struct platform_device sh7757_eth1_device = { - .name = "sh-eth", + .name = "sh7757-ether", .resource = sh_eth1_resources, .id = 1, .num_resources = ARRAY_SIZE(sh_eth1_resources), @@ -157,7 +157,7 @@ static struct sh_eth_plat_data sh7757_eth_giga0_pdata = { }; static struct platform_device sh7757_eth_giga0_device = { - .name = "sh-eth", + .name = "sh7757-gether", .resource = sh_eth_giga0_resources, .id = 2, .num_resources = ARRAY_SIZE(sh_eth_giga0_resources), @@ -192,7 +192,7 @@ static struct sh_eth_plat_data sh7757_eth_giga1_pdata = { }; static struct platform_device sh7757_eth_giga1_device = { - .name = "sh-eth", + .name = "sh7757-gether", .resource = sh_eth_giga1_resources, .id = 3, .num_resources = ARRAY_SIZE(sh_eth_giga1_resources), diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 6d44a43e5884..9860fc028f3d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -431,10 +431,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .rpadir = 1, .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ }; -#elif defined(CONFIG_CPU_SUBTYPE_SH7757) -#define SH_ETH_HAS_BOTH_MODULES 1 +#endif -static void sh_eth_set_rate(struct net_device *ndev) +static void sh_eth_set_rate_sh7757(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -451,9 +450,9 @@ static void sh_eth_set_rate(struct net_device *ndev) } /* SH7757 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { - .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, +static struct sh_eth_cpu_data sh7757_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_sh7757, .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .rmcr_value = 0x00000001, @@ -518,7 +517,7 @@ static void sh_eth_set_rate_giga(struct net_device *ndev) } /* SH7757(GETHERC) */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { +static struct sh_eth_cpu_data sh7757_data_giga = { .chip_reset = sh_eth_chip_reset_giga, .set_duplex = sh_eth_set_duplex, .set_rate = sh_eth_set_rate_giga, @@ -549,15 +548,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { .tsu = 1, }; -static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp) -{ - if (sh_eth_is_gether(mdp)) - return &sh_eth_my_cpu_data_giga; - else - return &sh_eth_my_cpu_data; -} -#endif - static void sh_eth_chip_reset(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -2577,11 +2567,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) mdp->reg_offset = sh_eth_get_register_offset(pd->register_type); /* set cpu data */ -#if defined(SH_ETH_HAS_BOTH_MODULES) - mdp->cd = sh_eth_get_cpu_data(mdp); -#else mdp->cd = &sh_eth_my_cpu_data; -#endif if 
(id->driver_data) mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; sh_eth_set_default_cpu_data(mdp->cd); @@ -2702,6 +2688,8 @@ static struct platform_device_id sh_eth_id_table[] = { { "sh7619-ether", (kernel_ulong_t)&sh7619_data }, { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, { "sh7734-gether", (kernel_ulong_t)&sh7734_data }, + { "sh7757-ether", (kernel_ulong_t)&sh7757_data }, + { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga }, { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, { CARDNAME }, -- cgit v1.2.3 From 9c3beaabb951d672b1534c7f56f84054b088f879 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 7 Jun 2013 14:03:37 +0000 Subject: sh_eth: get SH7724 support out of #ifdef Get the SH7724 code/data in the driver out of #ifdef by adding "r8a7724-ether" to the platform driver's ID table. Change the Ether platform device's name in the SH platform code accordingly. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- arch/sh/boards/mach-ecovec24/setup.c | 4 ++-- arch/sh/boards/mach-se/7724/setup.c | 4 ++-- arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 2 +- drivers/net/ethernet/renesas/sh_eth.c | 10 +++++----- 4 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 764530c85aa9..61fade0ffa96 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -165,8 +165,8 @@ static struct sh_eth_plat_data sh_eth_plat = { }; static struct platform_device sh_eth_device = { - .name = "sh-eth", - .id = 0, + .name = "sh7724-ether", + .id = 0, .dev = { .platform_data = &sh_eth_plat, }, diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 4010e63e82d8..b70180ef3e29 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -380,8 +380,8 @@ static struct sh_eth_plat_data sh_eth_plat = { }; static struct platform_device sh_eth_device = { - .name = "sh-eth", - .id = 0, + .name = "sh7724-ether", + .id = 0, .dev = { .platform_data = &sh_eth_plat, }, diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index 5f30f805d2f2..0128af3399b7 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -329,7 +329,7 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC0]), CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[HWBLK_IIC1]), CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[HWBLK_MMC]), - CLKDEV_DEV_ID("sh-eth.0", &mstp_clks[HWBLK_ETHER]), + CLKDEV_DEV_ID("sh7724-ether.0", &mstp_clks[HWBLK_ETHER]), CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]), CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]), CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]), diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 9860fc028f3d..143e22e1b329 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -392,9 +392,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .tpauser = 1, .hw_swap = 1, }; -#elif defined(CONFIG_CPU_SUBTYPE_SH7724) +#endif -static void sh_eth_set_rate(struct net_device *ndev) +static void sh_eth_set_rate_sh7724(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -411,9 +411,9 @@ static void sh_eth_set_rate(struct net_device *ndev) } /* SH7724 */ -static struct sh_eth_cpu_data 
sh_eth_my_cpu_data = { +static struct sh_eth_cpu_data sh7724_data = { .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_sh7724, .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, @@ -431,7 +431,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .rpadir = 1, .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ }; -#endif static void sh_eth_set_rate_sh7757(struct net_device *ndev) { @@ -2687,6 +2686,7 @@ static const struct dev_pm_ops sh_eth_dev_pm_ops = { static struct platform_device_id sh_eth_id_table[] = { { "sh7619-ether", (kernel_ulong_t)&sh7619_data }, { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, + { "sh7724-ether", (kernel_ulong_t)&sh7724_data }, { "sh7734-gether", (kernel_ulong_t)&sh7734_data }, { "sh7757-ether", (kernel_ulong_t)&sh7757_data }, { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga }, -- cgit v1.2.3 From 589ebdef7e3107401bf96a9c660753d397329ee9 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 7 Jun 2013 14:05:59 +0000 Subject: sh_eth: get R8A777x support out of #ifdef Get the R-Car code/data in the driver out of #ifdef by adding "r8a777x-ether" to the platfrom driver's ID table; since it's the last #ifdef, we remove CARDNAME from the ID table and no longer check the driver data before assigning it to 'mdp->cd'... Change the Ether platform device's name in the ARM platform code accordingly. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- arch/arm/mach-shmobile/clock-r8a7778.c | 2 +- arch/arm/mach-shmobile/clock-r8a7779.c | 2 +- drivers/net/ethernet/renesas/sh_eth.c | 14 +++++--------- 3 files changed, 7 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/arch/arm/mach-shmobile/clock-r8a7778.c b/arch/arm/mach-shmobile/clock-r8a7778.c index cd6855290b1f..9614b07254b2 100644 --- a/arch/arm/mach-shmobile/clock-r8a7778.c +++ b/arch/arm/mach-shmobile/clock-r8a7778.c @@ -77,7 +77,7 @@ static struct clk mstp_clks[MSTP_NR] = { static struct clk_lookup lookups[] = { /* MSTP32 clocks */ - CLKDEV_DEV_ID("sh-eth", &mstp_clks[MSTP114]), /* Ether */ + CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */ CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP026]), /* SCIF0 */ CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP025]), /* SCIF1 */ CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP024]), /* SCIF2 */ diff --git a/arch/arm/mach-shmobile/clock-r8a7779.c b/arch/arm/mach-shmobile/clock-r8a7779.c index 31d5cd4d9787..2f7e5245a690 100644 --- a/arch/arm/mach-shmobile/clock-r8a7779.c +++ b/arch/arm/mach-shmobile/clock-r8a7779.c @@ -163,7 +163,7 @@ static struct clk_lookup lookups[] = { /* MSTP32 clocks */ CLKDEV_DEV_ID("sata_rcar", &mstp_clks[MSTP115]), /* SATA */ CLKDEV_DEV_ID("fc600000.sata", &mstp_clks[MSTP115]), /* SATA w/DT */ - CLKDEV_DEV_ID("sh-eth", &mstp_clks[MSTP114]), /* Ether */ + CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */ CLKDEV_DEV_ID("ehci-platform.1", &mstp_clks[MSTP101]), /* USB EHCI port2 */ CLKDEV_DEV_ID("ohci-platform.1", &mstp_clks[MSTP101]), /* USB OHCI port2 */ CLKDEV_DEV_ID("ehci-platform.0", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */ diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 143e22e1b329..cff697118119 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -356,8 +356,7 @@ static void __maybe_unused sh_eth_set_duplex(struct net_device *ndev) } /* There is CPU dependent code */ 
-#if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779) -static void sh_eth_set_rate(struct net_device *ndev) +static void sh_eth_set_rate_r8a777x(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -374,9 +373,9 @@ static void sh_eth_set_rate(struct net_device *ndev) } /* R8A7778/9 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { +static struct sh_eth_cpu_data r8a777x_data = { .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_r8a777x, .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, @@ -392,7 +391,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .tpauser = 1, .hw_swap = 1, }; -#endif static void sh_eth_set_rate_sh7724(struct net_device *ndev) { @@ -2566,9 +2564,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) mdp->reg_offset = sh_eth_get_register_offset(pd->register_type); /* set cpu data */ - mdp->cd = &sh_eth_my_cpu_data; - if (id->driver_data) - mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; + mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; sh_eth_set_default_cpu_data(mdp->cd); /* set function */ @@ -2692,7 +2688,7 @@ static struct platform_device_id sh_eth_id_table[] = { { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga }, { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, - { CARDNAME }, + { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, { } }; MODULE_DEVICE_TABLE(platform, sh_eth_id_table); -- cgit v1.2.3 From eb770bf430bc1d4e1d23af353a80cfae2b237e66 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 7 Jun 2013 14:07:13 +0000 Subject: sh_eth: remove dependencies from Kconfig Since dependence on the certain SoCs is no longer necessary to compile the driver, remove the dependency list from its Kconfig entry which is a popular demand anyway... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/Kconfig | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index bed9841d728c..267eac054fa4 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -4,12 +4,6 @@ config SH_ETH tristate "Renesas SuperH Ethernet support" - depends on (SUPERH || ARCH_SHMOBILE) && \ - (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ - CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ - CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \ - CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || \ - ARCH_R8A7778 || ARCH_R8A7779) select CRC32 select NET_CORE select MII -- cgit v1.2.3 From e403d295817cf9f8be3110eb7ee1a03981287495 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 7 Jun 2013 23:40:41 -0700 Subject: sh_eth: Fix warnings on 64-bit. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't cast a plain integer to a pointer. 
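The warnings quoted below arise because a 32-bit constant such as 0xfee00000 has an integer type narrower than a 64-bit pointer, so casting it to an __iomem pointer trips -Wint-to-pointer-cast; suffixing the constant with UL makes it pointer-sized. A hedged illustration of the warning class (EXAMPLE_BASE_* are invented macros, not driver symbols):

#include <linux/io.h>

#define EXAMPLE_BASE_32	0xfee00000	/* unsigned int: narrower than a 64-bit pointer */
#define EXAMPLE_BASE_UL	0xfee00000UL	/* unsigned long: same width as a pointer */

static void __iomem *example_base(void)
{
	void __iomem *warns = (void __iomem *)EXAMPLE_BASE_32;	/* -Wint-to-pointer-cast on 64-bit */
	void __iomem *clean = (void __iomem *)EXAMPLE_BASE_UL;	/* no warning */

	return clean ? clean : warns;
}
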
drivers/net/ethernet/renesas/sh_eth.c: In function ‘sh_eth_chip_reset_giga’: drivers/net/ethernet/renesas/sh_eth.c:482:22: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] drivers/net/ethernet/renesas/sh_eth.c:483:22: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] drivers/net/ethernet/renesas/sh_eth.c:492:22: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] drivers/net/ethernet/renesas/sh_eth.c:493:22: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index cff697118119..43d8490cfe0d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -469,7 +469,7 @@ static struct sh_eth_cpu_data sh7757_data = { .rpadir_value = 2 << 16, }; -#define SH_GIGA_ETH_BASE 0xfee00000 +#define SH_GIGA_ETH_BASE 0xfee00000UL #define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) #define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) static void sh_eth_chip_reset_giga(struct net_device *ndev) -- cgit v1.2.3 From 9ddebc46e70b434e485060f7c1b53c5b848a6c8c Mon Sep 17 00:00:00 2001 From: David Daney Date: Wed, 22 May 2013 15:10:46 +0000 Subject: MIPS: OCTEON: Rename Kconfig CAVIUM_OCTEON_REFERENCE_BOARD to CAVIUM_OCTEON_SOC CAVIUM_OCTEON_SOC most place we used to use CPU_CAVIUM_OCTEON. This allows us to CPU_CAVIUM_OCTEON in places where we have no OCTEON SOC. Remove CAVIUM_OCTEON_SIMULATOR as it doesn't really do anything, we can get the same configuration with CAVIUM_OCTEON_SOC. Signed-off-by: David Daney Cc: linux-mips@linux-mips.org Cc: linux-ide@vger.kernel.org Cc: linux-edac@vger.kernel.org Cc: linux-i2c@vger.kernel.org Cc: netdev@vger.kernel.org Cc: spi-devel-general@lists.sourceforge.net Cc: devel@driverdev.osuosl.org Cc: linux-usb@vger.kernel.org Acked-by: Greg Kroah-Hartman Acked-by: Wolfram Sang Acked-by: Mauro Carvalho Chehab Patchwork: https://patchwork.linux-mips.org/patch/5295/ Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 19 ++----------------- arch/mips/cavium-octeon/Kconfig | 6 +++++- arch/mips/cavium-octeon/Platform | 8 ++++---- arch/mips/configs/cavium_octeon_defconfig | 2 +- arch/mips/pci/Makefile | 4 ++-- drivers/ata/Kconfig | 2 +- drivers/char/hw_random/Kconfig | 2 +- drivers/edac/Kconfig | 6 +++--- drivers/i2c/busses/Kconfig | 2 +- drivers/net/ethernet/octeon/Kconfig | 2 +- drivers/net/phy/Kconfig | 2 +- drivers/spi/Kconfig | 2 +- drivers/staging/octeon/Kconfig | 2 +- drivers/usb/host/Kconfig | 4 ++-- drivers/watchdog/Kconfig | 2 +- 15 files changed, 27 insertions(+), 38 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 7a58ab933b20..ade99730ef3b 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -735,23 +735,8 @@ config WR_PPMC This enables support for the Wind River MIPS32 4KC PPMC evaluation board, which is based on GT64120 bridge chip. 
-config CAVIUM_OCTEON_SIMULATOR - bool "Cavium Networks Octeon Simulator" - select CEVT_R4K - select 64BIT_PHYS_ADDR - select DMA_COHERENT - select SYS_SUPPORTS_64BIT_KERNEL - select SYS_SUPPORTS_BIG_ENDIAN - select SYS_SUPPORTS_HOTPLUG_CPU - select SYS_HAS_CPU_CAVIUM_OCTEON - select HOLES_IN_ZONE - help - The Octeon simulator is software performance model of the Cavium - Octeon Processor. It supports simulating Octeon processors on x86 - hardware. - -config CAVIUM_OCTEON_REFERENCE_BOARD - bool "Cavium Networks Octeon reference board" +config CAVIUM_OCTEON_SOC + bool "Cavium Networks Octeon SoC based boards" select CEVT_R4K select 64BIT_PHYS_ADDR select DMA_COHERENT diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index 75a6df7fd265..a12444a5f1b5 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig @@ -10,6 +10,10 @@ config CAVIUM_CN63XXP1 non-CN63XXP1 hardware, so it is recommended to select "n" unless it is known the workarounds are needed. +endif # CPU_CAVIUM_OCTEON + +if CAVIUM_OCTEON_SOC + config CAVIUM_OCTEON_2ND_KERNEL bool "Build the kernel to be used as a 2nd kernel on the same chip" default "n" @@ -103,4 +107,4 @@ config OCTEON_ILM To compile this driver as a module, choose M here. The module will be called octeon-ilm -endif # CPU_CAVIUM_OCTEON +endif # CAVIUM_OCTEON_SOC diff --git a/arch/mips/cavium-octeon/Platform b/arch/mips/cavium-octeon/Platform index 1e43ccf1a792..8a301cb12d68 100644 --- a/arch/mips/cavium-octeon/Platform +++ b/arch/mips/cavium-octeon/Platform @@ -1,11 +1,11 @@ # # Cavium Octeon # -platform-$(CONFIG_CPU_CAVIUM_OCTEON) += cavium-octeon/ -cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += \ +platform-$(CONFIG_CAVIUM_OCTEON_SOC) += cavium-octeon/ +cflags-$(CONFIG_CAVIUM_OCTEON_SOC) += \ -I$(srctree)/arch/mips/include/asm/mach-cavium-octeon ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL -load-$(CONFIG_CPU_CAVIUM_OCTEON) += 0xffffffff84100000 +load-$(CONFIG_CAVIUM_OCTEON_SOC) += 0xffffffff84100000 else -load-$(CONFIG_CPU_CAVIUM_OCTEON) += 0xffffffff81100000 +load-$(CONFIG_CAVIUM_OCTEON_SOC) += 0xffffffff81100000 endif diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index 014ba4bbba7d..1888e5f4d598 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig @@ -1,4 +1,4 @@ -CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD=y +CONFIG_CAVIUM_OCTEON_SOC=y CONFIG_CAVIUM_CN63XXP1=y CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2 CONFIG_SPARSEMEM_MANUAL=y diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile index 2cb1d315d225..fa3bcd233138 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile @@ -54,10 +54,10 @@ obj-$(CONFIG_VICTOR_MPC30X) += fixup-mpc30x.o obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o -obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o +obj-$(CONFIG_CAVIUM_OCTEON_SOC) += pci-octeon.o pcie-octeon.o obj-$(CONFIG_CPU_XLR) += pci-xlr.o obj-$(CONFIG_CPU_XLP) += pci-xlp.o ifdef CONFIG_PCI_MSI -obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o +obj-$(CONFIG_CAVIUM_OCTEON_SOC) += msi-octeon.o endif diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index a5a3ebcbdd2c..dc20774bd647 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -160,7 +160,7 @@ config PDC_ADMA config PATA_OCTEON_CF tristate "OCTEON Boot Bus Compact Flash support" - depends on CPU_CAVIUM_OCTEON + depends on CAVIUM_OCTEON_SOC help 
This option enables a polled compact flash driver for use with compact flash cards attached to the OCTEON boot bus. diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 2f9dbf7568fb..40a865449f35 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -167,7 +167,7 @@ config HW_RANDOM_OMAP config HW_RANDOM_OCTEON tristate "Octeon Random Number Generator support" - depends on HW_RANDOM && CPU_CAVIUM_OCTEON + depends on HW_RANDOM && CAVIUM_OCTEON_SOC default HW_RANDOM ---help--- This driver provides kernel-side support for the Random Number diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index e443f2c1dfd1..923d2e82a289 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -349,21 +349,21 @@ config EDAC_OCTEON_PC config EDAC_OCTEON_L2C tristate "Cavium Octeon Secondary Caches (L2C)" - depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON + depends on EDAC_MM_EDAC && CAVIUM_OCTEON_SOC help Support for error detection and correction on the Cavium Octeon family of SOCs. config EDAC_OCTEON_LMC tristate "Cavium Octeon DRAM Memory Controller (LMC)" - depends on EDAC_MM_EDAC && CPU_CAVIUM_OCTEON + depends on EDAC_MM_EDAC && CAVIUM_OCTEON_SOC help Support for error detection and correction on the Cavium Octeon family of SOCs. config EDAC_OCTEON_PCI tristate "Cavium Octeon PCI Controller" - depends on EDAC_MM_EDAC && PCI && CPU_CAVIUM_OCTEON + depends on EDAC_MM_EDAC && PCI && CAVIUM_OCTEON_SOC help Support for error detection and correction on the Cavium Octeon family of SOCs. diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 631736e2e7ed..a8fff7700624 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -726,7 +726,7 @@ config I2C_VERSATILE config I2C_OCTEON tristate "Cavium OCTEON I2C bus support" - depends on CPU_CAVIUM_OCTEON + depends on CAVIUM_OCTEON_SOC help Say yes if you want to support the I2C serial bus on Cavium OCTEON SOC. diff --git a/drivers/net/ethernet/octeon/Kconfig b/drivers/net/ethernet/octeon/Kconfig index 3de52ffd2872..a7aa28054cc1 100644 --- a/drivers/net/ethernet/octeon/Kconfig +++ b/drivers/net/ethernet/octeon/Kconfig @@ -4,7 +4,7 @@ config OCTEON_MGMT_ETHERNET tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)" - depends on CPU_CAVIUM_OCTEON + depends on CAVIUM_OCTEON_SOC select PHYLIB select MDIO_OCTEON default y diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 1e11f2bfd9ce..84461e8824f7 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -135,7 +135,7 @@ config MDIO_GPIO config MDIO_OCTEON tristate "Support for MDIO buses on Octeon SOCs" - depends on CPU_CAVIUM_OCTEON + depends on CAVIUM_OCTEON_SOC default y help diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 92a9345d7a6b..20158978866f 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -266,7 +266,7 @@ config SPI_OC_TINY config SPI_OCTEON tristate "Cavium OCTEON SPI controller" - depends on CPU_CAVIUM_OCTEON + depends on CAVIUM_OCTEON_SOC help SPI host driver for the hardware found on some Cavium OCTEON SOCs. 
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig index 9493128e5fd2..6e1d5f8d3ec1 100644 --- a/drivers/staging/octeon/Kconfig +++ b/drivers/staging/octeon/Kconfig @@ -1,6 +1,6 @@ config OCTEON_ETHERNET tristate "Cavium Networks Octeon Ethernet support" - depends on CPU_CAVIUM_OCTEON && NETDEVICES + depends on CAVIUM_OCTEON_SOC && NETDEVICES select PHYLIB select MDIO_OCTEON help diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 344d5e2f87d7..54c121580738 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -285,7 +285,7 @@ config USB_EHCI_HCD_PLATFORM config USB_OCTEON_EHCI bool "Octeon on-chip EHCI support" - depends on CPU_CAVIUM_OCTEON + depends on CAVIUM_OCTEON_SOC default n select USB_EHCI_BIG_ENDIAN_MMIO help @@ -480,7 +480,7 @@ config USB_OHCI_HCD_PLATFORM config USB_OCTEON_OHCI bool "Octeon on-chip OHCI support" - depends on CPU_CAVIUM_OCTEON + depends on CAVIUM_OCTEON_SOC default USB_OCTEON_EHCI select USB_OHCI_BIG_ENDIAN_MMIO select USB_OHCI_LITTLE_ENDIAN diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index e89fc3133972..9d03af1c596f 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1072,7 +1072,7 @@ config TXX9_WDT config OCTEON_WDT tristate "Cavium OCTEON SOC family Watchdog Timer" - depends on CPU_CAVIUM_OCTEON + depends on CAVIUM_OCTEON_SOC default y select EXPORT_UASM if OCTEON_WDT = m help -- cgit v1.2.3 From 7260aac97447a2b2cb9e8684d1162118c4426354 Mon Sep 17 00:00:00 2001 From: Maxime Bizon Date: Tue, 4 Jun 2013 22:53:33 +0100 Subject: bcm63xx_enet: implement reset autoneg ethtool callback Implement the rset_nway ethtool callback which uses libphy generic autonegotiation restart function. Signed-off-by: Maxime Bizon Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index e46466cb3627..bc1a9948892b 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1328,6 +1328,20 @@ static void bcm_enet_get_ethtool_stats(struct net_device *netdev, mutex_unlock(&priv->mib_update_lock); } +static int bcm_enet_nway_reset(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + if (priv->has_phy) { + if (!priv->phydev) + return -ENODEV; + return genphy_restart_aneg(priv->phydev); + } + + return -EOPNOTSUPP; +} + static int bcm_enet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { @@ -1470,6 +1484,7 @@ static const struct ethtool_ops bcm_enet_ethtool_ops = { .get_strings = bcm_enet_get_strings, .get_sset_count = bcm_enet_get_sset_count, .get_ethtool_stats = bcm_enet_get_ethtool_stats, + .nway_reset = bcm_enet_nway_reset, .get_settings = bcm_enet_get_settings, .set_settings = bcm_enet_set_settings, .get_drvinfo = bcm_enet_get_drvinfo, -- cgit v1.2.3 From 0ae99b5fede6f3a8d252d50bb4aba29544295219 Mon Sep 17 00:00:00 2001 From: Maxime Bizon Date: Tue, 4 Jun 2013 22:53:34 +0100 Subject: bcm63xx_enet: split DMA channel register accesses The current bcm63xx_enet driver always uses bcmenet_shared_base whenever it needs to access DMA channel configuration space or access the DMA channel state RAM. 
Split these register in 3 parts to be more accurate: - global DMA configuration - per DMA channel configuration space - per DMA channel state RAM space This is preliminary to support new chips where the global DMA configuration remains the same, but there is a varying number of DMA channels located at a different memory offset. Signed-off-by: Maxime Bizon Signed-off-by: Jonas Gorski Signed-off-by: David S. Miller --- arch/mips/bcm63xx/dev-enet.c | 23 +++- arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h | 4 +- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 139 ++++++++++++++--------- 3 files changed, 105 insertions(+), 61 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c index 39c23366c5c7..df5bf6686942 100644 --- a/arch/mips/bcm63xx/dev-enet.c +++ b/arch/mips/bcm63xx/dev-enet.c @@ -19,6 +19,16 @@ static struct resource shared_res[] = { .end = -1, /* filled at runtime */ .flags = IORESOURCE_MEM, }, + { + .start = -1, /* filled at runtime */ + .end = -1, /* filled at runtime */ + .flags = IORESOURCE_MEM, + }, + { + .start = -1, /* filled at runtime */ + .end = -1, /* filled at runtime */ + .flags = IORESOURCE_MEM, + }, }; static struct platform_device bcm63xx_enet_shared_device = { @@ -110,10 +120,15 @@ int __init bcm63xx_enet_register(int unit, if (!shared_device_registered) { shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA); shared_res[0].end = shared_res[0].start; - if (BCMCPU_IS_6338()) - shared_res[0].end += (RSET_ENETDMA_SIZE / 2) - 1; - else - shared_res[0].end += (RSET_ENETDMA_SIZE) - 1; + shared_res[0].end += (RSET_ENETDMA_SIZE) - 1; + + shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC); + shared_res[1].end = shared_res[1].start; + shared_res[1].end += RSET_ENETDMAC_SIZE(16) - 1; + + shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS); + shared_res[2].end = shared_res[2].start; + shared_res[2].end += RSET_ENETDMAS_SIZE(16) - 1; ret = platform_device_register(&bcm63xx_enet_shared_device); if (ret) diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h index 336228990808..9981f4f0e42f 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h @@ -173,7 +173,9 @@ enum bcm63xx_regs_set { #define BCM_6358_RSET_SPI_SIZE 1804 #define BCM_6368_RSET_SPI_SIZE 1804 #define RSET_ENET_SIZE 2048 -#define RSET_ENETDMA_SIZE 2048 +#define RSET_ENETDMA_SIZE 256 +#define RSET_ENETDMAC_SIZE(chans) (16 * (chans)) +#define RSET_ENETDMAS_SIZE(chans) (16 * (chans)) #define RSET_ENETSW_SIZE 65536 #define RSET_UART_SIZE 24 #define RSET_UDC_SIZE 256 diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index bc1a9948892b..edaf76dc2487 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -41,8 +41,8 @@ static int copybreak __read_mostly = 128; module_param(copybreak, int, 0); MODULE_PARM_DESC(copybreak, "Receive copy threshold"); -/* io memory shared between all devices */ -static void __iomem *bcm_enet_shared_base; +/* io registers memory shared between all devices */ +static void __iomem *bcm_enet_shared_base[3]; /* * io helpers to access mac registers @@ -63,13 +63,35 @@ static inline void enet_writel(struct bcm_enet_priv *priv, */ static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) { - return bcm_readl(bcm_enet_shared_base + off); + return 
bcm_readl(bcm_enet_shared_base[0] + off); } static inline void enet_dma_writel(struct bcm_enet_priv *priv, u32 val, u32 off) { - bcm_writel(val, bcm_enet_shared_base + off); + bcm_writel(val, bcm_enet_shared_base[0] + off); +} + +static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readl(bcm_enet_shared_base[1] + off); +} + +static inline void enet_dmac_writel(struct bcm_enet_priv *priv, + u32 val, u32 off) +{ + bcm_writel(val, bcm_enet_shared_base[1] + off); +} + +static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readl(bcm_enet_shared_base[2] + off); +} + +static inline void enet_dmas_writel(struct bcm_enet_priv *priv, + u32 val, u32 off) +{ + bcm_writel(val, bcm_enet_shared_base[2] + off); } /* @@ -353,8 +375,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget) bcm_enet_refill_rx(dev); /* kick rx dma */ - enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, - ENETDMA_CHANCFG_REG(priv->rx_chan)); + enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, + ENETDMAC_CHANCFG_REG(priv->rx_chan)); } return processed; @@ -429,10 +451,10 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) dev = priv->net_dev; /* ack interrupts */ - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->tx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR_REG(priv->rx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR_REG(priv->tx_chan)); /* reclaim sent skb */ tx_work_done = bcm_enet_tx_reclaim(dev, 0); @@ -451,10 +473,10 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) napi_complete(napi); /* restore rx/tx interrupt */ - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK_REG(priv->rx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK_REG(priv->tx_chan)); return rx_work_done; } @@ -497,8 +519,8 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) priv = netdev_priv(dev); /* mask rx/tx interrupts */ - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); napi_schedule(&priv->napi); @@ -557,8 +579,8 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) wmb(); /* kick tx dma */ - enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, - ENETDMA_CHANCFG_REG(priv->tx_chan)); + enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, + ENETDMAC_CHANCFG_REG(priv->tx_chan)); /* stop queue if no more desc available */ if (!priv->tx_desc_count) @@ -833,8 +855,8 @@ static int bcm_enet_open(struct net_device *dev) /* mask all interrupts and request them */ enet_writel(priv, 0, ENET_IRMASK_REG); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); if (ret) @@ -919,28 +941,28 @@ static int bcm_enet_open(struct net_device *dev) } /* write rx & tx ring addresses */ - enet_dma_writel(priv, 
priv->rx_desc_dma, - ENETDMA_RSTART_REG(priv->rx_chan)); - enet_dma_writel(priv, priv->tx_desc_dma, - ENETDMA_RSTART_REG(priv->tx_chan)); + enet_dmas_writel(priv, priv->rx_desc_dma, + ENETDMAS_RSTART_REG(priv->rx_chan)); + enet_dmas_writel(priv, priv->tx_desc_dma, + ENETDMAS_RSTART_REG(priv->tx_chan)); /* clear remaining state ram for rx & tx channel */ - enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan)); /* set max rx/tx length */ enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); /* set dma maximum burst len */ - enet_dma_writel(priv, BCMENET_DMA_MAXBURST, - ENETDMA_MAXBURST_REG(priv->rx_chan)); - enet_dma_writel(priv, BCMENET_DMA_MAXBURST, - ENETDMA_MAXBURST_REG(priv->tx_chan)); + enet_dmac_writel(priv, BCMENET_DMA_MAXBURST, + ENETDMAC_MAXBURST_REG(priv->rx_chan)); + enet_dmac_writel(priv, BCMENET_DMA_MAXBURST, + ENETDMAC_MAXBURST_REG(priv->tx_chan)); /* set correct transmit fifo watermark */ enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); @@ -958,26 +980,26 @@ static int bcm_enet_open(struct net_device *dev) val |= ENET_CTL_ENABLE_MASK; enet_writel(priv, val, ENET_CTL_REG); enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); - enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, - ENETDMA_CHANCFG_REG(priv->rx_chan)); + enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, + ENETDMAC_CHANCFG_REG(priv->rx_chan)); /* watch "mib counters about to overflow" interrupt */ enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); /* watch "packet transferred" interrupt in rx and tx */ - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IR_REG(priv->tx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR_REG(priv->rx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR_REG(priv->tx_chan)); /* make sure we enable napi before rx interrupt */ napi_enable(&priv->napi); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, - ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK_REG(priv->rx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK_REG(priv->tx_chan)); if (priv->has_phy) phy_start(priv->phydev); @@ -1057,14 +1079,14 @@ static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) { int limit; - enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan)); + enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG_REG(chan)); limit = 1000; do { u32 val; - val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan)); - if (!(val & ENETDMA_CHANCFG_EN_MASK)) + val = enet_dmac_readl(priv, ENETDMAC_CHANCFG_REG(chan)); + if (!(val & 
ENETDMAC_CHANCFG_EN_MASK)) break; udelay(1); } while (limit--); @@ -1090,8 +1112,8 @@ static int bcm_enet_stop(struct net_device *dev) /* mask all interrupts */ enet_writel(priv, 0, ENET_IRMASK_REG); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); - enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); /* make sure no mib update is scheduled */ cancel_work_sync(&priv->mib_update_task); @@ -1636,7 +1658,7 @@ static int bcm_enet_probe(struct platform_device *pdev) /* stop if shared driver failed, assume driver->probe will be * called in the same order we register devices (correct ?) */ - if (!bcm_enet_shared_base) + if (!bcm_enet_shared_base[0]) return -ENODEV; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1881,14 +1903,19 @@ struct platform_driver bcm63xx_enet_driver = { static int bcm_enet_shared_probe(struct platform_device *pdev) { struct resource *res; + void __iomem *p[3]; + unsigned int i; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; + memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base)); - bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res); - if (!bcm_enet_shared_base) - return -ENOMEM; + for (i = 0; i < 3; i++) { + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + p[i] = devm_ioremap_resource(&pdev->dev, res); + if (!p[i]) + return -ENOMEM; + } + + memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base)); return 0; } -- cgit v1.2.3 From 6f00a0229627ca189529cad3f9154ac2f9e5c7db Mon Sep 17 00:00:00 2001 From: Maxime Bizon Date: Tue, 4 Jun 2013 22:53:35 +0100 Subject: bcm63xx_enet: add support for Broadcom BCM63xx integrated gigabit switch Newer Broadcom BCM63xx SoCs: 6328, 6362 and 6368 have an integrated switch which needs to be driven slightly differently from the traditional external switches. This patch introduces changes in arch/mips/bcm63xx in order to: - register a bcm63xx_enetsw driver instead of bcm63xx_enet driver - update DMA channels configuration & state RAM base addresses - add a new platform data configuration knob to define the number of ports per switch/device and force link on some ports - define the required switch registers On the driver side, the following changes are required: - the switch ports need to be polled to ensure the link is up and running and RX/TX can properly work - basic switch configuration needs to be performed for the switch to forward packets to the CPU - update the MIB counters since the integrated Signed-off-by: Maxime Bizon Signed-off-by: Jonas Gorski Signed-off-by: David S. 
Miller --- arch/mips/bcm63xx/boards/board_bcm963xx.c | 4 + arch/mips/bcm63xx/dev-enet.c | 113 ++- .../include/asm/mach-bcm63xx/bcm63xx_dev_enet.h | 28 + arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h | 50 ++ .../mips/include/asm/mach-bcm63xx/board_bcm963xx.h | 2 + drivers/net/ethernet/broadcom/bcm63xx_enet.c | 995 ++++++++++++++++++++- drivers/net/ethernet/broadcom/bcm63xx_enet.h | 71 ++ 7 files changed, 1205 insertions(+), 58 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index a9505c4867e8..9c0ddafafb6c 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -845,6 +845,10 @@ int __init board_register_devices(void) !bcm63xx_nvram_get_mac_address(board.enet1.mac_addr)) bcm63xx_enet_register(1, &board.enet1); + if (board.has_enetsw && + !bcm63xx_nvram_get_mac_address(board.enetsw.mac_addr)) + bcm63xx_enetsw_register(&board.enetsw); + if (board.has_usbd) bcm63xx_usbd_register(&board.usbd); diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c index df5bf6686942..6cbaee0f6d70 100644 --- a/arch/mips/bcm63xx/dev-enet.c +++ b/arch/mips/bcm63xx/dev-enet.c @@ -104,6 +104,64 @@ static struct platform_device bcm63xx_enet1_device = { }, }; +static struct resource enetsw_res[] = { + { + /* start & end filled at runtime */ + .flags = IORESOURCE_MEM, + }, + { + /* start filled at runtime */ + .flags = IORESOURCE_IRQ, + }, + { + /* start filled at runtime */ + .flags = IORESOURCE_IRQ, + }, +}; + +static struct bcm63xx_enetsw_platform_data enetsw_pd; + +static struct platform_device bcm63xx_enetsw_device = { + .name = "bcm63xx_enetsw", + .num_resources = ARRAY_SIZE(enetsw_res), + .resource = enetsw_res, + .dev = { + .platform_data = &enetsw_pd, + }, +}; + +static int __init register_shared(void) +{ + int ret, chan_count; + + if (shared_device_registered) + return 0; + + shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA); + shared_res[0].end = shared_res[0].start; + shared_res[0].end += (RSET_ENETDMA_SIZE) - 1; + + if (BCMCPU_IS_6328() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) + chan_count = 32; + else + chan_count = 16; + + shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC); + shared_res[1].end = shared_res[1].start; + shared_res[1].end += RSET_ENETDMAC_SIZE(chan_count) - 1; + + shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS); + shared_res[2].end = shared_res[2].start; + shared_res[2].end += RSET_ENETDMAS_SIZE(chan_count) - 1; + + ret = platform_device_register(&bcm63xx_enet_shared_device); + if (ret) + return ret; + shared_device_registered = 1; + + return 0; +} + int __init bcm63xx_enet_register(int unit, const struct bcm63xx_enet_platform_data *pd) { @@ -117,24 +175,9 @@ int __init bcm63xx_enet_register(int unit, if (unit == 1 && BCMCPU_IS_6338()) return -ENODEV; - if (!shared_device_registered) { - shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA); - shared_res[0].end = shared_res[0].start; - shared_res[0].end += (RSET_ENETDMA_SIZE) - 1; - - shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC); - shared_res[1].end = shared_res[1].start; - shared_res[1].end += RSET_ENETDMAC_SIZE(16) - 1; - - shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS); - shared_res[2].end = shared_res[2].start; - shared_res[2].end += RSET_ENETDMAS_SIZE(16) - 1; - - ret = platform_device_register(&bcm63xx_enet_shared_device); - if (ret) - return ret; - shared_device_registered = 1; - } + ret = 
register_shared(); + if (ret) + return ret; if (unit == 0) { enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0); @@ -175,3 +218,37 @@ int __init bcm63xx_enet_register(int unit, return ret; return 0; } + +int __init +bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd) +{ + int ret; + + if (!BCMCPU_IS_6328() && !BCMCPU_IS_6362() && !BCMCPU_IS_6368()) + return -ENODEV; + + ret = register_shared(); + if (ret) + return ret; + + enetsw_res[0].start = bcm63xx_regset_address(RSET_ENETSW); + enetsw_res[0].end = enetsw_res[0].start; + enetsw_res[0].end += RSET_ENETSW_SIZE - 1; + enetsw_res[1].start = bcm63xx_get_irq_number(IRQ_ENETSW_RXDMA0); + enetsw_res[2].start = bcm63xx_get_irq_number(IRQ_ENETSW_TXDMA0); + if (!enetsw_res[2].start) + enetsw_res[2].start = -1; + + memcpy(bcm63xx_enetsw_device.dev.platform_data, pd, sizeof(*pd)); + + if (BCMCPU_IS_6328()) + enetsw_pd.num_ports = ENETSW_PORTS_6328; + else if (BCMCPU_IS_6362() || BCMCPU_IS_6368()) + enetsw_pd.num_ports = ENETSW_PORTS_6368; + + ret = platform_device_register(&bcm63xx_enetsw_device); + if (ret) + return ret; + + return 0; +} diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h index d53f611184b9..118e3c938841 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h @@ -39,7 +39,35 @@ struct bcm63xx_enet_platform_data { int phy_id, int reg, int val)); }; +/* + * on board ethernet switch platform data + */ +#define ENETSW_MAX_PORT 8 +#define ENETSW_PORTS_6328 5 /* 4 FE PHY + 1 RGMII */ +#define ENETSW_PORTS_6368 6 /* 4 FE PHY + 2 RGMII */ + +#define ENETSW_RGMII_PORT0 4 + +struct bcm63xx_enetsw_port { + int used; + int phy_id; + + int bypass_link; + int force_speed; + int force_duplex_full; + + const char *name; +}; + +struct bcm63xx_enetsw_platform_data { + char mac_addr[ETH_ALEN]; + int num_ports; + struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT]; +}; + int __init bcm63xx_enet_register(int unit, const struct bcm63xx_enet_platform_data *pd); +int bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd); + #endif /* ! 
BCM63XX_DEV_ENET_H_ */ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 3203fe49b34d..0a2121abb1a6 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -830,10 +830,60 @@ * _REG relative to RSET_ENETSW *************************************************************************/ +/* Port traffic control */ +#define ENETSW_PTCTRL_REG(x) (0x0 + (x)) +#define ENETSW_PTCTRL_RXDIS_MASK (1 << 0) +#define ENETSW_PTCTRL_TXDIS_MASK (1 << 1) + +/* Switch mode register */ +#define ENETSW_SWMODE_REG (0xb) +#define ENETSW_SWMODE_FWD_EN_MASK (1 << 1) + +/* IMP override Register */ +#define ENETSW_IMPOV_REG (0xe) +#define ENETSW_IMPOV_FORCE_MASK (1 << 7) +#define ENETSW_IMPOV_TXFLOW_MASK (1 << 5) +#define ENETSW_IMPOV_RXFLOW_MASK (1 << 4) +#define ENETSW_IMPOV_1000_MASK (1 << 3) +#define ENETSW_IMPOV_100_MASK (1 << 2) +#define ENETSW_IMPOV_FDX_MASK (1 << 1) +#define ENETSW_IMPOV_LINKUP_MASK (1 << 0) + +/* Port override Register */ +#define ENETSW_PORTOV_REG(x) (0x58 + (x)) +#define ENETSW_PORTOV_ENABLE_MASK (1 << 6) +#define ENETSW_PORTOV_TXFLOW_MASK (1 << 5) +#define ENETSW_PORTOV_RXFLOW_MASK (1 << 4) +#define ENETSW_PORTOV_1000_MASK (1 << 3) +#define ENETSW_PORTOV_100_MASK (1 << 2) +#define ENETSW_PORTOV_FDX_MASK (1 << 1) +#define ENETSW_PORTOV_LINKUP_MASK (1 << 0) + +/* MDIO control register */ +#define ENETSW_MDIOC_REG (0xb0) +#define ENETSW_MDIOC_EXT_MASK (1 << 16) +#define ENETSW_MDIOC_REG_SHIFT 20 +#define ENETSW_MDIOC_PHYID_SHIFT 25 +#define ENETSW_MDIOC_RD_MASK (1 << 30) +#define ENETSW_MDIOC_WR_MASK (1 << 31) + +/* MDIO data register */ +#define ENETSW_MDIOD_REG (0xb4) + +/* Global Management Configuration Register */ +#define ENETSW_GMCR_REG (0x200) +#define ENETSW_GMCR_RST_MIB_MASK (1 << 0) + /* MIB register */ #define ENETSW_MIB_REG(x) (0x2800 + (x) * 4) #define ENETSW_MIB_REG_COUNT 47 +/* Jumbo control register port mask register */ +#define ENETSW_JMBCTL_PORT_REG (0x4004) + +/* Jumbo control mib good frame register */ +#define ENETSW_JMBCTL_MAXSIZE_REG (0x4008) + /************************************************************************* * _REG relative to RSET_OHCI_PRIV diff --git a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h index 682bcf3b492a..d9aee1a833f3 100644 --- a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h +++ b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h @@ -24,6 +24,7 @@ struct board_info { /* enabled feature/device */ unsigned int has_enet0:1; unsigned int has_enet1:1; + unsigned int has_enetsw:1; unsigned int has_pci:1; unsigned int has_pccard:1; unsigned int has_ohci0:1; @@ -36,6 +37,7 @@ struct board_info { /* ethernet config */ struct bcm63xx_enet_platform_data enet0; struct bcm63xx_enet_platform_data enet1; + struct bcm63xx_enetsw_platform_data enetsw; /* USB config */ struct bcm63xx_usbd_platform_data usbd; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index edaf76dc2487..fbbfc4acd53f 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -59,8 +59,43 @@ static inline void enet_writel(struct bcm_enet_priv *priv, } /* - * io helpers to access shared registers + * io helpers to access switch registers */ +static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readl(priv->base + off); +} + +static inline void 
enetsw_writel(struct bcm_enet_priv *priv, + u32 val, u32 off) +{ + bcm_writel(val, priv->base + off); +} + +static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readw(priv->base + off); +} + +static inline void enetsw_writew(struct bcm_enet_priv *priv, + u16 val, u32 off) +{ + bcm_writew(val, priv->base + off); +} + +static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readb(priv->base + off); +} + +static inline void enetsw_writeb(struct bcm_enet_priv *priv, + u8 val, u32 off) +{ + bcm_writeb(val, priv->base + off); +} + + +/* io helpers to access shared registers */ static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) { return bcm_readl(bcm_enet_shared_base[0] + off); @@ -218,7 +253,6 @@ static int bcm_enet_refill_rx(struct net_device *dev) if (!skb) break; priv->rx_skb[desc_idx] = skb; - p = dma_map_single(&priv->pdev->dev, skb->data, priv->rx_skb_size, DMA_FROM_DEVICE); @@ -321,7 +355,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget) } /* recycle packet if it's marked as bad */ - if (unlikely(len_stat & DMADESC_ERR_MASK)) { + if (!priv->enet_is_sw && + unlikely(len_stat & DMADESC_ERR_MASK)) { dev->stats.rx_errors++; if (len_stat & DMADESC_OVSIZE_MASK) @@ -552,6 +587,26 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) goto out_unlock; } + /* pad small packets sent on a switch device */ + if (priv->enet_is_sw && skb->len < 64) { + int needed = 64 - skb->len; + char *data; + + if (unlikely(skb_tailroom(skb) < needed)) { + struct sk_buff *nskb; + + nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC); + if (!nskb) { + ret = NETDEV_TX_BUSY; + goto out_unlock; + } + dev_kfree_skb(skb); + skb = nskb; + } + data = skb_put(skb, needed); + memset(data, 0, needed); + } + /* point to the next available desc */ desc = &priv->tx_desc_cpu[priv->tx_curr_desc]; priv->tx_skb[priv->tx_curr_desc] = skb; @@ -959,9 +1014,9 @@ static int bcm_enet_open(struct net_device *dev) enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); /* set dma maximum burst len */ - enet_dmac_writel(priv, BCMENET_DMA_MAXBURST, + enet_dmac_writel(priv, priv->dma_maxburst, ENETDMAC_MAXBURST_REG(priv->rx_chan)); - enet_dmac_writel(priv, BCMENET_DMA_MAXBURST, + enet_dmac_writel(priv, priv->dma_maxburst, ENETDMAC_MAXBURST_REG(priv->tx_chan)); /* set correct transmit fifo watermark */ @@ -1567,7 +1622,7 @@ static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu) * it's appended */ priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, - BCMENET_DMA_MAXBURST * 4); + priv->dma_maxburst * 4); return 0; } @@ -1674,6 +1729,9 @@ static int bcm_enet_probe(struct platform_device *pdev) return -ENOMEM; priv = netdev_priv(dev); + priv->enet_is_sw = false; + priv->dma_maxburst = BCMENET_DMA_MAXBURST; + ret = compute_hw_mtu(priv, dev->mtu); if (ret) goto out; @@ -1898,60 +1956,916 @@ struct platform_driver bcm63xx_enet_driver = { }; /* - * reserve & remap memory space shared between all macs + * switch mii access callbacks */ -static int bcm_enet_shared_probe(struct platform_device *pdev) +static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv, + int ext, int phy_id, int location) { - struct resource *res; - void __iomem *p[3]; - unsigned int i; + u32 reg; + int ret; - memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base)); + spin_lock_bh(&priv->enetsw_mdio_lock); + enetsw_writel(priv, 0, ENETSW_MDIOC_REG); - for (i = 0; i < 3; i++) { - res = platform_get_resource(pdev, IORESOURCE_MEM, i); - p[i] = 
devm_ioremap_resource(&pdev->dev, res); - if (!p[i]) - return -ENOMEM; - } + reg = ENETSW_MDIOC_RD_MASK | + (phy_id << ENETSW_MDIOC_PHYID_SHIFT) | + (location << ENETSW_MDIOC_REG_SHIFT); - memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base)); + if (ext) + reg |= ENETSW_MDIOC_EXT_MASK; - return 0; + enetsw_writel(priv, reg, ENETSW_MDIOC_REG); + udelay(50); + ret = enetsw_readw(priv, ENETSW_MDIOD_REG); + spin_unlock_bh(&priv->enetsw_mdio_lock); + return ret; } -static int bcm_enet_shared_remove(struct platform_device *pdev) +static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv, + int ext, int phy_id, int location, + uint16_t data) { - return 0; + u32 reg; + + spin_lock_bh(&priv->enetsw_mdio_lock); + enetsw_writel(priv, 0, ENETSW_MDIOC_REG); + + reg = ENETSW_MDIOC_WR_MASK | + (phy_id << ENETSW_MDIOC_PHYID_SHIFT) | + (location << ENETSW_MDIOC_REG_SHIFT); + + if (ext) + reg |= ENETSW_MDIOC_EXT_MASK; + + reg |= data; + + enetsw_writel(priv, reg, ENETSW_MDIOC_REG); + udelay(50); + spin_unlock_bh(&priv->enetsw_mdio_lock); +} + +static inline int bcm_enet_port_is_rgmii(int portid) +{ + return portid >= ENETSW_RGMII_PORT0; } /* - * this "shared" driver is needed because both macs share a single - * address space + * enet sw PHY polling */ -struct platform_driver bcm63xx_enet_shared_driver = { - .probe = bcm_enet_shared_probe, - .remove = bcm_enet_shared_remove, - .driver = { - .name = "bcm63xx_enet_shared", - .owner = THIS_MODULE, - }, -}; +static void swphy_poll_timer(unsigned long data) +{ + struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data; + unsigned int i; + + for (i = 0; i < priv->num_ports; i++) { + struct bcm63xx_enetsw_port *port; + int val, j, up, advertise, lpa, lpa2, speed, duplex, media; + int external_phy = bcm_enet_port_is_rgmii(i); + u8 override; + + port = &priv->used_ports[i]; + if (!port->used) + continue; + + if (port->bypass_link) + continue; + + /* dummy read to clear */ + for (j = 0; j < 2; j++) + val = bcmenet_sw_mdio_read(priv, external_phy, + port->phy_id, MII_BMSR); + + if (val == 0xffff) + continue; + + up = (val & BMSR_LSTATUS) ? 1 : 0; + if (!(up ^ priv->sw_port_link[i])) + continue; + + priv->sw_port_link[i] = up; + + /* link changed */ + if (!up) { + dev_info(&priv->pdev->dev, "link DOWN on %s\n", + port->name); + enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, + ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | + ENETSW_PTCTRL_TXDIS_MASK, + ENETSW_PTCTRL_REG(i)); + continue; + } + + advertise = bcmenet_sw_mdio_read(priv, external_phy, + port->phy_id, MII_ADVERTISE); + + lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, + MII_LPA); + + lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, + MII_STAT1000); + + /* figure out media and duplex from advertise and LPA values */ + media = mii_nway_result(lpa & advertise); + duplex = (media & ADVERTISE_FULL) ? 1 : 0; + if (lpa2 & LPA_1000FULL) + duplex = 1; + + if (lpa2 & (LPA_1000FULL | LPA_1000HALF)) + speed = 1000; + else { + if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) + speed = 100; + else + speed = 10; + } + + dev_info(&priv->pdev->dev, + "link UP on %s, %dMbps, %s-duplex\n", + port->name, speed, duplex ? 
"full" : "half"); + + override = ENETSW_PORTOV_ENABLE_MASK | + ENETSW_PORTOV_LINKUP_MASK; + + if (speed == 1000) + override |= ENETSW_IMPOV_1000_MASK; + else if (speed == 100) + override |= ENETSW_IMPOV_100_MASK; + if (duplex) + override |= ENETSW_IMPOV_FDX_MASK; + + enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i)); + } + + priv->swphy_poll.expires = jiffies + HZ; + add_timer(&priv->swphy_poll); +} /* - * entry point + * open callback, allocate dma rings & buffers and start rx operation */ -static int __init bcm_enet_init(void) +static int bcm_enetsw_open(struct net_device *dev) { - int ret; + struct bcm_enet_priv *priv; + struct device *kdev; + int i, ret; + unsigned int size; + void *p; + u32 val; - ret = platform_driver_register(&bcm63xx_enet_shared_driver); - if (ret) - return ret; + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; - ret = platform_driver_register(&bcm63xx_enet_driver); + /* mask all interrupts and request them */ + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); + + ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, + IRQF_DISABLED, dev->name, dev); if (ret) - platform_driver_unregister(&bcm63xx_enet_shared_driver); + goto out_freeirq; + + if (priv->irq_tx != -1) { + ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, + IRQF_DISABLED, dev->name, dev); + if (ret) + goto out_freeirq_rx; + } + + /* allocate rx dma ring */ + size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(kdev, "cannot allocate rx ring %u\n", size); + ret = -ENOMEM; + goto out_freeirq_tx; + } + + memset(p, 0, size); + priv->rx_desc_alloc_size = size; + priv->rx_desc_cpu = p; + + /* allocate tx dma ring */ + size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(kdev, "cannot allocate tx ring\n"); + ret = -ENOMEM; + goto out_free_rx_ring; + } + + memset(p, 0, size); + priv->tx_desc_alloc_size = size; + priv->tx_desc_cpu = p; + + priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size, + GFP_KERNEL); + if (!priv->tx_skb) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out_free_tx_ring; + } + + priv->tx_desc_count = priv->tx_ring_size; + priv->tx_dirty_desc = 0; + priv->tx_curr_desc = 0; + spin_lock_init(&priv->tx_lock); + + /* init & fill rx ring with skbs */ + priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size, + GFP_KERNEL); + if (!priv->rx_skb) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out_free_tx_skb; + } + + priv->rx_desc_count = 0; + priv->rx_dirty_desc = 0; + priv->rx_curr_desc = 0; + + /* disable all ports */ + for (i = 0; i < priv->num_ports; i++) { + enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, + ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | + ENETSW_PTCTRL_TXDIS_MASK, + ENETSW_PTCTRL_REG(i)); + + priv->sw_port_link[i] = 0; + } + + /* reset mib */ + val = enetsw_readb(priv, ENETSW_GMCR_REG); + val |= ENETSW_GMCR_RST_MIB_MASK; + enetsw_writeb(priv, val, ENETSW_GMCR_REG); + mdelay(1); + val &= ~ENETSW_GMCR_RST_MIB_MASK; + enetsw_writeb(priv, val, ENETSW_GMCR_REG); + mdelay(1); + + /* force CPU port state */ + val = enetsw_readb(priv, ENETSW_IMPOV_REG); + val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK; + enetsw_writeb(priv, val, ENETSW_IMPOV_REG); 
+ + /* enable switch forward engine */ + val = enetsw_readb(priv, ENETSW_SWMODE_REG); + val |= ENETSW_SWMODE_FWD_EN_MASK; + enetsw_writeb(priv, val, ENETSW_SWMODE_REG); + + /* enable jumbo on all ports */ + enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG); + enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG); + + /* initialize flow control buffer allocation */ + enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMA_BUFALLOC_REG(priv->rx_chan)); + + if (bcm_enet_refill_rx(dev)) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out; + } + + /* write rx & tx ring addresses */ + enet_dmas_writel(priv, priv->rx_desc_dma, + ENETDMAS_RSTART_REG(priv->rx_chan)); + enet_dmas_writel(priv, priv->tx_desc_dma, + ENETDMAS_RSTART_REG(priv->tx_chan)); + + /* clear remaining state ram for rx & tx channel */ + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan)); + + /* set dma maximum burst len */ + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST_REG(priv->rx_chan)); + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST_REG(priv->tx_chan)); + + /* set flow control low/high threshold to 1/3 / 2/3 */ + val = priv->rx_ring_size / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); + val = (priv->rx_ring_size * 2) / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + + /* all set, enable mac and interrupts, start dma engine and + * kick rx dma channel + */ + wmb(); + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); + enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, + ENETDMAC_CHANCFG_REG(priv->rx_chan)); + + /* watch "packet transferred" interrupt in rx and tx */ + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR_REG(priv->rx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR_REG(priv->tx_chan)); + + /* make sure we enable napi before rx interrupt */ + napi_enable(&priv->napi); + + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK_REG(priv->rx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK_REG(priv->tx_chan)); + + netif_carrier_on(dev); + netif_start_queue(dev); + + /* apply override config for bypass_link ports here. 
*/ + for (i = 0; i < priv->num_ports; i++) { + struct bcm63xx_enetsw_port *port; + u8 override; + port = &priv->used_ports[i]; + if (!port->used) + continue; + + if (!port->bypass_link) + continue; + + override = ENETSW_PORTOV_ENABLE_MASK | + ENETSW_PORTOV_LINKUP_MASK; + + switch (port->force_speed) { + case 1000: + override |= ENETSW_IMPOV_1000_MASK; + break; + case 100: + override |= ENETSW_IMPOV_100_MASK; + break; + case 10: + break; + default: + pr_warn("invalid forced speed on port %s: assume 10\n", + port->name); + break; + } + + if (port->force_duplex_full) + override |= ENETSW_IMPOV_FDX_MASK; + + + enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i)); + } + + /* start phy polling timer */ + init_timer(&priv->swphy_poll); + priv->swphy_poll.function = swphy_poll_timer; + priv->swphy_poll.data = (unsigned long)priv; + priv->swphy_poll.expires = jiffies; + add_timer(&priv->swphy_poll); + return 0; + +out: + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + kfree(priv->rx_skb); + +out_free_tx_skb: + kfree(priv->tx_skb); + +out_free_tx_ring: + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + +out_free_rx_ring: + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + +out_freeirq_tx: + if (priv->irq_tx != -1) + free_irq(priv->irq_tx, dev); + +out_freeirq_rx: + free_irq(priv->irq_rx, dev); + +out_freeirq: + return ret; +} + +/* stop callback */ +static int bcm_enetsw_stop(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct device *kdev; + int i; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + + del_timer_sync(&priv->swphy_poll); + netif_stop_queue(dev); + napi_disable(&priv->napi); + del_timer_sync(&priv->rx_timeout); + + /* mask all interrupts */ + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); + + /* disable dma & mac */ + bcm_enet_disable_dma(priv, priv->tx_chan); + bcm_enet_disable_dma(priv, priv->rx_chan); + + /* force reclaim of all tx buffers */ + bcm_enet_tx_reclaim(dev, 1); + + /* free the rx skb ring */ + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + + /* free remaining allocated memory */ + kfree(priv->rx_skb); + kfree(priv->tx_skb); + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + if (priv->irq_tx != -1) + free_irq(priv->irq_tx, dev); + free_irq(priv->irq_rx, dev); + + return 0; +} + +/* try to sort out phy external status by walking the used_port field + * in the bcm_enet_priv structure. in case the phy address is not + * assigned to any physical port on the switch, assume it is external + * (and yell at the user). 
+ */ +static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id) +{ + int i; + + for (i = 0; i < priv->num_ports; ++i) { + if (!priv->used_ports[i].used) + continue; + if (priv->used_ports[i].phy_id == phy_id) + return bcm_enet_port_is_rgmii(i); + } + + printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n", + phy_id); + return 1; +} + +/* can't use bcmenet_sw_mdio_read directly as we need to sort out + * external/internal status of the given phy_id first. + */ +static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id, + int location) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + return bcmenet_sw_mdio_read(priv, + bcm_enetsw_phy_is_external(priv, phy_id), + phy_id, location); +} + +/* can't use bcmenet_sw_mdio_write directly as we need to sort out + * external/internal status of the given phy_id first. + */ +static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id, + int location, + int val) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id), + phy_id, location, val); +} + +static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct mii_if_info mii; + + mii.dev = dev; + mii.mdio_read = bcm_enetsw_mii_mdio_read; + mii.mdio_write = bcm_enetsw_mii_mdio_write; + mii.phy_id = 0; + mii.phy_id_mask = 0x3f; + mii.reg_num_mask = 0x1f; + return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); + +} + +static const struct net_device_ops bcm_enetsw_ops = { + .ndo_open = bcm_enetsw_open, + .ndo_stop = bcm_enetsw_stop, + .ndo_start_xmit = bcm_enet_start_xmit, + .ndo_change_mtu = bcm_enet_change_mtu, + .ndo_do_ioctl = bcm_enetsw_ioctl, +}; + + +static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = { + { "rx_packets", DEV_STAT(rx_packets), -1 }, + { "tx_packets", DEV_STAT(tx_packets), -1 }, + { "rx_bytes", DEV_STAT(rx_bytes), -1 }, + { "tx_bytes", DEV_STAT(tx_bytes), -1 }, + { "rx_errors", DEV_STAT(rx_errors), -1 }, + { "tx_errors", DEV_STAT(tx_errors), -1 }, + { "rx_dropped", DEV_STAT(rx_dropped), -1 }, + { "tx_dropped", DEV_STAT(tx_dropped), -1 }, + + { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT }, + { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST }, + { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST }, + { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT }, + { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 }, + { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 }, + { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 }, + { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 }, + { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023}, + { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max), + ETHSW_MIB_RX_1024_1522 }, + { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047), + ETHSW_MIB_RX_1523_2047 }, + { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095), + ETHSW_MIB_RX_2048_4095 }, + { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191), + ETHSW_MIB_RX_4096_8191 }, + { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728), + ETHSW_MIB_RX_8192_9728 }, + { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR }, + { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC }, + { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP }, + { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND }, + { "tx_pause", GEN_STAT(mib.tx_pause), 
ETHSW_MIB_RX_PAUSE }, + + { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT }, + { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST }, + { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT }, + { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT }, + { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE }, + { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS }, + +}; + +#define BCM_ENETSW_STATS_LEN \ + (sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats)) + +static void bcm_enetsw_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcm_enetsw_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static int bcm_enetsw_get_sset_count(struct net_device *netdev, + int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCM_ENETSW_STATS_LEN; + default: + return -EINVAL; + } +} + +static void bcm_enetsw_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + strncpy(drvinfo->driver, bcm_enet_driver_name, 32); + strncpy(drvinfo->version, bcm_enet_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, "bcm63xx", 32); + drvinfo->n_stats = BCM_ENETSW_STATS_LEN; +} + +static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcm_enet_priv *priv; + int i; + + priv = netdev_priv(netdev); + + for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + u32 lo, hi; + char *p; + int reg; + + s = &bcm_enetsw_gstrings_stats[i]; + + reg = s->mib_reg; + if (reg == -1) + continue; + + lo = enetsw_readl(priv, ENETSW_MIB_REG(reg)); + p = (char *)priv + s->stat_offset; + + if (s->sizeof_stat == sizeof(u64)) { + hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1)); + *(u64 *)p = ((u64)hi << 32 | lo); + } else { + *(u32 *)p = lo; + } + } + + for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + char *p; + + s = &bcm_enetsw_gstrings_stats[i]; + + if (s->mib_reg == -1) + p = (char *)&netdev->stats + s->stat_offset; + else + p = (char *)priv + s->stat_offset; + + data[i] = (s->sizeof_stat == sizeof(u64)) ? 
+ *(u64 *)p : *(u32 *)p; + } +} + +static void bcm_enetsw_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + /* rx/tx ring is actually only limited by memory */ + ering->rx_max_pending = 8192; + ering->tx_max_pending = 8192; + ering->rx_mini_max_pending = 0; + ering->rx_jumbo_max_pending = 0; + ering->rx_pending = priv->rx_ring_size; + ering->tx_pending = priv->tx_ring_size; +} + +static int bcm_enetsw_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bcm_enet_priv *priv; + int was_running; + + priv = netdev_priv(dev); + + was_running = 0; + if (netif_running(dev)) { + bcm_enetsw_stop(dev); + was_running = 1; + } + + priv->rx_ring_size = ering->rx_pending; + priv->tx_ring_size = ering->tx_pending; + + if (was_running) { + int err; + + err = bcm_enetsw_open(dev); + if (err) + dev_close(dev); + } + return 0; +} + +static struct ethtool_ops bcm_enetsw_ethtool_ops = { + .get_strings = bcm_enetsw_get_strings, + .get_sset_count = bcm_enetsw_get_sset_count, + .get_ethtool_stats = bcm_enetsw_get_ethtool_stats, + .get_drvinfo = bcm_enetsw_get_drvinfo, + .get_ringparam = bcm_enetsw_get_ringparam, + .set_ringparam = bcm_enetsw_set_ringparam, +}; + +/* allocate netdevice, request register memory and register device. */ +static int bcm_enetsw_probe(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + struct bcm63xx_enetsw_platform_data *pd; + struct resource *res_mem; + int ret, irq_rx, irq_tx; + + /* stop if shared driver failed, assume driver->probe will be + * called in the same order we register devices (correct ?) + */ + if (!bcm_enet_shared_base[0]) + return -ENODEV; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq_rx = platform_get_irq(pdev, 0); + irq_tx = platform_get_irq(pdev, 1); + if (!res_mem || irq_rx < 0) + return -ENODEV; + + ret = 0; + dev = alloc_etherdev(sizeof(*priv)); + if (!dev) + return -ENOMEM; + priv = netdev_priv(dev); + memset(priv, 0, sizeof(*priv)); + + /* initialize default and fetch platform data */ + priv->enet_is_sw = true; + priv->irq_rx = irq_rx; + priv->irq_tx = irq_tx; + priv->rx_ring_size = BCMENET_DEF_RX_DESC; + priv->tx_ring_size = BCMENET_DEF_TX_DESC; + priv->dma_maxburst = BCMENETSW_DMA_MAXBURST; + + pd = pdev->dev.platform_data; + if (pd) { + memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + memcpy(priv->used_ports, pd->used_ports, + sizeof(pd->used_ports)); + priv->num_ports = pd->num_ports; + } + + ret = compute_hw_mtu(priv, dev->mtu); + if (ret) + goto out; + + if (!request_mem_region(res_mem->start, resource_size(res_mem), + "bcm63xx_enetsw")) { + ret = -EBUSY; + goto out; + } + + priv->base = ioremap(res_mem->start, resource_size(res_mem)); + if (priv->base == NULL) { + ret = -ENOMEM; + goto out_release_mem; + } + + priv->mac_clk = clk_get(&pdev->dev, "enetsw"); + if (IS_ERR(priv->mac_clk)) { + ret = PTR_ERR(priv->mac_clk); + goto out_unmap; + } + clk_enable(priv->mac_clk); + + priv->rx_chan = 0; + priv->tx_chan = 1; + spin_lock_init(&priv->rx_lock); + + /* init rx timeout (used for oom) */ + init_timer(&priv->rx_timeout); + priv->rx_timeout.function = bcm_enet_refill_rx_timer; + priv->rx_timeout.data = (unsigned long)dev; + + /* register netdevice */ + dev->netdev_ops = &bcm_enetsw_ops; + netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); + SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops); + SET_NETDEV_DEV(dev, &pdev->dev); + + spin_lock_init(&priv->enetsw_mdio_lock); + + ret = 
register_netdev(dev); + if (ret) + goto out_put_clk; + + netif_carrier_off(dev); + platform_set_drvdata(pdev, dev); + priv->pdev = pdev; + priv->net_dev = dev; + + return 0; + +out_put_clk: + clk_put(priv->mac_clk); + +out_unmap: + iounmap(priv->base); + +out_release_mem: + release_mem_region(res_mem->start, resource_size(res_mem)); +out: + free_netdev(dev); + return ret; +} + + +/* exit func, stops hardware and unregisters netdevice */ +static int bcm_enetsw_remove(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + struct resource *res; + + /* stop netdevice */ + dev = platform_get_drvdata(pdev); + priv = netdev_priv(dev); + unregister_netdev(dev); + + /* release device resources */ + iounmap(priv->base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + + platform_set_drvdata(pdev, NULL); + free_netdev(dev); + return 0; +} + +struct platform_driver bcm63xx_enetsw_driver = { + .probe = bcm_enetsw_probe, + .remove = bcm_enetsw_remove, + .driver = { + .name = "bcm63xx_enetsw", + .owner = THIS_MODULE, + }, +}; + +/* reserve & remap memory space shared between all macs */ +static int bcm_enet_shared_probe(struct platform_device *pdev) +{ + struct resource *res; + void __iomem *p[3]; + unsigned int i; + + memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base)); + + for (i = 0; i < 3; i++) { + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + p[i] = devm_ioremap_resource(&pdev->dev, res); + if (!p[i]) + return -ENOMEM; + } + + memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base)); + + return 0; +} + +static int bcm_enet_shared_remove(struct platform_device *pdev) +{ + return 0; +} + +/* this "shared" driver is needed because both macs share a single + * address space + */ +struct platform_driver bcm63xx_enet_shared_driver = { + .probe = bcm_enet_shared_probe, + .remove = bcm_enet_shared_remove, + .driver = { + .name = "bcm63xx_enet_shared", + .owner = THIS_MODULE, + }, +}; + +/* entry point */ +static int __init bcm_enet_init(void) +{ + int ret; + + ret = platform_driver_register(&bcm63xx_enet_shared_driver); + if (ret) + return ret; + + ret = platform_driver_register(&bcm63xx_enet_driver); + if (ret) + platform_driver_unregister(&bcm63xx_enet_shared_driver); + + ret = platform_driver_register(&bcm63xx_enetsw_driver); + if (ret) { + platform_driver_unregister(&bcm63xx_enet_driver); + platform_driver_unregister(&bcm63xx_enet_shared_driver); + } return ret; } @@ -1959,6 +2873,7 @@ static int __init bcm_enet_init(void) static void __exit bcm_enet_exit(void) { platform_driver_unregister(&bcm63xx_enet_driver); + platform_driver_unregister(&bcm63xx_enetsw_driver); platform_driver_unregister(&bcm63xx_enet_shared_driver); } diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h index 133d5857b9e2..721ffbaef8d2 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h @@ -18,6 +18,7 @@ /* maximum burst len for dma (4 bytes unit) */ #define BCMENET_DMA_MAXBURST 16 +#define BCMENETSW_DMA_MAXBURST 8 /* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value * must be low enough so that a DMA transfer of above burst length can @@ -84,11 +85,60 @@ #define ETH_MIB_RX_CNTRL 54 +/* + * SW MIB Counters register definitions +*/ +#define ETHSW_MIB_TX_ALL_OCT 0 +#define ETHSW_MIB_TX_DROP_PKTS 2 +#define ETHSW_MIB_TX_QOS_PKTS 3 +#define ETHSW_MIB_TX_BRDCAST 4 +#define ETHSW_MIB_TX_MULT 
5 +#define ETHSW_MIB_TX_UNI 6 +#define ETHSW_MIB_TX_COL 7 +#define ETHSW_MIB_TX_1_COL 8 +#define ETHSW_MIB_TX_M_COL 9 +#define ETHSW_MIB_TX_DEF 10 +#define ETHSW_MIB_TX_LATE 11 +#define ETHSW_MIB_TX_EX_COL 12 +#define ETHSW_MIB_TX_PAUSE 14 +#define ETHSW_MIB_TX_QOS_OCT 15 + +#define ETHSW_MIB_RX_ALL_OCT 17 +#define ETHSW_MIB_RX_UND 19 +#define ETHSW_MIB_RX_PAUSE 20 +#define ETHSW_MIB_RX_64 21 +#define ETHSW_MIB_RX_65_127 22 +#define ETHSW_MIB_RX_128_255 23 +#define ETHSW_MIB_RX_256_511 24 +#define ETHSW_MIB_RX_512_1023 25 +#define ETHSW_MIB_RX_1024_1522 26 +#define ETHSW_MIB_RX_OVR 27 +#define ETHSW_MIB_RX_JAB 28 +#define ETHSW_MIB_RX_ALIGN 29 +#define ETHSW_MIB_RX_CRC 30 +#define ETHSW_MIB_RX_GD_OCT 31 +#define ETHSW_MIB_RX_DROP 33 +#define ETHSW_MIB_RX_UNI 34 +#define ETHSW_MIB_RX_MULT 35 +#define ETHSW_MIB_RX_BRDCAST 36 +#define ETHSW_MIB_RX_SA_CHANGE 37 +#define ETHSW_MIB_RX_FRAG 38 +#define ETHSW_MIB_RX_OVR_DISC 39 +#define ETHSW_MIB_RX_SYM 40 +#define ETHSW_MIB_RX_QOS_PKTS 41 +#define ETHSW_MIB_RX_QOS_OCT 42 +#define ETHSW_MIB_RX_1523_2047 44 +#define ETHSW_MIB_RX_2048_4095 45 +#define ETHSW_MIB_RX_4096_8191 46 +#define ETHSW_MIB_RX_8192_9728 47 + + struct bcm_enet_mib_counters { u64 tx_gd_octets; u32 tx_gd_pkts; u32 tx_all_octets; u32 tx_all_pkts; + u32 tx_unicast; u32 tx_brdcast; u32 tx_mult; u32 tx_64; @@ -97,7 +147,12 @@ struct bcm_enet_mib_counters { u32 tx_256_511; u32 tx_512_1023; u32 tx_1024_max; + u32 tx_1523_2047; + u32 tx_2048_4095; + u32 tx_4096_8191; + u32 tx_8192_9728; u32 tx_jab; + u32 tx_drop; u32 tx_ovr; u32 tx_frag; u32 tx_underrun; @@ -114,6 +169,7 @@ struct bcm_enet_mib_counters { u32 rx_all_octets; u32 rx_all_pkts; u32 rx_brdcast; + u32 rx_unicast; u32 rx_mult; u32 rx_64; u32 rx_65_127; @@ -197,6 +253,9 @@ struct bcm_enet_priv { /* number of dma desc in tx ring */ int tx_ring_size; + /* maximum dma burst size */ + int dma_maxburst; + /* cpu view of rx dma ring */ struct bcm_enet_desc *tx_desc_cpu; @@ -269,6 +328,18 @@ struct bcm_enet_priv { /* maximum hardware transmit/receive size */ unsigned int hw_mtu; + + bool enet_is_sw; + + /* port mapping for switch devices */ + int num_ports; + struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT]; + int sw_port_link[ENETSW_MAX_PORT]; + + /* used to poll switch port state */ + struct timer_list swphy_poll; + spinlock_t enetsw_mdio_lock; }; + #endif /* ! BCM63XX_ENET_H_ */ -- cgit v1.2.3 From 5a85e737f30ce7b939a34d93cca816400342208c Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Mon, 10 Jun 2013 11:40:20 +0300 Subject: ixgbe: add support for ndo_ll_poll Add the ixgbe driver code implementing ndo_ll_poll. Adds ndo_ll_poll method and locking between it and the napi poll. When receiving a packet we use skb_mark_ll to record the napi it came from. Add each napi to the napi_hash right after netif_napi_add(). Signed-off-by: Alexander Duyck Signed-off-by: Jesse Brandeburg Signed-off-by: Eliezer Tamir Reviewed-by: Eric Dumazet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 120 ++++++++++++++++++++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 2 + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 63 ++++++++++++-- 3 files changed, 177 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index ca932387a80f..e9d98629b583 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -52,6 +52,8 @@ #include #endif +#include + /* common prefix used by pr_<> macros */ #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -356,9 +358,127 @@ struct ixgbe_q_vector { struct rcu_head rcu; /* to avoid race with update stats on free */ char name[IFNAMSIZ + 9]; +#ifdef CONFIG_NET_LL_RX_POLL + unsigned int state; +#define IXGBE_QV_STATE_IDLE 0 +#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */ +#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */ +#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL) +#define IXGBE_QV_STATE_NAPI_YIELD 4 /* NAPI yielded this QV */ +#define IXGBE_QV_STATE_POLL_YIELD 8 /* poll yielded this QV */ +#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD) +#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD) + spinlock_t lock; +#endif /* CONFIG_NET_LL_RX_POLL */ + /* for dynamic allocation of rings associated with this q_vector */ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; }; +#ifdef CONFIG_NET_LL_RX_POLL +static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) +{ + + spin_lock_init(&q_vector->lock); + q_vector->state = IXGBE_QV_STATE_IDLE; +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) +{ + int rc = true; + spin_lock(&q_vector->lock); + if (q_vector->state & IXGBE_QV_LOCKED) { + WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI); + q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD; + rc = false; + } else + /* we don't care if someone yielded */ + q_vector->state = IXGBE_QV_STATE_NAPI; + spin_unlock(&q_vector->lock); + return rc; +} + +/* returns true is someone tried to get the qv while napi had it */ +static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +{ + int rc = false; + spin_lock(&q_vector->lock); + WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL | + IXGBE_QV_STATE_NAPI_YIELD)); + + if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) + rc = true; + q_vector->state = IXGBE_QV_STATE_IDLE; + spin_unlock(&q_vector->lock); + return rc; +} + +/* called from ixgbe_low_latency_poll() */ +static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) +{ + int rc = true; + spin_lock_bh(&q_vector->lock); + if ((q_vector->state & IXGBE_QV_LOCKED)) { + q_vector->state |= IXGBE_QV_STATE_POLL_YIELD; + rc = false; + } else + /* preserve yield marks */ + q_vector->state |= IXGBE_QV_STATE_POLL; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +{ + int rc = false; + spin_lock_bh(&q_vector->lock); + WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI)); + + if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) + rc = true; + q_vector->state = IXGBE_QV_STATE_IDLE; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static 
inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) +{ + WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED)); + return q_vector->state & IXGBE_QV_USER_PEND; +} +#else /* CONFIG_NET_LL_RX_POLL */ +static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) +{ +} + +static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) +{ + return true; +} + +static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) +{ + return false; +} +#endif /* CONFIG_NET_LL_RX_POLL */ + #ifdef CONFIG_IXGBE_HWMON #define IXGBE_HWMON_TYPE_LOC 0 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index ef5f7a678ce1..90b4e1089ecc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -811,6 +811,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* initialize NAPI */ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll, 64); + napi_hash_add(&q_vector->napi); /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; @@ -931,6 +932,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) adapter->rx_ring[ring->queue_index] = NULL; adapter->q_vector[v_idx] = NULL; + napi_hash_del(&q_vector->napi); netif_napi_del(&q_vector->napi); /* diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d30fbdd81fca..9a7dc405e7ed 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1504,7 +1504,9 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, { struct ixgbe_adapter *adapter = q_vector->adapter; - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) + if (ixgbe_qv_ll_polling(q_vector)) + netif_receive_skb(skb); + else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) napi_gro_receive(&q_vector->napi, skb); else netif_rx(skb); @@ -1892,9 +1894,9 @@ dma_sync: * expensive overhead for IOMMU access this provides a means of avoiding * it by maintaining the mapping of the page to the syste. 
* - * Returns true if all work is completed without reaching budget + * Returns amount of work completed **/ -static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, +static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, const int budget) { @@ -1976,6 +1978,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } #endif /* IXGBE_FCOE */ + skb_mark_ll(skb, &q_vector->napi); ixgbe_rx_skb(q_vector, skb); /* update budget accounting */ @@ -1992,9 +1995,37 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, if (cleaned_count) ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); - return (total_rx_packets < budget); + return total_rx_packets; } +#ifdef CONFIG_NET_LL_RX_POLL +/* must be called with local_bh_disable()d */ +static int ixgbe_low_latency_recv(struct napi_struct *napi) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; + int found = 0; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!ixgbe_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + ixgbe_for_each_ring(ring, q_vector->rx) { + found = ixgbe_clean_rx_irq(q_vector, ring, 4); + if (found) + break; + } + + ixgbe_qv_unlock_poll(q_vector); + + return found; +} +#endif /* CONFIG_NET_LL_RX_POLL */ + /** * ixgbe_configure_msix - Configure MSI-X hardware * @adapter: board private structure @@ -2550,6 +2581,9 @@ int ixgbe_poll(struct napi_struct *napi, int budget) ixgbe_for_each_ring(ring, q_vector->tx) clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); + if (!ixgbe_qv_lock_napi(q_vector)) + return budget; + /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling */ if (q_vector->rx.count > 1) @@ -2558,9 +2592,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget) per_ring_budget = budget; ixgbe_for_each_ring(ring, q_vector->rx) - clean_complete &= ixgbe_clean_rx_irq(q_vector, ring, - per_ring_budget); + clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring, + per_ring_budget) < per_ring_budget); + ixgbe_qv_unlock_napi(q_vector); /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; @@ -3747,16 +3782,25 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) { int q_idx; - for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + ixgbe_qv_init_lock(adapter->q_vector[q_idx]); napi_enable(&adapter->q_vector[q_idx]->napi); + } } static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) { int q_idx; - for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + local_bh_disable(); /* for ixgbe_qv_lock_napi() */ + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { napi_disable(&adapter->q_vector[q_idx]->napi); + while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + mdelay(1); + } + } + local_bh_enable(); } #ifdef CONFIG_IXGBE_DCB @@ -7177,6 +7221,9 @@ static const struct net_device_ops ixgbe_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif +#ifdef CONFIG_NET_LL_RX_POLL + .ndo_ll_poll = ixgbe_low_latency_recv, +#endif #ifdef IXGBE_FCOE .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, -- cgit v1.2.3 From 7e15b90ff9b796d14aa0d1aabc0dbb54632c673c Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Mon, 
10 Jun 2013 11:40:31 +0300 Subject: ixgbe: add extra stats for ndo_ll_poll Add additional statistics to the ixgbe driver for ndo_ll_poll Defined under LL_EXTENDED_STATS Signed-off-by: Alexander Duyck Signed-off-by: Jesse Brandeburg Signed-off-by: Eliezer Tamir Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 14 +++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 40 ++++++++++++++++++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6 ++++ 3 files changed, 60 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index e9d98629b583..fb098b46c6a6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -54,6 +54,9 @@ #include +#ifdef CONFIG_NET_LL_RX_POLL +#define LL_EXTENDED_STATS +#endif /* common prefix used by pr_<> macros */ #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -184,6 +187,11 @@ struct ixgbe_rx_buffer { struct ixgbe_queue_stats { u64 packets; u64 bytes; +#ifdef LL_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif /* LL_EXTENDED_STATS */ }; struct ixgbe_tx_queue_stats { @@ -391,6 +399,9 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI); q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD; rc = false; +#ifdef LL_EXTENDED_STATS + q_vector->tx.ring->stats.yields++; +#endif } else /* we don't care if someone yielded */ q_vector->state = IXGBE_QV_STATE_NAPI; @@ -421,6 +432,9 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) if ((q_vector->state & IXGBE_QV_LOCKED)) { q_vector->state |= IXGBE_QV_STATE_POLL_YIELD; rc = false; +#ifdef LL_EXTENDED_STATS + q_vector->rx.ring->stats.yields++; +#endif } else /* preserve yield marks */ q_vector->state |= IXGBE_QV_STATE_POLL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index d3754722adb4..24e2e7aafda2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1054,6 +1054,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = 0; data[i+1] = 0; i += 2; +#ifdef LL_EXTENDED_STATS + data[i] = 0; + data[i+1] = 0; + data[i+2] = 0; + i += 3; +#endif continue; } @@ -1063,6 +1069,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); i += 2; +#ifdef LL_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif } for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { ring = adapter->rx_ring[j]; @@ -1070,6 +1082,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = 0; data[i+1] = 0; i += 2; +#ifdef LL_EXTENDED_STATS + data[i] = 0; + data[i+1] = 0; + data[i+2] = 0; + i += 3; +#endif continue; } @@ -1079,6 +1097,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i+1] = ring->stats.bytes; } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); i += 2; +#ifdef LL_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif } for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { @@ -1115,12 +1139,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", i); p += 
ETH_GSTRING_LEN; +#ifdef LL_EXTENDED_STATS + sprintf(p, "tx_q_%u_napi_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_q_%u_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_q_%u_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* LL_EXTENDED_STATS */ } for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { sprintf(p, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; +#ifdef LL_EXTENDED_STATS + sprintf(p, "rx_q_%u_ll_poll_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_q_%u_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_q_%u_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* LL_EXTENDED_STATS */ } for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { sprintf(p, "tx_pb_%u_pxon", i); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9a7dc405e7ed..047ebaaf0141 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2016,6 +2016,12 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi) ixgbe_for_each_ring(ring, q_vector->rx) { found = ixgbe_clean_rx_irq(q_vector, ring, 4); +#ifdef LL_EXTENDED_STATS + if (found) + ring->stats.cleaned += found; + else + ring->stats.misses++; +#endif if (found) break; } -- cgit v1.2.3 From afd6eae13cc4229e25d59334cdc46d042b24a4a5 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 10 Jun 2013 00:16:52 +0400 Subject: 3c59x: consolidate error cleanup in vortex_init_one() The PCI driver's probe() method duplicates the error cleanup code each time it has to do error exit. Consolidate the error cleanup code in one place and use *goto* to jump to the right places. Signed-off-by: Sergei Shtylyov Acked-by: Steffen Klassert Signed-off-by: David S. Miller --- drivers/net/ethernet/3com/3c59x.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 30e74211a755..ad5272b348f0 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1012,10 +1012,8 @@ static int vortex_init_one(struct pci_dev *pdev, goto out; rc = pci_request_regions(pdev, DRV_NAME); - if (rc < 0) { - pci_disable_device(pdev); - goto out; - } + if (rc < 0) + goto out_disable; unit = vortex_cards_found; @@ -1032,23 +1030,24 @@ static int vortex_init_one(struct pci_dev *pdev, if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ ioaddr = pci_iomap(pdev, 0, 0); if (!ioaddr) { - pci_release_regions(pdev); - pci_disable_device(pdev); rc = -ENOMEM; - goto out; + goto out_release; } rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, ent->driver_data, unit); - if (rc < 0) { - pci_iounmap(pdev, ioaddr); - pci_release_regions(pdev); - pci_disable_device(pdev); - goto out; - } + if (rc < 0) + goto out_iounmap; vortex_cards_found++; + goto out; +out_iounmap: + pci_iounmap(pdev, ioaddr); +out_release: + pci_release_regions(pdev); +out_disable: + pci_disable_device(pdev); out: return rc; } -- cgit v1.2.3 From 941e173a538c58f7f60e3f70ab0e35d51563405b Mon Sep 17 00:00:00 2001 From: Tushar Behera Date: Mon, 10 Jun 2013 17:05:05 +0530 Subject: net: fec: Convert to use devm_ioremap_resource Commit 75096579c3ac ("lib: devres: Introduce devm_ioremap_resource()") introduced devm_ioremap_resource() and deprecated the use of devm_request_and_ioremap(). Signed-off-by: Tushar Behera CC: netdev@vger.kernel.org CC: "David S. Miller" Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fec_main.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index fa4a68fc01cb..c540055e4f4f 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1876,17 +1876,17 @@ fec_probe(struct platform_device *pdev) (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; - fep->hwp = devm_request_and_ioremap(&pdev->dev, r); + fep->hwp = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(fep->hwp)) { + ret = PTR_ERR(fep->hwp); + goto failed_ioremap; + } + fep->pdev = pdev; fep->dev_id = dev_id++; fep->bufdesc_ex = 0; - if (!fep->hwp) { - ret = -ENOMEM; - goto failed_ioremap; - } - platform_set_drvdata(pdev, ndev); ret = of_get_phy_mode(pdev->dev.of_node); -- cgit v1.2.3 From eed5d29d7818cc7a84e60123555cae154e5b4a73 Mon Sep 17 00:00:00 2001 From: Tushar Behera Date: Mon, 10 Jun 2013 17:05:06 +0530 Subject: net: emaclite: Convert to use devm_ioremap_resource Commit 75096579c3ac ("lib: devres: Introduce devm_ioremap_resource()") introduced devm_ioremap_resource() and deprecated the use of devm_request_and_ioremap(). Signed-off-by: Tushar Behera CC: netdev@vger.kernel.org CC: "David S. Miller" CC: Michal Simek Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 1cd131bde680..fd4dbdae5331 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1159,9 +1159,11 @@ static int xemaclite_of_probe(struct platform_device *ofdev) ndev->irq = res->start; res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); - lp->base_addr = devm_request_and_ioremap(&ofdev->dev, res); - if (!lp->base_addr) + lp->base_addr = devm_ioremap_resource(&ofdev->dev, res); + if (IS_ERR(lp->base_addr)) { + rc = PTR_ERR(lp->base_addr); goto error; + } ndev->mem_start = res->start; ndev->mem_end = res->end; -- cgit v1.2.3 From 6602041b8343fcdb6433cc72ca6cb6c2e189da6d Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 10 Jun 2013 18:03:17 +0100 Subject: sfc: Store port number in private data, not net_device::dev_id We should not use net_device::dev_id to indicate the port number, as this affects the way the local part of IPv6 addresses is normally generated. This field was intended for use where multiple devices may share a single assigned MAC address and need to have different IPv6 addresses. Siena's two ports each have their own MAC addresses. Signed-off-by: Ben Hutchings Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/net_driver.h | 3 ++- drivers/net/ethernet/sfc/siena.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 39d6bd77f015..9a2914cfd345 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -784,6 +784,7 @@ struct efx_nic { char name[IFNAMSIZ]; struct pci_dev *pci_dev; + unsigned int port_num; const struct efx_nic_type *type; int legacy_irq; bool legacy_irq_enabled; @@ -916,7 +917,7 @@ static inline int efx_dev_registered(struct efx_nic *efx) static inline unsigned int efx_port_num(struct efx_nic *efx) { - return efx->net_dev->dev_id; + return efx->port_num; } /** diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 51669244d154..8c91775e3c5f 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -304,7 +304,7 @@ static int siena_probe_nic(struct efx_nic *efx) } efx_reado(efx, &reg, FR_AZ_CS_DEBUG); - efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; + efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; efx_mcdi_init(efx); -- cgit v1.2.3 From 5eaedf31319d5f80eaaee1eec8dd18c0b452f0d1 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Mon, 10 Jun 2013 20:19:48 +0300 Subject: gianfar: Add backwards compatible Single Queue mode polling Older Single Queue (SQ_SG_MODE) devices like TSEC (i.e. mpc83xx) don't feature the frame receive indication bits (RXF) in RSTAT. For these and for the rest of the SQ_SG_MODE devices, provide the appropriate polling routine that handles a single pair of Rx/Tx BD rings, removing the overhead incurred by the multiple queues/ multiple interrupt group devices (veTSEC/ eTSEC2.0 devices). So this is primarily a fix for the TSEC devices. Signed-off-by: Claudiu Manoil Signed-off-by: David S.
Miller --- drivers/net/ethernet/freescale/gianfar.c | 51 ++++++++++++++++++++++++++++++-- 1 file changed, 49 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 14f06947e70d..8d2db7b808b7 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -128,6 +128,7 @@ static void gfar_set_multi(struct net_device *dev); static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); static void gfar_configure_serdes(struct net_device *dev); static int gfar_poll(struct napi_struct *napi, int budget); +static int gfar_poll_sq(struct napi_struct *napi, int budget); #ifdef CONFIG_NET_POLL_CONTROLLER static void gfar_netpoll(struct net_device *dev); #endif @@ -1038,9 +1039,13 @@ static int gfar_probe(struct platform_device *ofdev) dev->ethtool_ops = &gfar_ethtool_ops; /* Register for napi ...We are registering NAPI for each grp */ - for (i = 0; i < priv->num_grps; i++) - netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, + if (priv->mode == SQ_SG_MODE) + netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq, GFAR_DEV_WEIGHT); + else + for (i = 0; i < priv->num_grps; i++) + netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, + GFAR_DEV_WEIGHT); if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | @@ -2823,6 +2828,48 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) return howmany; } +static int gfar_poll_sq(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi); + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0]; + struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0]; + int work_done = 0; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived + */ + gfar_write(®s->ievent, IEVENT_RTX_MASK); + + /* run Tx cleanup to completion */ + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) + gfar_clean_tx_ring(tx_queue); + + work_done = gfar_clean_rx_ring(rx_queue, budget); + + if (work_done < budget) { + napi_complete(napi); + /* Clear the halt bit in RSTAT */ + gfar_write(®s->rstat, gfargrp->rstat); + + gfar_write(®s->imask, IMASK_DEFAULT); + + /* If we are coalescing interrupts, update the timer + * Otherwise, clear it + */ + gfar_write(®s->txic, 0); + if (likely(tx_queue->txcoalescing)) + gfar_write(®s->txic, tx_queue->txic); + + gfar_write(®s->rxic, 0); + if (unlikely(rx_queue->rxcoalescing)) + gfar_write(®s->rxic, rx_queue->rxic); + } + + return work_done; +} + static int gfar_poll(struct napi_struct *napi, int budget) { struct gfar_priv_grp *gfargrp = -- cgit v1.2.3 From c45640e4a9f54f2f6f4bc51c2ba9644ffb3babde Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Sat, 20 Apr 2013 21:51:08 -0500 Subject: ibmebus: convert of_platform_driver to platform_driver ibmebus is the last remaining user of of_platform_driver and the conversion to a regular platform driver is trivial. 
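In practice the conversion amounts to dropping the of_device_id argument from the probe() callback and switching the driver structure from of_platform_driver to platform_driver, while the match table stays in driver.of_match_table. A minimal before/after sketch with a hypothetical "foo" driver (foo_probe, foo_remove and foo_device_table are placeholders for illustration only; the real changes here touch the ibmebus bus code and the ehca and ehea drivers, as shown in the diff below):

/* before: of_platform_driver with a two-argument probe callback */
static int foo_probe(struct platform_device *pdev,
                     const struct of_device_id *id);

static struct of_platform_driver foo_driver = {
        .probe  = foo_probe,
        .remove = foo_remove,
        .driver = {
                .name           = "foo",
                .owner          = THIS_MODULE,
                .of_match_table = foo_device_table,
        },
};

/* after: a regular platform_driver; probe() loses the of_device_id
 * argument and may call of_match_device() itself if it still needs
 * the matched entry
 */
static int foo_probe(struct platform_device *pdev);

static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .remove = foo_remove,
        .driver = {
                .name           = "foo",
                .owner          = THIS_MODULE,
                .of_match_table = foo_device_table,
        },
};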
Signed-off-by: Rob Herring Acked-by: Arnd Bergmann Tested-by: Benjamin Herrenschmidt Signed-off-by: Grant Likely --- arch/powerpc/include/asm/ibmebus.h | 4 ++-- arch/powerpc/kernel/ibmebus.c | 22 ++++++++++------------ drivers/infiniband/hw/ehca/ehca_main.c | 5 ++--- drivers/net/ethernet/ibm/ehea/ehea_main.c | 8 +++----- 4 files changed, 17 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/arch/powerpc/include/asm/ibmebus.h b/arch/powerpc/include/asm/ibmebus.h index 1a9d9aea21fa..088f95b2e14f 100644 --- a/arch/powerpc/include/asm/ibmebus.h +++ b/arch/powerpc/include/asm/ibmebus.h @@ -48,8 +48,8 @@ extern struct bus_type ibmebus_bus_type; -int ibmebus_register_driver(struct of_platform_driver *drv); -void ibmebus_unregister_driver(struct of_platform_driver *drv); +int ibmebus_register_driver(struct platform_driver *drv); +void ibmebus_unregister_driver(struct platform_driver *drv); int ibmebus_request_irq(u32 ist, irq_handler_t handler, unsigned long irq_flags, const char *devname, diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 8220baa46faf..16a7c2326d48 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -205,7 +205,7 @@ static int ibmebus_create_devices(const struct of_device_id *matches) return ret; } -int ibmebus_register_driver(struct of_platform_driver *drv) +int ibmebus_register_driver(struct platform_driver *drv) { /* If the driver uses devices that ibmebus doesn't know, add them */ ibmebus_create_devices(drv->driver.of_match_table); @@ -215,7 +215,7 @@ int ibmebus_register_driver(struct of_platform_driver *drv) } EXPORT_SYMBOL(ibmebus_register_driver); -void ibmebus_unregister_driver(struct of_platform_driver *drv) +void ibmebus_unregister_driver(struct platform_driver *drv) { driver_unregister(&drv->driver); } @@ -338,11 +338,10 @@ static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) static int ibmebus_bus_device_probe(struct device *dev) { int error = -ENODEV; - struct of_platform_driver *drv; + struct platform_driver *drv; struct platform_device *of_dev; - const struct of_device_id *match; - drv = to_of_platform_driver(dev->driver); + drv = to_platform_driver(dev->driver); of_dev = to_platform_device(dev); if (!drv->probe) @@ -350,9 +349,8 @@ static int ibmebus_bus_device_probe(struct device *dev) of_dev_get(of_dev); - match = of_match_device(drv->driver.of_match_table, dev); - if (match) - error = drv->probe(of_dev, match); + if (of_driver_match_device(dev, dev->driver)) + error = drv->probe(of_dev); if (error) of_dev_put(of_dev); @@ -362,7 +360,7 @@ static int ibmebus_bus_device_probe(struct device *dev) static int ibmebus_bus_device_remove(struct device *dev) { struct platform_device *of_dev = to_platform_device(dev); - struct of_platform_driver *drv = to_of_platform_driver(dev->driver); + struct platform_driver *drv = to_platform_driver(dev->driver); if (dev->driver && drv->remove) drv->remove(of_dev); @@ -372,7 +370,7 @@ static int ibmebus_bus_device_remove(struct device *dev) static void ibmebus_bus_device_shutdown(struct device *dev) { struct platform_device *of_dev = to_platform_device(dev); - struct of_platform_driver *drv = to_of_platform_driver(dev->driver); + struct platform_driver *drv = to_platform_driver(dev->driver); if (dev->driver && drv->shutdown) drv->shutdown(of_dev); @@ -419,7 +417,7 @@ struct device_attribute ibmebus_bus_device_attrs[] = { static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg) { struct 
platform_device *of_dev = to_platform_device(dev); - struct of_platform_driver *drv = to_of_platform_driver(dev->driver); + struct platform_driver *drv = to_platform_driver(dev->driver); int ret = 0; if (dev->driver && drv->suspend) @@ -430,7 +428,7 @@ static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg) static int ibmebus_bus_legacy_resume(struct device *dev) { struct platform_device *of_dev = to_platform_device(dev); - struct of_platform_driver *drv = to_of_platform_driver(dev->driver); + struct platform_driver *drv = to_platform_driver(dev->driver); int ret = 0; if (dev->driver && drv->resume) diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index f8a62918a88d..982e3efd98d3 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -713,8 +713,7 @@ static struct attribute_group ehca_dev_attr_grp = { .attrs = ehca_dev_attrs }; -static int ehca_probe(struct platform_device *dev, - const struct of_device_id *id) +static int ehca_probe(struct platform_device *dev) { struct ehca_shca *shca; const u64 *handle; @@ -937,7 +936,7 @@ static struct of_device_id ehca_device_table[] = }; MODULE_DEVICE_TABLE(of, ehca_device_table); -static struct of_platform_driver ehca_driver = { +static struct platform_driver ehca_driver = { .probe = ehca_probe, .remove = ehca_remove, .driver = { diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 90ea0b1673ca..de2969cae262 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -98,8 +98,7 @@ static struct ehea_fw_handle_array ehea_fw_handles; static struct ehea_bcmc_reg_array ehea_bcmc_regs; -static int ehea_probe_adapter(struct platform_device *dev, - const struct of_device_id *id); +static int ehea_probe_adapter(struct platform_device *dev); static int ehea_remove(struct platform_device *dev); @@ -112,7 +111,7 @@ static struct of_device_id ehea_device_table[] = { }; MODULE_DEVICE_TABLE(of, ehea_device_table); -static struct of_platform_driver ehea_driver = { +static struct platform_driver ehea_driver = { .driver = { .name = "ehea", .owner = THIS_MODULE, @@ -3251,8 +3250,7 @@ static void ehea_remove_device_sysfs(struct platform_device *dev) device_remove_file(&dev->dev, &dev_attr_remove_port); } -static int ehea_probe_adapter(struct platform_device *dev, - const struct of_device_id *id) +static int ehea_probe_adapter(struct platform_device *dev) { struct ehea_adapter *adapter; const u64 *adapter_handle; -- cgit v1.2.3 From 8f728d7934c77f63e89abcc96b46a7a98416d5c1 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Thu, 13 Jun 2013 00:55:34 +0400 Subject: sh_eth: split 'sh_eth_netdev_ops' Commit 9f86134155047720a3685cda21467f68695152d2 (sh_eth: remove SH_ETH_HAS_TSU) removes 'const' from 'sh_eth_netdev_ops' and modifies it in case TSU registers are present. I originally suggested to Iwamatsu-san to split this structure in two instead, and afterwards Dave M. suggested doing the same. Split 'sh_eth_netdev_ops_tsu' from 'sh_eth_netdev_ops', making both 'const', and assigning 'ndev->netdev_ops' depending on the presence of TSU registers. Signed-off-by: Sergei Shtylyov Signed-off-by: David S.
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 43d8490cfe0d..67a9e962f142 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2486,7 +2486,7 @@ static const u16 *sh_eth_get_register_offset(int register_type) return reg_offset; } -static struct net_device_ops sh_eth_netdev_ops = { +static const struct net_device_ops sh_eth_netdev_ops = { .ndo_open = sh_eth_open, .ndo_stop = sh_eth_close, .ndo_start_xmit = sh_eth_start_xmit, @@ -2498,6 +2498,21 @@ static struct net_device_ops sh_eth_netdev_ops = { .ndo_change_mtu = eth_change_mtu, }; +static const struct net_device_ops sh_eth_netdev_ops_tsu = { + .ndo_open = sh_eth_open, + .ndo_stop = sh_eth_close, + .ndo_start_xmit = sh_eth_start_xmit, + .ndo_get_stats = sh_eth_get_stats, + .ndo_set_rx_mode = sh_eth_set_multicast_list, + .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, + .ndo_tx_timeout = sh_eth_tx_timeout, + .ndo_do_ioctl = sh_eth_do_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + static int sh_eth_drv_probe(struct platform_device *pdev) { int ret, devno = 0; @@ -2568,14 +2583,10 @@ static int sh_eth_drv_probe(struct platform_device *pdev) sh_eth_set_default_cpu_data(mdp->cd); /* set function */ - if (mdp->cd->tsu) { - sh_eth_netdev_ops.ndo_set_rx_mode = sh_eth_set_multicast_list; - sh_eth_netdev_ops.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid; - sh_eth_netdev_ops.ndo_vlan_rx_kill_vid = - sh_eth_vlan_rx_kill_vid; - } - - ndev->netdev_ops = &sh_eth_netdev_ops; + if (mdp->cd->tsu) + ndev->netdev_ops = &sh_eth_netdev_ops_tsu; + else + ndev->netdev_ops = &sh_eth_netdev_ops; SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); ndev->watchdog_timeo = TX_TIMEOUT; -- cgit v1.2.3 From 8c367fcbe6549195d2eb11e62bea233f811aad41 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 10 Jun 2013 17:34:13 +0100 Subject: cxgb4: Do not set net_device::dev_id to VI index net_device::dev_id should not be used merely to indicate a VI index, as it affects the way the local part of IPv6 addresses is normally generated. This field was intended for use where multiple devices may share a single assigned MAC address and need to have different IPv6 addresses. T4 VIs each have their own MAC address. Signed-off-by: Ben Hutchings Acked-by: Dimitris Michailidis Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index bff89a437b76..4cbb2f9850be 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3782,7 +3782,6 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) p->lport = j; p->rss_size = rss_size; memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); - adap->port[i]->dev_id = j; ret = ntohl(c.u.info.lstatus_to_modtype); p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? 
-- cgit v1.2.3 From 8e994402ad5e6ae3d391c0935f9f1dc2eeb92a5e Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 12 Jun 2013 03:07:29 +0400 Subject: sh_eth: remove '__maybe_unused' annotations Now that the SoC specific support is no longer done with the help of #ifdef'fery, we no longer need '__maybe_unused' annotations on sh_eth_select_mii() and sh_eth_set_duplex()... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 67a9e962f142..a2eadc070328 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -321,7 +321,7 @@ static int sh_eth_is_gether(struct sh_eth_private *mdp) return 0; } -static void __maybe_unused sh_eth_select_mii(struct net_device *ndev) +static void sh_eth_select_mii(struct net_device *ndev) { u32 value = 0x0; struct sh_eth_private *mdp = netdev_priv(ndev); @@ -345,7 +345,7 @@ static void __maybe_unused sh_eth_select_mii(struct net_device *ndev) sh_eth_write(ndev, value, RMII_MII); } -static void __maybe_unused sh_eth_set_duplex(struct net_device *ndev) +static void sh_eth_set_duplex(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); -- cgit v1.2.3 From 3dc6475c0c9e55ac7f053ad6b8b398e779954545 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 12 Jun 2013 20:53:05 +0100 Subject: bcm63xx_enet: add support for Broadcom BCM6345 Ethernet This patch adds support for the Broadcom BCM6345 SoC Ethernet. BCM6345 has a slightly different and older DMA engine which requires the following modifications: - the width of the DMA channels on BCM6345 is 64 bytes vs 16 bytes, which means that the helpers enet_dma{c,s} need to account for this channel width and we can no longer use macros - the BCM6345 DMA engine does not have any internal SRAM for transferring buffers - BCM6345 buffer allocation and flow control is not per-channel but global (done in RSET_ENETDMA) - the DMA engine bits are right-shifted by 3 compared to other DMA generations - the DMA enable/interrupt masks are a little different (we need to enable more bits for 6345) - some registers have the same meaning but are offset in the ENET_DMAC space, so a lookup table is required to return the proper offset The MAC itself is identical and requires no modifications to work. Signed-off-by: Florian Fainelli Acked-by: Ralf Baechle Signed-off-by: David S.
Miller --- arch/mips/bcm63xx/dev-enet.c | 65 ++++++- arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h | 3 +- .../include/asm/mach-bcm63xx/bcm63xx_dev_enet.h | 94 ++++++++++ arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h | 43 ++++- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 200 ++++++++++++--------- drivers/net/ethernet/broadcom/bcm63xx_enet.h | 15 ++ 6 files changed, 329 insertions(+), 91 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c index 6cbaee0f6d70..52bc01df9bfe 100644 --- a/arch/mips/bcm63xx/dev-enet.c +++ b/arch/mips/bcm63xx/dev-enet.c @@ -9,10 +9,44 @@ #include #include #include +#include #include #include #include +#ifdef BCMCPU_RUNTIME_DETECT +static const unsigned long bcm6348_regs_enetdmac[] = { + [ENETDMAC_CHANCFG] = ENETDMAC_CHANCFG_REG, + [ENETDMAC_IR] = ENETDMAC_IR_REG, + [ENETDMAC_IRMASK] = ENETDMAC_IRMASK_REG, + [ENETDMAC_MAXBURST] = ENETDMAC_MAXBURST_REG, +}; + +static const unsigned long bcm6345_regs_enetdmac[] = { + [ENETDMAC_CHANCFG] = ENETDMA_6345_CHANCFG_REG, + [ENETDMAC_IR] = ENETDMA_6345_IR_REG, + [ENETDMAC_IRMASK] = ENETDMA_6345_IRMASK_REG, + [ENETDMAC_MAXBURST] = ENETDMA_6345_MAXBURST_REG, + [ENETDMAC_BUFALLOC] = ENETDMA_6345_BUFALLOC_REG, + [ENETDMAC_RSTART] = ENETDMA_6345_RSTART_REG, + [ENETDMAC_FC] = ENETDMA_6345_FC_REG, + [ENETDMAC_LEN] = ENETDMA_6345_LEN_REG, +}; + +const unsigned long *bcm63xx_regs_enetdmac; +EXPORT_SYMBOL(bcm63xx_regs_enetdmac); + +static __init void bcm63xx_enetdmac_regs_init(void) +{ + if (BCMCPU_IS_6345()) + bcm63xx_regs_enetdmac = bcm6345_regs_enetdmac; + else + bcm63xx_regs_enetdmac = bcm6348_regs_enetdmac; +} +#else +static __init void bcm63xx_enetdmac_regs_init(void) { } +#endif + static struct resource shared_res[] = { { .start = -1, /* filled at runtime */ @@ -137,12 +171,19 @@ static int __init register_shared(void) if (shared_device_registered) return 0; + bcm63xx_enetdmac_regs_init(); + shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA); shared_res[0].end = shared_res[0].start; - shared_res[0].end += (RSET_ENETDMA_SIZE) - 1; + if (BCMCPU_IS_6345()) + shared_res[0].end += (RSET_6345_ENETDMA_SIZE) - 1; + else + shared_res[0].end += (RSET_ENETDMA_SIZE) - 1; if (BCMCPU_IS_6328() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) chan_count = 32; + else if (BCMCPU_IS_6345()) + chan_count = 8; else chan_count = 16; @@ -172,7 +213,7 @@ int __init bcm63xx_enet_register(int unit, if (unit > 1) return -ENODEV; - if (unit == 1 && BCMCPU_IS_6338()) + if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345())) return -ENODEV; ret = register_shared(); @@ -213,6 +254,21 @@ int __init bcm63xx_enet_register(int unit, dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY); } + dpd->dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK; + dpd->dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK; + if (BCMCPU_IS_6345()) { + dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_CHAINING_MASK; + dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_WRAP_EN_MASK; + dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_FLOWC_EN_MASK; + dpd->dma_chan_int_mask |= ENETDMA_IR_BUFDONE_MASK; + dpd->dma_chan_int_mask |= ENETDMA_IR_NOTOWNER_MASK; + dpd->dma_chan_width = ENETDMA_6345_CHAN_WIDTH; + dpd->dma_desc_shift = ENETDMA_6345_DESC_SHIFT; + } else { + dpd->dma_has_sram = true; + dpd->dma_chan_width = ENETDMA_CHAN_WIDTH; + } + ret = platform_device_register(pdev); if (ret) return ret; @@ -246,6 +302,11 @@ bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd) else if (BCMCPU_IS_6362() || BCMCPU_IS_6368()) 
enetsw_pd.num_ports = ENETSW_PORTS_6368; + enetsw_pd.dma_has_sram = true; + enetsw_pd.dma_chan_width = ENETDMA_CHAN_WIDTH; + enetsw_pd.dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK; + enetsw_pd.dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK; + ret = platform_device_register(&bcm63xx_enetsw_device); if (ret) return ret; diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h index 9981f4f0e42f..e6e65dc7d502 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h @@ -174,6 +174,7 @@ enum bcm63xx_regs_set { #define BCM_6368_RSET_SPI_SIZE 1804 #define RSET_ENET_SIZE 2048 #define RSET_ENETDMA_SIZE 256 +#define RSET_6345_ENETDMA_SIZE 64 #define RSET_ENETDMAC_SIZE(chans) (16 * (chans)) #define RSET_ENETDMAS_SIZE(chans) (16 * (chans)) #define RSET_ENETSW_SIZE 65536 @@ -300,7 +301,7 @@ enum bcm63xx_regs_set { #define BCM_6345_USBDMA_BASE (0xfffe2800) #define BCM_6345_ENET0_BASE (0xfffe1800) #define BCM_6345_ENETDMA_BASE (0xfffe2800) -#define BCM_6345_ENETDMAC_BASE (0xfffe2900) +#define BCM_6345_ENETDMAC_BASE (0xfffe2840) #define BCM_6345_ENETDMAS_BASE (0xfffe2a00) #define BCM_6345_ENETSW_BASE (0xdeadbeef) #define BCM_6345_PCMCIA_BASE (0xfffe2028) diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h index 118e3c938841..753953e86242 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h @@ -4,6 +4,8 @@ #include #include +#include + /* * on board ethernet platform data */ @@ -37,6 +39,21 @@ struct bcm63xx_enet_platform_data { int phy_id, int reg), void (*mii_write)(struct net_device *dev, int phy_id, int reg, int val)); + + /* DMA channel enable mask */ + u32 dma_chan_en_mask; + + /* DMA channel interrupt mask */ + u32 dma_chan_int_mask; + + /* DMA engine has internal SRAM */ + bool dma_has_sram; + + /* DMA channel register width */ + unsigned int dma_chan_width; + + /* DMA descriptor shift */ + unsigned int dma_desc_shift; }; /* @@ -63,6 +80,18 @@ struct bcm63xx_enetsw_platform_data { char mac_addr[ETH_ALEN]; int num_ports; struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT]; + + /* DMA channel enable mask */ + u32 dma_chan_en_mask; + + /* DMA channel interrupt mask */ + u32 dma_chan_int_mask; + + /* DMA channel register width */ + unsigned int dma_chan_width; + + /* DMA engine has internal SRAM */ + bool dma_has_sram; }; int __init bcm63xx_enet_register(int unit, @@ -70,4 +99,69 @@ int __init bcm63xx_enet_register(int unit, int bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd); +enum bcm63xx_regs_enetdmac { + ENETDMAC_CHANCFG, + ENETDMAC_IR, + ENETDMAC_IRMASK, + ENETDMAC_MAXBURST, + ENETDMAC_BUFALLOC, + ENETDMAC_RSTART, + ENETDMAC_FC, + ENETDMAC_LEN, +}; + +static inline unsigned long bcm63xx_enetdmacreg(enum bcm63xx_regs_enetdmac reg) +{ +#ifdef BCMCPU_RUNTIME_DETECT + extern const unsigned long *bcm63xx_regs_enetdmac; + + return bcm63xx_regs_enetdmac[reg]; +#else +#ifdef CONFIG_BCM63XX_CPU_6345 + switch (reg) { + case ENETDMAC_CHANCFG: + return ENETDMA_6345_CHANCFG_REG; + case ENETDMAC_IR: + return ENETDMA_6345_IR_REG; + case ENETDMAC_IRMASK: + return ENETDMA_6345_IRMASK_REG; + case ENETDMAC_MAXBURST: + return ENETDMA_6345_MAXBURST_REG; + case ENETDMAC_BUFALLOC: + return ENETDMA_6345_BUFALLOC_REG; + case ENETDMAC_RSTART: + return ENETDMA_6345_RSTART_REG; + case ENETDMAC_FC: + return ENETDMA_6345_FC_REG; + case 
ENETDMAC_LEN: + return ENETDMA_6345_LEN_REG; + } +#endif +#if defined(CONFIG_BCM63XX_CPU_6328) || \ + defined(CONFIG_BCM63XX_CPU_6338) || \ + defined(CONFIG_BCM63XX_CPU_6348) || \ + defined(CONFIG_BCM63XX_CPU_6358) || \ + defined(CONFIG_BCM63XX_CPU_6362) || \ + defined(CONFIG_BCM63XX_CPU_6368) + switch (reg) { + case ENETDMAC_CHANCFG: + return ENETDMAC_CHANCFG_REG; + case ENETDMAC_IR: + return ENETDMAC_IR_REG; + case ENETDMAC_IRMASK: + return ENETDMAC_IRMASK_REG; + case ENETDMAC_MAXBURST: + return ENETDMAC_MAXBURST_REG; + case ENETDMAC_BUFALLOC: + case ENETDMAC_RSTART: + case ENETDMAC_FC: + case ENETDMAC_LEN: + return 0; + } +#endif +#endif + return 0; +} + + #endif /* ! BCM63XX_DEV_ENET_H_ */ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 0a2121abb1a6..eff7ca7d12b0 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -727,6 +727,8 @@ /************************************************************************* * _REG relative to RSET_ENETDMA *************************************************************************/ +#define ENETDMA_CHAN_WIDTH 0x10 +#define ENETDMA_6345_CHAN_WIDTH 0x40 /* Controller Configuration Register */ #define ENETDMA_CFG_REG (0x0) @@ -782,31 +784,56 @@ /* State Ram Word 4 */ #define ENETDMA_SRAM4_REG(x) (0x20c + (x) * 0x10) +/* Broadcom 6345 ENET DMA definitions */ +#define ENETDMA_6345_CHANCFG_REG (0x00) + +#define ENETDMA_6345_MAXBURST_REG (0x40) + +#define ENETDMA_6345_RSTART_REG (0x08) + +#define ENETDMA_6345_LEN_REG (0x0C) + +#define ENETDMA_6345_IR_REG (0x14) + +#define ENETDMA_6345_IRMASK_REG (0x18) + +#define ENETDMA_6345_FC_REG (0x1C) + +#define ENETDMA_6345_BUFALLOC_REG (0x20) + +/* Shift down for EOP, SOP and WRAP bits */ +#define ENETDMA_6345_DESC_SHIFT (3) /************************************************************************* * _REG relative to RSET_ENETDMAC *************************************************************************/ /* Channel Configuration register */ -#define ENETDMAC_CHANCFG_REG(x) ((x) * 0x10) +#define ENETDMAC_CHANCFG_REG (0x0) #define ENETDMAC_CHANCFG_EN_SHIFT 0 #define ENETDMAC_CHANCFG_EN_MASK (1 << ENETDMAC_CHANCFG_EN_SHIFT) #define ENETDMAC_CHANCFG_PKTHALT_SHIFT 1 #define ENETDMAC_CHANCFG_PKTHALT_MASK (1 << ENETDMAC_CHANCFG_PKTHALT_SHIFT) #define ENETDMAC_CHANCFG_BUFHALT_SHIFT 2 #define ENETDMAC_CHANCFG_BUFHALT_MASK (1 << ENETDMAC_CHANCFG_BUFHALT_SHIFT) +#define ENETDMAC_CHANCFG_CHAINING_SHIFT 2 +#define ENETDMAC_CHANCFG_CHAINING_MASK (1 << ENETDMAC_CHANCFG_CHAINING_SHIFT) +#define ENETDMAC_CHANCFG_WRAP_EN_SHIFT 3 +#define ENETDMAC_CHANCFG_WRAP_EN_MASK (1 << ENETDMAC_CHANCFG_WRAP_EN_SHIFT) +#define ENETDMAC_CHANCFG_FLOWC_EN_SHIFT 4 +#define ENETDMAC_CHANCFG_FLOWC_EN_MASK (1 << ENETDMAC_CHANCFG_FLOWC_EN_SHIFT) /* Interrupt Control/Status register */ -#define ENETDMAC_IR_REG(x) (0x4 + (x) * 0x10) +#define ENETDMAC_IR_REG (0x4) #define ENETDMAC_IR_BUFDONE_MASK (1 << 0) #define ENETDMAC_IR_PKTDONE_MASK (1 << 1) #define ENETDMAC_IR_NOTOWNER_MASK (1 << 2) /* Interrupt Mask register */ -#define ENETDMAC_IRMASK_REG(x) (0x8 + (x) * 0x10) +#define ENETDMAC_IRMASK_REG (0x8) /* Maximum Burst Length */ -#define ENETDMAC_MAXBURST_REG(x) (0xc + (x) * 0x10) +#define ENETDMAC_MAXBURST_REG (0xc) /************************************************************************* @@ -814,16 +841,16 @@ *************************************************************************/ /* Ring Start Address register */ 
-#define ENETDMAS_RSTART_REG(x) ((x) * 0x10) +#define ENETDMAS_RSTART_REG (0x0) /* State Ram Word 2 */ -#define ENETDMAS_SRAM2_REG(x) (0x4 + (x) * 0x10) +#define ENETDMAS_SRAM2_REG (0x4) /* State Ram Word 3 */ -#define ENETDMAS_SRAM3_REG(x) (0x8 + (x) * 0x10) +#define ENETDMAS_SRAM3_REG (0x8) /* State Ram Word 4 */ -#define ENETDMAS_SRAM4_REG(x) (0xc + (x) * 0x10) +#define ENETDMAS_SRAM4_REG (0xc) /************************************************************************* diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index fbbfc4acd53f..8f1ac023bb03 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -107,26 +107,28 @@ static inline void enet_dma_writel(struct bcm_enet_priv *priv, bcm_writel(val, bcm_enet_shared_base[0] + off); } -static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off) +static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan) { - return bcm_readl(bcm_enet_shared_base[1] + off); + return bcm_readl(bcm_enet_shared_base[1] + + bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); } static inline void enet_dmac_writel(struct bcm_enet_priv *priv, - u32 val, u32 off) + u32 val, u32 off, int chan) { - bcm_writel(val, bcm_enet_shared_base[1] + off); + bcm_writel(val, bcm_enet_shared_base[1] + + bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); } -static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off) +static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan) { - return bcm_readl(bcm_enet_shared_base[2] + off); + return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); } static inline void enet_dmas_writel(struct bcm_enet_priv *priv, - u32 val, u32 off) + u32 val, u32 off, int chan) { - bcm_writel(val, bcm_enet_shared_base[2] + off); + bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); } /* @@ -262,7 +264,7 @@ static int bcm_enet_refill_rx(struct net_device *dev) len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; len_stat |= DMADESC_OWNER_MASK; if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { - len_stat |= DMADESC_WRAP_MASK; + len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); priv->rx_dirty_desc = 0; } else { priv->rx_dirty_desc++; @@ -273,7 +275,10 @@ static int bcm_enet_refill_rx(struct net_device *dev) priv->rx_desc_count++; /* tell dma engine we allocated one buffer */ - enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); + if (priv->dma_has_sram) + enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); + else + enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan); } /* If rx ring is still empty, set a timer to try allocating @@ -349,7 +354,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget) /* if the packet does not have start of packet _and_ * end of packet flag set, then just recycle it */ - if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { + if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) != + (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) { dev->stats.rx_dropped++; continue; } @@ -410,8 +416,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget) bcm_enet_refill_rx(dev); /* kick rx dma */ - enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, - ENETDMAC_CHANCFG_REG(priv->rx_chan)); + enet_dmac_writel(priv, priv->dma_chan_en_mask, + ENETDMAC_CHANCFG, priv->rx_chan); } return processed; @@ -486,10 +492,10 @@ static int 
bcm_enet_poll(struct napi_struct *napi, int budget) dev = priv->net_dev; /* ack interrupts */ - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IR_REG(priv->rx_chan)); - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IR_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->tx_chan); /* reclaim sent skb */ tx_work_done = bcm_enet_tx_reclaim(dev, 0); @@ -508,10 +514,10 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) napi_complete(napi); /* restore rx/tx interrupt */ - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IRMASK_REG(priv->rx_chan)); - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->tx_chan); return rx_work_done; } @@ -554,8 +560,8 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) priv = netdev_priv(dev); /* mask rx/tx interrupts */ - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); napi_schedule(&priv->napi); @@ -616,14 +622,14 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) DMA_TO_DEVICE); len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; - len_stat |= DMADESC_ESOP_MASK | + len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) | DMADESC_APPEND_CRC | DMADESC_OWNER_MASK; priv->tx_curr_desc++; if (priv->tx_curr_desc == priv->tx_ring_size) { priv->tx_curr_desc = 0; - len_stat |= DMADESC_WRAP_MASK; + len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); } priv->tx_desc_count--; @@ -634,8 +640,8 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) wmb(); /* kick tx dma */ - enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, - ENETDMAC_CHANCFG_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_en_mask, + ENETDMAC_CHANCFG, priv->tx_chan); /* stop queue if no more desc available */ if (!priv->tx_desc_count) @@ -763,6 +769,9 @@ static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en) val &= ~ENET_RXCFG_ENFLOW_MASK; enet_writel(priv, val, ENET_RXCFG_REG); + if (!priv->dma_has_sram) + return; + /* tx flow control (pause frame generation) */ val = enet_dma_readl(priv, ENETDMA_CFG_REG); if (tx_en) @@ -910,8 +919,8 @@ static int bcm_enet_open(struct net_device *dev) /* mask all interrupts and request them */ enet_writel(priv, 0, ENET_IRMASK_REG); - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); if (ret) @@ -986,8 +995,12 @@ static int bcm_enet_open(struct net_device *dev) priv->rx_curr_desc = 0; /* initialize flow control buffer allocation */ - enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, - ENETDMA_BUFALLOC_REG(priv->rx_chan)); + if (priv->dma_has_sram) + enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMA_BUFALLOC_REG(priv->rx_chan)); + else + enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMAC_BUFALLOC, 
priv->rx_chan); if (bcm_enet_refill_rx(dev)) { dev_err(kdev, "cannot allocate rx skb queue\n"); @@ -996,18 +1009,30 @@ static int bcm_enet_open(struct net_device *dev) } /* write rx & tx ring addresses */ - enet_dmas_writel(priv, priv->rx_desc_dma, - ENETDMAS_RSTART_REG(priv->rx_chan)); - enet_dmas_writel(priv, priv->tx_desc_dma, - ENETDMAS_RSTART_REG(priv->tx_chan)); + if (priv->dma_has_sram) { + enet_dmas_writel(priv, priv->rx_desc_dma, + ENETDMAS_RSTART_REG, priv->rx_chan); + enet_dmas_writel(priv, priv->tx_desc_dma, + ENETDMAS_RSTART_REG, priv->tx_chan); + } else { + enet_dmac_writel(priv, priv->rx_desc_dma, + ENETDMAC_RSTART, priv->rx_chan); + enet_dmac_writel(priv, priv->tx_desc_dma, + ENETDMAC_RSTART, priv->tx_chan); + } /* clear remaining state ram for rx & tx channel */ - enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan)); + if (priv->dma_has_sram) { + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); + } else { + enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan); + } /* set max rx/tx length */ enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); @@ -1015,18 +1040,24 @@ static int bcm_enet_open(struct net_device *dev) /* set dma maximum burst len */ enet_dmac_writel(priv, priv->dma_maxburst, - ENETDMAC_MAXBURST_REG(priv->rx_chan)); + ENETDMAC_MAXBURST, priv->rx_chan); enet_dmac_writel(priv, priv->dma_maxburst, - ENETDMAC_MAXBURST_REG(priv->tx_chan)); + ENETDMAC_MAXBURST, priv->tx_chan); /* set correct transmit fifo watermark */ enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); /* set flow control low/high threshold to 1/3 / 2/3 */ - val = priv->rx_ring_size / 3; - enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); - val = (priv->rx_ring_size * 2) / 3; - enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + if (priv->dma_has_sram) { + val = priv->rx_ring_size / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); + val = (priv->rx_ring_size * 2) / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + } else { + enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan); + enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan); + enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan); + } /* all set, enable mac and interrupts, start dma engine and * kick rx dma channel */ @@ -1035,26 +1066,26 @@ static int bcm_enet_open(struct net_device *dev) val |= ENET_CTL_ENABLE_MASK; enet_writel(priv, val, ENET_CTL_REG); enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); - enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, - ENETDMAC_CHANCFG_REG(priv->rx_chan)); + enet_dmac_writel(priv, priv->dma_chan_en_mask, + ENETDMAC_CHANCFG, priv->rx_chan); /* watch "mib counters about to overflow" interrupt */ enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); enet_writel(priv, ENET_IR_MIB, 
ENET_IRMASK_REG); /* watch "packet transferred" interrupt in rx and tx */ - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IR_REG(priv->rx_chan)); - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IR_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->tx_chan); /* make sure we enable napi before rx interrupt */ napi_enable(&priv->napi); - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IRMASK_REG(priv->rx_chan)); - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->tx_chan); if (priv->has_phy) phy_start(priv->phydev); @@ -1134,13 +1165,13 @@ static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) { int limit; - enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG_REG(chan)); + enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan); limit = 1000; do { u32 val; - val = enet_dmac_readl(priv, ENETDMAC_CHANCFG_REG(chan)); + val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan); if (!(val & ENETDMAC_CHANCFG_EN_MASK)) break; udelay(1); @@ -1167,8 +1198,8 @@ static int bcm_enet_stop(struct net_device *dev) /* mask all interrupts */ enet_writel(priv, 0, ENET_IRMASK_REG); - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); /* make sure no mib update is scheduled */ cancel_work_sync(&priv->mib_update_task); @@ -1782,6 +1813,11 @@ static int bcm_enet_probe(struct platform_device *pdev) priv->pause_tx = pd->pause_tx; priv->force_duplex_full = pd->force_duplex_full; priv->force_speed_100 = pd->force_speed_100; + priv->dma_chan_en_mask = pd->dma_chan_en_mask; + priv->dma_chan_int_mask = pd->dma_chan_int_mask; + priv->dma_chan_width = pd->dma_chan_width; + priv->dma_has_sram = pd->dma_has_sram; + priv->dma_desc_shift = pd->dma_desc_shift; } if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { @@ -2118,8 +2154,8 @@ static int bcm_enetsw_open(struct net_device *dev) kdev = &priv->pdev->dev; /* mask all interrupts and request them */ - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED, dev->name, dev); @@ -2231,23 +2267,23 @@ static int bcm_enetsw_open(struct net_device *dev) /* write rx & tx ring addresses */ enet_dmas_writel(priv, priv->rx_desc_dma, - ENETDMAS_RSTART_REG(priv->rx_chan)); + ENETDMAS_RSTART_REG, priv->rx_chan); enet_dmas_writel(priv, priv->tx_desc_dma, - ENETDMAS_RSTART_REG(priv->tx_chan)); + ENETDMAS_RSTART_REG, priv->tx_chan); /* clear remaining state ram for rx & tx channel */ - enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan)); + 
enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); /* set dma maximum burst len */ enet_dmac_writel(priv, priv->dma_maxburst, - ENETDMAC_MAXBURST_REG(priv->rx_chan)); + ENETDMAC_MAXBURST, priv->rx_chan); enet_dmac_writel(priv, priv->dma_maxburst, - ENETDMAC_MAXBURST_REG(priv->tx_chan)); + ENETDMAC_MAXBURST, priv->tx_chan); /* set flow control low/high threshold to 1/3 / 2/3 */ val = priv->rx_ring_size / 3; @@ -2261,21 +2297,21 @@ static int bcm_enetsw_open(struct net_device *dev) wmb(); enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, - ENETDMAC_CHANCFG_REG(priv->rx_chan)); + ENETDMAC_CHANCFG, priv->rx_chan); /* watch "packet transferred" interrupt in rx and tx */ enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IR_REG(priv->rx_chan)); + ENETDMAC_IR, priv->rx_chan); enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IR_REG(priv->tx_chan)); + ENETDMAC_IR, priv->tx_chan); /* make sure we enable napi before rx interrupt */ napi_enable(&priv->napi); enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IRMASK_REG(priv->rx_chan)); + ENETDMAC_IRMASK, priv->rx_chan); enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IRMASK_REG(priv->tx_chan)); + ENETDMAC_IRMASK, priv->tx_chan); netif_carrier_on(dev); netif_start_queue(dev); @@ -2377,8 +2413,8 @@ static int bcm_enetsw_stop(struct net_device *dev) del_timer_sync(&priv->rx_timeout); /* mask all interrupts */ - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); /* disable dma & mac */ bcm_enet_disable_dma(priv, priv->tx_chan); @@ -2712,6 +2748,10 @@ static int bcm_enetsw_probe(struct platform_device *pdev) memcpy(priv->used_ports, pd->used_ports, sizeof(pd->used_ports)); priv->num_ports = pd->num_ports; + priv->dma_has_sram = pd->dma_has_sram; + priv->dma_chan_en_mask = pd->dma_chan_en_mask; + priv->dma_chan_int_mask = pd->dma_chan_int_mask; + priv->dma_chan_width = pd->dma_chan_width; } ret = compute_hw_mtu(priv, dev->mtu); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h index 721ffbaef8d2..f55af4310085 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h @@ -339,6 +339,21 @@ struct bcm_enet_priv { /* used to poll switch port state */ struct timer_list swphy_poll; spinlock_t enetsw_mdio_lock; + + /* dma channel enable mask */ + u32 dma_chan_en_mask; + + /* dma channel interrupt mask */ + u32 dma_chan_int_mask; + + /* DMA engine has internal SRAM */ + bool dma_has_sram; + + /* dma channel width */ + unsigned int dma_chan_width; + + /* dma descriptor shift value */ + unsigned int dma_desc_shift; }; -- cgit v1.2.3 From 948e306d7d645af80ea331b60495710fe4fe12bb Mon Sep 17 00:00:00 2001 From: Rony Efraim Date: Thu, 13 Jun 2013 13:19:11 +0300 Subject: net/mlx4: Add VF link state support Add support to change the link state of VF (vPort) Signed-off-by: Rony Efraim Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 48 ++++++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 8 +++++ drivers/net/ethernet/mellanox/mlx4/eq.c | 9 +++-- drivers/net/ethernet/mellanox/mlx4/fw.c | 8 +++++ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1 + include/linux/mlx4/cmd.h | 2 +- 6 files changed, 73 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 0e572a527154..ea1e03896353 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -39,6 +39,7 @@ #include #include +#include #include #include @@ -2178,7 +2179,54 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in ivf->qos = s_info->default_qos; ivf->tx_rate = s_info->tx_rate; ivf->spoofchk = s_info->spoofchk; + ivf->linkstate = s_info->link_state; return 0; } EXPORT_SYMBOL_GPL(mlx4_get_vf_config); + +int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + struct mlx4_vport_oper_state *vp_oper; + int slave; + u8 link_stat_event; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + /* get current link state */ + if (!priv->sense.do_sense_port[port]) + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE; + else + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN; + break; + + case IFLA_VF_LINK_STATE_ENABLE: + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE; + break; + + case IFLA_VF_LINK_STATE_DISABLE: + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN; + break; + + default: + mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n", + link_state, slave, port); + return -EINVAL; + }; + /* update the admin & oper state on the link state */ + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + s_info->link_state = link_state; + vp_oper->state.link_state = link_state; + + /* send event */ + mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 89c47ea84b50..ade276cca0e6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2061,6 +2061,13 @@ static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_ return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); } +static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); +} static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@ -2101,6 +2108,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_set_vf_mac = mlx4_en_set_vf_mac, .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, + .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, .ndo_get_vf_config = mlx4_en_get_vf_config, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx4_en_netpoll, diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c 
b/drivers/net/ethernet/mellanox/mlx4/eq.c index 6000342f9725..7e042869ef0c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -448,6 +448,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) int i; enum slave_port_gen_event gen_event; unsigned long flags; + struct mlx4_vport_state *s_info; while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) { /* @@ -556,7 +557,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN" " to slave: %d, port:%d\n", __func__, i, port); - mlx4_slave_event(dev, i, eqe); + s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) + mlx4_slave_event(dev, i, eqe); } else { /* IB port */ set_and_calc_slave_port_state(dev, i, port, MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, @@ -580,7 +583,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) for (i = 0; i < dev->num_slaves; i++) { if (i == mlx4_master_func_num(dev)) continue; - mlx4_slave_event(dev, i, eqe); + s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) + mlx4_slave_event(dev, i, eqe); } else /* IB port */ /* port-up event will be sent to a slave when the diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 2c97901c6a6d..569bbe3e7403 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -830,8 +830,10 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, u8 port_type; u16 short_field; int err; + int admin_link_state; #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 +#define MLX4_PORT_LINK_UP_MASK 0x80 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e @@ -861,6 +863,12 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, /* set port type to currently operating port type */ port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3); + admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state; + if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state) + port_type |= MLX4_PORT_LINK_UP_MASK; + else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state) + port_type &= ~MLX4_PORT_LINK_UP_MASK; + MLX4_PUT(outbox->buf, port_type, QUERY_PORT_SUPPORTED_TYPE_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index df15bb6631cc..75272935a3f7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -482,6 +482,7 @@ struct mlx4_vport_state { u8 default_qos; u32 tx_rate; bool spoofchk; + u32 link_state; }; struct mlx4_vf_admin_state { diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index adf6e0648f20..8074a9711cf1 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -237,7 +237,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); - +int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) -- cgit v1.2.3 From 72bb72b0d98847d22c6fae4e170121f3640f0f60 Mon Sep 17 00:00:00 2001 From: 
Michael Chan Date: Mon, 17 Jun 2013 13:47:25 -0700 Subject: tg3: Prevent system hang during repeated EEH errors. The current tg3 code assumes that the pci_error_handlers are always called in sequence. In particular, during ->error_detected(), NAPI is disabled and the device is shut down. The device is later reset and NAPI re-enabled in ->slot_reset() and ->resume(). In EEH, if more than 6 errors are detected in an hour, only ->error_detected() will be called. This will leave the driver in an inconsistent state as NAPI is disabled but netif_running state is still true. When the device is later closed, we'll try to disable NAPI again and it will loop forever. We fix this by closing the device if we encounter any error conditions during the normal sequence of the pci_error_handlers. v2: Remove the changes in tg3_io_resume() based on Benjamin Poirier's feedback. Signed-off-by: Michael Chan Signed-off-by: Nithin Nayak Sujir Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/tg3.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 28a645fc68bf..297fc138fa15 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -17747,10 +17747,13 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, tg3_full_unlock(tp); done: - if (state == pci_channel_io_perm_failure) + if (state == pci_channel_io_perm_failure) { + tg3_napi_enable(tp); + dev_close(netdev); err = PCI_ERS_RESULT_DISCONNECT; - else + } else { pci_disable_device(pdev); + } rtnl_unlock(); @@ -17796,6 +17799,10 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) rc = PCI_ERS_RESULT_RECOVERED; done: + if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) { + tg3_napi_enable(tp); + dev_close(netdev); + } rtnl_unlock(); return rc; -- cgit v1.2.3 From 32bc9b46d840d08e1c8f58eaf500d0e596c33659 Mon Sep 17 00:00:00 2001 From: Chris Healy Date: Mon, 17 Jun 2013 07:25:06 -0700 Subject: fec: Add support to restart autonegotiate Add ethtool operation to restart autonegotiation via the PHY. Tested on i.MX28EVK. Signed-off-by: Chris Healy Signed-off-by: David S.
Miller --- drivers/net/ethernet/freescale/fec_main.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index c540055e4f4f..46f2544fd1a7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1435,6 +1435,17 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, return 0; } +static int fec_enet_nway_reset(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phy_dev; + + if (!phydev) + return -ENODEV; + + return genphy_restart_aneg(phydev); +} + static const struct ethtool_ops fec_enet_ethtool_ops = { .get_pauseparam = fec_enet_get_pauseparam, .set_pauseparam = fec_enet_set_pauseparam, @@ -1443,6 +1454,7 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .get_drvinfo = fec_enet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ts_info = fec_enet_get_ts_info, + .nway_reset = fec_enet_nway_reset, }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) -- cgit v1.2.3 From 18f1d0541241855d928608ce6ce325e4cdad222d Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 14 Jun 2013 17:58:33 +0900 Subject: mv643xx_eth.c: convert skb->end into skb_end_pointer(skb) The change set of 4305b541 "[SK_BUFF]: Convert skb->end to sk_buff_data_t" converted skb->end from a pointer to sk_buff_data_t. The pointed value should be accessed via skb_end_pointer(). Since neither the arm nor the ppc arch defines NET_SKBUFF_DATA_USES_OFFSET, skb->end is effectively a pointer, so it doesn't cause a real problem. But this patch is good for consistency. Found by inspection. Compile tested only. Cc: Simon Horman Cc: Lennert Buytenhek Signed-off-by: Isaku Yamahata Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index c7f9fb33ce95..510d50603a02 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -621,7 +621,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget) rx_desc = rxq->rx_desc_area + rx; - size = skb->end - skb->data; + size = skb_end_pointer(skb) - skb->data; rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, size, DMA_FROM_DEVICE); -- cgit v1.2.3 From 511efbbbc8ff210dae7feaf263d7f855ea357cbe Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 14 Jun 2013 17:58:34 +0900 Subject: pxa168_eth: convert skb->end into skb_end_pointer(skb) The change set of 4305b541, "[SK_BUFF]: Convert skb->end to sk_buff_data_t" converted skb->end from a pointer type to sk_buff_data_t. The pointed value should be accessed via skb_end_pointer(). Since the arm arch doesn't define NET_SKBUFF_DATA_USES_OFFSET, skb->end is effectively a pointer, so it doesn't cause a real problem. But this patch is good for consistency. Found by inspection. Compile tested only. Cc: Simon Horman Signed-off-by: Isaku Yamahata Reviewed-by: Simon Horman Signed-off-by: David S.
Miller --- drivers/net/ethernet/marvell/pxa168_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 2602cf7ba642..ec20508f0d6b 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -357,7 +357,7 @@ static void rxq_refill(struct net_device *dev) /* Get 'used' Rx descriptor */ used_rx_desc = pep->rx_used_desc_q; p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc]; - size = skb->end - skb->data; + size = skb_end_pointer(skb) - skb->data; p_used_rx_desc->buf_ptr = dma_map_single(NULL, skb->data, size, -- cgit v1.2.3 From 278cee0515a3b3abb0d4e614d969b5be35c2c288 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Sat, 1 Jun 2013 01:30:56 +0900 Subject: treewide: Fix typo in printk Correct spelling typo in printk within various drivers. Signed-off-by: Masanari Iida Acked-by: Randy Dunlap Signed-off-by: Jiri Kosina --- drivers/cpufreq/s3c2416-cpufreq.c | 2 +- drivers/net/ethernet/mellanox/mlx4/main.c | 2 +- drivers/net/ethernet/neterion/vxge/vxge-main.c | 2 +- drivers/net/wireless/ath/ath9k/Kconfig | 2 +- drivers/scsi/libiscsi_tcp.c | 2 +- drivers/scsi/pmcraid.c | 2 +- lib/Kconfig.kgdb | 4 ++-- 7 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index 4f1881eee3f1..e594c6293463 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c @@ -205,7 +205,7 @@ static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx) ret = s3c2416_cpufreq_set_armdiv(s3c_freq, clk_get_rate(s3c_freq->hclk) / 1000); if (ret < 0) { - pr_err("cpufreq: Failed to to set the armdiv to %lukHz: %d\n", + pr_err("cpufreq: Failed to set the armdiv to %lukHz: %d\n", clk_get_rate(s3c_freq->hclk) / 1000, ret); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 0d32a82458bf..c8595ec92142 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -98,7 +98,7 @@ MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" static bool enable_64b_cqe_eqe; module_param(enable_64b_cqe_eqe, bool, 0444); MODULE_PARM_DESC(enable_64b_cqe_eqe, - "Enable 64 byte CQEs/EQEs when the the FW supports this"); + "Enable 64 byte CQEs/EQEs when the FW supports this"); #define HCA_GLOBAL_CAP_MASK 0 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index cbfaed5f2f8d..5a20eaf903dd 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3444,7 +3444,7 @@ static int vxge_device_register(struct __vxge_hw_device *hldev, } vxge_debug_init(vxge_hw_device_trace_level_get(hldev), - "%s : checksuming enabled", __func__); + "%s : checksumming enabled", __func__); if (high_dma) { ndev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index f3dc124c60c7..f4f3566fba2e 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -28,7 +28,7 @@ config ATH9K Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family of chipsets. 
For a specific list of supported external cards, laptops that already ship with these cards and - APs that come with these cards refer to to ath9k wiki + APs that come with these cards refer to ath9k wiki products page: http://wireless.kernel.org/en/users/Drivers/ath9k/products diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 552e8a2b6f5f..92deec5ed7d6 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -558,7 +558,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) if (!rc) { iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. " "Target has sent more R2Ts than it " - "negotiated for or driver has has leaked.\n"); + "negotiated for or driver has leaked.\n"); return ISCSI_ERR_PROTO; } diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 8e1b73775065..1eb7b0280a45 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -6092,7 +6092,7 @@ static int __init pmcraid_init(void) if (IS_ERR(pmcraid_class)) { error = PTR_ERR(pmcraid_class); - pmcraid_err("failed to register with with sysfs, error = %x\n", + pmcraid_err("failed to register with sysfs, error = %x\n", error); goto out_unreg_chrdev; } diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index 140e87824173..358eb81fa28d 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -64,8 +64,8 @@ config KGDB_LOW_LEVEL_TRAP default n help This will add an extra call back to kgdb for the breakpoint - exception handler on which will will allow kgdb to step - through a notify handler. + exception handler which will allow kgdb to step through a + notify handler. config KGDB_KDB bool "KGDB_KDB: include kdb frontend for kgdb" -- cgit v1.2.3 From 646093a29f85630d8efe2aa38fa585d2c3ea2e46 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 19 Jun 2013 10:32:32 +0800 Subject: bcm63xx_enet: fix return value check in bcm_enet_shared_probe() In case of error, the function devm_ioremap_resource() returns ERR_PTR() and never returns NULL. The NULL test in the return value check should be replaced with IS_ERR(). Introduce by commit 0ae99b5fede6f3a8d252d50bb4aba29544295219 (bcm63xx_enet: split DMA channel register accesses) Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 8f1ac023bb03..b1bcd4ba4744 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -2862,8 +2862,8 @@ static int bcm_enet_shared_probe(struct platform_device *pdev) for (i = 0; i < 3; i++) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); p[i] = devm_ioremap_resource(&pdev->dev, res); - if (!p[i]) - return -ENOMEM; + if (IS_ERR(p[i])) + return PTR_ERR(p[i]); } memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base)); -- cgit v1.2.3 From ea7d69e75ac06daa7bc2fca2a8317197e5e3c81c Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 19 Jun 2013 23:29:23 +0400 Subject: sh_eth: define/use EESR_RX_CHECK macro sh_eth_interrupt() uses the same Rx interrupt mask twice to check the interrupt status register -- #define EESR_RX_CHECK and use it instead. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 13 ++----------- drivers/net/ethernet/renesas/sh_eth.h | 8 ++++++++ 2 files changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 8cb600ccaf1d..cd5987ecee4b 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1504,23 +1504,14 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) */ intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI; /* Clear interrupt */ - if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | - EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | - cd->tx_check | cd->eesr_err_check)) { + if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) { sh_eth_write(ndev, intr_status, EESR); ret = IRQ_HANDLED; } else goto other_irq; - if (intr_status & (EESR_FRC | /* Frame recv*/ - EESR_RMAF | /* Multi cast address recv*/ - EESR_RRF | /* Bit frame recv */ - EESR_RTLF | /* Long frame recv*/ - EESR_RTSF | /* short frame recv */ - EESR_PRE | /* PHY-LSI recv error */ - EESR_CERF)){ /* recv frame CRC error */ + if (intr_status & EESR_RX_CHECK) sh_eth_rx(ndev, intr_status); - } /* Tx Check */ if (intr_status & cd->tx_check) { diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 729e77e66f19..d05b60b826eb 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -248,6 +248,14 @@ enum EESR_BIT { EESR_CERF = 0x00000001, }; +#define EESR_RX_CHECK (EESR_FRC | /* Frame recv */ \ + EESR_RMAF | /* Multicast address recv */ \ + EESR_RRF | /* Bit frame recv */ \ + EESR_RTLF | /* Long frame recv */ \ + EESR_RTSF | /* Short frame recv */ \ + EESR_PRE | /* PHY-LSI recv error */ \ + EESR_CERF) /* Recv frame CRC error */ + #define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \ EESR_RTO) #define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \ -- cgit v1.2.3 From 3719109d61ca96746c733538ec776d02a6952640 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 19 Jun 2013 23:30:23 +0400 Subject: sh_eth: add NAPI support The driver hasn't used NAPI so far; implement its support at last... The patch was tested on Renesas R8A77781 BOCK-W board. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 85 ++++++++++++++++++++++++++++++----- drivers/net/ethernet/renesas/sh_eth.h | 1 + 2 files changed, 74 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index cd5987ecee4b..5233edab2dd5 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1244,7 +1244,7 @@ static int sh_eth_txfree(struct net_device *ndev) } /* Packet receive function */ -static int sh_eth_rx(struct net_device *ndev, u32 intr_status) +static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) { struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_rxdesc *rxdesc; @@ -1252,6 +1252,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status) int entry = mdp->cur_rx % mdp->num_rx_ring; int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; struct sk_buff *skb; + int exceeded = 0; u16 pkt_len = 0; u32 desc_status; @@ -1263,6 +1264,12 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status) if (--boguscnt < 0) break; + if (*quota <= 0) { + exceeded = 1; + break; + } + (*quota)--; + if (!(desc_status & RDFEND)) ndev->stats.rx_length_errors++; @@ -1350,7 +1357,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status) sh_eth_write(ndev, EDRRR_R, EDRRR); } - return 0; + return exceeded; } static void sh_eth_rcv_snd_disable(struct net_device *ndev) @@ -1491,7 +1498,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_cpu_data *cd = mdp->cd; irqreturn_t ret = IRQ_NONE; - unsigned long intr_status; + unsigned long intr_status, intr_enable; spin_lock(&mdp->lock); @@ -1502,25 +1509,41 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) * and we need to fully handle it in sh_eth_error() in order to quench * it as it doesn't get cleared by just writing 1 to the ECI bit... 
*/ - intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI; - /* Clear interrupt */ - if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) { - sh_eth_write(ndev, intr_status, EESR); + intr_enable = sh_eth_read(ndev, EESIPR); + intr_status &= intr_enable | DMAC_M_ECI; + if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) ret = IRQ_HANDLED; - } else + else goto other_irq; - if (intr_status & EESR_RX_CHECK) - sh_eth_rx(ndev, intr_status); + if (intr_status & EESR_RX_CHECK) { + if (napi_schedule_prep(&mdp->napi)) { + /* Mask Rx interrupts */ + sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK, + EESIPR); + __napi_schedule(&mdp->napi); + } else { + dev_warn(&ndev->dev, + "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n", + intr_status, intr_enable); + } + } /* Tx Check */ if (intr_status & cd->tx_check) { + /* Clear Tx interrupts */ + sh_eth_write(ndev, intr_status & cd->tx_check, EESR); + sh_eth_txfree(ndev); netif_wake_queue(ndev); } - if (intr_status & cd->eesr_err_check) + if (intr_status & cd->eesr_err_check) { + /* Clear error interrupts */ + sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR); + sh_eth_error(ndev, intr_status); + } other_irq: spin_unlock(&mdp->lock); @@ -1528,6 +1551,33 @@ other_irq: return ret; } +static int sh_eth_poll(struct napi_struct *napi, int budget) +{ + struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private, + napi); + struct net_device *ndev = napi->dev; + int quota = budget; + unsigned long intr_status; + + for (;;) { + intr_status = sh_eth_read(ndev, EESR); + if (!(intr_status & EESR_RX_CHECK)) + break; + /* Clear Rx interrupts */ + sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR); + + if (sh_eth_rx(ndev, intr_status, "a)) + goto out; + } + + napi_complete(napi); + + /* Reenable Rx interrupts */ + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); +out: + return budget - quota; +} + /* PHY state control function */ static void sh_eth_adjust_link(struct net_device *ndev) { @@ -1839,6 +1889,8 @@ static int sh_eth_open(struct net_device *ndev) if (ret) goto out_free_irq; + napi_enable(&mdp->napi); + return ret; out_free_irq: @@ -1934,6 +1986,8 @@ static int sh_eth_close(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); + napi_disable(&mdp->napi); + netif_stop_queue(ndev); /* Disable interrupts by clearing the interrupt mask. */ @@ -2623,10 +2677,12 @@ static int sh_eth_drv_probe(struct platform_device *pdev) } } + netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64); + /* network device register */ ret = register_netdev(ndev); if (ret) - goto out_release; + goto out_napi_del; /* mdio bus init */ ret = sh_mdio_init(ndev, pdev->id, pd); @@ -2644,6 +2700,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev) out_unregister: unregister_netdev(ndev); +out_napi_del: + netif_napi_del(&mdp->napi); + out_release: /* net_dev free */ if (ndev) @@ -2656,9 +2715,11 @@ out: static int sh_eth_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); + struct sh_eth_private *mdp = netdev_priv(ndev); sh_mdio_release(ndev); unregister_netdev(ndev); + netif_napi_del(&mdp->napi); pm_runtime_disable(&pdev->dev); free_netdev(ndev); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index d05b60b826eb..029744f77af6 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -505,6 +505,7 @@ struct sh_eth_private { u32 cur_tx, dirty_tx; u32 rx_buf_sz; /* Based on MTU+slack. 
*/ int edmac_endian; + struct napi_struct napi; /* MII transceiver section. */ u32 phy_id; /* PHY ID */ struct mii_bus *mii_bus; /* MDIO bus control */ -- cgit v1.2.3 From 8f80899665c4baca5fe18e919a18db818c3e11fa Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Thu, 20 Jun 2013 02:22:56 +0400 Subject: sh_eth: remove 'tx_error_check' field of 'struct sh_eth_cpu_data' The 'tx_error_check' field of 'struct sh_eth_cpu_data' is write-only, so remove it along with the DEFAULT_TX_ERROR_CHECK macro. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 14 -------------- drivers/net/ethernet/renesas/sh_eth.h | 3 --- 2 files changed, 17 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 5233edab2dd5..9a8b24e513ca 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -384,7 +384,6 @@ static struct sh_eth_cpu_data r8a777x_data = { .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, - .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, .mpr = 1, @@ -420,7 +419,6 @@ static struct sh_eth_cpu_data sh7724_data = { .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, - .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, .mpr = 1, @@ -457,7 +455,6 @@ static struct sh_eth_cpu_data sh7757_data = { .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, - .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .irq_flags = IRQF_SHARED, .apr = 1, @@ -527,8 +524,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ EESR_ECI, - .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ - EESR_TFE, .fdr_value = 0x0000072f, .rmcr_value = 0x00000001, @@ -587,8 +582,6 @@ static struct sh_eth_cpu_data sh7734_data = { .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ EESR_ECI, - .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ - EESR_TFE, .apr = 1, .mpr = 1, @@ -616,8 +609,6 @@ static struct sh_eth_cpu_data sh7763_data = { .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ EESR_ECI, - .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ - EESR_TFE, .apr = 1, .mpr = 1, @@ -655,8 +646,6 @@ static struct sh_eth_cpu_data r8a7740_data = { .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ EESR_ECI, - .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ - EESR_TFE, .apr = 1, .mpr = 1, @@ -706,9 +695,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) if (!cd->eesr_err_check) cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; - - if (!cd->tx_error_check) - cd->tx_error_check = DEFAULT_TX_ERROR_CHECK; } static int sh_eth_check_reset(struct net_device *ndev) diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 
029744f77af6..87c7ae9e10cc 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -261,8 +261,6 @@ enum EESR_BIT { #define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \ EESR_RDE | EESR_RFRMER | EESR_ADE | \ EESR_TFE | EESR_TDE | EESR_ECI) -#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \ - EESR_TFE) /* EESIPR */ enum DMAC_IM_BIT { @@ -468,7 +466,6 @@ struct sh_eth_cpu_data { /* interrupt checking mask */ unsigned long tx_check; unsigned long eesr_err_check; - unsigned long tx_error_check; /* hardware features */ unsigned long irq_flags; /* IRQ configuration flags */ -- cgit v1.2.3 From a80c3de714bc4855747faf5f00e8203fb800e6bb Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Thu, 20 Jun 2013 02:24:54 +0400 Subject: sh_eth: remove redundant bits from 'eesipr_value' field initializer For SH7724 'eesipr_value' field initializer includes DMAC_M_RFRMER & DMAC_M_ECI bits which are already contained in 0x01ff009f -- remove them. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 9a8b24e513ca..4b4c586d2c4f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -414,7 +414,7 @@ static struct sh_eth_cpu_data sh7724_data = { .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f, + .eesipr_value = 0x01ff009f, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | -- cgit v1.2.3 From c8bbe37aa60a3ef694df65fed4e905bd4b1bd73c Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Thu, 20 Jun 2013 02:26:14 +0400 Subject: sh_eth: cleanup 'enum TD_STS_BIT' Fix the comment to 'enum TD_STS_BIT', reformat the values, and add a couple of values missing before (though unused by the driver). Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 87c7ae9e10cc..7540dc35fdb8 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -302,11 +302,11 @@ enum FCFTR_BIT { #define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0) #define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0) -/* Transfer descriptor bit */ +/* Transmit descriptor bit */ enum TD_STS_BIT { - TD_TACT = 0x80000000, - TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000, - TD_TFP0 = 0x10000000, + TD_TACT = 0x80000000, TD_TDLE = 0x40000000, + TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000, + TD_TFE = 0x08000000, TD_TWBI = 0x04000000, }; #define TDF1ST TD_TFP1 #define TDFEND TD_TFP0 -- cgit v1.2.3 From ac8025a643a0e0beb81f3f37ca693364c6b77858 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Thu, 13 Jun 2013 22:12:45 +0400 Subject: sh_eth: get R8A7740 Rx descriptor word 0 shift out of #ifdef The only R8A7740 specific #ifdef hindering ARM multiplatform build is left in sh_eth_rx(): it covers the code shifting Rx buffer descriptor word 0 by 16. 
Get rid of the #ifdef by adding 'shift_rd0' field to the 'struct sh_eth_cpu_data', making the shift dependent on it, and setting it to 1 for the R8A7740 case... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 6 +++--- drivers/net/ethernet/renesas/sh_eth.h | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 4b4c586d2c4f..7732f11f14ad 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -656,6 +656,7 @@ static struct sh_eth_cpu_data r8a7740_data = { .no_ade = 1, .tsu = 1, .select_mii = 1, + .shift_rd0 = 1, }; static struct sh_eth_cpu_data sh7619_data = { @@ -1259,7 +1260,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) if (!(desc_status & RDFEND)) ndev->stats.rx_length_errors++; -#if defined(CONFIG_ARCH_R8A7740) /* * In case of almost all GETHER/ETHERs, the Receive Frame State * (RFS) bits in the Receive Descriptor 0 are from bit 9 to @@ -1267,8 +1267,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) * bits are from bit 25 to bit 16. So, the driver needs right * shifting by 16. */ - desc_status >>= 16; -#endif + if (mdp->cd->shift_rd0) + desc_status >>= 16; if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | RD_RFS5 | RD_RFS6 | RD_RFS10)) { diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 7540dc35fdb8..a78fb0c424f8 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -481,6 +481,7 @@ struct sh_eth_cpu_data { unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ unsigned hw_crc:1; /* E-DMAC have CSMR */ unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ + unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */ }; struct sh_eth_private { -- cgit v1.2.3 From 9e77a2b837bbd7197da966f0915e8f1ddb2ca850 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Tue, 18 Jun 2013 16:18:27 +0300 Subject: net/mlx4_en: Add Low Latency Socket (LLS) support Add basic support for LLS. Signed-off-by: Amir Vadai Reviewed-by: Eric Dumazet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 3 + drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 43 ++++++++- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 15 ++- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 121 +++++++++++++++++++++++++ 4 files changed, 178 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 1e6c594d6d04..3e2d5047cdb3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -139,6 +139,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, if (!cq->is_tx) { netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); + napi_hash_add(&cq->napi); napi_enable(&cq->napi); } @@ -162,6 +163,8 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) { if (!cq->is_tx) { napi_disable(&cq->napi); + napi_hash_del(&cq->napi); + synchronize_rcu(); netif_napi_del(&cq->napi); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index ade276cca0e6..ab9ec91d1f70 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -67,6 +68,30 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) return 0; } +#ifdef CONFIG_NET_LL_RX_POLL +/* must be called with local_bh_disable()d */ +static int mlx4_en_low_latency_recv(struct napi_struct *napi) +{ + struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); + struct net_device *dev = cq->dev; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring]; + int done; + + if (!priv->port_up) + return LL_FLUSH_FAILED; + + if (!mlx4_en_cq_lock_poll(cq)) + return LL_FLUSH_BUSY; + + done = mlx4_en_process_rx_cq(dev, cq, 4); + + mlx4_en_cq_unlock_poll(cq); + + return done; +} +#endif /* CONFIG_NET_LL_RX_POLL */ + #ifdef CONFIG_RFS_ACCEL struct mlx4_en_filter { @@ -1445,6 +1470,8 @@ int mlx4_en_start_port(struct net_device *dev) for (i = 0; i < priv->rx_ring_num; i++) { cq = &priv->rx_cq[i]; + mlx4_en_cq_init_lock(cq); + err = mlx4_en_activate_cq(priv, cq, i); if (err) { en_err(priv, "Failed activating Rx CQ\n"); @@ -1694,10 +1721,19 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) /* Free RX Rings */ for (i = 0; i < priv->rx_ring_num; i++) { + struct mlx4_en_cq *cq = &priv->rx_cq[i]; + + local_bh_disable(); + while (!mlx4_en_cq_lock_napi(cq)) { + pr_info("CQ %d locked\n", i); + mdelay(1); + } + local_bh_enable(); + mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); - while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state)) + while (test_bit(NAPI_STATE_SCHED, &cq->napi.state)) msleep(1); - mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]); + mlx4_en_deactivate_cq(priv, cq); } /* close port*/ @@ -2090,6 +2126,9 @@ static const struct net_device_ops mlx4_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif +#ifdef CONFIG_NET_LL_RX_POLL + .ndo_ll_poll = mlx4_en_low_latency_recv, +#endif }; static const struct net_device_ops mlx4_netdev_ops_master = { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 02aee1ebd203..9c57581b021c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -31,6 +31,7 @@ * */ +#include #include #include 
#include @@ -656,8 +657,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud * - DIX Ethernet (type interpretation) * - TCP/IP (v4) * - without IP options - * - not an IP fragment */ - if (dev->features & NETIF_F_GRO) { + * - not an IP fragment + * - no LLS polling in progress + */ + if (!mlx4_en_cq_ll_polling(cq) && + (dev->features & NETIF_F_GRO)) { struct sk_buff *gro_skb = napi_get_frags(&cq->napi); if (!gro_skb) goto next; @@ -737,6 +741,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud timestamp); } + skb_mark_ll(skb, &cq->napi); + /* Push it up the stack */ netif_receive_skb(skb); @@ -781,8 +787,13 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) struct mlx4_en_priv *priv = netdev_priv(dev); int done; + if (!mlx4_en_cq_lock_napi(cq)) + return budget; + done = mlx4_en_process_rx_cq(dev, cq, budget); + mlx4_en_cq_unlock_napi(cq); + /* If we used up all the quota - we're probably not done yet... */ if (done == budget) INC_PERF_COUNTER(priv->pstats.napi_quota); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index b1f51c1f635c..11c862e4e69d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -310,6 +310,19 @@ struct mlx4_en_cq { u16 moder_cnt; struct mlx4_cqe *buf; #define MLX4_EN_OPCODE_ERROR 0x1e + +#ifdef CONFIG_NET_LL_RX_POLL + unsigned int state; +#define MLX4_EN_CQ_STATE_IDLE 0 +#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */ +#define MLX4_EN_CQ_STATE_POLL 2 /* poll owns this CQ */ +#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL) +#define MLX4_EN_CQ_STATE_NAPI_YIELD 4 /* NAPI yielded this CQ */ +#define MLX4_EN_CQ_STATE_POLL_YIELD 8 /* poll yielded this CQ */ +#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD) +#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) + spinlock_t poll_lock; /* protects from LLS/napi conflicts */ +#endif /* CONFIG_NET_LL_RX_POLL */ }; struct mlx4_en_port_profile { @@ -562,6 +575,114 @@ struct mlx4_mac_entry { struct rcu_head rcu; }; +#ifdef CONFIG_NET_LL_RX_POLL +static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) +{ + spin_lock_init(&cq->poll_lock); + cq->state = MLX4_EN_CQ_STATE_IDLE; +} + +/* called from the device poll rutine to get ownership of a cq */ +static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) +{ + int rc = true; + spin_lock(&cq->poll_lock); + if (cq->state & MLX4_CQ_LOCKED) { + WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI); + cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD; + rc = false; + } else + /* we don't care if someone yielded */ + cq->state = MLX4_EN_CQ_STATE_NAPI; + spin_unlock(&cq->poll_lock); + return rc; +} + +/* returns true is someone tried to get the cq while napi had it */ +static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq) +{ + int rc = false; + spin_lock(&cq->poll_lock); + WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL | + MLX4_EN_CQ_STATE_NAPI_YIELD)); + + if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD) + rc = true; + cq->state = MLX4_EN_CQ_STATE_IDLE; + spin_unlock(&cq->poll_lock); + return rc; +} + +/* called from mlx4_en_low_latency_poll() */ +static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) +{ + int rc = true; + spin_lock_bh(&cq->poll_lock); + if ((cq->state & MLX4_CQ_LOCKED)) { + struct net_device *dev = cq->dev; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_rx_ring *rx_ring = 
&priv->rx_ring[cq->ring]; + + cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD; + rc = false; + } else + /* preserve yield marks */ + cq->state |= MLX4_EN_CQ_STATE_POLL; + spin_unlock_bh(&cq->poll_lock); + return rc; +} + +/* returns true if someone tried to get the cq while it was locked */ +static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) +{ + int rc = false; + spin_lock_bh(&cq->poll_lock); + WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI)); + + if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD) + rc = true; + cq->state = MLX4_EN_CQ_STATE_IDLE; + spin_unlock_bh(&cq->poll_lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq) +{ + WARN_ON(!(cq->state & MLX4_CQ_LOCKED)); + return cq->state & CQ_USER_PEND; +} +#else +static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) +{ +} + +static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) +{ + return true; +} + +static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq) +{ + return false; +} + +static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) +{ + return false; +} + +static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) +{ + return false; +} + +static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq) +{ + return false; +} +#endif /* CONFIG_NET_LL_RX_POLL */ + #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) void mlx4_en_update_loopback_state(struct net_device *dev, -- cgit v1.2.3 From 8501841a4483e678ebd1b7872019621244d0098a Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Tue, 18 Jun 2013 16:18:28 +0300 Subject: net/mlx4_en: Low Latency recv statistics Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 20 +++++++++++++++++++- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4 ++++ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 6 ++++++ 3 files changed, 29 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index c9e6b62dd000..727874f575ce 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -222,7 +222,12 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset) switch (sset) { case ETH_SS_STATS: return (priv->stats_bitmap ? 
bit_count : NUM_ALL_STATS) + - (priv->tx_ring_num + priv->rx_ring_num) * 2; + (priv->tx_ring_num * 2) + +#ifdef CONFIG_NET_LL_RX_POLL + (priv->rx_ring_num * 5); +#else + (priv->rx_ring_num * 2); +#endif case ETH_SS_TEST: return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; @@ -271,6 +276,11 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, for (i = 0; i < priv->rx_ring_num; i++) { data[index++] = priv->rx_ring[i].packets; data[index++] = priv->rx_ring[i].bytes; +#ifdef CONFIG_NET_LL_RX_POLL + data[index++] = priv->rx_ring[i].yields; + data[index++] = priv->rx_ring[i].misses; + data[index++] = priv->rx_ring[i].cleaned; +#endif } spin_unlock_bh(&priv->stats_lock); @@ -334,6 +344,14 @@ static void mlx4_en_get_strings(struct net_device *dev, "rx%d_packets", i); sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_bytes", i); +#ifdef CONFIG_NET_LL_RX_POLL + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_napi_yield", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_misses", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_cleaned", i); +#endif } break; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index ab9ec91d1f70..7299ada876c2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -85,6 +85,10 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi) return LL_FLUSH_BUSY; done = mlx4_en_process_rx_cq(dev, cq, 4); + if (likely(done)) + rx_ring->cleaned += done; + else + rx_ring->misses++; mlx4_en_cq_unlock_poll(cq); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 11c862e4e69d..57192a8f1d5e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -290,6 +290,11 @@ struct mlx4_en_rx_ring { void *rx_info; unsigned long bytes; unsigned long packets; +#ifdef CONFIG_NET_LL_RX_POLL + unsigned long yields; + unsigned long misses; + unsigned long cleaned; +#endif unsigned long csum_ok; unsigned long csum_none; int hwtstamp_rx_filter; @@ -625,6 +630,7 @@ static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD; rc = false; + rx_ring->yields++; } else /* preserve yield marks */ cq->state |= MLX4_EN_CQ_STATE_POLL; -- cgit v1.2.3 From 8f20aa575c0a69ccbdce325818f2b3878bfed61c Mon Sep 17 00:00:00 2001 From: Dmitry Kravkov Date: Wed, 19 Jun 2013 01:36:04 +0300 Subject: bnx2x: add support for ndo_ll_poll Adds ndo_ll_poll method and locking for FPs between LL and the napi. When receiving a packet we use skb_mark_ll to record the napi it came from. Add each napi to the napi_hash right after netif_napi_add(). Signed-off-by: Dmitry Kravkov Signed-off-by: Eilon Greenstein Reviewed-by: Eric Dumazet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 125 +++++++++++++++++++++++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 67 ++++++++++-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 23 ++++- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4 + 4 files changed, 208 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index f76597e5fa55..edda716fdce4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -485,6 +485,21 @@ struct bnx2x_fastpath { struct bnx2x *bp; /* parent */ struct napi_struct napi; + +#ifdef CONFIG_NET_LL_RX_POLL + unsigned int state; +#define BNX2X_FP_STATE_IDLE 0 +#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ +#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ +#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */ +#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */ +#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) +#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) +#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) + /* protect state */ + spinlock_t lock; +#endif /* CONFIG_NET_LL_RX_POLL */ + union host_hc_status_block status_blk; /* chip independent shortcuts into sb structure */ __le16 *sb_index_values; @@ -557,6 +572,116 @@ struct bnx2x_fastpath { #define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) +#ifdef CONFIG_NET_LL_RX_POLL +static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) +{ + spin_lock_init(&fp->lock); + fp->state = BNX2X_FP_STATE_IDLE; +} + +/* called from the device poll routine to get ownership of a FP */ +static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) +{ + bool rc = true; + + spin_lock(&fp->lock); + if (fp->state & BNX2X_FP_LOCKED) { + WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); + fp->state |= BNX2X_FP_STATE_NAPI_YIELD; + rc = false; + } else { + /* we don't care if someone yielded */ + fp->state = BNX2X_FP_STATE_NAPI; + } + spin_unlock(&fp->lock); + return rc; +} + +/* returns true is someone tried to get the FP while napi had it */ +static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) +{ + bool rc = false; + + spin_lock(&fp->lock); + WARN_ON(fp->state & + (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); + + if (fp->state & BNX2X_FP_STATE_POLL_YIELD) + rc = true; + fp->state = BNX2X_FP_STATE_IDLE; + spin_unlock(&fp->lock); + return rc; +} + +/* called from bnx2x_low_latency_poll() */ +static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) +{ + bool rc = true; + + spin_lock_bh(&fp->lock); + if ((fp->state & BNX2X_FP_LOCKED)) { + fp->state |= BNX2X_FP_STATE_POLL_YIELD; + rc = false; + } else { + /* preserve yield marks */ + fp->state |= BNX2X_FP_STATE_POLL; + } + spin_unlock_bh(&fp->lock); + return rc; +} + +/* returns true if someone tried to get the FP while it was locked */ +static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) +{ + bool rc = false; + + spin_lock_bh(&fp->lock); + WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); + + if (fp->state & BNX2X_FP_STATE_POLL_YIELD) + rc = true; + fp->state = BNX2X_FP_STATE_IDLE; + spin_unlock_bh(&fp->lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool bnx2x_fp_ll_polling(struct 
bnx2x_fastpath *fp) +{ + WARN_ON(!(fp->state & BNX2X_FP_LOCKED)); + return fp->state & BNX2X_FP_USER_PEND; +} +#else +static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) +{ +} + +static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) +{ + return true; +} + +static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) +{ + return false; +} + +static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) +{ + return false; +} + +static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) +{ + return false; +} + +static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) +{ + return false; +} +#endif /* CONFIG_NET_LL_RX_POLL */ + /* Use 2500 as a mini-jumbo MTU for FCoE */ #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4e42bdd7c522..20eefa6809e6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include "bnx2x_cmn.h" #include "bnx2x_init.h" @@ -999,8 +1000,13 @@ reuse_rx: PARSING_FLAGS_VLAN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), le16_to_cpu(cqe_fp->vlan_tag)); - napi_gro_receive(&fp->napi, skb); + skb_mark_ll(skb, &fp->napi); + + if (bnx2x_fp_ll_polling(fp)) + netif_receive_skb(skb); + else + napi_gro_receive(&fp->napi, skb); next_rx: rx_buf->data = NULL; @@ -1755,32 +1761,46 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp) { int i; - for_each_rx_queue_cnic(bp, i) + for_each_rx_queue_cnic(bp, i) { + bnx2x_fp_init_lock(&bp->fp[i]); napi_enable(&bnx2x_fp(bp, i, napi)); + } } static void bnx2x_napi_enable(struct bnx2x *bp) { int i; - for_each_eth_queue(bp, i) + for_each_eth_queue(bp, i) { + bnx2x_fp_init_lock(&bp->fp[i]); napi_enable(&bnx2x_fp(bp, i, napi)); + } } static void bnx2x_napi_disable_cnic(struct bnx2x *bp) { int i; - for_each_rx_queue_cnic(bp, i) + local_bh_disable(); + for_each_rx_queue_cnic(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); + while (!bnx2x_fp_lock_napi(&bp->fp[i])) + mdelay(1); + } + local_bh_enable(); } static void bnx2x_napi_disable(struct bnx2x *bp) { int i; - for_each_eth_queue(bp, i) + local_bh_disable(); + for_each_eth_queue(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); + while (!bnx2x_fp_lock_napi(&bp->fp[i])) + mdelay(1); + } + local_bh_enable(); } void bnx2x_netif_start(struct bnx2x *bp) @@ -3039,6 +3059,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget) return 0; } #endif + if (!bnx2x_fp_lock_napi(fp)) + return work_done; for_each_cos_in_tx_queue(fp, cos) if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) @@ -3048,12 +3070,15 @@ int bnx2x_poll(struct napi_struct *napi, int budget) work_done += bnx2x_rx_int(fp, budget - work_done); /* must not complete if we consumed full budget */ - if (work_done >= budget) + if (work_done >= budget) { + bnx2x_fp_unlock_napi(fp); break; + } } /* Fall out from the NAPI loop if needed */ - if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + if (!bnx2x_fp_unlock_napi(fp) && + !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { /* No need to update SB for FCoE L2 ring as long as * it's connected to the default SB and the SB @@ -3095,6 +3120,34 @@ int bnx2x_poll(struct napi_struct *napi, int budget) return work_done; } +#ifdef CONFIG_NET_LL_RX_POLL +/* must be called with local_bh_disable()d */ +int bnx2x_low_latency_recv(struct napi_struct *napi) +{ + struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, 
+ napi); + struct bnx2x *bp = fp->bp; + int found = 0; + + if ((bp->state == BNX2X_STATE_CLOSED) || + (bp->state == BNX2X_STATE_ERROR) || + (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG))) + return LL_FLUSH_FAILED; + + if (!bnx2x_fp_lock_poll(fp)) + return LL_FLUSH_BUSY; + + if (bnx2x_has_rx_work(fp)) { + bnx2x_update_fpsb_idx(fp); + found = bnx2x_rx_int(fp, 4); + } + + bnx2x_fp_unlock_poll(fp); + + return found; +} +#endif + /* we split the first BD into headers and data BDs * to ease the pain of our fellow microcode engineers * we use one mapping for both BDs diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 650bb52155a8..a1a5cdca114c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -604,6 +604,13 @@ int bnx2x_enable_msi(struct bnx2x *bp); */ int bnx2x_poll(struct napi_struct *napi, int budget); +/** + * bnx2x_low_latency_recv - LL callback + * + * @napi: napi structure + */ +int bnx2x_low_latency_recv(struct napi_struct *napi); + /** * bnx2x_alloc_mem_bp - allocate memories outsize main driver structure * @@ -846,9 +853,11 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp) int i; /* Add NAPI objects */ - for_each_rx_queue_cnic(bp, i) + for_each_rx_queue_cnic(bp, i) { netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, NAPI_POLL_WEIGHT); + napi_hash_add(&bnx2x_fp(bp, i, napi)); + } } static inline void bnx2x_add_all_napi(struct bnx2x *bp) @@ -856,25 +865,31 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) int i; /* Add NAPI objects */ - for_each_eth_queue(bp, i) + for_each_eth_queue(bp, i) { netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, NAPI_POLL_WEIGHT); + napi_hash_add(&bnx2x_fp(bp, i, napi)); + } } static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp) { int i; - for_each_rx_queue_cnic(bp, i) + for_each_rx_queue_cnic(bp, i) { + napi_hash_del(&bnx2x_fp(bp, i, napi)); netif_napi_del(&bnx2x_fp(bp, i, napi)); + } } static inline void bnx2x_del_all_napi(struct bnx2x *bp) { int i; - for_each_eth_queue(bp, i) + for_each_eth_queue(bp, i) { + napi_hash_del(&bnx2x_fp(bp, i, napi)); netif_napi_del(&bnx2x_fp(bp, i, napi)); + } } int bnx2x_set_int_mode(struct bnx2x *bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 658b9fd0275f..e2e870568455 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12013,6 +12013,10 @@ static const struct net_device_ops bnx2x_netdev_ops = { #ifdef NETDEV_FCOE_WWNN .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, #endif + +#ifdef CONFIG_NET_LL_RX_POLL + .ndo_ll_poll = bnx2x_low_latency_recv, +#endif }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) -- cgit v1.2.3 From 75b2945988274078bccf4c0b84e90c77b4fcaf96 Mon Sep 17 00:00:00 2001 From: Dmitry Kravkov Date: Wed, 19 Jun 2013 01:36:05 +0300 Subject: bnx2x: replace mechanism to check for next available packet Check next packet availability by validating that HW has finished CQE placement. This saves latency of another dma transaction performed to update SB indexes. Signed-off-by: Dmitry Kravkov Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 49 ++++++++++-------------- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 26 +++++++++---- drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 3 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 1 - 4 files changed, 42 insertions(+), 37 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 20eefa6809e6..ca7f2bb08f44 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -804,40 +804,32 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) { struct bnx2x *bp = fp->bp; u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; - u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; + u16 sw_comp_cons, sw_comp_prod; int rx_pkt = 0; + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) return 0; #endif - /* CQ "next element" is of the size of the regular element, - that's why it's ok here */ - hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb); - if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) - hw_comp_cons++; - bd_cons = fp->rx_bd_cons; bd_prod = fp->rx_bd_prod; bd_prod_fw = bd_prod; sw_comp_cons = fp->rx_comp_cons; sw_comp_prod = fp->rx_comp_prod; - /* Memory barrier necessary as speculative reads of the rx - * buffer can be ahead of the index in the status block - */ - rmb(); + comp_ring_cons = RCQ_BD(sw_comp_cons); + cqe = &fp->rx_comp_ring[comp_ring_cons]; + cqe_fp = &cqe->fast_path_cqe; DP(NETIF_MSG_RX_STATUS, - "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n", - fp->index, hw_comp_cons, sw_comp_cons); + "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons); - while (sw_comp_cons != hw_comp_cons) { + while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) { struct sw_rx_bd *rx_buf = NULL; struct sk_buff *skb; - union eth_rx_cqe *cqe; - struct eth_fast_path_rx_cqe *cqe_fp; u8 cqe_fp_flags; enum eth_rx_cqe_type cqe_fp_type; u16 len, pad, queue; @@ -849,12 +841,9 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) return 0; #endif - comp_ring_cons = RCQ_BD(sw_comp_cons); bd_prod = RX_BD(bd_prod); bd_cons = RX_BD(bd_cons); - cqe = &fp->rx_comp_ring[comp_ring_cons]; - cqe_fp = &cqe->fast_path_cqe; cqe_fp_flags = cqe_fp->type_error_flags; cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; @@ -1018,8 +1007,15 @@ next_cqe: sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); + /* mark CQE as free */ + BNX2X_SEED_CQE(cqe_fp); + if (rx_pkt == budget) break; + + comp_ring_cons = RCQ_BD(sw_comp_cons); + cqe = &fp->rx_comp_ring[comp_ring_cons]; + cqe_fp = &cqe->fast_path_cqe; } /* while */ fp->rx_bd_cons = bd_cons; @@ -1055,8 +1051,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) #endif /* Handle Rx and Tx according to MSI-X vector */ - prefetch(fp->rx_cons_sb); - for_each_cos_in_tx_queue(fp, cos) prefetch(fp->txdata_ptr[cos]->tx_cons_sb); @@ -3137,10 +3131,8 @@ int bnx2x_low_latency_recv(struct napi_struct *napi) if (!bnx2x_fp_lock_poll(fp)) return LL_FLUSH_BUSY; - if (bnx2x_has_rx_work(fp)) { - bnx2x_update_fpsb_idx(fp); + if (bnx2x_has_rx_work(fp)) found = bnx2x_rx_int(fp, 4); - } bnx2x_fp_unlock_poll(fp); @@ -4339,10 +4331,11 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) &bnx2x_fp(bp, index, rx_desc_mapping), sizeof(struct eth_rx_bd) * NUM_RX_BD); - BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring), - &bnx2x_fp(bp, index, rx_comp_mapping), 
- sizeof(struct eth_fast_path_rx_cqe) * - NUM_RCQ_BD); + /* Seed all CQEs by 1s */ + BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring), + &bnx2x_fp(bp, index, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * + NUM_RCQ_BD); /* SGE ring */ BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index a1a5cdca114c..c07a6d054cfe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -59,6 +59,16 @@ extern int int_mode; (unsigned long long)(*y), x); \ } while (0) +#define BNX2X_PCI_FALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x == NULL) \ + goto alloc_mem_err; \ + memset((void *)x, 0xFFFFFFFF, size); \ + DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\ + (unsigned long long)(*y), x); \ + } while (0) + #define BNX2X_ALLOC(x, size) \ do { \ x = kzalloc(size, GFP_KERNEL); \ @@ -805,16 +815,18 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp) return false; } +#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0) +#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF) static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) { - u16 rx_cons_sb; + u16 cons; + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; - /* Tell compiler that status block fields can change */ - barrier(); - rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); - if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) - rx_cons_sb++; - return (fp->rx_comp_cons != rx_cons_sb); + cons = RCQ_BD(fp->rx_comp_cons); + cqe = &fp->rx_comp_ring[cons]; + cqe_fp = &cqe->fast_path_cqe; + return BNX2X_IS_CQE_COMPLETED(cqe_fp); } /** diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5ef3f964e544..5018e52ae2ad 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -3818,7 +3818,8 @@ struct eth_fast_path_rx_cqe { __le16 len_on_bd; struct parsing_flags pars_flags; union eth_sgl_or_raw_data sgl_or_raw_data; - __le32 reserved1[8]; + __le32 reserved1[7]; + u32 marker; }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e2e870568455..f0d21faf71ec 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -1866,7 +1866,6 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); if (status & mask) { /* Handle Rx or Tx according to SB id */ - prefetch(fp->rx_cons_sb); for_each_cos_in_tx_queue(fp, cos) prefetch(fp->txdata_ptr[cos]->tx_cons_sb); prefetch(&fp->sb_running_index[SM_RX_ID]); -- cgit v1.2.3 From 257a3feb3f144783184adb2df930331cbe36ff25 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Fri, 14 Jun 2013 15:54:51 +0530 Subject: be2net: use pci_vfs_assigned()/pci_num_vf() instead of be_find_vfs() be_find_vfs() is no longer needed as the common PCI calls provide the same functionality. Signed-off-by: Sathya Perla Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 31 ++++------------------------- 1 file changed, 4 insertions(+), 27 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 98efc29eaa55..cd69ac79f565 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1262,30 +1262,6 @@ static int be_set_vf_tx_rate(struct net_device *netdev, return status; } -static int be_find_vfs(struct be_adapter *adapter, int vf_state) -{ - struct pci_dev *dev, *pdev = adapter->pdev; - int vfs = 0, assigned_vfs = 0, pos; - u16 offset, stride; - - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); - if (!pos) - return 0; - pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset); - pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride); - - dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL); - while (dev) { - if (dev->is_virtfn && pci_physfn(dev) == pdev) { - vfs++; - if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) - assigned_vfs++; - } - dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev); - } - return (vf_state == ASSIGNED) ? assigned_vfs : vfs; -} - static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo) { struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]); @@ -2797,7 +2773,7 @@ static void be_vf_clear(struct be_adapter *adapter) struct be_vf_cfg *vf_cfg; u32 vf; - if (be_find_vfs(adapter, ASSIGNED)) { + if (pci_vfs_assigned(adapter->pdev)) { dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs: not disabling VFs\n"); goto done; @@ -2899,7 +2875,7 @@ static int be_vf_setup(struct be_adapter *adapter) int status, old_vfs, vf; struct device *dev = &adapter->pdev->dev; - old_vfs = be_find_vfs(adapter, ENABLED); + old_vfs = pci_num_vf(adapter->pdev); if (old_vfs) { dev_info(dev, "%d VFs are already enabled\n", old_vfs); if (old_vfs != num_vfs) @@ -4200,9 +4176,10 @@ reschedule: schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); } +/* If any VFs are already enabled don't FLR the PF */ static bool be_reset_required(struct be_adapter *adapter) { - return be_find_vfs(adapter, ENABLED) > 0 ? false : true; + return pci_num_vf(adapter->pdev) ? false : true; } static char *mc_name(struct be_adapter *adapter) -- cgit v1.2.3 From a1606c7dc64d8449676d7e840dd2cd0c4e0a0c57 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 18 Jun 2013 03:24:51 +0100 Subject: net: Move MII out from under NET_CORE and hide it All drivers that select MII also need to select NET_CORE because MII depends on it. This is a bit ridiculous because NET_CORE is just a menu option that doesn't enable any code by itself. There is also no need for it to be a visible option, since its users all select it. Signed-off-by: Ben Hutchings Acked-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- arch/cris/arch-v10/drivers/Kconfig | 1 - arch/cris/arch-v32/drivers/Kconfig | 1 - drivers/net/Kconfig | 10 +++------- drivers/net/ethernet/3com/Kconfig | 1 - drivers/net/ethernet/Kconfig | 4 ---- drivers/net/ethernet/adaptec/Kconfig | 1 - drivers/net/ethernet/adi/Kconfig | 1 - drivers/net/ethernet/allwinner/Kconfig | 1 - drivers/net/ethernet/amd/Kconfig | 2 -- drivers/net/ethernet/atheros/Kconfig | 4 ---- drivers/net/ethernet/broadcom/Kconfig | 2 -- drivers/net/ethernet/cirrus/Kconfig | 1 - drivers/net/ethernet/davicom/Kconfig | 1 - drivers/net/ethernet/dec/tulip/Kconfig | 1 - drivers/net/ethernet/dlink/Kconfig | 1 - drivers/net/ethernet/faraday/Kconfig | 1 - drivers/net/ethernet/freescale/fs_enet/Kconfig | 1 - drivers/net/ethernet/icplus/Kconfig | 1 - drivers/net/ethernet/intel/Kconfig | 1 - drivers/net/ethernet/micrel/Kconfig | 4 ---- drivers/net/ethernet/nuvoton/Kconfig | 1 - drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 1 - drivers/net/ethernet/packetengines/Kconfig | 1 - drivers/net/ethernet/rdc/Kconfig | 1 - drivers/net/ethernet/realtek/Kconfig | 3 --- drivers/net/ethernet/renesas/Kconfig | 1 - drivers/net/ethernet/sgi/Kconfig | 1 - drivers/net/ethernet/sis/Kconfig | 2 -- drivers/net/ethernet/smsc/Kconfig | 5 ----- drivers/net/ethernet/stmicro/stmmac/Kconfig | 1 - drivers/net/ethernet/via/Kconfig | 2 -- drivers/net/usb/Kconfig | 4 ---- drivers/staging/silicom/Kconfig | 1 - 33 files changed, 3 insertions(+), 61 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/arch/cris/arch-v10/drivers/Kconfig b/arch/cris/arch-v10/drivers/Kconfig index 5f2cdb3e428c..8eab0c61d57b 100644 --- a/arch/cris/arch-v10/drivers/Kconfig +++ b/arch/cris/arch-v10/drivers/Kconfig @@ -4,7 +4,6 @@ config ETRAX_ETHERNET bool "Ethernet support" depends on ETRAX_ARCH_V10 select ETHERNET - select NET_CORE select MII help This option enables the ETRAX 100LX built-in 10/100Mbit Ethernet diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig index c55971a40c34..91c4e54ca904 100644 --- a/arch/cris/arch-v32/drivers/Kconfig +++ b/arch/cris/arch-v32/drivers/Kconfig @@ -4,7 +4,6 @@ config ETRAX_ETHERNET bool "Ethernet support" depends on ETRAX_ARCH_V32 select ETHERNET - select NET_CORE select MII help This option enables the ETRAX FS built-in 10/100Mbit Ethernet diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 3835321b8cf3..00aba08f01a9 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -25,6 +25,9 @@ menuconfig NETDEVICES # that for each of the symbols. if NETDEVICES +config MII + tristate + config NET_CORE default y bool "Network core driver support" @@ -100,13 +103,6 @@ config NET_FC adaptor below. You also should have said Y to "SCSI support" and "SCSI generic support". -config MII - tristate "Generic Media Independent Interface device support" - help - Most ethernet controllers have MII transceiver either as an external - or internal device. It is safe to say Y or M here even if your - ethernet card lacks MII. 
- config IFB tristate "Intermediate Functional Block support" depends on NET_CLS_ACT diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig index 1c71c763f680..f00c76377b44 100644 --- a/drivers/net/ethernet/3com/Kconfig +++ b/drivers/net/ethernet/3com/Kconfig @@ -67,7 +67,6 @@ config PCMCIA_3C589 config VORTEX tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support" depends on (PCI || EISA) && HAS_IOPORT - select NET_CORE select MII ---help--- This option enables driver support for a large number of 10Mbps and diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 18fd6fbeb109..a989669b353a 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -64,7 +64,6 @@ config JME tristate "JMicron(R) PCI-Express Gigabit Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver supports the PCI-Express gigabit ethernet adapters @@ -96,7 +95,6 @@ config FEALNX tristate "Myson MTD-8xx PCI Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- Say Y here to support the Myson MTD-800 family of PCI-based Ethernet @@ -107,7 +105,6 @@ source "drivers/net/ethernet/8390/Kconfig" config NET_NETX tristate "NetX Ethernet support" - select NET_CORE select MII depends on ARCH_NETX ---help--- @@ -125,7 +122,6 @@ source "drivers/net/ethernet/oki-semi/Kconfig" config ETHOC tristate "OpenCores 10/100 Mbps Ethernet MAC support" depends on HAS_IOMEM && HAS_DMA - select NET_CORE select MII select PHYLIB select CRC32 diff --git a/drivers/net/ethernet/adaptec/Kconfig b/drivers/net/ethernet/adaptec/Kconfig index 0bff571b1bb3..5c804bbe3dab 100644 --- a/drivers/net/ethernet/adaptec/Kconfig +++ b/drivers/net/ethernet/adaptec/Kconfig @@ -22,7 +22,6 @@ config ADAPTEC_STARFIRE tristate "Adaptec Starfire/DuraLAN support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- Say Y here if you have an Adaptec Starfire (or DuraLAN) PCI network diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig index a9481606bbcd..f952fff6a9a9 100644 --- a/drivers/net/ethernet/adi/Kconfig +++ b/drivers/net/ethernet/adi/Kconfig @@ -23,7 +23,6 @@ config BFIN_MAC tristate "Blackfin on-chip MAC support" depends on (BF516 || BF518 || BF526 || BF527 || BF536 || BF537) select CRC32 - select NET_CORE select MII select PHYLIB select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig index 66d35324f31e..53ad213e865b 100644 --- a/drivers/net/ethernet/allwinner/Kconfig +++ b/drivers/net/ethernet/allwinner/Kconfig @@ -24,7 +24,6 @@ config SUN4I_EMAC depends on ARCH_SUNXI depends on OF select CRC32 - select NET_CORE select MII select PHYLIB ---help--- diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 13d74aa4033d..562df46e0a82 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -34,7 +34,6 @@ config AMD8111_ETH tristate "AMD 8111 (new PCI LANCE) support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- If you have an AMD 8111-based PCI LANCE ethernet card, @@ -60,7 +59,6 @@ config PCNET32 tristate "AMD PCnet32 PCI support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- If you have a PCnet32 or PCnetPCI based network (Ethernet) card, diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig index ad6aa1e98348..39e8d6cc8843 100644 --- 
a/drivers/net/ethernet/atheros/Kconfig +++ b/drivers/net/ethernet/atheros/Kconfig @@ -22,7 +22,6 @@ config ATL2 tristate "Atheros L2 Fast Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver supports the Atheros L2 fast ethernet adapter. @@ -34,7 +33,6 @@ config ATL1 tristate "Atheros/Attansic L1 Gigabit Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver supports the Atheros/Attansic L1 gigabit ethernet @@ -47,7 +45,6 @@ config ATL1E tristate "Atheros L1E Gigabit Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver supports the Atheros L1E gigabit ethernet adapter. @@ -59,7 +56,6 @@ config ATL1C tristate "Atheros L1C Gigabit Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver supports the Atheros L1C gigabit ethernet adapter. diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 3e69b3f88099..1d680baf43d6 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -22,7 +22,6 @@ config B44 tristate "Broadcom 440x/47xx ethernet support" depends on SSB_POSSIBLE && HAS_DMA select SSB - select NET_CORE select MII ---help--- If you have a network (Ethernet) controller of this type, say Y @@ -54,7 +53,6 @@ config B44_PCI config BCM63XX_ENET tristate "Broadcom 63xx internal mac support" depends on BCM63XX - select NET_CORE select MII select PHYLIB help diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig index 8388e36cf08f..7403dff8f14a 100644 --- a/drivers/net/ethernet/cirrus/Kconfig +++ b/drivers/net/ethernet/cirrus/Kconfig @@ -44,7 +44,6 @@ config CS89x0_PLATFORM config EP93XX_ETH tristate "EP93xx Ethernet support" depends on ARM && ARCH_EP93XX - select NET_CORE select MII help This is a driver for the ethernet hardware included in EP93xx CPUs. diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig index 9745fe5e8039..316c5e5a92ad 100644 --- a/drivers/net/ethernet/davicom/Kconfig +++ b/drivers/net/ethernet/davicom/Kconfig @@ -6,7 +6,6 @@ config DM9000 tristate "DM9000 support" depends on ARM || BLACKFIN || MIPS || COLDFIRE select CRC32 - select NET_CORE select MII ---help--- Support for DM9000 chipset. diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig index 1df33c799c00..eb9ba6e97d04 100644 --- a/drivers/net/ethernet/dec/tulip/Kconfig +++ b/drivers/net/ethernet/dec/tulip/Kconfig @@ -126,7 +126,6 @@ config WINBOND_840 tristate "Winbond W89c840 Ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver is for the Winbond W89c840 chip. It also works with diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig index ee26ce78e270..c543ac11ce08 100644 --- a/drivers/net/ethernet/dlink/Kconfig +++ b/drivers/net/ethernet/dlink/Kconfig @@ -36,7 +36,6 @@ config SUNDANCE tristate "Sundance Alta support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver is for the Sundance "Alta" chip. 
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig index b8974b9e3b47..5918c6891694 100644 --- a/drivers/net/ethernet/faraday/Kconfig +++ b/drivers/net/ethernet/faraday/Kconfig @@ -21,7 +21,6 @@ if NET_VENDOR_FARADAY config FTMAC100 tristate "Faraday FTMAC100 10/100 Ethernet support" depends on ARM - select NET_CORE select MII ---help--- This driver supports the FTMAC100 10/100 Ethernet controller diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig index 268414d9f2cb..be92229f2c2a 100644 --- a/drivers/net/ethernet/freescale/fs_enet/Kconfig +++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig @@ -1,7 +1,6 @@ config FS_ENET tristate "Freescale Ethernet Driver" depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x) - select NET_CORE select MII select PHYLIB diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig index 5119ef18953b..14a66e9d2e26 100644 --- a/drivers/net/ethernet/icplus/Kconfig +++ b/drivers/net/ethernet/icplus/Kconfig @@ -5,7 +5,6 @@ config IP1000 tristate "IP1000 Gigabit Ethernet support" depends on PCI - select NET_CORE select MII ---help--- This driver supports IP1000 gigabit Ethernet cards. diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 05f7264c51f7..f0e7ed20a750 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -20,7 +20,6 @@ if NET_VENDOR_INTEL config E100 tristate "Intel(R) PRO/100+ support" depends on PCI - select NET_CORE select MII ---help--- This driver supports Intel(R) PRO/100 family of adapters. diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig index fe42fc00d8d3..d16b11ed2e52 100644 --- a/drivers/net/ethernet/micrel/Kconfig +++ b/drivers/net/ethernet/micrel/Kconfig @@ -22,7 +22,6 @@ if NET_VENDOR_MICREL config ARM_KS8695_ETHER tristate "KS8695 Ethernet support" depends on ARM && ARCH_KS8695 - select NET_CORE select MII ---help--- If you wish to compile a kernel for the KS8695 and want to @@ -39,7 +38,6 @@ config KS8842 config KS8851 tristate "Micrel KS8851 SPI" depends on SPI - select NET_CORE select MII select CRC32 select EEPROM_93CX6 @@ -49,7 +47,6 @@ config KS8851 config KS8851_MLL tristate "Micrel KS8851 MLL" depends on HAS_IOMEM - select NET_CORE select MII ---help--- This platform driver is for Micrel KS8851 Address/data bus @@ -58,7 +55,6 @@ config KS8851_MLL config KSZ884X_PCI tristate "Micrel KSZ8841/2 PCI" depends on PCI - select NET_CORE select MII select CRC32 ---help--- diff --git a/drivers/net/ethernet/nuvoton/Kconfig b/drivers/net/ethernet/nuvoton/Kconfig index 334c17183095..01182b559473 100644 --- a/drivers/net/ethernet/nuvoton/Kconfig +++ b/drivers/net/ethernet/nuvoton/Kconfig @@ -22,7 +22,6 @@ config W90P910_ETH tristate "Nuvoton w90p910 Ethernet support" depends on ARM && ARCH_W90X900 select PHYLIB - select NET_CORE select MII ---help--- Say Y here if you want to use built-in Ethernet ports diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index 34d05bf72b2e..cb22341a14a8 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig @@ -5,7 +5,6 @@ config PCH_GBE tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" depends on PCI - select NET_CORE select MII select PTP_1588_CLOCK_PCH ---help--- diff --git a/drivers/net/ethernet/packetengines/Kconfig 
b/drivers/net/ethernet/packetengines/Kconfig index cbbeca3f8c5c..8d5180043c70 100644 --- a/drivers/net/ethernet/packetengines/Kconfig +++ b/drivers/net/ethernet/packetengines/Kconfig @@ -21,7 +21,6 @@ if NET_PACKET_ENGINE config HAMACHI tristate "Packet Engines Hamachi GNIC-II support" depends on PCI - select NET_CORE select MII ---help--- If you have a Gigabit Ethernet card of this type, say Y and read diff --git a/drivers/net/ethernet/rdc/Kconfig b/drivers/net/ethernet/rdc/Kconfig index c8ba4b3494c1..2055f7eb2ba9 100644 --- a/drivers/net/ethernet/rdc/Kconfig +++ b/drivers/net/ethernet/rdc/Kconfig @@ -22,7 +22,6 @@ config R6040 tristate "RDC R6040 Fast Ethernet Adapter support" depends on PCI select CRC32 - select NET_CORE select MII select PHYLIB ---help--- diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig index 783fa8b5cde7..ae5d027096ed 100644 --- a/drivers/net/ethernet/realtek/Kconfig +++ b/drivers/net/ethernet/realtek/Kconfig @@ -37,7 +37,6 @@ config 8139CP tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This is a driver for the Fast Ethernet PCI network cards based on @@ -52,7 +51,6 @@ config 8139TOO tristate "RealTek RTL-8129/8130/8139 PCI Fast Ethernet Adapter support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This is a driver for the Fast Ethernet PCI network cards based on @@ -107,7 +105,6 @@ config R8169 depends on PCI select FW_LOADER select CRC32 - select NET_CORE select MII ---help--- Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 267eac054fa4..544514e66187 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -5,7 +5,6 @@ config SH_ETH tristate "Renesas SuperH Ethernet support" select CRC32 - select NET_CORE select MII select MDIO_BITBANG select PHYLIB diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig index c1c4bb868a3b..e832f46660c9 100644 --- a/drivers/net/ethernet/sgi/Kconfig +++ b/drivers/net/ethernet/sgi/Kconfig @@ -22,7 +22,6 @@ config SGI_IOC3_ETH bool "SGI IOC3 Ethernet" depends on PCI && SGI_IP27 select CRC32 - select NET_CORE select MII ---help--- If you have a network (Ethernet) card of this type, say Y and read diff --git a/drivers/net/ethernet/sis/Kconfig b/drivers/net/ethernet/sis/Kconfig index f1135cc1bd48..68d052b09af1 100644 --- a/drivers/net/ethernet/sis/Kconfig +++ b/drivers/net/ethernet/sis/Kconfig @@ -22,7 +22,6 @@ config SIS900 tristate "SiS 900/7016 PCI Fast Ethernet Adapter support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This is a driver for the Fast Ethernet PCI network cards based on @@ -39,7 +38,6 @@ config SIS190 tristate "SiS190/SiS191 gigabit ethernet support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- Say Y here if you have a SiS 190 PCI Fast Ethernet adapter or diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index ff9e99474039..068fc44d37e1 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -37,7 +37,6 @@ config SMC9194 config SMC91X tristate "SMC 91C9x/91C1xxx support" select CRC32 - select NET_CORE select MII depends on (ARM || M32R || SUPERH || MIPS || BLACKFIN || \ MN10300 || COLDFIRE || ARM64) @@ -57,7 +56,6 @@ config PCMCIA_SMC91C92 tristate "SMC 91Cxx PCMCIA support" depends on 
PCMCIA select CRC32 - select NET_CORE select MII ---help--- Say Y here if you intend to attach an SMC 91Cxx compatible PCMCIA @@ -70,7 +68,6 @@ config EPIC100 tristate "SMC EtherPower II" depends on PCI select CRC32 - select NET_CORE select MII ---help--- This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC, @@ -81,7 +78,6 @@ config EPIC100 config SMC911X tristate "SMSC LAN911[5678] support" select CRC32 - select NET_CORE select MII depends on (ARM || SUPERH || MN10300) ---help--- @@ -99,7 +95,6 @@ config SMSC911X tristate "SMSC LAN911x/LAN921x families embedded ethernet support" depends on HAS_IOMEM select CRC32 - select NET_CORE select MII select PHYLIB ---help--- diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 43c1f3223322..6e52c0f74cd9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -1,7 +1,6 @@ config STMMAC_ETH tristate "STMicroelectronics 10/100/1000 Ethernet driver" depends on HAS_IOMEM && HAS_DMA - select NET_CORE select MII select PHYLIB select CRC32 diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig index 6a87097d88c0..8a049a2b4474 100644 --- a/drivers/net/ethernet/via/Kconfig +++ b/drivers/net/ethernet/via/Kconfig @@ -21,7 +21,6 @@ config VIA_RHINE tristate "VIA Rhine support" depends on PCI select CRC32 - select NET_CORE select MII ---help--- If you have a VIA "Rhine" based network card (Rhine-I (VT86C100A), @@ -47,7 +46,6 @@ config VIA_VELOCITY depends on (PCI || USE_OF) select CRC32 select CRC_CCITT - select NET_CORE select MII ---help--- If you have a VIA "Velocity" based network card say Y here. diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 287cc624b90b..d84bfd4109a4 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -67,7 +67,6 @@ config USB_KAWETH config USB_PEGASUS tristate "USB Pegasus/Pegasus-II based ethernet device support" - select NET_CORE select MII ---help--- Say Y here if you know you have Pegasus or Pegasus-II based adapter. @@ -83,7 +82,6 @@ config USB_PEGASUS config USB_RTL8150 tristate "USB RTL8150 based ethernet device support" - select NET_CORE select MII help Say Y here if you have RTL8150 based usb-ethernet adapter. @@ -95,7 +93,6 @@ config USB_RTL8150 config USB_RTL8152 tristate "Realtek RTL8152 Based USB 2.0 Ethernet Adapters" - select NET_CORE select MII help This option adds support for Realtek RTL8152 based USB 2.0 @@ -106,7 +103,6 @@ config USB_RTL8152 config USB_USBNET tristate "Multi-purpose USB Networking Framework" - select NET_CORE select MII ---help--- This driver supports several kinds of network links over USB, diff --git a/drivers/staging/silicom/Kconfig b/drivers/staging/silicom/Kconfig index eda2e7d73645..fc082db628ec 100644 --- a/drivers/staging/silicom/Kconfig +++ b/drivers/staging/silicom/Kconfig @@ -32,7 +32,6 @@ config BPCTL depends on PCI && NET depends on m select SBYPASS - select NET_CORE select MII ---help--- If you have a network (Ethernet) controller of this type, say Y -- cgit v1.2.3 From d6cf7a86ce50ab9aded00a01ba55b6cd6b87209d Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 18 Jun 2013 03:27:29 +0100 Subject: at91_ether: Do not select NET_CORE This has no dependency on any of the drivers under NET_CORE. Signed-off-by: Ben Hutchings Acked-by: Nicolas Ferre Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 768285ec10f4..8030cc0396fd 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -23,7 +23,6 @@ if NET_CADENCE config ARM_AT91_ETHER tristate "AT91RM9200 Ethernet support" depends on GENERIC_HARDIRQS && HAS_DMA - select NET_CORE select MACB ---help--- If you wish to compile a kernel for the AT91RM9200 and enable -- cgit v1.2.3 From b8a39dd292af453f42ebcdbad229f7d9d1282ec2 Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Tue, 18 Jun 2013 16:05:39 +0800 Subject: Bnx2x: remove redundant D0 power state set Pci_enable_device() will set the device power state to D0, so there is no need to do it again in bnx2x_init_dev(). Also remove the redundant PM Cap find code, because the PCI core has already saved the PCI device's PM cap value. Signed-off-by: Yijing Wang Cc: Eilon Greenstein Cc: netdev@vger.kernel.org Cc: linux-kernel@vger.kernel.org Acked-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index f0d21faf71ec..c962d66a1a79 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12091,7 +12091,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, } if (IS_PF(bp)) { - bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + bp->pm_cap = pdev->pm_cap; if (bp->pm_cap == 0) { dev_err(&bp->pdev->dev, "Cannot find power management capability, aborting\n"); @@ -12140,8 +12140,6 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, } BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); - bnx2x_set_power_state(bp, PCI_D0); - /* clean indirect addresses */ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); -- cgit v1.2.3 From f9c7da5eaf10fa1d7a91b83aefc6fb8e3a4ad39c Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Tue, 18 Jun 2013 16:06:37 +0800 Subject: amd8111e: use pdev->pm_cap instead of pci_find_capability(.., PCI_CAP_ID_PM) The PCI core has already saved the PM cap register offset in pdev->pm_cap in pci_pm_init() during the init path. So we can use pdev->pm_cap instead of using pci_find_capability(pdev, PCI_CAP_ID_PM) for better performance and simpler code. Signed-off-by: Yijing Wang Cc: "David S. Miller" Cc: Patrick McHardy Cc: Bill Pemberton Cc: Greg Kroah-Hartman Cc: netdev@vger.kernel.org (open list:NETWORKING DRIVERS) Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/amd8111e.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index bc71aec1159d..1b1429d5d5c2 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1813,7 +1813,7 @@ static const struct net_device_ops amd8111e_netdev_ops = { static int amd8111e_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - int err,i,pm_cap; + int err, i; unsigned long reg_addr,reg_len; struct amd8111e_priv* lp; struct net_device* dev; @@ -1842,7 +1842,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, pci_set_master(pdev); /* Find power-management capability.
*/ - if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){ + if (!pdev->pm_cap) { printk(KERN_ERR "amd8111e: No Power Management capability, " "exiting.\n"); err = -ENODEV; @@ -1875,7 +1875,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, lp = netdev_priv(dev); lp->pci_dev = pdev; lp->amd8111e_net_dev = dev; - lp->pm_cap = pm_cap; + lp->pm_cap = pdev->pm_cap; spin_lock_init(&lp->lock); -- cgit v1.2.3 From 85768271315bd368c86a0783bf0c85019afbabec Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Tue, 18 Jun 2013 16:12:37 +0800 Subject: bnx2: use pdev->pm_cap instead of pci_find_capability(.., PCI_CAP_ID_PM) The PCI core has already saved the PM cap register offset in pdev->pm_cap in pci_pm_init() during the init path. So we can use pdev->pm_cap instead of using pci_find_capability(pdev, PCI_CAP_ID_PM) for better performance and simpler code. Signed-off-by: Yijing Wang Cc: Michael Chan Cc: netdev@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 1a1b23eb13dc..6a2de1d79ff6 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8104,7 +8104,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) pci_set_master(pdev); - bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + bp->pm_cap = pdev->pm_cap; if (bp->pm_cap == 0) { dev_err(&pdev->dev, "Cannot find power management capability, aborting\n"); -- cgit v1.2.3 From b6b4316c8b2fa6af5cee71e7defd09527b9d1cf9 Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Sat, 22 Jun 2013 04:12:00 -0400 Subject: qlcnic: Handle qlcnic_alloc_mbx_args() failure qlcnic_alloc_mbx_args() may fail due to a memory allocation failure. This patch checks for failure of qlcnic_alloc_mbx_args() to avoid potential invalid memory access. Signed-off-by: Shahed Shaikh Signed-off-by: Jitendra Kalsaria Signed-off-by: David S.
Miller --- .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 133 ++++++++++++++++----- .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 6 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 125 ++++++++++++++----- .../net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 6 +- 4 files changed, 206 insertions(+), 64 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index f63a69570256..88e2e23498d3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -844,7 +844,9 @@ void qlcnic_83xx_idc_aen_work(struct work_struct *work) int i, err = 0; adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work); - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK); + if (err) + return; for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++) cmd.req.arg[i] = adapter->ahw->mbox_aen[i]; @@ -1085,8 +1087,10 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) cap |= QLC_83XX_FW_CAP_LRO_MSS; /* set mailbox hdr and capabilities */ - qlcnic_alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_CREATE_RX_CTX); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_CREATE_RX_CTX); + if (err) + return err; if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) cmd.req.arg[0] |= (0x3 << 29); @@ -1244,7 +1248,9 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, mbx.intr_id = 0xffff; mbx.src = 0; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + if (err) + return err; if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) cmd.req.arg[0] |= (0x3 << 29); @@ -1390,8 +1396,11 @@ int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state, if (state) { /* Get LED configuration */ - qlcnic_alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_GET_LED_CONFIG); + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_LED_CONFIG); + if (status) + return status; + status = qlcnic_issue_cmd(adapter, &cmd); if (status) { dev_err(&adapter->pdev->dev, @@ -1405,8 +1414,11 @@ int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state, /* Set LED Configuration */ mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) | LSW(QLC_83XX_LED_CONFIG); - qlcnic_alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_SET_LED_CONFIG); + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_SET_LED_CONFIG); + if (status) + return status; + cmd.req.arg[1] = mbx_in; cmd.req.arg[2] = mbx_in; cmd.req.arg[3] = mbx_in; @@ -1423,8 +1435,11 @@ mbx_err: } else { /* Restoring default LED configuration */ - qlcnic_alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_SET_LED_CONFIG); + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_SET_LED_CONFIG); + if (status) + return status; + cmd.req.arg[1] = adapter->ahw->mbox_reg[0]; cmd.req.arg[2] = adapter->ahw->mbox_reg[1]; cmd.req.arg[3] = adapter->ahw->mbox_reg[2]; @@ -1494,10 +1509,18 @@ void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter, return; if (enable) { - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC); + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_INIT_NIC_FUNC); + if (status) + return; + cmd.req.arg[1] = BIT_0 | BIT_31; } else { - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC); + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_STOP_NIC_FUNC); + 
if (status) + return; + cmd.req.arg[1] = BIT_0 | BIT_31; } status = qlcnic_issue_cmd(adapter, &cmd); @@ -1514,7 +1537,10 @@ int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter) struct qlcnic_cmd_args cmd; int err; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG); + if (err) + return err; + cmd.req.arg[1] = adapter->ahw->port_config; err = qlcnic_issue_cmd(adapter, &cmd); if (err) @@ -1528,7 +1554,10 @@ int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter) struct qlcnic_cmd_args cmd; int err; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG); + if (err) + return err; + err = qlcnic_issue_cmd(adapter, &cmd); if (err) dev_info(&adapter->pdev->dev, "Get Port config failed\n"); @@ -1544,7 +1573,10 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable) u32 temp; struct qlcnic_cmd_args cmd; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT); + if (err) + return err; + temp = adapter->recv_ctx->context_id << 16; cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp; err = qlcnic_issue_cmd(adapter, &cmd); @@ -1575,7 +1607,11 @@ int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return -EIO; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); + if (err) + return err; + qlcnic_83xx_set_interface_id_promisc(adapter, &temp); cmd.req.arg[1] = (mode ? 1 : 0) | temp; err = qlcnic_issue_cmd(adapter, &cmd); @@ -1762,7 +1798,11 @@ void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, u32 temp = 0, temp_ip; struct qlcnic_cmd_args cmd; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_CONFIGURE_IP_ADDR); + if (err) + return; + qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp); if (mode == QLCNIC_IP_UP) @@ -1801,7 +1841,10 @@ int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode) if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) return 0; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO); + if (err) + return err; + temp = adapter->recv_ctx->context_id << 16; arg1 = lro_bit_mask | temp; cmd.req.arg[1] = arg1; @@ -1823,8 +1866,9 @@ int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable) 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 0x255b0ec26d5a56daULL }; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS); - + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS); + if (err) + return err; /* * RSS request: * bits 3-0: Rsvd @@ -1930,7 +1974,10 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) struct qlcnic_cmd_args cmd; u32 mac_low, mac_high; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); + if (err) + return err; + qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd); err = qlcnic_issue_cmd(adapter, &cmd); @@ -1961,7 +2008,10 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter) if (adapter->recv_ctx->state == 
QLCNIC_HOST_CTX_STATE_FREED) return; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL); + if (err) + return; + if (coal->type == QLCNIC_INTR_COAL_TYPE_RX) { temp = adapter->recv_ctx->context_id; cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16; @@ -2033,7 +2083,10 @@ int qlcnic_enable_eswitch(struct qlcnic_adapter *adapter, u8 port, u8 enable) return err; } - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH); + if (err) + return err; + cmd.req.arg[1] = (port & 0xf) | BIT_4; err = qlcnic_issue_cmd(adapter, &cmd); @@ -2061,7 +2114,10 @@ int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter, return err; } - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + if (err) + return err; + cmd.req.arg[1] = (nic->pci_func << 16); cmd.req.arg[2] = 0x1 << 16; cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16); @@ -2093,7 +2149,10 @@ int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter, u8 op = 0; struct qlcnic_cmd_args cmd; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); + if (err) + return err; + if (func_id != adapter->ahw->pci_func) { temp = func_id << 16; cmd.req.arg[1] = op | BIT_31 | temp; @@ -2140,7 +2199,10 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter, int i, err = 0, j = 0; u32 temp; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); + if (err) + return err; + err = qlcnic_issue_cmd(adapter, &cmd); ahw->act_pci_func = 0; @@ -2195,7 +2257,10 @@ int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type) struct qlcnic_cmd_args cmd; max_ints = adapter->ahw->num_msix - 1; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT); + if (err) + return err; + cmd.req.arg[1] = max_ints; if (qlcnic_sriov_vf_check(adapter)) @@ -2823,7 +2888,11 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) dev_info(&adapter->pdev->dev, "link state down\n"); return config; } - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS); + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS); + if (err) + return err; + err = qlcnic_issue_cmd(adapter, &cmd); if (err) { dev_info(&adapter->pdev->dev, @@ -3049,7 +3118,9 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) struct net_device *netdev = adapter->netdev; int ret = 0; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); + if (ret) + return; /* Get Tx stats */ cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16); cmd.rsp.num = QLC_83XX_TX_STAT_REGS; @@ -3139,7 +3210,9 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) goto fail_diag_irq; ahw->diag_cnt = 0; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + if (ret) + goto fail_diag_irq; if (adapter->flags & QLCNIC_MSIX_ENABLED) intrpt_id = ahw->intr_tbl[0].id; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 
aa26250d7374..f073c08de91f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2083,7 +2083,11 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter) audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT); if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) { - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC); + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_STOP_NIC_FUNC); + if (status) + return; + cmd.req.arg[1] = BIT_31; status = qlcnic_issue_cmd(adapter, &cmd); if (status) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index 9d0ae11589ce..72593a19ad59 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -194,7 +194,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter) _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR, _QLCNIC_LINUX_SUBVERSION); - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_DRV_VER); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_DRV_VER); + if (err) + return err; + memcpy(&arg1, drv_string, sizeof(u32)); memcpy(&arg2, drv_string + 4, sizeof(u32)); memcpy(&arg3, drv_string + 8, sizeof(u32)); @@ -222,7 +225,10 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE) return err; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU); + if (err) + return err; + cmd.req.arg[1] = recv_ctx->context_id; cmd.req.arg[2] = mtu; @@ -336,7 +342,10 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) } phys_addr = hostrq_phys_addr; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX); + if (err) + goto out_free_rsp; + cmd.req.arg[1] = MSD(phys_addr); cmd.req.arg[2] = LSD(phys_addr); cmd.req.arg[3] = rq_size; @@ -374,10 +383,10 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) recv_ctx->context_id = le16_to_cpu(prsp->context_id); recv_ctx->virt_port = prsp->virt_port; + qlcnic_free_mbx_args(&cmd); out_free_rsp: dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, - cardrsp_phys_addr); - qlcnic_free_mbx_args(&cmd); + cardrsp_phys_addr); out_free_rq: dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); return err; @@ -389,7 +398,10 @@ void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter) struct qlcnic_cmd_args cmd; struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX); + if (err) + return; + cmd.req.arg[1] = recv_ctx->context_id; err = qlcnic_issue_cmd(adapter, &cmd); if (err) @@ -458,7 +470,10 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, phys_addr = rq_phys_addr; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + if (err) + goto out_free_rsp; + cmd.req.arg[1] = MSD(phys_addr); cmd.req.arg[2] = LSD(phys_addr); cmd.req.arg[3] = rq_size; @@ -474,12 +489,13 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, err = -EIO; } + qlcnic_free_mbx_args(&cmd); + +out_free_rsp: dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr, 
rsp_phys_addr); - out_free_rq: dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr); - qlcnic_free_mbx_args(&cmd); return err; } @@ -488,8 +504,11 @@ void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring) { struct qlcnic_cmd_args cmd; + int ret; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX); + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX); + if (ret) + return; cmd.req.arg[1] = tx_ring->ctx_id; if (qlcnic_issue_cmd(adapter, &cmd)) @@ -504,7 +523,10 @@ qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config) int err; struct qlcnic_cmd_args cmd; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT); + if (err) + return err; + cmd.req.arg[1] = config; err = qlcnic_issue_cmd(adapter, &cmd); qlcnic_free_mbx_args(&cmd); @@ -708,7 +730,10 @@ int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) struct qlcnic_cmd_args cmd; u32 mac_low, mac_high; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); + if (err) + return err; + cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8; err = qlcnic_issue_cmd(adapter, &cmd); @@ -747,7 +772,10 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, nic_info = nic_info_addr; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); + if (err) + goto out_free_dma; + cmd.req.arg[1] = MSD(nic_dma_t); cmd.req.arg[2] = LSD(nic_dma_t); cmd.req.arg[3] = (func_id << 16 | nic_size); @@ -769,9 +797,10 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); } + qlcnic_free_mbx_args(&cmd); +out_free_dma: dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, nic_dma_t); - qlcnic_free_mbx_args(&cmd); return err; } @@ -808,7 +837,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter, nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw); nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw); - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + if (err) + goto out_free_dma; + cmd.req.arg[1] = MSD(nic_dma_t); cmd.req.arg[2] = LSD(nic_dma_t); cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size); @@ -820,9 +852,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter, err = -EIO; } - dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, - nic_dma_t); qlcnic_free_mbx_args(&cmd); +out_free_dma: + dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, + nic_dma_t); return err; } @@ -846,7 +879,10 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, return -ENOMEM; npar = pci_info_addr; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); + if (err) + goto out_free_dma; + cmd.req.arg[1] = MSD(pci_info_dma_t); cmd.req.arg[2] = LSD(pci_info_dma_t); cmd.req.arg[3] = pci_size; @@ -874,9 +910,10 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, err = -EIO; } + qlcnic_free_mbx_args(&cmd); +out_free_dma: dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr, pci_info_dma_t); - qlcnic_free_mbx_args(&cmd); return err; } @@ -897,7 +934,11 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, arg1 = 
id | (enable_mirroring ? BIT_4 : 0); arg1 |= pci_func << 8; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_SET_PORTMIRRORING); + if (err) + return err; + cmd.req.arg[1] = arg1; err = qlcnic_issue_cmd(adapter, &cmd); @@ -941,7 +982,11 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; arg1 |= rx_tx << 15 | stats_size << 16; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_ESWITCH_STATS); + if (err) + goto out_free_dma; + cmd.req.arg[1] = arg1; cmd.req.arg[2] = MSD(stats_dma_t); cmd.req.arg[3] = LSD(stats_dma_t); @@ -963,9 +1008,10 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, esw_stats->numbytes = le64_to_cpu(stats->numbytes); } - dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, - stats_dma_t); qlcnic_free_mbx_args(&cmd); +out_free_dma: + dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, + stats_dma_t); return err; } @@ -989,7 +1035,10 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, if (!stats_addr) return -ENOMEM; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); + if (err) + goto out_free_dma; + cmd.req.arg[1] = stats_size << 16; cmd.req.arg[2] = MSD(stats_dma_t); cmd.req.arg[3] = LSD(stats_dma_t); @@ -1020,11 +1069,12 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, "%s: Get mac stats failed, err=%d.\n", __func__, err); } - dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, - stats_dma_t); - qlcnic_free_mbx_args(&cmd); +out_free_dma: + dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, + stats_dma_t); + return err; } @@ -1108,7 +1158,11 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw, arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12; arg1 |= BIT_14 | rx_tx << 15; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_ESWITCH_STATS); + if (err) + return err; + cmd.req.arg[1] = arg1; err = qlcnic_issue_cmd(adapter, &cmd); qlcnic_free_mbx_args(&cmd); @@ -1127,10 +1181,13 @@ static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, struct device *dev = &adapter->pdev->dev; struct qlcnic_cmd_args cmd; u8 pci_func = *arg1 >> 8; - int err = -EIO; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG); + if (err) + return err; - qlcnic_alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG); cmd.req.arg[1] = *arg1; err = qlcnic_issue_cmd(adapter, &cmd); *arg1 = cmd.rsp.arg[1]; @@ -1208,7 +1265,11 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, return err; } - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH); + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_CONFIGURE_ESWITCH); + if (err) + return err; + cmd.req.arg[1] = arg1; cmd.req.arg[2] = arg2; err = qlcnic_issue_cmd(adapter, &cmd); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index f67652de5a63..700a46324d09 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -846,7 +846,9 @@ static int qlcnic_irq_test(struct net_device *netdev) 
goto clear_diag_irq; ahw->diag_cnt = 0; - qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + if (ret) + goto free_diag_res; cmd.req.arg[1] = ahw->pci_func; ret = qlcnic_issue_cmd(adapter, &cmd); @@ -858,6 +860,8 @@ static int qlcnic_irq_test(struct net_device *netdev) done: qlcnic_free_mbx_args(&cmd); + +free_diag_res: qlcnic_diag_free_res(netdev, max_sds_rings); clear_diag_irq: -- cgit v1.2.3 From 52e493d01cbf85871f0d1fdaeffdf376444e5d50 Mon Sep 17 00:00:00 2001 From: Jitendra Kalsaria Date: Sat, 22 Jun 2013 04:12:01 -0400 Subject: qlcnic: Secondary unicast MAC address support. Add support for configuring secondary unicast address which will use existing HW filters to store all the unicast MAC addresses and prevent device going into promiscuous mode. Signed-off-by: Jitendra Kalsaria Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 8 ++++ .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 17 +++++++++ .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 3 ++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h | 1 + drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 34 ++++++++++------- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 43 +++++++++++++++++----- 6 files changed, 83 insertions(+), 23 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 534e36ed855a..28a12765794d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -426,6 +426,7 @@ struct qlcnic_hardware_context { u8 nic_mode; char diag_cnt; + u16 max_uc_count; u16 port_type; u16 board_type; u16 supported_type; @@ -1494,6 +1495,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t); int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32); void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); +void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *); int qlcnic_enable_msix(struct qlcnic_adapter *, u32); /* eSwitch management functions */ @@ -1631,6 +1633,7 @@ struct qlcnic_hardware_ops { int (*config_promisc_mode) (struct qlcnic_adapter *, u32); void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16); int (*get_board_info) (struct qlcnic_adapter *); + void (*set_mac_filter_count) (struct qlcnic_adapter *); void (*free_mac_list) (struct qlcnic_adapter *); }; @@ -1846,6 +1849,11 @@ static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter) return adapter->ahw->hw_ops->free_mac_list(adapter); } +static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->set_mac_filter_count(adapter); +} + static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, u32 key) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 88e2e23498d3..ab664107ac9b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -172,6 +172,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = { .config_promisc_mode = qlcnic_83xx_nic_set_promisc, .change_l2_filter = qlcnic_83xx_change_l2_filter, .get_board_info = qlcnic_83xx_get_port_info, + .set_mac_filter_count = qlcnic_83xx_set_mac_filter_count, .free_mac_list = qlcnic_82xx_free_mac_list, }; @@ -609,6 +610,22 @@ int 
qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter) return status; } +void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u16 act_pci_fn = ahw->act_pci_func; + u16 count; + + ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT; + if (act_pci_fn <= 2) + count = (QLC_83XX_MAX_UC_COUNT - QLC_83XX_MAX_MC_COUNT) / + act_pci_fn; + else + count = (QLC_83XX_LB_MAX_FILTERS - QLC_83XX_MAX_MC_COUNT) / + act_pci_fn; + ahw->max_uc_count = count; +} + void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter) { u32 val; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 1bfe283a9412..e821ee47324a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -393,6 +393,8 @@ enum qlcnic_83xx_states { #define QLC_83XX_LB_MAX_FILTERS 2048 #define QLC_83XX_LB_BUCKET_SIZE 256 #define QLC_83XX_MINIMUM_VECTOR 3 +#define QLC_83XX_MAX_MC_COUNT 38 +#define QLC_83XX_MAX_UC_COUNT 4096 #define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000) #define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20) @@ -624,4 +626,5 @@ u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *); u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *); void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); +void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *); #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h index c0f0c0d0a790..d262211b03b3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h @@ -672,6 +672,7 @@ enum { #define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 10 #define QLCNIC_MAX_MC_COUNT 38 +#define QLCNIC_MAX_UC_COUNT 512 #define QLCNIC_WATCHDOG_TIMEOUTVALUE 5 #define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC))) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 218978db2963..e7f305d5bb5f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -499,6 +499,7 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan) void __qlcnic_set_multi(struct net_device *netdev, u16 vlan) { struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; struct netdev_hw_addr *ha; static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff @@ -515,25 +516,30 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan) if (netdev->flags & IFF_PROMISC) { if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) mode = VPORT_MISS_MODE_ACCEPT_ALL; - goto send_fw_cmd; - } - - if ((netdev->flags & IFF_ALLMULTI) || - (netdev_mc_count(netdev) > adapter->ahw->max_mc_count)) { - mode = VPORT_MISS_MODE_ACCEPT_MULTI; - goto send_fw_cmd; + } else if (netdev->flags & IFF_ALLMULTI) { + if (netdev_mc_count(netdev) > ahw->max_mc_count) { + mode = VPORT_MISS_MODE_ACCEPT_MULTI; + } else if (!netdev_mc_empty(netdev) && + !qlcnic_sriov_vf_check(adapter)) { + netdev_for_each_mc_addr(ha, netdev) + qlcnic_nic_add_mac(adapter, ha->addr, + vlan); + } + if (mode != VPORT_MISS_MODE_ACCEPT_MULTI && + qlcnic_sriov_vf_check(adapter)) + qlcnic_vf_add_mc_list(netdev, vlan); } - if (!netdev_mc_empty(netdev) && !qlcnic_sriov_vf_check(adapter)) 
{ - netdev_for_each_mc_addr(ha, netdev) { + /* configure unicast MAC address, if there is not sufficient space + * to store all the unicast addresses then enable promiscuous mode + */ + if (netdev_uc_count(netdev) > ahw->max_uc_count) { + mode = VPORT_MISS_MODE_ACCEPT_ALL; + } else if (!netdev_uc_empty(netdev)) { + netdev_for_each_uc_addr(ha, netdev) qlcnic_nic_add_mac(adapter, ha->addr, vlan); - } } - if (qlcnic_sriov_vf_check(adapter)) - qlcnic_vf_add_mc_list(netdev, vlan); - -send_fw_cmd: if (!qlcnic_sriov_vf_check(adapter)) { if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 0ae88355ad51..8e1453ac0035 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -360,12 +360,15 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], return ndo_dflt_fdb_del(ndm, tb, netdev, addr); if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { - if (is_unicast_ether_addr(addr)) - err = qlcnic_nic_del_mac(adapter, addr); - else if (is_multicast_ether_addr(addr)) + if (is_unicast_ether_addr(addr)) { + err = dev_uc_del(netdev, addr); + if (!err) + err = qlcnic_nic_del_mac(adapter, addr); + } else if (is_multicast_ether_addr(addr)) { err = dev_mc_del(netdev, addr); - else + } else { err = -EINVAL; + } } return err; } @@ -388,12 +391,16 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], if (ether_addr_equal(addr, adapter->mac_addr)) return err; - if (is_unicast_ether_addr(addr)) - err = qlcnic_nic_add_mac(adapter, addr, 0); - else if (is_multicast_ether_addr(addr)) + if (is_unicast_ether_addr(addr)) { + if (netdev_uc_count(netdev) < adapter->ahw->max_uc_count) + err = dev_uc_add_excl(netdev, addr); + else + err = -ENOMEM; + } else if (is_multicast_ether_addr(addr)) { err = dev_mc_add_excl(netdev, addr); - else + } else { err = -EINVAL; + } return err; } @@ -505,6 +512,7 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = { .config_promisc_mode = qlcnic_82xx_nic_set_promisc, .change_l2_filter = qlcnic_82xx_change_filter, .get_board_info = qlcnic_82xx_get_board_info, + .set_mac_filter_count = qlcnic_82xx_set_mac_filter_count, .free_mac_list = qlcnic_82xx_free_mac_list, }; @@ -1829,6 +1837,22 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter) return err; } +void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u16 act_pci_fn = ahw->act_pci_func; + u16 count; + + ahw->max_mc_count = QLCNIC_MAX_MC_COUNT; + if (act_pci_fn <= 2) + count = (QLCNIC_MAX_UC_COUNT - QLCNIC_MAX_MC_COUNT) / + act_pci_fn; + else + count = (QLCNIC_LB_MAX_FILTERS - QLCNIC_MAX_MC_COUNT) / + act_pci_fn; + ahw->max_uc_count = count; +} + int qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, int pci_using_dac) @@ -1838,7 +1862,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, adapter->rx_csum = 1; adapter->ahw->mc_enabled = 0; - adapter->ahw->max_mc_count = QLCNIC_MAX_MC_COUNT; + qlcnic_set_mac_filter_count(adapter); netdev->netdev_ops = &qlcnic_netdev_ops; netdev->watchdog_timeo = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ; @@ -1876,6 +1900,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, netdev->features |= NETIF_F_LRO; netdev->hw_features = netdev->features; + netdev->priv_flags |= IFF_UNICAST_FLT; netdev->irq = adapter->msix_entries[0].vector; err = 
register_netdev(netdev); -- cgit v1.2.3 From 2c4a787847c243b725a7bac510d3ebaa35c90fab Mon Sep 17 00:00:00 2001 From: Jitendra Kalsaria Date: Sat, 22 Jun 2013 04:12:02 -0400 Subject: qlcnic: Minimize sleep duration within loopback diagnostic test. o Minimize sleep duration and check for adapter status. o Exit from loopback test if adapter reset is detected. Signed-off-by: Jitendra Kalsaria Signed-off-by: David S. Miller --- .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 57 +++++++++++++++------- .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 3 +- 2 files changed, 41 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index ab664107ac9b..1c463dd562e2 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1676,13 +1676,19 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) /* Poll for link up event before running traffic */ do { - msleep(500); + msleep(QLC_83XX_LB_MSLEEP_COUNT); if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) qlcnic_83xx_process_aen(adapter); - if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { - dev_info(&adapter->pdev->dev, - "Firmware didn't sent link up event to loopback request\n"); + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, + "Device is resetting, free LB test resources\n"); + ret = -EIO; + goto free_diag_res; + } + if (loop++ > QLC_83XX_LB_WAIT_COUNT) { + netdev_info(netdev, + "Firmware didn't sent link up event to loopback request\n"); ret = -QLCNIC_FW_NOT_RESPOND; qlcnic_83xx_clear_lb_mode(adapter, mode); goto free_diag_res; @@ -1711,6 +1717,7 @@ fail_diag_alloc: int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { struct qlcnic_hardware_context *ahw = adapter->ahw; + struct net_device *netdev = adapter->netdev; int status = 0, loop = 0; u32 config; @@ -1728,9 +1735,9 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) status = qlcnic_83xx_set_port_config(adapter); if (status) { - dev_err(&adapter->pdev->dev, - "Failed to Set Loopback Mode = 0x%x.\n", - ahw->port_config); + netdev_err(netdev, + "Failed to Set Loopback Mode = 0x%x.\n", + ahw->port_config); ahw->port_config = config; clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); return status; @@ -1738,13 +1745,19 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) /* Wait for Link and IDC Completion AEN */ do { - msleep(300); + msleep(QLC_83XX_LB_MSLEEP_COUNT); if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) qlcnic_83xx_process_aen(adapter); - if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { - dev_err(&adapter->pdev->dev, - "FW did not generate IDC completion AEN\n"); + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, + "Device is resetting, free LB test resources\n"); + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + return -EIO; + } + if (loop++ > QLC_83XX_LB_WAIT_COUNT) { + netdev_err(netdev, + "Did not receive IDC completion AEN\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); qlcnic_83xx_clear_lb_mode(adapter, mode); return -EIO; @@ -1759,6 +1772,7 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) { struct qlcnic_hardware_context *ahw = adapter->ahw; + struct net_device *netdev = adapter->netdev; int status = 0, loop = 0; u32 config = ahw->port_config; @@ -1770,9 +1784,9 @@ int 
qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) status = qlcnic_83xx_set_port_config(adapter); if (status) { - dev_err(&adapter->pdev->dev, - "Failed to Clear Loopback Mode = 0x%x.\n", - ahw->port_config); + netdev_err(netdev, + "Failed to Clear Loopback Mode = 0x%x.\n", + ahw->port_config); ahw->port_config = config; clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); return status; @@ -1780,13 +1794,20 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) /* Wait for Link and IDC Completion AEN */ do { - msleep(300); + msleep(QLC_83XX_LB_MSLEEP_COUNT); if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) qlcnic_83xx_process_aen(adapter); - if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { - dev_err(&adapter->pdev->dev, - "Firmware didn't sent IDC completion AEN\n"); + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, + "Device is resetting, free LB test resources\n"); + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + return -EIO; + } + + if (loop++ > QLC_83XX_LB_WAIT_COUNT) { + netdev_err(netdev, + "Did not receive IDC completion AEN\n"); clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); return -EIO; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index e821ee47324a..e356ee87a9a6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -36,7 +36,8 @@ #define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT 3 #define QLC_83XX_DRV_LOCK_RECOVERY_DELAY 200 #define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK 0x3 - +#define QLC_83XX_LB_WAIT_COUNT 250 +#define QLC_83XX_LB_MSLEEP_COUNT 20 #define QLC_83XX_NO_NIC_RESOURCE 0x5 #define QLC_83XX_MAC_PRESENT 0xC #define QLC_83XX_MAC_ABSENT 0xD -- cgit v1.2.3 From 9baf1aa9c4c7a7fe52886634c3646fabaa48aa39 Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Sat, 22 Jun 2013 04:12:03 -0400 Subject: qlcnic: Add support for PEX DMA method to read memory section of adapter dump This patch adds support to read memory section of adapter dump using PEX DMA method. This method significantly improves total adapter dump collection time. Signed-off-by: Shahed Shaikh Signed-off-by: Jitendra Kalsaria Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 3 + .../net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 225 ++++++++++++++++++++- 2 files changed, 221 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 28a12765794d..cc2c2c13f6a0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -393,6 +393,9 @@ struct qlcnic_fw_dump { u32 size; /* total size of the dump */ void *data; /* dump data area */ struct qlcnic_dump_template_hdr *tmpl_hdr; + dma_addr_t phys_addr; + void *dma_buffer; + bool use_pex_dma; }; /* diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 4b9bab18ebd9..ab8a6744d402 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -15,6 +15,7 @@ #define QLC_83XX_MINIDUMP_FLASH 0x520000 #define QLC_83XX_OCM_INDEX 3 #define QLC_83XX_PCI_INDEX 0 +#define QLC_83XX_DMA_ENGINE_INDEX 8 static const u32 qlcnic_ms_read_data[] = { 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC @@ -32,6 +33,16 @@ static const u32 qlcnic_ms_read_data[] = { #define QLCNIC_DUMP_MASK_MAX 0xff +struct qlcnic_pex_dma_descriptor { + u32 read_data_size; + u32 dma_desc_cmd; + u32 src_addr_low; + u32 src_addr_high; + u32 dma_bus_addr_low; + u32 dma_bus_addr_high; + u32 rsvd[6]; +} __packed; + struct qlcnic_common_entry_hdr { u32 type; u32 offset; @@ -90,7 +101,10 @@ struct __ocm { } __packed; struct __mem { - u8 rsvd[24]; + u32 desc_card_addr; + u32 dma_desc_cmd; + u32 start_dma_cmd; + u32 rsvd[3]; u32 addr; u32 size; } __packed; @@ -466,12 +480,12 @@ skip_poll: return l2->no_ops * l2->read_addr_num * sizeof(u32); } -static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, - struct qlcnic_dump_entry *entry, __le32 *buffer) +static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter, + struct __mem *mem, __le32 *buffer, + int *ret) { - u32 addr, data, test, ret = 0; + u32 addr, data, test; int i, reg_read; - struct __mem *mem = &entry->region.mem; reg_read = mem->size; addr = mem->addr; @@ -480,7 +494,8 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, dev_info(&adapter->pdev->dev, "Unaligned memory addr:0x%x size:0x%x\n", addr, reg_read); - return -EINVAL; + *ret = -EINVAL; + return 0; } mutex_lock(&adapter->ahw->mem_lock); @@ -499,7 +514,7 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, if (printk_ratelimit()) { dev_err(&adapter->pdev->dev, "failed to read through agent\n"); - ret = -EINVAL; + *ret = -EIO; goto out; } } @@ -516,6 +531,181 @@ out: return mem->size; } +/* DMA register base address */ +#define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000)) + +/* DMA register offsets w.r.t base address */ +#define QLC_DMA_CMD_BUFF_ADDR_LOW 0 +#define QLC_DMA_CMD_BUFF_ADDR_HI 4 +#define QLC_DMA_CMD_STATUS_CTRL 8 + +#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16) + +static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter, + struct __mem *mem) +{ + struct qlcnic_dump_template_hdr *tmpl_hdr; + struct device *dev = &adapter->pdev->dev; + u32 dma_no, dma_base_addr, temp_addr; + int i, ret, dma_sts; + + tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr; + dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX]; + dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no); + + temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW; + ret = 
qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, + mem->desc_card_addr); + if (ret) + return ret; + + temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI; + ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0); + if (ret) + return ret; + + temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL; + ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, + mem->start_dma_cmd); + if (ret) + return ret; + + /* Wait for DMA to complete */ + temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL; + for (i = 0; i < 400; i++) { + dma_sts = qlcnic_ind_rd(adapter, temp_addr); + + if (dma_sts & BIT_1) + usleep_range(250, 500); + else + break; + } + + if (i >= 400) { + dev_info(dev, "PEX DMA operation timed out"); + ret = -EIO; + } + + return ret; +} + +static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter, + struct __mem *mem, + __le32 *buffer, int *ret) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + u32 temp, dma_base_addr, size = 0, read_size = 0; + struct qlcnic_pex_dma_descriptor *dma_descr; + struct qlcnic_dump_template_hdr *tmpl_hdr; + struct device *dev = &adapter->pdev->dev; + dma_addr_t dma_phys_addr; + void *dma_buffer; + + tmpl_hdr = fw_dump->tmpl_hdr; + + /* Check if DMA engine is available */ + temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX]; + dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp); + temp = qlcnic_ind_rd(adapter, + dma_base_addr + QLC_DMA_CMD_STATUS_CTRL); + + if (!(temp & BIT_31)) { + dev_info(dev, "%s: DMA engine is not available\n", __func__); + *ret = -EIO; + return 0; + } + + /* Create DMA descriptor */ + dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor), + GFP_KERNEL); + if (!dma_descr) { + *ret = -ENOMEM; + return 0; + } + + /* dma_desc_cmd 0:15 = 0 + * dma_desc_cmd 16:19 = mem->dma_desc_cmd 0:3 + * dma_desc_cmd 20:23 = pci function number + * dma_desc_cmd 24:31 = mem->dma_desc_cmd 8:15 + */ + dma_phys_addr = fw_dump->phys_addr; + dma_buffer = fw_dump->dma_buffer; + temp = 0; + temp = mem->dma_desc_cmd & 0xff0f; + temp |= (adapter->ahw->pci_func & 0xf) << 4; + dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000; + dma_descr->dma_bus_addr_low = LSD(dma_phys_addr); + dma_descr->dma_bus_addr_high = MSD(dma_phys_addr); + dma_descr->src_addr_high = 0; + + /* Collect memory dump using multiple DMA operations if required */ + while (read_size < mem->size) { + if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE) + size = QLC_PEX_DMA_READ_SIZE; + else + size = mem->size - read_size; + + dma_descr->src_addr_low = mem->addr + read_size; + dma_descr->read_data_size = size; + + /* Write DMA descriptor to MS memory*/ + temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16; + *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr, + (u32 *)dma_descr, temp); + if (*ret) { + dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n", + mem->desc_card_addr); + goto free_dma_descr; + } + + *ret = qlcnic_start_pex_dma(adapter, mem); + if (*ret) { + dev_info(dev, "Failed to start PEX DMA operation\n"); + goto free_dma_descr; + } + + memcpy(buffer, dma_buffer, size); + buffer += size / 4; + read_size += size; + } + +free_dma_descr: + kfree(dma_descr); + + return read_size; +} + +static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + struct device *dev = &adapter->pdev->dev; + struct __mem *mem = &entry->region.mem; + u32 data_size; + int ret = 0; + + if (fw_dump->use_pex_dma) { + data_size = 
qlcnic_read_memory_pexdma(adapter, mem, buffer, + &ret); + if (ret) + dev_info(dev, + "Failed to read memory dump using PEX DMA: mask[0x%x]\n", + entry->hdr.mask); + else + return data_size; + } + + data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret); + if (ret) { + dev_info(dev, + "Failed to read memory dump using test agent method: mask[0x%x]\n", + entry->hdr.mask); + return 0; + } else { + return data_size; + } +} + static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry, __le32 *buffer) { @@ -893,6 +1083,12 @@ flash_temp: tmpl_hdr = ahw->fw_dump.tmpl_hdr; tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; + + if ((tmpl_hdr->version & 0xffffff) >= 0x20001) + ahw->fw_dump.use_pex_dma = true; + else + ahw->fw_dump.use_pex_dma = false; + ahw->fw_dump.enable = 1; return 0; @@ -910,7 +1106,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr; static const struct qlcnic_dump_operations *fw_dump_ops; + struct device *dev = &adapter->pdev->dev; struct qlcnic_hardware_context *ahw; + void *temp_buffer; ahw = adapter->ahw; @@ -944,6 +1142,16 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION; tmpl_hdr->sys_info[1] = adapter->fw_version; + if (fw_dump->use_pex_dma) { + temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE, + &fw_dump->phys_addr, + GFP_KERNEL); + if (!temp_buffer) + fw_dump->use_pex_dma = false; + else + fw_dump->dma_buffer = temp_buffer; + } + if (qlcnic_82xx_check(adapter)) { ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops); fw_dump_ops = qlcnic_fw_dump_ops; @@ -1002,6 +1210,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) return 0; } error: + if (fw_dump->use_pex_dma) + dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE, + fw_dump->dma_buffer, fw_dump->phys_addr); vfree(fw_dump->data); return -EINVAL; } -- cgit v1.2.3 From db131786445d55c418d27cb2341b86cf16807d9d Mon Sep 17 00:00:00 2001 From: Pratik Pujar Date: Sat, 22 Jun 2013 04:12:04 -0400 Subject: qlcnic: Cleanup of structure qlcnic_hardware_context Signed-off-by: Pratik Pujar Signed-off-by: Jitendra Kalsaria Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 3 ++- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4 ++-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index cc2c2c13f6a0..5694d599f659 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -449,7 +449,7 @@ struct qlcnic_hardware_context { u16 max_pci_func; u32 capabilities; - u32 capabilities2; + u32 extra_capability[3]; u32 temp; u32 int_vec_bit; u32 fw_hal_version; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index e7f305d5bb5f..9fcbfd449ac5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -786,7 +786,8 @@ int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable) word = 0; if (enable) { word = QLCNIC_ENABLE_IPV4_LRO | QLCNIC_NO_DEST_IPV4_CHECK; - if (adapter->ahw->capabilities2 & QLCNIC_FW_CAP2_HW_LRO_IPV6) + if (adapter->ahw->extra_capability[0] & + QLCNIC_FW_CAP2_HW_LRO_IPV6) word |= QLCNIC_ENABLE_IPV6_LRO | QLCNIC_NO_DEST_IPV6_CHECK; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 8e1453ac0035..3963e781a2b4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -994,7 +994,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter) if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { u32 temp; temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2); - adapter->ahw->capabilities2 = temp; + adapter->ahw->extra_capability[0] = temp; } adapter->ahw->max_mac_filters = nic_info.max_mac_filters; adapter->ahw->max_mtu = nic_info.max_mtu; @@ -1486,7 +1486,7 @@ static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter) u32 capab = 0; if (qlcnic_82xx_check(adapter)) { - if (adapter->ahw->capabilities2 & + if (adapter->ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG) adapter->flags |= QLCNIC_FW_LRO_MSS_CAP; } else { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 7ec030abdf07..10ed82b3baca 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -168,7 +168,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, if (err) return err; - if ((ahw->capabilities2 & QLCNIC_FW_CAPABILITY_2_BEACON)) { + if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { err = qlcnic_get_beacon_state(adapter, &h_beacon_state); if (!err) { dev_info(&adapter->pdev->dev, -- cgit v1.2.3 From 8af3f33db05c6d0146ad14905145a5c923770856 Mon Sep 17 00:00:00 2001 From: Pratik Pujar Date: Sat, 22 Jun 2013 04:12:05 -0400 Subject: qlcnic: Add support for 'set driver version' in 83XX Issue 'set driver version' during driver load and after reset recovery to notify the driver version to the firmware. Signed-off-by: Pratik Pujar Signed-off-by: Jitendra Kalsaria Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 5 +++-- .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 9 ++++++-- .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 1 + drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 6 +++--- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 3 ++- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 25 +++++++++++++++------- 6 files changed, 33 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 5694d599f659..955dd858464a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -819,7 +819,7 @@ struct qlcnic_mac_list_s { #define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2 #define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3 -#define QLCNIC_FW_CAPABILITY_2_OCBB BIT_5 +#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5 #define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7 /* module types */ @@ -1476,7 +1476,7 @@ int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *); void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter); int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); -int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *); +int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32); int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); netdev_features_t qlcnic_fix_features(struct net_device *netdev, netdev_features_t features); @@ -1500,6 +1500,7 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32); void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *); int qlcnic_enable_msix(struct qlcnic_adapter *, u32); +void qlcnic_set_drv_version(struct qlcnic_adapter *); /* eSwitch management functions */ int qlcnic_config_switch_port(struct qlcnic_adapter *, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 1c463dd562e2..1938812bb0e2 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -63,6 +63,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1}, {QLCNIC_CMD_SET_LED_CONFIG, 5, 1}, {QLCNIC_CMD_GET_LED_CONFIG, 1, 5}, + {QLCNIC_CMD_83XX_SET_DRV_VER, 4, 1}, {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26}, {QLCNIC_CMD_CONFIG_VPORT, 4, 4}, {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1}, @@ -2186,16 +2187,17 @@ int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter, u32 temp; u8 op = 0; struct qlcnic_cmd_args cmd; + struct qlcnic_hardware_context *ahw = adapter->ahw; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); if (err) return err; - if (func_id != adapter->ahw->pci_func) { + if (func_id != ahw->pci_func) { temp = func_id << 16; cmd.req.arg[1] = op | BIT_31 | temp; } else { - cmd.req.arg[1] = adapter->ahw->pci_func << 16; + cmd.req.arg[1] = ahw->pci_func << 16; } err = qlcnic_issue_cmd(adapter, &cmd); if (err) { @@ -2222,6 +2224,9 @@ int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter, temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17; npar_info->max_linkspeed_reg_offset = temp; } + if (npar_info->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) + memcpy(ahw->extra_capability, &cmd.rsp.arg[16], + sizeof(ahw->extra_capability)); out: qlcnic_free_mbx_args(&cmd); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index f073c08de91f..e2d04d5cb9b0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -629,6 +629,7 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) return -EIO; } + qlcnic_set_drv_version(adapter); qlcnic_83xx_idc_attach_driver(adapter); return 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index 72593a19ad59..0d54fceda960 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -36,7 +36,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = { {QLCNIC_CMD_CONFIG_PORT, 4, 1}, {QLCNIC_CMD_TEMP_SIZE, 4, 4}, {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, - {QLCNIC_CMD_SET_DRV_VER, 4, 1}, + {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1}, {QLCNIC_CMD_GET_LED_STATUS, 4, 2}, }; @@ -182,7 +182,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, return cmd->rsp.arg[0]; } -int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter) +int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd) { struct qlcnic_cmd_args cmd; u32 arg1, arg2, arg3; @@ -194,7 +194,7 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter) _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR, _QLCNIC_LINUX_SUBVERSION); - err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_DRV_VER); + err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd); if (err) return err; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 812fd07baef3..b190c8495a85 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -86,7 +86,7 @@ enum qlcnic_regs { #define QLCNIC_CMD_BC_EVENT_SETUP 0x31 #define QLCNIC_CMD_CONFIG_VPORT 0x32 #define QLCNIC_CMD_GET_MAC_STATS 0x37 -#define QLCNIC_CMD_SET_DRV_VER 0x38 +#define QLCNIC_CMD_82XX_SET_DRV_VER 0x38 #define QLCNIC_CMD_GET_LED_STATUS 0x3C #define QLCNIC_CMD_CONFIGURE_RSS 0x41 #define QLCNIC_CMD_CONFIG_INTR_COAL 0x43 @@ -103,6 +103,7 @@ enum qlcnic_regs { #define QLCNIC_CMD_GET_LINK_STATUS 0x68 #define QLCNIC_CMD_SET_LED_CONFIG 0x69 #define QLCNIC_CMD_GET_LED_CONFIG 0x6A +#define QLCNIC_CMD_83XX_SET_DRV_VER 0x6F #define QLCNIC_CMD_ADD_RCV_RINGS 0x0B #define QLCNIC_INTRPT_INTX 1 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 3963e781a2b4..0d7a4fd9975c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1985,6 +1985,21 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, return 0; } +void qlcnic_set_drv_version(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 fw_cmd = 0; + + if (qlcnic_82xx_check(adapter)) + fw_cmd = QLCNIC_CMD_82XX_SET_DRV_VER; + else if (qlcnic_83xx_check(adapter)) + fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER; + + if ((ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) && + (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER)) + qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd); +} + static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1992,7 +2007,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct qlcnic_adapter *adapter = NULL; struct qlcnic_hardware_context *ahw; int err, pci_using_dac = -1; - u32 capab2; 
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ if (pdev->is_virtfn) @@ -2147,13 +2161,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_out_disable_mbx_intr; - if (qlcnic_82xx_check(adapter)) { - if (ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { - capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2); - if (capab2 & QLCNIC_FW_CAPABILITY_2_OCBB) - qlcnic_fw_cmd_set_drv_version(adapter); - } - } + qlcnic_set_drv_version(adapter); pci_set_drvdata(pdev, adapter); @@ -3124,6 +3132,7 @@ done: adapter->fw_fail_cnt = 0; adapter->flags &= ~QLCNIC_FW_HANG; clear_bit(__QLCNIC_RESETTING, &adapter->state); + qlcnic_set_drv_version(adapter); if (!qlcnic_clr_drv_state(adapter)) qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, -- cgit v1.2.3 From 486a5bc77a4a83d29b72b17fde229e45a2428194 Mon Sep 17 00:00:00 2001 From: Rajesh Borundia Date: Sat, 22 Jun 2013 04:12:06 -0400 Subject: qlcnic: Add support for 83xx suspend and resume. o Implement shutdown and resume handlers for 83xx. o Refactor 82xx shutdown and resume handlers. Signed-off-by: Rajesh Borundia Signed-off-by: Jitendra Kalsaria Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 14 +++++ .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 53 +++++++++++++++++ .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 6 ++ .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 17 ++++-- .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 23 +++++++- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 51 +++++++++++++++++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 4 ++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 66 +++------------------- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 2 + .../ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 53 +++++++++++++++++ 10 files changed, 225 insertions(+), 64 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 955dd858464a..a89e322be28e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1596,6 +1596,8 @@ struct qlcnic_nic_template { void (*napi_del)(struct qlcnic_adapter *); void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int); irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *); + int (*shutdown)(struct pci_dev *); + int (*resume)(struct qlcnic_adapter *); }; /* Adapter hardware abstraction */ @@ -1800,6 +1802,18 @@ static inline void qlcnic_napi_enable(struct qlcnic_adapter *adapter) adapter->ahw->hw_ops->napi_enable(adapter); } +static inline int __qlcnic_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + + return adapter->nic_ops->shutdown(pdev); +} + +static inline int __qlcnic_resume(struct qlcnic_adapter *adapter) +{ + return adapter->nic_ops->resume(adapter); +} + static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter) { adapter->ahw->hw_ops->napi_disable(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 1938812bb0e2..0913c623a67e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -186,6 +186,8 @@ static struct qlcnic_nic_template qlcnic_83xx_ops = { .napi_del = qlcnic_83xx_napi_del, .config_ipaddr = qlcnic_83xx_config_ipaddr, .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr, + .shutdown = qlcnic_83xx_shutdown, + 
.resume = qlcnic_83xx_resume, }; void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw) @@ -3393,3 +3395,54 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter) } return 0; } + +int qlcnic_83xx_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int retval; + + netif_device_detach(netdev); + qlcnic_cancel_idc_work(adapter); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_83xx_disable_mbx_intr(adapter); + cancel_delayed_work_sync(&adapter->idc_aen_work); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + return 0; +} + +int qlcnic_83xx_resume(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlc_83xx_idc *idc = &ahw->idc; + int err = 0; + + err = qlcnic_83xx_idc_init(adapter); + if (err) + return err; + + if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE) { + if (ahw->op_mode == QLCNIC_MGMT_FUNC) { + qlcnic_83xx_set_vnic_opmode(adapter); + } else { + err = qlcnic_83xx_check_vnic_state(adapter); + if (err) + return err; + } + } + + err = qlcnic_83xx_idc_reattach_driver(adapter); + if (err) + return err; + + qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, + idc->delay); + return err; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index e356ee87a9a6..2548d1403d75 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -628,4 +628,10 @@ u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *); void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *); +int qlcnic_83xx_shutdown(struct pci_dev *); +int qlcnic_83xx_resume(struct qlcnic_adapter *); +int qlcnic_83xx_idc_init(struct qlcnic_adapter *); +int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *); +int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *); +int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *); #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index e2d04d5cb9b0..f41dfab1e9a3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -606,7 +606,7 @@ static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter) return 0; } -static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) +int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) { int err; @@ -1135,7 +1135,7 @@ qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter) return 0; } -static int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter) +int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter) { int ret = -EIO; @@ -1554,9 +1554,18 @@ static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev) int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev) { - u8 *p_buff; - u32 addr, count; struct qlcnic_hardware_context *ahw = p_dev->ahw; + u32 addr, count, prev_ver, curr_ver; + u8 *p_buff; + + if (ahw->reset.buff != NULL) { + prev_ver = p_dev->fw_version; + curr_ver = qlcnic_83xx_get_fw_version(p_dev); + if (curr_ver > prev_ver) + kfree(ahw->reset.buff); + else + return 0; + } ahw->reset.seq_error = 0; ahw->reset.buff = 
kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c index b5054e1d1710..599d1fda52f2 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c @@ -39,7 +39,7 @@ int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock) return 0; } -static int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter) +int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter) { u8 id; int ret = -EBUSY; @@ -218,3 +218,24 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter) return 0; } + +int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlc_83xx_idc *idc = &ahw->idc; + u32 state; + + state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); + while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) { + msleep(1000); + state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); + } + + if (!idc->vnic_wait_limit) { + dev_err(&adapter->pdev->dev, + "vNIC mode not operational, state check timed out.\n"); + return -EIO; + } + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 9fcbfd449ac5..5b5d2edf125d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -1577,3 +1577,54 @@ void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter) { qlcnic_pcie_sem_unlock(adapter, 5); } + +int qlcnic_82xx_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int retval; + + netif_device_detach(netdev); + + qlcnic_cancel_idc_work(adapter); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_clr_all_drv_state(adapter, 0); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + if (qlcnic_wol_supported(adapter)) { + pci_enable_wake(pdev, PCI_D3cold, 1); + pci_enable_wake(pdev, PCI_D3hot, 1); + } + + return 0; +} + +int qlcnic_82xx_resume(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + err = qlcnic_start_firmware(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "failed to start firmware\n"); + return err; + } + + if (netif_running(netdev)) { + err = qlcnic_up(adapter, netdev); + if (!err) + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + + netif_device_attach(netdev); + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); + return err; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index b190c8495a85..2c22504f57aa 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -199,4 +199,8 @@ void qlcnic_82xx_api_unlock(struct qlcnic_adapter *); void qlcnic_82xx_napi_enable(struct qlcnic_adapter *); void qlcnic_82xx_napi_disable(struct qlcnic_adapter *); void qlcnic_82xx_napi_del(struct qlcnic_adapter *); +int qlcnic_82xx_shutdown(struct pci_dev *); +int qlcnic_82xx_resume(struct qlcnic_adapter *); +void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed); +void qlcnic_fw_poll_work(struct work_struct *work); #endif /* __QLCNIC_HW_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 0d7a4fd9975c..4528f8ec333b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -59,13 +59,11 @@ static int qlcnic_close(struct net_device *netdev); static void qlcnic_tx_timeout(struct net_device *netdev); static void qlcnic_attach_work(struct work_struct *work); static void qlcnic_fwinit_work(struct work_struct *work); -static void qlcnic_fw_poll_work(struct work_struct *work); #ifdef CONFIG_NET_POLL_CONTROLLER static void qlcnic_poll_controller(struct net_device *netdev); #endif static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding); -static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8); static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); static irqreturn_t qlcnic_tmp_intr(int irq, void *data); @@ -469,6 +467,8 @@ static struct qlcnic_nic_template qlcnic_ops = { .napi_add = qlcnic_82xx_napi_add, .napi_del = qlcnic_82xx_napi_del, .config_ipaddr = qlcnic_82xx_config_ipaddr, + .shutdown = qlcnic_82xx_shutdown, + .resume = qlcnic_82xx_resume, .clear_legacy_intr = qlcnic_82xx_clear_legacy_intr, }; @@ -2277,37 +2277,6 @@ static void qlcnic_remove(struct pci_dev *pdev) kfree(ahw); free_netdev(netdev); } -static int __qlcnic_shutdown(struct pci_dev *pdev) -{ - struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - int retval; - - netif_device_detach(netdev); - - qlcnic_cancel_idc_work(adapter); - - if (netif_running(netdev)) - qlcnic_down(adapter, netdev); - - qlcnic_sriov_cleanup(adapter); - if (qlcnic_82xx_check(adapter)) - qlcnic_clr_all_drv_state(adapter, 0); - - clear_bit(__QLCNIC_RESETTING, &adapter->state); - - retval = pci_save_state(pdev); - if (retval) - return retval; - if (qlcnic_82xx_check(adapter)) { - if (qlcnic_wol_supported(adapter)) { - pci_enable_wake(pdev, PCI_D3cold, 1); - pci_enable_wake(pdev, PCI_D3hot, 1); - } - } - - return 0; -} static void qlcnic_shutdown(struct pci_dev *pdev) { @@ -2318,8 +2287,7 @@ static void qlcnic_shutdown(struct pci_dev *pdev) } #ifdef CONFIG_PM -static int -qlcnic_suspend(struct pci_dev *pdev, pm_message_t state) +static int qlcnic_suspend(struct pci_dev *pdev, pm_message_t state) { int retval; @@ -2331,11 +2299,9 @@ qlcnic_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } -static int -qlcnic_resume(struct pci_dev *pdev) +static int qlcnic_resume(struct pci_dev *pdev) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; int err; err = pci_enable_device(pdev); @@ -2346,23 +2312,7 @@ qlcnic_resume(struct pci_dev *pdev) pci_set_master(pdev); pci_restore_state(pdev); - err = qlcnic_start_firmware(adapter); - if (err) { - dev_err(&pdev->dev, "failed to start firmware\n"); - return err; - } - - if (netif_running(netdev)) { - err = qlcnic_up(adapter, netdev); - if (err) - goto done; - - qlcnic_restore_indev_addr(netdev, NETDEV_UP); - } -done: - netif_device_attach(netdev); - qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); - return 0; + return __qlcnic_resume(adapter); } #endif @@ -2701,8 +2651,7 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter) return 0; } -static void -qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed) +void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed) { u32 val; @@ -3213,8 +3162,7 @@ detach: return 1; } -static void -qlcnic_fw_poll_work(struct work_struct *work) +void 
qlcnic_fw_poll_work(struct work_struct *work) { struct qlcnic_adapter *adapter = container_of(work, struct qlcnic_adapter, fw_work.work); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index 9176cb015732..0daf660e12a1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h @@ -195,6 +195,8 @@ int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *, int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *, struct qlcnic_info *, u16); int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8); +int qlcnic_sriov_vf_shutdown(struct pci_dev *); +int qlcnic_sriov_vf_resume(struct qlcnic_adapter *); static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index bcd200eff981..d55624993b04 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -76,6 +76,8 @@ static struct qlcnic_nic_template qlcnic_sriov_vf_ops = { .cancel_idc_work = qlcnic_sriov_vf_cancel_fw_work, .napi_add = qlcnic_83xx_napi_add, .napi_del = qlcnic_83xx_napi_del, + .shutdown = qlcnic_sriov_vf_shutdown, + .resume = qlcnic_sriov_vf_resume, .config_ipaddr = qlcnic_83xx_config_ipaddr, .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr, }; @@ -1954,3 +1956,54 @@ static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter) kfree(cur); } } + +int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int retval; + + netif_device_detach(netdev); + qlcnic_cancel_idc_work(adapter); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); + qlcnic_sriov_cfg_bc_intr(adapter, 0); + qlcnic_83xx_disable_mbx_intr(adapter); + cancel_delayed_work_sync(&adapter->idc_aen_work); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + return 0; +} + +int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter) +{ + struct qlc_83xx_idc *idc = &adapter->ahw->idc; + struct net_device *netdev = adapter->netdev; + int err; + + set_bit(QLC_83XX_MODULE_LOADED, &idc->status); + qlcnic_83xx_enable_mbx_intrpt(adapter); + err = qlcnic_sriov_cfg_bc_intr(adapter, 1); + if (err) + return err; + + err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT); + if (!err) { + if (netif_running(netdev)) { + err = qlcnic_up(adapter, netdev); + if (!err) + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + } + + netif_device_attach(netdev); + qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, + idc->delay); + return err; +} -- cgit v1.2.3 From 8640caf60aac7fe30dd622aca41d8336d07d3bd7 Mon Sep 17 00:00:00 2001 From: Jitendra Kalsaria Date: Sat, 22 Jun 2013 04:12:07 -0400 Subject: qlcnic: Update version to 5.2.44 Signed-off-by: Jitendra Kalsaria Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index a89e322be28e..b00cf5665eab 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -38,8 +38,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 2 -#define _QLCNIC_LINUX_SUBVERSION 43 -#define QLCNIC_LINUX_VERSIONID "5.2.43" +#define _QLCNIC_LINUX_SUBVERSION 44 +#define QLCNIC_LINUX_VERSIONID "5.2.44" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) -- cgit v1.2.3 From 78c3bcc5d1af64f51d9f30b0f5a2d1985bf69734 Mon Sep 17 00:00:00 2001 From: Ariel Elior Date: Thu, 20 Jun 2013 17:39:08 +0300 Subject: bnx2x: Improve PF behaviour toward VF If PF is unloaded with loaded VFs, signal towards VFs so they can detect this gracefully. Signed-off-by: Ariel Elior Signed-off-by: Yuval Mintz Signed-off-by: Eilon Greenstein Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2 ++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3 +++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 23 ++++++++++++++++++++--- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 24 +++++++++++++++++++++--- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 2 ++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 12 +++++++++++- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 5 ++++- 7 files changed, 63 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index edda716fdce4..dedbd76c033e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1330,6 +1330,7 @@ enum { BNX2X_SP_RTNL_AFEX_F_UPDATE, BNX2X_SP_RTNL_ENABLE_SRIOV, BNX2X_SP_RTNL_VFPF_MCAST, + BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, BNX2X_SP_RTNL_HYPERVISOR_VLAN, }; @@ -1500,6 +1501,7 @@ struct bnx2x { #define USING_SINGLE_MSIX_FLAG (1 << 20) #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) #define IS_VF_FLAG (1 << 22) +#define INTERRUPTS_ENABLED_FLAG (1 << 23) #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ca7f2bb08f44..d342c5a34afc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2871,6 +2871,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; smp_mb(); + /* indicate to VFs that the PF is going down */ + bnx2x_iov_channel_down(bp); + if (CNIC_LOADED(bp)) bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c962d66a1a79..7a7e326b3509 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -5457,9 +5457,19 @@ static void bnx2x_timer(unsigned long data) bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); /* sample pf vf bulletin board for new posts from pf */ - if (IS_VF(bp)) + if (IS_VF(bp)) { bnx2x_sample_bulletin(bp); + /* if channel is down we need to self destruct */ + if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + schedule_delayed_work(&bp->sp_rtnl_task, 0); + } + } + mod_timer(&bp->timer, jiffies + bp->current_interval); } @@ -9620,6 +9630,13 @@ sp_rtnl_not_reset: "sending set mcast vf pf channel message from rtnl sp-task\n"); bnx2x_vfpf_set_mcast(bp->dev); } + if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + &bp->sp_rtnl_state)){ + if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) { + bnx2x_tx_disable(bp); + BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n"); + } + } if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, &bp->sp_rtnl_state)) { @@ -12814,6 +12831,8 @@ static void __bnx2x_remove(struct pci_dev *pdev, rtnl_unlock(); } + bnx2x_iov_remove_one(bp); + /* Power on: we can't let PCI layer write to us while we are in D3 */ if (IS_PF(bp)) bnx2x_set_power_state(bp, PCI_D0); @@ -12828,8 +12847,6 @@ static void __bnx2x_remove(struct pci_dev *pdev, /* Make sure RESET task is not scheduled before continuing */ cancel_delayed_work_sync(&bp->sp_rtnl_task); - bnx2x_iov_remove_one(bp); - /* send message via vfpf channel to release the resources of this vf */ if (IS_VF(bp)) bnx2x_vfpf_release(bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 8a556dd888d5..2a8ad6d4b540 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1459,13 +1459,11 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); if (!vf) - goto unknown_dev; + return false; dev = pci_get_bus_and_slot(vf->bus, vf->devfn); if (dev) return bnx2x_is_pcie_pending(dev); - -unknown_dev: return false; } @@ -3469,3 +3467,23 @@ int bnx2x_open_epilog(struct bnx2x *bp) return 0; } + +void bnx2x_iov_channel_down(struct bnx2x *bp) +{ + int vf_idx; + struct pf_vf_bulletin_content *bulletin; + + if (!IS_SRIOV(bp)) + return; + + for_each_vf(bp, vf_idx) { + /* locate this VFs bulletin board and update the channel down + * bit + */ + bulletin = BP_VF_BULLETIN(bp, vf_idx); + bulletin->valid_bitmap |= 1 << CHANNEL_DOWN; + + /* update vf bulletin board */ + bnx2x_post_vf_bulletin(bp, vf_idx); + } +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index f08c604a4fbd..9fc029ed9547 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -761,6 +761,7 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp) } void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); +void bnx2x_iov_channel_down(struct bnx2x *bp); int bnx2x_open_epilog(struct bnx2x *bp); #else /* CONFIG_BNX2X_SRIOV */ @@ -817,6 +818,7 @@ static inline void __iomem 
*bnx2x_vf_doorbells(struct bnx2x *bp) static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } +static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; } #endif /* CONFIG_BNX2X_SRIOV */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 861809d3154b..2088063151d6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -113,7 +113,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) { struct cstorm_vf_zone_data __iomem *zone_data = REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START); - int tout = 600, interval = 100; /* wait for 60 seconds */ + int tout = 100, interval = 100; /* wait for 10 seconds */ if (*done) { BNX2X_ERR("done was non zero before message to pf was sent\n"); @@ -121,6 +121,16 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) return -EINVAL; } + /* if PF indicated channel is down avoid sending message. Return success + * so calling flow can continue + */ + bnx2x_sample_bulletin(bp); + if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { + DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n"); + *done = PFVF_STATUS_SUCCESS; + return 0; + } + /* Write message address */ writel(U64_LO(msg_mapping), &zone_data->non_trigger.vf_pf_channel.msg_addr_lo); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index 41708faab575..f3ad174a3a63 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -331,7 +331,10 @@ struct pf_vf_bulletin_content { #define VLAN_VALID 1 /* when set, the vf should not access * the vfpf channel */ - +#define CHANNEL_DOWN 2 /* vfpf channel is disabled. VFs are not + * to attempt to send messages on the + * channel after this bit is set + */ u8 mac[ETH_ALEN]; u8 mac_padding[2]; -- cgit v1.2.3 From af902ae4432ba8650a5f16a2cca58507a5f566bd Mon Sep 17 00:00:00 2001 From: Ariel Elior Date: Thu, 20 Jun 2013 17:39:09 +0300 Subject: bnx2x: VF ndo sanity If iproute2 VF callbacks are invoked before PF is loaded, abort gracefully. Signed-off-by: Ariel Elior Signed-off-by: Yuval Mintz Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 2a8ad6d4b540..f6177baf20b7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -3083,6 +3083,11 @@ void bnx2x_disable_sriov(struct bnx2x *bp) static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, struct bnx2x_virtf *vf) { + if (bp->state != BNX2X_STATE_OPEN) { + BNX2X_ERR("vf ndo called though PF is down\n"); + return -EINVAL; + } + if (!IS_SRIOV(bp)) { BNX2X_ERR("vf ndo called though sriov is disabled\n"); return -EINVAL; -- cgit v1.2.3 From 03c22ea3f0951f7c5395fe8348cb15100815268a Mon Sep 17 00:00:00 2001 From: Ariel Elior Date: Thu, 20 Jun 2013 17:39:10 +0300 Subject: bnx2x: improve VF timings Wait 100ms for FLR to complete in parallel over all VFs instead of serializing the waits (which can amount to several seconds with 64 VFs). Signed-off-by: Ariel Elior Signed-off-by: Yuval Mintz Signed-off-by: Eilon Greenstein Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index f6177baf20b7..bda4d7e7fe7b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1469,9 +1469,6 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) { - /* Wait 100ms */ - msleep(100); - /* Verify no pending pci transactions */ if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) BNX2X_ERR("PCIE Transactions still pending\n"); @@ -2174,6 +2171,9 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); + /* let FLR complete ... */ + msleep(100); + /* initialize vf database */ for_each_vf(bp, vfid) { struct bnx2x_virtf *vf = BP_VF(bp, vfid); @@ -2775,6 +2775,10 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) vf->abs_vfid, vf->state); return -EINVAL; } + + /* let FLR complete ... */ + msleep(100); + /* FLR cleanup epilogue */ if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) return -EBUSY; -- cgit v1.2.3 From b8e0d884cba6b9cd6e43adfaab9e67096ee8fb28 Mon Sep 17 00:00:00 2001 From: Yaniv Rosner Date: Thu, 20 Jun 2013 17:39:11 +0300 Subject: bnx2x: Fix 20G KR2 support claims Don't claim 20G is supported if the speed is unsupported by the phys (reflected by various ethtools and ndos). Signed-off-by: Yaniv Rosner Signed-off-by: Yuval Mintz Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7a7e326b3509..d468fe48093a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10558,6 +10558,10 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) if (!(bp->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) + bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; } BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], -- cgit v1.2.3 From 37173488400704f1a05656616cd12baa9e03173b Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Mon, 24 Jun 2013 11:04:10 +0300 Subject: bnx2x: Fix compilation with no IOV support This fixes an issue caused by submit 78c3bcc5d1af64f51d9f30b0f5a2d1985bf69734 `bnx2x: Improve PF behaviour toward VF', which made the bnx2x driver fail compilation when PCI_IOV is not set. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 14 ++------------ drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 14 ++++++++++++++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 2 ++ 3 files changed, 18 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index d468fe48093a..73189888766d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -5457,18 +5457,8 @@ static void bnx2x_timer(unsigned long data) bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); /* sample pf vf bulletin board for new posts from pf */ - if (IS_VF(bp)) { - bnx2x_sample_bulletin(bp); - - /* if channel is down we need to self destruct */ - if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { - smp_mb__before_clear_bit(); - set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, - &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); - schedule_delayed_work(&bp->sp_rtnl_task, 0); - } - } + if (IS_VF(bp)) + bnx2x_timer_sriov(bp); mod_timer(&bp->timer, jiffies + bp->current_interval); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index bda4d7e7fe7b..95861efb5051 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -3426,6 +3426,20 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) return PFVF_BULLETIN_UPDATED; } +void bnx2x_timer_sriov(struct bnx2x *bp) +{ + bnx2x_sample_bulletin(bp); + + /* if channel is down we need to self destruct */ + if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { + smp_mb__before_clear_bit(); + set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + &bp->sp_rtnl_state); + smp_mb__after_clear_bit(); + schedule_delayed_work(&bp->sp_rtnl_task, 0); + } +} + void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) { /* vf doorbells are embedded within the regview */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 9fc029ed9547..d143a7cdbbbe 100644 --- 
a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -751,6 +751,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, } enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); +void bnx2x_timer_sriov(struct bnx2x *bp); void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); int bnx2x_vf_pci_alloc(struct bnx2x *bp); int bnx2x_enable_sriov(struct bnx2x *bp); @@ -809,6 +810,7 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp { return PFVF_BULLETIN_UNCHANGED; } +static inline void bnx2x_timer_sriov(struct bnx2x *bp) {} static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) { -- cgit v1.2.3 From b28405b0f25c91b52350fd558c219f08e5033eaf Mon Sep 17 00:00:00 2001 From: Alexandre Rames Date: Thu, 21 Mar 2013 16:41:43 +0000 Subject: sfc: Fix EEH with legacy interrupts. PCI legacy interrupts are level-triggered, and we cannot mask them up on an isolated device. Instead, disable the IRQ at the controller until we have recovered. Signed-off-by: Ben Hutchings --- drivers/net/ethernet/sfc/efx.c | 7 ++++++- drivers/net/ethernet/sfc/efx.h | 1 + drivers/net/ethernet/sfc/net_driver.h | 1 + drivers/net/ethernet/sfc/nic.c | 10 ++++++++++ 4 files changed, 18 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 46cc11d5e205..787c9ebc17ee 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "net_driver.h" #include "efx.h" #include "nic.h" @@ -1427,6 +1428,10 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq) BUG_ON(efx->state == STATE_DISABLED); + if (efx->eeh_disabled_legacy_irq) { + enable_irq(efx->legacy_irq); + efx->eeh_disabled_legacy_irq = false; + } if (efx->legacy_irq) efx->legacy_irq_enabled = true; efx_nic_enable_interrupts(efx); @@ -2365,7 +2370,7 @@ out: * Returns 0 if the recovery mechanisms are unsuccessful. * Returns a non-zero value otherwise. 
*/ -static int efx_try_recovery(struct efx_nic *efx) +int efx_try_recovery(struct efx_nic *efx) { #ifdef CONFIG_EEH /* A PCI error can occur and not be seen by EEH because nothing diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 8372da239b43..bdb30bbb0c97 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -124,6 +124,7 @@ extern const struct ethtool_ops efx_ethtool_ops; extern int efx_reset(struct efx_nic *efx, enum reset_type method); extern void efx_reset_down(struct efx_nic *efx, enum reset_type method); extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); +extern int efx_try_recovery(struct efx_nic *efx); /* Global */ extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9a2914cfd345..2dec48bed274 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -788,6 +788,7 @@ struct efx_nic { const struct efx_nic_type *type; int legacy_irq; bool legacy_irq_enabled; + bool eeh_disabled_legacy_irq; struct workqueue_struct *workqueue; char workqueue_name[16]; struct work_struct reset_work; diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index b0503cd8c2a0..39432d3d1fe9 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -1579,6 +1579,16 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) efx_readd(efx, ®, FR_BZ_INT_ISR0); queues = EFX_EXTRACT_DWORD(reg, 0, 31); + /* Legacy interrupts are disabled too late by the EEH kernel + * code. Disable them earlier. + * If an EEH error occurred, the read will have returned all ones. + */ + if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && + !efx->eeh_disabled_legacy_irq) { + disable_irq_nosync(efx->legacy_irq); + efx->eeh_disabled_legacy_irq = true; + } + /* Handle non-event-queue sources */ if (queues & (1U << efx->irq_level)) { syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); -- cgit v1.2.3 From c99dffc4170b23455f9ee9e56baf5f77a68ca4c8 Mon Sep 17 00:00:00 2001 From: Jon Cooper Date: Mon, 8 Apr 2013 12:49:48 +0100 Subject: sfc: Enable RX checksum offload for packets not handled by GRO Signed-off-by: Ben Hutchings --- drivers/net/ethernet/sfc/rx.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index a7dfe36cabf4..b915e09b8691 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -598,6 +598,8 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, /* Set the SKB flags */ skb_checksum_none_assert(skb); + if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) + skb->ip_summed = CHECKSUM_UNNECESSARY; if (channel->type->receive_skb) if (channel->type->receive_skb(channel, skb)) -- cgit v1.2.3 From d4ef5b6f374dfbef2c1e00cea73c3b76109f6246 Mon Sep 17 00:00:00 2001 From: Jon Cooper Date: Mon, 8 Apr 2013 12:55:58 +0100 Subject: sfc: Increase size of RX SKB header area This allows the SKB to hold the headers without reallocation more often. 
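For a sense of scale, a hedged illustration (not code from this patch) of why 64 bytes can fall short: the header stack of a common VLAN-tagged TCP/IPv4 frame already adds up to roughly 70 bytes, so a 64-byte copy area forces the stack to pull the remaining header bytes out of fragments later, while 128 bytes leaves headroom.

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>

/* Back-of-the-envelope header budget for a typical frame:
 * Ethernet (14) + VLAN tag (4) + IPv4 (20) + TCP (20) + common TCP
 * options such as timestamps (~12) = ~70 bytes.  The constant below
 * is purely illustrative and is not used by the driver.
 */
#define TYPICAL_RX_HEADER_BYTES \
	(ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr) + \
	 sizeof(struct tcphdr) + 12 /* TCP options */)
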
Signed-off-by: Ben Hutchings --- drivers/net/ethernet/sfc/rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index b915e09b8691..6efff3d030b6 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -36,7 +36,7 @@ #define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH) /* Size of buffer allocated for skb header area. */ -#define EFX_SKB_HEADERS 64u +#define EFX_SKB_HEADERS 128u /* This is the percentage fill level below which new RX descriptors * will be added to the RX descriptor ring. -- cgit v1.2.3 From 62ebac926b7a5cd7cb6dc02a8d6fa925fa206a23 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 8 Apr 2013 17:34:58 +0100 Subject: sfc: Report software timestamping capabilities The kernel can generate software receive timestamps and we should report those for all ports regardless of hardware capabilities. Signed-off-by: Ben Hutchings --- drivers/net/ethernet/sfc/ethtool.c | 16 +++++++++++++++- drivers/net/ethernet/sfc/nic.h | 4 ++-- drivers/net/ethernet/sfc/ptp.c | 13 +++++-------- 3 files changed, 22 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 6e768175e7e0..1fc21458413d 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -1114,6 +1114,20 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, return 0; } +int efx_ethtool_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *ts_info) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + /* Software capabilities */ + ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE); + ts_info->phc_index = -1; + + efx_ptp_get_ts_info(efx, ts_info); + return 0; +} + static int efx_ethtool_get_module_eeprom(struct net_device *net_dev, struct ethtool_eeprom *ee, u8 *data) @@ -1176,7 +1190,7 @@ const struct ethtool_ops efx_ethtool_ops = { .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_indir = efx_ethtool_get_rxfh_indir, .set_rxfh_indir = efx_ethtool_set_rxfh_indir, - .get_ts_info = efx_ptp_get_ts_info, + .get_ts_info = efx_ethtool_get_ts_info, .get_module_info = efx_ethtool_get_module_info, .get_module_eeprom = efx_ethtool_get_module_eeprom, }; diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 1b0003323498..d63c2991a751 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -254,8 +254,8 @@ extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, struct ethtool_ts_info; extern void efx_ptp_probe(struct efx_nic *efx); extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd); -extern int efx_ptp_get_ts_info(struct net_device *net_dev, - struct ethtool_ts_info *ts_info); +extern void efx_ptp_get_ts_info(struct efx_nic *efx, + struct ethtool_ts_info *ts_info); extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 9a95abf2dedf..b495394a6dfa 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1203,18 +1203,16 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) return 0; } -int 
-efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info) +void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info) { - struct efx_nic *efx = netdev_priv(net_dev); struct efx_ptp_data *ptp = efx->ptp_data; if (!ptp) - return -EOPNOTSUPP; + return; - ts_info->so_timestamping = (SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE); + ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE); ts_info->phc_index = ptp_clock_index(ptp->phc_clock); ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON; ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE | @@ -1224,7 +1222,6 @@ efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info) 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); - return 0; } int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd) -- cgit v1.2.3 From 3ea84c5492e19daa478fad42f97b7dda9289e71c Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Fri, 10 May 2013 16:51:33 -0700 Subject: sfc: Enable accelerated RFS on vlans As far as I know, the hardware doesn't support matching on both IP fields and vlan tag, but it can at least match on the IP fields. Signed-off-by: Andy Lutomirski Signed-off-by: Ben Hutchings --- drivers/net/ethernet/sfc/filter.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c index 2397f0e8d3eb..b74a60ab9ac7 100644 --- a/drivers/net/ethernet/sfc/filter.c +++ b/drivers/net/ethernet/sfc/filter.c @@ -1185,8 +1185,21 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, nhoff = skb_network_offset(skb); - if (skb->protocol != htons(ETH_P_IP)) + if (skb->protocol == htons(ETH_P_8021Q)) { + EFX_BUG_ON_PARANOID(skb_headlen(skb) < + nhoff + sizeof(struct vlan_hdr)); + if (((const struct vlan_hdr *)skb->data + nhoff)-> + h_vlan_encapsulated_proto != htons(ETH_P_IP)) + return -EPROTONOSUPPORT; + + /* This is IP over 802.1q VLAN. We can't filter on the + * IP 5-tuple and the vlan together, so just strip the + * vlan header and filter on the IP part. + */ + nhoff += sizeof(struct vlan_hdr); + } else if (skb->protocol != htons(ETH_P_IP)) { return -EPROTONOSUPPORT; + } /* RFS must validate the IP header length before calling us */ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip)); -- cgit v1.2.3 From d07df8ec089cc72dded73dac5cf564ef9bba2fa1 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 16 May 2013 18:38:11 +0100 Subject: sfc: Define and set RX buffer flag for packets parsed as TCP This will be useful for shortcutting some software packet parsing. 
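As context, a rough sketch of the software parse that a per-buffer EFX_RX_PKT_TCP flag makes unnecessary (the helper below is hypothetical and not part of the patch; only standard Linux header definitions are assumed). The follow-up GRO patch further down branches on the flag instead of re-examining headers like this:

#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/in.h>

/* Hypothetical helper: the check the RX path would otherwise have to
 * perform in software to learn what the hardware parse already knows.
 */
static bool frame_is_tcp_ipv4(const struct ethhdr *eth,
			      const struct iphdr *iph)
{
	return eth->h_proto == htons(ETH_P_IP) &&
	       iph->protocol == IPPROTO_TCP;
}
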
Signed-off-by: Ben Hutchings --- drivers/net/ethernet/sfc/net_driver.h | 1 + drivers/net/ethernet/sfc/nic.c | 19 ++++++++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 2dec48bed274..f4c7e6b67743 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -243,6 +243,7 @@ struct efx_rx_buffer { #define EFX_RX_BUF_LAST_IN_PAGE 0x0001 #define EFX_RX_PKT_CSUMMED 0x0002 #define EFX_RX_PKT_DISCARD 0x0004 +#define EFX_RX_PKT_TCP 0x0040 /** * struct efx_rx_page_state - Page-based rx buffer state diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 39432d3d1fe9..f2b864c14ca9 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -1080,12 +1080,21 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); if (likely(rx_ev_pkt_ok)) { - /* If packet is marked as OK and packet type is TCP/IP or - * UDP/IP, then we can rely on the hardware checksum. + /* If packet is marked as OK then we can rely on the + * hardware checksum and classification. */ - flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || - rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ? - EFX_RX_PKT_CSUMMED : 0; + flags = 0; + switch (rx_ev_hdr_type) { + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: + flags |= EFX_RX_PKT_TCP; + /* fall through */ + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: + flags |= EFX_RX_PKT_CSUMMED; + /* fall through */ + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: + case FSE_AZ_RX_EV_HDR_TYPE_OTHER: + break; + } } else { flags = efx_handle_rx_not_ok(rx_queue, event); } -- cgit v1.2.3 From e79255de8581fac7c77dba443d5e79bbc6e72a1f Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 16 May 2013 18:38:13 +0100 Subject: sfc: Do not pass non-TCP packets into GRO code GRO can handle non-TCP packets and pass them up without coalescing, but it has to do some extra work to parse the packet which we can bypass using the hardware parse result. (This condition yields a false negative for TCP/IPv6 packets received by Falcon, but its performance is already poor in that case due to lack of checksum offload.) Signed-off-by: Ben Hutchings --- drivers/net/ethernet/sfc/rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 6efff3d030b6..5c45118cc03a 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -629,7 +629,7 @@ void __efx_rx_packet(struct efx_channel *channel) if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; - if (!channel->type->receive_skb) + if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb) efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); else efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); -- cgit v1.2.3 From 1899c111a535e43046b14ae13639747d9d2544d6 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 22 May 2013 18:03:35 +0100 Subject: sfc: Fix IRQ cleanup in case of a probe failure The lifetime of an irq_cpu_rmap is odd: we have to allocate it before installing IRQ handlers and free it before removing the IRQ handlers. As a result of this asymmetry, it was omitted from some failure paths. 
On another failure path, we could try to remove IRQ handlers we had not yet installed. Move the irq_cpu_rmap allocation and freeing alongside IRQ handler installation and removal, in efx_nic_{init,fini}_interrupts(). Count the number of IRQ handlers successfully installed and only remove those on the failure path. Signed-off-by: Ben Hutchings --- drivers/net/ethernet/sfc/efx.c | 33 ------------------------------- drivers/net/ethernet/sfc/nic.c | 45 +++++++++++++++++++++++++++++++++++++----- 2 files changed, 40 insertions(+), 38 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 787c9ebc17ee..e7284a2caffa 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include "net_driver.h" @@ -1284,29 +1283,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) return count; } -static int -efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries) -{ -#ifdef CONFIG_RFS_ACCEL - unsigned int i; - int rc; - - efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels); - if (!efx->net_dev->rx_cpu_rmap) - return -ENOMEM; - for (i = 0; i < efx->n_rx_channels; i++) { - rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap, - xentries[i].vector); - if (rc) { - free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); - efx->net_dev->rx_cpu_rmap = NULL; - return rc; - } - } -#endif - return 0; -} - /* Probe the number and type of interrupts we are able to obtain, and * the resulting numbers of channels and RX queues. */ @@ -1360,11 +1336,6 @@ static int efx_probe_interrupts(struct efx_nic *efx) efx->n_tx_channels = n_channels; efx->n_rx_channels = n_channels; } - rc = efx_init_rx_cpu_rmap(efx, xentries); - if (rc) { - pci_disable_msix(efx->pci_dev); - return rc; - } for (i = 0; i < efx->n_channels; i++) efx_get_channel(efx, i)->irq = xentries[i].vector; @@ -2608,10 +2579,6 @@ static void efx_pci_remove_main(struct efx_nic *efx) BUG_ON(efx->state == STATE_READY); cancel_work_sync(&efx->reset_work); -#ifdef CONFIG_RFS_ACCEL - free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); - efx->net_dev->rx_cpu_rmap = NULL; -#endif efx_stop_interrupts(efx, false); efx_nic_fini_interrupt(efx); efx_fini_port(efx); diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index f2b864c14ca9..56ed3bc71e00 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "net_driver.h" #include "bitfield.h" #include "efx.h" @@ -1706,6 +1707,7 @@ void efx_nic_push_rx_indir_table(struct efx_nic *efx) int efx_nic_init_interrupt(struct efx_nic *efx) { struct efx_channel *channel; + unsigned int n_irqs; int rc; if (!EFX_INT_MODE_USE_MSI(efx)) { @@ -1726,7 +1728,19 @@ int efx_nic_init_interrupt(struct efx_nic *efx) return 0; } +#ifdef CONFIG_RFS_ACCEL + if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { + efx->net_dev->rx_cpu_rmap = + alloc_irq_cpu_rmap(efx->n_rx_channels); + if (!efx->net_dev->rx_cpu_rmap) { + rc = -ENOMEM; + goto fail1; + } + } +#endif + /* Hook MSI or MSI-X interrupt */ + n_irqs = 0; efx_for_each_channel(channel, efx) { rc = request_irq(channel->irq, efx_msi_interrupt, IRQF_PROBE_SHARED, /* Not shared */ @@ -1737,13 +1751,31 @@ int efx_nic_init_interrupt(struct efx_nic *efx) "failed to hook IRQ %d\n", channel->irq); goto fail2; } + ++n_irqs; + +#ifdef CONFIG_RFS_ACCEL + if (efx->interrupt_mode == EFX_INT_MODE_MSIX && + 
channel->channel < efx->n_rx_channels) { + rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap, + channel->irq); + if (rc) + goto fail2; + } +#endif } return 0; fail2: - efx_for_each_channel(channel, efx) +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); + efx->net_dev->rx_cpu_rmap = NULL; +#endif + efx_for_each_channel(channel, efx) { + if (n_irqs-- == 0) + break; free_irq(channel->irq, &efx->channel[channel->channel]); + } fail1: return rc; } @@ -1753,11 +1785,14 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) struct efx_channel *channel; efx_oword_t reg; +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); + efx->net_dev->rx_cpu_rmap = NULL; +#endif + /* Disable MSI/MSI-X interrupts */ - efx_for_each_channel(channel, efx) { - if (channel->irq) - free_irq(channel->irq, &efx->channel[channel->channel]); - } + efx_for_each_channel(channel, efx) + free_irq(channel->irq, &efx->channel[channel->channel]); /* ACK legacy interrupt */ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) -- cgit v1.2.3 From 636d73da27e83ce4882f8823f79063bb37980961 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 12 Jun 2013 18:09:08 +0100 Subject: sfc: Improve test for IOMMU in use The device::iommu_group field may be set even if no IOMMU is in use. iommu_present() is still a better indicator, although it doesn't tell us whether *our* device is affected. Reported-by: Alex Williamson Signed-off-by: Ben Hutchings --- drivers/net/ethernet/sfc/rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 5c45118cc03a..65646cd7af8e 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -677,7 +677,7 @@ static void efx_init_rx_recycle_ring(struct efx_nic *efx, #ifdef CONFIG_PPC64 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU; #else - if (efx->pci_dev->dev.iommu_group) + if (iommu_present(&pci_bus_type)) bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU; else bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU; -- cgit v1.2.3 From d3c5f47ee2d16497fafaf2f26b0ffeb5c3d4e721 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 24 Jun 2013 12:43:40 -0700 Subject: net: Restore unintentional reverts. This restores commits: c573972c111eb4c6b3f3250ad71e7c75cc799833 1a5904342c7380ceddd61c0b37544d752d0b1433 da2e2c214953f37c2a6be20226537ca5a329724c which initially accidently went into 'net', were reverted there, and then properly placed into 'net-next'. But the next net --> net-next merge accidently wiped them out again. Reported-by: Paul Gortmaker Signed-off-by: David S. 
Miller --- drivers/net/ethernet/apple/bmac.c | 1 - drivers/net/ethernet/korina.c | 1 - drivers/net/ethernet/sun/sunbmac.c | 1 - 3 files changed, 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index 714dcfe3a469..a597b766f080 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -1016,7 +1016,6 @@ static void bmac_set_multicast(struct net_device *dev) static void bmac_set_multicast(struct net_device *dev) { struct netdev_hw_addr *ha; - int i; unsigned short rx_cfg; u32 crc; diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 64646eb39e8b..270e65f21102 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -483,7 +483,6 @@ static void korina_multicast_list(struct net_device *dev) unsigned long flags; struct netdev_hw_addr *ha; u32 recognise = ETH_ARC_AB; /* always accept broadcasts */ - int i; /* Set promiscuous mode */ if (dev->flags & IFF_PROMISC) diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 09b4f8c0b199..0d43fa9ff980 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -995,7 +995,6 @@ static void bigmac_set_multicast(struct net_device *dev) struct bigmac *bp = netdev_priv(dev); void __iomem *bregs = bp->bregs; struct netdev_hw_addr *ha; - int i; u32 tmp, crc; /* Disable the receiver. The bit self-clears when -- cgit v1.2.3 From c957d09ffda417f6c8e3d1f10e2b05228607d6d7 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Tue, 25 Jun 2013 08:50:11 +0300 Subject: bnx2x: Remove sparse and coccinelle warnings This patch solves several sparse issues as well as an unneeded semicolon found via coccinelle. Signed-off-by: Yuval Mintz Signed-off-by: Dmitry Kravkov Signed-off-by: Ariel Elior Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 9 ++++++--- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 8 +++++--- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2 +- 3 files changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index d342c5a34afc..ec3aa1d451e8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1722,7 +1722,7 @@ static int bnx2x_req_irq(struct bnx2x *bp) return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); } -int bnx2x_setup_irqs(struct bnx2x *bp) +static int bnx2x_setup_irqs(struct bnx2x *bp) { int rc = 0; if (bp->flags & USING_MSIX_FLAG && @@ -3543,9 +3543,12 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, /* outer IP header info */ if (xmit_type & XMIT_CSUM_V4) { struct iphdr *iph = ip_hdr(skb); + u16 csum = (__force u16)(~iph->check) - + (__force u16)iph->tot_len - + (__force u16)iph->frag_off; + pbd2->fw_ip_csum_wo_len_flags_frag = - bswab16(csum_fold((~iph->check) - - iph->tot_len - iph->frag_off)); + bswab16(csum_fold((__force __wsum)csum)); } else { pbd2->fw_ip_hdr_to_payload_w = hlen_w - ((sizeof(struct ipv6hdr)) >> 1); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 7c6faebb1838..b8c067d1a0f2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1391,7 +1391,7 @@ static bool bnx2x_is_nvm_accessible(struct bnx2x *bp) bp->pm_cap + PCI_PM_CTRL, &pm); if ((rc && !netif_running(dev)) || - (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != PCI_D0))) + (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0))) return false; return true; @@ -1610,8 +1610,10 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, */ val = be32_to_cpu(val_be); - val &= ~le32_to_cpu(0xff << BYTE_OFFSET(offset)); - val |= le32_to_cpu(*data_buf << BYTE_OFFSET(offset)); + val &= ~le32_to_cpu((__force __le32) + (0xff << BYTE_OFFSET(offset))); + val |= le32_to_cpu((__force __le32) + (*data_buf << BYTE_OFFSET(offset))); rc = bnx2x_nvram_write_dword(bp, align_offset, val, cmd_flags); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 73189888766d..740518bbcb5f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -615,7 +615,7 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) if (rc) { BNX2X_ERR("DMAE returned failure %d\n", rc); bnx2x_panic(); - }; + } } static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, -- cgit v1.2.3 From 51151a16a60f0a886a0b1e4a0697001198af50c4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 23 Jun 2013 08:17:56 -0700 Subject: mlx4: allow order-0 memory allocations in RX path Signed-off-by: Eric Dumazet mlx4 exclusively uses order-2 allocations in RX path, which are likely to fail under memory pressure. We therefore drop frames more than needed. This patch tries order-3, order-2, order-1 and finally order-0 allocations to keep good performance, yet allow allocations if/when memory gets fragmented. By using larger pages, and avoiding unnecessary get_page()/put_page() on compound pages, this patch improves performance as well, lowering false sharing on struct page. 
Also use GFP_KERNEL allocations in initialization path, as allocating 12 MB (390 order-3 pages) can easily fail with GFP_ATOMIC. Signed-off-by: Eric Dumazet Cc: Amir Vadai Acked-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 169 ++++++++++++++------------- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 12 +- 2 files changed, 95 insertions(+), 86 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9c57581b021c..76997b93fdfe 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -43,40 +43,64 @@ #include "mlx4_en.h" +static int mlx4_alloc_pages(struct mlx4_en_priv *priv, + struct mlx4_en_rx_alloc *page_alloc, + const struct mlx4_en_frag_info *frag_info, + gfp_t _gfp) +{ + int order; + struct page *page; + dma_addr_t dma; + + for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) { + gfp_t gfp = _gfp; + + if (order) + gfp |= __GFP_COMP | __GFP_NOWARN; + page = alloc_pages(gfp, order); + if (likely(page)) + break; + if (--order < 0 || + ((PAGE_SIZE << order) < frag_info->frag_size)) + return -ENOMEM; + } + dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(priv->ddev, dma)) { + put_page(page); + return -ENOMEM; + } + page_alloc->size = PAGE_SIZE << order; + page_alloc->page = page; + page_alloc->dma = dma; + page_alloc->offset = frag_info->frag_align; + /* Not doing get_page() for each frag is a big win + * on asymetric workloads. + */ + atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride); + return 0; +} + static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_alloc *frags, - struct mlx4_en_rx_alloc *ring_alloc) + struct mlx4_en_rx_alloc *ring_alloc, + gfp_t gfp) { struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; - struct mlx4_en_frag_info *frag_info; + const struct mlx4_en_frag_info *frag_info; struct page *page; dma_addr_t dma; int i; for (i = 0; i < priv->num_frags; i++) { frag_info = &priv->frag_info[i]; - if (ring_alloc[i].offset == frag_info->last_offset) { - page = alloc_pages(GFP_ATOMIC | __GFP_COMP, - MLX4_EN_ALLOC_ORDER); - if (!page) - goto out; - dma = dma_map_page(priv->ddev, page, 0, - MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); - if (dma_mapping_error(priv->ddev, dma)) { - put_page(page); - goto out; - } - page_alloc[i].page = page; - page_alloc[i].dma = dma; - page_alloc[i].offset = frag_info->frag_align; - } else { - page_alloc[i].page = ring_alloc[i].page; - get_page(ring_alloc[i].page); - page_alloc[i].dma = ring_alloc[i].dma; - page_alloc[i].offset = ring_alloc[i].offset + - frag_info->frag_stride; - } + page_alloc[i] = ring_alloc[i]; + page_alloc[i].offset += frag_info->frag_stride; + if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size) + continue; + if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp)) + goto out; } for (i = 0; i < priv->num_frags; i++) { @@ -88,14 +112,16 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, return 0; - out: while (i--) { frag_info = &priv->frag_info[i]; - if (ring_alloc[i].offset == frag_info->last_offset) + if (page_alloc[i].page != ring_alloc[i].page) { dma_unmap_page(priv->ddev, page_alloc[i].dma, - MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); - put_page(page_alloc[i].page); + page_alloc[i].size, PCI_DMA_FROMDEVICE); + page = page_alloc[i].page; + atomic_set(&page->_count, 1); + 
put_page(page); + } } return -ENOMEM; } @@ -104,12 +130,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv, struct mlx4_en_rx_alloc *frags, int i) { - struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; + const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; - if (frags[i].offset == frag_info->last_offset) { - dma_unmap_page(priv->ddev, frags[i].dma, MLX4_EN_ALLOC_SIZE, + if (frags[i].offset + frag_info->frag_stride > frags[i].size) + dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size, PCI_DMA_FROMDEVICE); - } + if (frags[i].page) put_page(frags[i].page); } @@ -117,35 +143,28 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv, static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) { - struct mlx4_en_rx_alloc *page_alloc; int i; + struct mlx4_en_rx_alloc *page_alloc; for (i = 0; i < priv->num_frags; i++) { - page_alloc = &ring->page_alloc[i]; - page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, - MLX4_EN_ALLOC_ORDER); - if (!page_alloc->page) - goto out; + const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; - page_alloc->dma = dma_map_page(priv->ddev, page_alloc->page, 0, - MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); - if (dma_mapping_error(priv->ddev, page_alloc->dma)) { - put_page(page_alloc->page); - page_alloc->page = NULL; + if (mlx4_alloc_pages(priv, &ring->page_alloc[i], + frag_info, GFP_KERNEL)) goto out; - } - page_alloc->offset = priv->frag_info[i].frag_align; - en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n", - i, page_alloc->page); } return 0; out: while (i--) { + struct page *page; + page_alloc = &ring->page_alloc[i]; dma_unmap_page(priv->ddev, page_alloc->dma, - MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); - put_page(page_alloc->page); + page_alloc->size, PCI_DMA_FROMDEVICE); + page = page_alloc->page; + atomic_set(&page->_count, 1); + put_page(page); page_alloc->page = NULL; } return -ENOMEM; @@ -158,13 +177,18 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, int i; for (i = 0; i < priv->num_frags; i++) { + const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; + page_alloc = &ring->page_alloc[i]; en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", i, page_count(page_alloc->page)); dma_unmap_page(priv->ddev, page_alloc->dma, - MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE); - put_page(page_alloc->page); + page_alloc->size, PCI_DMA_FROMDEVICE); + while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) { + put_page(page_alloc->page); + page_alloc->offset += frag_info->frag_stride; + } page_alloc->page = NULL; } } @@ -195,13 +219,14 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv, } static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, - struct mlx4_en_rx_ring *ring, int index) + struct mlx4_en_rx_ring *ring, int index, + gfp_t gfp) { struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride); struct mlx4_en_rx_alloc *frags = ring->rx_info + (index << priv->log_rx_info); - return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc); + return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); } static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) @@ -235,7 +260,8 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) ring = &priv->rx_ring[ring_ind]; if (mlx4_en_prepare_rx_desc(priv, ring, - ring->actual_size)) { + ring->actual_size, + GFP_KERNEL)) { if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { en_err(priv, "Failed to allocate " "enough rx buffers\n"); @@ 
-450,11 +476,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, DMA_FROM_DEVICE); /* Save page reference in skb */ - get_page(frags[nr].page); __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page); skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size); skb_frags_rx[nr].page_offset = frags[nr].offset; skb->truesize += frag_info->frag_stride; + frags[nr].page = NULL; } /* Adjust size of last fragment to match actual length */ if (nr > 0) @@ -547,7 +573,7 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, int index = ring->prod & ring->size_mask; while ((u32) (ring->prod - ring->cons) < ring->actual_size) { - if (mlx4_en_prepare_rx_desc(priv, ring, index)) + if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC)) break; ring->prod++; index = ring->prod & ring->size_mask; @@ -805,21 +831,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) return done; } - -/* Calculate the last offset position that accommodates a full fragment - * (assuming fagment size = stride-align) */ -static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align) -{ - u16 res = MLX4_EN_ALLOC_SIZE % stride; - u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align; - - en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d " - "res:%d offset:%d\n", stride, align, res, offset); - return offset; -} - - -static int frag_sizes[] = { +static const int frag_sizes[] = { FRAG_SZ0, FRAG_SZ1, FRAG_SZ2, @@ -847,9 +859,6 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i], SMP_CACHE_BYTES); } - priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset( - priv, priv->frag_info[i].frag_stride, - priv->frag_info[i].frag_align); buf_size += priv->frag_info[i].frag_size; i++; } @@ -861,13 +870,13 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " "num_frags:%d):\n", eff_mtu, priv->num_frags); for (i = 0; i < priv->num_frags; i++) { - en_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d " - "stride:%d last_offset:%d\n", i, - priv->frag_info[i].frag_size, - priv->frag_info[i].frag_prefix_size, - priv->frag_info[i].frag_align, - priv->frag_info[i].frag_stride, - priv->frag_info[i].last_offset); + en_err(priv, + " frag:%d - size:%d prefix:%d align:%d stride:%d\n", + i, + priv->frag_info[i].frag_size, + priv->frag_info[i].frag_prefix_size, + priv->frag_info[i].frag_align, + priv->frag_info[i].frag_stride); } } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 57192a8f1d5e..35fb60e2320c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -96,7 +96,8 @@ /* Use the maximum between 16384 and a single page */ #define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384) -#define MLX4_EN_ALLOC_ORDER get_order(MLX4_EN_ALLOC_SIZE) + +#define MLX4_EN_ALLOC_PREFER_ORDER PAGE_ALLOC_COSTLY_ORDER /* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU * and 4K allocations) */ @@ -234,9 +235,10 @@ struct mlx4_en_tx_desc { #define MLX4_EN_CX3_HIGH_ID 0x1005 struct mlx4_en_rx_alloc { - struct page *page; - dma_addr_t dma; - u16 offset; + struct page *page; + dma_addr_t dma; + u32 offset; + u32 size; }; struct mlx4_en_tx_ring { @@ -439,8 +441,6 @@ struct mlx4_en_frag_info { u16 frag_prefix_size; u16 frag_stride; u16 frag_align; - u16 last_offset; - }; #ifdef CONFIG_MLX4_EN_DCB -- cgit v1.2.3 From 
8850494a33cc67bbf31d2e3ce630d0f4e14efa56 Mon Sep 17 00:00:00 2001 From: Dotan Barak Date: Tue, 25 Jun 2013 12:09:29 +0300 Subject: net/mlx4_en: Fix resource leak in error flow Wrong condition was used when calling iounmap. Signed-off-by: Dotan Barak Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index a5c9df07a7d0..a071cda2dd04 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -310,7 +310,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) err_mr: (void) mlx4_mr_free(dev, &mdev->mr); err_map: - if (!mdev->uar_map) + if (mdev->uar_map) iounmap(mdev->uar_map); err_uar: mlx4_uar_free(dev, &mdev->priv_uar); -- cgit v1.2.3 From 6123db2ec529f1d1865298388e129a6aad2fae17 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Tue, 25 Jun 2013 12:09:30 +0300 Subject: net/mlx4_en: Do not query stats when device port is down There are no counters allocated to the eth device when the port is down, so this query is meaningless at that time. It also leads to querying incorrect counters (since the counter_index is not valid when the device port is down). Signed-off-by: Jack Morgenstein Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 7299ada876c2..c0b02d7bcc2f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1375,12 +1375,13 @@ static void mlx4_en_do_get_stats(struct work_struct *work) mutex_lock(&mdev->state_lock); if (mdev->device_up) { - err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); - if (err) - en_dbg(HW, priv, "Could not update stats\n"); + if (priv->port_up) { + err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); + if (err) + en_dbg(HW, priv, "Could not update stats\n"); - if (priv->port_up) mlx4_en_auto_moderation(priv); + } queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); } -- cgit v1.2.3 From 4801ae70d80dc815997e3cb6bca1dfe36d1594fd Mon Sep 17 00:00:00 2001 From: Eugenia Emantayev Date: Tue, 25 Jun 2013 12:09:31 +0300 Subject: net/mlx4_en: Move register_netdev() to the end of initialization function To avoid a race between the open function and everything that happens after register_netdev() move it to be the last operation called. Signed-off-by: Eugenia Emantayev Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index c0b02d7bcc2f..1f0f8170d79d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2323,6 +2323,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, mdev->pndev[port] = dev; netif_carrier_off(dev); + mlx4_en_set_default_moderation(priv); + err = register_netdev(dev); if (err) { en_err(priv, "Netdev registration failed for port %d\n", port); @@ -2354,7 +2356,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, en_err(priv, "Failed Initializing port\n"); goto out; } - mlx4_en_set_default_moderation(priv); queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) -- cgit v1.2.3 From 9e19b54554fac0a1c1c92f0a10b29d216b84a470 Mon Sep 17 00:00:00 2001 From: Eugenia Emantayev Date: Tue, 25 Jun 2013 12:09:32 +0300 Subject: net/mlx4_en: Change log level from error to debug for vlan related messages The port vlan table size is 126 (used for IBoE) so after 126 we will not have space and the user need to see it only in debug print and not error. Signed-off-by: Aviad Yehezkel Reviewed-by: Yevgeny Petrilin Signed-off-by: Eugenia Emantayev Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 1f0f8170d79d..f256a734e021 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -405,7 +405,7 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, en_err(priv, "Failed configuring VLAN filter\n"); } if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) - en_err(priv, "failed adding vlan %d\n", vid); + en_dbg(HW, priv, "failed adding vlan %d\n", vid); mutex_unlock(&mdev->state_lock); return 0; @@ -428,7 +428,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx)) mlx4_unregister_vlan(mdev->dev, priv->port, idx); else - en_err(priv, "could not find vid %d in cache\n", vid); + en_dbg(HW, priv, "could not find vid %d in cache\n", vid); if (mdev->device_up && priv->port_up) { err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); -- cgit v1.2.3 From 0cc5c8bf11852dec3225fda2f53a599243095d23 Mon Sep 17 00:00:00 2001 From: Eugenia Emantayev Date: Tue, 25 Jun 2013 12:09:33 +0300 Subject: net/mlx4_en: Fix a race between napi poll function and RX ring cleanup The RX rings were cleaned while there was still possible RX traffic completion handling. Change the sequance of events so that the port is closed and the QPs are being stopped before RX cleanup. Signed-off-by: Eugenia Emantayev Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f256a734e021..f1dcddcb8050 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1635,6 +1635,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) return; } + /* close port*/ + mlx4_CLOSE_PORT(mdev->dev, priv->port); + /* Synchronize with tx routine */ netif_tx_lock_bh(dev); if (detach) @@ -1735,14 +1738,11 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) } local_bh_enable(); - mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); while (test_bit(NAPI_STATE_SCHED, &cq->napi.state)) msleep(1); + mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); mlx4_en_deactivate_cq(priv, cq); } - - /* close port*/ - mlx4_CLOSE_PORT(mdev->dev, priv->port); } static void mlx4_en_restart(struct work_struct *work) -- cgit v1.2.3 From b944ebec787be9396978b0f7773f99e751330196 Mon Sep 17 00:00:00 2001 From: Yevgeny Petrilin Date: Tue, 25 Jun 2013 12:09:34 +0300 Subject: net/mlx4_en: Add prints when TX timeout occurs Print a warning when a TX timeout is detected Signed-off-by: Yevgeny Petrilin Signed-off-by: Eugenia Emantayev Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f1dcddcb8050..caf204770569 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1236,10 +1236,19 @@ static void mlx4_en_tx_timeout(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + int i; if (netif_msg_timer(priv)) en_warn(priv, "Tx timeout called on port:%d\n", priv->port); + for (i = 0; i < priv->tx_ring_num; i++) { + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) + continue; + en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n", + i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn, + priv->tx_ring[i].cons, priv->tx_ring[i].prod); + } + priv->port_stats.tx_timeout++; en_dbg(DRV, priv, "Scheduling watchdog\n"); queue_work(mdev->workqueue, &priv->watchdog_task); -- cgit v1.2.3 From 42f1e9020e22d64d18292c6cb9182f4beeb43cad Mon Sep 17 00:00:00 2001 From: Dotan Barak Date: Tue, 25 Jun 2013 12:09:35 +0300 Subject: net/mlx4_en: Remove an unnecessary test Since this variable is now part of a structure and not allocated dynamically, this test is irrelevant now. Signed-off-by: Dotan Barak Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index 0f91222ea3d7..9d4a1ea030d8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -207,9 +207,6 @@ static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev, struct mlx4_en_priv *priv = netdev_priv(dev); int i; - if (!priv->maxrate) - return -EINVAL; - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) maxrate->tc_maxrate[i] = priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB; -- cgit v1.2.3 From 618fad954b9becf095f1b52391af29f743ff4662 Mon Sep 17 00:00:00 2001 From: Dotan Barak Date: Tue, 25 Jun 2013 12:09:36 +0300 Subject: net/mlx4_core: Replace sscanf() with kstrtoint() It is not safe to use sscanf. Signed-off-by: Dotan Barak Signed-off-by: Vladimir Sokolovsky Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 2f4a26039e80..81e4529092a3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -839,11 +839,11 @@ static ssize_t set_port_ib_mtu(struct device *dev, return -EINVAL; } - err = sscanf(buf, "%d", &mtu); - if (err > 0) + err = kstrtoint(buf, 0, &mtu); + if (!err) ibta_mtu = int_to_ibta_mtu(mtu); - if (err <= 0 || ibta_mtu < 0) { + if (err || ibta_mtu < 0) { mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); return -EINVAL; } -- cgit v1.2.3 From 674925edb466b027d7c61993ebe3250fb8989ee0 Mon Sep 17 00:00:00 2001 From: Dotan Barak Date: Tue, 25 Jun 2013 12:09:37 +0300 Subject: net/mlx4_core: Add warning in case of command timeouts Warning prints when there are command timeout to help debugging future failures. Signed-off-by: Dotan Barak Signed-off-by: Amir Vadai Signed-off-by: David S. 
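The sscanf()-to-kstrtoint() conversion in the set_port_ib_mtu() patch above follows the usual rule for parsing a single integer from a sysfs buffer: kstrtoint() reports overflow and trailing garbage as errors, while sscanf("%d") silently accepts both. Below is a minimal sketch of a store handler using it; the attribute and the range check are illustrative, not the mlx4 ones.

#include <linux/kernel.h>
#include <linux/device.h>

/* Illustrative sysfs store handler, not part of any real driver. */
static ssize_t example_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
{
        int val;
        int err;

        err = kstrtoint(buf, 0, &val);  /* base 0: decimal, hex or octal input */
        if (err)
                return err;             /* -EINVAL or -ERANGE, nothing partial */

        if (val < 0)                    /* range checks stay the caller's job */
                return -EINVAL;

        /* ... apply "val" to the device ... */
        return count;
}
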
Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index ea1e03896353..df04c8206ebe 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -257,6 +257,8 @@ static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op, if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { + mlx4_warn(dev, "communication channel command 0x%x timed out\n", + op); err = -EBUSY; goto out; } @@ -486,6 +488,8 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, } if (cmd_pending(dev)) { + mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", + op); err = -ETIMEDOUT; goto out; } @@ -549,6 +553,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { + mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", + op); err = -EBUSY; goto out; } -- cgit v1.2.3 From 30e514a71753ac3fd0ddea1411d5602ccbe14acf Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Tue, 25 Jun 2013 12:09:38 +0300 Subject: net/mlx4_core: Fail device init if num_vfs is negative Should not allow negative num_vfs Signed-off-by: Jack Morgenstein Signed-off-by: Vladimir Sokolovsky Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 81e4529092a3..56160a2bb57b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2077,6 +2077,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) num_vfs, MLX4_MAX_NUM_VF); return -EINVAL; } + + if (num_vfs < 0) { + pr_err("num_vfs module parameter cannot be negative\n"); + return -EINVAL; + } /* * Check for BARs. */ -- cgit v1.2.3 From e4f2379db6c6823c5d4a4c2c912df00c65de51d7 Mon Sep 17 00:00:00 2001 From: Alexey Brodkin Date: Mon, 24 Jun 2013 09:54:27 +0400 Subject: ethernet/arc/arc_emac - Add new driver Driver for non-standard on-chip ethernet device ARC EMAC 10/100, instantiated in some legacy ARC (Synopsys) FPGA Boards such as ARCAngel4/ML50x. Signed-off-by: Alexey Brodkin Cc: Andy Shevchenko Cc: Francois Romieu Cc: Joe Perches Cc: Vineet Gupta Cc: Mischa Jonker Cc: Arnd Bergmann Cc: Grant Likely Cc: Rob Herring Cc: Paul Gortmaker Cc: "David S. Miller" Cc: linux-kernel@vger.kernel.org Cc: devicetree-discuss@lists.ozlabs.org Cc: Florian Fainelli Cc: David Laight Signed-off-by: David S. 
Miller --- Documentation/devicetree/bindings/net/arc_emac.txt | 38 + drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/arc/Kconfig | 31 + drivers/net/ethernet/arc/Makefile | 6 + drivers/net/ethernet/arc/emac.h | 214 ++++++ drivers/net/ethernet/arc/emac_main.c | 806 +++++++++++++++++++++ drivers/net/ethernet/arc/emac_mdio.c | 152 ++++ 8 files changed, 1249 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/arc_emac.txt create mode 100644 drivers/net/ethernet/arc/Kconfig create mode 100644 drivers/net/ethernet/arc/Makefile create mode 100644 drivers/net/ethernet/arc/emac.h create mode 100644 drivers/net/ethernet/arc/emac_main.c create mode 100644 drivers/net/ethernet/arc/emac_mdio.c (limited to 'drivers/net/ethernet') diff --git a/Documentation/devicetree/bindings/net/arc_emac.txt b/Documentation/devicetree/bindings/net/arc_emac.txt new file mode 100644 index 000000000000..bcbc3f009158 --- /dev/null +++ b/Documentation/devicetree/bindings/net/arc_emac.txt @@ -0,0 +1,38 @@ +* Synopsys ARC EMAC 10/100 Ethernet driver (EMAC) + +Required properties: +- compatible: Should be "snps,arc-emac" +- reg: Address and length of the register set for the device +- interrupts: Should contain the EMAC interrupts +- clock-frequency: CPU frequency. It is needed to calculate and set polling +period of EMAC. +- max-speed: Maximum supported data-rate in Mbit/s. In some HW configurations +bandwidth of external memory controller might be a limiting factor. That's why +it's required to specify which data-rate is supported on current SoC or FPGA. +For example if only 10 Mbit/s is supported (10BASE-T) set "10". If 100 Mbit/s is +supported (100BASE-TX) set "100". +- phy: PHY device attached to the EMAC via MDIO bus + +Child nodes of the driver are the individual PHY devices connected to the +MDIO bus. They must have a "reg" property given the PHY address on the MDIO bus. 
+ +Optional properties: +- mac-address: 6 bytes, mac address + +Examples: + + ethernet@c0fc2000 { + compatible = "snps,arc-emac"; + reg = <0xc0fc2000 0x3c>; + interrupts = <6>; + mac-address = [ 00 11 22 33 44 55 ]; + clock-frequency = <80000000>; + max-speed = <100>; + phy = <&phy0>; + + #address-cells = <1>; + #size-cells = <0>; + phy0: ethernet-phy@0 { + reg = <1>; + }; + }; diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index a989669b353a..2037080c504d 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -24,6 +24,7 @@ source "drivers/net/ethernet/allwinner/Kconfig" source "drivers/net/ethernet/alteon/Kconfig" source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/apple/Kconfig" +source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" source "drivers/net/ethernet/cadence/Kconfig" source "drivers/net/ethernet/adi/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 009da27b6e26..390bd0bfaa27 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ +obj-$(CONFIG_NET_VENDOR_ARC) += arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ obj-$(CONFIG_NET_CADENCE) += cadence/ obj-$(CONFIG_NET_BFIN) += adi/ diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig new file mode 100644 index 000000000000..514c57fd26f1 --- /dev/null +++ b/drivers/net/ethernet/arc/Kconfig @@ -0,0 +1,31 @@ +# +# ARC EMAC network device configuration +# + +config NET_VENDOR_ARC + bool "ARC devices" + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about ARC cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_ARC + +config ARC_EMAC + tristate "ARC EMAC support" + select MII + select PHYLIB + depends on OF_IRQ + depends on OF_NET + ---help--- + On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x + non-standard on-chip ethernet device ARC EMAC 10/100 is used. + Say Y here if you have such a board. If unsure, say N. + +endif # NET_VENDOR_ARC diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile new file mode 100644 index 000000000000..00c8657637d5 --- /dev/null +++ b/drivers/net/ethernet/arc/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the ARC network device drivers. +# + +arc_emac-objs := emac_main.o emac_mdio.o +obj-$(CONFIG_ARC_EMAC) += arc_emac.o diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h new file mode 100644 index 000000000000..dc08678bf9a4 --- /dev/null +++ b/drivers/net/ethernet/arc/emac.h @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2004-2013 Synopsys, Inc. 
(www.synopsys.com) + * + * Registers and bits definitions of ARC EMAC + */ + +#ifndef ARC_EMAC_H +#define ARC_EMAC_H + +#include +#include +#include +#include + +/* STATUS and ENABLE Register bit masks */ +#define TXINT_MASK (1<<0) /* Transmit interrupt */ +#define RXINT_MASK (1<<1) /* Receive interrupt */ +#define ERR_MASK (1<<2) /* Error interrupt */ +#define TXCH_MASK (1<<3) /* Transmit chaining error interrupt */ +#define MSER_MASK (1<<4) /* Missed packet counter error */ +#define RXCR_MASK (1<<8) /* RXCRCERR counter rolled over */ +#define RXFR_MASK (1<<9) /* RXFRAMEERR counter rolled over */ +#define RXFL_MASK (1<<10) /* RXOFLOWERR counter rolled over */ +#define MDIO_MASK (1<<12) /* MDIO complete interrupt */ +#define TXPL_MASK (1<<31) /* Force polling of BD by EMAC */ + +/* CONTROL Register bit masks */ +#define EN_MASK (1<<0) /* VMAC enable */ +#define TXRN_MASK (1<<3) /* TX enable */ +#define RXRN_MASK (1<<4) /* RX enable */ +#define DSBC_MASK (1<<8) /* Disable receive broadcast */ +#define ENFL_MASK (1<<10) /* Enable Full-duplex */ +#define PROM_MASK (1<<11) /* Promiscuous mode */ + +/* Buffer descriptor INFO bit masks */ +#define OWN_MASK (1<<31) /* 0-CPU owns buffer, 1-EMAC owns buffer */ +#define FIRST_MASK (1<<16) /* First buffer in chain */ +#define LAST_MASK (1<<17) /* Last buffer in chain */ +#define LEN_MASK 0x000007FF /* last 11 bits */ +#define CRLS (1<<21) +#define DEFR (1<<22) +#define DROP (1<<23) +#define RTRY (1<<24) +#define LTCL (1<<28) +#define UFLO (1<<29) + +#define FOR_EMAC OWN_MASK +#define FOR_CPU 0 + +/* ARC EMAC register set combines entries for MAC and MDIO */ +enum { + R_ID = 0, + R_STATUS, + R_ENABLE, + R_CTRL, + R_POLLRATE, + R_RXERR, + R_MISS, + R_TX_RING, + R_RX_RING, + R_ADDRL, + R_ADDRH, + R_LAFL, + R_LAFH, + R_MDIO, +}; + +#define TX_TIMEOUT (400*HZ/1000) /* Transmission timeout */ + +#define ARC_EMAC_NAPI_WEIGHT 40 /* Workload for NAPI */ + +#define EMAC_BUFFER_SIZE 1536 /* EMAC buffer size */ + +/** + * struct arc_emac_bd - EMAC buffer descriptor (BD). + * + * @info: Contains status information on the buffer itself. + * @data: 32-bit byte addressable pointer to the packet data. + */ +struct arc_emac_bd { + __le32 info; + dma_addr_t data; +}; + +/* Number of Rx/Tx BD's */ +#define RX_BD_NUM 128 +#define TX_BD_NUM 128 + +#define RX_RING_SZ (RX_BD_NUM * sizeof(struct arc_emac_bd)) +#define TX_RING_SZ (TX_BD_NUM * sizeof(struct arc_emac_bd)) + +/** + * struct buffer_state - Stores Rx/Tx buffer state. + * @sk_buff: Pointer to socket buffer. + * @addr: Start address of DMA-mapped memory region. + * @len: Length of DMA-mapped memory region. + */ +struct buffer_state { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(addr); + DEFINE_DMA_UNMAP_LEN(len); +}; + +/** + * struct arc_emac_priv - Storage of EMAC's private information. + * @dev: Pointer to the current device. + * @ndev: Pointer to the current network device. + * @phy_dev: Pointer to attached PHY device. + * @bus: Pointer to the current MII bus. + * @regs: Base address of EMAC memory-mapped control registers. + * @napi: Structure for NAPI. + * @stats: Network device statistics. + * @rxbd: Pointer to Rx BD ring. + * @txbd: Pointer to Tx BD ring. + * @rxbd_dma: DMA handle for Rx BD ring. + * @txbd_dma: DMA handle for Tx BD ring. + * @rx_buff: Storage for Rx buffers states. + * @tx_buff: Storage for Tx buffers states. + * @txbd_curr: Index of Tx BD to use on the next "ndo_start_xmit". + * @txbd_dirty: Index of Tx BD to free on the next Tx interrupt. 
+ * @last_rx_bd: Index of the last Rx BD we've got from EMAC. + * @link: PHY's last seen link state. + * @duplex: PHY's last set duplex mode. + * @speed: PHY's last set speed. + * @max_speed: Maximum supported by current system network data-rate. + */ +struct arc_emac_priv { + /* Devices */ + struct device *dev; + struct net_device *ndev; + struct phy_device *phy_dev; + struct mii_bus *bus; + + void __iomem *regs; + + struct napi_struct napi; + struct net_device_stats stats; + + struct arc_emac_bd *rxbd; + struct arc_emac_bd *txbd; + + dma_addr_t rxbd_dma; + dma_addr_t txbd_dma; + + struct buffer_state rx_buff[RX_BD_NUM]; + struct buffer_state tx_buff[TX_BD_NUM]; + unsigned int txbd_curr; + unsigned int txbd_dirty; + + unsigned int last_rx_bd; + + unsigned int link; + unsigned int duplex; + unsigned int speed; + unsigned int max_speed; +}; + +/** + * arc_reg_set - Sets EMAC register with provided value. + * @priv: Pointer to ARC EMAC private data structure. + * @reg: Register offset from base address. + * @value: Value to set in register. + */ +static inline void arc_reg_set(struct arc_emac_priv *priv, int reg, int value) +{ + iowrite32(value, priv->regs + reg * sizeof(int)); +} + +/** + * arc_reg_get - Gets value of specified EMAC register. + * @priv: Pointer to ARC EMAC private data structure. + * @reg: Register offset from base address. + * + * returns: Value of requested register. + */ +static inline unsigned int arc_reg_get(struct arc_emac_priv *priv, int reg) +{ + return ioread32(priv->regs + reg * sizeof(int)); +} + +/** + * arc_reg_or - Applies mask to specified EMAC register - ("reg" | "mask"). + * @priv: Pointer to ARC EMAC private data structure. + * @reg: Register offset from base address. + * @mask: Mask to apply to specified register. + * + * This function reads initial register value, then applies provided mask + * to it and then writes register back. + */ +static inline void arc_reg_or(struct arc_emac_priv *priv, int reg, int mask) +{ + unsigned int value = arc_reg_get(priv, reg); + arc_reg_set(priv, reg, value | mask); +} + +/** + * arc_reg_clr - Applies mask to specified EMAC register - ("reg" & ~"mask"). + * @priv: Pointer to ARC EMAC private data structure. + * @reg: Register offset from base address. + * @mask: Mask to apply to specified register. + * + * This function reads initial register value, then applies provided mask + * to it and then writes register back. + */ +static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask) +{ + unsigned int value = arc_reg_get(priv, reg); + arc_reg_set(priv, reg, value & ~mask); +} + +int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv); +int arc_mdio_remove(struct arc_emac_priv *priv); + +#endif /* ARC_EMAC_H */ diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c new file mode 100644 index 000000000000..20345f6bf894 --- /dev/null +++ b/drivers/net/ethernet/arc/emac_main.c @@ -0,0 +1,806 @@ +/* + * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Driver for the ARC EMAC 10100 (hardware revision 5) + * + * Contributors: + * Amit Bhor + * Sameer Dhavale + * Vineet Gupta + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "emac.h" + +#define DRV_NAME "arc_emac" +#define DRV_VERSION "1.0" + +/** + * arc_emac_adjust_link - Adjust the PHY link duplex. + * @ndev: Pointer to the net_device structure. + * + * This function is called to change the duplex setting after auto negotiation + * is done by the PHY. + */ +static void arc_emac_adjust_link(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + struct phy_device *phy_dev = priv->phy_dev; + unsigned int reg, state_changed = 0; + + if (priv->link != phy_dev->link) { + priv->link = phy_dev->link; + state_changed = 1; + } + + if (priv->speed != phy_dev->speed) { + priv->speed = phy_dev->speed; + state_changed = 1; + } + + if (priv->duplex != phy_dev->duplex) { + reg = arc_reg_get(priv, R_CTRL); + + if (DUPLEX_FULL == phy_dev->duplex) + reg |= ENFL_MASK; + else + reg &= ~ENFL_MASK; + + arc_reg_set(priv, R_CTRL, reg); + priv->duplex = phy_dev->duplex; + state_changed = 1; + } + + if (state_changed) + phy_print_status(phy_dev); +} + +/** + * arc_emac_get_settings - Get PHY settings. + * @ndev: Pointer to net_device structure. + * @cmd: Pointer to ethtool_cmd structure. + * + * This implements ethtool command for getting PHY settings. If PHY could + * not be found, the function returns -ENODEV. This function calls the + * relevant PHY ethtool API to get the PHY settings. + * Issue "ethtool ethX" under linux prompt to execute this function. + */ +static int arc_emac_get_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + + return phy_ethtool_gset(priv->phy_dev, cmd); +} + +/** + * arc_emac_set_settings - Set PHY settings as passed in the argument. + * @ndev: Pointer to net_device structure. + * @cmd: Pointer to ethtool_cmd structure. + * + * This implements ethtool command for setting various PHY settings. If PHY + * could not be found, the function returns -ENODEV. This function calls the + * relevant PHY ethtool API to set the PHY. + * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this + * function. + */ +static int arc_emac_set_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + return phy_ethtool_sset(priv->phy_dev, cmd); +} + +/** + * arc_emac_get_drvinfo - Get EMAC driver information. + * @ndev: Pointer to net_device structure. + * @info: Pointer to ethtool_drvinfo structure. + * + * This implements ethtool command for getting the driver information. + * Issue "ethtool -i ethX" under linux prompt to execute this function. + */ +static void arc_emac_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); +} + +static const struct ethtool_ops arc_emac_ethtool_ops = { + .get_settings = arc_emac_get_settings, + .set_settings = arc_emac_set_settings, + .get_drvinfo = arc_emac_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +#define FIRST_OR_LAST_MASK (FIRST_MASK | LAST_MASK) + +/** + * arc_emac_tx_clean - clears processed by EMAC Tx BDs. + * @ndev: Pointer to the network device. 
+ */ +static void arc_emac_tx_clean(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &priv->stats; + unsigned int i; + + for (i = 0; i < TX_BD_NUM; i++) { + unsigned int *txbd_dirty = &priv->txbd_dirty; + struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty]; + struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty]; + struct sk_buff *skb = tx_buff->skb; + unsigned int info = le32_to_cpu(txbd->info); + + *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; + + if ((info & FOR_EMAC) || !txbd->data) + break; + + if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) { + stats->tx_errors++; + stats->tx_dropped++; + + if (info & DEFR) + stats->tx_carrier_errors++; + + if (info & LTCL) + stats->collisions++; + + if (info & UFLO) + stats->tx_fifo_errors++; + } else if (likely(info & FIRST_OR_LAST_MASK)) { + stats->tx_packets++; + stats->tx_bytes += skb->len; + } + + dma_unmap_single(&ndev->dev, dma_unmap_addr(&tx_buff, addr), + dma_unmap_len(&tx_buff, len), DMA_TO_DEVICE); + + /* return the sk_buff to system */ + dev_kfree_skb_irq(skb); + + txbd->data = 0; + txbd->info = 0; + + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + } +} + +/** + * arc_emac_rx - processing of Rx packets. + * @ndev: Pointer to the network device. + * @budget: How many BDs to process on 1 call. + * + * returns: Number of processed BDs + * + * Iterate through Rx BDs and deliver received packages to upper layer. + */ +static int arc_emac_rx(struct net_device *ndev, int budget) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int work_done; + + for (work_done = 0; work_done <= budget; work_done++) { + unsigned int *last_rx_bd = &priv->last_rx_bd; + struct net_device_stats *stats = &priv->stats; + struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; + struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd]; + unsigned int buflen = EMAC_BUFFER_SIZE; + unsigned int pktlen, info = le32_to_cpu(rxbd->info); + struct sk_buff *skb; + dma_addr_t addr; + + if (unlikely((info & OWN_MASK) == FOR_EMAC)) + break; + + /* Make a note that we saw a packet at this BD. + * So next time, driver starts from this + 1 + */ + *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; + + if (unlikely((info & FIRST_OR_LAST_MASK) != + FIRST_OR_LAST_MASK)) { + /* We pre-allocate buffers of MTU size so incoming + * packets won't be split/chained. 
+ */ + if (net_ratelimit()) + netdev_err(ndev, "incomplete packet received\n"); + + /* Return ownership to EMAC */ + rxbd->info = cpu_to_le32(FOR_EMAC | buflen); + stats->rx_errors++; + stats->rx_length_errors++; + continue; + } + + pktlen = info & LEN_MASK; + stats->rx_packets++; + stats->rx_bytes += pktlen; + skb = rx_buff->skb; + skb_put(skb, pktlen); + skb->dev = ndev; + skb->protocol = eth_type_trans(skb, ndev); + + dma_unmap_single(&ndev->dev, dma_unmap_addr(&rx_buff, addr), + dma_unmap_len(&rx_buff, len), DMA_FROM_DEVICE); + + /* Prepare the BD for next cycle */ + rx_buff->skb = netdev_alloc_skb_ip_align(ndev, buflen); + if (unlikely(!rx_buff->skb)) { + stats->rx_errors++; + /* Because receive_skb is below, increment rx_dropped */ + stats->rx_dropped++; + continue; + } + + /* receive_skb only if new skb was allocated to avoid holes */ + netif_receive_skb(skb); + + addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, + buflen, DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, addr)) { + if (net_ratelimit()) + netdev_err(ndev, "cannot dma map\n"); + dev_kfree_skb(rx_buff->skb); + stats->rx_errors++; + continue; + } + dma_unmap_addr_set(&rx_buff, mapping, addr); + dma_unmap_len_set(&rx_buff, len, buflen); + + rxbd->data = cpu_to_le32(rx_buff->skb->data); + + /* Make sure pointer to data buffer is set */ + wmb(); + + /* Return ownership to EMAC */ + rxbd->info = cpu_to_le32(FOR_EMAC | buflen); + } + + return work_done; +} + +/** + * arc_emac_poll - NAPI poll handler. + * @napi: Pointer to napi_struct structure. + * @budget: How many BDs to process on 1 call. + * + * returns: Number of processed BDs + */ +static int arc_emac_poll(struct napi_struct *napi, int budget) +{ + struct net_device *ndev = napi->dev; + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int work_done; + + arc_emac_tx_clean(ndev); + + work_done = arc_emac_rx(ndev, budget); + if (work_done < budget) { + napi_complete(napi); + arc_reg_or(priv, R_ENABLE, RXINT_MASK); + } + + return work_done; +} + +/** + * arc_emac_intr - Global interrupt handler for EMAC. + * @irq: irq number. + * @dev_instance: device instance. + * + * returns: IRQ_HANDLED for all cases. + * + * ARC EMAC has only 1 interrupt line, and depending on bits raised in + * STATUS register we may tell what is a reason for interrupt to fire. + */ +static irqreturn_t arc_emac_intr(int irq, void *dev_instance) +{ + struct net_device *ndev = dev_instance; + struct arc_emac_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &priv->stats; + unsigned int status; + + status = arc_reg_get(priv, R_STATUS); + status &= ~MDIO_MASK; + + /* Reset all flags except "MDIO complete" */ + arc_reg_set(priv, R_STATUS, status); + + if (status & RXINT_MASK) { + if (likely(napi_schedule_prep(&priv->napi))) { + arc_reg_clr(priv, R_ENABLE, RXINT_MASK); + __napi_schedule(&priv->napi); + } + } + + if (status & ERR_MASK) { + /* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding + * 8-bit error counter overrun. + */ + + if (status & MSER_MASK) { + stats->rx_missed_errors += 0x100; + stats->rx_errors += 0x100; + } + + if (status & RXCR_MASK) { + stats->rx_crc_errors += 0x100; + stats->rx_errors += 0x100; + } + + if (status & RXFR_MASK) { + stats->rx_frame_errors += 0x100; + stats->rx_errors += 0x100; + } + + if (status & RXFL_MASK) { + stats->rx_over_errors += 0x100; + stats->rx_errors += 0x100; + } + } + + return IRQ_HANDLED; +} + +/** + * arc_emac_open - Open the network device. + * @ndev: Pointer to the network device. 
+ * + * returns: 0, on success or non-zero error value on failure. + * + * This function sets the MAC address, requests and enables an IRQ + * for the EMAC device and starts the Tx queue. + * It also connects to the phy device. + */ +static int arc_emac_open(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + struct phy_device *phy_dev = priv->phy_dev; + struct arc_emac_bd *bd; + struct sk_buff *skb; + int i; + + phy_dev->autoneg = AUTONEG_ENABLE; + phy_dev->speed = 0; + phy_dev->duplex = 0; + phy_dev->advertising = phy_dev->supported; + + if (priv->max_speed > 100) { + phy_dev->advertising &= PHY_GBIT_FEATURES; + } else if (priv->max_speed <= 100) { + phy_dev->advertising &= PHY_BASIC_FEATURES; + if (priv->max_speed <= 10) { + phy_dev->advertising &= ~SUPPORTED_100baseT_Half; + phy_dev->advertising &= ~SUPPORTED_100baseT_Full; + } + } + + /* Allocate and set buffers for Rx BD's */ + bd = priv->rxbd; + for (i = 0; i < RX_BD_NUM; i++) { + skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE); + if (unlikely(!skb)) + return -ENOMEM; + + priv->rx_buff[i].skb = skb; + bd->data = cpu_to_le32(skb->data); + + /* Make sure pointer to data buffer is set */ + wmb(); + + /* Set ownership to EMAC */ + bd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); + bd++; + } + + priv->last_rx_bd = 0; + + /* Clean Tx BD's */ + memset(priv->txbd, 0, TX_RING_SZ); + + /* Initialize logical address filter */ + arc_reg_set(priv, R_LAFL, 0); + arc_reg_set(priv, R_LAFH, 0); + + /* Set BD ring pointers for device side */ + arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma); + arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); + + /* Enable interrupts */ + arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK); + + /* Set CONTROL */ + arc_reg_set(priv, R_CTRL, + (RX_BD_NUM << 24) | /* RX BD table length */ + (TX_BD_NUM << 16) | /* TX BD table length */ + TXRN_MASK | RXRN_MASK); + + napi_enable(&priv->napi); + + /* Enable EMAC */ + arc_reg_or(priv, R_CTRL, EN_MASK); + + phy_start_aneg(priv->phy_dev); + + netif_start_queue(ndev); + + return 0; +} + +/** + * arc_emac_stop - Close the network device. + * @ndev: Pointer to the network device. + * + * This function stops the Tx queue, disables interrupts and frees the IRQ for + * the EMAC device. + * It also disconnects the PHY device associated with the EMAC device. + */ +static int arc_emac_stop(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + + napi_disable(&priv->napi); + netif_stop_queue(ndev); + + /* Disable interrupts */ + arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK); + + /* Disable EMAC */ + arc_reg_clr(priv, R_CTRL, EN_MASK); + + return 0; +} + +/** + * arc_emac_stats - Get system network statistics. + * @ndev: Pointer to net_device structure. + * + * Returns the address of the device statistics structure. + * Statistics are updated in interrupt handler. 
+ */ +static struct net_device_stats *arc_emac_stats(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &priv->stats; + unsigned long miss, rxerr; + u8 rxcrc, rxfram, rxoflow; + + rxerr = arc_reg_get(priv, R_RXERR); + miss = arc_reg_get(priv, R_MISS); + + rxcrc = rxerr; + rxfram = rxerr >> 8; + rxoflow = rxerr >> 16; + + stats->rx_errors += miss; + stats->rx_errors += rxcrc + rxfram + rxoflow; + + stats->rx_over_errors += rxoflow; + stats->rx_frame_errors += rxfram; + stats->rx_crc_errors += rxcrc; + stats->rx_missed_errors += miss; + + return stats; +} + +/** + * arc_emac_tx - Starts the data transmission. + * @skb: sk_buff pointer that contains data to be Transmitted. + * @ndev: Pointer to net_device structure. + * + * returns: NETDEV_TX_OK, on success + * NETDEV_TX_BUSY, if any of the descriptors are not free. + * + * This function is invoked from upper layers to initiate transmission. + */ +static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int len, *txbd_curr = &priv->txbd_curr; + struct net_device_stats *stats = &priv->stats; + __le32 *info = &priv->txbd[*txbd_curr].info; + dma_addr_t addr; + + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + len = max_t(unsigned int, ETH_ZLEN, skb->len); + + /* EMAC still holds this buffer in its possession. + * CPU must not modify this buffer descriptor + */ + if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) { + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + addr = dma_map_single(&ndev->dev, (void *)skb->data, len, + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&ndev->dev, addr))) { + stats->tx_dropped++; + stats->tx_errors++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], mapping, addr); + dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len); + + priv->tx_buff[*txbd_curr].skb = skb; + priv->txbd[*txbd_curr].data = cpu_to_le32(skb->data); + + /* Make sure pointer to data buffer is set */ + wmb(); + + *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); + + /* Increment index to point to the next BD */ + *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; + + /* Get "info" of the next BD */ + info = &priv->txbd[*txbd_curr].info; + + /* Check if if Tx BD ring is full - next BD is still owned by EMAC */ + if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) + netif_stop_queue(ndev); + + arc_reg_set(priv, R_STATUS, TXPL_MASK); + + skb_tx_timestamp(skb); + + return NETDEV_TX_OK; +} + +/** + * arc_emac_set_address - Set the MAC address for this device. + * @ndev: Pointer to net_device structure. + * @p: 6 byte Address to be written as MAC address. + * + * This function copies the HW address from the sockaddr structure to the + * net_device structure and updates the address in HW. + * + * returns: -EBUSY if the net device is busy or 0 if the address is set + * successfully. 
+ */ +static int arc_emac_set_address(struct net_device *ndev, void *p) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + struct sockaddr *addr = p; + unsigned int addr_low, addr_hi; + + if (netif_running(ndev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + + addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]); + addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]); + + arc_reg_set(priv, R_ADDRL, addr_low); + arc_reg_set(priv, R_ADDRH, addr_hi); + + return 0; +} + +static const struct net_device_ops arc_emac_netdev_ops = { + .ndo_open = arc_emac_open, + .ndo_stop = arc_emac_stop, + .ndo_start_xmit = arc_emac_tx, + .ndo_set_mac_address = arc_emac_set_address, + .ndo_get_stats = arc_emac_stats, +}; + +static int arc_emac_probe(struct platform_device *pdev) +{ + struct resource res_regs, res_irq; + struct device_node *phy_node; + struct arc_emac_priv *priv; + struct net_device *ndev; + const char *mac_addr; + unsigned int id, clock_frequency; + int err; + + if (!pdev->dev.of_node) + return -ENODEV; + + /* Get PHY from device tree */ + phy_node = of_parse_phandle(pdev->dev.of_node, "phy", 0); + if (!phy_node) { + dev_err(&pdev->dev, "failed to retrieve phy description from device tree\n"); + return -ENODEV; + } + + /* Get EMAC registers base address from device tree */ + err = of_address_to_resource(pdev->dev.of_node, 0, &res_regs); + if (err) { + dev_err(&pdev->dev, "failed to retrieve registers base from device tree\n"); + return -ENODEV; + } + + /* Get CPU clock frequency from device tree */ + if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", + &clock_frequency)) { + dev_err(&pdev->dev, "failed to retrieve from device tree\n"); + return -EINVAL; + } + + /* Get IRQ from device tree */ + err = of_irq_to_resource(pdev->dev.of_node, 0, &res_irq); + if (!err) { + dev_err(&pdev->dev, "failed to retrieve value from device tree\n"); + return -ENODEV; + } + + ndev = alloc_etherdev(sizeof(struct arc_emac_priv)); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + ndev->netdev_ops = &arc_emac_netdev_ops; + ndev->ethtool_ops = &arc_emac_ethtool_ops; + ndev->watchdog_timeo = TX_TIMEOUT; + /* FIXME :: no multicast support yet */ + ndev->flags &= ~IFF_MULTICAST; + + priv = netdev_priv(ndev); + priv->dev = &pdev->dev; + priv->ndev = ndev; + + priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs); + if (IS_ERR(priv->regs)) { + err = PTR_ERR(priv->regs); + goto out; + } + dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs); + + id = arc_reg_get(priv, R_ID); + + /* Check for EMAC revision 5 or 7, magic number */ + if (!(id == 0x0005fd02 || id == 0x0007fd02)) { + dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id); + err = -ENODEV; + goto out; + } + dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id); + + /* Set poll rate so that it polls every 1 ms */ + arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000); + + /* Get max speed of operation from device tree */ + if (of_property_read_u32(pdev->dev.of_node, "max-speed", + &priv->max_speed)) { + dev_err(&pdev->dev, "failed to retrieve from device tree\n"); + err = -EINVAL; + goto out; + } + + ndev->irq = res_irq.start; + dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq); + + /* Register interrupt handler for device */ + err = devm_request_irq(&pdev->dev, ndev->irq, arc_emac_intr, 0, + ndev->name, ndev); + if (err) { + dev_err(&pdev->dev, "could not allocate IRQ\n"); + goto out; + } 
+ + /* Get MAC address from device tree */ + mac_addr = of_get_mac_address(pdev->dev.of_node); + + if (!mac_addr || !is_valid_ether_addr(mac_addr)) + eth_hw_addr_random(ndev); + else + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + + dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr); + + /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */ + priv->rxbd = dmam_alloc_coherent(&pdev->dev, RX_RING_SZ + TX_RING_SZ, + &priv->rxbd_dma, GFP_KERNEL); + + if (!priv->rxbd) { + dev_err(&pdev->dev, "failed to allocate data buffers\n"); + err = -ENOMEM; + goto out; + } + + priv->txbd = priv->rxbd + RX_BD_NUM; + + priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ; + dev_dbg(&pdev->dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n", + (unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma); + + err = arc_mdio_probe(pdev, priv); + if (err) { + dev_err(&pdev->dev, "failed to probe MII bus\n"); + goto out; + } + + priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + if (!priv->phy_dev) { + dev_err(&pdev->dev, "of_phy_connect() failed\n"); + err = -ENODEV; + goto out; + } + + dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n", + priv->phy_dev->drv->name, priv->phy_dev->phy_id); + + netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT); + + err = register_netdev(ndev); + if (err) { + netif_napi_del(&priv->napi); + dev_err(&pdev->dev, "failed to register network device\n"); + goto out; + } + + return 0; + +out: + free_netdev(ndev); + return err; +} + +static int arc_emac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct arc_emac_priv *priv = netdev_priv(ndev); + + phy_disconnect(priv->phy_dev); + priv->phy_dev = NULL; + arc_mdio_remove(priv); + unregister_netdev(ndev); + netif_napi_del(&priv->napi); + free_netdev(ndev); + + return 0; +} + +static const struct of_device_id arc_emac_dt_ids[] = { + { .compatible = "snps,arc-emac" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, arc_emac_dt_ids); + +static struct platform_driver arc_emac_driver = { + .probe = arc_emac_probe, + .remove = arc_emac_remove, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = arc_emac_dt_ids, + }, +}; + +module_platform_driver(arc_emac_driver); + +MODULE_AUTHOR("Alexey Brodkin "); +MODULE_DESCRIPTION("ARC EMAC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c new file mode 100644 index 000000000000..26ba2423f33a --- /dev/null +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com) + * + * MDIO implementation for ARC EMAC + */ + +#include +#include +#include + +#include "emac.h" + +/* Number of seconds we wait for "MDIO complete" flag to appear */ +#define ARC_MDIO_COMPLETE_POLL_COUNT 1 + +/** + * arc_mdio_complete_wait - Waits until MDIO transaction is completed. + * @priv: Pointer to ARC EMAC private data structure. + * + * returns: 0 on success, -ETIMEDOUT on a timeout. + */ +static int arc_mdio_complete_wait(struct arc_emac_priv *priv) +{ + unsigned int i; + + for (i = 0; i < ARC_MDIO_COMPLETE_POLL_COUNT * 40; i++) { + unsigned int status = arc_reg_get(priv, R_STATUS); + + status &= MDIO_MASK; + + if (status) { + /* Reset "MDIO complete" flag */ + arc_reg_set(priv, R_STATUS, status); + return 0; + } + + msleep(25); + } + + return -ETIMEDOUT; +} + +/** + * arc_mdio_read - MDIO interface read function. 
+ * @bus: Pointer to MII bus structure. + * @phy_addr: Address of the PHY device. + * @reg_num: PHY register to read. + * + * returns: The register contents on success, -ETIMEDOUT on a timeout. + * + * Reads the contents of the requested register from the requested PHY + * address. + */ +static int arc_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) +{ + struct arc_emac_priv *priv = bus->priv; + unsigned int value; + int error; + + arc_reg_set(priv, R_MDIO, + 0x60020000 | (phy_addr << 23) | (reg_num << 18)); + + error = arc_mdio_complete_wait(priv); + if (error < 0) + return error; + + value = arc_reg_get(priv, R_MDIO) & 0xffff; + + dev_dbg(priv->dev, "arc_mdio_read(phy_addr=%i, reg_num=%x) = %x\n", + phy_addr, reg_num, value); + + return value; +} + +/** + * arc_mdio_write - MDIO interface write function. + * @bus: Pointer to MII bus structure. + * @phy_addr: Address of the PHY device. + * @reg_num: PHY register to write to. + * @value: Value to be written into the register. + * + * returns: 0 on success, -ETIMEDOUT on a timeout. + * + * Writes the value to the requested register. + */ +static int arc_mdio_write(struct mii_bus *bus, int phy_addr, + int reg_num, u16 value) +{ + struct arc_emac_priv *priv = bus->priv; + + dev_dbg(priv->dev, + "arc_mdio_write(phy_addr=%i, reg_num=%x, value=%x)\n", + phy_addr, reg_num, value); + + arc_reg_set(priv, R_MDIO, + 0x50020000 | (phy_addr << 23) | (reg_num << 18) | value); + + return arc_mdio_complete_wait(priv); +} + +/** + * arc_mdio_probe - MDIO probe function. + * @pdev: Pointer to platform device. + * @priv: Pointer to ARC EMAC private data structure. + * + * returns: 0 on success, -ENOMEM when mdiobus_alloc + * (to allocate memory for MII bus structure) fails. + * + * Sets up and registers the MDIO interface. + */ +int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv) +{ + struct mii_bus *bus; + int error; + + bus = mdiobus_alloc(); + if (!bus) + return -ENOMEM; + + priv->bus = bus; + bus->priv = priv; + bus->parent = priv->dev; + bus->name = "Synopsys MII Bus", + bus->read = &arc_mdio_read; + bus->write = &arc_mdio_write; + + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + + error = of_mdiobus_register(bus, pdev->dev.of_node); + if (error) { + dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name); + mdiobus_free(bus); + return error; + } + + return 0; +} + +/** + * arc_mdio_remove - MDIO remove function. + * @priv: Pointer to ARC EMAC private data structure. + * + * Unregisters the MDIO and frees any associate memory for MII bus. + */ +int arc_mdio_remove(struct arc_emac_priv *priv) +{ + mdiobus_unregister(priv->bus); + mdiobus_free(priv->bus); + priv->bus = NULL; + + return 0; +} -- cgit v1.2.3 From 6892b41d9701283085b655c6086fb57a5d63fa47 Mon Sep 17 00:00:00 2001 From: "Lad, Prabhakar" Date: Tue, 25 Jun 2013 21:24:51 +0530 Subject: net: davinci: emac: Convert to devm_* api Use devm_ioremap_resource instead of devm_request_mem_region()/devm_ioremap() and devm_request_irq() instead of request_irq(). This ensures more consistent error values and simplifies error paths. Signed-off-by: Lad, Prabhakar Signed-off-by: David S. 
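The devm_* conversion described above relies on the managed-resource pattern: anything obtained through devm_ioremap_resource() or devm_request_irq() is released automatically when probe fails or the device is unbound, which is what lets the hand-written free_irq()/release_mem_region() unwind paths be deleted. A condensed sketch of the pattern with generic names (this is not the davinci_emac probe itself):

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/err.h>

/* Placeholder interrupt handler for the sketch. */
static irqreturn_t example_isr(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;
        int irq, err;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res); /* checks res, requests and maps it */
        if (IS_ERR(base))
                return PTR_ERR(base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        err = devm_request_irq(&pdev->dev, irq, example_isr, 0,
                               dev_name(&pdev->dev), pdev);
        if (err)
                return err;     /* no unwind needed: devm releases the mapping too */

        return 0;
}
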
Miller --- drivers/net/ethernet/ti/davinci_emac.c | 49 +++++++--------------------------- 1 file changed, 9 insertions(+), 40 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index efb6f65a8be3..7a66f9c33159 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1532,7 +1532,7 @@ static int emac_dev_open(struct net_device *ndev) struct device *emac_dev = &ndev->dev; u32 cnt; struct resource *res; - int q, m, ret; + int ret; int i = 0; int k = 0; struct emac_priv *priv = netdev_priv(ndev); @@ -1567,8 +1567,9 @@ static int emac_dev_open(struct net_device *ndev) while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { for (i = res->start; i <= res->end; i++) { - if (request_irq(i, emac_irq, IRQF_DISABLED, - ndev->name, ndev)) + if (devm_request_irq(&priv->pdev->dev, i, emac_irq, + IRQF_DISABLED, + ndev->name, ndev)) goto rollback; } k++; @@ -1641,15 +1642,7 @@ static int emac_dev_open(struct net_device *ndev) rollback: - dev_err(emac_dev, "DaVinci EMAC: request_irq() failed"); - - for (q = k; k >= 0; k--) { - for (m = i; m >= res->start; m--) - free_irq(m, ndev); - res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1); - m = res->end; - } - + dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed"); ret = -EBUSY; err: pm_runtime_put(&priv->pdev->dev); @@ -1667,9 +1660,6 @@ err: */ static int emac_dev_stop(struct net_device *ndev) { - struct resource *res; - int i = 0; - int irq_num; struct emac_priv *priv = netdev_priv(ndev); struct device *emac_dev = &ndev->dev; @@ -1685,13 +1675,6 @@ static int emac_dev_stop(struct net_device *ndev) if (priv->phydev) phy_disconnect(priv->phydev); - /* Free IRQ */ - while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { - for (irq_num = res->start; irq_num <= res->end; irq_num++) - free_irq(irq_num, priv->ndev); - i++; - } - if (netif_msg_drv(priv)) dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); @@ -1856,7 +1839,7 @@ static int davinci_emac_probe(struct platform_device *pdev) struct resource *res; struct net_device *ndev; struct emac_priv *priv; - unsigned long size, hw_ram_addr; + unsigned long hw_ram_addr; struct emac_platform_data *pdata; struct device *emac_dev; struct cpdma_params dma_params; @@ -1907,25 +1890,11 @@ static int davinci_emac_probe(struct platform_device *pdev) emac_dev = &ndev->dev; /* Get EMAC platform data */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev,"error getting res\n"); - rc = -ENOENT; - goto no_pdata; - } - priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; - size = resource_size(res); - if (!devm_request_mem_region(&pdev->dev, res->start, - size, ndev->name)) { - dev_err(&pdev->dev, "failed request_mem_region() for regs\n"); - rc = -ENXIO; - goto no_pdata; - } - - priv->remap_addr = devm_ioremap(&pdev->dev, res->start, size); - if (!priv->remap_addr) { + priv->remap_addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->remap_addr)) { dev_err(&pdev->dev, "unable to map IO\n"); - rc = -ENOMEM; + rc = PTR_ERR(priv->remap_addr); goto no_pdata; } priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; -- cgit v1.2.3 From 151328c828795574bed6ce8b37c3d0bd586e162f Mon Sep 17 00:00:00 2001 From: "Lad, Prabhakar" Date: Tue, 25 Jun 2013 21:24:52 +0530 Subject: net: davinci_emac: simplify the OF parser code This patch cleans up the OF parser code, removes unnecessary checks on 
of_property_read_*() and guards davinci_emac_of_match table with CONFIG_OF. Signed-off-by: Lad, Prabhakar Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_emac.c | 67 ++++++++++++---------------------- 1 file changed, 23 insertions(+), 44 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 7a66f9c33159..f118d7133128 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1754,29 +1754,22 @@ static const struct net_device_ops emac_netdev_ops = { #endif }; -#ifdef CONFIG_OF -static struct emac_platform_data - *davinci_emac_of_get_pdata(struct platform_device *pdev, - struct emac_priv *priv) +static struct emac_platform_data * +davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) { struct device_node *np; struct emac_platform_data *pdata = NULL; const u8 *mac_addr; - u32 data; - int ret; - pdata = pdev->dev.platform_data; - if (!pdata) { - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - goto nodata; - } + if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) + return pdev->dev.platform_data; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; np = pdev->dev.of_node; - if (!np) - goto nodata; - else - pdata->version = EMAC_VERSION_2; + pdata->version = EMAC_VERSION_2; if (!is_valid_ether_addr(pdata->mac_addr)) { mac_addr = of_get_mac_address(np); @@ -1784,47 +1777,31 @@ static struct emac_platform_data memcpy(pdata->mac_addr, mac_addr, ETH_ALEN); } - ret = of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", &data); - if (!ret) - pdata->ctrl_reg_offset = data; + of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", + &pdata->ctrl_reg_offset); - ret = of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset", - &data); - if (!ret) - pdata->ctrl_mod_reg_offset = data; + of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset", + &pdata->ctrl_mod_reg_offset); - ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-offset", &data); - if (!ret) - pdata->ctrl_ram_offset = data; + of_property_read_u32(np, "ti,davinci-ctrl-ram-offset", + &pdata->ctrl_ram_offset); - ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-size", &data); - if (!ret) - pdata->ctrl_ram_size = data; + of_property_read_u32(np, "ti,davinci-ctrl-ram-size", + &pdata->ctrl_ram_size); - ret = of_property_read_u32(np, "ti,davinci-rmii-en", &data); - if (!ret) - pdata->rmii_en = data; + of_property_read_u8(np, "ti,davinci-rmii-en", &pdata->rmii_en); - ret = of_property_read_u32(np, "ti,davinci-no-bd-ram", &data); - if (!ret) - pdata->no_bd_ram = data; + pdata->no_bd_ram = of_property_read_bool(np, "ti,davinci-no-bd-ram"); priv->phy_node = of_parse_phandle(np, "phy-handle", 0); if (!priv->phy_node) pdata->phy_id = ""; pdev->dev.platform_data = pdata; -nodata: + return pdata; } -#else -static struct emac_platform_data - *davinci_emac_of_get_pdata(struct platform_device *pdev, - struct emac_priv *priv) -{ - return pdev->dev.platform_data; -} -#endif + /** * davinci_emac_probe - EMAC device probe * @pdev: The DaVinci EMAC device that we are removing @@ -2045,11 +2022,13 @@ static const struct dev_pm_ops davinci_emac_pm_ops = { .resume = davinci_emac_resume, }; +#if IS_ENABLED(CONFIG_OF) static const struct of_device_id davinci_emac_of_match[] = { {.compatible = "ti,davinci-dm6467-emac", }, {}, }; MODULE_DEVICE_TABLE(of, davinci_emac_of_match); +#endif /* davinci_emac_driver: EMAC 
platform driver structure */ static struct platform_driver davinci_emac_driver = { -- cgit v1.2.3 From 277e2a84c12e0d4af77d0f0a52623211eb6ab223 Mon Sep 17 00:00:00 2001 From: "Lad, Prabhakar" Date: Tue, 25 Jun 2013 21:24:53 +0530 Subject: net: davinci_mdio: gaurd the DT code with IS_ENABLED(CONFIG_OF) guard the davinci_mdio_of_mtable table and davinci_mdio_probe_dt() with CONFIG_OF. Signed-off-by: Lad, Prabhakar Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_mdio.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index c47f0dbcebb5..dac6f5832d18 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -291,6 +291,7 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, return 0; } +#if IS_ENABLED(CONFIG_OF) static int davinci_mdio_probe_dt(struct mdio_platform_data *data, struct platform_device *pdev) { @@ -308,7 +309,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data, return 0; } - +#endif static int davinci_mdio_probe(struct platform_device *pdev) { @@ -477,11 +478,13 @@ static const struct dev_pm_ops davinci_mdio_pm_ops = { .resume_early = davinci_mdio_resume, }; +#if IS_ENABLED(CONFIG_OF) static const struct of_device_id davinci_mdio_of_mtable[] = { { .compatible = "ti,davinci_mdio", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); +#endif static struct platform_driver davinci_mdio_driver = { .driver = { -- cgit v1.2.3 From a4a1139b242f03dfb8a5d7a86fa674bda1cf60b2 Mon Sep 17 00:00:00 2001 From: Alexey Brodkin Date: Wed, 26 Jun 2013 11:49:26 +0400 Subject: arc_emac: fix compile-time errors & warnings on PPC64 As reported by "kbuild test robot" there were some errors and warnings on attempt to build kernel with "make ARCH=powerpc allmodconfig". And this patch addresses both errors and warnings. Below is a list of introduced changes: 1. Fix compile-time errors (misspellings in "dma_unmap_single") on PPC. 2. Use DMA address instead of "skb->data" as a pointer to data buffer. This fixed warnings on pointer to int conversion on 64-bit systems. 3. Re-implemented initial allocation of Rx buffers in "arc_emac_open" in the same way they're re-allocated during operation (receiving packets). So once again DMA address could be used instead of "skb->data". 4. Explicitly use EMAC_BUFFER_SIZE for Rx buffers allocation. Signed-off-by: Alexey Brodkin Cc: netdev@vger.kernel.org Cc: Andy Shevchenko Cc: Francois Romieu Cc: Joe Perches Cc: Vineet Gupta Cc: Mischa Jonker Cc: Arnd Bergmann Cc: Grant Likely Cc: Rob Herring Cc: Paul Gortmaker Cc: linux-kernel@vger.kernel.org Cc: devicetree-discuss@lists.ozlabs.org Cc: Florian Fainelli Cc: David Laight Signed-off-by: David S. 
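The first two points of the patch above concern the DMA unmap-state helpers: dma_unmap_addr()/dma_unmap_len() and their _set() variants take a pointer to the structure that contains the DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() members, and the member name passed in must match the one used in the definition. On configurations that do not track unmap state these members and accessors compile away entirely, which is why the stray "&" and the wrong member name only surfaced once a config that does track them (powerpc allmodconfig) was built. A small sketch of correct usage with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct example_buf {
        void *cpu_addr;
        DEFINE_DMA_UNMAP_ADDR(addr);    /* member exists only when the arch needs it */
        DEFINE_DMA_UNMAP_LEN(len);
};

static int example_map(struct device *dev, struct example_buf *buf, size_t size)
{
        dma_addr_t mapping = dma_map_single(dev, buf->cpu_addr, size,
                                            DMA_TO_DEVICE);

        if (dma_mapping_error(dev, mapping))
                return -ENOMEM;

        /* Pass the struct pointer itself (no extra "&") and reuse the
         * member names from the DEFINE_* macros above. */
        dma_unmap_addr_set(buf, addr, mapping);
        dma_unmap_len_set(buf, len, size);
        return 0;
}

static void example_unmap(struct device *dev, struct example_buf *buf)
{
        dma_unmap_single(dev, dma_unmap_addr(buf, addr),
                         dma_unmap_len(buf, len), DMA_TO_DEVICE);
}
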
Miller --- drivers/net/ethernet/arc/emac_main.c | 65 +++++++++++++++++++++--------------- 1 file changed, 39 insertions(+), 26 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 20345f6bf894..f1b121ee5525 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -171,8 +171,8 @@ static void arc_emac_tx_clean(struct net_device *ndev) stats->tx_bytes += skb->len; } - dma_unmap_single(&ndev->dev, dma_unmap_addr(&tx_buff, addr), - dma_unmap_len(&tx_buff, len), DMA_TO_DEVICE); + dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr), + dma_unmap_len(tx_buff, len), DMA_TO_DEVICE); /* return the sk_buff to system */ dev_kfree_skb_irq(skb); @@ -204,7 +204,6 @@ static int arc_emac_rx(struct net_device *ndev, int budget) struct net_device_stats *stats = &priv->stats; struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd]; - unsigned int buflen = EMAC_BUFFER_SIZE; unsigned int pktlen, info = le32_to_cpu(rxbd->info); struct sk_buff *skb; dma_addr_t addr; @@ -226,7 +225,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget) netdev_err(ndev, "incomplete packet received\n"); /* Return ownership to EMAC */ - rxbd->info = cpu_to_le32(FOR_EMAC | buflen); + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); stats->rx_errors++; stats->rx_length_errors++; continue; @@ -240,11 +239,12 @@ static int arc_emac_rx(struct net_device *ndev, int budget) skb->dev = ndev; skb->protocol = eth_type_trans(skb, ndev); - dma_unmap_single(&ndev->dev, dma_unmap_addr(&rx_buff, addr), - dma_unmap_len(&rx_buff, len), DMA_FROM_DEVICE); + dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), + dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); /* Prepare the BD for next cycle */ - rx_buff->skb = netdev_alloc_skb_ip_align(ndev, buflen); + rx_buff->skb = netdev_alloc_skb_ip_align(ndev, + EMAC_BUFFER_SIZE); if (unlikely(!rx_buff->skb)) { stats->rx_errors++; /* Because receive_skb is below, increment rx_dropped */ @@ -256,7 +256,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget) netif_receive_skb(skb); addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, - buflen, DMA_FROM_DEVICE); + EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&ndev->dev, addr)) { if (net_ratelimit()) netdev_err(ndev, "cannot dma map\n"); @@ -264,16 +264,16 @@ static int arc_emac_rx(struct net_device *ndev, int budget) stats->rx_errors++; continue; } - dma_unmap_addr_set(&rx_buff, mapping, addr); - dma_unmap_len_set(&rx_buff, len, buflen); + dma_unmap_addr_set(rx_buff, addr, addr); + dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); - rxbd->data = cpu_to_le32(rx_buff->skb->data); + rxbd->data = cpu_to_le32(addr); /* Make sure pointer to data buffer is set */ wmb(); /* Return ownership to EMAC */ - rxbd->info = cpu_to_le32(FOR_EMAC | buflen); + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); } return work_done; @@ -376,8 +376,6 @@ static int arc_emac_open(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); struct phy_device *phy_dev = priv->phy_dev; - struct arc_emac_bd *bd; - struct sk_buff *skb; int i; phy_dev->autoneg = AUTONEG_ENABLE; @@ -395,25 +393,40 @@ static int arc_emac_open(struct net_device *ndev) } } + priv->last_rx_bd = 0; + /* Allocate and set buffers for Rx BD's */ - bd = priv->rxbd; for (i = 0; i < RX_BD_NUM; i++) { - skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE); - if 
(unlikely(!skb)) + dma_addr_t addr; + unsigned int *last_rx_bd = &priv->last_rx_bd; + struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd]; + struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; + + rx_buff->skb = netdev_alloc_skb_ip_align(ndev, + EMAC_BUFFER_SIZE); + if (unlikely(!rx_buff->skb)) + return -ENOMEM; + + addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, + EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, addr)) { + netdev_err(ndev, "cannot dma map\n"); + dev_kfree_skb(rx_buff->skb); return -ENOMEM; + } + dma_unmap_addr_set(rx_buff, addr, addr); + dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); - priv->rx_buff[i].skb = skb; - bd->data = cpu_to_le32(skb->data); + rxbd->data = cpu_to_le32(addr); /* Make sure pointer to data buffer is set */ wmb(); - /* Set ownership to EMAC */ - bd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); - bd++; - } + /* Return ownership to EMAC */ + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); - priv->last_rx_bd = 0; + *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; + } /* Clean Tx BD's */ memset(priv->txbd, 0, TX_RING_SZ); @@ -543,11 +556,11 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) dev_kfree_skb(skb); return NETDEV_TX_OK; } - dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], mapping, addr); + dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr); dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len); priv->tx_buff[*txbd_curr].skb = skb; - priv->txbd[*txbd_curr].data = cpu_to_le32(skb->data); + priv->txbd[*txbd_curr].data = cpu_to_le32(addr); /* Make sure pointer to data buffer is set */ wmb(); -- cgit v1.2.3 From 38ae92dc215e939897f17861d658f882d0eaab0f Mon Sep 17 00:00:00 2001 From: Chris Healy Date: Tue, 25 Jun 2013 23:18:52 -0700 Subject: fec: Add support for reading RMON registers Add ethtool operation to read RMON registers. Tested against net-next on i.MX28. v2: make conditional on #ifndef CONFIG_M5272 Signed-off-by: Chris Healy Signed-off-by: David S. 
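The diffstat below shows that most of this change lands in fec_main.c; that hunk is not reproduced here, and the commit message only states that an ethtool operation for reading the RMON counters is added. The usual shape of such an operation — shown here as a generic sketch, not the actual fec implementation — is a name/offset table plus get_sset_count(), get_strings() and get_ethtool_stats() callbacks that read each counter register:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/string.h>

/* Illustrative counters; the offsets mirror RMON_T_PACKETS/RMON_R_PACKETS,
 * but the table and priv layout are made up for the sketch. */
static const struct {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} example_stats[] = {
        { "tx_packets_rmon", 0x204 },
        { "rx_packets_rmon", 0x284 },
};

struct example_priv {
        void __iomem *hwp;      /* mapped MAC register window */
};

static int example_get_sset_count(struct net_device *ndev, int sset)
{
        return sset == ETH_SS_STATS ? ARRAY_SIZE(example_stats) : -EOPNOTSUPP;
}

static void example_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
        int i;

        if (stringset != ETH_SS_STATS)
                return;
        for (i = 0; i < ARRAY_SIZE(example_stats); i++)
                memcpy(data + i * ETH_GSTRING_LEN, example_stats[i].name,
                       ETH_GSTRING_LEN);
}

static void example_get_ethtool_stats(struct net_device *ndev,
                                      struct ethtool_stats *stats, u64 *data)
{
        struct example_priv *priv = netdev_priv(ndev);
        int i;

        for (i = 0; i < ARRAY_SIZE(example_stats); i++)
                data[i] = readl(priv->hwp + example_stats[i].offset);
}

static const struct ethtool_ops example_ethtool_ops = {
        .get_sset_count         = example_get_sset_count,
        .get_strings            = example_get_strings,
        .get_ethtool_stats      = example_get_ethtool_stats,
};
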
Miller --- drivers/net/ethernet/freescale/fec.h | 55 ++++++++++++++ drivers/net/ethernet/freescale/fec_main.c | 114 ++++++++++++++++++++++++++++++ 2 files changed, 169 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index e3ed6c5ae801..8362a0399afb 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -60,6 +60,61 @@ #define BM_MIIGSK_CFGR_RMII 0x01 #define BM_MIIGSK_CFGR_FRCONT_10M 0x40 +#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */ +#define RMON_T_PACKETS 0x204 /* RMON TX packet count */ +#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */ +#define RMON_T_MC_PKT 0x20C /* RMON TX multicast pkts */ +#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */ +#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */ +#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */ +#define RMON_T_FRAG 0x21C /* RMON TX pkts < 64 bytes, bad CRC */ +#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */ +#define RMON_T_COL 0x224 /* RMON TX collision count */ +#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */ +#define RMON_T_P65TO127 0x22C /* RMON TX 65 to 127 byte pkts */ +#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */ +#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */ +#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */ +#define RMON_T_P1024TO2047 0x23C /* RMON TX 1024 to 2047 byte pkts */ +#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */ +#define RMON_T_OCTETS 0x244 /* RMON TX octets */ +#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */ +#define IEEE_T_FRAME_OK 0x24C /* Frames tx'd OK */ +#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */ +#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */ +#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */ +#define IEEE_T_LCOL 0x25C /* Frames tx'd with late collision */ +#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */ +#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */ +#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */ +#define IEEE_T_SQE 0x26C /* Frames tx'd with SQE err */ +#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */ +#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */ +#define RMON_R_PACKETS 0x284 /* RMON RX packet count */ +#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */ +#define RMON_R_MC_PKT 0x28C /* RMON RX multicast pkts */ +#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */ +#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */ +#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */ +#define RMON_R_FRAG 0x29C /* RMON RX pkts < 64 bytes, bad CRC */ +#define RMON_R_JAB 0x2A0 /* RMON RX pkts > MAX_FL bytes, bad CRC */ +#define RMON_R_RESVD_O 0x2A4 /* Reserved */ +#define RMON_R_P64 0x2A8 /* RMON RX 64 byte pkts */ +#define RMON_R_P65TO127 0x2AC /* RMON RX 65 to 127 byte pkts */ +#define RMON_R_P128TO255 0x2B0 /* RMON RX 128 to 255 byte pkts */ +#define RMON_R_P256TO511 0x2B4 /* RMON RX 256 to 511 byte pkts */ +#define RMON_R_P512TO1023 0x2B8 /* RMON RX 512 to 1023 byte pkts */ +#define RMON_R_P1024TO2047 0x2BC /* RMON RX 1024 to 2047 byte pkts */ +#define RMON_R_P_GTE2048 0x2C0 /* RMON RX pkts > 2048 bytes */ +#define RMON_R_OCTETS 0x2C4 /* RMON RX octets */ +#define 
IEEE_R_DROP 0x2C8 /* Count frames not counted correctly */ +#define IEEE_R_FRAME_OK 0x2CC /* Frames rx'd OK */ +#define IEEE_R_CRC 0x2D0 /* Frames rx'd with CRC err */ +#define IEEE_R_ALIGN 0x2D4 /* Frames rx'd with alignment err */ +#define IEEE_R_MACERR 0x2D8 /* Receive FIFO overflow count */ +#define IEEE_R_FDXFC 0x2DC /* Flow control pause frames rx'd */ +#define IEEE_R_OCTETS_OK 0x2E0 /* Octet cnt for frames rx'd w/o err */ + #else #define FEC_ECNTRL 0x000 /* Ethernet control reg */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 46f2544fd1a7..ed6180e7db2f 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -604,6 +604,14 @@ fec_restart(struct net_device *ndev, int duplex) if (fep->bufdesc_ex) ecntl |= (1 << 4); +#ifndef CONFIG_M5272 + /* Disable, clear, and enable the MIB */ + writel(1 << 31, fep->hwp + FEC_MIB_CTRLSTAT); + for (i = RMON_T_DROP; i < IEEE_R_OCTETS_OK; i++) + writel(0, fep->hwp + i); + writel(0, fep->hwp + FEC_MIB_CTRLSTAT); +#endif + /* And last, enable the transmit and receive processing */ writel(ecntl, fep->hwp + FEC_ECNTRL); writel(0, fep->hwp + FEC_R_DES_ACTIVE); @@ -1435,6 +1443,107 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, return 0; } +#ifndef CONFIG_M5272 +static const struct fec_stat { + char name[ETH_GSTRING_LEN]; + u16 offset; +} fec_stats[] = { + /* RMON TX */ + { "tx_dropped", RMON_T_DROP }, + { "tx_packets", RMON_T_PACKETS }, + { "tx_broadcast", RMON_T_BC_PKT }, + { "tx_multicast", RMON_T_MC_PKT }, + { "tx_crc_errors", RMON_T_CRC_ALIGN }, + { "tx_undersize", RMON_T_UNDERSIZE }, + { "tx_oversize", RMON_T_OVERSIZE }, + { "tx_fragment", RMON_T_FRAG }, + { "tx_jabber", RMON_T_JAB }, + { "tx_collision", RMON_T_COL }, + { "tx_64byte", RMON_T_P64 }, + { "tx_65to127byte", RMON_T_P65TO127 }, + { "tx_128to255byte", RMON_T_P128TO255 }, + { "tx_256to511byte", RMON_T_P256TO511 }, + { "tx_512to1023byte", RMON_T_P512TO1023 }, + { "tx_1024to2047byte", RMON_T_P1024TO2047 }, + { "tx_GTE2048byte", RMON_T_P_GTE2048 }, + { "tx_octets", RMON_T_OCTETS }, + + /* IEEE TX */ + { "IEEE_tx_drop", IEEE_T_DROP }, + { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, + { "IEEE_tx_1col", IEEE_T_1COL }, + { "IEEE_tx_mcol", IEEE_T_MCOL }, + { "IEEE_tx_def", IEEE_T_DEF }, + { "IEEE_tx_lcol", IEEE_T_LCOL }, + { "IEEE_tx_excol", IEEE_T_EXCOL }, + { "IEEE_tx_macerr", IEEE_T_MACERR }, + { "IEEE_tx_cserr", IEEE_T_CSERR }, + { "IEEE_tx_sqe", IEEE_T_SQE }, + { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, + { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, + + /* RMON RX */ + { "rx_packets", RMON_R_PACKETS }, + { "rx_broadcast", RMON_R_BC_PKT }, + { "rx_multicast", RMON_R_MC_PKT }, + { "rx_crc_errors", RMON_R_CRC_ALIGN }, + { "rx_undersize", RMON_R_UNDERSIZE }, + { "rx_oversize", RMON_R_OVERSIZE }, + { "rx_fragment", RMON_R_FRAG }, + { "rx_jabber", RMON_R_JAB }, + { "rx_64byte", RMON_R_P64 }, + { "rx_65to127byte", RMON_R_P65TO127 }, + { "rx_128to255byte", RMON_R_P128TO255 }, + { "rx_256to511byte", RMON_R_P256TO511 }, + { "rx_512to1023byte", RMON_R_P512TO1023 }, + { "rx_1024to2047byte", RMON_R_P1024TO2047 }, + { "rx_GTE2048byte", RMON_R_P_GTE2048 }, + { "rx_octets", RMON_R_OCTETS }, + + /* IEEE RX */ + { "IEEE_rx_drop", IEEE_R_DROP }, + { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, + { "IEEE_rx_crc", IEEE_R_CRC }, + { "IEEE_rx_align", IEEE_R_ALIGN }, + { "IEEE_rx_macerr", IEEE_R_MACERR }, + { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, + { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, +}; + +static void 
fec_enet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct fec_enet_private *fep = netdev_priv(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(fec_stats); i++) + data[i] = readl(fep->hwp + fec_stats[i].offset); +} + +static void fec_enet_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(fec_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + fec_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static int fec_enet_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(fec_stats); + default: + return -EOPNOTSUPP; + } +} +#endif + static int fec_enet_nway_reset(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); @@ -1455,6 +1564,11 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .get_link = ethtool_op_get_link, .get_ts_info = fec_enet_get_ts_info, .nway_reset = fec_enet_nway_reset, +#ifndef CONFIG_M5272 + .get_ethtool_stats = fec_enet_get_ethtool_stats, + .get_strings = fec_enet_get_strings, + .get_sset_count = fec_enet_get_sset_count, +#endif }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) -- cgit v1.2.3 From 83a35e360433b58791bc9c4e288cace466d62e3a Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 28 Jun 2013 11:27:31 +0200 Subject: treewide: relase -> release Signed-off-by: Geert Uytterhoeven Signed-off-by: Jiri Kosina --- Makefile | 2 +- drivers/input/misc/da9055_onkey.c | 2 +- drivers/media/dvb-frontends/stv0367.c | 2 +- drivers/net/ethernet/intel/igb/e1000_phy.c | 2 +- drivers/s390/char/vmur.c | 2 +- drivers/scsi/libfc/fc_fcp.c | 2 +- mm/mmu_notifier.c | 2 +- sound/pci/hda/hda_codec.c | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/Makefile b/Makefile index 73e20dba55c1..75789ec41f53 100644 --- a/Makefile +++ b/Makefile @@ -794,7 +794,7 @@ PHONY += $(vmlinux-dirs) $(vmlinux-dirs): prepare scripts $(Q)$(MAKE) $(build)=$@ -# Store (new) KERNELRELASE string in include/config/kernel.release +# Store (new) KERNELRELEASE string in include/config/kernel.release include/config/kernel.release: include/config/auto.conf FORCE $(Q)rm -f $@ $(Q)echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))" > $@ diff --git a/drivers/input/misc/da9055_onkey.c b/drivers/input/misc/da9055_onkey.c index ee6ae3a00174..a0af8b2506ce 100644 --- a/drivers/input/misc/da9055_onkey.c +++ b/drivers/input/misc/da9055_onkey.c @@ -36,7 +36,7 @@ static void da9055_onkey_query(struct da9055_onkey *onkey) } else { key_stat &= DA9055_NOKEY_STS; /* - * Onkey status bit is cleared when onkey button is relased. + * Onkey status bit is cleared when onkey button is released. 
*/ if (!key_stat) { input_report_key(onkey->input, KEY_POWER, 0); diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index 0c8e45949b11..7b6dba3ce55e 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c @@ -2919,7 +2919,7 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state, if (tuner_lock == 0) return FE_367CAB_NOTUNER; #endif - /* Relase the TRL to start demodulator acquisition */ + /* Release the TRL to start demodulator acquisition */ /* Wait for QAM lock */ LockTime = 0; stv0367_writereg(state, R367CAB_CTRL_1, 0x00); diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 115b0da6e013..9979ebcf2a0c 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -2014,7 +2014,7 @@ out: * Verify the reset block is not blocking us from resetting. Acquire * semaphore (if necessary) and read/set/write the device control reset * bit in the PHY. Wait the appropriate delay time for the device to - * reset and relase the semaphore (if necessary). + * reset and release the semaphore (if necessary). **/ s32 igb_phy_hw_reset(struct e1000_hw *hw) { diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index c180e3135b3b..64c467998a90 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -89,7 +89,7 @@ static DEFINE_MUTEX(vmur_mutex); * urd references: * - ur_probe gets a urd reference, ur_remove drops the reference * dev_get_drvdata(&cdev->dev) * - ur_open gets a urd reference, ur_relase drops the reference + * - ur_open gets a urd reference, ur_release drops the reference * (urf->urd) * * cdev references: diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 09c81b2f2169..5fd0f1fbe586 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -2043,7 +2043,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd) spin_unlock_irqrestore(&si->scsi_queue_lock, flags); return SUCCESS; } - /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ + /* grab a ref so the fsp and sc_cmd cannot be released from under us */ fc_fcp_pkt_hold(fsp); spin_unlock_irqrestore(&si->scsi_queue_lock, flags); diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 6725ff183374..93e6089cb456 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -315,7 +315,7 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm) /* * Wait for any running method to finish, of course including - * ->release if it was run by mmu_notifier_relase instead of us. + * ->release if it was run by mmu_notifier_release instead of us. */ synchronize_srcu(&srcu); diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 55108b5fb291..a9676ceafe00 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2523,7 +2523,7 @@ int snd_hda_codec_reset(struct hda_codec *codec) flush_workqueue(bus->workq); #endif snd_hda_ctls_clear(codec); - /* relase PCMs */ + /* release PCMs */ for (i = 0; i < codec->num_pcms; i++) { if (codec->pcm_info[i].pcm) { snd_device_free(card, codec->pcm_info[i].pcm); -- cgit v1.2.3 From bd79680956573dd70bfa207af6b143aa94980b96 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 28 Jun 2013 14:02:52 +0300 Subject: pch_gbe: remove inline keyword for exported functions There is not much sense in marking functions inline when they are going to be used from other compilation units. 
Signed-off-by: Andy Shevchenko Signed-off-by: David S. Miller --- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 21 +++++++++++---------- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 2 +- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c | 3 +-- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c index 5ae03e815ee9..82e72b21a8fb 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c @@ -19,6 +19,7 @@ */ #include "pch_gbe.h" #include "pch_gbe_phy.h" +#include "pch_gbe_api.h" /* bus type values */ #define pch_gbe_bus_type_unknown 0 @@ -112,7 +113,7 @@ static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw) * 0: Successfully * ENOSYS: Function is not registered */ -inline s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) +s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) { if (!hw->reg) { pr_err("ERROR: Registers not mapped\n"); @@ -126,7 +127,7 @@ inline s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) * pch_gbe_hal_get_bus_info - Obtain bus information for adapter * @hw: Pointer to the HW structure */ -inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) +void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) { if (!hw->func->get_bus_info) pr_err("ERROR: configuration\n"); @@ -141,7 +142,7 @@ inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) * 0: Successfully * ENOSYS: Function is not registered */ -inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) +s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) { if (!hw->func->init_hw) { pr_err("ERROR: configuration\n"); @@ -159,7 +160,7 @@ inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) * 0: Successfully * Negative value: Failed */ -inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, +s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data) { if (!hw->func->read_phy_reg) @@ -176,7 +177,7 @@ inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, * 0: Successfully * Negative value: Failed */ -inline s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, +s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data) { if (!hw->func->write_phy_reg) @@ -188,7 +189,7 @@ inline s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, * pch_gbe_hal_phy_hw_reset - Hard PHY reset * @hw: Pointer to the HW structure */ -inline void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) +void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) { if (!hw->func->reset_phy) pr_err("ERROR: configuration\n"); @@ -200,7 +201,7 @@ inline void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) * pch_gbe_hal_phy_sw_reset - Soft PHY reset * @hw: Pointer to the HW structure */ -inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) +void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) { if (!hw->func->sw_reset_phy) pr_err("ERROR: configuration\n"); @@ -215,7 +216,7 @@ inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) * 0: Successfully * ENOSYS: Function is not registered */ -inline s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) +s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) { if (!hw->func->read_mac_addr) { pr_err("ERROR: configuration\n"); @@ -228,7 +229,7 @@ inline s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) * pch_gbe_hal_power_up_phy - Power up PHY * @hw: Pointer to the HW 
structure */ -inline void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw) +void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw) { if (hw->func->power_up_phy) hw->func->power_up_phy(hw); @@ -238,7 +239,7 @@ inline void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw) * pch_gbe_hal_power_down_phy - Power down PHY * @hw: Pointer to the HW structure */ -inline void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw) +void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw) { if (hw->func->power_down_phy) hw->func->power_down_phy(hw); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 0c1c65a9ce5e..16d5ffac1981 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -287,7 +287,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; } -inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) +static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) { iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD); } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c index 28bb9603d736..b97c6575e018 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c @@ -235,7 +235,7 @@ void pch_gbe_phy_power_down(struct pch_gbe_hw *hw) * pch_gbe_phy_set_rgmii - RGMII interface setting * @hw: Pointer to the HW structure */ -inline void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw) +void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw) { pch_gbe_phy_sw_reset(hw); } @@ -270,5 +270,4 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw) pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg); mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX; pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg); - } -- cgit v1.2.3 From 453ca931f515161902dbb325d7f39a992c3059ce Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 28 Jun 2013 14:02:53 +0300 Subject: pch_gbe: convert pr_* to netdev_* We can use these macros to prefix our messages with the proper device name. Signed-off-by: Andy Shevchenko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 2 + .../net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 49 ++-- .../ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 2 +- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 275 ++++++++++++--------- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c | 63 +++-- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c | 23 +- 6 files changed, 250 insertions(+), 164 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 7fb7e178c74e..7779036690cc 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -633,6 +633,8 @@ struct pch_gbe_adapter { struct pci_dev *ptp_pdev; }; +#define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw) + extern const char pch_driver_version[]; /* pch_gbe_main.c */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c index 82e72b21a8fb..ff3ad70935a6 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c @@ -71,7 +71,9 @@ static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw) ret_val = pch_gbe_phy_get_id(hw); if (ret_val) { - pr_err("pch_gbe_phy_get_id error\n"); + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n"); return ret_val; } pch_gbe_phy_init_setting(hw); @@ -116,7 +118,9 @@ static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw) s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) { if (!hw->reg) { - pr_err("ERROR: Registers not mapped\n"); + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: Registers not mapped\n"); return -ENOSYS; } pch_gbe_plat_init_function_pointers(hw); @@ -129,10 +133,13 @@ s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) */ void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) { - if (!hw->func->get_bus_info) - pr_err("ERROR: configuration\n"); - else - hw->func->get_bus_info(hw); + if (!hw->func->get_bus_info) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); + return; + } + hw->func->get_bus_info(hw); } /** @@ -145,7 +152,9 @@ void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) { if (!hw->func->init_hw) { - pr_err("ERROR: configuration\n"); + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); return -ENOSYS; } return hw->func->init_hw(hw); @@ -191,10 +200,13 @@ s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, */ void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) { - if (!hw->func->reset_phy) - pr_err("ERROR: configuration\n"); - else - hw->func->reset_phy(hw); + if (!hw->func->reset_phy) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); + return; + } + hw->func->reset_phy(hw); } /** @@ -203,10 +215,13 @@ void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) */ void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) { - if (!hw->func->sw_reset_phy) - pr_err("ERROR: configuration\n"); - else - hw->func->sw_reset_phy(hw); + if (!hw->func->sw_reset_phy) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, 
"ERROR: configuration\n"); + return; + } + hw->func->sw_reset_phy(hw); } /** @@ -219,7 +234,9 @@ void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) { if (!hw->func->read_mac_addr) { - pr_err("ERROR: configuration\n"); + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); return -ENOSYS; } return hw->func->read_mac_addr(hw); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index 24b787be6062..1129db0cdf82 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -122,7 +122,7 @@ static int pch_gbe_set_settings(struct net_device *netdev, } ret = mii_ethtool_sset(&adapter->mii, ecmd); if (ret) { - pr_err("Error: mii_ethtool_sset\n"); + netdev_err(netdev, "Error: mii_ethtool_sset\n"); return ret; } hw->mac.link_speed = speed; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 16d5ffac1981..97db56393197 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -300,6 +300,7 @@ static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) */ s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); u32 adr1a, adr1b; adr1a = ioread32(&hw->reg->mac_adr[0].high); @@ -312,7 +313,7 @@ s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) hw->mac.addr[4] = (u8)(adr1b & 0xFF); hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF); - pr_debug("hw->mac.addr : %pM\n", hw->mac.addr); + netdev_dbg(adapter->netdev, "hw->mac.addr : %pM\n", hw->mac.addr); return 0; } @@ -324,6 +325,7 @@ s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) static void pch_gbe_wait_clr_bit(void *reg, u32 bit) { u32 tmp; + /* wait busy */ tmp = 1000; while ((ioread32(reg) & bit) && --tmp) @@ -340,9 +342,10 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit) */ static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); u32 mar_low, mar_high, adrmask; - pr_debug("index : 0x%x\n", index); + netdev_dbg(adapter->netdev, "index : 0x%x\n", index); /* * HW expects these in little endian so we reverse the byte order @@ -468,10 +471,11 @@ static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw, */ s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); struct pch_gbe_mac_info *mac = &hw->mac; u32 rx_fctrl; - pr_debug("mac->fc = %u\n", mac->fc); + netdev_dbg(adapter->netdev, "mac->fc = %u\n", mac->fc); rx_fctrl = ioread32(&hw->reg->RX_FCTRL); @@ -493,14 +497,16 @@ s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw) mac->tx_fc_enable = true; break; default: - pr_err("Flow control param set incorrectly\n"); + netdev_err(adapter->netdev, + "Flow control param set incorrectly\n"); return -EINVAL; } if (mac->link_duplex == DUPLEX_HALF) rx_fctrl &= ~PCH_GBE_FL_CTRL_EN; iowrite32(rx_fctrl, &hw->reg->RX_FCTRL); - pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n", - ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable); + netdev_dbg(adapter->netdev, + "RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n", + ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable); return 0; } @@ -511,10 +517,11 @@ s32 pch_gbe_mac_force_mac_fc(struct 
pch_gbe_hw *hw) */ static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); u32 addr_mask; - pr_debug("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n", - wu_evt, ioread32(&hw->reg->ADDR_MASK)); + netdev_dbg(adapter->netdev, "wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n", + wu_evt, ioread32(&hw->reg->ADDR_MASK)); if (wu_evt) { /* Set Wake-On-Lan address mask */ @@ -546,6 +553,7 @@ static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt) u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, u16 data) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); u32 data_out = 0; unsigned int i; unsigned long flags; @@ -558,7 +566,7 @@ u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, udelay(20); } if (i == 0) { - pr_err("pch-gbe.miim won't go Ready\n"); + netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n"); spin_unlock_irqrestore(&hw->miim_lock, flags); return 0; /* No way to indicate timeout error */ } @@ -573,9 +581,9 @@ u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, } spin_unlock_irqrestore(&hw->miim_lock, flags); - pr_debug("PHY %s: reg=%d, data=0x%04X\n", - dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg, - dir == PCH_GBE_MIIM_OPER_READ ? data_out : data); + netdev_dbg(adapter->netdev, "PHY %s: reg=%d, data=0x%04X\n", + dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg, + dir == PCH_GBE_MIIM_OPER_READ ? data_out : data); return (u16) data_out; } @@ -585,6 +593,7 @@ u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, */ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); unsigned long tmp2, tmp3; /* Set Pause packet */ @@ -606,10 +615,13 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw) /* Transmit Pause Packet */ iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ); - pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", - ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2), - ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4), - ioread32(&hw->reg->PAUSE_PKT5)); + netdev_dbg(adapter->netdev, + "PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + ioread32(&hw->reg->PAUSE_PKT1), + ioread32(&hw->reg->PAUSE_PKT2), + ioread32(&hw->reg->PAUSE_PKT3), + ioread32(&hw->reg->PAUSE_PKT4), + ioread32(&hw->reg->PAUSE_PKT5)); return; } @@ -669,7 +681,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter) break; } adapter->hw.phy.addr = adapter->mii.phy_id; - pr_debug("phy_addr = %d\n", adapter->mii.phy_id); + netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id); if (addr == 32) return -EAGAIN; /* Selected the phy and isolate the rest */ @@ -758,13 +770,15 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) */ void pch_gbe_reset(struct pch_gbe_adapter *adapter) { + struct net_device *netdev = adapter->netdev; + pch_gbe_mac_reset_hw(&adapter->hw); /* reprogram multicast address register after reset */ - pch_gbe_set_multi(adapter->netdev); + pch_gbe_set_multi(netdev); /* Setup the receive address. 
*/ pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES); if (pch_gbe_hal_init_hw(&adapter->hw)) - pr_err("Hardware Error\n"); + netdev_err(netdev, "Hardware Error\n"); } /** @@ -778,7 +792,7 @@ static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter) free_irq(adapter->pdev->irq, netdev); if (adapter->have_msi) { pci_disable_msi(adapter->pdev); - pr_debug("call pci_disable_msi\n"); + netdev_dbg(netdev, "call pci_disable_msi\n"); } } @@ -795,7 +809,8 @@ static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter) ioread32(&hw->reg->INT_ST); synchronize_irq(adapter->pdev->irq); - pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN)); + netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n", + ioread32(&hw->reg->INT_EN)); } /** @@ -809,7 +824,8 @@ static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter) if (likely(atomic_dec_and_test(&adapter->irq_sem))) iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN); ioread32(&hw->reg->INT_ST); - pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN)); + netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n", + ioread32(&hw->reg->INT_EN)); } @@ -846,9 +862,9 @@ static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter) struct pch_gbe_hw *hw = &adapter->hw; u32 tdba, tdlen, dctrl; - pr_debug("dma addr = 0x%08llx size = 0x%08x\n", - (unsigned long long)adapter->tx_ring->dma, - adapter->tx_ring->size); + netdev_dbg(adapter->netdev, "dma addr = 0x%08llx size = 0x%08x\n", + (unsigned long long)adapter->tx_ring->dma, + adapter->tx_ring->size); /* Setup the HW Tx Head and Tail descriptor pointers */ tdba = adapter->tx_ring->dma; @@ -894,9 +910,9 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) struct pch_gbe_hw *hw = &adapter->hw; u32 rdba, rdlen, rxdma; - pr_debug("dma adr = 0x%08llx size = 0x%08x\n", - (unsigned long long)adapter->rx_ring->dma, - adapter->rx_ring->size); + netdev_dbg(adapter->netdev, "dma adr = 0x%08llx size = 0x%08x\n", + (unsigned long long)adapter->rx_ring->dma, + adapter->rx_ring->size); pch_gbe_mac_force_mac_fc(hw); @@ -907,9 +923,10 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) rxdma &= ~PCH_GBE_RX_DMA_EN; iowrite32(rxdma, &hw->reg->DMA_CTRL); - pr_debug("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n", - ioread32(&hw->reg->MAC_RX_EN), - ioread32(&hw->reg->DMA_CTRL)); + netdev_dbg(adapter->netdev, + "MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n", + ioread32(&hw->reg->MAC_RX_EN), + ioread32(&hw->reg->DMA_CTRL)); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ @@ -977,7 +994,8 @@ static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter, buffer_info = &tx_ring->buffer_info[i]; pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info); } - pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i); + netdev_dbg(adapter->netdev, + "call pch_gbe_unmap_and_free_tx_resource() %d count\n", i); size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count; memset(tx_ring->buffer_info, 0, size); @@ -1009,7 +1027,8 @@ pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter, buffer_info = &rx_ring->buffer_info[i]; pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info); } - pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i); + netdev_dbg(adapter->netdev, + "call pch_gbe_unmap_and_free_rx_resource() %d count\n", i); size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count; memset(rx_ring->buffer_info, 0, size); @@ -1087,7 +1106,7 @@ static void 
pch_gbe_watchdog(unsigned long data) struct net_device *netdev = adapter->netdev; struct pch_gbe_hw *hw = &adapter->hw; - pr_debug("right now = %ld\n", jiffies); + netdev_dbg(netdev, "right now = %ld\n", jiffies); pch_gbe_update_stats(adapter); if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) { @@ -1095,7 +1114,7 @@ static void pch_gbe_watchdog(unsigned long data) netdev->tx_queue_len = adapter->tx_queue_len; /* mii library handles link maintenance tasks */ if (mii_ethtool_gset(&adapter->mii, &cmd)) { - pr_err("ethtool get setting Error\n"); + netdev_err(netdev, "ethtool get setting Error\n"); mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD)); @@ -1213,7 +1232,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter, buffer_info->length, DMA_TO_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { - pr_err("TX DMA map failed\n"); + netdev_err(adapter->netdev, "TX DMA map failed\n"); buffer_info->dma = 0; buffer_info->time_stamp = 0; tx_ring->next_to_use = ring_num; @@ -1333,13 +1352,13 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) /* When request status is no interruption factor */ if (unlikely(!int_st)) return IRQ_NONE; /* Not our interrupt. End processing. */ - pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st); + netdev_dbg(netdev, "%s occur int_st = 0x%08x\n", __func__, int_st); if (int_st & PCH_GBE_INT_RX_FRAME_ERR) adapter->stats.intr_rx_frame_err_count++; if (int_st & PCH_GBE_INT_RX_FIFO_ERR) if (!adapter->rx_stop_flag) { adapter->stats.intr_rx_fifo_err_count++; - pr_debug("Rx fifo over run\n"); + netdev_dbg(netdev, "Rx fifo over run\n"); adapter->rx_stop_flag = true; int_en = ioread32(&hw->reg->INT_EN); iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), @@ -1359,7 +1378,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) /* When Rx descriptor is empty */ if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { adapter->stats.intr_rx_dsc_empty_count++; - pr_debug("Rx descriptor is empty\n"); + netdev_dbg(netdev, "Rx descriptor is empty\n"); int_en = ioread32(&hw->reg->INT_EN); iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); if (hw->mac.tx_fc_enable) { @@ -1382,8 +1401,8 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) __napi_schedule(&adapter->napi); } } - pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n", - IRQ_HANDLED, ioread32(&hw->reg->INT_EN)); + netdev_dbg(netdev, "return = 0x%08x INT_EN reg = 0x%08x\n", + IRQ_HANDLED, ioread32(&hw->reg->INT_EN)); return IRQ_HANDLED; } @@ -1437,9 +1456,10 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter, rx_desc->buffer_addr = (buffer_info->dma); rx_desc->gbec_status = DSC_INIT16; - pr_debug("i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n", - i, (unsigned long long)buffer_info->dma, - buffer_info->length); + netdev_dbg(netdev, + "i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n", + i, (unsigned long long)buffer_info->dma, + buffer_info->length); if (unlikely(++i == rx_ring->count)) i = 0; @@ -1531,12 +1551,13 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, bool cleaned = false; int unused, thresh; - pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); + netdev_dbg(adapter->netdev, "next_to_clean : %d\n", + tx_ring->next_to_clean); i = tx_ring->next_to_clean; tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); - pr_debug("gbec_status:0x%04x dma_status:0x%04x\n", - tx_desc->gbec_status, tx_desc->dma_status); + netdev_dbg(adapter->netdev, "gbec_status:0x%04x dma_status:0x%04x\n", + tx_desc->gbec_status, 
tx_desc->dma_status); unused = PCH_GBE_DESC_UNUSED(tx_ring); thresh = tx_ring->count - PCH_GBE_TX_WEIGHT; @@ -1544,8 +1565,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, { /* current marked clean, tx queue filling up, do extra clean */ int j, k; if (unused < 8) { /* tx queue nearly full */ - pr_debug("clean_tx: transmit queue warning (%x,%x) unused=%d\n", - tx_ring->next_to_clean,tx_ring->next_to_use,unused); + netdev_dbg(adapter->netdev, + "clean_tx: transmit queue warning (%x,%x) unused=%d\n", + tx_ring->next_to_clean, tx_ring->next_to_use, + unused); } /* current marked clean, scan for more that need cleaning. */ @@ -1557,49 +1580,56 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, if (++k >= tx_ring->count) k = 0; /*increment, wrap*/ } if (j < PCH_GBE_TX_WEIGHT) { - pr_debug("clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n", - unused,j, i,k, tx_ring->next_to_use, tx_desc->gbec_status); + netdev_dbg(adapter->netdev, + "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n", + unused, j, i, k, tx_ring->next_to_use, + tx_desc->gbec_status); i = k; /*found one to clean, usu gbec_status==2000.*/ } } while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { - pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); + netdev_dbg(adapter->netdev, "gbec_status:0x%04x\n", + tx_desc->gbec_status); buffer_info = &tx_ring->buffer_info[i]; skb = buffer_info->skb; cleaned = true; if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) { adapter->stats.tx_aborted_errors++; - pr_err("Transfer Abort Error\n"); + netdev_err(adapter->netdev, "Transfer Abort Error\n"); } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER) ) { adapter->stats.tx_carrier_errors++; - pr_err("Transfer Carrier Sense Error\n"); + netdev_err(adapter->netdev, + "Transfer Carrier Sense Error\n"); } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL) ) { adapter->stats.tx_aborted_errors++; - pr_err("Transfer Collision Abort Error\n"); + netdev_err(adapter->netdev, + "Transfer Collision Abort Error\n"); } else if ((tx_desc->gbec_status & (PCH_GBE_TXD_GMAC_STAT_SNGCOL | PCH_GBE_TXD_GMAC_STAT_MLTCOL))) { adapter->stats.collisions++; adapter->stats.tx_packets++; adapter->stats.tx_bytes += skb->len; - pr_debug("Transfer Collision\n"); + netdev_dbg(adapter->netdev, "Transfer Collision\n"); } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT) ) { adapter->stats.tx_packets++; adapter->stats.tx_bytes += skb->len; } if (buffer_info->mapped) { - pr_debug("unmap buffer_info->dma : %d\n", i); + netdev_dbg(adapter->netdev, + "unmap buffer_info->dma : %d\n", i); dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); buffer_info->mapped = false; } if (buffer_info->skb) { - pr_debug("trim buffer_info->skb : %d\n", i); + netdev_dbg(adapter->netdev, + "trim buffer_info->skb : %d\n", i); skb_trim(buffer_info->skb, 0); } tx_desc->gbec_status = DSC_INIT16; @@ -1613,8 +1643,9 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, break; } } - pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", - cleaned_count); + netdev_dbg(adapter->netdev, + "called pch_gbe_unmap_and_free_tx_resource() %d count\n", + cleaned_count); if (cleaned_count > 0) { /*skip this if nothing cleaned*/ /* Recover from running out of Tx resources in xmit_frame */ spin_lock(&tx_ring->tx_lock); @@ -1622,12 +1653,13 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, { netif_wake_queue(adapter->netdev); adapter->stats.tx_restart_count++; - pr_debug("Tx 
wake queue\n"); + netdev_dbg(adapter->netdev, "Tx wake queue\n"); } tx_ring->next_to_clean = i; - pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); + netdev_dbg(adapter->netdev, "next_to_clean : %d\n", + tx_ring->next_to_clean); spin_unlock(&tx_ring->tx_lock); } return cleaned; @@ -1684,22 +1716,22 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, buffer_info->length, DMA_FROM_DEVICE); buffer_info->mapped = false; - pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " - "TCP:0x%08x] BufInf = 0x%p\n", - i, dma_status, gbec_status, tcp_ip_status, - buffer_info); + netdev_dbg(netdev, + "RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x TCP:0x%08x] BufInf = 0x%p\n", + i, dma_status, gbec_status, tcp_ip_status, + buffer_info); /* Error check */ if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) { adapter->stats.rx_frame_errors++; - pr_err("Receive Not Octal Error\n"); + netdev_err(netdev, "Receive Not Octal Error\n"); } else if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NBLERR)) { adapter->stats.rx_frame_errors++; - pr_err("Receive Nibble Error\n"); + netdev_err(netdev, "Receive Nibble Error\n"); } else if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_CRCERR)) { adapter->stats.rx_crc_errors++; - pr_err("Receive CRC Error\n"); + netdev_err(netdev, "Receive CRC Error\n"); } else { /* get receive length */ /* length convert[-3], length includes FCS length */ @@ -1730,8 +1762,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, napi_gro_receive(&adapter->napi, skb); (*work_done)++; - pr_debug("Receive skb->ip_summed: %d length: %d\n", - skb->ip_summed, length); + netdev_dbg(netdev, + "Receive skb->ip_summed: %d length: %d\n", + skb->ip_summed, length); } /* return some buffers to hardware, one at a time is too slow */ if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { @@ -1787,10 +1820,10 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo); tx_desc->gbec_status = DSC_INIT16; } - pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n" - "next_to_clean = 0x%08x next_to_use = 0x%08x\n", - tx_ring->desc, (unsigned long long)tx_ring->dma, - tx_ring->next_to_clean, tx_ring->next_to_use); + netdev_dbg(adapter->netdev, + "tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx next_to_clean = 0x%08x next_to_use = 0x%08x\n", + tx_ring->desc, (unsigned long long)tx_ring->dma, + tx_ring->next_to_clean, tx_ring->next_to_use); return 0; } @@ -1829,10 +1862,10 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter, rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo); rx_desc->gbec_status = DSC_INIT16; } - pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx " - "next_to_clean = 0x%08x next_to_use = 0x%08x\n", - rx_ring->desc, (unsigned long long)rx_ring->dma, - rx_ring->next_to_clean, rx_ring->next_to_use); + netdev_dbg(adapter->netdev, + "rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx next_to_clean = 0x%08x next_to_use = 0x%08x\n", + rx_ring->desc, (unsigned long long)rx_ring->dma, + rx_ring->next_to_clean, rx_ring->next_to_use); return 0; } @@ -1886,9 +1919,9 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter) flags = IRQF_SHARED; adapter->have_msi = false; err = pci_enable_msi(adapter->pdev); - pr_debug("call pci_enable_msi\n"); + netdev_dbg(netdev, "call pci_enable_msi\n"); if (err) { - pr_debug("call pci_enable_msi - Error: %d\n", err); + netdev_dbg(netdev, "call pci_enable_msi - Error: %d\n", err); } else { flags = 0; adapter->have_msi = true; @@ -1896,9 +1929,11 @@ static int pch_gbe_request_irq(struct 
pch_gbe_adapter *adapter) err = request_irq(adapter->pdev->irq, &pch_gbe_intr, flags, netdev->name, netdev); if (err) - pr_err("Unable to allocate interrupt Error: %d\n", err); - pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n", - adapter->have_msi, flags, err); + netdev_err(netdev, "Unable to allocate interrupt Error: %d\n", + err); + netdev_dbg(netdev, + "adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n", + adapter->have_msi, flags, err); return err; } @@ -1919,7 +1954,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) /* Ensure we have a valid MAC */ if (!is_valid_ether_addr(adapter->hw.mac.addr)) { - pr_err("Error: Invalid MAC address\n"); + netdev_err(netdev, "Error: Invalid MAC address\n"); goto out; } @@ -1933,12 +1968,14 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) err = pch_gbe_request_irq(adapter); if (err) { - pr_err("Error: can't bring device up - irq request failed\n"); + netdev_err(netdev, + "Error: can't bring device up - irq request failed\n"); goto out; } err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); if (err) { - pr_err("Error: can't bring device up - alloc rx buffers pool failed\n"); + netdev_err(netdev, + "Error: can't bring device up - alloc rx buffers pool failed\n"); goto freeirq; } pch_gbe_alloc_tx_buffers(adapter, tx_ring); @@ -2015,11 +2052,11 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter) /* Initialize the hardware-specific values */ if (pch_gbe_hal_setup_init_funcs(hw)) { - pr_err("Hardware Initialization Failure\n"); + netdev_err(netdev, "Hardware Initialization Failure\n"); return -EIO; } if (pch_gbe_alloc_queues(adapter)) { - pr_err("Unable to allocate memory for queues\n"); + netdev_err(netdev, "Unable to allocate memory for queues\n"); return -ENOMEM; } spin_lock_init(&adapter->hw.miim_lock); @@ -2030,9 +2067,10 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter) pch_gbe_init_stats(adapter); - pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n", - (u32) adapter->rx_buffer_len, - hw->mac.min_frame_size, hw->mac.max_frame_size); + netdev_dbg(netdev, + "rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n", + (u32) adapter->rx_buffer_len, + hw->mac.min_frame_size, hw->mac.max_frame_size); return 0; } @@ -2061,7 +2099,7 @@ static int pch_gbe_open(struct net_device *netdev) err = pch_gbe_up(adapter); if (err) goto err_up; - pr_debug("Success End\n"); + netdev_dbg(netdev, "Success End\n"); return 0; err_up: @@ -2072,7 +2110,7 @@ err_setup_rx: pch_gbe_free_tx_resources(adapter, adapter->tx_ring); err_setup_tx: pch_gbe_reset(adapter); - pr_err("Error End\n"); + netdev_err(netdev, "Error End\n"); return err; } @@ -2116,8 +2154,9 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { netif_stop_queue(netdev); spin_unlock_irqrestore(&tx_ring->tx_lock, flags); - pr_debug("Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n", - tx_ring->next_to_use, tx_ring->next_to_clean); + netdev_dbg(netdev, + "Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n", + tx_ring->next_to_use, tx_ring->next_to_clean); return NETDEV_TX_BUSY; } @@ -2152,7 +2191,7 @@ static void pch_gbe_set_multi(struct net_device *netdev) int i; int mc_count; - pr_debug("netdev->flags : 0x%08x\n", netdev->flags); + netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags); /* Check for Promiscuous and All Multicast modes */ rctl = ioread32(&hw->reg->RX_MODE); @@ -2192,7 +2231,8 @@ static 
void pch_gbe_set_multi(struct net_device *netdev) PCH_GBE_MAR_ENTRIES); kfree(mta_list); - pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n", + netdev_dbg(netdev, + "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n", ioread32(&hw->reg->RX_MODE), mc_count); } @@ -2218,12 +2258,12 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr) pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0); ret_val = 0; } - pr_debug("ret_val : 0x%08x\n", ret_val); - pr_debug("dev_addr : %pM\n", netdev->dev_addr); - pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr); - pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n", - ioread32(&adapter->hw.reg->mac_adr[0].high), - ioread32(&adapter->hw.reg->mac_adr[0].low)); + netdev_dbg(netdev, "ret_val : 0x%08x\n", ret_val); + netdev_dbg(netdev, "dev_addr : %pM\n", netdev->dev_addr); + netdev_dbg(netdev, "mac_addr : %pM\n", adapter->hw.mac.addr); + netdev_dbg(netdev, "MAC_ADR1AB reg : 0x%08x 0x%08x\n", + ioread32(&adapter->hw.reg->mac_adr[0].high), + ioread32(&adapter->hw.reg->mac_adr[0].low)); return ret_val; } @@ -2245,7 +2285,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) { - pr_err("Invalid MTU setting\n"); + netdev_err(netdev, "Invalid MTU setting\n"); return -EINVAL; } if (max_frame <= PCH_GBE_FRAME_SIZE_2048) @@ -2274,9 +2314,10 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) adapter->hw.mac.max_frame_size = max_frame; } - pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", - max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, - adapter->hw.mac.max_frame_size); + netdev_dbg(netdev, + "max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", + max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, + adapter->hw.mac.max_frame_size); return 0; } @@ -2317,7 +2358,7 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); - pr_debug("cmd : 0x%04x\n", cmd); + netdev_dbg(netdev, "cmd : 0x%04x\n", cmd); if (cmd == SIOCSHWTSTAMP) return hwtstamp_ioctl(netdev, ifr, cmd); @@ -2354,7 +2395,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) bool poll_end_flag = false; bool cleaned = false; - pr_debug("budget : %d\n", budget); + netdev_dbg(adapter->netdev, "budget : %d\n", budget); pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); @@ -2377,8 +2418,9 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) pch_gbe_enable_dma_rx(&adapter->hw); } - pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", - poll_end_flag, work_done, budget); + netdev_dbg(adapter->netdev, + "poll_end_flag : %d work_done : %d budget : %d\n", + poll_end_flag, work_done, budget); return work_done; } @@ -2435,7 +2477,7 @@ static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev) struct pch_gbe_hw *hw = &adapter->hw; if (pci_enable_device(pdev)) { - pr_err("Cannot re-enable PCI device after reset\n"); + netdev_err(netdev, "Cannot re-enable PCI device after reset\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); @@ -2455,7 +2497,8 @@ static void pch_gbe_io_resume(struct pci_dev *pdev) if (netif_running(netdev)) { if (pch_gbe_up(adapter)) { - pr_debug("can't bring device back up after reset\n"); + 
netdev_dbg(netdev, + "can't bring device back up after reset\n"); return; } } @@ -2509,7 +2552,7 @@ static int pch_gbe_resume(struct device *device) err = pci_enable_device(pdev); if (err) { - pr_err("Cannot enable PCI device from suspend\n"); + netdev_err(netdev, "Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); @@ -2609,7 +2652,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, PCI_DEVFN(12, 4)); if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) { - pr_err("Bad ptp filter\n"); + dev_err(&pdev->dev, "Bad ptp filter\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c index 8653c3b81f84..cf7c9b3a255b 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c @@ -237,16 +237,17 @@ static int pch_gbe_validate_option(int *value, case enable_option: switch (*value) { case OPTION_ENABLED: - pr_debug("%s Enabled\n", opt->name); + netdev_dbg(adapter->netdev, "%s Enabled\n", opt->name); return 0; case OPTION_DISABLED: - pr_debug("%s Disabled\n", opt->name); + netdev_dbg(adapter->netdev, "%s Disabled\n", opt->name); return 0; } break; case range_option: if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { - pr_debug("%s set to %i\n", opt->name, *value); + netdev_dbg(adapter->netdev, "%s set to %i\n", + opt->name, *value); return 0; } break; @@ -258,7 +259,8 @@ static int pch_gbe_validate_option(int *value, ent = &opt->arg.l.p[i]; if (*value == ent->i) { if (ent->str[0] != '\0') - pr_debug("%s\n", ent->str); + netdev_dbg(adapter->netdev, "%s\n", + ent->str); return 0; } } @@ -268,8 +270,8 @@ static int pch_gbe_validate_option(int *value, BUG(); } - pr_debug("Invalid %s value specified (%i) %s\n", - opt->name, *value, opt->err); + netdev_dbg(adapter->netdev, "Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); *value = opt->def; return -1; } @@ -318,7 +320,8 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) .p = an_list} } }; if (speed || dplx) { - pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n"); + netdev_dbg(adapter->netdev, + "AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n"); hw->phy.autoneg_advertised = opt.def; } else { int tmp = AutoNeg; @@ -332,13 +335,16 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) case 0: hw->mac.autoneg = hw->mac.fc_autoneg = 1; if ((speed || dplx)) - pr_debug("Speed and duplex autonegotiation enabled\n"); + netdev_dbg(adapter->netdev, + "Speed and duplex autonegotiation enabled\n"); hw->mac.link_speed = SPEED_10; hw->mac.link_duplex = DUPLEX_HALF; break; case HALF_DUPLEX: - pr_debug("Half Duplex specified without Speed\n"); - pr_debug("Using Autonegotiation at Half Duplex only\n"); + netdev_dbg(adapter->netdev, + "Half Duplex specified without Speed\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at Half Duplex only\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 1; hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF | PHY_ADVERTISE_100_HALF; @@ -346,8 +352,10 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) hw->mac.link_duplex = DUPLEX_HALF; break; case FULL_DUPLEX: - pr_debug("Full Duplex specified without Speed\n"); - pr_debug("Using Autonegotiation at Full Duplex only\n"); + netdev_dbg(adapter->netdev, + "Full Duplex 
specified without Speed\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at Full Duplex only\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 1; hw->phy.autoneg_advertised = PHY_ADVERTISE_10_FULL | PHY_ADVERTISE_100_FULL | @@ -356,8 +364,10 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) hw->mac.link_duplex = DUPLEX_FULL; break; case SPEED_10: - pr_debug("10 Mbps Speed specified without Duplex\n"); - pr_debug("Using Autonegotiation at 10 Mbps only\n"); + netdev_dbg(adapter->netdev, + "10 Mbps Speed specified without Duplex\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at 10 Mbps only\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 1; hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF | PHY_ADVERTISE_10_FULL; @@ -365,22 +375,24 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) hw->mac.link_duplex = DUPLEX_HALF; break; case SPEED_10 + HALF_DUPLEX: - pr_debug("Forcing to 10 Mbps Half Duplex\n"); + netdev_dbg(adapter->netdev, "Forcing to 10 Mbps Half Duplex\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 0; hw->phy.autoneg_advertised = 0; hw->mac.link_speed = SPEED_10; hw->mac.link_duplex = DUPLEX_HALF; break; case SPEED_10 + FULL_DUPLEX: - pr_debug("Forcing to 10 Mbps Full Duplex\n"); + netdev_dbg(adapter->netdev, "Forcing to 10 Mbps Full Duplex\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 0; hw->phy.autoneg_advertised = 0; hw->mac.link_speed = SPEED_10; hw->mac.link_duplex = DUPLEX_FULL; break; case SPEED_100: - pr_debug("100 Mbps Speed specified without Duplex\n"); - pr_debug("Using Autonegotiation at 100 Mbps only\n"); + netdev_dbg(adapter->netdev, + "100 Mbps Speed specified without Duplex\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at 100 Mbps only\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 1; hw->phy.autoneg_advertised = PHY_ADVERTISE_100_HALF | PHY_ADVERTISE_100_FULL; @@ -388,28 +400,33 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) hw->mac.link_duplex = DUPLEX_HALF; break; case SPEED_100 + HALF_DUPLEX: - pr_debug("Forcing to 100 Mbps Half Duplex\n"); + netdev_dbg(adapter->netdev, + "Forcing to 100 Mbps Half Duplex\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 0; hw->phy.autoneg_advertised = 0; hw->mac.link_speed = SPEED_100; hw->mac.link_duplex = DUPLEX_HALF; break; case SPEED_100 + FULL_DUPLEX: - pr_debug("Forcing to 100 Mbps Full Duplex\n"); + netdev_dbg(adapter->netdev, + "Forcing to 100 Mbps Full Duplex\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 0; hw->phy.autoneg_advertised = 0; hw->mac.link_speed = SPEED_100; hw->mac.link_duplex = DUPLEX_FULL; break; case SPEED_1000: - pr_debug("1000 Mbps Speed specified without Duplex\n"); + netdev_dbg(adapter->netdev, + "1000 Mbps Speed specified without Duplex\n"); goto full_duplex_only; case SPEED_1000 + HALF_DUPLEX: - pr_debug("Half Duplex is not supported at 1000 Mbps\n"); + netdev_dbg(adapter->netdev, + "Half Duplex is not supported at 1000 Mbps\n"); /* fall through */ case SPEED_1000 + FULL_DUPLEX: full_duplex_only: - pr_debug("Using Autonegotiation at 1000 Mbps Full Duplex only\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at 1000 Mbps Full Duplex only\n"); hw->mac.autoneg = hw->mac.fc_autoneg = 1; hw->phy.autoneg_advertised = PHY_ADVERTISE_1000_FULL; hw->mac.link_speed = SPEED_1000; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c index b97c6575e018..da079073a6c6 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c +++ 
b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c @@ -97,6 +97,7 @@ */ s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); struct pch_gbe_phy_info *phy = &hw->phy; s32 ret; u16 phy_id1; @@ -115,8 +116,9 @@ s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw) phy->id = (u32)phy_id1; phy->id = ((phy->id << 6) | ((phy_id2 & 0xFC00) >> 10)); phy->revision = (u32) (phy_id2 & 0x000F); - pr_debug("phy->id : 0x%08x phy->revision : 0x%08x\n", - phy->id, phy->revision); + netdev_dbg(adapter->netdev, + "phy->id : 0x%08x phy->revision : 0x%08x\n", + phy->id, phy->revision); return 0; } @@ -134,7 +136,10 @@ s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data) struct pch_gbe_phy_info *phy = &hw->phy; if (offset > PHY_MAX_REG_ADDRESS) { - pr_err("PHY Address %d is out of range\n", offset); + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "PHY Address %d is out of range\n", + offset); return -EINVAL; } *data = pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_READ, @@ -156,7 +161,10 @@ s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data) struct pch_gbe_phy_info *phy = &hw->phy; if (offset > PHY_MAX_REG_ADDRESS) { - pr_err("PHY Address %d is out of range\n", offset); + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "PHY Address %d is out of range\n", + offset); return -EINVAL; } pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_WRITE, @@ -246,15 +254,14 @@ void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw) */ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw) { - struct pch_gbe_adapter *adapter; + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; int ret; u16 mii_reg; - adapter = container_of(hw, struct pch_gbe_adapter, hw); ret = mii_ethtool_gset(&adapter->mii, &cmd); if (ret) - pr_err("Error: mii_ethtool_gset\n"); + netdev_err(adapter->netdev, "Error: mii_ethtool_gset\n"); ethtool_cmd_speed_set(&cmd, hw->mac.link_speed); cmd.duplex = hw->mac.link_duplex; @@ -263,7 +270,7 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw) pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET); ret = mii_ethtool_sset(&adapter->mii, &cmd); if (ret) - pr_err("Error: mii_ethtool_sset\n"); + netdev_err(adapter->netdev, "Error: mii_ethtool_sset\n"); pch_gbe_phy_sw_reset(hw); -- cgit v1.2.3 From 29cc436cb90da4cabf404d8ceedc762fc7387b15 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 28 Jun 2013 14:02:54 +0300 Subject: pch_gbe: use managed functions pcim_* and devm_* This makes the error handling much more simpler than open-coding everything and in addition makes the probe function smaller an tidier. Signed-off-by: Andy Shevchenko Signed-off-by: David S. 
Miller --- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 50 +++++++--------------- 1 file changed, 15 insertions(+), 35 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 97db56393197..ab1039a95bf9 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -636,15 +636,15 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw) */ static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter) { - adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL); + adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev, + sizeof(*adapter->tx_ring), GFP_KERNEL); if (!adapter->tx_ring) return -ENOMEM; - adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL); - if (!adapter->rx_ring) { - kfree(adapter->tx_ring); + adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev, + sizeof(*adapter->rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) return -ENOMEM; - } return 0; } @@ -2588,13 +2588,7 @@ static void pch_gbe_remove(struct pci_dev *pdev) pch_gbe_hal_phy_hw_reset(&adapter->hw); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); - - iounmap(adapter->hw.reg); - pci_release_regions(pdev); free_netdev(netdev); - pci_disable_device(pdev); } static int pch_gbe_probe(struct pci_dev *pdev, @@ -2604,7 +2598,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, struct pch_gbe_adapter *adapter; int ret; - ret = pci_enable_device(pdev); + ret = pcim_enable_device(pdev); if (ret) return ret; @@ -2617,24 +2611,22 @@ static int pch_gbe_probe(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "ERR: No usable DMA " "configuration, aborting\n"); - goto err_disable_device; + return ret; } } } - ret = pci_request_regions(pdev, KBUILD_MODNAME); + ret = pcim_iomap_regions(pdev, 1 << PCH_GBE_PCI_BAR, pci_name(pdev)); if (ret) { dev_err(&pdev->dev, "ERR: Can't reserve PCI I/O and memory resources\n"); - goto err_disable_device; + return ret; } pci_set_master(pdev); netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter)); - if (!netdev) { - ret = -ENOMEM; - goto err_release_pci; - } + if (!netdev) + return -ENOMEM; SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); @@ -2642,18 +2634,14 @@ static int pch_gbe_probe(struct pci_dev *pdev, adapter->netdev = netdev; adapter->pdev = pdev; adapter->hw.back = adapter; - adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0); - if (!adapter->hw.reg) { - ret = -EIO; - dev_err(&pdev->dev, "Can't ioremap\n"); - goto err_free_netdev; - } + adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR]; adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, PCI_DEVFN(12, 4)); if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) { dev_err(&pdev->dev, "Bad ptp filter\n"); - return -EINVAL; + ret = -EINVAL; + goto err_free_netdev; } netdev->netdev_ops = &pch_gbe_netdev_ops; @@ -2671,7 +2659,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, /* setup the private structure */ ret = pch_gbe_sw_init(adapter); if (ret) - goto err_iounmap; + goto err_free_netdev; /* Initialize PHY */ ret = pch_gbe_init_phy(adapter); @@ -2727,16 +2715,8 @@ static int pch_gbe_probe(struct pci_dev *pdev, err_free_adapter: pch_gbe_hal_phy_hw_reset(&adapter->hw); - kfree(adapter->tx_ring); - kfree(adapter->rx_ring); -err_iounmap: - iounmap(adapter->hw.reg); err_free_netdev: free_netdev(netdev); -err_release_pci: - pci_release_regions(pdev); -err_disable_device: - 
pci_disable_device(pdev); return ret; } -- cgit v1.2.3 From cf6122be45de68bc773f64e4266db76129ea556f Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 27 Jun 2013 11:40:47 +0200 Subject: drivers: net: cpsw: add newline after MACID log Cosmetic patch to add a newline after logging the device's MACID. Signed-off-by: Daniel Mack Acked-by: Mugunthan V N Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 101b037a7dce..2c3657adc7cb 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1700,10 +1700,10 @@ static int cpsw_probe(struct platform_device *pdev) if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); - pr_info("Detected MACID = %pM", priv->mac_addr); + pr_info("Detected MACID = %pM\n", priv->mac_addr); } else { eth_random_addr(priv->mac_addr); - pr_info("Random MACID = %pM", priv->mac_addr); + pr_info("Random MACID = %pM\n", priv->mac_addr); } memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); -- cgit v1.2.3 From 1ca01512a2a95aa061c3fc24b7c5d7fad9f606bf Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Thu, 27 Jun 2013 20:53:42 +0800 Subject: net/trivial: replace numeric with standard PM state macros Use standard PM state macros PCI_Dx instead of numeric 0/1/2.. Signed-off-by: Yijing Wang Cc: netdev@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: David S. Miller --- drivers/net/ethernet/8390/ne2k-pci.c | 2 +- drivers/net/ethernet/emulex/benet/be_main.c | 4 ++-- drivers/net/ethernet/intel/e100.c | 4 ++-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2 +- drivers/net/ethernet/realtek/8139cp.c | 2 +- drivers/net/ethernet/ti/tlan.c | 2 +- drivers/net/ethernet/via/via-velocity.c | 2 +- drivers/net/wireless/orinoco/orinoco_pci.h | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index 587a885de259..92201080e07a 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -676,7 +676,7 @@ static int ne2k_pci_resume (struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata (pdev); int rc; - pci_set_power_state(pdev, 0); + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); rc = pci_enable_device(pdev); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index cd69ac79f565..2df48bb0f1ca 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4364,7 +4364,7 @@ static int be_resume(struct pci_dev *pdev) if (status) return status; - pci_set_power_state(pdev, 0); + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* tell fw we're ready to fire cmds */ @@ -4460,7 +4460,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; pci_set_master(pdev); - pci_set_power_state(pdev, 0); + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* Check if card is ok and fw is ready */ diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index d2bea3f07c73..5115ae76a5d1 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -3069,7 +3069,7 @@ static int e100_resume(struct pci_dev *pdev) 
pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* ack any pending wake events, disable PME */ - pci_enable_wake(pdev, 0, 0); + pci_enable_wake(pdev, PCI_D0, 0); /* disable reverse auto-negotiation */ if (nic->phy == phy_82552_v) { @@ -3160,7 +3160,7 @@ static void e100_io_resume(struct pci_dev *pdev) struct nic *nic = netdev_priv(netdev); /* ack any pending wake events, disable PME */ - pci_enable_wake(pdev, 0, 0); + pci_enable_wake(pdev, PCI_D0, 0); netif_device_attach(netdev); if (netif_running(netdev)) { diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 7be9788ed0f6..967bae8b85c5 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3299,7 +3299,7 @@ static int myri10ge_resume(struct pci_dev *pdev) if (mgp == NULL) return -EINVAL; netdev = mgp->dev; - pci_set_power_state(pdev, 0); /* zeros conf space as a side effect */ + pci_set_power_state(pdev, PCI_D0); /* zeros conf space as a side effect */ msleep(5); /* give card time to respond */ pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); if (vendor == 0xffff) { diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 03523459c406..e6acb9fa5767 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -1817,7 +1817,7 @@ static int cp_set_eeprom(struct net_device *dev, /* Put the board into D3cold state and wait for WakeUp signal */ static void cp_set_d3_state (struct cp_private *cp) { - pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */ + pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */ pci_set_power_state (cp->pdev, PCI_D3hot); } diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 59abfbcd0d55..591437e59b90 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -372,7 +372,7 @@ static int tlan_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - pci_enable_wake(pdev, 0, 0); + pci_enable_wake(pdev, PCI_D0, 0); netif_device_attach(dev); if (netif_running(dev)) diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 76919948b4ee..1d6dc41f755d 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -3211,7 +3211,7 @@ static int velocity_resume(struct device *dev) velocity_set_power_state(vptr, PCI_D0); if (vptr->pdev) { - pci_enable_wake(vptr->pdev, 0, 0); + pci_enable_wake(vptr->pdev, PCI_D0, 0); pci_restore_state(vptr->pdev); } diff --git a/drivers/net/wireless/orinoco/orinoco_pci.h b/drivers/net/wireless/orinoco/orinoco_pci.h index ea7231af40a8..43f5b9f5a0b0 100644 --- a/drivers/net/wireless/orinoco/orinoco_pci.h +++ b/drivers/net/wireless/orinoco/orinoco_pci.h @@ -38,7 +38,7 @@ static int orinoco_pci_resume(struct pci_dev *pdev) struct net_device *dev = priv->ndev; int err; - pci_set_power_state(pdev, 0); + pci_set_power_state(pdev, PCI_D0); err = pci_enable_device(pdev); if (err) { printk(KERN_ERR "%s: pci_enable_device failed on resume\n", -- cgit v1.2.3 From b01978cacfd7e3a4ca703b0e48f2e18de8865df5 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Thu, 27 Jun 2013 19:05:21 +0300 Subject: net/mlx4_core: Dynamic VST to VST vlan/qos changes Within VST mode, enable modifying the vlan and/or qos for a VF without requiring unbind/rebind. 
This requires firmware which supports the UPDATE_QP command. (If the command is not available, we fall back to requiring unbind/bind to activate these changes). To avoid race conditions with modify-qp on QPs that are affected by update-qp, this operation is performed on the comm_wq. If the update operation succeeds for all the necessary QPs, a vlan_unregister is performed for the abandoned vlan id. Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 122 ++++++++++++++++- drivers/net/ethernet/mellanox/mlx4/fw.c | 5 +- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 20 +++ .../net/ethernet/mellanox/mlx4/resource_tracker.c | 145 ++++++++++++++++++++- include/linux/mlx4/cmd.h | 1 + include/linux/mlx4/device.h | 3 +- include/linux/mlx4/qp.h | 34 +++++ 7 files changed, 319 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index df04c8206ebe..7b927891dc35 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -112,6 +112,14 @@ enum { GO_BIT_TIMEOUT_MSECS = 10000 }; +enum mlx4_vlan_transition { + MLX4_VLAN_TRANSITION_VST_VST = 0, + MLX4_VLAN_TRANSITION_VST_VGT = 1, + MLX4_VLAN_TRANSITION_VGT_VST = 2, + MLX4_VLAN_TRANSITION_VGT_VGT = 3, +}; + + struct mlx4_cmd_context { struct completion done; int result; @@ -792,6 +800,15 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); } +int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + return -EPERM; +} + int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -1225,6 +1242,15 @@ static struct mlx4_cmd_info cmd_info[] = { .verify = NULL, .wrapper = mlx4_GEN_QP_wrapper }, + { + .opcode = MLX4_CMD_UPDATE_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = MLX4_CMD_UPDATE_QP_wrapper + }, { .opcode = MLX4_CMD_CONF_SPECIAL_QP, .has_inbox = false, @@ -1495,6 +1521,72 @@ out: return ret; } + +int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, + int slave, int port) +{ + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_vport_state *vp_admin; + struct mlx4_vf_immed_vlan_work *work; + int err; + int admin_vlan_ix = NO_INDX; + + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + + if (vp_oper->state.default_vlan == vp_admin->default_vlan && + vp_oper->state.default_qos == vp_admin->default_qos) + return 0; + + work = kzalloc(sizeof(*work), GFP_KERNEL); + if (!work) + return -ENOMEM; + + if (vp_oper->state.default_vlan != vp_admin->default_vlan) { + err = __mlx4_register_vlan(&priv->dev, port, + vp_admin->default_vlan, + &admin_vlan_ix); + if (err) { + mlx4_warn((&priv->dev), + "No vlan resources slave %d, port %d\n", + slave, port); + return err; + } + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN; + mlx4_dbg((&(priv->dev)), + "alloc vlan %d idx %d slave %d port %d\n", + (int)(vp_admin->default_vlan), + admin_vlan_ix, slave, port); + } + + /* save original vlan ix and vlan id */ + work->orig_vlan_id = vp_oper->state.default_vlan; + work->orig_vlan_ix = vp_oper->vlan_idx; + + /* handle new qos */ + if 
(vp_oper->state.default_qos != vp_admin->default_qos) + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS; + + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN) + vp_oper->vlan_idx = admin_vlan_ix; + + vp_oper->state.default_vlan = vp_admin->default_vlan; + vp_oper->state.default_qos = vp_admin->default_qos; + + /* iterate over QPs owned by this slave, using UPDATE_QP */ + work->port = port; + work->slave = slave; + work->qos = vp_oper->state.default_qos; + work->vlan_id = vp_oper->state.default_vlan; + work->vlan_ix = vp_oper->vlan_idx; + work->priv = priv; + INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler); + queue_work(priv->mfunc.master.comm_wq, &work->work); + + return 0; +} + + static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) { int port, err; @@ -2109,11 +2201,18 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) } EXPORT_SYMBOL_GPL(mlx4_set_vf_mac); +static int calculate_transition(u16 oper_vlan, u16 admin_vlan) +{ + return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT)); +} + int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) { struct mlx4_priv *priv = mlx4_priv(dev); - struct mlx4_vport_state *s_info; + struct mlx4_vport_oper_state *vf_oper; + struct mlx4_vport_state *vf_admin; int slave; + enum mlx4_vlan_transition vlan_trans; if ((!mlx4_is_master(dev)) || !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL)) @@ -2126,12 +2225,25 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) if (slave < 0) return -EINVAL; - s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + if ((0 == vlan) && (0 == qos)) - s_info->default_vlan = MLX4_VGT; + vf_admin->default_vlan = MLX4_VGT; else - s_info->default_vlan = vlan; - s_info->default_qos = qos; + vf_admin->default_vlan = vlan; + vf_admin->default_qos = qos; + + vlan_trans = calculate_transition(vf_oper->state.default_vlan, + vf_admin->default_vlan); + + if (priv->mfunc.master.slave_state[slave].active && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP && + vlan_trans == MLX4_VLAN_TRANSITION_VST_VST) { + mlx4_info(dev, "updating vf %d port %d config params immediately\n", + vf, port); + mlx4_master_immediate_activate_vlan_qos(priv, slave, port); + } return 0; } EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 569bbe3e7403..8873d6802c80 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -133,7 +133,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [4] = "Automatic MAC reassignment support", [5] = "Time stamping support", [6] = "VST (control vlan insertion/stripping) support", - [7] = "FSM (MAC anti-spoofing) support" + [7] = "FSM (MAC anti-spoofing) support", + [8] = "Dynamic QP updates support" }; int i; @@ -659,6 +660,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + if (field32 & (1 << 16)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; if (field32 & (1 << 26)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL; if (field32 & (1 << 20)) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 75272935a3f7..5abcb6501e30 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -571,6 +571,24 @@ struct mlx4_cmd { u8 comm_toggle; }; +enum { + MLX4_VF_IMMED_VLAN_FLAG_VLAN = 1 << 0, + MLX4_VF_IMMED_VLAN_FLAG_QOS = 1 << 1, +}; +struct mlx4_vf_immed_vlan_work { + struct work_struct work; + struct mlx4_priv *priv; + int flags; + int slave; + int vlan_ix; + int orig_vlan_ix; + u8 port; + u8 qos; + u16 vlan_id; + u16 orig_vlan_id; +}; + + struct mlx4_uar_table { struct mlx4_bitmap bitmap; }; @@ -1218,4 +1236,6 @@ static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev) #define NOT_MASKED_PD_BITS 17 +void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work); + #endif /* MLX4_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1157f028a90f..46323a20060c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -101,6 +101,8 @@ struct res_qp { spinlock_t mcg_spl; int local_qpn; atomic_t ref_count; + u32 qpc_flags; + u8 sched_queue; }; enum res_mtt_states { @@ -355,7 +357,7 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, static int update_vport_qp_param(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, - u8 slave) + u8 slave, u32 qpn) { struct mlx4_qp_context *qpc = inbox->buf + 8; struct mlx4_vport_oper_state *vp_oper; @@ -369,9 +371,17 @@ static int update_vport_qp_param(struct mlx4_dev *dev, if (MLX4_VGT != vp_oper->state.default_vlan) { qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; - if (MLX4_QP_ST_RC == qp_type) + if (MLX4_QP_ST_RC == qp_type || + (MLX4_QP_ST_UD == qp_type && + !mlx4_is_qp_reserved(dev, qpn))) return -EINVAL; + /* the reserved QPs (special, proxy, tunnel) + * do not operate over vlans + */ + if (mlx4_is_qp_reserved(dev, qpn)) + return 0; + /* force strip vlan by clear vsd */ qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); if (0 != vp_oper->state.default_vlan) { @@ -2114,6 +2124,8 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, if (err) return err; qp->local_qpn = local_qpn; + qp->sched_queue = 0; + qp->qpc_flags = be32_to_cpu(qpc->flags); err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); if (err) @@ -2836,6 +2848,9 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, { int err; struct mlx4_qp_context *qpc = inbox->buf + 8; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_qp *qp; + u8 orig_sched_queue; err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); if (err) @@ -2844,11 +2859,30 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, update_pkey_index(dev, slave, inbox); update_gid(dev, inbox, (u8)slave); adjust_proxy_tun_qkey(dev, vhcr, qpc); - err = update_vport_qp_param(dev, inbox, slave); + orig_sched_queue = qpc->pri_path.sched_queue; + err = update_vport_qp_param(dev, inbox, slave, qpn); if (err) return err; - return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + err = get_res(dev, slave, qpn, RES_QP, &qp); + if (err) + return err; + if (qp->com.from_state != RES_QP_HW) { + err = -EBUSY; + goto out; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + /* if no error, save sched queue value passed in by VF. This is + * essentially the QOS value provided by the VF. 
This will be useful + * if we allow dynamic changes from VST back to VGT + */ + if (!err) + qp->sched_queue = orig_sched_queue; + + put_res(dev, slave, qpn, RES_QP); + return err; } int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, @@ -3932,3 +3966,106 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) rem_slave_xrcdns(dev, slave); mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); } + +void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) +{ + struct mlx4_vf_immed_vlan_work *work = + container_of(_work, struct mlx4_vf_immed_vlan_work, work); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_update_qp_context *upd_context; + struct mlx4_dev *dev = &work->priv->dev; + struct mlx4_resource_tracker *tracker = + &work->priv->mfunc.master.res_tracker; + struct list_head *qp_list = + &tracker->slave_list[work->slave].res_list[RES_QP]; + struct res_qp *qp; + struct res_qp *tmp; + u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) | + (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE)); + + int err; + int port, errors = 0; + u8 vlan_control; + + if (mlx4_is_slave(dev)) { + mlx4_warn(dev, "Trying to update-qp in slave %d\n", + work->slave); + goto out; + } + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + goto out; + + if (!work->vlan_id) + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + else + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; + + upd_context = mailbox->buf; + upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask); + upd_context->qp_context.pri_path.vlan_control = vlan_control; + upd_context->qp_context.pri_path.vlan_index = work->vlan_ix; + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(qp, tmp, qp_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (qp->com.owner == work->slave) { + if (qp->com.from_state != RES_QP_HW || + !qp->sched_queue || /* no INIT2RTR trans yet */ + mlx4_is_qp_reserved(dev, qp->local_qpn) || + qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) { + spin_lock_irq(mlx4_tlock(dev)); + continue; + } + port = (qp->sched_queue >> 6 & 1) + 1; + if (port != work->port) { + spin_lock_irq(mlx4_tlock(dev)); + continue; + } + upd_context->qp_context.pri_path.sched_queue = + qp->sched_queue & 0xC7; + upd_context->qp_context.pri_path.sched_queue |= + ((work->qos & 0x7) << 3); + + err = mlx4_cmd(dev, mailbox->dma, + qp->local_qpn & 0xffffff, + 0, MLX4_CMD_UPDATE_QP, + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); + if (err) { + mlx4_info(dev, "UPDATE_QP failed for slave %d, " + "port %d, qpn %d (%d)\n", + work->slave, port, qp->local_qpn, + err); + errors++; + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); + mlx4_free_cmd_mailbox(dev, mailbox); + + if (errors) + mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n", + errors, work->slave, work->port); + + /* unregister previous vlan_id if needed and we had no errors + * while updating the QPs + */ + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors && + NO_INDX != work->orig_vlan_ix) + 
__mlx4_unregister_vlan(&work->priv->dev, work->port, + work->orig_vlan_ix); +out: + kfree(work); + return; +} diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 8074a9711cf1..bb1c8096a7eb 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -111,6 +111,7 @@ enum { MLX4_CMD_INIT2INIT_QP = 0x2d, MLX4_CMD_SUSPEND_QP = 0x32, MLX4_CMD_UNSUSPEND_QP = 0x33, + MLX4_CMD_UPDATE_QP = 0x61, /* special QP and management commands */ MLX4_CMD_CONF_SPECIAL_QP = 0x23, MLX4_CMD_MAD_IFC = 0x24, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index a51b0134ce18..52c23a892bab 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -157,7 +157,8 @@ enum { MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4, MLX4_DEV_CAP_FLAG2_TS = 1LL << 5, MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6, - MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7 + MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7, + MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 8 }; enum { diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 352eec9df1b8..f43e32aa054a 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -206,6 +206,40 @@ struct mlx4_qp_context { u32 reserved5[10]; }; +struct mlx4_update_qp_context { + __be64 qp_mask; + __be64 primary_addr_path_mask; + __be64 secondary_addr_path_mask; + u64 reserved1; + struct mlx4_qp_context qp_context; + u64 reserved2[58]; +}; + +enum { + MLX4_UPD_QP_MASK_PM_STATE = 32, + MLX4_UPD_QP_MASK_VSD = 33, +}; + +enum { + MLX4_UPD_QP_PATH_MASK_PKEY_INDEX = 0 + 32, + MLX4_UPD_QP_PATH_MASK_FSM = 1 + 32, + MLX4_UPD_QP_PATH_MASK_MAC_INDEX = 2 + 32, + MLX4_UPD_QP_PATH_MASK_FVL = 3 + 32, + MLX4_UPD_QP_PATH_MASK_CV = 4 + 32, + MLX4_UPD_QP_PATH_MASK_VLAN_INDEX = 5 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN = 6 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED = 7 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P = 8 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED = 9 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED = 10 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P = 11 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED = 12 + 32, + MLX4_UPD_QP_PATH_MASK_FEUP = 13 + 32, + MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32, + MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32, + MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32, +}; + enum { /* param3 */ MLX4_STRIP_VLAN = 1 << 30 }; -- cgit v1.2.3 From 0a6eac24583848490e9a9c02daef5e33c997431f Mon Sep 17 00:00:00 2001 From: Rony Efraim Date: Thu, 27 Jun 2013 19:05:22 +0300 Subject: net/mlx4_core: Add HW enforcement to VF link state When the firmware supports the UPDATE_QP command, if the VF link is disabled, block all QPs opened by the VF, by programming the UPDATE_QP command to drop all RX & TX traffic to/from these QPs. Operates only in VST mode. Signed-off-by: Rony Efraim Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 57 ++++++++++++++-------- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1 + .../net/ethernet/mellanox/mlx4/resource_tracker.c | 22 +++++++-- include/linux/mlx4/qp.h | 2 + 4 files changed, 60 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 7b927891dc35..707a7d0d08e1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1521,6 +1521,10 @@ out: return ret; } +static int calculate_transition(u16 oper_vlan, u16 admin_vlan) +{ + return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT)); +} int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, int slave, int port) @@ -1528,16 +1532,37 @@ int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, struct mlx4_vport_oper_state *vp_oper; struct mlx4_vport_state *vp_admin; struct mlx4_vf_immed_vlan_work *work; + struct mlx4_dev *dev = &(priv->dev); int err; int admin_vlan_ix = NO_INDX; + enum mlx4_vlan_transition vlan_trans; vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; if (vp_oper->state.default_vlan == vp_admin->default_vlan && - vp_oper->state.default_qos == vp_admin->default_qos) + vp_oper->state.default_qos == vp_admin->default_qos && + vp_oper->state.link_state == vp_admin->link_state) return 0; + vlan_trans = calculate_transition(vp_oper->state.default_vlan, + vp_admin->default_vlan); + + if (!(priv->mfunc.master.slave_state[slave].active && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP && + vlan_trans == MLX4_VLAN_TRANSITION_VST_VST)) { + /* even if the UPDATE_QP command isn't supported, we still want + * to set this VF link according to the admin directive + */ + vp_oper->state.link_state = vp_admin->link_state; + return -1; + } + + mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n", + slave, port); + mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan, + vp_admin->default_qos, vp_admin->link_state); + work = kzalloc(sizeof(*work), GFP_KERNEL); if (!work) return -ENOMEM; @@ -1572,6 +1597,10 @@ int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, vp_oper->state.default_vlan = vp_admin->default_vlan; vp_oper->state.default_qos = vp_admin->default_qos; + vp_oper->state.link_state = vp_admin->link_state; + + if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE) + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE; /* iterate over QPs owned by this slave, using UPDATE_QP */ work->port = port; @@ -2201,10 +2230,6 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) } EXPORT_SYMBOL_GPL(mlx4_set_vf_mac); -static int calculate_transition(u16 oper_vlan, u16 admin_vlan) -{ - return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT)); -} int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) { @@ -2212,7 +2237,6 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) struct mlx4_vport_oper_state *vf_oper; struct mlx4_vport_state *vf_admin; int slave; - enum mlx4_vlan_transition vlan_trans; if ((!mlx4_is_master(dev)) || !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL)) @@ -2234,16 +2258,10 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) vf_admin->default_vlan = vlan; vf_admin->default_qos = qos; - vlan_trans = calculate_transition(vf_oper->state.default_vlan, - 
vf_admin->default_vlan); - - if (priv->mfunc.master.slave_state[slave].active && - dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP && - vlan_trans == MLX4_VLAN_TRANSITION_VST_VST) { - mlx4_info(dev, "updating vf %d port %d config params immediately\n", + if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port)) + mlx4_info(dev, + "updating vf %d port %d config will take effect on next VF restart\n", vf, port); - mlx4_master_immediate_activate_vlan_qos(priv, slave, port); - } return 0; } EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); @@ -2307,7 +2325,6 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_vport_state *s_info; - struct mlx4_vport_oper_state *vp_oper; int slave; u8 link_stat_event; @@ -2337,14 +2354,16 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat link_state, slave, port); return -EINVAL; }; - /* update the admin & oper state on the link state */ s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; - vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; s_info->link_state = link_state; - vp_oper->state.link_state = link_state; /* send event */ mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event); + + if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port)) + mlx4_dbg(dev, + "updating vf %d port %d no link state HW enforcment\n", + vf, port); return 0; } EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 5abcb6501e30..17d9277e33ef 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -574,6 +574,7 @@ struct mlx4_cmd { enum { MLX4_VF_IMMED_VLAN_FLAG_VLAN = 1 << 0, MLX4_VF_IMMED_VLAN_FLAG_QOS = 1 << 1, + MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE = 1 << 2, }; struct mlx4_vf_immed_vlan_work { struct work_struct work; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 46323a20060c..f984a89c27df 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -384,7 +384,17 @@ static int update_vport_qp_param(struct mlx4_dev *dev, /* force strip vlan by clear vsd */ qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); - if (0 != vp_oper->state.default_vlan) { + + if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + } else if (0 != vp_oper->state.default_vlan) { qpc->pri_path.vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | @@ -4002,8 +4012,14 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) goto out; - - if (!work->vlan_id) + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */ + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + else if (!work->vlan_id) vlan_control = 
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; else diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index f43e32aa054a..262deac02c9e 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -152,6 +152,8 @@ enum { /* fl */ }; enum { /* vlan_control */ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6, + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */ + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED = 1 << 4, MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED = 1 << 2, MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED = 1 << 0 -- cgit v1.2.3 From ef0cc4b1d296ce041e3b24d103177dd70642740c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 29 Jun 2013 19:23:13 +0200 Subject: alx: treat flow control correctly in alx_set_pauseparam() Even when alx_setup_speed_duplex() is called, we still need to call alx_cfg_mac_flowcontrol() and set hw->flowctrl if flow control changed. This was a bug I accidentally introduced while simplifying the original driver. Reported-by: Ben Hutchings Signed-off-by: Johannes Berg Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/alx/ethtool.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c index 6fa2aec2bc81..50a91d012bce 100644 --- a/drivers/net/ethernet/atheros/alx/ethtool.c +++ b/drivers/net/ethernet/atheros/alx/ethtool.c @@ -187,7 +187,8 @@ static int alx_set_pauseparam(struct net_device *netdev, if (reconfig_phy) { err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc); - return err; + if (err) + return err; } /* flow control on mac */ -- cgit v1.2.3 From 17fdd35268f3856702dae5c3c0d8f756ec2c6d2d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 29 Jun 2013 19:23:14 +0200 Subject: alx: fix 100mbit/half duplex speed translation 100mbit half duplex is ADVERTISED_100baseT_Half, not ADVERTISED_10baseT_Half. Signed-off-by: Johannes Berg Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/alx/hw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c index 220a16ad0e49..dc71cfb6ce61 100644 --- a/drivers/net/ethernet/atheros/alx/hw.c +++ b/drivers/net/ethernet/atheros/alx/hw.c @@ -1134,7 +1134,7 @@ static inline u32 alx_speed_to_ethadv(int speed) case SPEED_100 + DUPLEX_FULL: return ADVERTISED_100baseT_Full; case SPEED_100 + DUPLEX_HALF: - return ADVERTISED_10baseT_Half; + return ADVERTISED_100baseT_Half; case SPEED_10 + DUPLEX_FULL: return ADVERTISED_10baseT_Full; case SPEED_10 + DUPLEX_HALF: -- cgit v1.2.3 From c43861d35a73cfc86ae0506b60e73852849bca6a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 29 Jun 2013 19:23:15 +0200 Subject: alx: remove NET_CORE Kconfig select That select doesn't make any sense, remove it. Signed-off-by: Johannes Berg Signed-off-by: David S. 
Miller --- drivers/net/ethernet/atheros/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig index 39e8d6cc8843..58ad37c733bc 100644 --- a/drivers/net/ethernet/atheros/Kconfig +++ b/drivers/net/ethernet/atheros/Kconfig @@ -67,7 +67,6 @@ config ALX tristate "Qualcomm Atheros AR816x/AR817x support" depends on PCI select CRC32 - select NET_CORE select MDIO help This driver supports the Qualcomm Atheros L1F ethernet adapter, -- cgit v1.2.3 From 4a134c39db2d9d6f31c55e7c3773fc33189c9320 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 29 Jun 2013 19:23:16 +0200 Subject: alx: make sizes unsigned The ring sizes should be unsigned, pointed out by Ben Hutchings. Reported-by: Ben Hutchings Signed-off-by: Johannes Berg Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/alx/alx.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h index 50b3ae2b143d..d71103dbf2cd 100644 --- a/drivers/net/ethernet/atheros/alx/alx.h +++ b/drivers/net/ethernet/atheros/alx/alx.h @@ -85,16 +85,16 @@ struct alx_priv { struct { dma_addr_t dma; void *virt; - int size; + unsigned int size; } descmem; /* protect int_mask updates */ spinlock_t irq_lock; u32 int_mask; - int tx_ringsz; - int rx_ringsz; - int rxbuf_size; + unsigned int tx_ringsz; + unsigned int rx_ringsz; + unsigned int rxbuf_size; struct napi_struct napi; struct alx_tx_queue txq; -- cgit v1.2.3 From a5b87cc9e0538bf6680d431e0076d778e5bae38e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 29 Jun 2013 19:23:17 +0200 Subject: alx: separate link speed/duplex fields As suggested by Ben Hutchings, use separate fields to track current link speed and duplex setting. Reported-by: Ben Hutchings Signed-off-by: Johannes Berg Signed-off-by: David S. 
Miller --- drivers/net/ethernet/atheros/alx/ethtool.c | 31 ++----- drivers/net/ethernet/atheros/alx/hw.c | 139 +++++++++++++---------------- drivers/net/ethernet/atheros/alx/hw.h | 24 ++++- drivers/net/ethernet/atheros/alx/main.c | 37 ++++---- 4 files changed, 106 insertions(+), 125 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c index 50a91d012bce..5e19e08b691b 100644 --- a/drivers/net/ethernet/atheros/alx/ethtool.c +++ b/drivers/net/ethernet/atheros/alx/ethtool.c @@ -85,14 +85,8 @@ static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) } } - if (hw->link_speed != SPEED_UNKNOWN) { - ethtool_cmd_speed_set(ecmd, - hw->link_speed - hw->link_speed % 10); - ecmd->duplex = hw->link_speed % 10; - } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; - } + ethtool_cmd_speed_set(ecmd, hw->link_speed); + ecmd->duplex = hw->duplex; return 0; } @@ -110,24 +104,11 @@ static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) return -EINVAL; adv_cfg = ecmd->advertising | ADVERTISED_Autoneg; } else { - int speed = ethtool_cmd_speed(ecmd); - - switch (speed + ecmd->duplex) { - case SPEED_10 + DUPLEX_HALF: - adv_cfg = ADVERTISED_10baseT_Half; - break; - case SPEED_10 + DUPLEX_FULL: - adv_cfg = ADVERTISED_10baseT_Full; - break; - case SPEED_100 + DUPLEX_HALF: - adv_cfg = ADVERTISED_100baseT_Half; - break; - case SPEED_100 + DUPLEX_FULL: - adv_cfg = ADVERTISED_100baseT_Full; - break; - default: + adv_cfg = alx_speed_to_ethadv(ethtool_cmd_speed(ecmd), + ecmd->duplex); + + if (!adv_cfg || adv_cfg == ADVERTISED_1000baseT_Full) return -EINVAL; - } } hw->adv_cfg = adv_cfg; diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c index dc71cfb6ce61..aed48a7f1ca6 100644 --- a/drivers/net/ethernet/atheros/alx/hw.c +++ b/drivers/net/ethernet/atheros/alx/hw.c @@ -624,12 +624,12 @@ void alx_start_mac(struct alx_hw *hw) alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN); mac = hw->rx_ctrl; - if (hw->link_speed % 10 == DUPLEX_FULL) + if (hw->duplex == DUPLEX_FULL) mac |= ALX_MAC_CTRL_FULLD; else mac &= ~ALX_MAC_CTRL_FULLD; ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, - hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 : + hw->link_speed == SPEED_1000 ? 
ALX_MAC_CTRL_SPEED_1000 : ALX_MAC_CTRL_SPEED_10_100); mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN; hw->rx_ctrl = mac; @@ -790,28 +790,22 @@ void alx_post_phy_link(struct alx_hw *hw) u16 phy_val, len, agc; u8 revid = alx_hw_revision(hw); bool adj_th = revid == ALX_REV_B0; - int speed; - - if (hw->link_speed == SPEED_UNKNOWN) - speed = SPEED_UNKNOWN; - else - speed = hw->link_speed - hw->link_speed % 10; if (revid != ALX_REV_B0 && !alx_is_rev_a(revid)) return; /* 1000BT/AZ, wrong cable length */ - if (speed != SPEED_UNKNOWN) { + if (hw->link_speed != SPEED_UNKNOWN) { alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6, &phy_val); len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN); alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val); agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA); - if ((speed == SPEED_1000 && + if ((hw->link_speed == SPEED_1000 && (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G || (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) || - (speed == SPEED_100 && + (hw->link_speed == SPEED_100 && (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M || (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) { alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, @@ -831,10 +825,10 @@ void alx_post_phy_link(struct alx_hw *hw) /* threshold adjust */ if (adj_th && hw->lnk_patch) { - if (speed == SPEED_100) { + if (hw->link_speed == SPEED_100) { alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, ALX_MSE16DB_UP); - } else if (speed == SPEED_1000) { + } else if (hw->link_speed == SPEED_1000) { /* * Giga link threshold, raise the tolerance of * noise 50% @@ -869,7 +863,7 @@ void alx_post_phy_link(struct alx_hw *hw) * 1. phy link must be established before calling this function * 2. wol option (pattern,magic,link,etc.) is configed before call it. */ -int alx_pre_suspend(struct alx_hw *hw, int speed) +int alx_pre_suspend(struct alx_hw *hw, int speed, u8 duplex) { u32 master, mac, phy, val; int err = 0; @@ -897,9 +891,9 @@ int alx_pre_suspend(struct alx_hw *hw, int speed) mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN; if (hw->sleep_ctrl & ALX_SLEEP_CIFS) mac |= ALX_MAC_CTRL_TX_EN; - if (speed % 10 == DUPLEX_FULL) + if (duplex == DUPLEX_FULL) mac |= ALX_MAC_CTRL_FULLD; - if (speed >= SPEED_1000) + if (speed == SPEED_1000) ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_1000); phy |= ALX_PHY_CTRL_DSPRST_OUT; @@ -938,7 +932,7 @@ bool alx_phy_configured(struct alx_hw *hw) return cfg == hw_cfg; } -int alx_get_phy_link(struct alx_hw *hw, int *speed) +int alx_read_phy_link(struct alx_hw *hw) { struct pci_dev *pdev = hw->pdev; u16 bmsr, giga; @@ -953,7 +947,8 @@ int alx_get_phy_link(struct alx_hw *hw, int *speed) return err; if (!(bmsr & BMSR_LSTATUS)) { - *speed = SPEED_UNKNOWN; + hw->link_speed = SPEED_UNKNOWN; + hw->duplex = DUPLEX_UNKNOWN; return 0; } @@ -967,20 +962,20 @@ int alx_get_phy_link(struct alx_hw *hw, int *speed) switch (giga & ALX_GIGA_PSSR_SPEED) { case ALX_GIGA_PSSR_1000MBS: - *speed = SPEED_1000; + hw->link_speed = SPEED_1000; break; case ALX_GIGA_PSSR_100MBS: - *speed = SPEED_100; + hw->link_speed = SPEED_100; break; case ALX_GIGA_PSSR_10MBS: - *speed = SPEED_10; + hw->link_speed = SPEED_10; break; default: goto wrong_speed; } - *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF; - return 1; + hw->duplex = (giga & ALX_GIGA_PSSR_DPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; + return 0; wrong_speed: dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga); @@ -1126,81 +1121,67 @@ void alx_configure_basic(struct alx_hw *hw) alx_write_mem32(hw, ALX_WRR, val); } -static inline u32 alx_speed_to_ethadv(int speed) -{ - switch (speed) { - case SPEED_1000 + DUPLEX_FULL: - return ADVERTISED_1000baseT_Full; - case SPEED_100 + DUPLEX_FULL: - return ADVERTISED_100baseT_Full; - case SPEED_100 + DUPLEX_HALF: - return ADVERTISED_100baseT_Half; - case SPEED_10 + DUPLEX_FULL: - return ADVERTISED_10baseT_Full; - case SPEED_10 + DUPLEX_HALF: - return ADVERTISED_10baseT_Half; - default: - return 0; - } -} - -int alx_select_powersaving_speed(struct alx_hw *hw, int *speed) +int alx_select_powersaving_speed(struct alx_hw *hw, int *speed, u8 *duplex) { - int i, err, spd; + int i, err; u16 lpa; - err = alx_get_phy_link(hw, &spd); - if (err < 0) + err = alx_read_phy_link(hw); + if (err) return err; - if (spd == SPEED_UNKNOWN) + if (hw->link_speed == SPEED_UNKNOWN) { + *speed = SPEED_UNKNOWN; + *duplex = DUPLEX_UNKNOWN; return 0; + } err = alx_read_phy_reg(hw, MII_LPA, &lpa); if (err) return err; if (!(lpa & LPA_LPACK)) { - *speed = spd; + *speed = hw->link_speed; return 0; } - if (lpa & LPA_10FULL) - *speed = SPEED_10 + DUPLEX_FULL; - else if (lpa & LPA_10HALF) - *speed = SPEED_10 + DUPLEX_HALF; - else if (lpa & LPA_100FULL) - *speed = SPEED_100 + DUPLEX_FULL; - else - *speed = SPEED_100 + DUPLEX_HALF; - - if (*speed != spd) { - err = alx_write_phy_reg(hw, ALX_MII_IER, 0); - if (err) - return err; - err = alx_setup_speed_duplex(hw, - alx_speed_to_ethadv(*speed) | - ADVERTISED_Autoneg, - ALX_FC_ANEG | ALX_FC_RX | - ALX_FC_TX); - if (err) - return err; + if (lpa & LPA_10FULL) { + *speed = SPEED_10; + *duplex = DUPLEX_FULL; + } else if (lpa & LPA_10HALF) { + *speed = SPEED_10; + *duplex = DUPLEX_HALF; + } else if (lpa & LPA_100FULL) { + *speed = SPEED_100; + *duplex = DUPLEX_FULL; + } else { + *speed = SPEED_100; + *duplex = DUPLEX_HALF; + } - /* wait for linkup */ - for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) { - int speed2; + if (*speed == hw->link_speed && *duplex == hw->duplex) + return 0; + err = alx_write_phy_reg(hw, ALX_MII_IER, 0); + if (err) + return err; + err = alx_setup_speed_duplex(hw, alx_speed_to_ethadv(*speed, *duplex) | + ADVERTISED_Autoneg, ALX_FC_ANEG | + ALX_FC_RX | ALX_FC_TX); + if (err) + return err; - msleep(100); + /* wait for linkup */ + for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) { + msleep(100); - err = alx_get_phy_link(hw, &speed2); - if (err < 0) - return err; - if (speed2 != SPEED_UNKNOWN) - break; - } - if (i == ALX_MAX_SETUP_LNK_CYCLE) - return -ETIMEDOUT; + err = alx_read_phy_link(hw); + if (err < 0) + return err; + if (hw->link_speed != SPEED_UNKNOWN) + break; } + if (i == ALX_MAX_SETUP_LNK_CYCLE) + return -ETIMEDOUT; return 0; } diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h index 65e723d2172a..a60e35c1c1b5 100644 --- a/drivers/net/ethernet/atheros/alx/hw.h +++ b/drivers/net/ethernet/atheros/alx/hw.h @@ -412,10 +412,11 @@ struct alx_hw { u32 smb_timer; /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */ int link_speed; + u8 duplex; /* auto-neg advertisement or force mode config */ - u32 adv_cfg; u8 flowctrl; + u32 adv_cfg; u32 sleep_ctrl; @@ -478,12 +479,12 @@ void alx_reset_pcie(struct alx_hw *hw); void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en); int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl); void alx_post_phy_link(struct alx_hw 
*hw); -int alx_pre_suspend(struct alx_hw *hw, int speed); +int alx_pre_suspend(struct alx_hw *hw, int speed, u8 duplex); int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data); int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data); int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata); int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data); -int alx_get_phy_link(struct alx_hw *hw, int *speed); +int alx_read_phy_link(struct alx_hw *hw); int alx_clear_phy_intr(struct alx_hw *hw); int alx_config_wol(struct alx_hw *hw); void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc); @@ -493,7 +494,22 @@ void alx_set_macaddr(struct alx_hw *hw, const u8 *addr); bool alx_phy_configured(struct alx_hw *hw); void alx_configure_basic(struct alx_hw *hw); void alx_disable_rss(struct alx_hw *hw); -int alx_select_powersaving_speed(struct alx_hw *hw, int *speed); +int alx_select_powersaving_speed(struct alx_hw *hw, int *speed, u8 *duplex); bool alx_get_phy_info(struct alx_hw *hw); +static inline u32 alx_speed_to_ethadv(int speed, u8 duplex) +{ + if (speed == SPEED_1000 && duplex == DUPLEX_FULL) + return ADVERTISED_1000baseT_Full; + if (speed == SPEED_100 && duplex == DUPLEX_FULL) + return ADVERTISED_100baseT_Full; + if (speed == SPEED_100 && duplex== DUPLEX_HALF) + return ADVERTISED_100baseT_Half; + if (speed == SPEED_10 && duplex == DUPLEX_FULL) + return ADVERTISED_10baseT_Full; + if (speed == SPEED_10 && duplex == DUPLEX_HALF) + return ADVERTISED_10baseT_Half; + return 0; +} + #endif diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 418de8b13165..148b4b976474 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -712,6 +712,7 @@ static int alx_init_sw(struct alx_priv *alx) hw->dma_chnl = hw->max_dma_chnl; hw->ith_tpd = alx->tx_ringsz / 3; hw->link_speed = SPEED_UNKNOWN; + hw->duplex = DUPLEX_UNKNOWN; hw->adv_cfg = ADVERTISED_Autoneg | ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | @@ -758,6 +759,7 @@ static void alx_halt(struct alx_priv *alx) alx_netif_stop(alx); hw->link_speed = SPEED_UNKNOWN; + hw->duplex = DUPLEX_UNKNOWN; alx_reset_mac(hw); @@ -869,18 +871,18 @@ static void __alx_stop(struct alx_priv *alx) alx_free_rings(alx); } -static const char *alx_speed_desc(u16 speed) +static const char *alx_speed_desc(struct alx_hw *hw) { - switch (speed) { - case SPEED_1000 + DUPLEX_FULL: + switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) { + case ADVERTISED_1000baseT_Full: return "1 Gbps Full"; - case SPEED_100 + DUPLEX_FULL: + case ADVERTISED_100baseT_Full: return "100 Mbps Full"; - case SPEED_100 + DUPLEX_HALF: + case ADVERTISED_100baseT_Half: return "100 Mbps Half"; - case SPEED_10 + DUPLEX_FULL: + case ADVERTISED_10baseT_Full: return "10 Mbps Full"; - case SPEED_10 + DUPLEX_HALF: + case ADVERTISED_10baseT_Half: return "10 Mbps Half"; default: return "Unknown speed"; @@ -891,7 +893,8 @@ static void alx_check_link(struct alx_priv *alx) { struct alx_hw *hw = &alx->hw; unsigned long flags; - int speed, old_speed; + int old_speed; + u8 old_duplex; int err; /* clear PHY internal interrupt status, otherwise the main @@ -899,7 +902,9 @@ static void alx_check_link(struct alx_priv *alx) */ alx_clear_phy_intr(hw); - err = alx_get_phy_link(hw, &speed); + old_speed = hw->link_speed; + old_duplex = hw->duplex; + err = alx_read_phy_link(hw); if (err < 0) goto reset; @@ -908,15 +913,12 @@ static void alx_check_link(struct alx_priv *alx) alx_write_mem32(hw, ALX_IMR, 
alx->int_mask); spin_unlock_irqrestore(&alx->irq_lock, flags); - old_speed = hw->link_speed; - - if (old_speed == speed) + if (old_speed == hw->link_speed) return; - hw->link_speed = speed; - if (speed != SPEED_UNKNOWN) { + if (hw->link_speed != SPEED_UNKNOWN) { netif_info(alx, link, alx->dev, - "NIC Up: %s\n", alx_speed_desc(speed)); + "NIC Up: %s\n", alx_speed_desc(hw)); alx_post_phy_link(hw); alx_enable_aspm(hw, true, true); alx_start_mac(hw); @@ -965,6 +967,7 @@ static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en) struct net_device *netdev = alx->dev; struct alx_hw *hw = &alx->hw; int err, speed; + u8 duplex; netif_device_detach(netdev); @@ -977,13 +980,13 @@ static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en) return err; #endif - err = alx_select_powersaving_speed(hw, &speed); + err = alx_select_powersaving_speed(hw, &speed, &duplex); if (err) return err; err = alx_clear_phy_intr(hw); if (err) return err; - err = alx_pre_suspend(hw, speed); + err = alx_pre_suspend(hw, speed, duplex); if (err) return err; err = alx_config_wol(hw); -- cgit v1.2.3 From 46ab9b347d677976b2b4072837cb4065839c0b80 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 29 Jun 2013 19:23:18 +0200 Subject: alx: fix MAC address alignment problem In two places, parts of MAC addresses are used as u32/u16 values. This can cause alignment problems, use put_unaligned and get_unaligned to fix this. Reported-by: Ben Hutchings Signed-off-by: Johannes Berg Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/alx/hw.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c index aed48a7f1ca6..ea99e5d8743f 100644 --- a/drivers/net/ethernet/atheros/alx/hw.c +++ b/drivers/net/ethernet/atheros/alx/hw.c @@ -282,8 +282,8 @@ static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr) mac1 = alx_read_mem32(hw, ALX_STAD1); /* addr should be big-endian */ - *(__be32 *)(addr + 2) = cpu_to_be32(mac0); - *(__be16 *)addr = cpu_to_be16(mac1); + put_unaligned(cpu_to_be32(mac0), (__be32 *)(addr + 2)); + put_unaligned(cpu_to_be16(mac1), (__be16 *)addr); return is_valid_ether_addr(addr); } @@ -326,9 +326,9 @@ void alx_set_macaddr(struct alx_hw *hw, const u8 *addr) u32 val; /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */ - val = be32_to_cpu(*(__be32 *)(addr + 2)); + val = be32_to_cpu(get_unaligned((__be32 *)(addr + 2))); alx_write_mem32(hw, ALX_STAD0, val); - val = be16_to_cpu(*(__be16 *)addr); + val = be16_to_cpu(get_unaligned((__be16 *)addr)); alx_write_mem32(hw, ALX_STAD1, val); } -- cgit v1.2.3 From 7ec5689461989fd80f1cf82ae084f5d50a5e63ee Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 29 Jun 2013 19:23:19 +0200 Subject: alx: fix ethtool support code A number of places treated features wrongly, listing not-supported features instead of supported ones. Also, the get_drvinfo ethtool callback isn't needed, and alx_get_pauseparam can be simplified. Reported-by: Ben Hutchings Signed-off-by: Johannes Berg Signed-off-by: David S. 
Miller --- drivers/net/ethernet/atheros/alx/ethtool.c | 64 ++++++++++++++---------------- 1 file changed, 29 insertions(+), 35 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c index 5e19e08b691b..926100626d60 100644 --- a/drivers/net/ethernet/atheros/alx/ethtool.c +++ b/drivers/net/ethernet/atheros/alx/ethtool.c @@ -46,21 +46,37 @@ #include "reg.h" #include "hw.h" +static u32 alx_get_supported_speeds(struct alx_hw *hw) +{ + u32 supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full; + + if (alx_hw_giga(hw)) + supported |= SUPPORTED_1000baseT_Full; + + BUILD_BUG_ON(SUPPORTED_10baseT_Half != ADVERTISED_10baseT_Half); + BUILD_BUG_ON(SUPPORTED_10baseT_Full != ADVERTISED_10baseT_Full); + BUILD_BUG_ON(SUPPORTED_100baseT_Half != ADVERTISED_100baseT_Half); + BUILD_BUG_ON(SUPPORTED_100baseT_Full != ADVERTISED_100baseT_Full); + BUILD_BUG_ON(SUPPORTED_1000baseT_Full != ADVERTISED_1000baseT_Full); + + return supported; +} static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; - ecmd->supported = SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | + ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_TP | - SUPPORTED_Pause; + SUPPORTED_Pause | + SUPPORTED_Asym_Pause; if (alx_hw_giga(hw)) ecmd->supported |= SUPPORTED_1000baseT_Full; + ecmd->supported |= alx_get_supported_speeds(hw); ecmd->advertising = ADVERTISED_TP; if (hw->adv_cfg & ADVERTISED_Autoneg) @@ -68,6 +84,7 @@ static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->port = PORT_TP; ecmd->phy_address = 0; + if (hw->adv_cfg & ADVERTISED_Autoneg) ecmd->autoneg = AUTONEG_ENABLE; else @@ -100,7 +117,7 @@ static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ASSERT_RTNL(); if (ecmd->autoneg == AUTONEG_ENABLE) { - if (ecmd->advertising & ADVERTISED_1000baseT_Half) + if (ecmd->advertising & ~alx_get_supported_speeds(hw)) return -EINVAL; adv_cfg = ecmd->advertising | ADVERTISED_Autoneg; } else { @@ -121,21 +138,10 @@ static void alx_get_pauseparam(struct net_device *netdev, struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; - if (hw->flowctrl & ALX_FC_ANEG && - hw->adv_cfg & ADVERTISED_Autoneg) - pause->autoneg = AUTONEG_ENABLE; - else - pause->autoneg = AUTONEG_DISABLE; - - if (hw->flowctrl & ALX_FC_TX) - pause->tx_pause = 1; - else - pause->tx_pause = 0; - - if (hw->flowctrl & ALX_FC_RX) - pause->rx_pause = 1; - else - pause->rx_pause = 0; + pause->autoneg = !!(hw->flowctrl & ALX_FC_ANEG && + hw->adv_cfg & ADVERTISED_Autoneg); + pause->tx_pause = !!(hw->flowctrl & ALX_FC_TX); + pause->rx_pause = !!(hw->flowctrl & ALX_FC_RX); } @@ -214,8 +220,7 @@ static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; - if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | - WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) + if (wol->wolopts & ~(WAKE_MAGIC | WAKE_PHY)) return -EOPNOTSUPP; hw->sleep_ctrl = 0; @@ -230,22 +235,11 @@ static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return 0; } -static void alx_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct alx_priv *alx = netdev_priv(netdev); - - 
strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev), - sizeof(drvinfo->bus_info)); -} - const struct ethtool_ops alx_ethtool_ops = { .get_settings = alx_get_settings, .set_settings = alx_set_settings, .get_pauseparam = alx_get_pauseparam, .set_pauseparam = alx_set_pauseparam, - .get_drvinfo = alx_get_drvinfo, .get_msglevel = alx_get_msglevel, .set_msglevel = alx_set_msglevel, .get_wol = alx_get_wol, -- cgit v1.2.3 From 772e42b07fbdc650206746e00cb2914e362594a3 Mon Sep 17 00:00:00 2001 From: Christoph Müllner Date: Thu, 27 Jun 2013 21:18:23 +0200 Subject: net: fec: Fix multicast list setup in fec_restart(). Setup the multicast list of the net_device instead of clearing it blindly. This restores the multicast groups in case of a link down/up event or when resuming from suspend. Signed-off-by: Christoph Muellner Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ed6180e7db2f..5664acd8e0f4 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -59,6 +59,8 @@ #include "fec.h" +static void set_multicast_list(struct net_device *ndev); + #if defined(CONFIG_ARM) #define FEC_ALIGNMENT 0xf #else @@ -470,9 +472,8 @@ fec_restart(struct net_device *ndev, int duplex) /* Clear any outstanding interrupt. */ writel(0xffc00000, fep->hwp + FEC_IEVENT); - /* Reset all multicast. */ - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); + /* Setup multicast filter. */ + set_multicast_list(ndev); #ifndef CONFIG_M5272 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); writel(0, fep->hwp + FEC_HASH_TABLE_LOW); -- cgit v1.2.3 From 0d909dcdeffeee3c42703dc1d17766d94fd47b71 Mon Sep 17 00:00:00 2001 From: Byungho An Date: Fri, 28 Jun 2013 16:35:31 +0900 Subject: net: stmmac: fixed operator typo This patch fixed operator typo from & to ==. Due to incorrect operator, the result is incorrect. Signed-off-by: Byungho An Acked-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ee919ca8b8a0..4fb74a07c01c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -788,13 +788,13 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv) int interface = priv->plat->interface; if (priv->dma_cap.pcs) { - if ((interface & PHY_INTERFACE_MODE_RGMII) || - (interface & PHY_INTERFACE_MODE_RGMII_ID) || - (interface & PHY_INTERFACE_MODE_RGMII_RXID) || - (interface & PHY_INTERFACE_MODE_RGMII_TXID)) { + if ((interface == PHY_INTERFACE_MODE_RGMII) || + (interface == PHY_INTERFACE_MODE_RGMII_ID) || + (interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { pr_debug("STMMAC: PCS RGMII support enable\n"); priv->pcs = STMMAC_PCS_RGMII; - } else if (interface & PHY_INTERFACE_MODE_SGMII) { + } else if (interface == PHY_INTERFACE_MODE_SGMII) { pr_debug("STMMAC: PCS SGMII support enable\n"); priv->pcs = STMMAC_PCS_SGMII; } -- cgit v1.2.3 From 61369d0259a724606f3941a46d2c75155d66cf2c Mon Sep 17 00:00:00 2001 From: Byungho An Date: Fri, 28 Jun 2013 16:35:32 +0900 Subject: net: stmmac: fixed enh_desc set always zero This patch fixes the enh_desc value always being set to zero. Due to the calling order of stmmac_selec_desc_mode(), the enh_desc value is always zero. Even though the MAC is set to use enhanced DMA descriptors, if enh_desc is zero the functions related to the DMA descriptors do not work correctly. Signed-off-by: Byungho An Acked-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 4fb74a07c01c..520693385d8d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2566,9 +2566,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv) /* Get and dump the chip ID */ priv->synopsys_id = stmmac_get_synopsys_id(priv); - /* To use alternate (extended) or normal descriptor structures */ - stmmac_selec_desc_mode(priv); - /* To use the chained or ring mode */ if (chain_mode) { priv->hw->chain = &chain_mode_ops; @@ -2603,6 +2600,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv) } else pr_info(" No HW DMA feature register supported"); + /* To use alternate (extended) or normal descriptor structures */ + stmmac_selec_desc_mode(priv); + ret = priv->hw->mac->rx_ipc(priv->ioaddr); if (!ret) { pr_warn(" RX IPC Checksum Offload not configured.\n"); -- cgit v1.2.3 From 15627e847e4356ebdd49e7c3f10a671819995be6 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 1 Jul 2013 00:13:27 +0100 Subject: cassini: Make missing firmware non-fatal The firmware patch for the Saturn PHY fixes a bug, but is not absolutely essential. And its licence is unclear, so it is not included in all distributions. Just log an error message and continue if it is missing or invalid. References: http://bugs.debian.org/712674 Signed-off-by: Ben Hutchings Tested-by: Jose Andres Arias Velichko (against 3.2) Signed-off-by: David S.
Miller --- drivers/net/ethernet/sun/cassini.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 4c682a3d0424..759441b29e53 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -808,44 +808,43 @@ static int cas_reset_mii_phy(struct cas *cp) return limit <= 0; } -static int cas_saturn_firmware_init(struct cas *cp) +static void cas_saturn_firmware_init(struct cas *cp) { const struct firmware *fw; const char fw_name[] = "sun/cassini.bin"; int err; if (PHY_NS_DP83065 != cp->phy_id) - return 0; + return; err = request_firmware(&fw, fw_name, &cp->pdev->dev); if (err) { pr_err("Failed to load firmware \"%s\"\n", fw_name); - return err; + return; } if (fw->size < 2) { pr_err("bogus length %zu in \"%s\"\n", fw->size, fw_name); - err = -EINVAL; goto out; } cp->fw_load_addr= fw->data[1] << 8 | fw->data[0]; cp->fw_size = fw->size - 2; cp->fw_data = vmalloc(cp->fw_size); - if (!cp->fw_data) { - err = -ENOMEM; + if (!cp->fw_data) goto out; - } memcpy(cp->fw_data, &fw->data[2], cp->fw_size); out: release_firmware(fw); - return err; } static void cas_saturn_firmware_load(struct cas *cp) { int i; + if (!cp->fw_data) + return; + cas_phy_powerdown(cp); /* expanded memory access mode */ @@ -5083,8 +5082,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (cas_check_invariants(cp)) goto err_out_iounmap; if (cp->cas_flags & CAS_FLAG_SATURN) - if (cas_saturn_firmware_init(cp)) - goto err_out_iounmap; + cas_saturn_firmware_init(cp); cp->init_block = (struct cas_init_block *) pci_alloc_consistent(pdev, sizeof(struct cas_init_block), -- cgit v1.2.3 From 058eec4116935c5640299913e1e0715e87ec622a Mon Sep 17 00:00:00 2001 From: Michal Schmidt Date: Mon, 1 Jul 2013 17:23:05 +0200 Subject: bnx2x: remove zeroing of dump data buffer There is no need to initialize the dump data with zeros. data is allocated with vzalloc, so it's already zero-filled. More importantly, the memset is harmful, because dump->len (the length requested by userspace) can be bigger than the allocated buffer (whose size is determined by asking the driver's .get_dump_flag method). Signed-off-by: Michal Schmidt Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index b8c067d1a0f2..4f75ae854b3d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -985,8 +985,6 @@ static int bnx2x_get_dump_data(struct net_device *dev, struct bnx2x *bp = netdev_priv(dev); struct dump_header dump_hdr = {0}; - memset(p, 0, dump->len); - /* Disable parity attentions as long as following dump may * cause false alarms by reading never written registers. We * will re-enable parity attentions right after the dump. -- cgit v1.2.3 From 5bb680d6cbe36de9d7ba12b05f845c91a8692318 Mon Sep 17 00:00:00 2001 From: Michal Schmidt Date: Mon, 1 Jul 2013 17:23:06 +0200 Subject: bnx2x: fix dump flag handling bnx2x interprets the dump flag as an index of a register preset. It is important to validate the index to avoid out of bounds memory accesses. Signed-off-by: Michal Schmidt Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 3 +++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2 ++ 2 files changed, 5 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 4f75ae854b3d..cf5cebc358b8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -959,6 +959,9 @@ static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val) struct bnx2x *bp = netdev_priv(dev); /* Use the ethtool_dump "flag" field as the dump preset index */ + if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS) + return -EINVAL; + bp->dump_preset_idx = val->flag; return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 740518bbcb5f..15a528bda87c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -11636,6 +11636,8 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->min_msix_vec_cnt = 2; BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); + bp->dump_preset_idx = 1; + return rc; } -- cgit v1.2.3 From 8cc2d927c26a677415a9d0d23b9a043107f948c3 Mon Sep 17 00:00:00 2001 From: Michal Schmidt Date: Mon, 1 Jul 2013 17:23:20 +0200 Subject: bnx2x: fill in sane dump flag information bnx2x did not set dump->version in its .get_dump_flag() method. Let's set it to BNX2X_DUMP_VERSION. It's not a particularly nice number (0x50acff01 currently), but at least it's something deterministic. dump->flag should report the value previously set using ethtool -W. Signed-off-by: Michal Schmidt Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index cf5cebc358b8..c5f225101684 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -971,12 +971,12 @@ static int bnx2x_get_dump_flag(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); + dump->version = BNX2X_DUMP_VERSION; + dump->flag = bp->dump_preset_idx; /* Calculate the requested preset idx length */ dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx); DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n", bp->dump_preset_idx, dump->len); - - dump->flag = ETHTOOL_GET_DUMP_DATA; return 0; } -- cgit v1.2.3 From f3ad89217fa8088203b6c040584987028d524456 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 2 Jul 2013 08:57:47 +0800 Subject: net: ethernet: davinci_emac: remove redundant dev_err call in davinci_emac_probe() There is a error message within devm_ioremap_resource already, so remove the dev_err call to avoid redundant error message. Signed-off-by: Wei Yongjun Acked-by: Lad, Prabhakar Acked-by: Mugunthan V N Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/davinci_emac.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index f118d7133128..07b176bcf929 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1870,7 +1870,6 @@ static int davinci_emac_probe(struct platform_device *pdev) priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; priv->remap_addr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->remap_addr)) { - dev_err(&pdev->dev, "unable to map IO\n"); rc = PTR_ERR(priv->remap_addr); goto no_pdata; } -- cgit v1.2.3 From 06efce71166b222b6715c73700f5e9b228d68999 Mon Sep 17 00:00:00 2001 From: Jim Baxter Date: Thu, 27 Jun 2013 19:25:08 +0100 Subject: net: fec: Fix Transmitted bytes counter The tx_bytes field was not being updated so the network card statistics showed 0.0B transmitted. Signed-off-by: Jim Baxter Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 5664acd8e0f4..aff8a5cd17c7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -739,6 +739,7 @@ fec_enet_tx(struct net_device *ndev) ndev->stats.tx_carrier_errors++; } else { ndev->stats.tx_packets++; + ndev->stats.tx_bytes += bdp->cbd_datlen; } if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && -- cgit v1.2.3 From 7cc47d139f9a815a91bd9e7377063238c69a0423 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Sun, 30 Jun 2013 14:37:11 +1000 Subject: cxgb3: Missing rtnl lock in error recovery When exercising error injection on IBM pseries machine, I hit the following warning: [ 251.450043] RTAS: event: 89, Type: Platform Error, Severity: 2 [ 253.549822] cxgb3 0006:01:00.0: enabling device (0140 -> 0142) [ 253.713560] cxgb3 0006:01:00.0: adapter recovering, PEX ERR 0x100 [ 254.895437] RTNL: assertion failed at net/core/dev.c (2031) [ 254.895467] CPU: 6 PID: 5449 Comm: eehd Tainted: G W 3.10.0-rc7-00157-gea461ab #19 [ 254.895474] Call Trace: [ 254.895483] [c000000fac56f7d0] [c000000000014dcc] .show_stack+0x7c/0x1f0 (unreliable) [ 254.895493] [c000000fac56f8a0] [c0000000007ba318] .dump_stack+0x28/0x3c [ 254.895500] [c000000fac56f910] [c0000000006c0384] .netif_set_real_num_tx_queues+0x224/0x230 [ 254.895515] [c000000fac56f9b0] [d00000000ef35510] .cxgb_open+0x80/0x3f0 [cxgb3] [ 254.895525] [c000000fac56fa50] [d00000000ef35914] .t3_resume_ports+0x94/0x100 [cxgb3] [ 254.895533] [c000000fac56fae0] [c00000000005fc8c] .eeh_report_resume+0x8c/0xd0 [ 254.895539] [c000000fac56fb60] [c00000000005e9fc] .eeh_pe_dev_traverse+0x9c/0x190 [ 254.895545] [c000000fac56fc10] [c000000000060000] .eeh_handle_event+0x110/0x330 [ 254.895551] [c000000fac56fca0] [c000000000060350] .eeh_event_handler+0x130/0x1a0 [ 254.895558] [c000000fac56fd30] [c0000000000ad758] .kthread+0xe8/0xf0 [ 254.895566] [c000000fac56fe30] [c00000000000a05c] .ret_from_kernel_thread+0x5c/0x80 It appears that t3_resume_ports() is called with the rtnl_lock held from the fatal error task but not from the PCI error callbacks. This fixes it. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 71497e835f42..b650951791dd 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -3037,7 +3037,9 @@ static void t3_io_resume(struct pci_dev *pdev) CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n", t3_read_reg(adapter, A_PCIE_PEX_ERR)); + rtnl_lock(); t3_resume_ports(adapter); + rtnl_unlock(); } static const struct pci_error_handlers t3_err_handler = { -- cgit v1.2.3 From b9eef55c2ab33053ae236b5d383965f9ee6a0094 Mon Sep 17 00:00:00 2001 From: Jim Baxter Date: Mon, 1 Jul 2013 14:57:54 +0100 Subject: net: fec: Fix RMON registers on imx6 commit 38ae92d "fec: Add support for reading RMON registers" causes the imx6Q to crash. This fixes it by only enabling the RMON registers; the registers are already cleared by the MAC being reset. Signed-off-by: Jim Baxter Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index aff8a5cd17c7..b6768821757b 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -606,11 +606,8 @@ fec_restart(struct net_device *ndev, int duplex) ecntl |= (1 << 4); #ifndef CONFIG_M5272 - /* Disable, clear, and enable the MIB */ - writel(1 << 31, fep->hwp + FEC_MIB_CTRLSTAT); - for (i = RMON_T_DROP; i < IEEE_R_OCTETS_OK; i++) - writel(0, fep->hwp + i); - writel(0, fep->hwp + FEC_MIB_CTRLSTAT); + /* Enable the MIB statistic event counters */ + writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); #endif /* And last, enable the transmit and receive processing */ -- cgit v1.2.3 From 83d7af64ac9eaf4f4db7228677bc25f23c383790 Mon Sep 17 00:00:00 2001 From: Giuseppe CAVALLARO Date: Tue, 2 Jul 2013 14:12:36 +0200 Subject: stmmac: tidy-up and rework the driver debug levels Prior to this patch, the internal debugging was based on ifdefs, and some printks were useless because much of this information is already exposed via ethtool. This patch removes all the ifdef defines; only the netif_msg_XXX levels are used now. Signed-off-by: Giuseppe Cavallaro Signed-off-by: David S.
Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 10 -- .../net/ethernet/stmicro/stmmac/dwmac1000_core.c | 57 ++++------ .../net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 10 +- .../net/ethernet/stmicro/stmmac/dwmac100_core.c | 4 - drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 8 +- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 72 +++++------- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 95 +++++----------- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 4 +- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 124 +++++++-------------- 9 files changed, 117 insertions(+), 267 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 7788fbe44f0a..9911b9323f00 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -38,16 +38,6 @@ #include "descs.h" #include "mmc.h" -#undef CHIP_DEBUG_PRINT -/* Turn-on extra printk debug for MAC core, dma and descriptors */ -/* #define CHIP_DEBUG_PRINT */ - -#ifdef CHIP_DEBUG_PRINT -#define CHIP_DBG(fmt, args...) printk(fmt, ## args) -#else -#define CHIP_DBG(fmt, args...) do { } while (0) -#endif - /* Synopsys Core versions */ #define DWMAC_CORE_3_40 0x34 #define DWMAC_CORE_3_50 0x35 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 7e05e8d0f1c2..cdd926832e27 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -91,8 +91,8 @@ static void dwmac1000_set_filter(struct net_device *dev, int id) unsigned int value = 0; unsigned int perfect_addr_number; - CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", - __func__, netdev_mc_count(dev), netdev_uc_count(dev)); + pr_debug("%s: # mcasts %d, # unicast %d\n", __func__, + netdev_mc_count(dev), netdev_uc_count(dev)); if (dev->flags & IFF_PROMISC) value = GMAC_FRAME_FILTER_PR; @@ -152,7 +152,7 @@ static void dwmac1000_set_filter(struct net_device *dev, int id) #endif writel(value, ioaddr + GMAC_FRAME_FILTER); - CHIP_DBG(KERN_INFO "\tFilter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n", + pr_debug("\tFilter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER), readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW)); } @@ -162,18 +162,18 @@ static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex, { unsigned int flow = 0; - CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n"); + pr_debug("GMAC Flow-Control:\n"); if (fc & FLOW_RX) { - CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n"); + pr_debug("\tReceive Flow-Control ON\n"); flow |= GMAC_FLOW_CTRL_RFE; } if (fc & FLOW_TX) { - CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n"); + pr_debug("\tTransmit Flow-Control ON\n"); flow |= GMAC_FLOW_CTRL_TFE; } if (duplex) { - CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time); + pr_debug("\tduplex mode: PAUSE %d\n", pause_time); flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT); } @@ -185,11 +185,11 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode) unsigned int pmt = 0; if (mode & WAKE_MAGIC) { - CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n"); + pr_debug("GMAC: WOL Magic frame\n"); pmt |= power_down | magic_pkt_en; } if (mode & WAKE_UCAST) { - CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n"); + pr_debug("GMAC: WOL on global unicast\n"); pmt |= global_unicast; } @@ -203,23 +203,13 @@ static int dwmac1000_irq_status(void __iomem *ioaddr, 
int ret = 0; /* Not used events (e.g. MMC interrupts) are not handled. */ - if ((intr_status & mmc_tx_irq)) { - CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n", - readl(ioaddr + GMAC_MMC_TX_INTR)); + if ((intr_status & mmc_tx_irq)) x->mmc_tx_irq_n++; - } - if (unlikely(intr_status & mmc_rx_irq)) { - CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n", - readl(ioaddr + GMAC_MMC_RX_INTR)); + if (unlikely(intr_status & mmc_rx_irq)) x->mmc_rx_irq_n++; - } - if (unlikely(intr_status & mmc_rx_csum_offload_irq)) { - CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n", - readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); + if (unlikely(intr_status & mmc_rx_csum_offload_irq)) x->mmc_rx_csum_offload_irq_n++; - } if (unlikely(intr_status & pmt_irq)) { - CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n"); /* clear the PMT bits 5 and 6 by reading the PMT status reg */ readl(ioaddr + GMAC_PMT); x->irq_receive_pmt_irq_n++; @@ -229,32 +219,22 @@ static int dwmac1000_irq_status(void __iomem *ioaddr, /* Clean LPI interrupt by reading the Reg 12 */ ret = readl(ioaddr + LPI_CTRL_STATUS); - if (ret & LPI_CTRL_STATUS_TLPIEN) { - CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n"); + if (ret & LPI_CTRL_STATUS_TLPIEN) x->irq_tx_path_in_lpi_mode_n++; - } - if (ret & LPI_CTRL_STATUS_TLPIEX) { - CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n"); + if (ret & LPI_CTRL_STATUS_TLPIEX) x->irq_tx_path_exit_lpi_mode_n++; - } - if (ret & LPI_CTRL_STATUS_RLPIEN) { - CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n"); + if (ret & LPI_CTRL_STATUS_RLPIEN) x->irq_rx_path_in_lpi_mode_n++; - } - if (ret & LPI_CTRL_STATUS_RLPIEX) { - CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n"); + if (ret & LPI_CTRL_STATUS_RLPIEX) x->irq_rx_path_exit_lpi_mode_n++; - } } if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) { - CHIP_DBG(KERN_INFO "GMAC PCS ANE IRQ\n"); readl(ioaddr + GMAC_AN_STATUS); x->irq_pcs_ane_n++; } if (intr_status & rgmii_irq) { u32 status = readl(ioaddr + GMAC_S_R_GMII); - CHIP_DBG(KERN_INFO "GMAC RGMII/SGMII interrupt\n"); x->irq_rgmii_n++; /* Save and dump the link status. */ @@ -271,11 +251,12 @@ static int dwmac1000_irq_status(void __iomem *ioaddr, x->pcs_speed = SPEED_10; x->pcs_link = 1; - pr_debug("Link is Up - %d/%s\n", (int)x->pcs_speed, + pr_debug("%s: Link is Up - %d/%s\n", __func__, + (int)x->pcs_speed, x->pcs_duplex ? "Full" : "Half"); } else { x->pcs_link = 0; - pr_debug("Link is Down\n"); + pr_debug("%s: Link is Down\n", __func__); } } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 2c431b616058..0c2058a69fd2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -116,7 +116,7 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, u32 csr6 = readl(ioaddr + DMA_CONTROL); if (txmode == SF_DMA_MODE) { - CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n"); + pr_debug("GMAC: enable TX store and forward mode\n"); /* Transmit COE type 2 cannot be done in cut-through mode. 
*/ csr6 |= DMA_CONTROL_TSF; /* Operating on second frame increase the performance @@ -124,8 +124,7 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, */ csr6 |= DMA_CONTROL_OSF; } else { - CHIP_DBG(KERN_DEBUG "GMAC: disabling TX SF (threshold %d)\n", - txmode); + pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode); csr6 &= ~DMA_CONTROL_TSF; csr6 &= DMA_CONTROL_TC_TX_MASK; /* Set the transmit threshold */ @@ -142,11 +141,10 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, } if (rxmode == SF_DMA_MODE) { - CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n"); + pr_debug("GMAC: enable RX store and forward mode\n"); csr6 |= DMA_CONTROL_RSF; } else { - CHIP_DBG(KERN_DEBUG "GMAC: disable RX SF mode (threshold %d)\n", - rxmode); + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode); csr6 &= ~DMA_CONTROL_RSF; csr6 &= DMA_CONTROL_TC_RX_MASK; if (rxmode <= 32) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index 007bb2be3f10..5857d677dac1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -135,10 +135,6 @@ static void dwmac100_set_filter(struct net_device *dev, int id) } writel(value, ioaddr + MAC_CONTROL); - - CHIP_DBG(KERN_INFO "%s: Filter: 0x%08x Hash: HI 0x%08x, LO 0x%08x\n", - __func__, readl(ioaddr + MAC_CONTROL), - readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW)); } static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index 67551c154138..7d1dce9e7ffc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -90,14 +90,14 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr) { int i; - CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n"); + pr_debug("DWMAC 100 DMA CSR\n"); for (i = 0; i < 9; i++) pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, (DMA_BUS_MODE + i * 4), readl(ioaddr + DMA_BUS_MODE + i * 4)); - CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n", - DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR)); - CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n", + + pr_debug("\tCSR20 (0x%x): 0x%08x, CSR21 (0x%x): 0x%08x\n", + DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR), DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 491d7e930603..484e3cf9c414 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -24,13 +24,6 @@ #include "common.h" #include "dwmac_dma.h" -#undef DWMAC_DMA_DEBUG -#ifdef DWMAC_DMA_DEBUG -#define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args) -#else -#define DWMAC_LIB_DBG(fmt, args...) 
do { } while (0) -#endif - #define GMAC_HI_REG_AE 0x80000000 /* CSR1 enables the transmit DMA to check for new descriptor */ @@ -85,24 +78,24 @@ static void show_tx_process_state(unsigned int status) switch (state) { case 0: - pr_info("- TX (Stopped): Reset or Stop command\n"); + pr_debug("- TX (Stopped): Reset or Stop command\n"); break; case 1: - pr_info("- TX (Running):Fetching the Tx desc\n"); + pr_debug("- TX (Running):Fetching the Tx desc\n"); break; case 2: - pr_info("- TX (Running): Waiting for end of tx\n"); + pr_debug("- TX (Running): Waiting for end of tx\n"); break; case 3: - pr_info("- TX (Running): Reading the data " + pr_debug("- TX (Running): Reading the data " "and queuing the data into the Tx buf\n"); break; case 6: - pr_info("- TX (Suspended): Tx Buff Underflow " + pr_debug("- TX (Suspended): Tx Buff Underflow " "or an unavailable Transmit descriptor\n"); break; case 7: - pr_info("- TX (Running): Closing Tx descriptor\n"); + pr_debug("- TX (Running): Closing Tx descriptor\n"); break; default: break; @@ -116,29 +109,29 @@ static void show_rx_process_state(unsigned int status) switch (state) { case 0: - pr_info("- RX (Stopped): Reset or Stop command\n"); + pr_debug("- RX (Stopped): Reset or Stop command\n"); break; case 1: - pr_info("- RX (Running): Fetching the Rx desc\n"); + pr_debug("- RX (Running): Fetching the Rx desc\n"); break; case 2: - pr_info("- RX (Running):Checking for end of pkt\n"); + pr_debug("- RX (Running):Checking for end of pkt\n"); break; case 3: - pr_info("- RX (Running): Waiting for Rx pkt\n"); + pr_debug("- RX (Running): Waiting for Rx pkt\n"); break; case 4: - pr_info("- RX (Suspended): Unavailable Rx buf\n"); + pr_debug("- RX (Suspended): Unavailable Rx buf\n"); break; case 5: - pr_info("- RX (Running): Closing Rx descriptor\n"); + pr_debug("- RX (Running): Closing Rx descriptor\n"); break; case 6: - pr_info("- RX(Running): Flushing the current frame" + pr_debug("- RX(Running): Flushing the current frame" " from the Rx buf\n"); break; case 7: - pr_info("- RX (Running): Queuing the Rx frame" + pr_debug("- RX (Running): Queuing the Rx frame" " from the Rx buf into memory\n"); break; default: @@ -154,51 +147,37 @@ int dwmac_dma_interrupt(void __iomem *ioaddr, /* read the status register (CSR5) */ u32 intr_status = readl(ioaddr + DMA_STATUS); - DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status); #ifdef DWMAC_DMA_DEBUG - /* It displays the DMA process states (CSR5 register) */ + /* Enable it to monitor DMA rx/tx status in case of critical problems */ + pr_debug("%s: [CSR5: 0x%08x]\n", __func__, intr_status); show_tx_process_state(intr_status); show_rx_process_state(intr_status); #endif /* ABNORMAL interrupts */ if (unlikely(intr_status & DMA_STATUS_AIS)) { - DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: "); if (unlikely(intr_status & DMA_STATUS_UNF)) { - DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n"); ret = tx_hard_error_bump_tc; x->tx_undeflow_irq++; } - if (unlikely(intr_status & DMA_STATUS_TJT)) { - DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n"); + if (unlikely(intr_status & DMA_STATUS_TJT)) x->tx_jabber_irq++; - } - if (unlikely(intr_status & DMA_STATUS_OVF)) { - DWMAC_LIB_DBG(KERN_INFO "recv overflow\n"); + + if (unlikely(intr_status & DMA_STATUS_OVF)) x->rx_overflow_irq++; - } - if (unlikely(intr_status & DMA_STATUS_RU)) { - DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n"); + + if (unlikely(intr_status & DMA_STATUS_RU)) x->rx_buf_unav_irq++; - } - if (unlikely(intr_status & DMA_STATUS_RPS)) { - 
DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n"); + if (unlikely(intr_status & DMA_STATUS_RPS)) x->rx_process_stopped_irq++; - } - if (unlikely(intr_status & DMA_STATUS_RWT)) { - DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n"); + if (unlikely(intr_status & DMA_STATUS_RWT)) x->rx_watchdog_irq++; - } - if (unlikely(intr_status & DMA_STATUS_ETI)) { - DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n"); + if (unlikely(intr_status & DMA_STATUS_ETI)) x->tx_early_irq++; - } if (unlikely(intr_status & DMA_STATUS_TPS)) { - DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n"); x->tx_process_stopped_irq++; ret = tx_hard_error; } if (unlikely(intr_status & DMA_STATUS_FBI)) { - DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n"); x->fatal_bus_error_irq++; ret = tx_hard_error; } @@ -224,12 +203,11 @@ int dwmac_dma_interrupt(void __iomem *ioaddr, /* Optional hardware blocks, interrupts should be disabled */ if (unlikely(intr_status & (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI))) - pr_info("%s: unexpected status %08x\n", __func__, intr_status); + pr_warn("%s: unexpected status %08x\n", __func__, intr_status); /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); - DWMAC_LIB_DBG(KERN_INFO "\n\n"); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 0fbc8fafa706..7e6628a91514 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -33,54 +33,40 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, struct net_device_stats *stats = (struct net_device_stats *)data; if (unlikely(p->des01.etx.error_summary)) { - CHIP_DBG(KERN_ERR "GMAC TX error... 
0x%08x\n", p->des01.etx); - if (unlikely(p->des01.etx.jabber_timeout)) { - CHIP_DBG(KERN_ERR "\tjabber_timeout error\n"); + if (unlikely(p->des01.etx.jabber_timeout)) x->tx_jabber++; - } if (unlikely(p->des01.etx.frame_flushed)) { - CHIP_DBG(KERN_ERR "\tframe_flushed error\n"); x->tx_frame_flushed++; dwmac_dma_flush_tx_fifo(ioaddr); } if (unlikely(p->des01.etx.loss_carrier)) { - CHIP_DBG(KERN_ERR "\tloss_carrier error\n"); x->tx_losscarrier++; stats->tx_carrier_errors++; } if (unlikely(p->des01.etx.no_carrier)) { - CHIP_DBG(KERN_ERR "\tno_carrier error\n"); x->tx_carrier++; stats->tx_carrier_errors++; } - if (unlikely(p->des01.etx.late_collision)) { - CHIP_DBG(KERN_ERR "\tlate_collision error\n"); + if (unlikely(p->des01.etx.late_collision)) stats->collisions += p->des01.etx.collision_count; - } - if (unlikely(p->des01.etx.excessive_collisions)) { - CHIP_DBG(KERN_ERR "\texcessive_collisions\n"); + + if (unlikely(p->des01.etx.excessive_collisions)) stats->collisions += p->des01.etx.collision_count; - } - if (unlikely(p->des01.etx.excessive_deferral)) { - CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n"); + + if (unlikely(p->des01.etx.excessive_deferral)) x->tx_deferred++; - } if (unlikely(p->des01.etx.underflow_error)) { - CHIP_DBG(KERN_ERR "\tunderflow error\n"); dwmac_dma_flush_tx_fifo(ioaddr); x->tx_underflow++; } - if (unlikely(p->des01.etx.ip_header_error)) { - CHIP_DBG(KERN_ERR "\tTX IP header csum error\n"); + if (unlikely(p->des01.etx.ip_header_error)) x->tx_ip_header_error++; - } if (unlikely(p->des01.etx.payload_error)) { - CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n"); x->tx_payload_error++; dwmac_dma_flush_tx_fifo(ioaddr); } @@ -88,15 +74,12 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, ret = -1; } - if (unlikely(p->des01.etx.deferred)) { - CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n"); + if (unlikely(p->des01.etx.deferred)) x->tx_deferred++; - } + #ifdef STMMAC_VLAN_TAG_USED - if (p->des01.etx.vlan_frame) { - CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n"); + if (p->des01.etx.vlan_frame) x->tx_vlan++; - } #endif return ret; @@ -123,30 +106,20 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) * 0 1 1 | COE bypassed.. no IPv4/6 frame * 0 1 0 | Reserved. 
*/ - if (status == 0x0) { - CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n"); + if (status == 0x0) ret = llc_snap; - } else if (status == 0x4) { - CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n"); + else if (status == 0x4) ret = good_frame; - } else if (status == 0x5) { - CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n"); + else if (status == 0x5) ret = csum_none; - } else if (status == 0x6) { - CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n"); + else if (status == 0x6) ret = csum_none; - } else if (status == 0x7) { - CHIP_DBG(KERN_ERR - "RX Des0 status: IPv4/6 Header and Payload Error.\n"); + else if (status == 0x7) ret = csum_none; - } else if (status == 0x1) { - CHIP_DBG(KERN_ERR - "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n"); + else if (status == 0x1) ret = discard_frame; - } else if (status == 0x3) { - CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n"); + else if (status == 0x3) ret = discard_frame; - } return ret; } @@ -208,36 +181,26 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, struct net_device_stats *stats = (struct net_device_stats *)data; if (unlikely(p->des01.erx.error_summary)) { - CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n", - p->des01.erx); if (unlikely(p->des01.erx.descriptor_error)) { - CHIP_DBG(KERN_ERR "\tdescriptor error\n"); x->rx_desc++; stats->rx_length_errors++; } - if (unlikely(p->des01.erx.overflow_error)) { - CHIP_DBG(KERN_ERR "\toverflow error\n"); + if (unlikely(p->des01.erx.overflow_error)) x->rx_gmac_overflow++; - } if (unlikely(p->des01.erx.ipc_csum_error)) - CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n"); + pr_err("\tIPC Csum Error/Giant frame\n"); if (unlikely(p->des01.erx.late_collision)) { - CHIP_DBG(KERN_ERR "\tlate_collision error\n"); - stats->collisions++; stats->collisions++; } - if (unlikely(p->des01.erx.receive_watchdog)) { - CHIP_DBG(KERN_ERR "\treceive_watchdog error\n"); + if (unlikely(p->des01.erx.receive_watchdog)) x->rx_watchdog++; - } - if (unlikely(p->des01.erx.error_gmii)) { - CHIP_DBG(KERN_ERR "\tReceive Error\n"); + + if (unlikely(p->des01.erx.error_gmii)) x->rx_mii++; - } + if (unlikely(p->des01.erx.crc_error)) { - CHIP_DBG(KERN_ERR "\tCRC error\n"); x->rx_crc++; stats->rx_crc_errors++; } @@ -251,30 +214,24 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error, p->des01.erx.frame_type, p->des01.erx.rx_mac_addr); - if (unlikely(p->des01.erx.dribbling)) { - CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n"); + if (unlikely(p->des01.erx.dribbling)) x->dribbling_bit++; - } + if (unlikely(p->des01.erx.sa_filter_fail)) { - CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n"); x->sa_rx_filter_fail++; ret = discard_frame; } if (unlikely(p->des01.erx.da_filter_fail)) { - CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n"); x->da_rx_filter_fail++; ret = discard_frame; } if (unlikely(p->des01.erx.length_error)) { - CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n"); x->rx_length++; ret = discard_frame; } #ifdef STMMAC_VLAN_TAG_USED - if (p->des01.erx.vlan_tag) { - CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n"); + if (p->des01.erx.vlan_tag) x->rx_vlan++; - } #endif return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 11775b99afc5..35ad4f427ae2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -52,10 +52,8 
@@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, ret = -1; } - if (p->des01.etx.vlan_frame) { - CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n"); + if (p->des01.etx.vlan_frame) x->tx_vlan++; - } if (unlikely(p->des01.tx.deferred)) x->tx_deferred++; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 520693385d8d..62e31054bd24 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -51,32 +51,6 @@ #include "stmmac_ptp.h" #include "stmmac.h" -#undef STMMAC_DEBUG -/*#define STMMAC_DEBUG*/ -#ifdef STMMAC_DEBUG -#define DBG(nlevel, klevel, fmt, args...) \ - ((void)(netif_msg_##nlevel(priv) && \ - printk(KERN_##klevel fmt, ## args))) -#else -#define DBG(nlevel, klevel, fmt, args...) do { } while (0) -#endif - -#undef STMMAC_RX_DEBUG -/*#define STMMAC_RX_DEBUG*/ -#ifdef STMMAC_RX_DEBUG -#define RX_DBG(fmt, args...) printk(fmt, ## args) -#else -#define RX_DBG(fmt, args...) do { } while (0) -#endif - -#undef STMMAC_XMIT_DEBUG -/*#define STMMAC_XMIT_DEBUG*/ -#ifdef STMMAC_XMIT_DEBUG -#define TX_DBG(fmt, args...) printk(fmt, ## args) -#else -#define TX_DBG(fmt, args...) do { } while (0) -#endif - #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) #define JUMBO_LEN 9000 @@ -214,19 +188,17 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) } } -#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) static void print_pkt(unsigned char *buf, int len) { int j; - pr_info("len = %d byte, buf addr: 0x%p", len, buf); + pr_debug("len = %d byte, buf addr: 0x%p", len, buf); for (j = 0; j < len; j++) { if ((j % 16) == 0) - pr_info("\n %03x:", j); - pr_info(" %02x", buf[j]); + pr_debug("\n %03x:", j); + pr_debug(" %02x", buf[j]); } - pr_info("\n"); + pr_debug("\n"); } -#endif /* minimum number of free TX descriptors required to wake up TX process */ #define STMMAC_TX_THRESH(x) (x->dma_tx_size/4) @@ -698,9 +670,6 @@ static void stmmac_adjust_link(struct net_device *dev) if (phydev == NULL) return; - DBG(probe, DEBUG, "stmmac_adjust_link: called. 
address %d link %d\n", - phydev->addr, phydev->link); - spin_lock_irqsave(&priv->lock, flags); if (phydev->link) { @@ -772,8 +741,6 @@ static void stmmac_adjust_link(struct net_device *dev) stmmac_eee_adjust(priv); spin_unlock_irqrestore(&priv->lock, flags); - - DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n"); } /** @@ -1014,8 +981,9 @@ static void init_dma_desc_rings(struct net_device *dev) if (bfsize < BUF_SIZE_16KiB) bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); - DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n", - txsize, rxsize, bfsize); + if (netif_msg_probe(priv)) + pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__, + txsize, rxsize, bfsize); if (priv->extend_desc) { priv->dma_erx = dma_alloc_coherent(priv->device, rxsize * @@ -1051,12 +1019,13 @@ static void init_dma_desc_rings(struct net_device *dev) GFP_KERNEL); priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), GFP_KERNEL); - if (netif_msg_drv(priv)) + if (netif_msg_probe(priv)) { pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); - /* RX INITIALIZATION */ - DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n"); + /* RX INITIALIZATION */ + pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n"); + } for (i = 0; i < rxsize; i++) { struct dma_desc *p; if (priv->extend_desc) @@ -1067,8 +1036,10 @@ static void init_dma_desc_rings(struct net_device *dev) if (stmmac_init_rx_buffers(priv, p, i)) break; - DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], - priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]); + if (netif_msg_probe(priv)) + pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], + priv->rx_skbuff[i]->data, + (unsigned int)priv->rx_skbuff_dma[i]); } priv->cur_rx = 0; priv->dirty_rx = (unsigned int)(i - rxsize); @@ -1243,8 +1214,9 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) stmmac_get_tx_hwtstamp(priv, entry, skb); } - TX_DBG("%s: curr %d, dirty %d\n", __func__, - priv->cur_tx, priv->dirty_tx); + if (netif_msg_tx_done(priv)) + pr_debug("%s: curr %d, dirty %d\n", __func__, + priv->cur_tx, priv->dirty_tx); if (likely(priv->tx_skbuff_dma[entry])) { dma_unmap_single(priv->device, @@ -1269,7 +1241,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) netif_tx_lock(priv->dev); if (netif_queue_stopped(priv->dev) && stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) { - TX_DBG("%s: restart transmit\n", __func__); + if (netif_msg_tx_done(priv)) + pr_debug("%s: restart transmit\n", __func__); netif_wake_queue(priv->dev); } netif_tx_unlock(priv->dev); @@ -1658,7 +1631,7 @@ static int stmmac_open(struct net_device *dev) pr_warn("%s: failed debugFS registration\n", __func__); #endif /* Start the ball rolling... */ - DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); + pr_debug("%s: DMA RX/TX processes started...\n", dev->name); priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr); @@ -1800,16 +1773,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) entry = priv->cur_tx % txsize; -#ifdef STMMAC_XMIT_DEBUG - if ((skb->len > ETH_FRAME_LEN) || nfrags) - pr_debug("%s: [entry %d]: skb addr %p len: %d nopagedlen: %d\n" - "\tn_frags: %d - ip_summed: %d - %s gso\n" - "\ttx_count_frames %d\n", __func__, entry, - skb, skb->len, nopaged_len, nfrags, skb->ip_summed, - !skb_is_gso(skb) ? 
"isn't" : "is", - priv->tx_count_frames); -#endif - csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); if (priv->extend_desc) @@ -1819,12 +1782,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) first = desc; -#ifdef STMMAC_XMIT_DEBUG - if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN)) - pr_debug("\tskb len: %d, nopaged_len: %d,\n" - "\t\tn_frags: %d, ip_summed: %d\n", - skb->len, nopaged_len, nfrags, skb->ip_summed); -#endif priv->tx_skbuff[entry] = skb; /* To program the descriptors according to the size of the frame */ @@ -1860,7 +1817,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) else desc = priv->dma_tx + entry; - TX_DBG("\t[entry %d] segment len: %d\n", entry, len); desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); priv->tx_skbuff_dma[entry] = desc->des2; @@ -1884,8 +1840,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (priv->tx_coal_frames > priv->tx_count_frames) { priv->hw->desc->clear_tx_ic(desc); priv->xstats.tx_reset_ic_bit++; - TX_DBG("\t[entry %d]: tx_count_frames %d\n", entry, - priv->tx_count_frames); mod_timer(&priv->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); } else @@ -1897,22 +1851,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) priv->cur_tx++; -#ifdef STMMAC_XMIT_DEBUG if (netif_msg_pktdata(priv)) { - pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d", + pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d", __func__, (priv->cur_tx % txsize), (priv->dirty_tx % txsize), entry, first, nfrags); + if (priv->extend_desc) stmmac_display_ring((void *)priv->dma_etx, txsize, 1); else stmmac_display_ring((void *)priv->dma_tx, txsize, 0); - pr_info(">>> frame to be transmitted: "); + pr_debug(">>> frame to be transmitted: "); print_pkt(skb->data, skb->len); } -#endif if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { - TX_DBG("%s: stop transmitted packets\n", __func__); + if (netif_msg_hw(priv)) + pr_debug("%s: stop transmitted packets\n", __func__); netif_stop_queue(dev); } @@ -1972,7 +1926,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) priv->hw->ring->refill_desc3(priv, p); - RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); + if (netif_msg_rx_status(priv)) + pr_debug("\trefill entry #%d\n", entry); } wmb(); priv->hw->desc->set_rx_owner(p); @@ -1995,15 +1950,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) unsigned int count = 0; int coe = priv->plat->rx_coe; -#ifdef STMMAC_RX_DEBUG - if (netif_msg_hw(priv)) { - pr_debug(">>> stmmac_rx: descriptor ring:\n"); + if (netif_msg_rx_status(priv)) { + pr_debug("%s: descriptor ring:\n", __func__); if (priv->extend_desc) stmmac_display_ring((void *)priv->dma_erx, rxsize, 1); else stmmac_display_ring((void *)priv->dma_rx, rxsize, 0); } -#endif while (count < limit) { int status; struct dma_desc *p; @@ -2057,15 +2010,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) */ if (unlikely(status != llc_snap)) frame_len -= ETH_FCS_LEN; -#ifdef STMMAC_RX_DEBUG - if (frame_len > ETH_FRAME_LEN) - pr_debug("\tRX frame size %d, COE status: %d\n", - frame_len, status); - if (netif_msg_hw(priv)) + if (netif_msg_rx_status(priv)) { pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", p, entry, p->des2); -#endif + if (frame_len > ETH_FRAME_LEN) + pr_debug("\tframe size %d, COE: %d\n", + frame_len, status); + } skb = priv->rx_skbuff[entry]; if (unlikely(!skb)) { pr_err("%s: Inconsistent Rx descriptor chain\n", @@ -2082,12 
+2034,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) dma_unmap_single(priv->device, priv->rx_skbuff_dma[entry], priv->dma_buf_sz, DMA_FROM_DEVICE); -#ifdef STMMAC_RX_DEBUG + if (netif_msg_pktdata(priv)) { - pr_info(" frame received (%dbytes)", frame_len); + pr_debug("frame received (%dbytes)", frame_len); print_pkt(skb->data, frame_len); } -#endif + skb->protocol = eth_type_trans(skb, priv->dev); if (unlikely(!coe)) -- cgit v1.2.3 From bc2bebe8de8ed4ba6482c9cc370b0dd72ffe8cd2 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 3 Jul 2013 21:48:11 +0200 Subject: alx: remove WoL support Unfortunately, WoL is broken and the system will immediately resume after suspending, and I can't seem to figure out why. Remove WoL support until the issue can be found. Signed-off-by: Johannes Berg Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/alx/ethtool.c | 36 ------- drivers/net/ethernet/atheros/alx/hw.c | 155 ----------------------------- drivers/net/ethernet/atheros/alx/hw.h | 5 - drivers/net/ethernet/atheros/alx/main.c | 142 +++----------------------- 4 files changed, 15 insertions(+), 323 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c index 926100626d60..45b36507abc1 100644 --- a/drivers/net/ethernet/atheros/alx/ethtool.c +++ b/drivers/net/ethernet/atheros/alx/ethtool.c @@ -201,40 +201,6 @@ static void alx_set_msglevel(struct net_device *netdev, u32 data) alx->msg_enable = data; } -static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - - wol->supported = WAKE_MAGIC | WAKE_PHY; - wol->wolopts = 0; - - if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC) - wol->wolopts |= WAKE_MAGIC; - if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) - wol->wolopts |= WAKE_PHY; -} - -static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) -{ - struct alx_priv *alx = netdev_priv(netdev); - struct alx_hw *hw = &alx->hw; - - if (wol->wolopts & ~(WAKE_MAGIC | WAKE_PHY)) - return -EOPNOTSUPP; - - hw->sleep_ctrl = 0; - - if (wol->wolopts & WAKE_MAGIC) - hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC; - if (wol->wolopts & WAKE_PHY) - hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY; - - device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl); - - return 0; -} - const struct ethtool_ops alx_ethtool_ops = { .get_settings = alx_get_settings, .set_settings = alx_set_settings, @@ -242,7 +208,5 @@ const struct ethtool_ops alx_ethtool_ops = { .set_pauseparam = alx_set_pauseparam, .get_msglevel = alx_get_msglevel, .set_msglevel = alx_set_msglevel, - .get_wol = alx_get_wol, - .set_wol = alx_set_wol, .get_link = ethtool_op_get_link, }; diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c index ea99e5d8743f..1e8c24a3cb4e 100644 --- a/drivers/net/ethernet/atheros/alx/hw.c +++ b/drivers/net/ethernet/atheros/alx/hw.c @@ -332,16 +332,6 @@ void alx_set_macaddr(struct alx_hw *hw, const u8 *addr) alx_write_mem32(hw, ALX_STAD1, val); } -static void alx_enable_osc(struct alx_hw *hw) -{ - u32 val; - - /* rising edge */ - val = alx_read_mem32(hw, ALX_MISC); - alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN); - alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); -} - static void alx_reset_osc(struct alx_hw *hw, u8 rev) { u32 val, val2; @@ -858,66 +848,6 @@ void alx_post_phy_link(struct alx_hw *hw) } } - -/* NOTE: - * 1. 
phy link must be established before calling this function - * 2. wol option (pattern,magic,link,etc.) is configed before call it. - */ -int alx_pre_suspend(struct alx_hw *hw, int speed, u8 duplex) -{ - u32 master, mac, phy, val; - int err = 0; - - master = alx_read_mem32(hw, ALX_MASTER); - master &= ~ALX_MASTER_PCLKSEL_SRDS; - mac = hw->rx_ctrl; - /* 10/100 half */ - ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100); - mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN); - - phy = alx_read_mem32(hw, ALX_PHY_CTRL); - phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS); - phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE | - ALX_PHY_CTRL_HIB_EN; - - /* without any activity */ - if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) { - err = alx_write_phy_reg(hw, ALX_MII_IER, 0); - if (err) - return err; - phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN; - } else { - if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS)) - mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN; - if (hw->sleep_ctrl & ALX_SLEEP_CIFS) - mac |= ALX_MAC_CTRL_TX_EN; - if (duplex == DUPLEX_FULL) - mac |= ALX_MAC_CTRL_FULLD; - if (speed == SPEED_1000) - ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, - ALX_MAC_CTRL_SPEED_1000); - phy |= ALX_PHY_CTRL_DSPRST_OUT; - err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, - ALX_MIIEXT_S3DIG10, - ALX_MIIEXT_S3DIG10_SL); - if (err) - return err; - } - - alx_enable_osc(hw); - hw->rx_ctrl = mac; - alx_write_mem32(hw, ALX_MASTER, master); - alx_write_mem32(hw, ALX_MAC_CTRL, mac); - alx_write_mem32(hw, ALX_PHY_CTRL, phy); - - /* set val of PDLL D3PLLOFF */ - val = alx_read_mem32(hw, ALX_PDLL_TRNS1); - val |= ALX_PDLL_TRNS1_D3PLLOFF_EN; - alx_write_mem32(hw, ALX_PDLL_TRNS1, val); - - return 0; -} - bool alx_phy_configured(struct alx_hw *hw) { u32 cfg, hw_cfg; @@ -990,26 +920,6 @@ int alx_clear_phy_intr(struct alx_hw *hw) return alx_read_phy_reg(hw, ALX_MII_ISR, &isr); } -int alx_config_wol(struct alx_hw *hw) -{ - u32 wol = 0; - int err = 0; - - /* turn on magic packet event */ - if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC) - wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN; - - /* turn on link up event */ - if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) { - wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK; - /* only link up can wake up */ - err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP); - } - alx_write_mem32(hw, ALX_WOL0, wol); - - return err; -} - void alx_disable_rss(struct alx_hw *hw) { u32 ctrl = alx_read_mem32(hw, ALX_RXQ0); @@ -1121,71 +1031,6 @@ void alx_configure_basic(struct alx_hw *hw) alx_write_mem32(hw, ALX_WRR, val); } -int alx_select_powersaving_speed(struct alx_hw *hw, int *speed, u8 *duplex) -{ - int i, err; - u16 lpa; - - err = alx_read_phy_link(hw); - if (err) - return err; - - if (hw->link_speed == SPEED_UNKNOWN) { - *speed = SPEED_UNKNOWN; - *duplex = DUPLEX_UNKNOWN; - return 0; - } - - err = alx_read_phy_reg(hw, MII_LPA, &lpa); - if (err) - return err; - - if (!(lpa & LPA_LPACK)) { - *speed = hw->link_speed; - return 0; - } - - if (lpa & LPA_10FULL) { - *speed = SPEED_10; - *duplex = DUPLEX_FULL; - } else if (lpa & LPA_10HALF) { - *speed = SPEED_10; - *duplex = DUPLEX_HALF; - } else if (lpa & LPA_100FULL) { - *speed = SPEED_100; - *duplex = DUPLEX_FULL; - } else { - *speed = SPEED_100; - *duplex = DUPLEX_HALF; - } - - if (*speed == hw->link_speed && *duplex == hw->duplex) - return 0; - err = alx_write_phy_reg(hw, ALX_MII_IER, 0); - if (err) - return err; - err = alx_setup_speed_duplex(hw, alx_speed_to_ethadv(*speed, *duplex) | - 
ADVERTISED_Autoneg, ALX_FC_ANEG | - ALX_FC_RX | ALX_FC_TX); - if (err) - return err; - - /* wait for linkup */ - for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) { - msleep(100); - - err = alx_read_phy_link(hw); - if (err < 0) - return err; - if (hw->link_speed != SPEED_UNKNOWN) - break; - } - if (i == ALX_MAX_SETUP_LNK_CYCLE) - return -ETIMEDOUT; - - return 0; -} - bool alx_get_phy_info(struct alx_hw *hw) { u16 devs1, devs2; diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h index a60e35c1c1b5..96f3b4381e17 100644 --- a/drivers/net/ethernet/atheros/alx/hw.h +++ b/drivers/net/ethernet/atheros/alx/hw.h @@ -418,8 +418,6 @@ struct alx_hw { u8 flowctrl; u32 adv_cfg; - u32 sleep_ctrl; - spinlock_t mdio_lock; struct mdio_if_info mdio; u16 phy_id[2]; @@ -479,14 +477,12 @@ void alx_reset_pcie(struct alx_hw *hw); void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en); int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl); void alx_post_phy_link(struct alx_hw *hw); -int alx_pre_suspend(struct alx_hw *hw, int speed, u8 duplex); int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data); int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data); int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata); int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data); int alx_read_phy_link(struct alx_hw *hw); int alx_clear_phy_intr(struct alx_hw *hw); -int alx_config_wol(struct alx_hw *hw); void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc); void alx_start_mac(struct alx_hw *hw); int alx_reset_mac(struct alx_hw *hw); @@ -494,7 +490,6 @@ void alx_set_macaddr(struct alx_hw *hw, const u8 *addr); bool alx_phy_configured(struct alx_hw *hw); void alx_configure_basic(struct alx_hw *hw); void alx_disable_rss(struct alx_hw *hw); -int alx_select_powersaving_speed(struct alx_hw *hw, int *speed, u8 *duplex); bool alx_get_phy_info(struct alx_hw *hw); static inline u32 alx_speed_to_ethadv(int speed, u8 duplex) diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 148b4b976474..0e0b242a9dd4 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -706,7 +706,6 @@ static int alx_init_sw(struct alx_priv *alx) alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8); alx->tx_ringsz = 256; alx->rx_ringsz = 512; - hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY; hw->imt = 200; alx->int_mask = ALX_ISR_MISC; hw->dma_chnl = hw->max_dma_chnl; @@ -961,66 +960,6 @@ static int alx_stop(struct net_device *netdev) return 0; } -static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en) -{ - struct alx_priv *alx = pci_get_drvdata(pdev); - struct net_device *netdev = alx->dev; - struct alx_hw *hw = &alx->hw; - int err, speed; - u8 duplex; - - netif_device_detach(netdev); - - if (netif_running(netdev)) - __alx_stop(alx); - -#ifdef CONFIG_PM_SLEEP - err = pci_save_state(pdev); - if (err) - return err; -#endif - - err = alx_select_powersaving_speed(hw, &speed, &duplex); - if (err) - return err; - err = alx_clear_phy_intr(hw); - if (err) - return err; - err = alx_pre_suspend(hw, speed, duplex); - if (err) - return err; - err = alx_config_wol(hw); - if (err) - return err; - - *wol_en = false; - if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) { - netif_info(alx, wol, netdev, - "wol: ctrl=%X, speed=%X\n", - hw->sleep_ctrl, speed); - device_set_wakeup_enable(&pdev->dev, true); - *wol_en = true; - } - - pci_disable_device(pdev); - - return 0; -} - -static void 
alx_shutdown(struct pci_dev *pdev) -{ - int err; - bool wol_en; - - err = __alx_shutdown(pdev, &wol_en); - if (!err) { - pci_wake_from_d3(pdev, wol_en); - pci_set_power_state(pdev, PCI_D3hot); - } else { - dev_err(&pdev->dev, "shutdown fail %d\n", err); - } -} - static void alx_link_check(struct work_struct *work) { struct alx_priv *alx; @@ -1399,8 +1338,6 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_unmap; } - device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl); - netdev_info(netdev, "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n", netdev->dev_addr); @@ -1445,22 +1382,12 @@ static void alx_remove(struct pci_dev *pdev) static int alx_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - int err; - bool wol_en; - - err = __alx_shutdown(pdev, &wol_en); - if (err) { - dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err); - return err; - } - - if (wol_en) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } + struct alx_priv *alx = pci_get_drvdata(pdev); + if (!netif_running(alx->dev)) + return 0; + netif_device_detach(alx->dev); + __alx_stop(alx); return 0; } @@ -1468,49 +1395,20 @@ static int alx_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct alx_priv *alx = pci_get_drvdata(pdev); - struct net_device *netdev = alx->dev; - struct alx_hw *hw = &alx->hw; - int err; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_save_state(pdev); - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); - - hw->link_speed = SPEED_UNKNOWN; - alx->int_mask = ALX_ISR_MISC; - - alx_reset_pcie(hw); - alx_reset_phy(hw); - - err = alx_reset_mac(hw); - if (err) { - netif_err(alx, hw, alx->dev, - "resume:reset_mac fail %d\n", err); - return -EIO; - } - - err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); - if (err) { - netif_err(alx, hw, alx->dev, - "resume:setup_speed_duplex fail %d\n", err); - return -EIO; - } - - if (netif_running(netdev)) { - err = __alx_open(alx, true); - if (err) - return err; - } - - netif_device_attach(netdev); - return err; + if (!netif_running(alx->dev)) + return 0; + netif_device_attach(alx->dev); + return __alx_open(alx, true); } + +static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); +#define ALX_PM_OPS (&alx_pm_ops) +#else +#define ALX_PM_OPS NULL #endif + static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { @@ -1553,8 +1451,6 @@ static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev) } pci_set_master(pdev); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); alx_reset_pcie(hw); if (!alx_reset_mac(hw)) @@ -1590,13 +1486,6 @@ static const struct pci_error_handlers alx_err_handlers = { .resume = alx_pci_error_resume, }; -#ifdef CONFIG_PM_SLEEP -static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); -#define ALX_PM_OPS (&alx_pm_ops) -#else -#define ALX_PM_OPS NULL -#endif - static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = { { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, @@ -1614,7 +1503,6 @@ static struct pci_driver alx_driver = { .id_table = alx_pci_tbl, .probe = alx_probe, .remove = alx_remove, - .shutdown = alx_shutdown, .err_handler = &alx_err_handlers, .driver.pm = ALX_PM_OPS, }; -- cgit v1.2.3 From cdffcf1bc721032d261a7a9da353b41f0d830410 Mon Sep 17 00:00:00 2001 From: Jim Baxter Date: Tue, 2 Jul 2013 22:52:56 +0100 
Subject: net: fec: Add VLAN receive HW support. This enables the driver to take advantage of the FEC VLAN indicator to improve performance. Signed-off-by: Jim Baxter Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec.h | 3 ++ drivers/net/ethernet/freescale/fec_main.c | 62 ++++++++++++++++++++++++++----- 2 files changed, 55 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 8362a0399afb..2b0a0ea4f8e7 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -203,6 +203,9 @@ struct bufdesc_ex { #define BD_ENET_RX_CL ((ushort)0x0001) #define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */ +/* Enhanced buffer descriptor control/status used by Ethernet receive */ +#define BD_ENET_RX_VLAN 0x00000004 + /* Buffer descriptor control/status used by Ethernet transmit. */ #define BD_ENET_TX_READY ((ushort)0x8000) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 1f7ff2268bd0..d3ad5ea711d3 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -54,6 +54,7 @@ #include #include #include +#include #include @@ -90,6 +91,8 @@ static void set_multicast_list(struct net_device *ndev); #define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) /* Controller has hardware checksum support */ #define FEC_QUIRK_HAS_CSUM (1 << 5) +/* Controller has hardware vlan support */ +#define FEC_QUIRK_HAS_VLAN (1 << 6) static struct platform_device_id fec_devtype[] = { { @@ -108,7 +111,8 @@ static struct platform_device_id fec_devtype[] = { }, { .name = "imx6q-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM, + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN, }, { .name = "mvf600-fec", .driver_data = FEC_QUIRK_ENET_MAC, @@ -179,11 +183,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) -/* The FEC stores dest/src/type, data, and checksum for receive packets. +/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. */ -#define PKT_MAXBUF_SIZE 1518 +#define PKT_MAXBUF_SIZE 1522 #define PKT_MINBUF_SIZE 64 -#define PKT_MAXBLR_SIZE 1520 +#define PKT_MAXBLR_SIZE 1536 /* FEC receive acceleration */ #define FEC_RACC_IPDIS (1 << 1) @@ -806,6 +810,9 @@ fec_enet_rx(struct net_device *ndev, int budget) ushort pkt_len; __u8 *data; int pkt_received = 0; + struct bufdesc_ex *ebdp = NULL; + bool vlan_packet_rcvd = false; + u16 vlan_tag; #ifdef CONFIG_M532x flush_cache_all(); @@ -869,6 +876,24 @@ fec_enet_rx(struct net_device *ndev, int budget) if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, pkt_len); + /* Extract the enhanced buffer descriptor */ + ebdp = NULL; + if (fep->bufdesc_ex) + ebdp = (struct bufdesc_ex *)bdp; + + /* If this is a VLAN packet remove the VLAN Tag */ + vlan_packet_rcvd = false; + if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && + fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { + /* Push and remove the vlan tag */ + struct vlan_hdr *vlan_header = + (struct vlan_hdr *) (data + ETH_HLEN); + vlan_tag = ntohs(vlan_header->h_vlan_TCI); + pkt_len -= VLAN_HLEN; + + vlan_packet_rcvd = true; + } + /* This does 16 byte alignment, exactly what we need. 
* The packet length includes FCS, but we don't want to * include that when passing upstream as it messes up @@ -879,9 +904,18 @@ fec_enet_rx(struct net_device *ndev, int budget) if (unlikely(!skb)) { ndev->stats.rx_dropped++; } else { + int payload_offset = (2 * ETH_ALEN); skb_reserve(skb, NET_IP_ALIGN); skb_put(skb, pkt_len - 4); /* Make room */ - skb_copy_to_linear_data(skb, data, pkt_len - 4); + + /* Extract the frame data without the VLAN header. */ + skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN)); + if (vlan_packet_rcvd) + payload_offset = (2 * ETH_ALEN) + VLAN_HLEN; + skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN), + data + payload_offset, + pkt_len - 4 - (2 * ETH_ALEN)); + skb->protocol = eth_type_trans(skb, ndev); /* Get receive timestamp from the skb */ @@ -889,8 +923,6 @@ fec_enet_rx(struct net_device *ndev, int budget) struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); unsigned long flags; - struct bufdesc_ex *ebdp = - (struct bufdesc_ex *)bdp; memset(shhwtstamps, 0, sizeof(*shhwtstamps)); @@ -901,9 +933,7 @@ fec_enet_rx(struct net_device *ndev, int budget) } if (fep->bufdesc_ex && - (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { - struct bufdesc_ex *ebdp = - (struct bufdesc_ex *)bdp; + (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { /* don't check it */ skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -912,6 +942,12 @@ fec_enet_rx(struct net_device *ndev, int budget) } } + /* Handle received VLAN packets */ + if (vlan_packet_rcvd) + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vlan_tag); + if (!skb_defer_rx_timestamp(skb)) napi_gro_receive(&fep->napi, skb); } @@ -1924,6 +1960,12 @@ static int fec_enet_init(struct net_device *ndev) writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); + if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) { + /* enable hw VLAN support */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; + ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + } + if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { /* enable hw accelerator */ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM -- cgit v1.2.3 From 9caf83c32be1dabf000ab7dc8430fba4f7d89e65 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 3 Jul 2013 12:37:46 +0300 Subject: net/mlx4: fix small memory leak on error "work" needs to be freed before returning on this error path. Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 707a7d0d08e1..299d0184f983 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1572,6 +1572,7 @@ int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, vp_admin->default_vlan, &admin_vlan_ix); if (err) { + kfree(work); mlx4_warn((&priv->dev), "No vlan resources slave %d, port %d\n", slave, port); -- cgit v1.2.3 From c9324d1870f3a7d13297ae6b787c567606ab3c89 Mon Sep 17 00:00:00 2001 From: Giuseppe CAVALLARO Date: Thu, 4 Jul 2013 06:18:07 +0200 Subject: net:stmmac: fix memleak in the open method This patch is to fix a memory leak in the open method, it reviews error conditions freeing the resources previously allocated and not freed in cased of DMA failure. Signed-off-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f2d283d2528f..f2ccb36e8685 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1552,7 +1552,7 @@ static int stmmac_open(struct net_device *dev) if (ret) { pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); - goto open_error; + goto phy_error; } } @@ -1566,7 +1566,7 @@ static int stmmac_open(struct net_device *dev) ret = stmmac_init_dma_engine(priv); if (ret < 0) { pr_err("%s: DMA initialization failed\n", __func__); - goto open_error; + goto init_error; } /* Copy the MAC addr into the HW */ @@ -1585,7 +1585,7 @@ static int stmmac_open(struct net_device *dev) if (unlikely(ret < 0)) { pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", __func__, dev->irq, ret); - goto open_error; + goto init_error; } /* Request the Wake IRQ in case of another line is used for WoL */ @@ -1595,7 +1595,7 @@ static int stmmac_open(struct net_device *dev) if (unlikely(ret < 0)) { pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n", __func__, priv->wol_irq, ret); - goto open_error_wolirq; + goto wolirq_error; } } @@ -1606,7 +1606,7 @@ static int stmmac_open(struct net_device *dev) if (unlikely(ret < 0)) { pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n", __func__, priv->lpi_irq, ret); - goto open_error_lpiirq; + goto lpiirq_error; } } @@ -1664,17 +1664,17 @@ static int stmmac_open(struct net_device *dev) return 0; -open_error_lpiirq: +lpiirq_error: if (priv->wol_irq != dev->irq) free_irq(priv->wol_irq, dev); - -open_error_wolirq: +wolirq_error: free_irq(dev->irq, dev); -open_error: +init_error: + free_dma_desc_resources(priv); if (priv->phydev) phy_disconnect(priv->phydev); - +phy_error: clk_disable_unprepare(priv->stmmac_clk); return ret; -- cgit v1.2.3 From d741434c3e0ae5592b72a0f4833a42b4be1b2fa1 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Thu, 4 Jul 2013 10:35:35 +0100 Subject: dt:net:stmmac: Allocate platform data only if its NULL. In some DT use-cases platform data might be already allocated and passed via AUXDATA. These are the cases where machine level code populates few callbacks in the platform data. This patch adds check and reuses platform_data if its valid, before allocating a new one. Signed-off-by: Srinivas Kandagatla Acked-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 17bc7827e7ca..adfb9a3ce6c5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -92,8 +92,10 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) if (IS_ERR(addr)) return PTR_ERR(addr); + plat_dat = pdev->dev.platform_data; if (pdev->dev.of_node) { - plat_dat = devm_kzalloc(&pdev->dev, + if (!plat_dat) + plat_dat = devm_kzalloc(&pdev->dev, sizeof(struct plat_stmmacenet_data), GFP_KERNEL); if (!plat_dat) { @@ -106,8 +108,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) pr_err("%s: main dt probe failed", __func__); return ret; } - } else { - plat_dat = pdev->dev.platform_data; } /* Custom initialisation (if needed)*/ -- cgit v1.2.3 From 25c83b5c2e82560406d5265f42cd147f1eb441d0 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Thu, 4 Jul 2013 10:35:41 +0100 Subject: dt:net:stmmac: Add support to dwmac version 3.610 and 3.710 This patch adds dt support for dwmac versions 3.610 and 3.710; these versions are integrated in the STiH415 and STiH416 ARM A9 SoCs. To support these IP versions, some of the device tree properties are extended. Signed-off-by: Srinivas Kandagatla Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/stmmac.txt | 4 ++++ .../net/ethernet/stmicro/stmmac/stmmac_platform.c | 26 ++++++++++++++++++++++ 2 files changed, 30 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt index 060bbf098ef3..e1ddfcc46a4e 100644 --- a/Documentation/devicetree/bindings/net/stmmac.txt +++ b/Documentation/devicetree/bindings/net/stmmac.txt @@ -12,6 +12,10 @@ Required properties: property - phy-mode: String, operation mode of the PHY interface. Supported values are: "mii", "rmii", "gmii", "rgmii". +- snps,phy-addr phy address to connect to.
+- snps,pbl Programmable Burst Length +- snps,fixed-burst Program the DMA to use the fixed burst mode +- snps,mixed-burst Program the DMA to use the mixed burst mode Optional properties: - mac-address: 6 bytes, mac address diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index adfb9a3ce6c5..03de76c7a177 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -34,12 +34,20 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) { struct device_node *np = pdev->dev.of_node; + struct stmmac_dma_cfg *dma_cfg; if (!np) return -ENODEV; *mac = of_get_mac_address(np); plat->interface = of_get_phy_mode(np); + + plat->bus_id = of_alias_get_id(np, "ethernet"); + if (plat->bus_id < 0) + plat->bus_id = 0; + + of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr); + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, sizeof(struct stmmac_mdio_bus_data), GFP_KERNEL); @@ -56,6 +64,22 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, plat->pmt = 1; } + if (of_device_is_compatible(np, "snps,dwmac-3.610") || + of_device_is_compatible(np, "snps,dwmac-3.710")) { + plat->enh_desc = 1; + plat->bugged_jumbo = 1; + plat->force_sf_dma_mode = 1; + } + + dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); + if (!dma_cfg) + return -ENOMEM; + + plat->dma_cfg = dma_cfg; + of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); + dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); + dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); + return 0; } #else @@ -228,7 +252,9 @@ static const struct dev_pm_ops stmmac_pltfr_pm_ops; static const struct of_device_id stmmac_dt_ids[] = { { .compatible = "st,spear600-gmac"}, + { .compatible = "snps,dwmac-3.610"}, { .compatible = "snps,dwmac-3.70a"}, + { .compatible = "snps,dwmac-3.710"}, { .compatible = "snps,dwmac"}, { /* sentinel */ } }; -- cgit v1.2.3 From 0e0764715d8116484d808f5b3985ca043080788e Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Thu, 4 Jul 2013 10:35:48 +0100 Subject: dt:net:stmmac: Add dt specific phy reset callback support. This patch adds phy reset callback support for stmmac driver via device trees. It adds three new properties to gmac device tree bindings to define the reset signal via gpio. With this patch users can conveniently pass reset gpio number with pre, pulse and post delay in micro secs via DTs. active low: _________ ____________ | | | | |_______________| active high: ________________ | | | | ________| |___________ Signed-off-by: Srinivas Kandagatla Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/stmmac.txt | 6 +++ drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 48 ++++++++++++++++++++++- include/linux/stmmac.h | 4 ++ 3 files changed, 56 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt index e1ddfcc46a4e..261c563b5f06 100644 --- a/Documentation/devicetree/bindings/net/stmmac.txt +++ b/Documentation/devicetree/bindings/net/stmmac.txt @@ -13,6 +13,12 @@ Required properties: - phy-mode: String, operation mode of the PHY interface. Supported values are: "mii", "rmii", "gmii", "rgmii". - snps,phy-addr phy address to connect to. +- snps,reset-gpio gpio number for phy reset. +- snps,reset-active-low boolean flag to indicate if phy reset is active low. 
+- snps,reset-delays-us is triplet of delays + The 1st cell is reset pre-delay in micro seconds. + The 2nd cell is reset pulse in micro seconds. + The 3rd cell is reset post-delay in micro seconds. - snps,pbl Programmable Burst Length - snps,fixed-burst Program the DMA to use the fixed burst mode - snps,mixed-burst Program the DMA to use the mixed burst mode diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index cc15039eaa47..fe7bc9903867 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -27,6 +27,9 @@ #include #include #include +#include +#include + #include #include "stmmac.h" @@ -131,10 +134,46 @@ static int stmmac_mdio_reset(struct mii_bus *bus) struct net_device *ndev = bus->priv; struct stmmac_priv *priv = netdev_priv(ndev); unsigned int mii_address = priv->hw->mii.addr; + struct stmmac_mdio_bus_data *data = priv->plat->mdio_bus_data; + +#ifdef CONFIG_OF + if (priv->device->of_node) { + int reset_gpio, active_low; + + if (data->reset_gpio < 0) { + struct device_node *np = priv->device->of_node; + if (!np) + return 0; + + data->reset_gpio = of_get_named_gpio(np, + "snps,reset-gpio", 0); + if (data->reset_gpio < 0) + return 0; + + data->active_low = of_property_read_bool(np, + "snps,reset-active-low"); + of_property_read_u32_array(np, + "snps,reset-delays-us", data->delays, 3); + } + + reset_gpio = data->reset_gpio; + active_low = data->active_low; + + if (!gpio_request(reset_gpio, "mdio-reset")) { + gpio_direction_output(reset_gpio, active_low ? 1 : 0); + udelay(data->delays[0]); + gpio_set_value(reset_gpio, active_low ? 0 : 1); + udelay(data->delays[1]); + gpio_set_value(reset_gpio, active_low ? 1 : 0); + udelay(data->delays[2]); + gpio_free(reset_gpio); + } + } +#endif - if (priv->plat->mdio_bus_data->phy_reset) { + if (data->phy_reset) { pr_debug("stmmac_mdio_reset: calling phy_reset\n"); - priv->plat->mdio_bus_data->phy_reset(priv->plat->bsp_priv); + data->phy_reset(priv->plat->bsp_priv); } /* This is a workaround for problems with the STE101P PHY. @@ -172,6 +211,11 @@ int stmmac_mdio_register(struct net_device *ndev) else irqlist = priv->mii_irq; +#ifdef CONFIG_OF + if (priv->device->of_node) + mdio_bus_data->reset_gpio = -1; +#endif + new_bus->name = "stmmac"; new_bus->read = &stmmac_mdio_read; new_bus->write = &stmmac_mdio_write; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index c1b3ed3fb787..9e495d31516e 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -80,6 +80,10 @@ struct stmmac_mdio_bus_data { unsigned int phy_mask; int *irqs; int probed_phy_irq; +#ifdef CONFIG_OF + int reset_gpio, active_low; + u32 delays[3]; +#endif }; struct stmmac_dma_cfg { -- cgit v1.2.3 From 734d4e159b283a4ae4d007b7e7a91d84398ccb92 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 4 Jul 2013 23:48:46 +0100 Subject: sfc: Fix memory leak when discarding scattered packets Commit 2768935a4660 ('sfc: reuse pages to avoid DMA mapping/unmapping costs') did not fully take account of DMA scattering which was introduced immediately before. If a received packet is invalid and must be discarded, we only drop a reference to the first buffer's page, but we need to drop a reference for each buffer the packet used. I think this bug was missed partly because efx_recycle_rx_buffers() was not renamed and so no longer does what its name says. It does not change the state of buffers, but only prepares the underlying pages for recycling. 
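In essence, the discard path now has to put back every fragment the packet used, not just the first one; a condensed, annotated sketch of the new helper introduced in the patch below (names as in the patch, comments added here for illustration):

/* Sketch of the fix (condensed from the patch below): when a scattered
 * packet is discarded, recycle the page of every fragment and drop one
 * buffer reference per fragment, instead of a single put_page() on the
 * first buffer only.
 */
static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	/* hand the pages of all fragments back to the recycle ring */
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* ...and release the buffer of each fragment in turn */
	do {
		efx_free_rx_buffer(rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}
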
Rename it accordingly. Signed-off-by: Ben Hutchings Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/rx.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 65646cd7af8e..6af9cfda50fb 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -282,9 +282,9 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, } /* Recycle the pages that are used by buffers that have just been received. */ -static void efx_recycle_rx_buffers(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf, - unsigned int n_frags) +static void efx_recycle_rx_pages(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) { struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); @@ -294,6 +294,20 @@ static void efx_recycle_rx_buffers(struct efx_channel *channel, } while (--n_frags); } +static void efx_discard_rx_packet(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + + efx_recycle_rx_pages(channel, rx_buf, n_frags); + + do { + efx_free_rx_buffer(rx_buf); + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--n_frags); +} + /** * efx_fast_push_rx_descriptors - push new RX descriptors quickly * @rx_queue: RX descriptor queue @@ -533,8 +547,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, */ if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { efx_rx_flush_packet(channel); - put_page(rx_buf->page); - efx_recycle_rx_buffers(channel, rx_buf, n_frags); + efx_discard_rx_packet(channel, rx_buf, n_frags); return; } @@ -570,9 +583,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); } - /* All fragments have been DMA-synced, so recycle buffers and pages. */ + /* All fragments have been DMA-synced, so recycle pages. */ rx_buf = efx_rx_buffer(rx_queue, index); - efx_recycle_rx_buffers(channel, rx_buf, n_frags); + efx_recycle_rx_pages(channel, rx_buf, n_frags); /* Pipeline receives so that we give time for packet headers to be * prefetched into cache. -- cgit v1.2.3 From e126ba97dba9edeb6fafa3665b5f8497fc9cdf8c Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Sun, 7 Jul 2013 17:25:49 +0300 Subject: mlx5: Add driver for Mellanox Connect-IB adapters The driver is comprised of two kernel modules: mlx5_ib and mlx5_core. This partitioning resembles what we have for mlx4, except that mlx5_ib is the pci device driver and not mlx5_core. mlx5_core is essentially a library that provides general functionality that is intended to be used by other Mellanox devices that will be introduced in the future. mlx5_ib has a similar role as any hardware device under drivers/infiniband/hw. Signed-off-by: Eli Cohen Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz [ Merge in coccinelle fixes from Fengguang Wu . 
- Roland ] Signed-off-by: Roland Dreier --- MAINTAINERS | 22 + drivers/infiniband/Kconfig | 1 + drivers/infiniband/Makefile | 1 + drivers/infiniband/hw/mlx5/Kconfig | 10 + drivers/infiniband/hw/mlx5/Makefile | 3 + drivers/infiniband/hw/mlx5/ah.c | 92 + drivers/infiniband/hw/mlx5/cq.c | 843 +++++++ drivers/infiniband/hw/mlx5/doorbell.c | 100 + drivers/infiniband/hw/mlx5/mad.c | 139 ++ drivers/infiniband/hw/mlx5/main.c | 1504 ++++++++++++ drivers/infiniband/hw/mlx5/mem.c | 162 ++ drivers/infiniband/hw/mlx5/mlx5_ib.h | 545 +++++ drivers/infiniband/hw/mlx5/mr.c | 1007 ++++++++ drivers/infiniband/hw/mlx5/qp.c | 2524 ++++++++++++++++++++ drivers/infiniband/hw/mlx5/srq.c | 473 ++++ drivers/infiniband/hw/mlx5/user.h | 121 + drivers/net/ethernet/mellanox/Kconfig | 1 + drivers/net/ethernet/mellanox/Makefile | 1 + drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 18 + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 5 + drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 238 ++ drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 1515 ++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/cq.c | 224 ++ drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 587 +++++ drivers/net/ethernet/mellanox/mlx5/core/eq.c | 521 ++++ drivers/net/ethernet/mellanox/mlx5/core/fw.c | 185 ++ drivers/net/ethernet/mellanox/mlx5/core/health.c | 217 ++ drivers/net/ethernet/mellanox/mlx5/core/mad.c | 78 + drivers/net/ethernet/mellanox/mlx5/core/main.c | 475 ++++ drivers/net/ethernet/mellanox/mlx5/core/mcg.c | 106 + .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 73 + drivers/net/ethernet/mellanox/mlx5/core/mr.c | 136 ++ .../net/ethernet/mellanox/mlx5/core/pagealloc.c | 435 ++++ drivers/net/ethernet/mellanox/mlx5/core/pd.c | 101 + drivers/net/ethernet/mellanox/mlx5/core/port.c | 104 + drivers/net/ethernet/mellanox/mlx5/core/qp.c | 301 +++ drivers/net/ethernet/mellanox/mlx5/core/srq.c | 223 ++ drivers/net/ethernet/mellanox/mlx5/core/uar.c | 223 ++ include/linux/mlx5/cmd.h | 51 + include/linux/mlx5/cq.h | 165 ++ include/linux/mlx5/device.h | 893 +++++++ include/linux/mlx5/doorbell.h | 79 + include/linux/mlx5/driver.h | 769 ++++++ include/linux/mlx5/qp.h | 467 ++++ include/linux/mlx5/srq.h | 41 + 45 files changed, 15779 insertions(+) create mode 100644 drivers/infiniband/hw/mlx5/Kconfig create mode 100644 drivers/infiniband/hw/mlx5/Makefile create mode 100644 drivers/infiniband/hw/mlx5/ah.c create mode 100644 drivers/infiniband/hw/mlx5/cq.c create mode 100644 drivers/infiniband/hw/mlx5/doorbell.c create mode 100644 drivers/infiniband/hw/mlx5/mad.c create mode 100644 drivers/infiniband/hw/mlx5/main.c create mode 100644 drivers/infiniband/hw/mlx5/mem.c create mode 100644 drivers/infiniband/hw/mlx5/mlx5_ib.h create mode 100644 drivers/infiniband/hw/mlx5/mr.c create mode 100644 drivers/infiniband/hw/mlx5/qp.c create mode 100644 drivers/infiniband/hw/mlx5/srq.c create mode 100644 drivers/infiniband/hw/mlx5/user.h create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/Kconfig create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/Makefile create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/alloc.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/cmd.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/cq.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/eq.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fw.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/health.c create mode 100644 
drivers/net/ethernet/mellanox/mlx5/core/mad.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/main.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/mcg.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/mr.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/pd.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/port.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/qp.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/srq.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/uar.c create mode 100644 include/linux/mlx5/cmd.h create mode 100644 include/linux/mlx5/cq.h create mode 100644 include/linux/mlx5/device.h create mode 100644 include/linux/mlx5/doorbell.h create mode 100644 include/linux/mlx5/driver.h create mode 100644 include/linux/mlx5/qp.h create mode 100644 include/linux/mlx5/srq.h (limited to 'drivers/net/ethernet') diff --git a/MAINTAINERS b/MAINTAINERS index 60d6a3393500..b4265364ed27 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5365,6 +5365,28 @@ W: http://linuxtv.org S: Odd Fixes F: drivers/media/radio/radio-miropcm20* +Mellanox MLX5 core VPI driver +M: Eli Cohen +L: netdev@vger.kernel.org +L: linux-rdma@vger.kernel.org +W: http://www.mellanox.com +Q: http://patchwork.ozlabs.org/project/netdev/list/ +Q: http://patchwork.kernel.org/project/linux-rdma/list/ +T: git://openfabrics.org/~eli/connect-ib.git +S: Supported +F: drivers/net/ethernet/mellanox/mlx5/core/ +F: include/linux/mlx5/ + +Mellanox MLX5 IB driver +M: Eli Cohen +L: linux-rdma@vger.kernel.org +W: http://www.mellanox.com +Q: http://patchwork.kernel.org/project/linux-rdma/list/ +T: git://openfabrics.org/~eli/connect-ib.git +S: Supported +F: include/linux/mlx5/ +F: drivers/infiniband/hw/mlx5/ + MODULE SUPPORT M: Rusty Russell S: Maintained diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index c85b56c28099..5ceda710f516 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -50,6 +50,7 @@ source "drivers/infiniband/hw/amso1100/Kconfig" source "drivers/infiniband/hw/cxgb3/Kconfig" source "drivers/infiniband/hw/cxgb4/Kconfig" source "drivers/infiniband/hw/mlx4/Kconfig" +source "drivers/infiniband/hw/mlx5/Kconfig" source "drivers/infiniband/hw/nes/Kconfig" source "drivers/infiniband/hw/ocrdma/Kconfig" diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile index b126fefe0b1c..1fe69888515f 100644 --- a/drivers/infiniband/Makefile +++ b/drivers/infiniband/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/ obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/ obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/ +obj-$(CONFIG_MLX5_INFINIBAND) += hw/mlx5/ obj-$(CONFIG_INFINIBAND_NES) += hw/nes/ obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/ obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig new file mode 100644 index 000000000000..8e6aebfaf8a4 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/Kconfig @@ -0,0 +1,10 @@ +config MLX5_INFINIBAND + tristate "Mellanox Connect-IB HCA support" + depends on NETDEVICES && ETHERNET && PCI && X86 + select NET_VENDOR_MELLANOX + select MLX5_CORE + ---help--- + This driver provides low-level InfiniBand support for + Mellanox Connect-IB PCI Express host channel adapters (HCAs). 
+ This is required to use InfiniBand protocols such as + IP-over-IB or SRP with these devices. diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile new file mode 100644 index 000000000000..4ea0135af484 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o + +mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c new file mode 100644 index 000000000000..39ab0caefdf9 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/ah.c @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "mlx5_ib.h" + +struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr, + struct mlx5_ib_ah *ah) +{ + if (ah_attr->ah_flags & IB_AH_GRH) { + memcpy(ah->av.rgid, &ah_attr->grh.dgid, 16); + ah->av.grh_gid_fl = cpu_to_be32(ah_attr->grh.flow_label | + (1 << 30) | + ah_attr->grh.sgid_index << 20); + ah->av.hop_limit = ah_attr->grh.hop_limit; + ah->av.tclass = ah_attr->grh.traffic_class; + } + + ah->av.rlid = cpu_to_be16(ah_attr->dlid); + ah->av.fl_mlid = ah_attr->src_path_bits & 0x7f; + ah->av.stat_rate_sl = (ah_attr->static_rate << 4) | (ah_attr->sl & 0xf); + + return &ah->ibah; +} + +struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) +{ + struct mlx5_ib_ah *ah; + + ah = kzalloc(sizeof(*ah), GFP_ATOMIC); + if (!ah) + return ERR_PTR(-ENOMEM); + + return create_ib_ah(ah_attr, ah); /* never fails */ +} + +int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) +{ + struct mlx5_ib_ah *ah = to_mah(ibah); + u32 tmp; + + memset(ah_attr, 0, sizeof(*ah_attr)); + + tmp = be32_to_cpu(ah->av.grh_gid_fl); + if (tmp & (1 << 30)) { + ah_attr->ah_flags = IB_AH_GRH; + ah_attr->grh.sgid_index = (tmp >> 20) & 0xff; + ah_attr->grh.flow_label = tmp & 0xfffff; + memcpy(&ah_attr->grh.dgid, ah->av.rgid, 16); + ah_attr->grh.hop_limit = ah->av.hop_limit; + ah_attr->grh.traffic_class = ah->av.tclass; + } + ah_attr->dlid = be16_to_cpu(ah->av.rlid); + ah_attr->static_rate = ah->av.stat_rate_sl >> 4; + ah_attr->sl = ah->av.stat_rate_sl & 0xf; + + return 0; +} + +int mlx5_ib_destroy_ah(struct ib_ah *ah) +{ + kfree(to_mah(ah)); + return 0; +} diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c new file mode 100644 index 000000000000..344ab03948a3 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -0,0 +1,843 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include "mlx5_ib.h" +#include "user.h" + +static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq) +{ + struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; + + ibcq->comp_handler(ibcq, ibcq->cq_context); +} + +static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type) +{ + struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); + struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); + struct ib_cq *ibcq = &cq->ibcq; + struct ib_event event; + + if (type != MLX5_EVENT_TYPE_CQ_ERROR) { + mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n", + type, mcq->cqn); + return; + } + + if (ibcq->event_handler) { + event.device = &dev->ib_dev; + event.event = IB_EVENT_CQ_ERR; + event.element.cq = ibcq; + ibcq->event_handler(&event, ibcq->cq_context); + } +} + +static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size) +{ + return mlx5_buf_offset(&buf->buf, n * size); +} + +static void *get_cqe(struct mlx5_ib_cq *cq, int n) +{ + return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz); +} + +static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) +{ + void *cqe = get_cqe(cq, n & cq->ibcq.cqe); + struct mlx5_cqe64 *cqe64; + + cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; + return ((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ + !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; +} + +static void *next_cqe_sw(struct mlx5_ib_cq *cq) +{ + return get_sw_cqe(cq, cq->mcq.cons_index); +} + +static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx) +{ + switch (wq->wr_data[idx]) { + case MLX5_IB_WR_UMR: + return 0; + + case IB_WR_LOCAL_INV: + return IB_WC_LOCAL_INV; + + case IB_WR_FAST_REG_MR: + return IB_WC_FAST_REG_MR; + + default: + pr_warn("unknown completion status\n"); + return 0; + } +} + +static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, + struct mlx5_ib_wq *wq, int idx) +{ + wc->wc_flags = 0; + switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { + case MLX5_OPCODE_RDMA_WRITE_IMM: + wc->wc_flags |= IB_WC_WITH_IMM; + case MLX5_OPCODE_RDMA_WRITE: + wc->opcode = IB_WC_RDMA_WRITE; + break; + case MLX5_OPCODE_SEND_IMM: + wc->wc_flags |= IB_WC_WITH_IMM; + case MLX5_OPCODE_SEND: + case MLX5_OPCODE_SEND_INVAL: + wc->opcode = IB_WC_SEND; + break; + case MLX5_OPCODE_RDMA_READ: + wc->opcode = IB_WC_RDMA_READ; + wc->byte_len = be32_to_cpu(cqe->byte_cnt); + break; + case MLX5_OPCODE_ATOMIC_CS: + wc->opcode = IB_WC_COMP_SWAP; + wc->byte_len = 8; + break; + case MLX5_OPCODE_ATOMIC_FA: + wc->opcode = IB_WC_FETCH_ADD; + wc->byte_len = 8; + break; + case MLX5_OPCODE_ATOMIC_MASKED_CS: + wc->opcode = IB_WC_MASKED_COMP_SWAP; + wc->byte_len = 8; + break; + case MLX5_OPCODE_ATOMIC_MASKED_FA: + wc->opcode = IB_WC_MASKED_FETCH_ADD; + wc->byte_len = 8; + break; + case MLX5_OPCODE_BIND_MW: + wc->opcode = IB_WC_BIND_MW; + break; + case MLX5_OPCODE_UMR: + wc->opcode = get_umr_comp(wq, idx); + break; + } +} + +enum { + MLX5_GRH_IN_BUFFER = 1, + MLX5_GRH_IN_CQE = 2, +}; + +static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, + struct mlx5_ib_qp *qp) +{ + struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); + struct mlx5_ib_srq *srq; + struct mlx5_ib_wq *wq; + u16 wqe_ctr; + u8 g; + + if (qp->ibqp.srq || qp->ibqp.xrcd) { + struct mlx5_core_srq *msrq = NULL; + + if (qp->ibqp.xrcd) { + msrq = mlx5_core_get_srq(&dev->mdev, + be32_to_cpu(cqe->srqn)); + srq = to_mibsrq(msrq); + } else { + srq = to_msrq(qp->ibqp.srq); + } + if (srq) { + wqe_ctr = be16_to_cpu(cqe->wqe_counter); + wc->wr_id = srq->wrid[wqe_ctr]; + mlx5_ib_free_srq_wqe(srq, wqe_ctr); + if 
(msrq && atomic_dec_and_test(&msrq->refcount)) + complete(&msrq->free); + } + } else { + wq = &qp->rq; + wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; + ++wq->tail; + } + wc->byte_len = be32_to_cpu(cqe->byte_cnt); + + switch (cqe->op_own >> 4) { + case MLX5_CQE_RESP_WR_IMM: + wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; + wc->wc_flags = IB_WC_WITH_IMM; + wc->ex.imm_data = cqe->imm_inval_pkey; + break; + case MLX5_CQE_RESP_SEND: + wc->opcode = IB_WC_RECV; + wc->wc_flags = 0; + break; + case MLX5_CQE_RESP_SEND_IMM: + wc->opcode = IB_WC_RECV; + wc->wc_flags = IB_WC_WITH_IMM; + wc->ex.imm_data = cqe->imm_inval_pkey; + break; + case MLX5_CQE_RESP_SEND_INV: + wc->opcode = IB_WC_RECV; + wc->wc_flags = IB_WC_WITH_INVALIDATE; + wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); + break; + } + wc->slid = be16_to_cpu(cqe->slid); + wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; + wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; + wc->dlid_path_bits = cqe->ml_path; + g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; + wc->wc_flags |= g ? IB_WC_GRH : 0; + wc->pkey_index = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff; +} + +static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe) +{ + __be32 *p = (__be32 *)cqe; + int i; + + mlx5_ib_warn(dev, "dump error cqe\n"); + for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4) + pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]), + be32_to_cpu(p[1]), be32_to_cpu(p[2]), + be32_to_cpu(p[3])); +} + +static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, + struct mlx5_err_cqe *cqe, + struct ib_wc *wc) +{ + int dump = 1; + + switch (cqe->syndrome) { + case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR: + wc->status = IB_WC_LOC_LEN_ERR; + break; + case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR: + wc->status = IB_WC_LOC_QP_OP_ERR; + break; + case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR: + wc->status = IB_WC_LOC_PROT_ERR; + break; + case MLX5_CQE_SYNDROME_WR_FLUSH_ERR: + dump = 0; + wc->status = IB_WC_WR_FLUSH_ERR; + break; + case MLX5_CQE_SYNDROME_MW_BIND_ERR: + wc->status = IB_WC_MW_BIND_ERR; + break; + case MLX5_CQE_SYNDROME_BAD_RESP_ERR: + wc->status = IB_WC_BAD_RESP_ERR; + break; + case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR: + wc->status = IB_WC_LOC_ACCESS_ERR; + break; + case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR: + wc->status = IB_WC_REM_INV_REQ_ERR; + break; + case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR: + wc->status = IB_WC_REM_ACCESS_ERR; + break; + case MLX5_CQE_SYNDROME_REMOTE_OP_ERR: + wc->status = IB_WC_REM_OP_ERR; + break; + case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR: + wc->status = IB_WC_RETRY_EXC_ERR; + dump = 0; + break; + case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR: + wc->status = IB_WC_RNR_RETRY_EXC_ERR; + dump = 0; + break; + case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR: + wc->status = IB_WC_REM_ABORT_ERR; + break; + default: + wc->status = IB_WC_GENERAL_ERR; + break; + } + + wc->vendor_err = cqe->vendor_err_synd; + if (dump) + dump_cqe(dev, cqe); +} + +static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx) +{ + /* TBD: waiting decision + */ + return 0; +} + +static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx) +{ + struct mlx5_wqe_data_seg *dpseg; + void *addr; + + dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) + + sizeof(struct mlx5_wqe_raddr_seg) + + sizeof(struct mlx5_wqe_atomic_seg); + addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr); + return addr; +} + +static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, + uint16_t idx) +{ + void *addr; + int byte_count; + int i; + + if 
(!is_atomic_response(qp, idx)) + return; + + byte_count = be32_to_cpu(cqe64->byte_cnt); + addr = mlx5_get_atomic_laddr(qp, idx); + + if (byte_count == 4) { + *(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr)); + } else { + for (i = 0; i < byte_count; i += 8) { + *(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr)); + addr += 8; + } + } + + return; +} + +static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, + u16 tail, u16 head) +{ + int idx; + + do { + idx = tail & (qp->sq.wqe_cnt - 1); + handle_atomic(qp, cqe64, idx); + if (idx == head) + break; + + tail = qp->sq.w_list[idx].next; + } while (1); + tail = qp->sq.w_list[idx].next; + qp->sq.last_poll = tail; +} + +static int mlx5_poll_one(struct mlx5_ib_cq *cq, + struct mlx5_ib_qp **cur_qp, + struct ib_wc *wc) +{ + struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); + struct mlx5_err_cqe *err_cqe; + struct mlx5_cqe64 *cqe64; + struct mlx5_core_qp *mqp; + struct mlx5_ib_wq *wq; + uint8_t opcode; + uint32_t qpn; + u16 wqe_ctr; + void *cqe; + int idx; + + cqe = next_cqe_sw(cq); + if (!cqe) + return -EAGAIN; + + cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; + + ++cq->mcq.cons_index; + + /* Make sure we read CQ entry contents after we've checked the + * ownership bit. + */ + rmb(); + + /* TBD: resize CQ */ + + qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; + if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { + /* We do not have to take the QP table lock here, + * because CQs will be locked while QPs are removed + * from the table. + */ + mqp = __mlx5_qp_lookup(&dev->mdev, qpn); + if (unlikely(!mqp)) { + mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n", + cq->mcq.cqn, qpn); + return -EINVAL; + } + + *cur_qp = to_mibqp(mqp); + } + + wc->qp = &(*cur_qp)->ibqp; + opcode = cqe64->op_own >> 4; + switch (opcode) { + case MLX5_CQE_REQ: + wq = &(*cur_qp)->sq; + wqe_ctr = be16_to_cpu(cqe64->wqe_counter); + idx = wqe_ctr & (wq->wqe_cnt - 1); + handle_good_req(wc, cqe64, wq, idx); + handle_atomics(*cur_qp, cqe64, wq->last_poll, idx); + wc->wr_id = wq->wrid[idx]; + wq->tail = wq->wqe_head[idx] + 1; + wc->status = IB_WC_SUCCESS; + break; + case MLX5_CQE_RESP_WR_IMM: + case MLX5_CQE_RESP_SEND: + case MLX5_CQE_RESP_SEND_IMM: + case MLX5_CQE_RESP_SEND_INV: + handle_responder(wc, cqe64, *cur_qp); + wc->status = IB_WC_SUCCESS; + break; + case MLX5_CQE_RESIZE_CQ: + break; + case MLX5_CQE_REQ_ERR: + case MLX5_CQE_RESP_ERR: + err_cqe = (struct mlx5_err_cqe *)cqe64; + mlx5_handle_error_cqe(dev, err_cqe, wc); + mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n", + opcode == MLX5_CQE_REQ_ERR ? 
+ "Requestor" : "Responder", cq->mcq.cqn); + mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n", + err_cqe->syndrome, err_cqe->vendor_err_synd); + if (opcode == MLX5_CQE_REQ_ERR) { + wq = &(*cur_qp)->sq; + wqe_ctr = be16_to_cpu(cqe64->wqe_counter); + idx = wqe_ctr & (wq->wqe_cnt - 1); + wc->wr_id = wq->wrid[idx]; + wq->tail = wq->wqe_head[idx] + 1; + } else { + struct mlx5_ib_srq *srq; + + if ((*cur_qp)->ibqp.srq) { + srq = to_msrq((*cur_qp)->ibqp.srq); + wqe_ctr = be16_to_cpu(cqe64->wqe_counter); + wc->wr_id = srq->wrid[wqe_ctr]; + mlx5_ib_free_srq_wqe(srq, wqe_ctr); + } else { + wq = &(*cur_qp)->rq; + wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; + ++wq->tail; + } + } + break; + } + + return 0; +} + +int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) +{ + struct mlx5_ib_cq *cq = to_mcq(ibcq); + struct mlx5_ib_qp *cur_qp = NULL; + unsigned long flags; + int npolled; + int err = 0; + + spin_lock_irqsave(&cq->lock, flags); + + for (npolled = 0; npolled < num_entries; npolled++) { + err = mlx5_poll_one(cq, &cur_qp, wc + npolled); + if (err) + break; + } + + if (npolled) + mlx5_cq_set_ci(&cq->mcq); + + spin_unlock_irqrestore(&cq->lock, flags); + + if (err == 0 || err == -EAGAIN) + return npolled; + else + return err; +} + +int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) +{ + mlx5_cq_arm(&to_mcq(ibcq)->mcq, + (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? + MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT, + to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map, + MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock)); + + return 0; +} + +static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, + int nent, int cqe_size) +{ + int err; + + err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size, + PAGE_SIZE * 2, &buf->buf); + if (err) + return err; + + buf->cqe_size = cqe_size; + + return 0; +} + +static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) +{ + mlx5_buf_free(&dev->mdev, &buf->buf); +} + +static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, + struct ib_ucontext *context, struct mlx5_ib_cq *cq, + int entries, struct mlx5_create_cq_mbox_in **cqb, + int *cqe_size, int *index, int *inlen) +{ + struct mlx5_ib_create_cq ucmd; + int page_shift; + int npages; + int ncont; + int err; + + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) + return -EFAULT; + + if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128) + return -EINVAL; + + *cqe_size = ucmd.cqe_size; + + cq->buf.umem = ib_umem_get(context, ucmd.buf_addr, + entries * ucmd.cqe_size, + IB_ACCESS_LOCAL_WRITE, 1); + if (IS_ERR(cq->buf.umem)) { + err = PTR_ERR(cq->buf.umem); + return err; + } + + err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr, + &cq->db); + if (err) + goto err_umem; + + mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, + &ncont, NULL); + mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n", + ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont); + + *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont; + *cqb = mlx5_vzalloc(*inlen); + if (!*cqb) { + err = -ENOMEM; + goto err_db; + } + mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0); + (*cqb)->ctx.log_pg_sz = page_shift - PAGE_SHIFT; + + *index = to_mucontext(context)->uuari.uars[0].index; + + return 0; + +err_db: + mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); + +err_umem: + ib_umem_release(cq->buf.umem); + return err; +} + +static void 
destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context) +{ + mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); + ib_umem_release(cq->buf.umem); +} + +static void init_cq_buf(struct mlx5_ib_cq *cq, int nent) +{ + int i; + void *cqe; + struct mlx5_cqe64 *cqe64; + + for (i = 0; i < nent; i++) { + cqe = get_cqe(cq, i); + cqe64 = (cq->buf.cqe_size == 64) ? cqe : cqe + 64; + cqe64->op_own = 0xf1; + } +} + +static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, + int entries, int cqe_size, + struct mlx5_create_cq_mbox_in **cqb, + int *index, int *inlen) +{ + int err; + + err = mlx5_db_alloc(&dev->mdev, &cq->db); + if (err) + return err; + + cq->mcq.set_ci_db = cq->db.db; + cq->mcq.arm_db = cq->db.db + 1; + *cq->mcq.set_ci_db = 0; + *cq->mcq.arm_db = 0; + cq->mcq.cqe_sz = cqe_size; + + err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size); + if (err) + goto err_db; + + init_cq_buf(cq, entries); + + *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages; + *cqb = mlx5_vzalloc(*inlen); + if (!*cqb) { + err = -ENOMEM; + goto err_buf; + } + mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas); + + (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - PAGE_SHIFT; + *index = dev->mdev.priv.uuari.uars[0].index; + + return 0; + +err_buf: + free_cq_buf(dev, &cq->buf); + +err_db: + mlx5_db_free(&dev->mdev, &cq->db); + return err; +} + +static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) +{ + free_cq_buf(dev, &cq->buf); + mlx5_db_free(&dev->mdev, &cq->db); +} + +struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, + int vector, struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct mlx5_create_cq_mbox_in *cqb = NULL; + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_ib_cq *cq; + int uninitialized_var(index); + int uninitialized_var(inlen); + int cqe_size; + int irqn; + int eqn; + int err; + + entries = roundup_pow_of_two(entries + 1); + if (entries < 1 || entries > dev->mdev.caps.max_cqes) + return ERR_PTR(-EINVAL); + + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) + return ERR_PTR(-ENOMEM); + + cq->ibcq.cqe = entries - 1; + mutex_init(&cq->resize_mutex); + spin_lock_init(&cq->lock); + cq->resize_buf = NULL; + cq->resize_umem = NULL; + + if (context) { + err = create_cq_user(dev, udata, context, cq, entries, + &cqb, &cqe_size, &index, &inlen); + if (err) + goto err_create; + } else { + /* for now choose 64 bytes till we have a proper interface */ + cqe_size = 64; + err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, + &index, &inlen); + if (err) + goto err_create; + } + + cq->cqe_size = cqe_size; + cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5; + cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index); + err = mlx5_vector2eqn(dev, vector, &eqn, &irqn); + if (err) + goto err_cqb; + + cqb->ctx.c_eqn = cpu_to_be16(eqn); + cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma); + + err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen); + if (err) + goto err_cqb; + + mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); + cq->mcq.irqn = irqn; + cq->mcq.comp = mlx5_ib_cq_comp; + cq->mcq.event = mlx5_ib_cq_event; + + if (context) + if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { + err = -EFAULT; + goto err_cmd; + } + + + mlx5_vfree(cqb); + return &cq->ibcq; + +err_cmd: + mlx5_core_destroy_cq(&dev->mdev, &cq->mcq); + +err_cqb: + mlx5_vfree(cqb); + if (context) + destroy_cq_user(cq, context); + else + destroy_cq_kernel(dev, cq); + +err_create: + kfree(cq); + + return 
ERR_PTR(err); +} + + +int mlx5_ib_destroy_cq(struct ib_cq *cq) +{ + struct mlx5_ib_dev *dev = to_mdev(cq->device); + struct mlx5_ib_cq *mcq = to_mcq(cq); + struct ib_ucontext *context = NULL; + + if (cq->uobject) + context = cq->uobject->context; + + mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq); + if (context) + destroy_cq_user(mcq, context); + else + destroy_cq_kernel(dev, mcq); + + kfree(mcq); + + return 0; +} + +static int is_equal_rsn(struct mlx5_cqe64 *cqe64, struct mlx5_ib_srq *srq, + u32 rsn) +{ + u32 lrsn; + + if (srq) + lrsn = be32_to_cpu(cqe64->srqn) & 0xffffff; + else + lrsn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff; + + return rsn == lrsn; +} + +void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) +{ + struct mlx5_cqe64 *cqe64, *dest64; + void *cqe, *dest; + u32 prod_index; + int nfreed = 0; + u8 owner_bit; + + if (!cq) + return; + + /* First we need to find the current producer index, so we + * know where to start cleaning from. It doesn't matter if HW + * adds new entries after this loop -- the QP we're worried + * about is already in RESET, so the new entries won't come + * from our QP and therefore don't need to be checked. + */ + for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++) + if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) + break; + + /* Now sweep backwards through the CQ, removing CQ entries + * that match our QP by copying older entries on top of them. + */ + while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { + cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); + cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; + if (is_equal_rsn(cqe64, srq, rsn)) { + if (srq) + mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter)); + ++nfreed; + } else if (nfreed) { + dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); + dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64; + owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK; + memcpy(dest, cqe, cq->mcq.cqe_sz); + dest64->op_own = owner_bit | + (dest64->op_own & ~MLX5_CQE_OWNER_MASK); + } + } + + if (nfreed) { + cq->mcq.cons_index += nfreed; + /* Make sure update of buffer contents is done before + * updating consumer index. + */ + wmb(); + mlx5_cq_set_ci(&cq->mcq); + } +} + +void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) +{ + if (!cq) + return; + + spin_lock_irq(&cq->lock); + __mlx5_ib_cq_clean(cq, qpn, srq); + spin_unlock_irq(&cq->lock); +} + +int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) +{ + return -ENOSYS; +} + +int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) +{ + return -ENOSYS; +} + +int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq) +{ + struct mlx5_ib_cq *cq; + + if (!ibcq) + return 128; + + cq = to_mcq(ibcq); + return cq->cqe_size; +} diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c new file mode 100644 index 000000000000..256a23344f28 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/doorbell.c @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include + +#include "mlx5_ib.h" + +struct mlx5_ib_user_db_page { + struct list_head list; + struct ib_umem *umem; + unsigned long user_virt; + int refcnt; +}; + +int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, + struct mlx5_db *db) +{ + struct mlx5_ib_user_db_page *page; + struct ib_umem_chunk *chunk; + int err = 0; + + mutex_lock(&context->db_page_mutex); + + list_for_each_entry(page, &context->db_page_list, list) + if (page->user_virt == (virt & PAGE_MASK)) + goto found; + + page = kmalloc(sizeof(*page), GFP_KERNEL); + if (!page) { + err = -ENOMEM; + goto out; + } + + page->user_virt = (virt & PAGE_MASK); + page->refcnt = 0; + page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, + PAGE_SIZE, 0, 0); + if (IS_ERR(page->umem)) { + err = PTR_ERR(page->umem); + kfree(page); + goto out; + } + + list_add(&page->list, &context->db_page_list); + +found: + chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list); + db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK); + db->u.user_page = page; + ++page->refcnt; + +out: + mutex_unlock(&context->db_page_mutex); + + return err; +} + +void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db) +{ + mutex_lock(&context->db_page_mutex); + + if (!--db->u.user_page->refcnt) { + list_del(&db->u.user_page->list); + ib_umem_release(db->u.user_page->umem); + kfree(db->u.user_page); + } + + mutex_unlock(&context->db_page_mutex); +} diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c new file mode 100644 index 000000000000..5c8938be0e08 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include "mlx5_ib.h" + +enum { + MLX5_IB_VENDOR_CLASS1 = 0x9, + MLX5_IB_VENDOR_CLASS2 = 0xa +}; + +int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, + int port, struct ib_wc *in_wc, struct ib_grh *in_grh, + void *in_mad, void *response_mad) +{ + u8 op_modifier = 0; + + /* Key check traps can't be generated unless we have in_wc to + * tell us where to send the trap. + */ + if (ignore_mkey || !in_wc) + op_modifier |= 0x1; + if (ignore_bkey || !in_wc) + op_modifier |= 0x2; + + return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port); +} + +int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + struct ib_wc *in_wc, struct ib_grh *in_grh, + struct ib_mad *in_mad, struct ib_mad *out_mad) +{ + u16 slid; + int err; + + slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); + + if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + + if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_SET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) + return IB_MAD_RESULT_SUCCESS; + + /* Don't process SMInfo queries -- the SMA can't handle them. 
+ */ + if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO) + return IB_MAD_RESULT_SUCCESS; + } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || + in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 || + in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 || + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) { + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) + return IB_MAD_RESULT_SUCCESS; + } else { + return IB_MAD_RESULT_SUCCESS; + } + + err = mlx5_MAD_IFC(to_mdev(ibdev), + mad_flags & IB_MAD_IGNORE_MKEY, + mad_flags & IB_MAD_IGNORE_BKEY, + port_num, in_wc, in_grh, in_mad, out_mad); + if (err) + return IB_MAD_RESULT_FAILURE; + + /* set return bit in status of directed route responses */ + if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) + out_mad->mad_hdr.status |= cpu_to_be16(1 << 15); + + if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) + /* no response for trap repress */ + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; +} + +int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + u16 packet_error; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + + packet_error = be16_to_cpu(out_mad->status); + + dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ? + MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0; + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c new file mode 100644 index 000000000000..6b1007f9bc29 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/main.c @@ -0,0 +1,1504 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "user.h" +#include "mlx5_ib.h" + +#define DRIVER_NAME "mlx5_ib" +#define DRIVER_VERSION "1.0" +#define DRIVER_RELDATE "June 2013" + +MODULE_AUTHOR("Eli Cohen "); +MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRIVER_VERSION); + +static int prof_sel = 2; +module_param_named(prof_sel, prof_sel, int, 0444); +MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2"); + +static char mlx5_version[] = + DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v" + DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; + +struct mlx5_profile profile[] = { + [0] = { + .mask = 0, + }, + [1] = { + .mask = MLX5_PROF_MASK_QP_SIZE, + .log_max_qp = 12, + }, + [2] = { + .mask = MLX5_PROF_MASK_QP_SIZE | + MLX5_PROF_MASK_MR_CACHE, + .log_max_qp = 17, + .mr_cache[0] = { + .size = 500, + .limit = 250 + }, + .mr_cache[1] = { + .size = 500, + .limit = 250 + }, + .mr_cache[2] = { + .size = 500, + .limit = 250 + }, + .mr_cache[3] = { + .size = 500, + .limit = 250 + }, + .mr_cache[4] = { + .size = 500, + .limit = 250 + }, + .mr_cache[5] = { + .size = 500, + .limit = 250 + }, + .mr_cache[6] = { + .size = 500, + .limit = 250 + }, + .mr_cache[7] = { + .size = 500, + .limit = 250 + }, + .mr_cache[8] = { + .size = 500, + .limit = 250 + }, + .mr_cache[9] = { + .size = 500, + .limit = 250 + }, + .mr_cache[10] = { + .size = 500, + .limit = 250 + }, + .mr_cache[11] = { + .size = 500, + .limit = 250 + }, + .mr_cache[12] = { + .size = 64, + .limit = 32 + }, + .mr_cache[13] = { + .size = 32, + .limit = 16 + }, + .mr_cache[14] = { + .size = 16, + .limit = 8 + }, + .mr_cache[15] = { + .size = 8, + .limit = 4 + }, + }, +}; + +int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn) +{ + struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; + struct mlx5_eq *eq, *n; + int err = -ENOENT; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &dev->eqs_list, list) { + if (eq->index == vector) { + *eqn = eq->eqn; + *irqn = eq->irqn; + err = 0; + break; + } + } + spin_unlock(&table->lock); + + return err; +} + +static int alloc_comp_eqs(struct mlx5_ib_dev *dev) +{ + struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; + struct mlx5_eq *eq, *n; + int ncomp_vec; + int nent; + int err; + int i; + + INIT_LIST_HEAD(&dev->eqs_list); + ncomp_vec = table->num_comp_vectors; + nent = MLX5_COMP_EQ_SIZE; + for (i = 0; i < ncomp_vec; i++) { + eq = kzalloc(sizeof(*eq), GFP_KERNEL); + if (!eq) { + err = -ENOMEM; + goto clean; + } + + snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); + err = mlx5_create_map_eq(&dev->mdev, eq, + i + MLX5_EQ_VEC_COMP_BASE, nent, 0, + eq->name, + &dev->mdev.priv.uuari.uars[0]); + if (err) { + kfree(eq); + goto clean; + } + mlx5_ib_dbg(dev, "allocated completion EQN %d\n", eq->eqn); + eq->index = i; + spin_lock(&table->lock); + list_add_tail(&eq->list, &dev->eqs_list); + spin_unlock(&table->lock); + } + + dev->num_comp_vectors = ncomp_vec; + return 0; + +clean: + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &dev->eqs_list, list) { + list_del(&eq->list); + spin_unlock(&table->lock); + if (mlx5_destroy_unmap_eq(&dev->mdev, eq)) + mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn); + kfree(eq); + spin_lock(&table->lock); + } + spin_unlock(&table->lock); + return err; +} + +static void free_comp_eqs(struct mlx5_ib_dev *dev) +{ + struct mlx5_eq_table *table = 
&dev->mdev.priv.eq_table; + struct mlx5_eq *eq, *n; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &dev->eqs_list, list) { + list_del(&eq->list); + spin_unlock(&table->lock); + if (mlx5_destroy_unmap_eq(&dev->mdev, eq)) + mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn); + kfree(eq); + spin_lock(&table->lock); + } + spin_unlock(&table->lock); +} + +static int mlx5_ib_query_device(struct ib_device *ibdev, + struct ib_device_attr *props) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + int max_rq_sg; + int max_sq_sg; + u64 flags; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memset(props, 0, sizeof(*props)); + + props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) | + (fw_rev_min(&dev->mdev) << 16) | + fw_rev_sub(&dev->mdev); + props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | + IB_DEVICE_PORT_ACTIVE_EVENT | + IB_DEVICE_SYS_IMAGE_GUID | + IB_DEVICE_RC_RNR_NAK_GEN | + IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; + flags = dev->mdev.caps.flags; + if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) + props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; + if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR) + props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; + if (flags & MLX5_DEV_CAP_FLAG_APM) + props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; + props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; + if (flags & MLX5_DEV_CAP_FLAG_XRC) + props->device_cap_flags |= IB_DEVICE_XRC; + props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; + + props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & + 0xffffff; + props->vendor_part_id = be16_to_cpup((__be16 *)(out_mad->data + 30)); + props->hw_ver = be32_to_cpup((__be32 *)(out_mad->data + 32)); + memcpy(&props->sys_image_guid, out_mad->data + 4, 8); + + props->max_mr_size = ~0ull; + props->page_size_cap = dev->mdev.caps.min_page_sz; + props->max_qp = 1 << dev->mdev.caps.log_max_qp; + props->max_qp_wr = dev->mdev.caps.max_wqes; + max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg); + max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) / + sizeof(struct mlx5_wqe_data_seg); + props->max_sge = min(max_rq_sg, max_sq_sg); + props->max_cq = 1 << dev->mdev.caps.log_max_cq; + props->max_cqe = dev->mdev.caps.max_cqes - 1; + props->max_mr = 1 << dev->mdev.caps.log_max_mkey; + props->max_pd = 1 << dev->mdev.caps.log_max_pd; + props->max_qp_rd_atom = dev->mdev.caps.max_ra_req_qp; + props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp; + props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; + props->max_srq = 1 << dev->mdev.caps.log_max_srq; + props->max_srq_wr = dev->mdev.caps.max_srq_wqes - 1; + props->max_srq_sge = max_rq_sg - 1; + props->max_fast_reg_page_list_len = (unsigned int)-1; + props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay; + props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ? 
+ IB_ATOMIC_HCA : IB_ATOMIC_NONE; + props->masked_atomic_cap = IB_ATOMIC_HCA; + props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); + props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg; + props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg; + props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * + props->max_mcast_grp; + props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */ + +out: + kfree(in_mad); + kfree(out_mad); + + return err; +} + +int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int ext_active_speed; + int err = -ENOMEM; + + if (port < 1 || port > dev->mdev.caps.num_ports) { + mlx5_ib_warn(dev, "invalid port number %d\n", port); + return -EINVAL; + } + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + memset(props, 0, sizeof(*props)); + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) { + mlx5_ib_warn(dev, "err %d\n", err); + goto out; + } + + + props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16)); + props->lmc = out_mad->data[34] & 0x7; + props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18)); + props->sm_sl = out_mad->data[36] & 0xf; + props->state = out_mad->data[32] & 0xf; + props->phys_state = out_mad->data[33] >> 4; + props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20)); + props->gid_tbl_len = out_mad->data[50]; + props->max_msg_sz = 1 << to_mdev(ibdev)->mdev.caps.log_max_msg; + props->pkey_tbl_len = to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len; + props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46)); + props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48)); + props->active_width = out_mad->data[31] & 0xf; + props->active_speed = out_mad->data[35] >> 4; + props->max_mtu = out_mad->data[41] & 0xf; + props->active_mtu = out_mad->data[36] >> 4; + props->subnet_timeout = out_mad->data[51] & 0x1f; + props->max_vl_num = out_mad->data[37] >> 4; + props->init_type_reply = out_mad->data[41] >> 4; + + /* Check if extended speeds (EDR/FDR/...) 
are supported */ + if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) { + ext_active_speed = out_mad->data[62] >> 4; + + switch (ext_active_speed) { + case 1: + props->active_speed = 16; /* FDR */ + break; + case 2: + props->active_speed = 32; /* EDR */ + break; + } + } + + /* If reported active speed is QDR, check if is FDR-10 */ + if (props->active_speed == 4) { + if (dev->mdev.caps.ext_port_cap[port - 1] & + MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { + init_query_mad(in_mad); + in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(dev, 1, 1, port, + NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + /* Checking LinkSpeedActive for FDR-10 */ + if (out_mad->data[15] & 0x1) + props->active_speed = 8; + } + } + +out: + kfree(in_mad); + kfree(out_mad); + + return err; +} + +static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(gid->raw, out_mad->data + 8, 8); + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; + in_mad->attr_mod = cpu_to_be32(index / 8); + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; + in_mad->attr_mod = cpu_to_be32(index / 32); + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +struct mlx5_reg_node_desc { + u8 desc[64]; +}; + +static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask, + struct ib_device_modify *props) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_reg_node_desc in; + struct mlx5_reg_node_desc out; + int err; + + if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) + return -EOPNOTSUPP; + + if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) + return 0; + + /* + * If possible, pass node desc to FW, so it can generate + * a 144 trap. If cmd fails, just ignore. 
+ */ + memcpy(&in, props->node_desc, 64); + err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out, + sizeof(out), MLX5_REG_NODE_DESC, 0, 1); + if (err) + return err; + + memcpy(ibdev->node_desc, props->node_desc, 64); + + return err; +} + +static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, + struct ib_port_modify *props) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct ib_port_attr attr; + u32 tmp; + int err; + + mutex_lock(&dev->cap_mask_mutex); + + err = mlx5_ib_query_port(ibdev, port, &attr); + if (err) + goto out; + + tmp = (attr.port_cap_flags | props->set_port_cap_mask) & + ~props->clr_port_cap_mask; + + err = mlx5_set_port_caps(&dev->mdev, port, tmp); + +out: + mutex_unlock(&dev->cap_mask_mutex); + return err; +} + +static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_ib_alloc_ucontext_req req; + struct mlx5_ib_alloc_ucontext_resp resp; + struct mlx5_ib_ucontext *context; + struct mlx5_uuar_info *uuari; + struct mlx5_uar *uars; + int num_uars; + int uuarn; + int err; + int i; + + if (!dev->ib_active) + return ERR_PTR(-EAGAIN); + + err = ib_copy_from_udata(&req, udata, sizeof(req)); + if (err) + return ERR_PTR(err); + + if (req.total_num_uuars > MLX5_MAX_UUARS) + return ERR_PTR(-ENOMEM); + + if (req.total_num_uuars == 0) + return ERR_PTR(-EINVAL); + + req.total_num_uuars = ALIGN(req.total_num_uuars, MLX5_BF_REGS_PER_PAGE); + if (req.num_low_latency_uuars > req.total_num_uuars - 1) + return ERR_PTR(-EINVAL); + + num_uars = req.total_num_uuars / MLX5_BF_REGS_PER_PAGE; + resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp; + resp.bf_reg_size = dev->mdev.caps.bf_reg_size; + resp.cache_line_size = L1_CACHE_BYTES; + resp.max_sq_desc_sz = dev->mdev.caps.max_sq_desc_sz; + resp.max_rq_desc_sz = dev->mdev.caps.max_rq_desc_sz; + resp.max_send_wqebb = dev->mdev.caps.max_wqes; + resp.max_recv_wr = dev->mdev.caps.max_wqes; + resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes; + + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return ERR_PTR(-ENOMEM); + + uuari = &context->uuari; + mutex_init(&uuari->lock); + uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL); + if (!uars) { + err = -ENOMEM; + goto out_ctx; + } + + uuari->bitmap = kcalloc(BITS_TO_LONGS(req.total_num_uuars), + sizeof(*uuari->bitmap), + GFP_KERNEL); + if (!uuari->bitmap) { + err = -ENOMEM; + goto out_uar_ctx; + } + /* + * clear all fast path uuars + */ + for (i = 0; i < req.total_num_uuars; i++) { + uuarn = i & 3; + if (uuarn == 2 || uuarn == 3) + set_bit(i, uuari->bitmap); + } + + uuari->count = kcalloc(req.total_num_uuars, sizeof(*uuari->count), GFP_KERNEL); + if (!uuari->count) { + err = -ENOMEM; + goto out_bitmap; + } + + for (i = 0; i < num_uars; i++) { + err = mlx5_cmd_alloc_uar(&dev->mdev, &uars[i].index); + if (err) + goto out_count; + } + + INIT_LIST_HEAD(&context->db_page_list); + mutex_init(&context->db_page_mutex); + + resp.tot_uuars = req.total_num_uuars; + resp.num_ports = dev->mdev.caps.num_ports; + err = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (err) + goto out_uars; + + uuari->num_low_latency_uuars = req.num_low_latency_uuars; + uuari->uars = uars; + uuari->num_uars = num_uars; + return &context->ibucontext; + +out_uars: + for (i--; i >= 0; i--) + mlx5_cmd_free_uar(&dev->mdev, uars[i].index); +out_count: + kfree(uuari->count); + +out_bitmap: + kfree(uuari->bitmap); + +out_uar_ctx: + kfree(uars); + +out_ctx: + kfree(context); + return 
ERR_PTR(err); +} + +static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) +{ + struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); + struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); + struct mlx5_uuar_info *uuari = &context->uuari; + int i; + + for (i = 0; i < uuari->num_uars; i++) { + if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index)) + mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index); + } + + kfree(uuari->count); + kfree(uuari->bitmap); + kfree(uuari->uars); + kfree(context); + + return 0; +} + +static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index) +{ + return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index; +} + +static int get_command(unsigned long offset) +{ + return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK; +} + +static int get_arg(unsigned long offset) +{ + return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1); +} + +static int get_index(unsigned long offset) +{ + return get_arg(offset); +} + +static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) +{ + struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); + struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); + struct mlx5_uuar_info *uuari = &context->uuari; + unsigned long command; + unsigned long idx; + phys_addr_t pfn; + + command = get_command(vma->vm_pgoff); + switch (command) { + case MLX5_IB_MMAP_REGULAR_PAGE: + if (vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; + + idx = get_index(vma->vm_pgoff); + pfn = uar_index2pfn(dev, uuari->uars[idx].index); + mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx, + (unsigned long long)pfn); + + if (idx >= uuari->num_uars) + return -EINVAL; + + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + if (io_remap_pfn_range(vma, vma->vm_start, pfn, + PAGE_SIZE, vma->vm_page_prot)) + return -EAGAIN; + + mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n", + vma->vm_start, + (unsigned long long)pfn << PAGE_SHIFT); + break; + + case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: + return -ENOSYS; + + default: + return -EINVAL; + } + + return 0; +} + +static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) +{ + struct mlx5_create_mkey_mbox_in *in; + struct mlx5_mkey_seg *seg; + struct mlx5_core_mr mr; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + seg = &in->seg; + seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA; + seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); + seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + seg->start_addr = 0; + + err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in)); + if (err) { + mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); + goto err_in; + } + + kfree(in); + *key = mr.key; + + return 0; + +err_in: + kfree(in); + + return err; +} + +static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key) +{ + struct mlx5_core_mr mr; + int err; + + memset(&mr, 0, sizeof(mr)); + mr.key = key; + err = mlx5_core_destroy_mkey(&dev->mdev, &mr); + if (err) + mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key); +} + +static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct mlx5_ib_alloc_pd_resp resp; + struct mlx5_ib_pd *pd; + int err; + + pd = kmalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); + + err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn); + if (err) { + kfree(pd); + return ERR_PTR(err); + } + + if (context) { + resp.pdn = pd->pdn; + if 
(ib_copy_to_udata(udata, &resp, sizeof(resp))) { + mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn); + kfree(pd); + return ERR_PTR(-EFAULT); + } + } else { + err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); + if (err) { + mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn); + kfree(pd); + return ERR_PTR(err); + } + } + + return &pd->ibpd; +} + +static int mlx5_ib_dealloc_pd(struct ib_pd *pd) +{ + struct mlx5_ib_dev *mdev = to_mdev(pd->device); + struct mlx5_ib_pd *mpd = to_mpd(pd); + + if (!pd->uobject) + free_pa_mkey(mdev, mpd->pa_lkey); + + mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn); + kfree(mpd); + + return 0; +} + +static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); + int err; + + err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num); + if (err) + mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n", + ibqp->qp_num, gid->raw); + + return err; +} + +static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); + int err; + + err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num); + if (err) + mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n", + ibqp->qp_num, gid->raw); + + return err; +} + +static int init_node_data(struct mlx5_ib_dev *dev) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; + + err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(dev->ib_dev.node_desc, out_mad->data, 64); + + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + + err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32)); + memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct mlx5_ib_dev *dev = + container_of(device, struct mlx5_ib_dev, ib_dev.dev); + + return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages); +} + +static ssize_t show_reg_pages(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct mlx5_ib_dev *dev = + container_of(device, struct mlx5_ib_dev, ib_dev.dev); + + return sprintf(buf, "%d\n", dev->mdev.priv.reg_pages); +} + +static ssize_t show_hca(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct mlx5_ib_dev *dev = + container_of(device, struct mlx5_ib_dev, ib_dev.dev); + return sprintf(buf, "MT%d\n", dev->mdev.pdev->device); +} + +static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct mlx5_ib_dev *dev = + container_of(device, struct mlx5_ib_dev, ib_dev.dev); + return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev), + fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev)); +} + +static ssize_t show_rev(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct mlx5_ib_dev *dev = + container_of(device, struct mlx5_ib_dev, ib_dev.dev); + return sprintf(buf, "%x\n", dev->mdev.rev_id); +} + +static ssize_t show_board(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct mlx5_ib_dev *dev = + container_of(device, struct mlx5_ib_dev, 
ib_dev.dev); + return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, + dev->mdev.board_id); +} + +static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); +static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); +static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); +static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); +static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL); +static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL); + +static struct device_attribute *mlx5_class_attributes[] = { + &dev_attr_hw_rev, + &dev_attr_fw_ver, + &dev_attr_hca_type, + &dev_attr_board_id, + &dev_attr_fw_pages, + &dev_attr_reg_pages, +}; + +static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, + void *data) +{ + struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev); + struct ib_event ibev; + u8 port = 0; + + switch (event) { + case MLX5_DEV_EVENT_SYS_ERROR: + ibdev->ib_active = false; + ibev.event = IB_EVENT_DEVICE_FATAL; + break; + + case MLX5_DEV_EVENT_PORT_UP: + ibev.event = IB_EVENT_PORT_ACTIVE; + port = *(u8 *)data; + break; + + case MLX5_DEV_EVENT_PORT_DOWN: + ibev.event = IB_EVENT_PORT_ERR; + port = *(u8 *)data; + break; + + case MLX5_DEV_EVENT_PORT_INITIALIZED: + /* not used by ULPs */ + return; + + case MLX5_DEV_EVENT_LID_CHANGE: + ibev.event = IB_EVENT_LID_CHANGE; + port = *(u8 *)data; + break; + + case MLX5_DEV_EVENT_PKEY_CHANGE: + ibev.event = IB_EVENT_PKEY_CHANGE; + port = *(u8 *)data; + break; + + case MLX5_DEV_EVENT_GUID_CHANGE: + ibev.event = IB_EVENT_GID_CHANGE; + port = *(u8 *)data; + break; + + case MLX5_DEV_EVENT_CLIENT_REREG: + ibev.event = IB_EVENT_CLIENT_REREGISTER; + port = *(u8 *)data; + break; + } + + ibev.device = &ibdev->ib_dev; + ibev.element.port_num = port; + + if (ibdev->ib_active) + ib_dispatch_event(&ibev); +} + +static void get_ext_port_caps(struct mlx5_ib_dev *dev) +{ + int port; + + for (port = 1; port <= dev->mdev.caps.num_ports; port++) + mlx5_query_ext_port_caps(dev, port); +} + +static int get_port_caps(struct mlx5_ib_dev *dev) +{ + struct ib_device_attr *dprops = NULL; + struct ib_port_attr *pprops = NULL; + int err = 0; + int port; + + pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); + if (!pprops) + goto out; + + dprops = kmalloc(sizeof(*dprops), GFP_KERNEL); + if (!dprops) + goto out; + + err = mlx5_ib_query_device(&dev->ib_dev, dprops); + if (err) { + mlx5_ib_warn(dev, "query_device failed %d\n", err); + goto out; + } + + for (port = 1; port <= dev->mdev.caps.num_ports; port++) { + err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); + if (err) { + mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err); + break; + } + dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys; + dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len; + mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", + dprops->max_pkeys, pprops->gid_tbl_len); + } + +out: + kfree(pprops); + kfree(dprops); + + return err; +} + +static void destroy_umrc_res(struct mlx5_ib_dev *dev) +{ + int err; + + err = mlx5_mr_cache_cleanup(dev); + if (err) + mlx5_ib_warn(dev, "mr cache cleanup failed\n"); + + mlx5_ib_destroy_qp(dev->umrc.qp); + ib_destroy_cq(dev->umrc.cq); + ib_dereg_mr(dev->umrc.mr); + ib_dealloc_pd(dev->umrc.pd); +} + +enum { + MAX_UMR_WR = 128, +}; + +static int create_umr_res(struct mlx5_ib_dev *dev) +{ + struct ib_qp_init_attr *init_attr = NULL; + struct ib_qp_attr *attr = NULL; + struct ib_pd *pd; + struct ib_cq *cq; + struct ib_qp *qp; + struct ib_mr *mr; + int ret; + + attr = kzalloc(sizeof(*attr), 
GFP_KERNEL); + init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); + if (!attr || !init_attr) { + ret = -ENOMEM; + goto error_0; + } + + pd = ib_alloc_pd(&dev->ib_dev); + if (IS_ERR(pd)) { + mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n"); + ret = PTR_ERR(pd); + goto error_0; + } + + mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(mr)) { + mlx5_ib_dbg(dev, "Couldn't create DMA MR for sync UMR QP\n"); + ret = PTR_ERR(mr); + goto error_1; + } + + cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL, 128, + 0); + if (IS_ERR(cq)) { + mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); + ret = PTR_ERR(cq); + goto error_2; + } + ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); + + init_attr->send_cq = cq; + init_attr->recv_cq = cq; + init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; + init_attr->cap.max_send_wr = MAX_UMR_WR; + init_attr->cap.max_send_sge = 1; + init_attr->qp_type = MLX5_IB_QPT_REG_UMR; + init_attr->port_num = 1; + qp = mlx5_ib_create_qp(pd, init_attr, NULL); + if (IS_ERR(qp)) { + mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n"); + ret = PTR_ERR(qp); + goto error_3; + } + qp->device = &dev->ib_dev; + qp->real_qp = qp; + qp->uobject = NULL; + qp->qp_type = MLX5_IB_QPT_REG_UMR; + + attr->qp_state = IB_QPS_INIT; + attr->port_num = 1; + ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | + IB_QP_PORT, NULL); + if (ret) { + mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); + goto error_4; + } + + memset(attr, 0, sizeof(*attr)); + attr->qp_state = IB_QPS_RTR; + attr->path_mtu = IB_MTU_256; + + ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); + if (ret) { + mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n"); + goto error_4; + } + + memset(attr, 0, sizeof(*attr)); + attr->qp_state = IB_QPS_RTS; + ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); + if (ret) { + mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n"); + goto error_4; + } + + dev->umrc.qp = qp; + dev->umrc.cq = cq; + dev->umrc.mr = mr; + dev->umrc.pd = pd; + + sema_init(&dev->umrc.sem, MAX_UMR_WR); + ret = mlx5_mr_cache_init(dev); + if (ret) { + mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); + goto error_4; + } + + kfree(attr); + kfree(init_attr); + + return 0; + +error_4: + mlx5_ib_destroy_qp(qp); + +error_3: + ib_destroy_cq(cq); + +error_2: + ib_dereg_mr(mr); + +error_1: + ib_dealloc_pd(pd); + +error_0: + kfree(attr); + kfree(init_attr); + return ret; +} + +static int create_dev_resources(struct mlx5_ib_resources *devr) +{ + struct ib_srq_init_attr attr; + struct mlx5_ib_dev *dev; + int ret = 0; + + dev = container_of(devr, struct mlx5_ib_dev, devr); + + devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); + if (IS_ERR(devr->p0)) { + ret = PTR_ERR(devr->p0); + goto error0; + } + devr->p0->device = &dev->ib_dev; + devr->p0->uobject = NULL; + atomic_set(&devr->p0->usecnt, 0); + + devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, 1, 0, NULL, NULL); + if (IS_ERR(devr->c0)) { + ret = PTR_ERR(devr->c0); + goto error1; + } + devr->c0->device = &dev->ib_dev; + devr->c0->uobject = NULL; + devr->c0->comp_handler = NULL; + devr->c0->event_handler = NULL; + devr->c0->cq_context = NULL; + atomic_set(&devr->c0->usecnt, 0); + + devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); + if (IS_ERR(devr->x0)) { + ret = PTR_ERR(devr->x0); + goto error2; + } + devr->x0->device = &dev->ib_dev; + devr->x0->inode = NULL; + atomic_set(&devr->x0->usecnt, 0); + mutex_init(&devr->x0->tgt_qp_mutex); + INIT_LIST_HEAD(&devr->x0->tgt_qp_list); + + devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); + if 
(IS_ERR(devr->x1)) { + ret = PTR_ERR(devr->x1); + goto error3; + } + devr->x1->device = &dev->ib_dev; + devr->x1->inode = NULL; + atomic_set(&devr->x1->usecnt, 0); + mutex_init(&devr->x1->tgt_qp_mutex); + INIT_LIST_HEAD(&devr->x1->tgt_qp_list); + + memset(&attr, 0, sizeof(attr)); + attr.attr.max_sge = 1; + attr.attr.max_wr = 1; + attr.srq_type = IB_SRQT_XRC; + attr.ext.xrc.cq = devr->c0; + attr.ext.xrc.xrcd = devr->x0; + + devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL); + if (IS_ERR(devr->s0)) { + ret = PTR_ERR(devr->s0); + goto error4; + } + devr->s0->device = &dev->ib_dev; + devr->s0->pd = devr->p0; + devr->s0->uobject = NULL; + devr->s0->event_handler = NULL; + devr->s0->srq_context = NULL; + devr->s0->srq_type = IB_SRQT_XRC; + devr->s0->ext.xrc.xrcd = devr->x0; + devr->s0->ext.xrc.cq = devr->c0; + atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt); + atomic_inc(&devr->s0->ext.xrc.cq->usecnt); + atomic_inc(&devr->p0->usecnt); + atomic_set(&devr->s0->usecnt, 0); + + return 0; + +error4: + mlx5_ib_dealloc_xrcd(devr->x1); +error3: + mlx5_ib_dealloc_xrcd(devr->x0); +error2: + mlx5_ib_destroy_cq(devr->c0); +error1: + mlx5_ib_dealloc_pd(devr->p0); +error0: + return ret; +} + +static void destroy_dev_resources(struct mlx5_ib_resources *devr) +{ + mlx5_ib_destroy_srq(devr->s0); + mlx5_ib_dealloc_xrcd(devr->x0); + mlx5_ib_dealloc_xrcd(devr->x1); + mlx5_ib_destroy_cq(devr->c0); + mlx5_ib_dealloc_pd(devr->p0); +} + +static int init_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct mlx5_core_dev *mdev; + struct mlx5_ib_dev *dev; + int err; + int i; + + printk_once(KERN_INFO "%s", mlx5_version); + + dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); + if (!dev) + return -ENOMEM; + + mdev = &dev->mdev; + mdev->event = mlx5_ib_event; + if (prof_sel >= ARRAY_SIZE(profile)) { + pr_warn("selected profile out of range, selecting default\n"); + prof_sel = 0; + } + mdev->profile = &profile[prof_sel]; + err = mlx5_dev_init(mdev, pdev); + if (err) + goto err_free; + + err = get_port_caps(dev); + if (err) + goto err_cleanup; + + get_ext_port_caps(dev); + + err = alloc_comp_eqs(dev); + if (err) + goto err_cleanup; + + MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); + + strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); + dev->ib_dev.owner = THIS_MODULE; + dev->ib_dev.node_type = RDMA_NODE_IB_CA; + dev->ib_dev.local_dma_lkey = mdev->caps.reserved_lkey; + dev->num_ports = mdev->caps.num_ports; + dev->ib_dev.phys_port_cnt = dev->num_ports; + dev->ib_dev.num_comp_vectors = dev->num_comp_vectors; + dev->ib_dev.dma_device = &mdev->pdev->dev; + + dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION; + dev->ib_dev.uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << IB_USER_VERBS_CMD_QUERY_QP) | + (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | + (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | + (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | + (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | + (1ull << 
IB_USER_VERBS_CMD_DESTROY_SRQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | + (1ull << IB_USER_VERBS_CMD_OPEN_QP); + + dev->ib_dev.query_device = mlx5_ib_query_device; + dev->ib_dev.query_port = mlx5_ib_query_port; + dev->ib_dev.query_gid = mlx5_ib_query_gid; + dev->ib_dev.query_pkey = mlx5_ib_query_pkey; + dev->ib_dev.modify_device = mlx5_ib_modify_device; + dev->ib_dev.modify_port = mlx5_ib_modify_port; + dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext; + dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext; + dev->ib_dev.mmap = mlx5_ib_mmap; + dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd; + dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd; + dev->ib_dev.create_ah = mlx5_ib_create_ah; + dev->ib_dev.query_ah = mlx5_ib_query_ah; + dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah; + dev->ib_dev.create_srq = mlx5_ib_create_srq; + dev->ib_dev.modify_srq = mlx5_ib_modify_srq; + dev->ib_dev.query_srq = mlx5_ib_query_srq; + dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq; + dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv; + dev->ib_dev.create_qp = mlx5_ib_create_qp; + dev->ib_dev.modify_qp = mlx5_ib_modify_qp; + dev->ib_dev.query_qp = mlx5_ib_query_qp; + dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp; + dev->ib_dev.post_send = mlx5_ib_post_send; + dev->ib_dev.post_recv = mlx5_ib_post_recv; + dev->ib_dev.create_cq = mlx5_ib_create_cq; + dev->ib_dev.modify_cq = mlx5_ib_modify_cq; + dev->ib_dev.resize_cq = mlx5_ib_resize_cq; + dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq; + dev->ib_dev.poll_cq = mlx5_ib_poll_cq; + dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq; + dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr; + dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr; + dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr; + dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; + dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; + dev->ib_dev.process_mad = mlx5_ib_process_mad; + dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr; + dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list; + dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list; + + if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) { + dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; + dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd; + dev->ib_dev.uverbs_cmd_mask |= + (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) | + (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); + } + + err = init_node_data(dev); + if (err) + goto err_eqs; + + mutex_init(&dev->cap_mask_mutex); + spin_lock_init(&dev->mr_lock); + + err = create_dev_resources(&dev->devr); + if (err) + goto err_eqs; + + if (ib_register_device(&dev->ib_dev, NULL)) + goto err_rsrc; + + err = create_umr_res(dev); + if (err) + goto err_dev; + + for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) { + if (device_create_file(&dev->ib_dev.dev, + mlx5_class_attributes[i])) + goto err_umrc; + } + + dev->ib_active = true; + + return 0; + +err_umrc: + destroy_umrc_res(dev); + +err_dev: + ib_unregister_device(&dev->ib_dev); + +err_rsrc: + destroy_dev_resources(&dev->devr); + +err_eqs: + free_comp_eqs(dev); + +err_cleanup: + mlx5_dev_cleanup(mdev); + +err_free: + ib_dealloc_device((struct ib_device *)dev); + + return err; +} + +static void remove_one(struct pci_dev *pdev) +{ + struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev); + + destroy_umrc_res(dev); + ib_unregister_device(&dev->ib_dev); + destroy_dev_resources(&dev->devr); + free_comp_eqs(dev); + mlx5_dev_cleanup(&dev->mdev); + ib_dealloc_device(&dev->ib_dev); +} + +static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = { + { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */ + 
{ 0, } +}; + +MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table); + +static struct pci_driver mlx5_ib_driver = { + .name = DRIVER_NAME, + .id_table = mlx5_ib_pci_table, + .probe = init_one, + .remove = remove_one +}; + +static int __init mlx5_ib_init(void) +{ + return pci_register_driver(&mlx5_ib_driver); +} + +static void __exit mlx5_ib_cleanup(void) +{ + pci_unregister_driver(&mlx5_ib_driver); +} + +module_init(mlx5_ib_init); +module_exit(mlx5_ib_cleanup); diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c new file mode 100644 index 000000000000..3a5322870b96 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include "mlx5_ib.h" + +/* @umem: umem object to scan + * @addr: ib virtual address requested by the user + * @count: number of PAGE_SIZE pages covered by umem + * @shift: page shift for the compound pages found in the region + * @ncont: number of compund pages + * @order: log2 of the number of compound pages + */ +void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, + int *ncont, int *order) +{ + struct ib_umem_chunk *chunk; + unsigned long tmp; + unsigned long m; + int i, j, k; + u64 base = 0; + int p = 0; + int skip; + int mask; + u64 len; + u64 pfn; + + addr = addr >> PAGE_SHIFT; + tmp = (unsigned long)addr; + m = find_first_bit(&tmp, sizeof(tmp)); + skip = 1 << m; + mask = skip - 1; + i = 0; + list_for_each_entry(chunk, &umem->chunk_list, list) + for (j = 0; j < chunk->nmap; j++) { + len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT; + pfn = sg_dma_address(&chunk->page_list[j]) >> PAGE_SHIFT; + for (k = 0; k < len; k++) { + if (!(i & mask)) { + tmp = (unsigned long)pfn; + m = min(m, find_first_bit(&tmp, sizeof(tmp))); + skip = 1 << m; + mask = skip - 1; + base = pfn; + p = 0; + } else { + if (base + p != pfn) { + tmp = (unsigned long)p; + m = find_first_bit(&tmp, sizeof(tmp)); + skip = 1 << m; + mask = skip - 1; + base = pfn; + p = 0; + } + } + p++; + i++; + } + } + + if (i) { + m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m); + + if (order) + *order = ilog2(roundup_pow_of_two(i) >> m); + + *ncont = DIV_ROUND_UP(i, (1 << m)); + } else { + m = 0; + + if (order) + *order = 0; + + *ncont = 0; + } + *shift = PAGE_SHIFT + m; + *count = i; +} + +void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, + int page_shift, __be64 *pas, int umr) +{ + int shift = page_shift - PAGE_SHIFT; + int mask = (1 << shift) - 1; + struct ib_umem_chunk *chunk; + int i, j, k; + u64 cur = 0; + u64 base; + int len; + + i = 0; + list_for_each_entry(chunk, &umem->chunk_list, list) + for (j = 0; j < chunk->nmap; j++) { + len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT; + base = sg_dma_address(&chunk->page_list[j]); + for (k = 0; k < len; k++) { + if (!(i & mask)) { + cur = base + (k << PAGE_SHIFT); + if (umr) + cur |= 3; + + pas[i >> shift] = cpu_to_be64(cur); + mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n", + i >> shift, be64_to_cpu(pas[i >> shift])); + } else + mlx5_ib_dbg(dev, "=====> 0x%llx\n", + base + (k << PAGE_SHIFT)); + i++; + } + } +} + +int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset) +{ + u64 page_size; + u64 page_mask; + u64 off_size; + u64 off_mask; + u64 buf_off; + + page_size = 1 << page_shift; + page_mask = page_size - 1; + buf_off = addr & page_mask; + off_size = page_size >> 6; + off_mask = off_size - 1; + + if (buf_off & off_mask) + return -EINVAL; + + *offset = buf_off >> ilog2(off_size); + return 0; +} diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h new file mode 100644 index 000000000000..836be9157242 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -0,0 +1,545 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_IB_H +#define MLX5_IB_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define mlx5_ib_dbg(dev, format, arg...) \ +pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \ + __LINE__, current->pid, ##arg) + +#define mlx5_ib_err(dev, format, arg...) \ +pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \ + __LINE__, current->pid, ##arg) + +#define mlx5_ib_warn(dev, format, arg...) \ +pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \ + __LINE__, current->pid, ##arg) + +enum { + MLX5_IB_MMAP_CMD_SHIFT = 8, + MLX5_IB_MMAP_CMD_MASK = 0xff, +}; + +enum mlx5_ib_mmap_cmd { + MLX5_IB_MMAP_REGULAR_PAGE = 0, + MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1, /* always last */ +}; + +enum { + MLX5_RES_SCAT_DATA32_CQE = 0x1, + MLX5_RES_SCAT_DATA64_CQE = 0x2, + MLX5_REQ_SCAT_DATA32_CQE = 0x11, + MLX5_REQ_SCAT_DATA64_CQE = 0x22, +}; + +enum mlx5_ib_latency_class { + MLX5_IB_LATENCY_CLASS_LOW, + MLX5_IB_LATENCY_CLASS_MEDIUM, + MLX5_IB_LATENCY_CLASS_HIGH, + MLX5_IB_LATENCY_CLASS_FAST_PATH +}; + +enum mlx5_ib_mad_ifc_flags { + MLX5_MAD_IFC_IGNORE_MKEY = 1, + MLX5_MAD_IFC_IGNORE_BKEY = 2, + MLX5_MAD_IFC_NET_VIEW = 4, +}; + +struct mlx5_ib_ucontext { + struct ib_ucontext ibucontext; + struct list_head db_page_list; + + /* protect doorbell record alloc/free + */ + struct mutex db_page_mutex; + struct mlx5_uuar_info uuari; +}; + +static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext); +} + +struct mlx5_ib_pd { + struct ib_pd ibpd; + u32 pdn; + u32 pa_lkey; +}; + +/* Use macros here so that don't have to duplicate + * enum ib_send_flags and enum ib_qp_type for low-level driver + */ + +#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START +#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1 +#define MLX5_IB_WR_UMR IB_WR_RESERVED1 + +struct wr_list { + u16 opcode; + u16 next; +}; + +struct mlx5_ib_wq { + u64 *wrid; + u32 *wr_data; + struct wr_list *w_list; + unsigned *wqe_head; + u16 unsig_count; + + /* serialize post to the work queue + */ + spinlock_t lock; + int wqe_cnt; + int max_post; + int max_gs; + int offset; + int wqe_shift; + unsigned head; + unsigned tail; + u16 cur_post; + u16 
last_poll; + void *qend; +}; + +enum { + MLX5_QP_USER, + MLX5_QP_KERNEL, + MLX5_QP_EMPTY +}; + +struct mlx5_ib_qp { + struct ib_qp ibqp; + struct mlx5_core_qp mqp; + struct mlx5_buf buf; + + struct mlx5_db db; + struct mlx5_ib_wq rq; + + u32 doorbell_qpn; + u8 sq_signal_bits; + u8 fm_cache; + int sq_max_wqes_per_wr; + int sq_spare_wqes; + struct mlx5_ib_wq sq; + + struct ib_umem *umem; + int buf_size; + + /* serialize qp state modifications + */ + struct mutex mutex; + u16 xrcdn; + u32 flags; + u8 port; + u8 alt_port; + u8 atomic_rd_en; + u8 resp_depth; + u8 state; + int mlx_type; + int wq_sig; + int scat_cqe; + int max_inline_data; + struct mlx5_bf *bf; + int has_rq; + + /* only for user space QPs. For kernel + * we have it from the bf object + */ + int uuarn; + + int create_type; + u32 pa_lkey; +}; + +struct mlx5_ib_cq_buf { + struct mlx5_buf buf; + struct ib_umem *umem; + int cqe_size; +}; + +enum mlx5_ib_qp_flags { + MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 0, + MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 1, +}; + +struct mlx5_shared_mr_info { + int mr_id; + struct ib_umem *umem; +}; + +struct mlx5_ib_cq { + struct ib_cq ibcq; + struct mlx5_core_cq mcq; + struct mlx5_ib_cq_buf buf; + struct mlx5_db db; + + /* serialize access to the CQ + */ + spinlock_t lock; + + /* protect resize cq + */ + struct mutex resize_mutex; + struct mlx5_ib_cq_resize *resize_buf; + struct ib_umem *resize_umem; + int cqe_size; +}; + +struct mlx5_ib_srq { + struct ib_srq ibsrq; + struct mlx5_core_srq msrq; + struct mlx5_buf buf; + struct mlx5_db db; + u64 *wrid; + /* protect SRQ hanlding + */ + spinlock_t lock; + int head; + int tail; + u16 wqe_ctr; + struct ib_umem *umem; + /* serialize arming a SRQ + */ + struct mutex mutex; + int wq_sig; +}; + +struct mlx5_ib_xrcd { + struct ib_xrcd ibxrcd; + u32 xrcdn; +}; + +struct mlx5_ib_mr { + struct ib_mr ibmr; + struct mlx5_core_mr mmr; + struct ib_umem *umem; + struct mlx5_shared_mr_info *smr_info; + struct list_head list; + int order; + int umred; + __be64 *pas; + dma_addr_t dma; + int npages; + struct completion done; + enum ib_wc_status status; +}; + +struct mlx5_ib_fast_reg_page_list { + struct ib_fast_reg_page_list ibfrpl; + __be64 *mapped_page_list; + dma_addr_t map; +}; + +struct umr_common { + struct ib_pd *pd; + struct ib_cq *cq; + struct ib_qp *qp; + struct ib_mr *mr; + /* control access to UMR QP + */ + struct semaphore sem; +}; + +enum { + MLX5_FMR_INVALID, + MLX5_FMR_VALID, + MLX5_FMR_BUSY, +}; + +struct mlx5_ib_fmr { + struct ib_fmr ibfmr; + struct mlx5_core_mr mr; + int access_flags; + int state; + /* protect fmr state + */ + spinlock_t lock; + u64 wrid; + struct ib_send_wr wr[2]; + u8 page_shift; + struct ib_fast_reg_page_list page_list; +}; + +struct mlx5_cache_ent { + struct list_head head; + /* sync access to the cahce entry + */ + spinlock_t lock; + + + struct dentry *dir; + char name[4]; + u32 order; + u32 size; + u32 cur; + u32 miss; + u32 limit; + + struct dentry *fsize; + struct dentry *fcur; + struct dentry *fmiss; + struct dentry *flimit; + + struct mlx5_ib_dev *dev; + struct work_struct work; + struct delayed_work dwork; +}; + +struct mlx5_mr_cache { + struct workqueue_struct *wq; + struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES]; + int stopped; + struct dentry *root; + unsigned long last_add; +}; + +struct mlx5_ib_resources { + struct ib_cq *c0; + struct ib_xrcd *x0; + struct ib_xrcd *x1; + struct ib_pd *p0; + struct ib_srq *s0; +}; + +struct mlx5_ib_dev { + struct ib_device ib_dev; + struct mlx5_core_dev mdev; + 
MLX5_DECLARE_DOORBELL_LOCK(uar_lock); + struct list_head eqs_list; + int num_ports; + int num_comp_vectors; + /* serialize update of capability mask + */ + struct mutex cap_mask_mutex; + bool ib_active; + struct umr_common umrc; + /* sync used page count stats + */ + spinlock_t mr_lock; + struct mlx5_ib_resources devr; + struct mlx5_mr_cache cache; +}; + +static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) +{ + return container_of(mcq, struct mlx5_ib_cq, mcq); +} + +static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd) +{ + return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd); +} + +static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct mlx5_ib_dev, ib_dev); +} + +static inline struct mlx5_ib_fmr *to_mfmr(struct ib_fmr *ibfmr) +{ + return container_of(ibfmr, struct mlx5_ib_fmr, ibfmr); +} + +static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct mlx5_ib_cq, ibcq); +} + +static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp) +{ + return container_of(mqp, struct mlx5_ib_qp, mqp); +} + +static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct mlx5_ib_pd, ibpd); +} + +static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq) +{ + return container_of(ibsrq, struct mlx5_ib_srq, ibsrq); +} + +static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct mlx5_ib_qp, ibqp); +} + +static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq) +{ + return container_of(msrq, struct mlx5_ib_srq, msrq); +} + +static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct mlx5_ib_mr, ibmr); +} + +static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl) +{ + return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl); +} + +struct mlx5_ib_ah { + struct ib_ah ibah; + struct mlx5_av av; +}; + +static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah) +{ + return container_of(ibah, struct mlx5_ib_ah, ibah); +} + +static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev) +{ + return container_of(dev, struct mlx5_ib_dev, mdev); +} + +static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev) +{ + return mlx5_core2ibdev(pci2mlx5_core_dev(pdev)); +} + +int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, + struct mlx5_db *db); +void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db); +void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); +void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); +void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); +int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, + int port, struct ib_wc *in_wc, struct ib_grh *in_grh, + void *in_mad, void *response_mad); +struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr, + struct mlx5_ib_ah *ah); +struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); +int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr); +int mlx5_ib_destroy_ah(struct ib_ah *ah); +struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata); +int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); +int 
mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr); +int mlx5_ib_destroy_srq(struct ib_srq *srq); +int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); +int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); +int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr); +int mlx5_ib_destroy_qp(struct ib_qp *qp); +int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr); +int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n); +struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, + int vector, struct ib_ucontext *context, + struct ib_udata *udata); +int mlx5_ib_destroy_cq(struct ib_cq *cq); +int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); +int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); +int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); +int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata); +struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc); +struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata); +int mlx5_ib_dereg_mr(struct ib_mr *ibmr); +struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd, + int max_page_list_len); +struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, + int page_list_len); +void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list); +struct ib_fmr *mlx5_ib_fmr_alloc(struct ib_pd *pd, int acc, + struct ib_fmr_attr *fmr_attr); +int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, + int npages, u64 iova); +int mlx5_ib_unmap_fmr(struct list_head *fmr_list); +int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr); +int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + struct ib_wc *in_wc, struct ib_grh *in_grh, + struct ib_mad *in_mad, struct ib_mad *out_mad); +struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata); +int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd); +int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn); +int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); +int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port); +int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props); +int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev); +void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev); +void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, + int *ncont, int *order); +void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, + int page_shift, __be64 *pas, int umr); +void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num); +int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq); +int mlx5_mr_cache_init(struct mlx5_ib_dev *dev); +int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev); +int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift); +void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context); + +static inline void init_query_mad(struct 
ib_smp *mad) +{ + mad->base_version = 1; + mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; + mad->class_version = 1; + mad->method = IB_MGMT_METHOD_GET; +} + +static inline u8 convert_access(int acc) +{ + return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) | + (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) | + (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) | + (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) | + MLX5_PERM_LOCAL_READ; +} + +#endif /* MLX5_IB_H */ diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c new file mode 100644 index 000000000000..e2daa8f02476 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -0,0 +1,1007 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + +#include +#include +#include +#include +#include +#include "mlx5_ib.h" + +enum { + DEF_CACHE_SIZE = 10, +}; + +static __be64 *mr_align(__be64 *ptr, int align) +{ + unsigned long mask = align - 1; + + return (__be64 *)(((unsigned long)ptr + mask) & ~mask); +} + +static int order2idx(struct mlx5_ib_dev *dev, int order) +{ + struct mlx5_mr_cache *cache = &dev->cache; + + if (order < cache->ent[0].order) + return 0; + else + return order - cache->ent[0].order; +} + +static int add_keys(struct mlx5_ib_dev *dev, int c, int num) +{ + struct device *ddev = dev->ib_dev.dma_device; + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_cache_ent *ent = &cache->ent[c]; + struct mlx5_create_mkey_mbox_in *in; + struct mlx5_ib_mr *mr; + int npages = 1 << ent->order; + int size = sizeof(u64) * npages; + int err = 0; + int i; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + for (i = 0; i < num; i++) { + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) { + err = -ENOMEM; + goto out; + } + mr->order = ent->order; + mr->umred = 1; + mr->pas = kmalloc(size + 0x3f, GFP_KERNEL); + if (!mr->pas) { + kfree(mr); + err = -ENOMEM; + goto out; + } + mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size, + DMA_TO_DEVICE); + if (dma_mapping_error(ddev, mr->dma)) { + kfree(mr->pas); + kfree(mr); + err = -ENOMEM; + goto out; + } + + in->seg.status = 1 << 6; + in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2); + in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN; + in->seg.log2_page_size = 12; + + err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, + sizeof(*in)); + if (err) { + mlx5_ib_warn(dev, "create mkey failed %d\n", err); + dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); + kfree(mr->pas); + kfree(mr); + goto out; + } + cache->last_add = jiffies; + + spin_lock(&ent->lock); + list_add_tail(&mr->list, &ent->head); + ent->cur++; + ent->size++; + spin_unlock(&ent->lock); + } + +out: + kfree(in); + return err; +} + +static void remove_keys(struct mlx5_ib_dev *dev, int c, int num) +{ + struct device *ddev = dev->ib_dev.dma_device; + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_cache_ent *ent = &cache->ent[c]; + struct mlx5_ib_mr *mr; + int size; + int err; + int i; + + for (i = 0; i < num; i++) { + spin_lock(&ent->lock); + if (list_empty(&ent->head)) { + spin_unlock(&ent->lock); + return; + } + mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); + list_del(&mr->list); + ent->cur--; + ent->size--; + spin_unlock(&ent->lock); + err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); + if (err) { + mlx5_ib_warn(dev, "failed destroy mkey\n"); + } else { + size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40); + dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); + kfree(mr->pas); + kfree(mr); + } + } +} + +static ssize_t size_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_cache_ent *ent = filp->private_data; + struct mlx5_ib_dev *dev = ent->dev; + char lbuf[20]; + u32 var; + int err; + int c; + + if (copy_from_user(lbuf, buf, sizeof(lbuf))) + return -EPERM; + + c = order2idx(dev, ent->order); + lbuf[sizeof(lbuf) - 1] = 0; + + if (sscanf(lbuf, "%u", &var) != 1) + return -EINVAL; + + if (var < ent->limit) + return -EINVAL; + + if (var > ent->size) { + err = add_keys(dev, c, var - ent->size); + if (err) + return err; + } else if (var < ent->size) { + remove_keys(dev, c, ent->size - var); + } + + return count; +} + +static ssize_t size_read(struct file 
*filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_cache_ent *ent = filp->private_data; + char lbuf[20]; + int err; + + if (*pos) + return 0; + + err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size); + if (err < 0) + return err; + + if (copy_to_user(buf, lbuf, err)) + return -EPERM; + + *pos += err; + + return err; +} + +static const struct file_operations size_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = size_write, + .read = size_read, +}; + +static ssize_t limit_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_cache_ent *ent = filp->private_data; + struct mlx5_ib_dev *dev = ent->dev; + char lbuf[20]; + u32 var; + int err; + int c; + + if (copy_from_user(lbuf, buf, sizeof(lbuf))) + return -EPERM; + + c = order2idx(dev, ent->order); + lbuf[sizeof(lbuf) - 1] = 0; + + if (sscanf(lbuf, "%u", &var) != 1) + return -EINVAL; + + if (var > ent->size) + return -EINVAL; + + ent->limit = var; + + if (ent->cur < ent->limit) { + err = add_keys(dev, c, 2 * ent->limit - ent->cur); + if (err) + return err; + } + + return count; +} + +static ssize_t limit_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_cache_ent *ent = filp->private_data; + char lbuf[20]; + int err; + + if (*pos) + return 0; + + err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); + if (err < 0) + return err; + + if (copy_to_user(buf, lbuf, err)) + return -EPERM; + + *pos += err; + + return err; +} + +static const struct file_operations limit_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = limit_write, + .read = limit_read, +}; + +static int someone_adding(struct mlx5_mr_cache *cache) +{ + int i; + + for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { + if (cache->ent[i].cur < cache->ent[i].limit) + return 1; + } + + return 0; +} + +static void __cache_work_func(struct mlx5_cache_ent *ent) +{ + struct mlx5_ib_dev *dev = ent->dev; + struct mlx5_mr_cache *cache = &dev->cache; + int i = order2idx(dev, ent->order); + + if (cache->stopped) + return; + + ent = &dev->cache.ent[i]; + if (ent->cur < 2 * ent->limit) { + add_keys(dev, i, 1); + if (ent->cur < 2 * ent->limit) + queue_work(cache->wq, &ent->work); + } else if (ent->cur > 2 * ent->limit) { + if (!someone_adding(cache) && + time_after(jiffies, cache->last_add + 60 * HZ)) { + remove_keys(dev, i, 1); + if (ent->cur > ent->limit) + queue_work(cache->wq, &ent->work); + } else { + queue_delayed_work(cache->wq, &ent->dwork, 60 * HZ); + } + } +} + +static void delayed_cache_work_func(struct work_struct *work) +{ + struct mlx5_cache_ent *ent; + + ent = container_of(work, struct mlx5_cache_ent, dwork.work); + __cache_work_func(ent); +} + +static void cache_work_func(struct work_struct *work) +{ + struct mlx5_cache_ent *ent; + + ent = container_of(work, struct mlx5_cache_ent, work); + __cache_work_func(ent); +} + +static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) +{ + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_ib_mr *mr = NULL; + struct mlx5_cache_ent *ent; + int c; + int i; + + c = order2idx(dev, order); + if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { + mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c); + return NULL; + } + + for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) { + ent = &cache->ent[i]; + + mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i); + + spin_lock(&ent->lock); + if (!list_empty(&ent->head)) { + mr = list_first_entry(&ent->head, struct mlx5_ib_mr, + list); + list_del(&mr->list); + 
ent->cur--; + spin_unlock(&ent->lock); + if (ent->cur < ent->limit) + queue_work(cache->wq, &ent->work); + break; + } + spin_unlock(&ent->lock); + + queue_work(cache->wq, &ent->work); + + if (mr) + break; + } + + if (!mr) + cache->ent[c].miss++; + + return mr; +} + +static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) +{ + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_cache_ent *ent; + int shrink = 0; + int c; + + c = order2idx(dev, mr->order); + if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { + mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); + return; + } + ent = &cache->ent[c]; + spin_lock(&ent->lock); + list_add_tail(&mr->list, &ent->head); + ent->cur++; + if (ent->cur > 2 * ent->limit) + shrink = 1; + spin_unlock(&ent->lock); + + if (shrink) + queue_work(cache->wq, &ent->work); +} + +static void clean_keys(struct mlx5_ib_dev *dev, int c) +{ + struct device *ddev = dev->ib_dev.dma_device; + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_cache_ent *ent = &cache->ent[c]; + struct mlx5_ib_mr *mr; + int size; + int err; + + while (1) { + spin_lock(&ent->lock); + if (list_empty(&ent->head)) { + spin_unlock(&ent->lock); + return; + } + mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); + list_del(&mr->list); + ent->cur--; + ent->size--; + spin_unlock(&ent->lock); + err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); + if (err) { + mlx5_ib_warn(dev, "failed destroy mkey\n"); + } else { + size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40); + dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE); + kfree(mr->pas); + kfree(mr); + } + } +} + +static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) +{ + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_cache_ent *ent; + int i; + + if (!mlx5_debugfs_root) + return 0; + + cache->root = debugfs_create_dir("mr_cache", dev->mdev.priv.dbg_root); + if (!cache->root) + return -ENOMEM; + + for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { + ent = &cache->ent[i]; + sprintf(ent->name, "%d", ent->order); + ent->dir = debugfs_create_dir(ent->name, cache->root); + if (!ent->dir) + return -ENOMEM; + + ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent, + &size_fops); + if (!ent->fsize) + return -ENOMEM; + + ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent, + &limit_fops); + if (!ent->flimit) + return -ENOMEM; + + ent->fcur = debugfs_create_u32("cur", 0400, ent->dir, + &ent->cur); + if (!ent->fcur) + return -ENOMEM; + + ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir, + &ent->miss); + if (!ent->fmiss) + return -ENOMEM; + } + + return 0; +} + +static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->cache.root); +} + +int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) +{ + struct mlx5_mr_cache *cache = &dev->cache; + struct mlx5_cache_ent *ent; + int limit; + int size; + int err; + int i; + + cache->wq = create_singlethread_workqueue("mkey_cache"); + if (!cache->wq) { + mlx5_ib_warn(dev, "failed to create work queue\n"); + return -ENOMEM; + } + + for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { + INIT_LIST_HEAD(&cache->ent[i].head); + spin_lock_init(&cache->ent[i].lock); + + ent = &cache->ent[i]; + INIT_LIST_HEAD(&ent->head); + spin_lock_init(&ent->lock); + ent->order = i + 2; + ent->dev = dev; + + if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE) { + size = dev->mdev.profile->mr_cache[i].size; + limit = dev->mdev.profile->mr_cache[i].limit; + } else { + size = DEF_CACHE_SIZE; + limit = 
0; + } + INIT_WORK(&ent->work, cache_work_func); + INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); + ent->limit = limit; + queue_work(cache->wq, &ent->work); + } + + err = mlx5_mr_cache_debugfs_init(dev); + if (err) + mlx5_ib_warn(dev, "cache debugfs failure\n"); + + return 0; +} + +int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev) +{ + int i; + + dev->cache.stopped = 1; + destroy_workqueue(dev->cache.wq); + + mlx5_mr_cache_debugfs_cleanup(dev); + + for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) + clean_keys(dev, i); + + return 0; +} + +struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_core_dev *mdev = &dev->mdev; + struct mlx5_create_mkey_mbox_in *in; + struct mlx5_mkey_seg *seg; + struct mlx5_ib_mr *mr; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_free; + } + + seg = &in->seg; + seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA; + seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64); + seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + seg->start_addr = 0; + + err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in)); + if (err) + goto err_in; + + kfree(in); + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + mr->umem = NULL; + + return &mr->ibmr; + +err_in: + kfree(in); + +err_free: + kfree(mr); + + return ERR_PTR(err); +} + +static int get_octo_len(u64 addr, u64 len, int page_size) +{ + u64 offset; + int npages; + + offset = addr & (page_size - 1); + npages = ALIGN(len + offset, page_size) >> ilog2(page_size); + return (npages + 1) / 2; +} + +static int use_umr(int order) +{ + return order <= 17; +} + +static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, + struct ib_sge *sg, u64 dma, int n, u32 key, + int page_shift, u64 virt_addr, u64 len, + int access_flags) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct ib_mr *mr = dev->umrc.mr; + + sg->addr = dma; + sg->length = ALIGN(sizeof(u64) * n, 64); + sg->lkey = mr->lkey; + + wr->next = NULL; + wr->send_flags = 0; + wr->sg_list = sg; + if (n) + wr->num_sge = 1; + else + wr->num_sge = 0; + + wr->opcode = MLX5_IB_WR_UMR; + wr->wr.fast_reg.page_list_len = n; + wr->wr.fast_reg.page_shift = page_shift; + wr->wr.fast_reg.rkey = key; + wr->wr.fast_reg.iova_start = virt_addr; + wr->wr.fast_reg.length = len; + wr->wr.fast_reg.access_flags = access_flags; + wr->wr.fast_reg.page_list = (struct ib_fast_reg_page_list *)pd; +} + +static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev, + struct ib_send_wr *wr, u32 key) +{ + wr->send_flags = MLX5_IB_SEND_UMR_UNREG; + wr->opcode = MLX5_IB_WR_UMR; + wr->wr.fast_reg.rkey = key; +} + +void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context) +{ + struct mlx5_ib_mr *mr; + struct ib_wc wc; + int err; + + while (1) { + err = ib_poll_cq(cq, 1, &wc); + if (err < 0) { + pr_warn("poll cq error %d\n", err); + return; + } + if (err == 0) + break; + + mr = (struct mlx5_ib_mr *)(unsigned long)wc.wr_id; + mr->status = wc.status; + complete(&mr->done); + } + ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); +} + +static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, + u64 virt_addr, u64 len, int npages, + int page_shift, int order, int access_flags) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct umr_common *umrc = &dev->umrc; + struct ib_send_wr wr, *bad; + struct mlx5_ib_mr *mr; + struct ib_sge sg; + int err; + int i; + + for (i = 
0; i < 10; i++) { + mr = alloc_cached_mr(dev, order); + if (mr) + break; + + err = add_keys(dev, order2idx(dev, order), 1); + if (err) { + mlx5_ib_warn(dev, "add_keys failed\n"); + break; + } + } + + if (!mr) + return ERR_PTR(-EAGAIN); + + mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1); + + memset(&wr, 0, sizeof(wr)); + wr.wr_id = (u64)(unsigned long)mr; + prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags); + + /* We serialize polls so one process does not kidnap another's + * completion. This is not a problem since wr is completed in + * around 1 usec + */ + down(&umrc->sem); + init_completion(&mr->done); + err = ib_post_send(umrc->qp, &wr, &bad); + if (err) { + mlx5_ib_warn(dev, "post send failed, err %d\n", err); + up(&umrc->sem); + goto error; + } + wait_for_completion(&mr->done); + up(&umrc->sem); + + if (mr->status != IB_WC_SUCCESS) { + mlx5_ib_warn(dev, "reg umr failed\n"); + err = -EFAULT; + goto error; + } + + return mr; + +error: + free_cached_mr(dev, mr); + return ERR_PTR(err); +} + +static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, + u64 length, struct ib_umem *umem, + int npages, int page_shift, + int access_flags) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_create_mkey_mbox_in *in; + struct mlx5_ib_mr *mr; + int inlen; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2; + in = mlx5_vzalloc(inlen); + if (!in) { + err = -ENOMEM; + goto err_1; + } + mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0); + + in->seg.flags = convert_access(access_flags) | + MLX5_ACCESS_MODE_MTT; + in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); + in->seg.start_addr = cpu_to_be64(virt_addr); + in->seg.len = cpu_to_be64(length); + in->seg.bsfs_octo_size = 0; + in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift)); + in->seg.log2_page_size = page_shift; + in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift)); + err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen); + if (err) { + mlx5_ib_warn(dev, "create mkey failed\n"); + goto err_2; + } + mr->umem = umem; + mlx5_vfree(in); + + mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key); + + return mr; + +err_2: + mlx5_vfree(in); + +err_1: + kfree(mr); + + return ERR_PTR(err); +} + +struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_ib_mr *mr = NULL; + struct ib_umem *umem; + int page_shift; + int npages; + int ncont; + int order; + int err; + + mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n", + start, virt_addr, length); + umem = ib_umem_get(pd->uobject->context, start, length, access_flags, + 0); + if (IS_ERR(umem)) { + mlx5_ib_dbg(dev, "umem get failed\n"); + return (void *)umem; + } + + mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order); + if (!npages) { + mlx5_ib_warn(dev, "avoid zero region\n"); + err = -EINVAL; + goto error; + } + + mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", + npages, ncont, order, page_shift); + + if (use_umr(order)) { + mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift, + order, access_flags); + if (PTR_ERR(mr) == -EAGAIN) { + mlx5_ib_dbg(dev, "cache empty for order %d", 
order); + mr = NULL; + } + } + + if (!mr) + mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift, + access_flags); + + if (IS_ERR(mr)) { + err = PTR_ERR(mr); + goto error; + } + + mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key); + + mr->umem = umem; + mr->npages = npages; + spin_lock(&dev->mr_lock); + dev->mdev.priv.reg_pages += npages; + spin_unlock(&dev->mr_lock); + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + + return &mr->ibmr; + +error: + ib_umem_release(umem); + return ERR_PTR(err); +} + +static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) +{ + struct umr_common *umrc = &dev->umrc; + struct ib_send_wr wr, *bad; + int err; + + memset(&wr, 0, sizeof(wr)); + wr.wr_id = (u64)(unsigned long)mr; + prep_umr_unreg_wqe(dev, &wr, mr->mmr.key); + + down(&umrc->sem); + init_completion(&mr->done); + err = ib_post_send(umrc->qp, &wr, &bad); + if (err) { + up(&umrc->sem); + mlx5_ib_dbg(dev, "err %d\n", err); + goto error; + } + wait_for_completion(&mr->done); + up(&umrc->sem); + if (mr->status != IB_WC_SUCCESS) { + mlx5_ib_warn(dev, "unreg umr failed\n"); + err = -EFAULT; + goto error; + } + return 0; + +error: + return err; +} + +int mlx5_ib_dereg_mr(struct ib_mr *ibmr) +{ + struct mlx5_ib_dev *dev = to_mdev(ibmr->device); + struct mlx5_ib_mr *mr = to_mmr(ibmr); + struct ib_umem *umem = mr->umem; + int npages = mr->npages; + int umred = mr->umred; + int err; + + if (!umred) { + err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); + if (err) { + mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", + mr->mmr.key, err); + return err; + } + } else { + err = unreg_umr(dev, mr); + if (err) { + mlx5_ib_warn(dev, "failed unregister\n"); + return err; + } + free_cached_mr(dev, mr); + } + + if (umem) { + ib_umem_release(umem); + spin_lock(&dev->mr_lock); + dev->mdev.priv.reg_pages -= npages; + spin_unlock(&dev->mr_lock); + } + + if (!umred) + kfree(mr); + + return 0; +} + +struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd, + int max_page_list_len) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_create_mkey_mbox_in *in; + struct mlx5_ib_mr *mr; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_free; + } + + in->seg.status = 1 << 6; /* free */ + in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2); + in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT; + in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); + /* + * TBD not needed - issue 197292 */ + in->seg.log2_page_size = PAGE_SHIFT; + + err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in)); + kfree(in); + if (err) + goto err_free; + + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + mr->umem = NULL; + + return &mr->ibmr; + +err_free: + kfree(mr); + return ERR_PTR(err); +} + +struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, + int page_list_len) +{ + struct mlx5_ib_fast_reg_page_list *mfrpl; + int size = page_list_len * sizeof(u64); + + mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL); + if (!mfrpl) + return ERR_PTR(-ENOMEM); + + mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL); + if (!mfrpl->ibfrpl.page_list) + goto err_free; + + mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device, + size, &mfrpl->map, + GFP_KERNEL); + if (!mfrpl->mapped_page_list) + goto err_free; + + WARN_ON(mfrpl->map & 0x3f); + + return &mfrpl->ibfrpl; + 
+err_free: + kfree(mfrpl->ibfrpl.page_list); + kfree(mfrpl); + return ERR_PTR(-ENOMEM); +} + +void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list) +{ + struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list); + struct mlx5_ib_dev *dev = to_mdev(page_list->device); + int size = page_list->max_page_list_len * sizeof(u64); + + dma_free_coherent(&dev->mdev.pdev->dev, size, mfrpl->mapped_page_list, + mfrpl->map); + kfree(mfrpl->ibfrpl.page_list); + kfree(mfrpl); +} diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c new file mode 100644 index 000000000000..16ac54c9819f --- /dev/null +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -0,0 +1,2524 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include "mlx5_ib.h" +#include "user.h" + +/* not supported currently */ +static int wq_signature; + +enum { + MLX5_IB_ACK_REQ_FREQ = 8, +}; + +enum { + MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83, + MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f, + MLX5_IB_LINK_TYPE_IB = 0, + MLX5_IB_LINK_TYPE_ETH = 1 +}; + +enum { + MLX5_IB_SQ_STRIDE = 6, + MLX5_IB_CACHE_LINE_SIZE = 64, +}; + +static const u32 mlx5_ib_opcode[] = { + [IB_WR_SEND] = MLX5_OPCODE_SEND, + [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM, + [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE, + [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM, + [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ, + [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS, + [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA, + [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL, + [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR, + [IB_WR_FAST_REG_MR] = MLX5_OPCODE_UMR, + [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS, + [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA, + [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR, +}; + +struct umr_wr { + u64 virt_addr; + struct ib_pd *pd; + unsigned int page_shift; + unsigned int npages; + u32 length; + int access_flags; + u32 mkey; +}; + +static int is_qp0(enum ib_qp_type qp_type) +{ + return qp_type == IB_QPT_SMI; +} + +static int is_qp1(enum ib_qp_type qp_type) +{ + return qp_type == IB_QPT_GSI; +} + +static int is_sqp(enum ib_qp_type qp_type) +{ + return is_qp0(qp_type) || is_qp1(qp_type); +} + +static void *get_wqe(struct mlx5_ib_qp *qp, int offset) +{ + return mlx5_buf_offset(&qp->buf, offset); +} + +static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n) +{ + return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); +} + +void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n) +{ + return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE)); +} + +static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) +{ + struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; + struct ib_event event; + + if (type == MLX5_EVENT_TYPE_PATH_MIG) + to_mibqp(qp)->port = to_mibqp(qp)->alt_port; + + if (ibqp->event_handler) { + event.device = ibqp->device; + event.element.qp = ibqp; + switch (type) { + case MLX5_EVENT_TYPE_PATH_MIG: + event.event = IB_EVENT_PATH_MIG; + break; + case MLX5_EVENT_TYPE_COMM_EST: + event.event = IB_EVENT_COMM_EST; + break; + case MLX5_EVENT_TYPE_SQ_DRAINED: + event.event = IB_EVENT_SQ_DRAINED; + break; + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: + event.event = IB_EVENT_QP_LAST_WQE_REACHED; + break; + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: + event.event = IB_EVENT_QP_FATAL; + break; + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: + event.event = IB_EVENT_PATH_MIG_ERR; + break; + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + event.event = IB_EVENT_QP_REQ_ERR; + break; + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + event.event = IB_EVENT_QP_ACCESS_ERR; + break; + default: + pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn); + return; + } + + ibqp->event_handler(&event, ibqp->qp_context); + } +} + +static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, + int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) +{ + int wqe_size; + int wq_size; + + /* Sanity check RQ size before proceeding */ + if (cap->max_recv_wr > dev->mdev.caps.max_wqes) + return -EINVAL; + + if (!has_rq) { + qp->rq.max_gs = 0; + qp->rq.wqe_cnt = 0; + qp->rq.wqe_shift = 0; + } else { + if (ucmd) { + qp->rq.wqe_cnt = ucmd->rq_wqe_count; + qp->rq.wqe_shift = ucmd->rq_wqe_shift; + qp->rq.max_gs = (1 << 
qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; + qp->rq.max_post = qp->rq.wqe_cnt; + } else { + wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0; + wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg); + wqe_size = roundup_pow_of_two(wqe_size); + wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size; + wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB); + qp->rq.wqe_cnt = wq_size / wqe_size; + if (wqe_size > dev->mdev.caps.max_rq_desc_sz) { + mlx5_ib_dbg(dev, "wqe_size %d, max %d\n", + wqe_size, + dev->mdev.caps.max_rq_desc_sz); + return -EINVAL; + } + qp->rq.wqe_shift = ilog2(wqe_size); + qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; + qp->rq.max_post = qp->rq.wqe_cnt; + } + } + + return 0; +} + +static int sq_overhead(enum ib_qp_type qp_type) +{ + int size; + + switch (qp_type) { + case IB_QPT_XRC_INI: + size = sizeof(struct mlx5_wqe_xrc_seg); + /* fall through */ + case IB_QPT_RC: + size += sizeof(struct mlx5_wqe_ctrl_seg) + + sizeof(struct mlx5_wqe_atomic_seg) + + sizeof(struct mlx5_wqe_raddr_seg); + break; + + case IB_QPT_UC: + size = sizeof(struct mlx5_wqe_ctrl_seg) + + sizeof(struct mlx5_wqe_raddr_seg); + break; + + case IB_QPT_UD: + case IB_QPT_SMI: + case IB_QPT_GSI: + size = sizeof(struct mlx5_wqe_ctrl_seg) + + sizeof(struct mlx5_wqe_datagram_seg); + break; + + case MLX5_IB_QPT_REG_UMR: + size = sizeof(struct mlx5_wqe_ctrl_seg) + + sizeof(struct mlx5_wqe_umr_ctrl_seg) + + sizeof(struct mlx5_mkey_seg); + break; + + default: + return -EINVAL; + } + + return size; +} + +static int calc_send_wqe(struct ib_qp_init_attr *attr) +{ + int inl_size = 0; + int size; + + size = sq_overhead(attr->qp_type); + if (size < 0) + return size; + + if (attr->cap.max_inline_data) { + inl_size = size + sizeof(struct mlx5_wqe_inline_seg) + + attr->cap.max_inline_data; + } + + size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg); + + return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB); +} + +static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, + struct mlx5_ib_qp *qp) +{ + int wqe_size; + int wq_size; + + if (!attr->cap.max_send_wr) + return 0; + + wqe_size = calc_send_wqe(attr); + mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size); + if (wqe_size < 0) + return wqe_size; + + if (wqe_size > dev->mdev.caps.max_sq_desc_sz) { + mlx5_ib_dbg(dev, "\n"); + return -EINVAL; + } + + qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) - + sizeof(struct mlx5_wqe_inline_seg); + attr->cap.max_inline_data = qp->max_inline_data; + + wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); + qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; + qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); + qp->sq.max_gs = attr->cap.max_send_sge; + qp->sq.max_post = 1 << ilog2(wq_size / wqe_size); + + return wq_size; +} + +static int set_user_buf_size(struct mlx5_ib_dev *dev, + struct mlx5_ib_qp *qp, + struct mlx5_ib_create_qp *ucmd) +{ + int desc_sz = 1 << qp->sq.wqe_shift; + + if (desc_sz > dev->mdev.caps.max_sq_desc_sz) { + mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n", + desc_sz, dev->mdev.caps.max_sq_desc_sz); + return -EINVAL; + } + + if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) { + mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n", + ucmd->sq_wqe_count, ucmd->sq_wqe_count); + return -EINVAL; + } + + qp->sq.wqe_cnt = ucmd->sq_wqe_count; + + if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) { + mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n", + 
qp->sq.wqe_cnt, dev->mdev.caps.max_wqes); + return -EINVAL; + } + + qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + + (qp->sq.wqe_cnt << 6); + + return 0; +} + +static int qp_has_rq(struct ib_qp_init_attr *attr) +{ + if (attr->qp_type == IB_QPT_XRC_INI || + attr->qp_type == IB_QPT_XRC_TGT || attr->srq || + attr->qp_type == MLX5_IB_QPT_REG_UMR || + !attr->cap.max_recv_wr) + return 0; + + return 1; +} + +static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari) +{ + int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE; + int start_uuar; + int i; + + start_uuar = nuuars - uuari->num_low_latency_uuars; + for (i = start_uuar; i < nuuars; i++) { + if (!test_bit(i, uuari->bitmap)) { + set_bit(i, uuari->bitmap); + uuari->count[i]++; + return i; + } + } + + return -ENOMEM; +} + +static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari) +{ + int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE; + int minidx = 1; + int uuarn; + int end; + int i; + + end = nuuars - uuari->num_low_latency_uuars; + + for (i = 1; i < end; i++) { + uuarn = i & 3; + if (uuarn == 2 || uuarn == 3) + continue; + + if (uuari->count[i] < uuari->count[minidx]) + minidx = i; + } + + uuari->count[minidx]++; + return minidx; +} + +static int alloc_uuar(struct mlx5_uuar_info *uuari, + enum mlx5_ib_latency_class lat) +{ + int uuarn = -EINVAL; + + mutex_lock(&uuari->lock); + switch (lat) { + case MLX5_IB_LATENCY_CLASS_LOW: + uuarn = 0; + uuari->count[uuarn]++; + break; + + case MLX5_IB_LATENCY_CLASS_MEDIUM: + uuarn = alloc_med_class_uuar(uuari); + break; + + case MLX5_IB_LATENCY_CLASS_HIGH: + uuarn = alloc_high_class_uuar(uuari); + break; + + case MLX5_IB_LATENCY_CLASS_FAST_PATH: + uuarn = 2; + break; + } + mutex_unlock(&uuari->lock); + + return uuarn; +} + +static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) +{ + clear_bit(uuarn, uuari->bitmap); + --uuari->count[uuarn]; +} + +static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) +{ + clear_bit(uuarn, uuari->bitmap); + --uuari->count[uuarn]; +} + +static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn) +{ + int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE; + int high_uuar = nuuars - uuari->num_low_latency_uuars; + + mutex_lock(&uuari->lock); + if (uuarn == 0) { + --uuari->count[uuarn]; + goto out; + } + + if (uuarn < high_uuar) { + free_med_class_uuar(uuari, uuarn); + goto out; + } + + free_high_class_uuar(uuari, uuarn); + +out: + mutex_unlock(&uuari->lock); +} + +static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state) +{ + switch (state) { + case IB_QPS_RESET: return MLX5_QP_STATE_RST; + case IB_QPS_INIT: return MLX5_QP_STATE_INIT; + case IB_QPS_RTR: return MLX5_QP_STATE_RTR; + case IB_QPS_RTS: return MLX5_QP_STATE_RTS; + case IB_QPS_SQD: return MLX5_QP_STATE_SQD; + case IB_QPS_SQE: return MLX5_QP_STATE_SQER; + case IB_QPS_ERR: return MLX5_QP_STATE_ERR; + default: return -1; + } +} + +static int to_mlx5_st(enum ib_qp_type type) +{ + switch (type) { + case IB_QPT_RC: return MLX5_QP_ST_RC; + case IB_QPT_UC: return MLX5_QP_ST_UC; + case IB_QPT_UD: return MLX5_QP_ST_UD; + case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR; + case IB_QPT_XRC_INI: + case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC; + case IB_QPT_SMI: return MLX5_QP_ST_QP0; + case IB_QPT_GSI: return MLX5_QP_ST_QP1; + case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6; + case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE; + case IB_QPT_RAW_PACKET: + case IB_QPT_MAX: + default: return -EINVAL; + } +} + +static int uuarn_to_uar_index(struct 
mlx5_uuar_info *uuari, int uuarn) +{ + return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index; +} + +static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, + struct mlx5_ib_qp *qp, struct ib_udata *udata, + struct mlx5_create_qp_mbox_in **in, + struct mlx5_ib_create_qp_resp *resp, int *inlen) +{ + struct mlx5_ib_ucontext *context; + struct mlx5_ib_create_qp ucmd; + int page_shift; + int uar_index; + int npages; + u32 offset; + int uuarn; + int ncont; + int err; + + err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); + if (err) { + mlx5_ib_dbg(dev, "copy failed\n"); + return err; + } + + context = to_mucontext(pd->uobject->context); + /* + * TBD: should come from the verbs when we have the API + */ + uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH); + if (uuarn < 0) { + mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n"); + mlx5_ib_dbg(dev, "reverting to high latency\n"); + uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW); + if (uuarn < 0) { + mlx5_ib_dbg(dev, "uuar allocation failed\n"); + return uuarn; + } + } + + uar_index = uuarn_to_uar_index(&context->uuari, uuarn); + mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index); + + err = set_user_buf_size(dev, qp, &ucmd); + if (err) + goto err_uuar; + + qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, + qp->buf_size, 0, 0); + if (IS_ERR(qp->umem)) { + mlx5_ib_dbg(dev, "umem_get failed\n"); + err = PTR_ERR(qp->umem); + goto err_uuar; + } + + mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, + &ncont, NULL); + err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset); + if (err) { + mlx5_ib_warn(dev, "bad offset\n"); + goto err_umem; + } + mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n", + ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset); + + *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont; + *in = mlx5_vzalloc(*inlen); + if (!*in) { + err = -ENOMEM; + goto err_umem; + } + mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0); + (*in)->ctx.log_pg_sz_remote_qpn = + cpu_to_be32((page_shift - PAGE_SHIFT) << 24); + (*in)->ctx.params2 = cpu_to_be32(offset << 6); + + (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); + resp->uuar_index = uuarn; + qp->uuarn = uuarn; + + err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db); + if (err) { + mlx5_ib_dbg(dev, "map failed\n"); + goto err_free; + } + + err = ib_copy_to_udata(udata, resp, sizeof(*resp)); + if (err) { + mlx5_ib_dbg(dev, "copy failed\n"); + goto err_unmap; + } + qp->create_type = MLX5_QP_USER; + + return 0; + +err_unmap: + mlx5_ib_db_unmap_user(context, &qp->db); + +err_free: + mlx5_vfree(*in); + +err_umem: + ib_umem_release(qp->umem); + +err_uuar: + free_uuar(&context->uuari, uuarn); + return err; +} + +static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp) +{ + struct mlx5_ib_ucontext *context; + + context = to_mucontext(pd->uobject->context); + mlx5_ib_db_unmap_user(context, &qp->db); + ib_umem_release(qp->umem); + free_uuar(&context->uuari, qp->uuarn); +} + +static int create_kernel_qp(struct mlx5_ib_dev *dev, + struct ib_qp_init_attr *init_attr, + struct mlx5_ib_qp *qp, + struct mlx5_create_qp_mbox_in **in, int *inlen) +{ + enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW; + struct mlx5_uuar_info *uuari; + int uar_index; + int uuarn; + int err; + + uuari = &dev->mdev.priv.uuari; + if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) + qp->flags |= 
MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; + + if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) + lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; + + uuarn = alloc_uuar(uuari, lc); + if (uuarn < 0) { + mlx5_ib_dbg(dev, "\n"); + return -ENOMEM; + } + + qp->bf = &uuari->bfs[uuarn]; + uar_index = qp->bf->uar->index; + + err = calc_sq_size(dev, init_attr, qp); + if (err < 0) { + mlx5_ib_dbg(dev, "err %d\n", err); + goto err_uuar; + } + + qp->rq.offset = 0; + qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; + qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); + + err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf); + if (err) { + mlx5_ib_dbg(dev, "err %d\n", err); + goto err_uuar; + } + + qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); + *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages; + *in = mlx5_vzalloc(*inlen); + if (!*in) { + err = -ENOMEM; + goto err_buf; + } + (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); + (*in)->ctx.log_pg_sz_remote_qpn = cpu_to_be32((qp->buf.page_shift - PAGE_SHIFT) << 24); + /* Set "fast registration enabled" for all kernel QPs */ + (*in)->ctx.params1 |= cpu_to_be32(1 << 11); + (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4); + + mlx5_fill_page_array(&qp->buf, (*in)->pas); + + err = mlx5_db_alloc(&dev->mdev, &qp->db); + if (err) { + mlx5_ib_dbg(dev, "err %d\n", err); + goto err_free; + } + + qp->db.db[0] = 0; + qp->db.db[1] = 0; + + qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL); + qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL); + qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL); + qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL); + qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL); + + if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || + !qp->sq.w_list || !qp->sq.wqe_head) { + err = -ENOMEM; + goto err_wrid; + } + qp->create_type = MLX5_QP_KERNEL; + + return 0; + +err_wrid: + mlx5_db_free(&dev->mdev, &qp->db); + kfree(qp->sq.wqe_head); + kfree(qp->sq.w_list); + kfree(qp->sq.wrid); + kfree(qp->sq.wr_data); + kfree(qp->rq.wrid); + +err_free: + mlx5_vfree(*in); + +err_buf: + mlx5_buf_free(&dev->mdev, &qp->buf); + +err_uuar: + free_uuar(&dev->mdev.priv.uuari, uuarn); + return err; +} + +static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) +{ + mlx5_db_free(&dev->mdev, &qp->db); + kfree(qp->sq.wqe_head); + kfree(qp->sq.w_list); + kfree(qp->sq.wrid); + kfree(qp->sq.wr_data); + kfree(qp->rq.wrid); + mlx5_buf_free(&dev->mdev, &qp->buf); + free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn); +} + +static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) +{ + if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) || + (attr->qp_type == IB_QPT_XRC_INI)) + return cpu_to_be32(MLX5_SRQ_RQ); + else if (!qp->has_rq) + return cpu_to_be32(MLX5_ZERO_LEN_RQ); + else + return cpu_to_be32(MLX5_NON_ZERO_RQ); +} + +static int is_connected(enum ib_qp_type qp_type) +{ + if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC) + return 1; + + return 0; +} + +static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, struct mlx5_ib_qp *qp) +{ + struct mlx5_ib_resources *devr = &dev->devr; + struct mlx5_ib_create_qp_resp resp; + struct mlx5_create_qp_mbox_in *in; + struct mlx5_ib_create_qp ucmd; + int inlen = sizeof(*in); + int err; + + mutex_init(&qp->mutex); + spin_lock_init(&qp->sq.lock); + 
spin_lock_init(&qp->rq.lock); + + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) + qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; + + if (pd && pd->uobject) { + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { + mlx5_ib_dbg(dev, "copy failed\n"); + return -EFAULT; + } + + qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE); + qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE); + } else { + qp->wq_sig = !!wq_signature; + } + + qp->has_rq = qp_has_rq(init_attr); + err = set_rq_size(dev, &init_attr->cap, qp->has_rq, + qp, (pd && pd->uobject) ? &ucmd : NULL); + if (err) { + mlx5_ib_dbg(dev, "err %d\n", err); + return err; + } + + if (pd) { + if (pd->uobject) { + mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count); + if (ucmd.rq_wqe_shift != qp->rq.wqe_shift || + ucmd.rq_wqe_count != qp->rq.wqe_cnt) { + mlx5_ib_dbg(dev, "invalid rq params\n"); + return -EINVAL; + } + if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) { + mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n", + ucmd.sq_wqe_count, dev->mdev.caps.max_wqes); + return -EINVAL; + } + err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); + if (err) + mlx5_ib_dbg(dev, "err %d\n", err); + } else { + err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); + if (err) + mlx5_ib_dbg(dev, "err %d\n", err); + else + qp->pa_lkey = to_mpd(pd)->pa_lkey; + } + + if (err) + return err; + } else { + in = mlx5_vzalloc(sizeof(*in)); + if (!in) + return -ENOMEM; + + qp->create_type = MLX5_QP_EMPTY; + } + + if (is_sqp(init_attr->qp_type)) + qp->port = init_attr->port_num; + + in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 | + MLX5_QP_PM_MIGRATED << 11); + + if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) + in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? 
pd : devr->p0)->pdn); + else + in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE); + + if (qp->wq_sig) + in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG); + + if (qp->scat_cqe && is_connected(init_attr->qp_type)) { + int rcqe_sz; + int scqe_sz; + + rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq); + scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq); + + if (rcqe_sz == 128) + in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE; + else + in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE; + + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) { + if (scqe_sz == 128) + in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE; + else + in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE; + } + } + + if (qp->rq.wqe_cnt) { + in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4); + in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3; + } + + in->ctx.rq_type_srqn = get_rx_type(qp, init_attr); + + if (qp->sq.wqe_cnt) + in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11); + else + in->ctx.sq_crq_size |= cpu_to_be16(0x8000); + + /* Set default resources */ + switch (init_attr->qp_type) { + case IB_QPT_XRC_TGT: + in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); + in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); + in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); + in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn); + break; + case IB_QPT_XRC_INI: + in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); + in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); + in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); + break; + default: + if (init_attr->srq) { + in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn); + in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn); + } else { + in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); + in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); + } + } + + if (init_attr->send_cq) + in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn); + + if (init_attr->recv_cq) + in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn); + + in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma); + + err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen); + if (err) { + mlx5_ib_dbg(dev, "create qp failed\n"); + goto err_create; + } + + mlx5_vfree(in); + /* Hardware wants QPN written in big-endian order (after + * shifting) for send doorbell. Precompute this value to save + * a little bit when posting sends. 
+ */ + qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); + + qp->mqp.event = mlx5_ib_qp_event; + + return 0; + +err_create: + if (qp->create_type == MLX5_QP_USER) + destroy_qp_user(pd, qp); + else if (qp->create_type == MLX5_QP_KERNEL) + destroy_qp_kernel(dev, qp); + + mlx5_vfree(in); + return err; +} + +static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) + __acquires(&send_cq->lock) __acquires(&recv_cq->lock) +{ + if (send_cq) { + if (recv_cq) { + if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { + spin_lock_irq(&send_cq->lock); + spin_lock_nested(&recv_cq->lock, + SINGLE_DEPTH_NESTING); + } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { + spin_lock_irq(&send_cq->lock); + __acquire(&recv_cq->lock); + } else { + spin_lock_irq(&recv_cq->lock); + spin_lock_nested(&send_cq->lock, + SINGLE_DEPTH_NESTING); + } + } else { + spin_lock_irq(&send_cq->lock); + } + } else if (recv_cq) { + spin_lock_irq(&recv_cq->lock); + } +} + +static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) + __releases(&send_cq->lock) __releases(&recv_cq->lock) +{ + if (send_cq) { + if (recv_cq) { + if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { + spin_unlock(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { + __release(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + } else { + spin_unlock(&send_cq->lock); + spin_unlock_irq(&recv_cq->lock); + } + } else { + spin_unlock_irq(&send_cq->lock); + } + } else if (recv_cq) { + spin_unlock_irq(&recv_cq->lock); + } +} + +static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp) +{ + return to_mpd(qp->ibqp.pd); +} + +static void get_cqs(struct mlx5_ib_qp *qp, + struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq) +{ + switch (qp->ibqp.qp_type) { + case IB_QPT_XRC_TGT: + *send_cq = NULL; + *recv_cq = NULL; + break; + case MLX5_IB_QPT_REG_UMR: + case IB_QPT_XRC_INI: + *send_cq = to_mcq(qp->ibqp.send_cq); + *recv_cq = NULL; + break; + + case IB_QPT_SMI: + case IB_QPT_GSI: + case IB_QPT_RC: + case IB_QPT_UC: + case IB_QPT_UD: + case IB_QPT_RAW_IPV6: + case IB_QPT_RAW_ETHERTYPE: + *send_cq = to_mcq(qp->ibqp.send_cq); + *recv_cq = to_mcq(qp->ibqp.recv_cq); + break; + + case IB_QPT_RAW_PACKET: + case IB_QPT_MAX: + default: + *send_cq = NULL; + *recv_cq = NULL; + break; + } +} + +static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) +{ + struct mlx5_ib_cq *send_cq, *recv_cq; + struct mlx5_modify_qp_mbox_in *in; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return; + if (qp->state != IB_QPS_RESET) + if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state), + MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp)) + mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n", + qp->mqp.qpn); + + get_cqs(qp, &send_cq, &recv_cq); + + if (qp->create_type == MLX5_QP_KERNEL) { + mlx5_ib_lock_cqs(send_cq, recv_cq); + __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, + qp->ibqp.srq ? 
to_msrq(qp->ibqp.srq) : NULL); + if (send_cq != recv_cq) + __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); + mlx5_ib_unlock_cqs(send_cq, recv_cq); + } + + err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp); + if (err) + mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn); + kfree(in); + + + if (qp->create_type == MLX5_QP_KERNEL) + destroy_qp_kernel(dev, qp); + else if (qp->create_type == MLX5_QP_USER) + destroy_qp_user(&get_pd(qp)->ibpd, qp); +} + +static const char *ib_qp_type_str(enum ib_qp_type type) +{ + switch (type) { + case IB_QPT_SMI: + return "IB_QPT_SMI"; + case IB_QPT_GSI: + return "IB_QPT_GSI"; + case IB_QPT_RC: + return "IB_QPT_RC"; + case IB_QPT_UC: + return "IB_QPT_UC"; + case IB_QPT_UD: + return "IB_QPT_UD"; + case IB_QPT_RAW_IPV6: + return "IB_QPT_RAW_IPV6"; + case IB_QPT_RAW_ETHERTYPE: + return "IB_QPT_RAW_ETHERTYPE"; + case IB_QPT_XRC_INI: + return "IB_QPT_XRC_INI"; + case IB_QPT_XRC_TGT: + return "IB_QPT_XRC_TGT"; + case IB_QPT_RAW_PACKET: + return "IB_QPT_RAW_PACKET"; + case MLX5_IB_QPT_REG_UMR: + return "MLX5_IB_QPT_REG_UMR"; + case IB_QPT_MAX: + default: + return "Invalid QP type"; + } +} + +struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev; + struct mlx5_ib_qp *qp; + u16 xrcdn = 0; + int err; + + if (pd) { + dev = to_mdev(pd->device); + } else { + /* being cautious here */ + if (init_attr->qp_type != IB_QPT_XRC_TGT && + init_attr->qp_type != MLX5_IB_QPT_REG_UMR) { + pr_warn("%s: no PD for transport %s\n", __func__, + ib_qp_type_str(init_attr->qp_type)); + return ERR_PTR(-EINVAL); + } + dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device); + } + + switch (init_attr->qp_type) { + case IB_QPT_XRC_TGT: + case IB_QPT_XRC_INI: + if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) { + mlx5_ib_dbg(dev, "XRC not supported\n"); + return ERR_PTR(-ENOSYS); + } + init_attr->recv_cq = NULL; + if (init_attr->qp_type == IB_QPT_XRC_TGT) { + xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; + init_attr->send_cq = NULL; + } + + /* fall through */ + case IB_QPT_RC: + case IB_QPT_UC: + case IB_QPT_UD: + case IB_QPT_SMI: + case IB_QPT_GSI: + case MLX5_IB_QPT_REG_UMR: + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) + return ERR_PTR(-ENOMEM); + + err = create_qp_common(dev, pd, init_attr, udata, qp); + if (err) { + mlx5_ib_dbg(dev, "create_qp_common failed\n"); + kfree(qp); + return ERR_PTR(err); + } + + if (is_qp0(init_attr->qp_type)) + qp->ibqp.qp_num = 0; + else if (is_qp1(init_attr->qp_type)) + qp->ibqp.qp_num = 1; + else + qp->ibqp.qp_num = qp->mqp.qpn; + + mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", + qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn, + to_mcq(init_attr->send_cq)->mcq.cqn); + + qp->xrcdn = xrcdn; + + break; + + case IB_QPT_RAW_IPV6: + case IB_QPT_RAW_ETHERTYPE: + case IB_QPT_RAW_PACKET: + case IB_QPT_MAX: + default: + mlx5_ib_dbg(dev, "unsupported qp type %d\n", + init_attr->qp_type); + /* Don't support raw QPs */ + return ERR_PTR(-EINVAL); + } + + return &qp->ibqp; +} + +int mlx5_ib_destroy_qp(struct ib_qp *qp) +{ + struct mlx5_ib_dev *dev = to_mdev(qp->device); + struct mlx5_ib_qp *mqp = to_mqp(qp); + + destroy_qp_common(dev, mqp); + + kfree(mqp); + + return 0; +} + +static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr, + int attr_mask) +{ + u32 hw_access_flags = 0; + u8 dest_rd_atomic; + u32 access_flags; + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + dest_rd_atomic = 
attr->max_dest_rd_atomic; + else + dest_rd_atomic = qp->resp_depth; + + if (attr_mask & IB_QP_ACCESS_FLAGS) + access_flags = attr->qp_access_flags; + else + access_flags = qp->atomic_rd_en; + + if (!dest_rd_atomic) + access_flags &= IB_ACCESS_REMOTE_WRITE; + + if (access_flags & IB_ACCESS_REMOTE_READ) + hw_access_flags |= MLX5_QP_BIT_RRE; + if (access_flags & IB_ACCESS_REMOTE_ATOMIC) + hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX); + if (access_flags & IB_ACCESS_REMOTE_WRITE) + hw_access_flags |= MLX5_QP_BIT_RWE; + + return cpu_to_be32(hw_access_flags); +} + +enum { + MLX5_PATH_FLAG_FL = 1 << 0, + MLX5_PATH_FLAG_FREE_AR = 1 << 1, + MLX5_PATH_FLAG_COUNTER = 1 << 2, +}; + +static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) +{ + if (rate == IB_RATE_PORT_CURRENT) { + return 0; + } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) { + return -EINVAL; + } else { + while (rate != IB_RATE_2_5_GBPS && + !(1 << (rate + MLX5_STAT_RATE_OFFSET) & + dev->mdev.caps.stat_rate_support)) + --rate; + } + + return rate + MLX5_STAT_RATE_OFFSET; +} + +static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah, + struct mlx5_qp_path *path, u8 port, int attr_mask, + u32 path_flags, const struct ib_qp_attr *attr) +{ + int err; + + path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; + path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0; + + if (attr_mask & IB_QP_PKEY_INDEX) + path->pkey_index = attr->pkey_index; + + path->grh_mlid = ah->src_path_bits & 0x7f; + path->rlid = cpu_to_be16(ah->dlid); + + if (ah->ah_flags & IB_AH_GRH) { + path->grh_mlid |= 1 << 7; + path->mgid_index = ah->grh.sgid_index; + path->hop_limit = ah->grh.hop_limit; + path->tclass_flowlabel = + cpu_to_be32((ah->grh.traffic_class << 20) | + (ah->grh.flow_label)); + memcpy(path->rgid, ah->grh.dgid.raw, 16); + } + + err = ib_rate_to_mlx5(dev, ah->static_rate); + if (err < 0) + return err; + path->static_rate = err; + path->port = port; + + if (ah->ah_flags & IB_AH_GRH) { + if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) { + pr_err(KERN_ERR "sgid_index (%u) too large. 
max is %d\n", + ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len); + return -EINVAL; + } + + path->grh_mlid |= 1 << 7; + path->mgid_index = ah->grh.sgid_index; + path->hop_limit = ah->grh.hop_limit; + path->tclass_flowlabel = + cpu_to_be32((ah->grh.traffic_class << 20) | + (ah->grh.flow_label)); + memcpy(path->rgid, ah->grh.dgid.raw, 16); + } + + if (attr_mask & IB_QP_TIMEOUT) + path->ackto_lt = attr->timeout << 3; + + path->sl = ah->sl & 0xf; + + return 0; +} + +static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = { + [MLX5_QP_STATE_INIT] = { + [MLX5_QP_STATE_INIT] = { + [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | + MLX5_QP_OPTPAR_RAE | + MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PKEY_INDEX | + MLX5_QP_OPTPAR_PRI_PORT, + [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PKEY_INDEX | + MLX5_QP_OPTPAR_PRI_PORT, + [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | + MLX5_QP_OPTPAR_Q_KEY | + MLX5_QP_OPTPAR_PRI_PORT, + }, + [MLX5_QP_STATE_RTR] = { + [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | + MLX5_QP_OPTPAR_RRE | + MLX5_QP_OPTPAR_RAE | + MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PKEY_INDEX, + [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | + MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PKEY_INDEX, + [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | + MLX5_QP_OPTPAR_Q_KEY, + [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX | + MLX5_QP_OPTPAR_Q_KEY, + }, + }, + [MLX5_QP_STATE_RTR] = { + [MLX5_QP_STATE_RTS] = { + [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | + MLX5_QP_OPTPAR_RRE | + MLX5_QP_OPTPAR_RAE | + MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PM_STATE | + MLX5_QP_OPTPAR_RNR_TIMEOUT, + [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | + MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PM_STATE, + [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, + }, + }, + [MLX5_QP_STATE_RTS] = { + [MLX5_QP_STATE_RTS] = { + [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | + MLX5_QP_OPTPAR_RAE | + MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_RNR_TIMEOUT | + MLX5_QP_OPTPAR_PM_STATE, + [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_PM_STATE, + [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY | + MLX5_QP_OPTPAR_SRQN | + MLX5_QP_OPTPAR_CQN_RCV, + }, + }, + [MLX5_QP_STATE_SQER] = { + [MLX5_QP_STATE_RTS] = { + [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, + [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY, + }, + }, +}; + +static int ib_nr_to_mlx5_nr(int ib_mask) +{ + switch (ib_mask) { + case IB_QP_STATE: + return 0; + case IB_QP_CUR_STATE: + return 0; + case IB_QP_EN_SQD_ASYNC_NOTIFY: + return 0; + case IB_QP_ACCESS_FLAGS: + return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE | + MLX5_QP_OPTPAR_RAE; + case IB_QP_PKEY_INDEX: + return MLX5_QP_OPTPAR_PKEY_INDEX; + case IB_QP_PORT: + return MLX5_QP_OPTPAR_PRI_PORT; + case IB_QP_QKEY: + return MLX5_QP_OPTPAR_Q_KEY; + case IB_QP_AV: + return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH | + MLX5_QP_OPTPAR_PRI_PORT; + case IB_QP_PATH_MTU: + return 0; + case IB_QP_TIMEOUT: + return MLX5_QP_OPTPAR_ACK_TIMEOUT; + case IB_QP_RETRY_CNT: + return MLX5_QP_OPTPAR_RETRY_COUNT; + case IB_QP_RNR_RETRY: + return MLX5_QP_OPTPAR_RNR_RETRY; + case IB_QP_RQ_PSN: + return 0; + case IB_QP_MAX_QP_RD_ATOMIC: + return MLX5_QP_OPTPAR_SRA_MAX; + case IB_QP_ALT_PATH: + return MLX5_QP_OPTPAR_ALT_ADDR_PATH; + case IB_QP_MIN_RNR_TIMER: + return MLX5_QP_OPTPAR_RNR_TIMEOUT; + case IB_QP_SQ_PSN: + return 0; + case IB_QP_MAX_DEST_RD_ATOMIC: + return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE | + MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE; + case IB_QP_PATH_MIG_STATE: + return MLX5_QP_OPTPAR_PM_STATE; + case IB_QP_CAP: + return 0; + case 
IB_QP_DEST_QPN: + return 0; + } + return 0; +} + +static int ib_mask_to_mlx5_opt(int ib_mask) +{ + int result = 0; + int i; + + for (i = 0; i < 8 * sizeof(int); i++) { + if ((1 << i) & ib_mask) + result |= ib_nr_to_mlx5_nr(1 << i); + } + + return result; +} + +static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, int attr_mask, + enum ib_qp_state cur_state, enum ib_qp_state new_state) +{ + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); + struct mlx5_ib_qp *qp = to_mqp(ibqp); + struct mlx5_ib_cq *send_cq, *recv_cq; + struct mlx5_qp_context *context; + struct mlx5_modify_qp_mbox_in *in; + struct mlx5_ib_pd *pd; + enum mlx5_qp_state mlx5_cur, mlx5_new; + enum mlx5_qp_optpar optpar; + int sqd_event; + int mlx5_st; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + context = &in->ctx; + err = to_mlx5_st(ibqp->qp_type); + if (err < 0) + goto out; + + context->flags = cpu_to_be32(err << 16); + + if (!(attr_mask & IB_QP_PATH_MIG_STATE)) { + context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); + } else { + switch (attr->path_mig_state) { + case IB_MIG_MIGRATED: + context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); + break; + case IB_MIG_REARM: + context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11); + break; + case IB_MIG_ARMED: + context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11); + break; + } + } + + if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) { + context->mtu_msgmax = (IB_MTU_256 << 5) | 8; + } else if (ibqp->qp_type == IB_QPT_UD || + ibqp->qp_type == MLX5_IB_QPT_REG_UMR) { + context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; + } else if (attr_mask & IB_QP_PATH_MTU) { + if (attr->path_mtu < IB_MTU_256 || + attr->path_mtu > IB_MTU_4096) { + mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu); + err = -EINVAL; + goto out; + } + context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg; + } + + if (attr_mask & IB_QP_DEST_QPN) + context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); + + if (attr_mask & IB_QP_PKEY_INDEX) + context->pri_path.pkey_index = attr->pkey_index; + + /* todo implement counter_index functionality */ + + if (is_sqp(ibqp->qp_type)) + context->pri_path.port = qp->port; + + if (attr_mask & IB_QP_PORT) + context->pri_path.port = attr->port_num; + + if (attr_mask & IB_QP_AV) { + err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path, + attr_mask & IB_QP_PORT ? attr->port_num : qp->port, + attr_mask, 0, attr); + if (err) + goto out; + } + + if (attr_mask & IB_QP_TIMEOUT) + context->pri_path.ackto_lt |= attr->timeout << 3; + + if (attr_mask & IB_QP_ALT_PATH) { + err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path, + attr->alt_port_num, attr_mask, 0, attr); + if (err) + goto out; + } + + pd = get_pd(qp); + get_cqs(qp, &send_cq, &recv_cq); + + context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn); + context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0; + context->cqn_recv = recv_cq ? 
cpu_to_be32(recv_cq->mcq.cqn) : 0; + context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28); + + if (attr_mask & IB_QP_RNR_RETRY) + context->params1 |= cpu_to_be32(attr->rnr_retry << 13); + + if (attr_mask & IB_QP_RETRY_CNT) + context->params1 |= cpu_to_be32(attr->retry_cnt << 16); + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { + if (attr->max_rd_atomic) + context->params1 |= + cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); + } + + if (attr_mask & IB_QP_SQ_PSN) + context->next_send_psn = cpu_to_be32(attr->sq_psn); + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { + if (attr->max_dest_rd_atomic) + context->params2 |= + cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); + } + + if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) + context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask); + + if (attr_mask & IB_QP_MIN_RNR_TIMER) + context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); + + if (attr_mask & IB_QP_RQ_PSN) + context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); + + if (attr_mask & IB_QP_QKEY) + context->qkey = cpu_to_be32(attr->qkey); + + if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) + context->db_rec_addr = cpu_to_be64(qp->db.dma); + + if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && + attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) + sqd_event = 1; + else + sqd_event = 0; + + if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) + context->sq_crq_size |= cpu_to_be16(1 << 4); + + + mlx5_cur = to_mlx5_state(cur_state); + mlx5_new = to_mlx5_state(new_state); + mlx5_st = to_mlx5_st(ibqp->qp_type); + if (mlx5_cur < 0 || mlx5_new < 0 || mlx5_st < 0) + goto out; + + optpar = ib_mask_to_mlx5_opt(attr_mask); + optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st]; + in->optparam = cpu_to_be32(optpar); + err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state), + to_mlx5_state(new_state), in, sqd_event, + &qp->mqp); + if (err) + goto out; + + qp->state = new_state; + + if (attr_mask & IB_QP_ACCESS_FLAGS) + qp->atomic_rd_en = attr->qp_access_flags; + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + qp->resp_depth = attr->max_dest_rd_atomic; + if (attr_mask & IB_QP_PORT) + qp->port = attr->port_num; + if (attr_mask & IB_QP_ALT_PATH) + qp->alt_port = attr->alt_port_num; + + /* + * If we moved a kernel QP to RESET, clean up all old CQ + * entries and reinitialize the QP. + */ + if (new_state == IB_QPS_RESET && !ibqp->uobject) { + mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, + ibqp->srq ? to_msrq(ibqp->srq) : NULL); + if (send_cq != recv_cq) + mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); + + qp->rq.head = 0; + qp->rq.tail = 0; + qp->sq.head = 0; + qp->sq.tail = 0; + qp->sq.cur_post = 0; + qp->sq.last_poll = 0; + qp->db.db[MLX5_RCV_DBR] = 0; + qp->db.db[MLX5_SND_DBR] = 0; + } + +out: + kfree(in); + return err; +} + +int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); + struct mlx5_ib_qp *qp = to_mqp(ibqp); + enum ib_qp_state cur_state, new_state; + int err = -EINVAL; + int port; + + mutex_lock(&qp->mutex); + + cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; + new_state = attr_mask & IB_QP_STATE ? 
attr->qp_state : cur_state; + + if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR && + !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) + goto out; + + if ((attr_mask & IB_QP_PORT) && + (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports)) + goto out; + + if (attr_mask & IB_QP_PKEY_INDEX) { + port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; + if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len) + goto out; + } + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && + attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp) + goto out; + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && + attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp) + goto out; + + if (cur_state == new_state && cur_state == IB_QPS_RESET) { + err = 0; + goto out; + } + + err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); + +out: + mutex_unlock(&qp->mutex); + return err; +} + +static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) +{ + struct mlx5_ib_cq *cq; + unsigned cur; + + cur = wq->head - wq->tail; + if (likely(cur + nreq < wq->max_post)) + return 0; + + cq = to_mcq(ib_cq); + spin_lock(&cq->lock); + cur = wq->head - wq->tail; + spin_unlock(&cq->lock); + + return cur + nreq >= wq->max_post; +} + +static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg, + u64 remote_addr, u32 rkey) +{ + rseg->raddr = cpu_to_be64(remote_addr); + rseg->rkey = cpu_to_be32(rkey); + rseg->reserved = 0; +} + +static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr) +{ + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { + aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); + aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); + } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { + aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); + aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask); + } else { + aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); + aseg->compare = 0; + } +} + +static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg, + struct ib_send_wr *wr) +{ + aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); + aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask); + aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); + aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask); +} + +static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, + struct ib_send_wr *wr) +{ + memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av)); + dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV); + dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey); +} + +static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg) +{ + dseg->byte_count = cpu_to_be32(sg->length); + dseg->lkey = cpu_to_be32(sg->lkey); + dseg->addr = cpu_to_be64(sg->addr); +} + +static __be16 get_klm_octo(int npages) +{ + return cpu_to_be16(ALIGN(npages, 8) / 2); +} + +static __be64 frwr_mkey_mask(void) +{ + u64 result; + + result = MLX5_MKEY_MASK_LEN | + MLX5_MKEY_MASK_PAGE_SIZE | + MLX5_MKEY_MASK_START_ADDR | + MLX5_MKEY_MASK_EN_RINVAL | + MLX5_MKEY_MASK_KEY | + MLX5_MKEY_MASK_LR | + MLX5_MKEY_MASK_LW | + MLX5_MKEY_MASK_RR | + MLX5_MKEY_MASK_RW | + MLX5_MKEY_MASK_A | + MLX5_MKEY_MASK_SMALL_FENCE | + MLX5_MKEY_MASK_FREE; + + return cpu_to_be64(result); +} + +static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, + struct ib_send_wr *wr, int li) +{ + memset(umr, 0, sizeof(*umr)); + + if (li) { + umr->mkey_mask = 
cpu_to_be64(MLX5_MKEY_MASK_FREE); + umr->flags = 1 << 7; + return; + } + + umr->flags = (1 << 5); /* fail if not free */ + umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len); + umr->mkey_mask = frwr_mkey_mask(); +} + +static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, + struct ib_send_wr *wr) +{ + struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg; + u64 mask; + + memset(umr, 0, sizeof(*umr)); + + if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) { + umr->flags = 1 << 5; /* fail if not free */ + umr->klm_octowords = get_klm_octo(umrwr->npages); + mask = MLX5_MKEY_MASK_LEN | + MLX5_MKEY_MASK_PAGE_SIZE | + MLX5_MKEY_MASK_START_ADDR | + MLX5_MKEY_MASK_PD | + MLX5_MKEY_MASK_LR | + MLX5_MKEY_MASK_LW | + MLX5_MKEY_MASK_RR | + MLX5_MKEY_MASK_RW | + MLX5_MKEY_MASK_A | + MLX5_MKEY_MASK_FREE; + umr->mkey_mask = cpu_to_be64(mask); + } else { + umr->flags = 2 << 5; /* fail if free */ + mask = MLX5_MKEY_MASK_FREE; + umr->mkey_mask = cpu_to_be64(mask); + } + + if (!wr->num_sge) + umr->flags |= (1 << 7); /* inline */ +} + +static u8 get_umr_flags(int acc) +{ + return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) | + (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) | + (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) | + (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) | + MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT; +} + +static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr, + int li, int *writ) +{ + memset(seg, 0, sizeof(*seg)); + if (li) { + seg->status = 1 << 6; + return; + } + + seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags); + *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE); + seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00); + seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); + seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); + seg->len = cpu_to_be64(wr->wr.fast_reg.length); + seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2); + seg->log2_page_size = wr->wr.fast_reg.page_shift; +} + +static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr) +{ + memset(seg, 0, sizeof(*seg)); + if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) { + seg->status = 1 << 6; + return; + } + + seg->flags = convert_access(wr->wr.fast_reg.access_flags); + seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn); + seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); + seg->len = cpu_to_be64(wr->wr.fast_reg.length); + seg->log2_page_size = wr->wr.fast_reg.page_shift; + seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); +} + +static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg, + struct ib_send_wr *wr, + struct mlx5_core_dev *mdev, + struct mlx5_ib_pd *pd, + int writ) +{ + struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list); + u64 *page_list = wr->wr.fast_reg.page_list->page_list; + u64 perm = MLX5_EN_RD | (writ ? 
MLX5_EN_WR : 0); + int i; + + for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) + mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); + dseg->addr = cpu_to_be64(mfrpl->map); + dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); + dseg->lkey = cpu_to_be32(pd->pa_lkey); +} + +static __be32 send_ieth(struct ib_send_wr *wr) +{ + switch (wr->opcode) { + case IB_WR_SEND_WITH_IMM: + case IB_WR_RDMA_WRITE_WITH_IMM: + return wr->ex.imm_data; + + case IB_WR_SEND_WITH_INV: + return cpu_to_be32(wr->ex.invalidate_rkey); + + default: + return 0; + } +} + +static u8 calc_sig(void *wqe, int size) +{ + u8 *p = wqe; + u8 res = 0; + int i; + + for (i = 0; i < size; i++) + res ^= p[i]; + + return ~res; +} + +static u8 wq_sig(void *wqe) +{ + return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4); +} + +static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, + void *wqe, int *sz) +{ + struct mlx5_wqe_inline_seg *seg; + void *qend = qp->sq.qend; + void *addr; + int inl = 0; + int copy; + int len; + int i; + + seg = wqe; + wqe += sizeof(*seg); + for (i = 0; i < wr->num_sge; i++) { + addr = (void *)(unsigned long)(wr->sg_list[i].addr); + len = wr->sg_list[i].length; + inl += len; + + if (unlikely(inl > qp->max_inline_data)) + return -ENOMEM; + + if (unlikely(wqe + len > qend)) { + copy = qend - wqe; + memcpy(wqe, addr, copy); + addr += copy; + len -= copy; + wqe = mlx5_get_send_wqe(qp, 0); + } + memcpy(wqe, addr, len); + wqe += len; + } + + seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); + + *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16; + + return 0; +} + +static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size, + struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp) +{ + int writ = 0; + int li; + + li = wr->opcode == IB_WR_LOCAL_INV ? 
1 : 0; + if (unlikely(wr->send_flags & IB_SEND_INLINE)) + return -EINVAL; + + set_frwr_umr_segment(*seg, wr, li); + *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); + *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; + if (unlikely((*seg == qp->sq.qend))) + *seg = mlx5_get_send_wqe(qp, 0); + set_mkey_segment(*seg, wr, li, &writ); + *seg += sizeof(struct mlx5_mkey_seg); + *size += sizeof(struct mlx5_mkey_seg) / 16; + if (unlikely((*seg == qp->sq.qend))) + *seg = mlx5_get_send_wqe(qp, 0); + if (!li) { + set_frwr_pages(*seg, wr, mdev, pd, writ); + *seg += sizeof(struct mlx5_wqe_data_seg); + *size += (sizeof(struct mlx5_wqe_data_seg) / 16); + } + return 0; +} + +static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) +{ + __be32 *p = NULL; + int tidx = idx; + int i, j; + + pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx)); + for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) { + if ((i & 0xf) == 0) { + void *buf = mlx5_get_send_wqe(qp, tidx); + tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); + p = buf; + j = 0; + } + pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]), + be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]), + be32_to_cpu(p[j + 3])); + } +} + +static void mlx5_bf_copy(u64 __iomem *dst, u64 *src, + unsigned bytecnt, struct mlx5_ib_qp *qp) +{ + while (bytecnt > 0) { + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + __iowrite64_copy(dst++, src++, 8); + bytecnt -= 64; + if (unlikely(src == qp->sq.qend)) + src = mlx5_get_send_wqe(qp, 0); + } +} + +static u8 get_fence(u8 fence, struct ib_send_wr *wr) +{ + if (unlikely(wr->opcode == IB_WR_LOCAL_INV && + wr->send_flags & IB_SEND_FENCE)) + return MLX5_FENCE_MODE_STRONG_ORDERING; + + if (unlikely(fence)) { + if (wr->send_flags & IB_SEND_FENCE) + return MLX5_FENCE_MODE_SMALL_AND_FENCE; + else + return fence; + + } else { + return 0; + } +} + +int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */ + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); + struct mlx5_core_dev *mdev = &dev->mdev; + struct mlx5_ib_qp *qp = to_mqp(ibqp); + struct mlx5_wqe_data_seg *dpseg; + struct mlx5_wqe_xrc_seg *xrc; + struct mlx5_bf *bf = qp->bf; + int uninitialized_var(size); + void *qend = qp->sq.qend; + unsigned long flags; + u32 mlx5_opcode; + unsigned idx; + int err = 0; + int inl = 0; + int num_sge; + void *seg; + int nreq; + int i; + u8 next_fence = 0; + u8 opmod = 0; + u8 fence; + + spin_lock_irqsave(&qp->sq.lock, flags); + + for (nreq = 0; wr; nreq++, wr = wr->next) { + if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) { + mlx5_ib_warn(dev, "\n"); + err = -EINVAL; + *bad_wr = wr; + goto out; + } + + if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) { + mlx5_ib_warn(dev, "\n"); + err = -ENOMEM; + *bad_wr = wr; + goto out; + } + + fence = qp->fm_cache; + num_sge = wr->num_sge; + if (unlikely(num_sge > qp->sq.max_gs)) { + mlx5_ib_warn(dev, "\n"); + err = -ENOMEM; + *bad_wr = wr; + goto out; + } + + idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); + seg = mlx5_get_send_wqe(qp, idx); + ctrl = seg; + *(uint32_t *)(seg + 8) = 0; + ctrl->imm = send_ieth(wr); + ctrl->fm_ce_se = qp->sq_signal_bits | + (wr->send_flags & IB_SEND_SIGNALED ? 
+ MLX5_WQE_CTRL_CQ_UPDATE : 0) | + (wr->send_flags & IB_SEND_SOLICITED ? + MLX5_WQE_CTRL_SOLICITED : 0); + + seg += sizeof(*ctrl); + size = sizeof(*ctrl) / 16; + + switch (ibqp->qp_type) { + case IB_QPT_XRC_INI: + xrc = seg; + xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num); + seg += sizeof(*xrc); + size += sizeof(*xrc) / 16; + /* fall through */ + case IB_QPT_RC: + switch (wr->opcode) { + case IB_WR_RDMA_READ: + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + set_raddr_seg(seg, wr->wr.rdma.remote_addr, + wr->wr.rdma.rkey); + seg += sizeof(struct mlx5_wqe_raddr_seg); + size += sizeof(struct mlx5_wqe_raddr_seg) / 16; + break; + + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + set_raddr_seg(seg, wr->wr.atomic.remote_addr, + wr->wr.atomic.rkey); + seg += sizeof(struct mlx5_wqe_raddr_seg); + + set_atomic_seg(seg, wr); + seg += sizeof(struct mlx5_wqe_atomic_seg); + + size += (sizeof(struct mlx5_wqe_raddr_seg) + + sizeof(struct mlx5_wqe_atomic_seg)) / 16; + break; + + case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: + set_raddr_seg(seg, wr->wr.atomic.remote_addr, + wr->wr.atomic.rkey); + seg += sizeof(struct mlx5_wqe_raddr_seg); + + set_masked_atomic_seg(seg, wr); + seg += sizeof(struct mlx5_wqe_masked_atomic_seg); + + size += (sizeof(struct mlx5_wqe_raddr_seg) + + sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16; + break; + + case IB_WR_LOCAL_INV: + next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; + qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; + ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); + err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); + if (err) { + mlx5_ib_warn(dev, "\n"); + *bad_wr = wr; + goto out; + } + num_sge = 0; + break; + + case IB_WR_FAST_REG_MR: + next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; + qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR; + ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey); + err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); + if (err) { + mlx5_ib_warn(dev, "\n"); + *bad_wr = wr; + goto out; + } + num_sge = 0; + break; + + default: + break; + } + break; + + case IB_QPT_UC: + switch (wr->opcode) { + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + set_raddr_seg(seg, wr->wr.rdma.remote_addr, + wr->wr.rdma.rkey); + seg += sizeof(struct mlx5_wqe_raddr_seg); + size += sizeof(struct mlx5_wqe_raddr_seg) / 16; + break; + + default: + break; + } + break; + + case IB_QPT_UD: + case IB_QPT_SMI: + case IB_QPT_GSI: + set_datagram_seg(seg, wr); + seg += sizeof(struct mlx5_wqe_datagram_seg); + size += sizeof(struct mlx5_wqe_datagram_seg) / 16; + if (unlikely((seg == qend))) + seg = mlx5_get_send_wqe(qp, 0); + break; + + case MLX5_IB_QPT_REG_UMR: + if (wr->opcode != MLX5_IB_WR_UMR) { + err = -EINVAL; + mlx5_ib_warn(dev, "bad opcode\n"); + goto out; + } + qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; + ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey); + set_reg_umr_segment(seg, wr); + seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); + size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; + if (unlikely((seg == qend))) + seg = mlx5_get_send_wqe(qp, 0); + set_reg_mkey_segment(seg, wr); + seg += sizeof(struct mlx5_mkey_seg); + size += sizeof(struct mlx5_mkey_seg) / 16; + if (unlikely((seg == qend))) + seg = mlx5_get_send_wqe(qp, 0); + break; + + default: + break; + } + + if (wr->send_flags & IB_SEND_INLINE && num_sge) { + int uninitialized_var(sz); + + err = set_data_inl_seg(qp, wr, seg, &sz); + if (unlikely(err)) { + mlx5_ib_warn(dev, "\n"); + *bad_wr = wr; + goto out; + } + inl = 1; + size += sz; + } else { + dpseg = seg; + for (i = 0; i < num_sge; 
i++) { + if (unlikely(dpseg == qend)) { + seg = mlx5_get_send_wqe(qp, 0); + dpseg = seg; + } + if (likely(wr->sg_list[i].length)) { + set_data_ptr_seg(dpseg, wr->sg_list + i); + size += sizeof(struct mlx5_wqe_data_seg) / 16; + dpseg++; + } + } + } + + mlx5_opcode = mlx5_ib_opcode[wr->opcode]; + ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | + mlx5_opcode | + ((u32)opmod << 24)); + ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8)); + ctrl->fm_ce_se |= get_fence(fence, wr); + qp->fm_cache = next_fence; + if (unlikely(qp->wq_sig)) + ctrl->signature = wq_sig(ctrl); + + qp->sq.wrid[idx] = wr->wr_id; + qp->sq.w_list[idx].opcode = mlx5_opcode; + qp->sq.wqe_head[idx] = qp->sq.head + nreq; + qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); + qp->sq.w_list[idx].next = qp->sq.cur_post; + + if (0) + dump_wqe(qp, idx, size); + } + +out: + if (likely(nreq)) { + qp->sq.head += nreq; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + + qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); + + if (bf->need_lock) + spin_lock(&bf->lock); + + /* TBD enable WC */ + if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) { + mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp); + /* wc_wmb(); */ + } else { + mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset, + MLX5_GET_DOORBELL_LOCK(&bf->lock32)); + /* Make sure doorbells don't leak out of SQ spinlock + * and reach the HCA out of order. + */ + mmiowb(); + } + bf->offset ^= bf->buf_size; + if (bf->need_lock) + spin_unlock(&bf->lock); + } + + spin_unlock_irqrestore(&qp->sq.lock, flags); + + return err; +} + +static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size) +{ + sig->signature = calc_sig(sig, size); +} + +int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct mlx5_ib_qp *qp = to_mqp(ibqp); + struct mlx5_wqe_data_seg *scat; + struct mlx5_rwqe_sig *sig; + unsigned long flags; + int err = 0; + int nreq; + int ind; + int i; + + spin_lock_irqsave(&qp->rq.lock, flags); + + ind = qp->rq.head & (qp->rq.wqe_cnt - 1); + + for (nreq = 0; wr; nreq++, wr = wr->next) { + if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { + err = -ENOMEM; + *bad_wr = wr; + goto out; + } + + if (unlikely(wr->num_sge > qp->rq.max_gs)) { + err = -EINVAL; + *bad_wr = wr; + goto out; + } + + scat = get_recv_wqe(qp, ind); + if (qp->wq_sig) + scat++; + + for (i = 0; i < wr->num_sge; i++) + set_data_ptr_seg(scat + i, wr->sg_list + i); + + if (i < qp->rq.max_gs) { + scat[i].byte_count = 0; + scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY); + scat[i].addr = 0; + } + + if (qp->wq_sig) { + sig = (struct mlx5_rwqe_sig *)scat; + set_sig_seg(sig, (qp->rq.max_gs + 1) << 2); + } + + qp->rq.wrid[ind] = wr->wr_id; + + ind = (ind + 1) & (qp->rq.wqe_cnt - 1); + } + +out: + if (likely(nreq)) { + qp->rq.head += nreq; + + /* Make sure that descriptors are written before + * doorbell record. 
+ */ + wmb(); + + *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); + } + + spin_unlock_irqrestore(&qp->rq.lock, flags); + + return err; +} + +static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state) +{ + switch (mlx5_state) { + case MLX5_QP_STATE_RST: return IB_QPS_RESET; + case MLX5_QP_STATE_INIT: return IB_QPS_INIT; + case MLX5_QP_STATE_RTR: return IB_QPS_RTR; + case MLX5_QP_STATE_RTS: return IB_QPS_RTS; + case MLX5_QP_STATE_SQ_DRAINING: + case MLX5_QP_STATE_SQD: return IB_QPS_SQD; + case MLX5_QP_STATE_SQER: return IB_QPS_SQE; + case MLX5_QP_STATE_ERR: return IB_QPS_ERR; + default: return -1; + } +} + +static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state) +{ + switch (mlx5_mig_state) { + case MLX5_QP_PM_ARMED: return IB_MIG_ARMED; + case MLX5_QP_PM_REARM: return IB_MIG_REARM; + case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED; + default: return -1; + } +} + +static int to_ib_qp_access_flags(int mlx5_flags) +{ + int ib_flags = 0; + + if (mlx5_flags & MLX5_QP_BIT_RRE) + ib_flags |= IB_ACCESS_REMOTE_READ; + if (mlx5_flags & MLX5_QP_BIT_RWE) + ib_flags |= IB_ACCESS_REMOTE_WRITE; + if (mlx5_flags & MLX5_QP_BIT_RAE) + ib_flags |= IB_ACCESS_REMOTE_ATOMIC; + + return ib_flags; +} + +static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr, + struct mlx5_qp_path *path) +{ + struct mlx5_core_dev *dev = &ibdev->mdev; + + memset(ib_ah_attr, 0, sizeof(*ib_ah_attr)); + ib_ah_attr->port_num = path->port; + + if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports) + return; + + ib_ah_attr->sl = path->sl & 0xf; + + ib_ah_attr->dlid = be16_to_cpu(path->rlid); + ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f; + ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0; + ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? 
IB_AH_GRH : 0; + if (ib_ah_attr->ah_flags) { + ib_ah_attr->grh.sgid_index = path->mgid_index; + ib_ah_attr->grh.hop_limit = path->hop_limit; + ib_ah_attr->grh.traffic_class = + (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff; + ib_ah_attr->grh.flow_label = + be32_to_cpu(path->tclass_flowlabel) & 0xfffff; + memcpy(ib_ah_attr->grh.dgid.raw, + path->rgid, sizeof(ib_ah_attr->grh.dgid.raw)); + } +} + +int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr) +{ + struct mlx5_ib_dev *dev = to_mdev(ibqp->device); + struct mlx5_ib_qp *qp = to_mqp(ibqp); + struct mlx5_query_qp_mbox_out *outb; + struct mlx5_qp_context *context; + int mlx5_state; + int err = 0; + + mutex_lock(&qp->mutex); + outb = kzalloc(sizeof(*outb), GFP_KERNEL); + if (!outb) { + err = -ENOMEM; + goto out; + } + context = &outb->ctx; + err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb)); + if (err) + goto out_free; + + mlx5_state = be32_to_cpu(context->flags) >> 28; + + qp->state = to_ib_qp_state(mlx5_state); + qp_attr->qp_state = qp->state; + qp_attr->path_mtu = context->mtu_msgmax >> 5; + qp_attr->path_mig_state = + to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); + qp_attr->qkey = be32_to_cpu(context->qkey); + qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff; + qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; + qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff; + qp_attr->qp_access_flags = + to_ib_qp_access_flags(be32_to_cpu(context->params2)); + + if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { + to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); + to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); + qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f; + qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; + } + + qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f; + qp_attr->port_num = context->pri_path.port; + + /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ + qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING; + + qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7); + + qp_attr->max_dest_rd_atomic = + 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); + qp_attr->min_rnr_timer = + (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; + qp_attr->timeout = context->pri_path.ackto_lt >> 3; + qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; + qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7; + qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3; + qp_attr->cur_qp_state = qp_attr->qp_state; + qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; + qp_attr->cap.max_recv_sge = qp->rq.max_gs; + + if (!ibqp->uobject) { + qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; + qp_attr->cap.max_send_sge = qp->sq.max_gs; + } else { + qp_attr->cap.max_send_wr = 0; + qp_attr->cap.max_send_sge = 0; + } + + /* We don't support inline sends for kernel QPs (yet), and we + * don't know what userspace's value should be. + */ + qp_attr->cap.max_inline_data = 0; + + qp_init_attr->cap = qp_attr->cap; + + qp_init_attr->create_flags = 0; + if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) + qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; + + qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ? 
+ IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; + +out_free: + kfree(outb); + +out: + mutex_unlock(&qp->mutex); + return err; +} + +struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_ib_xrcd *xrcd; + int err; + + if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) + return ERR_PTR(-ENOSYS); + + xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL); + if (!xrcd) + return ERR_PTR(-ENOMEM); + + err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn); + if (err) { + kfree(xrcd); + return ERR_PTR(-ENOMEM); + } + + return &xrcd->ibxrcd; +} + +int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd) +{ + struct mlx5_ib_dev *dev = to_mdev(xrcd->device); + u32 xrcdn = to_mxrcd(xrcd)->xrcdn; + int err; + + err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn); + if (err) { + mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn); + return err; + } + + kfree(xrcd); + + return 0; +} diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c new file mode 100644 index 000000000000..84d297afd6a9 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -0,0 +1,473 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/module.h> +#include <linux/mlx5/qp.h> +#include <linux/mlx5/srq.h> +#include <linux/slab.h> +#include <rdma/ib_umem.h> + +#include "mlx5_ib.h" +#include "user.h" + +/* not supported currently */ +static int srq_signature; + +static void *get_wqe(struct mlx5_ib_srq *srq, int n) +{ + return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); +} + +static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type) +{ + struct ib_event event; + struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq; + + if (ibsrq->event_handler) { + event.device = ibsrq->device; + event.element.srq = ibsrq; + switch (type) { + case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: + event.event = IB_EVENT_SRQ_LIMIT_REACHED; + break; + case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: + event.event = IB_EVENT_SRQ_ERR; + break; + default: + pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n", + type, srq->srqn); + return; + } + + ibsrq->event_handler(&event, ibsrq->srq_context); + } +} + +static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, + struct mlx5_create_srq_mbox_in **in, + struct ib_udata *udata, int buf_size, int *inlen) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_ib_create_srq ucmd; + int err; + int npages; + int page_shift; + int ncont; + u32 offset; + + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { + mlx5_ib_dbg(dev, "failed copy udata\n"); + return -EFAULT; + } + srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); + + srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size, + 0, 0); + if (IS_ERR(srq->umem)) { + mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size); + err = PTR_ERR(srq->umem); + return err; + } + + mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages, + &page_shift, &ncont, NULL); + err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, + &offset); + if (err) { + mlx5_ib_warn(dev, "bad offset\n"); + goto err_umem; + } + + *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont; + *in = mlx5_vzalloc(*inlen); + if (!(*in)) { + err = -ENOMEM; + goto err_umem; + } + + mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0); + + err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context), + ucmd.db_addr, &srq->db); + if (err) { + mlx5_ib_dbg(dev, "map doorbell failed\n"); + goto err_in; + } + + (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT; + (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26); + + return 0; + +err_in: + mlx5_vfree(*in); + +err_umem: + ib_umem_release(srq->umem); + + return err; +} + +static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, + struct mlx5_create_srq_mbox_in **in, int buf_size, + int *inlen) +{ + int err; + int i; + struct mlx5_wqe_srq_next_seg *next; + int page_shift; + int npages; + + err = mlx5_db_alloc(&dev->mdev, &srq->db); + if (err) { + mlx5_ib_warn(dev, "alloc dbell rec failed\n"); + return err; + } + + *srq->db.db = 0; + + if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) { + mlx5_ib_dbg(dev, "buf alloc failed\n"); + err = -ENOMEM; + goto err_db; + } + page_shift = srq->buf.page_shift; + + srq->head = 0; + srq->tail = srq->msrq.max - 1; + srq->wqe_ctr = 0; + + for (i = 0; i < srq->msrq.max; i++) { + next = get_wqe(srq, i); + next->next_wqe_index = + cpu_to_be16((i + 1) & (srq->msrq.max - 1)); + } + + npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT)); + mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n", + buf_size, page_shift, srq->buf.npages, npages); + *inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages; + *in = mlx5_vzalloc(*inlen); + if (!*in) { + err = -ENOMEM; + goto 
err_buf; + } + mlx5_fill_page_array(&srq->buf, (*in)->pas); + + srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL); + if (!srq->wrid) { + mlx5_ib_dbg(dev, "kmalloc failed %lu\n", + (unsigned long)(srq->msrq.max * sizeof(u64))); + err = -ENOMEM; + goto err_in; + } + srq->wq_sig = !!srq_signature; + + (*in)->ctx.log_pg_sz = page_shift - PAGE_SHIFT; + + return 0; + +err_in: + mlx5_vfree(*in); + +err_buf: + mlx5_buf_free(&dev->mdev, &srq->buf); + +err_db: + mlx5_db_free(&dev->mdev, &srq->db); + return err; +} + +static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq) +{ + mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); + ib_umem_release(srq->umem); +} + + +static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq) +{ + kfree(srq->wrid); + mlx5_buf_free(&dev->mdev, &srq->buf); + mlx5_db_free(&dev->mdev, &srq->db); +} + +struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_ib_srq *srq; + int desc_size; + int buf_size; + int err; + struct mlx5_create_srq_mbox_in *uninitialized_var(in); + int uninitialized_var(inlen); + int is_xrc; + u32 flgs, xrcdn; + + /* Sanity check SRQ size before proceeding */ + if (init_attr->attr.max_wr >= dev->mdev.caps.max_srq_wqes) { + mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", + init_attr->attr.max_wr, + dev->mdev.caps.max_srq_wqes); + return ERR_PTR(-EINVAL); + } + + srq = kmalloc(sizeof(*srq), GFP_KERNEL); + if (!srq) + return ERR_PTR(-ENOMEM); + + mutex_init(&srq->mutex); + spin_lock_init(&srq->lock); + srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); + srq->msrq.max_gs = init_attr->attr.max_sge; + + desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + + srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); + desc_size = roundup_pow_of_two(desc_size); + desc_size = max_t(int, 32, desc_size); + srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / + sizeof(struct mlx5_wqe_data_seg); + srq->msrq.wqe_shift = ilog2(desc_size); + buf_size = srq->msrq.max * desc_size; + mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n", + desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, + srq->msrq.max_avail_gather); + + if (pd->uobject) + err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen); + else + err = create_srq_kernel(dev, srq, &in, buf_size, &inlen); + + if (err) { + mlx5_ib_warn(dev, "create srq %s failed, err %d\n", + pd->uobject ? 
"user" : "kernel", err); + goto err_srq; + } + + is_xrc = (init_attr->srq_type == IB_SRQT_XRC); + in->ctx.state_log_sz = ilog2(srq->msrq.max); + flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24; + xrcdn = 0; + if (is_xrc) { + xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn; + in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn); + } else if (init_attr->srq_type == IB_SRQT_BASIC) { + xrcdn = to_mxrcd(dev->devr.x0)->xrcdn; + in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn); + } + + in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF)); + + in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn); + in->ctx.db_record = cpu_to_be64(srq->db.dma); + err = mlx5_core_create_srq(&dev->mdev, &srq->msrq, in, inlen); + mlx5_vfree(in); + if (err) { + mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); + goto err_srq; + } + + mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn); + + srq->msrq.event = mlx5_ib_srq_event; + srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; + + if (pd->uobject) + if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) { + mlx5_ib_dbg(dev, "copy to user failed\n"); + err = -EFAULT; + goto err_core; + } + + init_attr->attr.max_wr = srq->msrq.max - 1; + + return &srq->ibsrq; + +err_core: + mlx5_core_destroy_srq(&dev->mdev, &srq->msrq); + if (pd->uobject) + destroy_srq_user(pd, srq); + else + destroy_srq_kernel(dev, srq); + +err_srq: + kfree(srq); + + return ERR_PTR(err); +} + +int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); + struct mlx5_ib_srq *srq = to_msrq(ibsrq); + int ret; + + /* We don't support resizing SRQs yet */ + if (attr_mask & IB_SRQ_MAX_WR) + return -EINVAL; + + if (attr_mask & IB_SRQ_LIMIT) { + if (attr->srq_limit >= srq->msrq.max) + return -EINVAL; + + mutex_lock(&srq->mutex); + ret = mlx5_core_arm_srq(&dev->mdev, &srq->msrq, attr->srq_limit, 1); + mutex_unlock(&srq->mutex); + + if (ret) + return ret; + } + + return 0; +} + +int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) +{ + struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); + struct mlx5_ib_srq *srq = to_msrq(ibsrq); + int ret; + struct mlx5_query_srq_mbox_out *out; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + ret = mlx5_core_query_srq(&dev->mdev, &srq->msrq, out); + if (ret) + goto out_box; + + srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm); + srq_attr->max_wr = srq->msrq.max - 1; + srq_attr->max_sge = srq->msrq.max_gs; + +out_box: + kfree(out); + return ret; +} + +int mlx5_ib_destroy_srq(struct ib_srq *srq) +{ + struct mlx5_ib_dev *dev = to_mdev(srq->device); + struct mlx5_ib_srq *msrq = to_msrq(srq); + + mlx5_core_destroy_srq(&dev->mdev, &msrq->msrq); + + if (srq->uobject) { + mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); + ib_umem_release(msrq->umem); + } else { + kfree(msrq->wrid); + mlx5_buf_free(&dev->mdev, &msrq->buf); + mlx5_db_free(&dev->mdev, &msrq->db); + } + + kfree(srq); + return 0; +} + +void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index) +{ + struct mlx5_wqe_srq_next_seg *next; + + /* always called with interrupts disabled. 
*/ + spin_lock(&srq->lock); + + next = get_wqe(srq, srq->tail); + next->next_wqe_index = cpu_to_be16(wqe_index); + srq->tail = wqe_index; + + spin_unlock(&srq->lock); +} + +int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct mlx5_ib_srq *srq = to_msrq(ibsrq); + struct mlx5_wqe_srq_next_seg *next; + struct mlx5_wqe_data_seg *scat; + unsigned long flags; + int err = 0; + int nreq; + int i; + + spin_lock_irqsave(&srq->lock, flags); + + for (nreq = 0; wr; nreq++, wr = wr->next) { + if (unlikely(wr->num_sge > srq->msrq.max_gs)) { + err = -EINVAL; + *bad_wr = wr; + break; + } + + if (unlikely(srq->head == srq->tail)) { + err = -ENOMEM; + *bad_wr = wr; + break; + } + + srq->wrid[srq->head] = wr->wr_id; + + next = get_wqe(srq, srq->head); + srq->head = be16_to_cpu(next->next_wqe_index); + scat = (struct mlx5_wqe_data_seg *)(next + 1); + + for (i = 0; i < wr->num_sge; i++) { + scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); + scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); + scat[i].addr = cpu_to_be64(wr->sg_list[i].addr); + } + + if (i < srq->msrq.max_avail_gather) { + scat[i].byte_count = 0; + scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY); + scat[i].addr = 0; + } + } + + if (likely(nreq)) { + srq->wqe_ctr += nreq; + + /* Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + + *srq->db.db = cpu_to_be32(srq->wqe_ctr); + } + + spin_unlock_irqrestore(&srq->lock, flags); + + return err; +} diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h new file mode 100644 index 000000000000..a886de3e593c --- /dev/null +++ b/drivers/infiniband/hw/mlx5/user.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_IB_USER_H +#define MLX5_IB_USER_H + +#include + +enum { + MLX5_QP_FLAG_SIGNATURE = 1 << 0, + MLX5_QP_FLAG_SCATTER_CQE = 1 << 1, +}; + +enum { + MLX5_SRQ_FLAG_SIGNATURE = 1 << 0, +}; + + +/* Increment this value if any changes that break userspace ABI + * compatibility are made. 
+ */ +#define MLX5_IB_UVERBS_ABI_VERSION 1 + +/* Make sure that all structs defined in this file remain laid out so + * that they pack the same way on 32-bit and 64-bit architectures (to + * avoid incompatibility between 32-bit userspace and 64-bit kernels). + * In particular do not use pointer types -- pass pointers in __u64 + * instead. + */ + +struct mlx5_ib_alloc_ucontext_req { + __u32 total_num_uuars; + __u32 num_low_latency_uuars; +}; + +struct mlx5_ib_alloc_ucontext_resp { + __u32 qp_tab_size; + __u32 bf_reg_size; + __u32 tot_uuars; + __u32 cache_line_size; + __u16 max_sq_desc_sz; + __u16 max_rq_desc_sz; + __u32 max_send_wqebb; + __u32 max_recv_wr; + __u32 max_srq_recv_wr; + __u16 num_ports; + __u16 reserved; +}; + +struct mlx5_ib_alloc_pd_resp { + __u32 pdn; +}; + +struct mlx5_ib_create_cq { + __u64 buf_addr; + __u64 db_addr; + __u32 cqe_size; +}; + +struct mlx5_ib_create_cq_resp { + __u32 cqn; + __u32 reserved; +}; + +struct mlx5_ib_resize_cq { + __u64 buf_addr; +}; + +struct mlx5_ib_create_srq { + __u64 buf_addr; + __u64 db_addr; + __u32 flags; +}; + +struct mlx5_ib_create_srq_resp { + __u32 srqn; + __u32 reserved; +}; + +struct mlx5_ib_create_qp { + __u64 buf_addr; + __u64 db_addr; + __u32 sq_wqe_count; + __u32 rq_wqe_count; + __u32 rq_wqe_shift; + __u32 flags; +}; + +struct mlx5_ib_create_qp_resp { + __u32 uuar_index; +}; +#endif /* MLX5_IB_USER_H */ diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig index bcdbc14aeff0..8cf7563a8d92 100644 --- a/drivers/net/ethernet/mellanox/Kconfig +++ b/drivers/net/ethernet/mellanox/Kconfig @@ -19,5 +19,6 @@ config NET_VENDOR_MELLANOX if NET_VENDOR_MELLANOX source "drivers/net/ethernet/mellanox/mlx4/Kconfig" +source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig" endif # NET_VENDOR_MELLANOX diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile index 37afb9683372..38fe32ef5e5f 100644 --- a/drivers/net/ethernet/mellanox/Makefile +++ b/drivers/net/ethernet/mellanox/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_MLX4_CORE) += mlx4/ +obj-$(CONFIG_MLX5_CORE) += mlx5/core/ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig new file mode 100644 index 000000000000..21962828925a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -0,0 +1,18 @@ +# +# Mellanox driver configuration +# + +config MLX5_CORE + tristate + depends on PCI && X86 + default n + +config MLX5_DEBUG + bool "Verbose debugging output" if (MLX5_CORE && EXPERT) + depends on MLX5_CORE + default y + ---help--- + This option causes debugging code to be compiled into the + mlx5_core driver. The output can be turned on via the + debug_mask module parameter (which can also be set after + the driver is loaded through sysfs). 
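The Kconfig help above mentions a debug_mask module parameter that can also be changed after the driver is loaded, through sysfs. As a rough illustration of that mechanism only (not the actual mlx5_core implementation: the parameter name demo_debug_mask, the category bits and the macro below are assumptions for this sketch), a mask-gated debug macro in a minimal module could look like this:

#include <linux/module.h>
#include <linux/kernel.h>

static unsigned int demo_debug_mask;	/* assumed parameter name for this sketch */
module_param(demo_debug_mask, uint, 0644);	/* 0644: writable via sysfs at runtime */
MODULE_PARM_DESC(demo_debug_mask, "bitmask selecting debug output categories");

enum {
	DEMO_DBG_CMD_DATA,	/* assumed category bits, for illustration only */
	DEMO_DBG_CMD_TIME,
};

/* print only when the category's bit is set in the mask */
#define demo_dbg_mask(bit, fmt, ...)					\
	do {								\
		if (demo_debug_mask & (1U << (bit)))			\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)

static int __init demo_init(void)
{
	demo_dbg_mask(DEMO_DBG_CMD_DATA, "command data debugging enabled\n");
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With such a module loaded, writing a bit pattern to /sys/module/<modname>/parameters/demo_debug_mask enables the corresponding categories without a reload, which is the behaviour the help text above describes for the real debug_mask parameter.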
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile new file mode 100644 index 000000000000..105780bb980b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_MLX5_CORE) += mlx5_core.o + +mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ + health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ + mad.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c new file mode 100644 index 000000000000..b215742b842f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mlx5_core.h" + +/* Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. 
+ */ + +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, + struct mlx5_buf *buf) +{ + dma_addr_t t; + + buf->size = size; + if (size <= max_direct) { + buf->nbufs = 1; + buf->npages = 1; + buf->page_shift = get_order(size) + PAGE_SHIFT; + buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, + size, &t, GFP_KERNEL); + if (!buf->direct.buf) + return -ENOMEM; + + buf->direct.map = t; + + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } + } else { + int i; + + buf->direct.buf = NULL; + buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->npages = buf->nbufs; + buf->page_shift = PAGE_SHIFT; + buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), + GFP_KERNEL); + if (!buf->page_list) + return -ENOMEM; + + for (i = 0; i < buf->nbufs; i++) { + buf->page_list[i].buf = + dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE, + &t, GFP_KERNEL); + if (!buf->page_list[i].buf) + goto err_free; + + buf->page_list[i].map = t; + } + + if (BITS_PER_LONG == 64) { + struct page **pages; + pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL); + if (!pages) + goto err_free; + for (i = 0; i < buf->nbufs; i++) + pages[i] = virt_to_page(buf->page_list[i].buf); + buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!buf->direct.buf) + goto err_free; + } + } + + return 0; + +err_free: + mlx5_buf_free(dev, buf); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(mlx5_buf_alloc); + +void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) +{ + int i; + + if (buf->nbufs == 1) + dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, + buf->direct.map); + else { + if (BITS_PER_LONG == 64 && buf->direct.buf) + vunmap(buf->direct.buf); + + for (i = 0; i < buf->nbufs; i++) + if (buf->page_list[i].buf) + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + buf->page_list[i].buf, + buf->page_list[i].map); + kfree(buf->page_list); + } +} +EXPORT_SYMBOL_GPL(mlx5_buf_free); + +static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device) +{ + struct mlx5_db_pgdir *pgdir; + + pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); + if (!pgdir) + return NULL; + + bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE); + pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, + &pgdir->db_dma, GFP_KERNEL); + if (!pgdir->db_page) { + kfree(pgdir); + return NULL; + } + + return pgdir; +} + +static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, + struct mlx5_db *db) +{ + int offset; + int i; + + i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE); + if (i >= MLX5_DB_PER_PAGE) + return -ENOMEM; + + __clear_bit(i, pgdir->bitmap); + + db->u.pgdir = pgdir; + db->index = i; + offset = db->index * L1_CACHE_BYTES; + db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); + db->dma = pgdir->db_dma + offset; + + return 0; +} + +int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) +{ + struct mlx5_db_pgdir *pgdir; + int ret = 0; + + mutex_lock(&dev->priv.pgdir_mutex); + + list_for_each_entry(pgdir, &dev->priv.pgdir_list, list) + if (!mlx5_alloc_db_from_pgdir(pgdir, db)) + goto out; + + pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev)); + if (!pgdir) { + ret = -ENOMEM; + goto out; + } + + list_add(&pgdir->list, &dev->priv.pgdir_list); + + /* This should never fail -- we just allocated an empty page: */ + WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db)); + +out: + mutex_unlock(&dev->priv.pgdir_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mlx5_db_alloc); + +void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) 
+{ + mutex_lock(&dev->priv.pgdir_mutex); + + __set_bit(db->index, db->u.pgdir->bitmap); + + if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) { + dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, + db->u.pgdir->db_page, db->u.pgdir->db_dma); + list_del(&db->u.pgdir->list); + kfree(db->u.pgdir); + } + + mutex_unlock(&dev->priv.pgdir_mutex); +} +EXPORT_SYMBOL_GPL(mlx5_db_free); + + +void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) +{ + u64 addr; + int i; + + for (i = 0; i < buf->npages; i++) { + if (buf->nbufs == 1) + addr = buf->direct.map + (i << buf->page_shift); + else + addr = buf->page_list[i].map; + + pas[i] = cpu_to_be64(addr); + } +} +EXPORT_SYMBOL_GPL(mlx5_fill_page_array); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c new file mode 100644 index 000000000000..c1c0eef89694 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -0,0 +1,1515 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mlx5_core.h" + +enum { + CMD_IF_REV = 3, +}; + +enum { + CMD_MODE_POLLING, + CMD_MODE_EVENTS +}; + +enum { + NUM_LONG_LISTS = 2, + NUM_MED_LISTS = 64, + LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + + MLX5_CMD_DATA_BLOCK_SIZE, + MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, +}; + +enum { + MLX5_CMD_DELIVERY_STAT_OK = 0x0, + MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, + MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, + MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, + MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, + MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, + MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, + MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, + MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, + MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, + MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, +}; + +enum { + MLX5_CMD_STAT_OK = 0x0, + MLX5_CMD_STAT_INT_ERR = 0x1, + MLX5_CMD_STAT_BAD_OP_ERR = 0x2, + MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, + MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, + MLX5_CMD_STAT_BAD_RES_ERR = 0x5, + MLX5_CMD_STAT_RES_BUSY = 0x6, + MLX5_CMD_STAT_LIM_ERR = 0x8, + MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, + MLX5_CMD_STAT_IX_ERR = 0xa, + MLX5_CMD_STAT_NO_RES_ERR = 0xf, + MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, + MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, + MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, + MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, + MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, +}; + +static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, + struct mlx5_cmd_msg *in, + struct mlx5_cmd_msg *out, + mlx5_cmd_cbk_t cbk, + void *context, int page_queue) +{ + gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL; + struct mlx5_cmd_work_ent *ent; + + ent = kzalloc(sizeof(*ent), alloc_flags); + if (!ent) + return ERR_PTR(-ENOMEM); + + ent->in = in; + ent->out = out; + ent->callback = cbk; + ent->context = context; + ent->cmd = cmd; + ent->page_queue = page_queue; + + return ent; +} + +static u8 alloc_token(struct mlx5_cmd *cmd) +{ + u8 token; + + spin_lock(&cmd->token_lock); + token = cmd->token++ % 255 + 1; + spin_unlock(&cmd->token_lock); + + return token; +} + +static int alloc_ent(struct mlx5_cmd *cmd) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); + if (ret < cmd->max_reg_cmds) + clear_bit(ret, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); + + return ret < cmd->max_reg_cmds ? 
ret : -ENOMEM; +} + +static void free_ent(struct mlx5_cmd *cmd, int idx) +{ + unsigned long flags; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + set_bit(idx, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); +} + +static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) +{ + return cmd->cmd_buf + (idx << cmd->log_stride); +} + +static u8 xor8_buf(void *buf, int len) +{ + u8 *ptr = buf; + u8 sum = 0; + int i; + + for (i = 0; i < len; i++) + sum ^= ptr[i]; + + return sum; +} + +static int verify_block_sig(struct mlx5_cmd_prot_block *block) +{ + if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) + return -EINVAL; + + if (xor8_buf(block, sizeof(*block)) != 0xff) + return -EINVAL; + + return 0; +} + +static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token) +{ + block->token = token; + block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2); + block->sig = ~xor8_buf(block, sizeof(*block) - 1); +} + +static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token) +{ + struct mlx5_cmd_mailbox *next = msg->next; + + while (next) { + calc_block_sig(next->buf, token); + next = next->next; + } +} + +static void set_signature(struct mlx5_cmd_work_ent *ent) +{ + ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); + calc_chain_sig(ent->in, ent->token); + calc_chain_sig(ent->out, ent->token); +} + +static void poll_timeout(struct mlx5_cmd_work_ent *ent) +{ + unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000); + u8 own; + + do { + own = ent->lay->status_own; + if (!(own & CMD_OWNER_HW)) { + ent->ret = 0; + return; + } + usleep_range(5000, 10000); + } while (time_before(jiffies, poll_end)); + + ent->ret = -ETIMEDOUT; +} + +static void free_cmd(struct mlx5_cmd_work_ent *ent) +{ + kfree(ent); +} + + +static int verify_signature(struct mlx5_cmd_work_ent *ent) +{ + struct mlx5_cmd_mailbox *next = ent->out->next; + int err; + u8 sig; + + sig = xor8_buf(ent->lay, sizeof(*ent->lay)); + if (sig != 0xff) + return -EINVAL; + + while (next) { + err = verify_block_sig(next->buf); + if (err) + return err; + + next = next->next; + } + + return 0; +} + +static void dump_buf(void *buf, int size, int data_only, int offset) +{ + __be32 *p = buf; + int i; + + for (i = 0; i < size; i += 16) { + pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), + be32_to_cpu(p[1]), be32_to_cpu(p[2]), + be32_to_cpu(p[3])); + p += 4; + offset += 16; + } + if (!data_only) + pr_debug("\n"); +} + +const char *mlx5_command_str(int command) +{ + switch (command) { + case MLX5_CMD_OP_QUERY_HCA_CAP: + return "QUERY_HCA_CAP"; + + case MLX5_CMD_OP_SET_HCA_CAP: + return "SET_HCA_CAP"; + + case MLX5_CMD_OP_QUERY_ADAPTER: + return "QUERY_ADAPTER"; + + case MLX5_CMD_OP_INIT_HCA: + return "INIT_HCA"; + + case MLX5_CMD_OP_TEARDOWN_HCA: + return "TEARDOWN_HCA"; + + case MLX5_CMD_OP_QUERY_PAGES: + return "QUERY_PAGES"; + + case MLX5_CMD_OP_MANAGE_PAGES: + return "MANAGE_PAGES"; + + case MLX5_CMD_OP_CREATE_MKEY: + return "CREATE_MKEY"; + + case MLX5_CMD_OP_QUERY_MKEY: + return "QUERY_MKEY"; + + case MLX5_CMD_OP_DESTROY_MKEY: + return "DESTROY_MKEY"; + + case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: + return "QUERY_SPECIAL_CONTEXTS"; + + case MLX5_CMD_OP_CREATE_EQ: + return "CREATE_EQ"; + + case MLX5_CMD_OP_DESTROY_EQ: + return "DESTROY_EQ"; + + case MLX5_CMD_OP_QUERY_EQ: + return "QUERY_EQ"; + + case MLX5_CMD_OP_CREATE_CQ: + return "CREATE_CQ"; + + case MLX5_CMD_OP_DESTROY_CQ: + return "DESTROY_CQ"; + + case 
MLX5_CMD_OP_QUERY_CQ: + return "QUERY_CQ"; + + case MLX5_CMD_OP_MODIFY_CQ: + return "MODIFY_CQ"; + + case MLX5_CMD_OP_CREATE_QP: + return "CREATE_QP"; + + case MLX5_CMD_OP_DESTROY_QP: + return "DESTROY_QP"; + + case MLX5_CMD_OP_RST2INIT_QP: + return "RST2INIT_QP"; + + case MLX5_CMD_OP_INIT2RTR_QP: + return "INIT2RTR_QP"; + + case MLX5_CMD_OP_RTR2RTS_QP: + return "RTR2RTS_QP"; + + case MLX5_CMD_OP_RTS2RTS_QP: + return "RTS2RTS_QP"; + + case MLX5_CMD_OP_SQERR2RTS_QP: + return "SQERR2RTS_QP"; + + case MLX5_CMD_OP_2ERR_QP: + return "2ERR_QP"; + + case MLX5_CMD_OP_RTS2SQD_QP: + return "RTS2SQD_QP"; + + case MLX5_CMD_OP_SQD2RTS_QP: + return "SQD2RTS_QP"; + + case MLX5_CMD_OP_2RST_QP: + return "2RST_QP"; + + case MLX5_CMD_OP_QUERY_QP: + return "QUERY_QP"; + + case MLX5_CMD_OP_CONF_SQP: + return "CONF_SQP"; + + case MLX5_CMD_OP_MAD_IFC: + return "MAD_IFC"; + + case MLX5_CMD_OP_INIT2INIT_QP: + return "INIT2INIT_QP"; + + case MLX5_CMD_OP_SUSPEND_QP: + return "SUSPEND_QP"; + + case MLX5_CMD_OP_UNSUSPEND_QP: + return "UNSUSPEND_QP"; + + case MLX5_CMD_OP_SQD2SQD_QP: + return "SQD2SQD_QP"; + + case MLX5_CMD_OP_ALLOC_QP_COUNTER_SET: + return "ALLOC_QP_COUNTER_SET"; + + case MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET: + return "DEALLOC_QP_COUNTER_SET"; + + case MLX5_CMD_OP_QUERY_QP_COUNTER_SET: + return "QUERY_QP_COUNTER_SET"; + + case MLX5_CMD_OP_CREATE_PSV: + return "CREATE_PSV"; + + case MLX5_CMD_OP_DESTROY_PSV: + return "DESTROY_PSV"; + + case MLX5_CMD_OP_QUERY_PSV: + return "QUERY_PSV"; + + case MLX5_CMD_OP_QUERY_SIG_RULE_TABLE: + return "QUERY_SIG_RULE_TABLE"; + + case MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE: + return "QUERY_BLOCK_SIZE_TABLE"; + + case MLX5_CMD_OP_CREATE_SRQ: + return "CREATE_SRQ"; + + case MLX5_CMD_OP_DESTROY_SRQ: + return "DESTROY_SRQ"; + + case MLX5_CMD_OP_QUERY_SRQ: + return "QUERY_SRQ"; + + case MLX5_CMD_OP_ARM_RQ: + return "ARM_RQ"; + + case MLX5_CMD_OP_RESIZE_SRQ: + return "RESIZE_SRQ"; + + case MLX5_CMD_OP_ALLOC_PD: + return "ALLOC_PD"; + + case MLX5_CMD_OP_DEALLOC_PD: + return "DEALLOC_PD"; + + case MLX5_CMD_OP_ALLOC_UAR: + return "ALLOC_UAR"; + + case MLX5_CMD_OP_DEALLOC_UAR: + return "DEALLOC_UAR"; + + case MLX5_CMD_OP_ATTACH_TO_MCG: + return "ATTACH_TO_MCG"; + + case MLX5_CMD_OP_DETACH_FROM_MCG: + return "DETACH_FROM_MCG"; + + case MLX5_CMD_OP_ALLOC_XRCD: + return "ALLOC_XRCD"; + + case MLX5_CMD_OP_DEALLOC_XRCD: + return "DEALLOC_XRCD"; + + case MLX5_CMD_OP_ACCESS_REG: + return "MLX5_CMD_OP_ACCESS_REG"; + + default: return "unknown command opcode"; + } +} + +static void dump_command(struct mlx5_core_dev *dev, + struct mlx5_cmd_work_ent *ent, int input) +{ + u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode); + struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; + struct mlx5_cmd_mailbox *next = msg->next; + int data_only; + int offset = 0; + int dump_len; + + data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); + + if (data_only) + mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, + "dump command data %s(0x%x) %s\n", + mlx5_command_str(op), op, + input ? "INPUT" : "OUTPUT"); + else + mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", + mlx5_command_str(op), op, + input ? 
"INPUT" : "OUTPUT"); + + if (data_only) { + if (input) { + dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); + offset += sizeof(ent->lay->in); + } else { + dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); + offset += sizeof(ent->lay->out); + } + } else { + dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); + offset += sizeof(*ent->lay); + } + + while (next && offset < msg->len) { + if (data_only) { + dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); + dump_buf(next->buf, dump_len, 1, offset); + offset += MLX5_CMD_DATA_BLOCK_SIZE; + } else { + mlx5_core_dbg(dev, "command block:\n"); + dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset); + offset += sizeof(struct mlx5_cmd_prot_block); + } + next = next->next; + } + + if (data_only) + pr_debug("\n"); +} + +static void cmd_work_handler(struct work_struct *work) +{ + struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); + struct mlx5_cmd *cmd = ent->cmd; + struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); + struct mlx5_cmd_layout *lay; + struct semaphore *sem; + + sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; + down(sem); + if (!ent->page_queue) { + ent->idx = alloc_ent(cmd); + if (ent->idx < 0) { + mlx5_core_err(dev, "failed to allocate command entry\n"); + up(sem); + return; + } + } else { + ent->idx = cmd->max_reg_cmds; + } + + ent->token = alloc_token(cmd); + cmd->ent_arr[ent->idx] = ent; + lay = get_inst(cmd, ent->idx); + ent->lay = lay; + memset(lay, 0, sizeof(*lay)); + memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); + if (ent->in->next) + lay->in_ptr = cpu_to_be64(ent->in->next->dma); + lay->inlen = cpu_to_be32(ent->in->len); + if (ent->out->next) + lay->out_ptr = cpu_to_be64(ent->out->next->dma); + lay->outlen = cpu_to_be32(ent->out->len); + lay->type = MLX5_PCI_CMD_XPORT; + lay->token = ent->token; + lay->status_own = CMD_OWNER_HW; + if (!cmd->checksum_disabled) + set_signature(ent); + dump_command(dev, ent, 1); + ktime_get_ts(&ent->ts1); + + /* ring doorbell after the descriptor is valid */ + wmb(); + iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); + mlx5_core_dbg(dev, "write 0x%x to command doorbell\n", 1 << ent->idx); + mmiowb(); + if (cmd->mode == CMD_MODE_POLLING) { + poll_timeout(ent); + /* make sure we read the descriptor after ownership is SW */ + rmb(); + mlx5_cmd_comp_handler(dev, 1UL << ent->idx); + } +} + +static const char *deliv_status_to_str(u8 status) +{ + switch (status) { + case MLX5_CMD_DELIVERY_STAT_OK: + return "no errors"; + case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: + return "signature error"; + case MLX5_CMD_DELIVERY_STAT_TOK_ERR: + return "token error"; + case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: + return "bad block number"; + case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: + return "output pointer not aligned to block size"; + case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: + return "input pointer not aligned to block size"; + case MLX5_CMD_DELIVERY_STAT_FW_ERR: + return "firmware internal error"; + case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: + return "command input length error"; + case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: + return "command ouput length error"; + case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: + return "reserved fields not cleared"; + case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: + return "bad command descriptor type"; + default: + return "unknown status code"; + } +} + +static u16 msg_to_opcode(struct mlx5_cmd_msg *in) +{ + struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr 
*)(in->first.data); + + return be16_to_cpu(hdr->opcode); +} + +static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) +{ + unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); + struct mlx5_cmd *cmd = &dev->cmd; + int err; + + if (cmd->mode == CMD_MODE_POLLING) { + wait_for_completion(&ent->done); + err = ent->ret; + } else { + if (!wait_for_completion_timeout(&ent->done, timeout)) + err = -ETIMEDOUT; + else + err = 0; + } + if (err == -ETIMEDOUT) { + mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); + } + mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, + deliv_status_to_str(ent->status), ent->status); + + return err; +} + +/* Notes: + * 1. Callback functions may not sleep + * 2. page queue commands do not support asynchrous completion + */ +static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, + struct mlx5_cmd_msg *out, mlx5_cmd_cbk_t callback, + void *context, int page_queue, u8 *status) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_work_ent *ent; + ktime_t t1, t2, delta; + struct mlx5_cmd_stats *stats; + int err = 0; + s64 ds; + u16 op; + + if (callback && page_queue) + return -EINVAL; + + ent = alloc_cmd(cmd, in, out, callback, context, page_queue); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + if (!callback) + init_completion(&ent->done); + + INIT_WORK(&ent->work, cmd_work_handler); + if (page_queue) { + cmd_work_handler(&ent->work); + } else if (!queue_work(cmd->wq, &ent->work)) { + mlx5_core_warn(dev, "failed to queue work\n"); + err = -ENOMEM; + goto out_free; + } + + if (!callback) { + err = wait_func(dev, ent); + if (err == -ETIMEDOUT) + goto out; + + t1 = timespec_to_ktime(ent->ts1); + t2 = timespec_to_ktime(ent->ts2); + delta = ktime_sub(t2, t1); + ds = ktime_to_ns(delta); + op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); + if (op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[op]; + spin_lock(&stats->lock); + stats->sum += ds; + ++stats->n; + spin_unlock(&stats->lock); + } + mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, + "fw exec time for %s is %lld nsec\n", + mlx5_command_str(op), ds); + *status = ent->status; + free_cmd(ent); + } + + return err; + +out_free: + free_cmd(ent); +out: + return err; +} + +static ssize_t dbg_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + char lbuf[3]; + int err; + + if (!dbg->in_msg || !dbg->out_msg) + return -ENOMEM; + + if (copy_from_user(lbuf, buf, sizeof(lbuf))) + return -EPERM; + + lbuf[sizeof(lbuf) - 1] = 0; + + if (strcmp(lbuf, "go")) + return -EINVAL; + + err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen); + + return err ? 
err : count; +} + + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = dbg_write, +}; + +static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) +{ + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(to->first.data)); + memcpy(to->first.data, from, copy); + size -= copy; + from += copy; + + next = to->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); + block = next->buf; + memcpy(block->data, from, copy); + from += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) +{ + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(from->first.data)); + memcpy(to, from->first.data, copy); + size -= copy; + to += copy; + + next = from->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); + block = next->buf; + if (xor8_buf(block, sizeof(*block)) != 0xff) + return -EINVAL; + + memcpy(to, block->data, copy); + to += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev, + gfp_t flags) +{ + struct mlx5_cmd_mailbox *mailbox; + + mailbox = kmalloc(sizeof(*mailbox), flags); + if (!mailbox) + return ERR_PTR(-ENOMEM); + + mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags, + &mailbox->dma); + if (!mailbox->buf) { + mlx5_core_dbg(dev, "failed allocation\n"); + kfree(mailbox); + return ERR_PTR(-ENOMEM); + } + memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block)); + mailbox->next = NULL; + + return mailbox; +} + +static void free_cmd_box(struct mlx5_core_dev *dev, + struct mlx5_cmd_mailbox *mailbox) +{ + pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); + kfree(mailbox); +} + +static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, + gfp_t flags, int size) +{ + struct mlx5_cmd_mailbox *tmp, *head = NULL; + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_msg *msg; + int blen; + int err; + int n; + int i; + + msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + blen = size - min_t(int, sizeof(msg->first.data), size); + n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE; + + for (i = 0; i < n; i++) { + tmp = alloc_cmd_box(dev, flags); + if (IS_ERR(tmp)) { + mlx5_core_warn(dev, "failed allocating block\n"); + err = PTR_ERR(tmp); + goto err_alloc; + } + + block = tmp->buf; + tmp->next = head; + block->next = cpu_to_be64(tmp->next ? 
tmp->next->dma : 0); + block->block_num = cpu_to_be32(n - i - 1); + head = tmp; + } + msg->next = head; + msg->len = size; + return msg; + +err_alloc: + while (head) { + tmp = head->next; + free_cmd_box(dev, head); + head = tmp; + } + kfree(msg); + + return ERR_PTR(err); +} + +static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, + struct mlx5_cmd_msg *msg) +{ + struct mlx5_cmd_mailbox *head = msg->next; + struct mlx5_cmd_mailbox *next; + + while (head) { + next = head->next; + free_cmd_box(dev, head); + head = next; + } + kfree(msg); +} + +static ssize_t data_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + void *ptr; + int err; + + if (*pos != 0) + return -EINVAL; + + kfree(dbg->in_msg); + dbg->in_msg = NULL; + dbg->inlen = 0; + + ptr = kzalloc(count, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + if (copy_from_user(ptr, buf, count)) { + err = -EPERM; + goto out; + } + dbg->in_msg = ptr; + dbg->inlen = count; + + *pos = count; + + return count; + +out: + kfree(ptr); + return err; +} + +static ssize_t data_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + int copy; + + if (*pos) + return 0; + + if (!dbg->out_msg) + return -ENOMEM; + + copy = min_t(int, count, dbg->outlen); + if (copy_to_user(buf, dbg->out_msg, copy)) + return -EPERM; + + *pos += copy; + + return copy; +} + +static const struct file_operations dfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = data_write, + .read = data_read, +}; + +static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + char outlen[8]; + int err; + + if (*pos) + return 0; + + err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); + if (err < 0) + return err; + + if (copy_to_user(buf, &outlen, err)) + return -EPERM; + + *pos += err; + + return err; +} + +static ssize_t outlen_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + char outlen_str[8]; + int outlen; + void *ptr; + int err; + + if (*pos != 0 || count > 6) + return -EINVAL; + + kfree(dbg->out_msg); + dbg->out_msg = NULL; + dbg->outlen = 0; + + if (copy_from_user(outlen_str, buf, count)) + return -EPERM; + + outlen_str[7] = 0; + + err = sscanf(outlen_str, "%d", &outlen); + if (err < 0) + return err; + + ptr = kzalloc(outlen, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + dbg->out_msg = ptr; + dbg->outlen = outlen; + + *pos = count; + + return count; +} + +static const struct file_operations olfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = outlen_write, + .read = outlen_read, +}; + +static void set_wqname(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + + snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", + dev_name(&dev->pdev->dev)); +} + +static void clean_debug_files(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + + if (!mlx5_debugfs_root) + return; + + mlx5_cmdif_debugfs_cleanup(dev); + debugfs_remove_recursive(dbg->dbg_root); +} + +static int create_debugfs_files(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + int err = -ENOMEM; + + if (!mlx5_debugfs_root) + 
return 0; + + dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root); + if (!dbg->dbg_root) + return err; + + dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root, + dev, &dfops); + if (!dbg->dbg_in) + goto err_dbg; + + dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root, + dev, &dfops); + if (!dbg->dbg_out) + goto err_dbg; + + dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root, + dev, &olfops); + if (!dbg->dbg_outlen) + goto err_dbg; + + dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root, + &dbg->status); + if (!dbg->dbg_status) + goto err_dbg; + + dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops); + if (!dbg->dbg_run) + goto err_dbg; + + mlx5_cmdif_debugfs_init(dev); + + return 0; + +err_dbg: + clean_debug_files(dev); + return err; +} + +void mlx5_cmd_use_events(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + down(&cmd->pages_sem); + + flush_workqueue(cmd->wq); + + cmd->mode = CMD_MODE_EVENTS; + + up(&cmd->pages_sem); + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + down(&cmd->pages_sem); + + flush_workqueue(cmd->wq); + cmd->mode = CMD_MODE_POLLING; + + up(&cmd->pages_sem); + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_work_ent *ent; + mlx5_cmd_cbk_t callback; + void *context; + int err; + int i; + + for (i = 0; i < (1 << cmd->log_sz); i++) { + if (test_bit(i, &vector)) { + ent = cmd->ent_arr[i]; + ktime_get_ts(&ent->ts2); + memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); + dump_command(dev, ent, 0); + if (!ent->ret) { + if (!cmd->checksum_disabled) + ent->ret = verify_signature(ent); + else + ent->ret = 0; + ent->status = ent->lay->status_own >> 1; + mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", + ent->ret, deliv_status_to_str(ent->status), ent->status); + } + free_ent(cmd, ent->idx); + if (ent->callback) { + callback = ent->callback; + context = ent->context; + err = ent->ret; + free_cmd(ent); + callback(err, context); + } else { + complete(&ent->done); + } + if (ent->page_queue) + up(&cmd->pages_sem); + else + up(&cmd->sem); + } + } +} +EXPORT_SYMBOL(mlx5_cmd_comp_handler); + +static int status_to_err(u8 status) +{ + return status ? 
-1 : 0; /* TBD more meaningful codes */ +} + +static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size) +{ + struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); + struct mlx5_cmd *cmd = &dev->cmd; + struct cache_ent *ent = NULL; + + if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) + ent = &cmd->cache.large; + else if (in_size > 16 && in_size <= MED_LIST_SIZE) + ent = &cmd->cache.med; + + if (ent) { + spin_lock(&ent->lock); + if (!list_empty(&ent->head)) { + msg = list_entry(ent->head.next, typeof(*msg), list); + /* For cached lists, we must explicitly state what is + * the real size + */ + msg->len = in_size; + list_del(&msg->list); + } + spin_unlock(&ent->lock); + } + + if (IS_ERR(msg)) + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, in_size); + + return msg; +} + +static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) +{ + if (msg->cache) { + spin_lock(&msg->cache->lock); + list_add_tail(&msg->list, &msg->cache->head); + spin_unlock(&msg->cache->lock); + } else { + mlx5_free_cmd_msg(dev, msg); + } +} + +static int is_manage_pages(struct mlx5_inbox_hdr *in) +{ + return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; +} + +int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size) +{ + struct mlx5_cmd_msg *inb; + struct mlx5_cmd_msg *outb; + int pages_queue; + int err; + u8 status = 0; + + pages_queue = is_manage_pages(in); + + inb = alloc_msg(dev, in_size); + if (IS_ERR(inb)) { + err = PTR_ERR(inb); + return err; + } + + err = mlx5_copy_to_msg(inb, in, in_size); + if (err) { + mlx5_core_warn(dev, "err %d\n", err); + goto out_in; + } + + outb = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, out_size); + if (IS_ERR(outb)) { + err = PTR_ERR(outb); + goto out_in; + } + + err = mlx5_cmd_invoke(dev, inb, outb, NULL, NULL, pages_queue, &status); + if (err) + goto out_out; + + mlx5_core_dbg(dev, "err %d, status %d\n", err, status); + if (status) { + err = status_to_err(status); + goto out_out; + } + + err = mlx5_copy_from_msg(out, outb, out_size); + +out_out: + mlx5_free_cmd_msg(dev, outb); + +out_in: + free_msg(dev, inb); + return err; +} +EXPORT_SYMBOL(mlx5_cmd_exec); + +static void destroy_msg_cache(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_msg *msg; + struct mlx5_cmd_msg *n; + + list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { + list_del(&msg->list); + mlx5_free_cmd_msg(dev, msg); + } + + list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { + list_del(&msg->list); + mlx5_free_cmd_msg(dev, msg); + } +} + +static int create_msg_cache(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_msg *msg; + int err; + int i; + + spin_lock_init(&cmd->cache.large.lock); + INIT_LIST_HEAD(&cmd->cache.large.head); + spin_lock_init(&cmd->cache.med.lock); + INIT_LIST_HEAD(&cmd->cache.med.head); + + for (i = 0; i < NUM_LONG_LISTS; i++) { + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.large; + list_add_tail(&msg->list, &cmd->cache.large.head); + } + + for (i = 0; i < NUM_MED_LISTS; i++) { + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.med; + list_add_tail(&msg->list, &cmd->cache.med.head); + } + + return 0; + +ex_err: + destroy_msg_cache(dev); + return err; +} + +int mlx5_cmd_init(struct mlx5_core_dev *dev) +{ + int size = sizeof(struct 
mlx5_cmd_prot_block); + int align = roundup_pow_of_two(size); + struct mlx5_cmd *cmd = &dev->cmd; + u32 cmd_h, cmd_l; + u16 cmd_if_rev; + int err; + int i; + + cmd_if_rev = cmdif_rev(dev); + if (cmd_if_rev != CMD_IF_REV) { + dev_err(&dev->pdev->dev, + "Driver cmdif rev(%d) differs from firmware's(%d)\n", + CMD_IF_REV, cmd_if_rev); + return -EINVAL; + } + + cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0); + if (!cmd->pool) + return -ENOMEM; + + cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0); + if (!cmd->cmd_buf) { + err = -ENOMEM; + goto err_free_pool; + } + cmd->dma = dma_map_single(&dev->pdev->dev, cmd->cmd_buf, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(&dev->pdev->dev, cmd->dma)) { + err = -ENOMEM; + goto err_free; + } + + cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; + cmd->log_sz = cmd_l >> 4 & 0xf; + cmd->log_stride = cmd_l & 0xf; + if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { + dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n", + 1 << cmd->log_sz); + err = -EINVAL; + goto err_map; + } + + if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) { + dev_err(&dev->pdev->dev, "command queue size overflow\n"); + err = -EINVAL; + goto err_map; + } + + cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; + cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; + + cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; + if (cmd->cmdif_rev > CMD_IF_REV) { + dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n", + CMD_IF_REV, cmd->cmdif_rev); + err = -ENOTSUPP; + goto err_map; + } + + spin_lock_init(&cmd->alloc_lock); + spin_lock_init(&cmd->token_lock); + for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) + spin_lock_init(&cmd->stats[i].lock); + + sema_init(&cmd->sem, cmd->max_reg_cmds); + sema_init(&cmd->pages_sem, 1); + + cmd_h = (u32)((u64)(cmd->dma) >> 32); + cmd_l = (u32)(cmd->dma); + if (cmd_l & 0xfff) { + dev_err(&dev->pdev->dev, "invalid command queue address\n"); + err = -ENOMEM; + goto err_map; + } + + iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); + iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); + + /* Make sure firmware sees the complete address before we proceed */ + wmb(); + + mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); + + cmd->mode = CMD_MODE_POLLING; + + err = create_msg_cache(dev); + if (err) { + dev_err(&dev->pdev->dev, "failed to create command cache\n"); + goto err_map; + } + + set_wqname(dev); + cmd->wq = create_singlethread_workqueue(cmd->wq_name); + if (!cmd->wq) { + dev_err(&dev->pdev->dev, "failed to create command workqueue\n"); + err = -ENOMEM; + goto err_cache; + } + + err = create_debugfs_files(dev); + if (err) { + err = -ENOMEM; + goto err_wq; + } + + return 0; + +err_wq: + destroy_workqueue(cmd->wq); + +err_cache: + destroy_msg_cache(dev); + +err_map: + dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); +err_free: + free_pages((unsigned long)cmd->cmd_buf, 0); + +err_free_pool: + pci_pool_destroy(cmd->pool); + + return err; +} +EXPORT_SYMBOL(mlx5_cmd_init); + +void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + + clean_debug_files(dev); + destroy_workqueue(cmd->wq); + destroy_msg_cache(dev); + dma_unmap_single(&dev->pdev->dev, cmd->dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)cmd->cmd_buf, 0); + pci_pool_destroy(cmd->pool); +} +EXPORT_SYMBOL(mlx5_cmd_cleanup); + +static const char *cmd_status_str(u8 status) +{ + switch (status) { + case 
MLX5_CMD_STAT_OK: + return "OK"; + case MLX5_CMD_STAT_INT_ERR: + return "internal error"; + case MLX5_CMD_STAT_BAD_OP_ERR: + return "bad operation"; + case MLX5_CMD_STAT_BAD_PARAM_ERR: + return "bad parameter"; + case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: + return "bad system state"; + case MLX5_CMD_STAT_BAD_RES_ERR: + return "bad resource"; + case MLX5_CMD_STAT_RES_BUSY: + return "resource busy"; + case MLX5_CMD_STAT_LIM_ERR: + return "limits exceeded"; + case MLX5_CMD_STAT_BAD_RES_STATE_ERR: + return "bad resource state"; + case MLX5_CMD_STAT_IX_ERR: + return "bad index"; + case MLX5_CMD_STAT_NO_RES_ERR: + return "no resources"; + case MLX5_CMD_STAT_BAD_INP_LEN_ERR: + return "bad input length"; + case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: + return "bad output length"; + case MLX5_CMD_STAT_BAD_QP_STATE_ERR: + return "bad QP state"; + case MLX5_CMD_STAT_BAD_PKT_ERR: + return "bad packet (discarded)"; + case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: + return "bad size too many outstanding CQEs"; + default: + return "unknown status"; + } +} + +int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) +{ + if (!hdr->status) + return 0; + + pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n", + cmd_status_str(hdr->status), hdr->status, + be32_to_cpu(hdr->syndrome)); + + switch (hdr->status) { + case MLX5_CMD_STAT_OK: return 0; + case MLX5_CMD_STAT_INT_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; + case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; + case MLX5_CMD_STAT_LIM_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; + case MLX5_CMD_STAT_IX_ERR: return -EINVAL; + case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; + case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; + default: return -EIO; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c new file mode 100644 index 000000000000..c2d660be6f76 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) +{ + struct mlx5_core_cq *cq; + struct mlx5_cq_table *table = &dev->priv.cq_table; + + spin_lock(&table->lock); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + atomic_inc(&cq->refcount); + spin_unlock(&table->lock); + + if (!cq) { + mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn); + return; + } + + ++cq->arm_sn; + + cq->comp(cq); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) +{ + struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_core_cq *cq; + + spin_lock(&table->lock); + + cq = radix_tree_lookup(&table->tree, cqn); + if (cq) + atomic_inc(&cq->refcount); + + spin_unlock(&table->lock); + + if (!cq) { + mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + + +int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_create_cq_mbox_in *in, int inlen) +{ + int err; + struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_create_cq_mbox_out out; + struct mlx5_destroy_cq_mbox_in din; + struct mlx5_destroy_cq_mbox_out dout; + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ); + memset(&out, 0, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + cq->cqn = be32_to_cpu(out.cqn) & 0xffffff; + cq->cons_index = 0; + cq->arm_sn = 0; + atomic_set(&cq->refcount, 1); + init_completion(&cq->free); + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, cq->cqn, cq); + spin_unlock_irq(&table->lock); + if (err) + goto err_cmd; + + cq->pid = current->pid; + err = mlx5_debug_cq_add(dev, cq); + if (err) + mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n", + cq->cqn); + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); + mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); + return err; +} +EXPORT_SYMBOL(mlx5_core_create_cq); + +int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) +{ + struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_destroy_cq_mbox_in in; + struct mlx5_destroy_cq_mbox_out out; + struct mlx5_core_cq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, cq->cqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn); + return -EINVAL; + } + if (tmp != cq) { + mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn); + return -EINVAL; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(cq->cqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + synchronize_irq(cq->irqn); + + 
mlx5_debug_cq_remove(dev, cq); + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); + wait_for_completion(&cq->free); + + return 0; +} +EXPORT_SYMBOL(mlx5_core_destroy_cq); + +int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_query_cq_mbox_out *out) +{ + struct mlx5_query_cq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, sizeof(*out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ); + in.cqn = cpu_to_be32(cq->cqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_query_cq); + + +int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + int type, struct mlx5_cq_modify_params *params) +{ + return -ENOSYS; +} + +int mlx5_init_cq_table(struct mlx5_core_dev *dev) +{ + struct mlx5_cq_table *table = &dev->priv.cq_table; + int err; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); + err = mlx5_cq_debugfs_init(dev); + + return err; +} + +void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev) +{ + mlx5_cq_debugfs_cleanup(dev); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c new file mode 100644 index 000000000000..5e9cf2b9aaf7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -0,0 +1,587 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +enum { + QP_PID, + QP_STATE, + QP_XPORT, + QP_MTU, + QP_N_RECV, + QP_RECV_SZ, + QP_N_SEND, + QP_LOG_PG_SZ, + QP_RQPN, +}; + +static char *qp_fields[] = { + [QP_PID] = "pid", + [QP_STATE] = "state", + [QP_XPORT] = "transport", + [QP_MTU] = "mtu", + [QP_N_RECV] = "num_recv", + [QP_RECV_SZ] = "rcv_wqe_sz", + [QP_N_SEND] = "num_send", + [QP_LOG_PG_SZ] = "log2_page_sz", + [QP_RQPN] = "remote_qpn", +}; + +enum { + EQ_NUM_EQES, + EQ_INTR, + EQ_LOG_PG_SZ, +}; + +static char *eq_fields[] = { + [EQ_NUM_EQES] = "num_eqes", + [EQ_INTR] = "intr", + [EQ_LOG_PG_SZ] = "log_page_size", +}; + +enum { + CQ_PID, + CQ_NUM_CQES, + CQ_LOG_PG_SZ, +}; + +static char *cq_fields[] = { + [CQ_PID] = "pid", + [CQ_NUM_CQES] = "num_cqes", + [CQ_LOG_PG_SZ] = "log_page_size", +}; + +struct dentry *mlx5_debugfs_root; +EXPORT_SYMBOL(mlx5_debugfs_root); + +void mlx5_register_debugfs(void) +{ + mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL); + if (IS_ERR_OR_NULL(mlx5_debugfs_root)) + mlx5_debugfs_root = NULL; +} + +void mlx5_unregister_debugfs(void) +{ + debugfs_remove(mlx5_debugfs_root); +} + +int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return 0; + + atomic_set(&dev->num_qps, 0); + + dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root); + if (!dev->priv.qp_debugfs) + return -ENOMEM; + + return 0; +} + +void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.qp_debugfs); +} + +int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return 0; + + dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root); + if (!dev->priv.eq_debugfs) + return -ENOMEM; + + return 0; +} + +void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.eq_debugfs); +} + +static ssize_t average_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_cmd_stats *stats; + u64 field = 0; + int ret; + int err; + char tbuf[22]; + + if (*pos) + return 0; + + stats = filp->private_data; + spin_lock(&stats->lock); + if (stats->n) + field = stats->sum / stats->n; + spin_unlock(&stats->lock); + ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); + if (ret > 0) { + err = copy_to_user(buf, tbuf, ret); + if (err) + return err; + } + + *pos += ret; + return ret; +} + + +static ssize_t average_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_cmd_stats *stats; + + stats = filp->private_data; + spin_lock(&stats->lock); + stats->sum = 0; + stats->n = 0; + spin_unlock(&stats->lock); + + *pos += count; + + return count; +} + +static const struct file_operations stats_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = average_read, + .write = average_write, +}; + +int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_stats *stats; + struct dentry **cmd; + const char *namep; + int err; + int i; + + if (!mlx5_debugfs_root) + return 0; + + cmd = &dev->priv.cmdif_debugfs; + *cmd = debugfs_create_dir("commands", dev->priv.dbg_root); + if (!*cmd) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) { + stats = &dev->cmd.stats[i]; + namep = mlx5_command_str(i); + if (strcmp(namep, "unknown command opcode")) { + stats->root = debugfs_create_dir(namep, *cmd); + if (!stats->root) { + mlx5_core_warn(dev, "failed adding 
command %d\n", + i); + err = -ENOMEM; + goto out; + } + + stats->avg = debugfs_create_file("average", 0400, + stats->root, stats, + &stats_fops); + if (!stats->avg) { + mlx5_core_warn(dev, "failed creating debugfs file\n"); + err = -ENOMEM; + goto out; + } + + stats->count = debugfs_create_u64("n", 0400, + stats->root, + &stats->n); + if (!stats->count) { + mlx5_core_warn(dev, "failed creating debugfs file\n"); + err = -ENOMEM; + goto out; + } + } + } + + return 0; +out: + debugfs_remove_recursive(dev->priv.cmdif_debugfs); + return err; +} + +void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.cmdif_debugfs); +} + +int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return 0; + + dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root); + if (!dev->priv.cq_debugfs) + return -ENOMEM; + + return 0; +} + +void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.cq_debugfs); +} + +static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, + int index) +{ + struct mlx5_query_qp_mbox_out *out; + struct mlx5_qp_context *ctx; + u64 param = 0; + int err; + int no_sq; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + err = mlx5_core_qp_query(dev, qp, out, sizeof(*out)); + if (err) { + mlx5_core_warn(dev, "failed to query qp\n"); + goto out; + } + + ctx = &out->ctx; + switch (index) { + case QP_PID: + param = qp->pid; + break; + case QP_STATE: + param = be32_to_cpu(ctx->flags) >> 28; + break; + case QP_XPORT: + param = (be32_to_cpu(ctx->flags) >> 16) & 0xff; + break; + case QP_MTU: + param = ctx->mtu_msgmax >> 5; + break; + case QP_N_RECV: + param = 1 << ((ctx->rq_size_stride >> 3) & 0xf); + break; + case QP_RECV_SZ: + param = 1 << ((ctx->rq_size_stride & 7) + 4); + break; + case QP_N_SEND: + no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15; + if (!no_sq) + param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11); + else + param = 0; + break; + case QP_LOG_PG_SZ: + param = ((cpu_to_be32(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f); + param += 12; + break; + case QP_RQPN: + param = cpu_to_be32(ctx->log_pg_sz_remote_qpn) & 0xffffff; + break; + } + +out: + kfree(out); + return param; +} + +static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + int index) +{ + struct mlx5_query_eq_mbox_out *out; + struct mlx5_eq_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + ctx = &out->ctx; + + err = mlx5_core_eq_query(dev, eq, out, sizeof(*out)); + if (err) { + mlx5_core_warn(dev, "failed to query eq\n"); + goto out; + } + + switch (index) { + case EQ_NUM_EQES: + param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); + break; + case EQ_INTR: + param = ctx->intr; + break; + case EQ_LOG_PG_SZ: + param = (ctx->log_page_size & 0x1f) + 12; + break; + } + +out: + kfree(out); + return param; +} + +static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + int index) +{ + struct mlx5_query_cq_mbox_out *out; + struct mlx5_cq_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + ctx = &out->ctx; + + err = mlx5_core_query_cq(dev, cq, out); + if (err) { + mlx5_core_warn(dev, "failed to query cq\n"); + goto out; + } + + switch (index) { + case CQ_PID: + param = cq->pid; + break; + case CQ_NUM_CQES: + param = 1 << 
((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); + break; + case CQ_LOG_PG_SZ: + param = (ctx->log_pg_sz & 0x1f) + 12; + break; + } + +out: + kfree(out); + return param; +} + +static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_field_desc *desc; + struct mlx5_rsc_debug *d; + char tbuf[18]; + u64 field; + int ret; + int err; + + if (*pos) + return 0; + + desc = filp->private_data; + d = (void *)(desc - desc->i) - sizeof(*d); + switch (d->type) { + case MLX5_DBG_RSC_QP: + field = qp_read_field(d->dev, d->object, desc->i); + break; + + case MLX5_DBG_RSC_EQ: + field = eq_read_field(d->dev, d->object, desc->i); + break; + + case MLX5_DBG_RSC_CQ: + field = cq_read_field(d->dev, d->object, desc->i); + break; + + default: + mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type); + return -EINVAL; + } + + ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); + if (ret > 0) { + err = copy_to_user(buf, tbuf, ret); + if (err) + return err; + } + + *pos += ret; + return ret; +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = dbg_read, +}; + +static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type, + struct dentry *root, struct mlx5_rsc_debug **dbg, + int rsn, char **field, int nfile, void *data) +{ + struct mlx5_rsc_debug *d; + char resn[32]; + int err; + int i; + + d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL); + if (!d) + return -ENOMEM; + + d->dev = dev; + d->object = data; + d->type = type; + sprintf(resn, "0x%x", rsn); + d->root = debugfs_create_dir(resn, root); + if (!d->root) { + err = -ENOMEM; + goto out_free; + } + + for (i = 0; i < nfile; i++) { + d->fields[i].i = i; + d->fields[i].dent = debugfs_create_file(field[i], 0400, + d->root, &d->fields[i], + &fops); + if (!d->fields[i].dent) { + err = -ENOMEM; + goto out_rem; + } + } + *dbg = d; + + return 0; +out_rem: + debugfs_remove_recursive(d->root); + +out_free: + kfree(d); + return err; +} + +static void rem_res_tree(struct mlx5_rsc_debug *d) +{ + debugfs_remove_recursive(d->root); + kfree(d); +} + +int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) +{ + int err; + + if (!mlx5_debugfs_root) + return 0; + + err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs, + &qp->dbg, qp->qpn, qp_fields, + ARRAY_SIZE(qp_fields), qp); + if (err) + qp->dbg = NULL; + + return err; +} + +void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) +{ + if (!mlx5_debugfs_root) + return; + + if (qp->dbg) + rem_res_tree(qp->dbg); +} + + +int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + int err; + + if (!mlx5_debugfs_root) + return 0; + + err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs, + &eq->dbg, eq->eqn, eq_fields, + ARRAY_SIZE(eq_fields), eq); + if (err) + eq->dbg = NULL; + + return err; +} + +void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + if (!mlx5_debugfs_root) + return; + + if (eq->dbg) + rem_res_tree(eq->dbg); +} + +int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) +{ + int err; + + if (!mlx5_debugfs_root) + return 0; + + err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs, + &cq->dbg, cq->cqn, cq_fields, + ARRAY_SIZE(cq_fields), cq); + if (err) + cq->dbg = NULL; + + return err; +} + +void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) +{ + if (!mlx5_debugfs_root) + return; + + if (cq->dbg) + rem_res_tree(cq->dbg); +} 
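A note on the debugfs field descriptors added above: dbg_read() recovers the containing mlx5_rsc_debug purely by pointer arithmetic, stepping back desc->i entries to fields[0] and then back over the fixed part of the structure. The stand-alone sketch below is not part of the patch; the struct layouts are simplified stand-ins for the driver's real mlx5_rsc_debug/mlx5_field_desc definitions, assumed to end in a trailing fields[] array whose entries record their own index. It only illustrates why the recovery in dbg_read() is sound.

#include <stdio.h>
#include <stdlib.h>

struct field_desc {
	void *dent;			/* stands in for the dentry pointer */
	int i;				/* index of this entry in fields[] */
};

struct rsc_debug {
	void *dev;
	void *object;
	int type;
	struct field_desc fields[];	/* flexible array; adds nothing to sizeof */
};

/*
 * Same recovery as dbg_read(): back up desc->i entries to fields[0], then
 * back over the fixed part of the structure.  (The driver does this with
 * void-pointer arithmetic, a GCC extension; char * is used here so the
 * sketch also builds as plain ISO C.)
 */
static struct rsc_debug *desc_to_rsc(struct field_desc *desc)
{
	return (struct rsc_debug *)((char *)(desc - desc->i) -
				    sizeof(struct rsc_debug));
}

int main(void)
{
	int nfields = 3;
	struct rsc_debug *d;
	int i;

	d = calloc(1, sizeof(*d) + nfields * sizeof(d->fields[0]));
	if (!d)
		return 1;

	d->type = 42;
	for (i = 0; i < nfields; i++)
		d->fields[i].i = i;

	/* Every field descriptor leads back to the same parent object. */
	for (i = 0; i < nfields; i++)
		printf("field %d -> parent type %d (%s)\n", i,
		       desc_to_rsc(&d->fields[i])->type,
		       desc_to_rsc(&d->fields[i]) == d ? "ok" : "mismatch");

	free(d);
	return 0;
}

The arithmetic is valid only because the trailing array is the last member, so its offset coincides with the size of the fixed part of the structure; add_res_tree() relies on the same layout when it allocates sizeof(*d) + nfile * sizeof(d->fields[0]) for the descriptor block.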
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c new file mode 100644 index 000000000000..c02cbcfd0fb8 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -0,0 +1,521 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include "mlx5_core.h" + +enum { + MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), + MLX5_EQE_OWNER_INIT_VAL = 0x1, +}; + +enum { + MLX5_EQ_STATE_ARMED = 0x9, + MLX5_EQ_STATE_FIRED = 0xa, + MLX5_EQ_STATE_ALWAYS_ARMED = 0xb, +}; + +enum { + MLX5_NUM_SPARE_EQE = 0x80, + MLX5_NUM_ASYNC_EQE = 0x100, + MLX5_NUM_CMD_EQE = 32, +}; + +enum { + MLX5_EQ_DOORBEL_OFFSET = 0x40, +}; + +#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \ + (1ull << MLX5_EVENT_TYPE_COMM_EST) | \ + (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \ + (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \ + (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \ + (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \ + (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)) + +struct map_eq_in { + u64 mask; + u32 reserved; + u32 unmap_eqn; +}; + +struct cre_des_eq { + u8 reserved[15]; + u8 eqn; +}; + +static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn) +{ + struct mlx5_destroy_eq_mbox_in in; + struct mlx5_destroy_eq_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ); + in.eqn = eqn; + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (!err) + goto ex; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + +ex: + return err; +} + +static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry) +{ + return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE); +} + +static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq) +{ + struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); + + return ((eqe->owner & 1) ^ !!(eq->cons_index & 
eq->nent)) ? NULL : eqe; +} + +static const char *eqe_type_str(u8 type) +{ + switch (type) { + case MLX5_EVENT_TYPE_COMP: + return "MLX5_EVENT_TYPE_COMP"; + case MLX5_EVENT_TYPE_PATH_MIG: + return "MLX5_EVENT_TYPE_PATH_MIG"; + case MLX5_EVENT_TYPE_COMM_EST: + return "MLX5_EVENT_TYPE_COMM_EST"; + case MLX5_EVENT_TYPE_SQ_DRAINED: + return "MLX5_EVENT_TYPE_SQ_DRAINED"; + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: + return "MLX5_EVENT_TYPE_SRQ_LAST_WQE"; + case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: + return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT"; + case MLX5_EVENT_TYPE_CQ_ERROR: + return "MLX5_EVENT_TYPE_CQ_ERROR"; + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: + return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR"; + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: + return "MLX5_EVENT_TYPE_PATH_MIG_FAILED"; + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR"; + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR"; + case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: + return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR"; + case MLX5_EVENT_TYPE_INTERNAL_ERROR: + return "MLX5_EVENT_TYPE_INTERNAL_ERROR"; + case MLX5_EVENT_TYPE_PORT_CHANGE: + return "MLX5_EVENT_TYPE_PORT_CHANGE"; + case MLX5_EVENT_TYPE_GPIO_EVENT: + return "MLX5_EVENT_TYPE_GPIO_EVENT"; + case MLX5_EVENT_TYPE_REMOTE_CONFIG: + return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; + case MLX5_EVENT_TYPE_DB_BF_CONGESTION: + return "MLX5_EVENT_TYPE_DB_BF_CONGESTION"; + case MLX5_EVENT_TYPE_STALL_EVENT: + return "MLX5_EVENT_TYPE_STALL_EVENT"; + case MLX5_EVENT_TYPE_CMD: + return "MLX5_EVENT_TYPE_CMD"; + case MLX5_EVENT_TYPE_PAGE_REQUEST: + return "MLX5_EVENT_TYPE_PAGE_REQUEST"; + default: + return "Unrecognized event"; + } +} + +static enum mlx5_dev_event port_subtype_event(u8 subtype) +{ + switch (subtype) { + case MLX5_PORT_CHANGE_SUBTYPE_DOWN: + return MLX5_DEV_EVENT_PORT_DOWN; + case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: + return MLX5_DEV_EVENT_PORT_UP; + case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: + return MLX5_DEV_EVENT_PORT_INITIALIZED; + case MLX5_PORT_CHANGE_SUBTYPE_LID: + return MLX5_DEV_EVENT_LID_CHANGE; + case MLX5_PORT_CHANGE_SUBTYPE_PKEY: + return MLX5_DEV_EVENT_PKEY_CHANGE; + case MLX5_PORT_CHANGE_SUBTYPE_GUID: + return MLX5_DEV_EVENT_GUID_CHANGE; + case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: + return MLX5_DEV_EVENT_CLIENT_REREG; + } + return -1; +} + +static void eq_update_ci(struct mlx5_eq *eq, int arm) +{ + __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2); + u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); + __raw_writel((__force u32) cpu_to_be32(val), addr); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} + +static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + struct mlx5_eqe *eqe; + int eqes_found = 0; + int set_ci = 0; + u32 cqn; + u32 srqn; + u8 port; + + while ((eqe = next_eqe_sw(eq))) { + /* + * Make sure we read EQ entry contents after we've + * checked the ownership bit. 
+ */ + rmb(); + + mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type)); + switch (eqe->type) { + case MLX5_EVENT_TYPE_COMP: + cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; + mlx5_cq_completion(dev, cqn); + break; + + case MLX5_EVENT_TYPE_PATH_MIG: + case MLX5_EVENT_TYPE_COMM_EST: + case MLX5_EVENT_TYPE_SQ_DRAINED: + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + mlx5_core_dbg(dev, "event %s(%d) arrived\n", + eqe_type_str(eqe->type), eqe->type); + mlx5_qp_event(dev, be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff, + eqe->type); + break; + + case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: + case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: + srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; + mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n", + eqe_type_str(eqe->type), eqe->type, srqn); + mlx5_srq_event(dev, srqn, eqe->type); + break; + + case MLX5_EVENT_TYPE_CMD: + mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector)); + break; + + case MLX5_EVENT_TYPE_PORT_CHANGE: + port = (eqe->data.port.port >> 4) & 0xf; + switch (eqe->sub_type) { + case MLX5_PORT_CHANGE_SUBTYPE_DOWN: + case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: + case MLX5_PORT_CHANGE_SUBTYPE_LID: + case MLX5_PORT_CHANGE_SUBTYPE_PKEY: + case MLX5_PORT_CHANGE_SUBTYPE_GUID: + case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: + case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: + dev->event(dev, port_subtype_event(eqe->sub_type), &port); + break; + default: + mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n", + port, eqe->sub_type); + } + break; + case MLX5_EVENT_TYPE_CQ_ERROR: + cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; + mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n", + cqn, eqe->data.cq_err.syndrome); + mlx5_cq_event(dev, cqn, eqe->type); + break; + + case MLX5_EVENT_TYPE_PAGE_REQUEST: + { + u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); + s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); + + mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); + mlx5_core_req_pages_handler(dev, func_id, npages); + } + break; + + + default: + mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); + break; + } + + ++eq->cons_index; + eqes_found = 1; + ++set_ci; + + /* The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create our EQs with MLX5_NUM_SPARE_EQE extra + * entries, so we must update our consumer index at + * least that often. 
+ */ + if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) { + eq_update_ci(eq, 0); + set_ci = 0; + } + } + + eq_update_ci(eq, 1); + + return eqes_found; +} + +static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr) +{ + struct mlx5_eq *eq = eq_ptr; + struct mlx5_core_dev *dev = eq->dev; + + mlx5_eq_int(dev, eq); + + /* MSI-X vectors always belong to us */ + return IRQ_HANDLED; +} + +static void init_eq_buf(struct mlx5_eq *eq) +{ + struct mlx5_eqe *eqe; + int i; + + for (i = 0; i < eq->nent; i++) { + eqe = get_eqe(eq, i); + eqe->owner = MLX5_EQE_OWNER_INIT_VAL; + } +} + +int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, + int nent, u64 mask, const char *name, struct mlx5_uar *uar) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_create_eq_mbox_in *in; + struct mlx5_create_eq_mbox_out out; + int err; + int inlen; + + eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); + err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE, + &eq->buf); + if (err) + return err; + + init_eq_buf(eq); + + inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages; + in = mlx5_vzalloc(inlen); + if (!in) { + err = -ENOMEM; + goto err_buf; + } + memset(&out, 0, sizeof(out)); + + mlx5_fill_page_array(&eq->buf, in->pas); + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ); + in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index); + in->ctx.intr = vecidx; + in->ctx.log_page_size = PAGE_SHIFT - 12; + in->events_mask = cpu_to_be64(mask); + + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + goto err_in; + + if (out.hdr.status) { + err = mlx5_cmd_status_to_err(&out.hdr); + goto err_in; + } + + eq->eqn = out.eq_number; + err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, + name, eq); + if (err) + goto err_eq; + + eq->irqn = vecidx; + eq->dev = dev; + eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; + + err = mlx5_debug_eq_add(dev, eq); + if (err) + goto err_irq; + + /* EQs are created in ARMED state + */ + eq_update_ci(eq, 1); + + mlx5_vfree(in); + return 0; + +err_irq: + free_irq(table->msix_arr[vecidx].vector, eq); + +err_eq: + mlx5_cmd_destroy_eq(dev, eq->eqn); + +err_in: + mlx5_vfree(in); + +err_buf: + mlx5_buf_free(dev, &eq->buf); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_create_map_eq); + +int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int err; + + mlx5_debug_eq_remove(dev, eq); + free_irq(table->msix_arr[eq->irqn].vector, eq); + err = mlx5_cmd_destroy_eq(dev, eq->eqn); + if (err) + mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", + eq->eqn); + mlx5_buf_free(dev, &eq->buf); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq); + +int mlx5_eq_init(struct mlx5_core_dev *dev) +{ + int err; + + spin_lock_init(&dev->priv.eq_table.lock); + + err = mlx5_eq_debugfs_init(dev); + + return err; +} + + +void mlx5_eq_cleanup(struct mlx5_core_dev *dev) +{ + mlx5_eq_debugfs_cleanup(dev); +} + +int mlx5_start_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int err; + + err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, + MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, + "mlx5_cmd_eq", &dev->priv.uuari.uars[0]); + if (err) { + mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); + return err; + } + + mlx5_cmd_use_events(dev); + + err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC, + MLX5_NUM_ASYNC_EQE, 
MLX5_ASYNC_EVENT_MASK, + "mlx5_async_eq", &dev->priv.uuari.uars[0]); + if (err) { + mlx5_core_warn(dev, "failed to create async EQ %d\n", err); + goto err1; + } + + err = mlx5_create_map_eq(dev, &table->pages_eq, + MLX5_EQ_VEC_PAGES, + dev->caps.max_vf + 1, + 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", + &dev->priv.uuari.uars[0]); + if (err) { + mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); + goto err2; + } + + return err; + +err2: + mlx5_destroy_unmap_eq(dev, &table->async_eq); + +err1: + mlx5_cmd_use_polling(dev); + mlx5_destroy_unmap_eq(dev, &table->cmd_eq); + return err; +} + +int mlx5_stop_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int err; + + err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); + if (err) + return err; + + mlx5_destroy_unmap_eq(dev, &table->async_eq); + mlx5_cmd_use_polling(dev); + + err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq); + if (err) + mlx5_cmd_use_events(dev); + + return err; +} + +int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + struct mlx5_query_eq_mbox_out *out, int outlen) +{ + struct mlx5_query_eq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ); + in.eqn = eq->eqn; + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + err = mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_eq_query); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c new file mode 100644 index 000000000000..72a5222447f5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include "mlx5_core.h" + +int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_query_adapter_mbox_out *out; + struct mlx5_cmd_query_adapter_mbox_in in; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto out_out; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out_out; + } + + memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid)); + +out_out: + kfree(out); + + return err; +} + +int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, + struct mlx5_caps *caps) +{ + struct mlx5_cmd_query_hca_cap_mbox_out *out; + struct mlx5_cmd_query_hca_cap_mbox_in in; + struct mlx5_query_special_ctxs_mbox_out ctx_out; + struct mlx5_query_special_ctxs_mbox_in ctx_in; + int err; + u16 t16; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP); + in.hdr.opmod = cpu_to_be16(0x1); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto out_out; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out_out; + } + + + caps->log_max_eq = out->hca_cap.log_max_eq & 0xf; + caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz; + caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz; + caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq); + caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq); + caps->flags = be64_to_cpu(out->hca_cap.flags); + caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support); + caps->log_max_msg = out->hca_cap.log_max_msg & 0x1f; + caps->num_ports = out->hca_cap.num_ports & 0xf; + caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f; + if (caps->num_ports > MLX5_MAX_PORTS) { + mlx5_core_err(dev, "device has %d ports while the driver supports max %d ports\n", + caps->num_ports, MLX5_MAX_PORTS); + err = -EINVAL; + goto out_out; + } + caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f; + caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f; + caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f; + caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; + caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; + caps->log_max_mcg = out->hca_cap.log_max_mcg; + caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); + caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); + caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); + caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; + t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size); + if (t16 & 0x8000) { + caps->bf_reg_size = 1 << (t16 & 0x1f); + caps->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE; + } else { + caps->bf_reg_size = 0; + caps->bf_regs_per_page = 0; + } + caps->min_page_sz = ~(u32)((1 << out->hca_cap.log_pg_sz) - 1); + + memset(&ctx_in, 0, sizeof(ctx_in)); + memset(&ctx_out, 0, sizeof(ctx_out)); + ctx_in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); + err = mlx5_cmd_exec(dev, &ctx_in, sizeof(ctx_in), + &ctx_out, sizeof(ctx_out)); + if (err) + goto out_out; + + if (ctx_out.hdr.status) + err = mlx5_cmd_status_to_err(&ctx_out.hdr); + + caps->reserved_lkey = be32_to_cpu(ctx_out.reserved_lkey); + +out_out: + kfree(out); + + return err; +} + +int mlx5_cmd_init_hca(struct mlx5_core_dev *dev) +{ + struct 
mlx5_cmd_init_hca_mbox_in in; + struct mlx5_cmd_init_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} + +int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_teardown_hca_mbox_in in; + struct mlx5_cmd_teardown_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c new file mode 100644 index 000000000000..ea4b9bca6d4a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +enum { + MLX5_HEALTH_POLL_INTERVAL = 2 * HZ, + MAX_MISSES = 3, +}; + +enum { + MLX5_HEALTH_SYNDR_FW_ERR = 0x1, + MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7, + MLX5_HEALTH_SYNDR_CRC_ERR = 0x9, + MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa, + MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb, + MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc, + MLX5_HEALTH_SYNDR_EQ_ERR = 0xd, + MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf, +}; + +static DEFINE_SPINLOCK(health_lock); + +static LIST_HEAD(health_list); +static struct work_struct health_work; + +static health_handler_t reg_handler; +int mlx5_register_health_report_handler(health_handler_t handler) +{ + spin_lock_irq(&health_lock); + if (reg_handler) { + spin_unlock_irq(&health_lock); + return -EEXIST; + } + reg_handler = handler; + spin_unlock_irq(&health_lock); + + return 0; +} +EXPORT_SYMBOL(mlx5_register_health_report_handler); + +void mlx5_unregister_health_report_handler(void) +{ + spin_lock_irq(&health_lock); + reg_handler = NULL; + spin_unlock_irq(&health_lock); +} +EXPORT_SYMBOL(mlx5_unregister_health_report_handler); + +static void health_care(struct work_struct *work) +{ + struct mlx5_core_health *health, *n; + struct mlx5_core_dev *dev; + struct mlx5_priv *priv; + LIST_HEAD(tlist); + + spin_lock_irq(&health_lock); + list_splice_init(&health_list, &tlist); + + spin_unlock_irq(&health_lock); + + list_for_each_entry_safe(health, n, &tlist, list) { + priv = container_of(health, struct mlx5_priv, health); + dev = container_of(priv, struct mlx5_core_dev, priv); + mlx5_core_warn(dev, "handling bad device here\n"); + spin_lock_irq(&health_lock); + if (reg_handler) + reg_handler(dev->pdev, health->health, + sizeof(health->health)); + + list_del_init(&health->list); + spin_unlock_irq(&health_lock); + } +} + +static const char *hsynd_str(u8 synd) +{ + switch (synd) { + case MLX5_HEALTH_SYNDR_FW_ERR: + return "firmware internal error"; + case MLX5_HEALTH_SYNDR_IRISC_ERR: + return "irisc not responding"; + case MLX5_HEALTH_SYNDR_CRC_ERR: + return "firmware CRC error"; + case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR: + return "ICM fetch PCI error"; + case MLX5_HEALTH_SYNDR_HW_FTL_ERR: + return "HW fatal error\n"; + case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR: + return "async EQ buffer overrun"; + case MLX5_HEALTH_SYNDR_EQ_ERR: + return "EQ error"; + case MLX5_HEALTH_SYNDR_FFSER_ERR: + return "FFSER error"; + default: + return "unrecognized error"; + } +} + +static void print_health_info(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + struct health_buffer __iomem *h = health->health; + int i; + + for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) + pr_info("assert_var[%d] 0x%08x\n", i, be32_to_cpu(h->assert_var[i])); + + pr_info("assert_exit_ptr 0x%08x\n", be32_to_cpu(h->assert_exit_ptr)); + pr_info("assert_callra 0x%08x\n", be32_to_cpu(h->assert_callra)); + pr_info("fw_ver 0x%08x\n", be32_to_cpu(h->fw_ver)); + pr_info("hw_id 0x%08x\n", be32_to_cpu(h->hw_id)); + pr_info("irisc_index %d\n", h->irisc_index); + pr_info("synd 0x%x: %s\n", h->synd, hsynd_str(h->synd)); + pr_info("ext_sync 0x%04x\n", be16_to_cpu(h->ext_sync)); +} + +static void poll_health(unsigned long data) +{ + struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data; + struct mlx5_core_health *health = &dev->priv.health; + unsigned long next; + u32 count; + + count = ioread32be(health->health_counter); + if (count == health->prev) + ++health->miss_counter; + else + health->miss_counter = 0; + + health->prev = 
count; + if (health->miss_counter == MAX_MISSES) { + mlx5_core_err(dev, "device's health compromised\n"); + print_health_info(dev); + spin_lock_irq(&health_lock); + list_add_tail(&health->list, &health_list); + spin_unlock_irq(&health_lock); + + queue_work(mlx5_core_wq, &health_work); + } else { + get_random_bytes(&next, sizeof(next)); + next %= HZ; + next += jiffies + MLX5_HEALTH_POLL_INTERVAL; + mod_timer(&health->timer, next); + } +} + +void mlx5_start_health_poll(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + + INIT_LIST_HEAD(&health->list); + init_timer(&health->timer); + health->health = &dev->iseg->health; + health->health_counter = &dev->iseg->health_counter; + + health->timer.data = (unsigned long)dev; + health->timer.function = poll_health; + health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL); + add_timer(&health->timer); +} + +void mlx5_stop_health_poll(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + + del_timer_sync(&health->timer); + + spin_lock_irq(&health_lock); + if (!list_empty(&health->list)) + list_del_init(&health->list); + spin_unlock_irq(&health_lock); +} + +void mlx5_health_cleanup(void) +{ +} + +void __init mlx5_health_init(void) +{ + INIT_WORK(&health_work, health_care); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c new file mode 100644 index 000000000000..18d6fd5dd90b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include "mlx5_core.h" + +int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, + u16 opmod, int port) +{ + struct mlx5_mad_ifc_mbox_in *in = NULL; + struct mlx5_mad_ifc_mbox_out *out = NULL; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto out; + } + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC); + in->hdr.opmod = cpu_to_be16(opmod); + in->port = port; + + memcpy(in->data, inb, sizeof(in->data)); + + err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out)); + if (err) + goto out; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out; + } + + memcpy(outb, out->data, sizeof(out->data)); + +out: + kfree(out); + kfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c new file mode 100644 index 000000000000..f21cc397d1bc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -0,0 +1,475 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +#define DRIVER_NAME "mlx5_core" +#define DRIVER_VERSION "1.0" +#define DRIVER_RELDATE "June 2013" + +MODULE_AUTHOR("Eli Cohen "); +MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRIVER_VERSION); + +int mlx5_core_debug_mask; +module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644); +MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. 
Default=0"); + +struct workqueue_struct *mlx5_core_wq; + +static int set_dma_caps(struct pci_dev *pdev) +{ + int err; + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); + return err; + } + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, + "Warning: couldn't set 64-bit consistent PCI DMA mask.\n"); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "Can't set consistent PCI DMA mask, aborting.\n"); + return err; + } + } + + dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); + return err; +} + +static int request_bar(struct pci_dev *pdev) +{ + int err = 0; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Missing registers BAR, aborting.\n"); + return -ENODEV; + } + + err = pci_request_regions(pdev, DRIVER_NAME); + if (err) + dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); + + return err; +} + +static void release_bar(struct pci_dev *pdev) +{ + pci_release_regions(pdev); +} + +static int mlx5_enable_msix(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int num_eqs = 1 << dev->caps.log_max_eq; + int nvec; + int err; + int i; + + nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; + nvec = min_t(int, nvec, num_eqs); + if (nvec <= MLX5_EQ_VEC_COMP_BASE) + return -ENOMEM; + + table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL); + if (!table->msix_arr) + return -ENOMEM; + + for (i = 0; i < nvec; i++) + table->msix_arr[i].entry = i; + +retry: + table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; + err = pci_enable_msix(dev->pdev, table->msix_arr, nvec); + if (err <= 0) { + return err; + } else if (err > 2) { + nvec = err; + goto retry; + } + + mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec); + + return 0; +} + +static void mlx5_disable_msix(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + + pci_disable_msix(dev->pdev); + kfree(table->msix_arr); +} + +struct mlx5_reg_host_endianess { + u8 he; + u8 rsvd[15]; +}; + +static int handle_hca_cap(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL; + struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL; + struct mlx5_cmd_query_hca_cap_mbox_in query_ctx; + struct mlx5_cmd_set_hca_cap_mbox_out set_out; + struct mlx5_profile *prof = dev->profile; + u64 flags; + int csum = 1; + int err; + + memset(&query_ctx, 0, sizeof(query_ctx)); + query_out = kzalloc(sizeof(*query_out), GFP_KERNEL); + if (!query_out) + return -ENOMEM; + + set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL); + if (!set_ctx) { + err = -ENOMEM; + goto query_ex; + } + + query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP); + query_ctx.hdr.opmod = cpu_to_be16(0x1); + err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx), + query_out, sizeof(*query_out)); + if (err) + goto query_ex; + + err = mlx5_cmd_status_to_err(&query_out->hdr); + if (err) { + mlx5_core_warn(dev, "query hca cap failed, %d\n", err); + goto query_ex; + } + + memcpy(&set_ctx->hca_cap, &query_out->hca_cap, + sizeof(set_ctx->hca_cap)); + + if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) { + csum = !!prof->cmdif_csum; + flags = be64_to_cpu(set_ctx->hca_cap.flags); + if (csum) + flags |= 
MLX5_DEV_CAP_FLAG_CMDIF_CSUM; + else + flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; + + set_ctx->hca_cap.flags = cpu_to_be64(flags); + } + + if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) + set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; + + memset(&set_out, 0, sizeof(set_out)); + set_ctx->hca_cap.uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12); + set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP); + err = mlx5_cmd_exec(dev, set_ctx, sizeof(*set_ctx), + &set_out, sizeof(set_out)); + if (err) { + mlx5_core_warn(dev, "set hca cap failed, %d\n", err); + goto query_ex; + } + + err = mlx5_cmd_status_to_err(&set_out.hdr); + if (err) + goto query_ex; + + if (!csum) + dev->cmd.checksum_disabled = 1; + +query_ex: + kfree(query_out); + kfree(set_ctx); + + return err; +} + +static int set_hca_ctrl(struct mlx5_core_dev *dev) +{ + struct mlx5_reg_host_endianess he_in; + struct mlx5_reg_host_endianess he_out; + int err; + + memset(&he_in, 0, sizeof(he_in)); + he_in.he = MLX5_SET_HOST_ENDIANNESS; + err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in), + &he_out, sizeof(he_out), + MLX5_REG_HOST_ENDIANNESS, 0, 1); + return err; +} + +int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) +{ + struct mlx5_priv *priv = &dev->priv; + int err; + + dev->pdev = pdev; + pci_set_drvdata(dev->pdev, dev); + strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN); + priv->name[MLX5_MAX_NAME_LEN - 1] = 0; + + mutex_init(&priv->pgdir_mutex); + INIT_LIST_HEAD(&priv->pgdir_list); + spin_lock_init(&priv->mkey_lock); + + priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); + if (!priv->dbg_root) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n"); + goto err_dbg; + } + + err = request_bar(pdev); + if (err) { + dev_err(&pdev->dev, "error requesting BARs, aborting.\n"); + goto err_disable; + } + + pci_set_master(pdev); + + err = set_dma_caps(pdev); + if (err) { + dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n"); + goto err_clr_master; + } + + dev->iseg_base = pci_resource_start(dev->pdev, 0); + dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg)); + if (!dev->iseg) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n"); + goto err_clr_master; + } + dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev), + fw_rev_min(dev), fw_rev_sub(dev)); + + err = mlx5_cmd_init(dev); + if (err) { + dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); + goto err_unmap; + } + + mlx5_pagealloc_init(dev); + err = set_hca_ctrl(dev); + if (err) { + dev_err(&pdev->dev, "set_hca_ctrl failed\n"); + goto err_pagealloc_cleanup; + } + + err = handle_hca_cap(dev); + if (err) { + dev_err(&pdev->dev, "handle_hca_cap failed\n"); + goto err_pagealloc_cleanup; + } + + err = mlx5_satisfy_startup_pages(dev); + if (err) { + dev_err(&pdev->dev, "failed to allocate startup pages\n"); + goto err_pagealloc_cleanup; + } + + err = mlx5_pagealloc_start(dev); + if (err) { + dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n"); + goto err_reclaim_pages; + } + + err = mlx5_cmd_init_hca(dev); + if (err) { + dev_err(&pdev->dev, "init hca failed\n"); + goto err_pagealloc_stop; + } + + mlx5_start_health_poll(dev); + + err = mlx5_cmd_query_hca_cap(dev, &dev->caps); + if (err) { + dev_err(&pdev->dev, "query hca failed\n"); + goto err_stop_poll; + } + + err = mlx5_cmd_query_adapter(dev); + if (err) { + dev_err(&pdev->dev, "query adapter failed\n"); + 
goto err_stop_poll; + } + + err = mlx5_enable_msix(dev); + if (err) { + dev_err(&pdev->dev, "enable msix failed\n"); + goto err_stop_poll; + } + + err = mlx5_eq_init(dev); + if (err) { + dev_err(&pdev->dev, "failed to initialize eq\n"); + goto disable_msix; + } + + err = mlx5_alloc_uuars(dev, &priv->uuari); + if (err) { + dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); + goto err_eq_cleanup; + } + + err = mlx5_start_eqs(dev); + if (err) { + dev_err(&pdev->dev, "Failed to start pages and async EQs\n"); + goto err_free_uar; + } + + MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); + + mlx5_init_cq_table(dev); + mlx5_init_qp_table(dev); + mlx5_init_srq_table(dev); + + return 0; + +err_free_uar: + mlx5_free_uuars(dev, &priv->uuari); + +err_eq_cleanup: + mlx5_eq_cleanup(dev); + +disable_msix: + mlx5_disable_msix(dev); + +err_stop_poll: + mlx5_stop_health_poll(dev); + mlx5_cmd_teardown_hca(dev); + +err_pagealloc_stop: + mlx5_pagealloc_stop(dev); + +err_reclaim_pages: + mlx5_reclaim_startup_pages(dev); + +err_pagealloc_cleanup: + mlx5_pagealloc_cleanup(dev); + mlx5_cmd_cleanup(dev); + +err_unmap: + iounmap(dev->iseg); + +err_clr_master: + pci_clear_master(dev->pdev); + release_bar(dev->pdev); + +err_disable: + pci_disable_device(dev->pdev); + +err_dbg: + debugfs_remove(priv->dbg_root); + return err; +} +EXPORT_SYMBOL(mlx5_dev_init); + +void mlx5_dev_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_priv *priv = &dev->priv; + + mlx5_cleanup_srq_table(dev); + mlx5_cleanup_qp_table(dev); + mlx5_cleanup_cq_table(dev); + mlx5_stop_eqs(dev); + mlx5_free_uuars(dev, &priv->uuari); + mlx5_eq_cleanup(dev); + mlx5_disable_msix(dev); + mlx5_stop_health_poll(dev); + mlx5_cmd_teardown_hca(dev); + mlx5_pagealloc_stop(dev); + mlx5_reclaim_startup_pages(dev); + mlx5_pagealloc_cleanup(dev); + mlx5_cmd_cleanup(dev); + iounmap(dev->iseg); + pci_clear_master(dev->pdev); + release_bar(dev->pdev); + pci_disable_device(dev->pdev); + debugfs_remove(priv->dbg_root); +} +EXPORT_SYMBOL(mlx5_dev_cleanup); + +static int __init init(void) +{ + int err; + + mlx5_register_debugfs(); + mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq"); + if (!mlx5_core_wq) { + err = -ENOMEM; + goto err_debug; + } + mlx5_health_init(); + + return 0; + + mlx5_health_cleanup(); +err_debug: + mlx5_unregister_debugfs(); + return err; +} + +static void __exit cleanup(void) +{ + mlx5_health_cleanup(); + destroy_workqueue(mlx5_core_wq); + mlx5_unregister_debugfs(); +} + +module_init(init); +module_exit(cleanup); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c new file mode 100644 index 000000000000..44837640bd7c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +struct mlx5_attach_mcg_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + __be32 rsvd; + u8 gid[16]; +}; + +struct mlx5_attach_mcg_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvf[8]; +}; + +struct mlx5_detach_mcg_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + __be32 rsvd; + u8 gid[16]; +}; + +struct mlx5_detach_mcg_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvf[8]; +}; + +int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) +{ + struct mlx5_attach_mcg_mbox_in in; + struct mlx5_attach_mcg_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG); + memcpy(in.gid, mgid, sizeof(*mgid)); + in.qpn = cpu_to_be32(qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_attach_mcg); + +int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) +{ + struct mlx5_detach_mcg_mbox_in in; + struct mlx5_detach_mcg_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG); + memcpy(in.gid, mgid, sizeof(*mgid)); + in.qpn = cpu_to_be32(qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_detach_mcg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h new file mode 100644 index 000000000000..68b74e1ae1b0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_CORE_H__ +#define __MLX5_CORE_H__ + +#include +#include +#include + +extern int mlx5_core_debug_mask; + +#define mlx5_core_dbg(dev, format, arg...) \ +pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ + current->pid, ##arg) + +#define mlx5_core_dbg_mask(dev, mask, format, arg...) \ +do { \ + if ((mask) & mlx5_core_debug_mask) \ + pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +#define mlx5_core_err(dev, format, arg...) \ +pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ + current->pid, ##arg) + +#define mlx5_core_warn(dev, format, arg...) \ +pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ + current->pid, ##arg) + +enum { + MLX5_CMD_DATA, /* print command payload only */ + MLX5_CMD_TIME, /* print command execution time */ +}; + + +int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, + struct mlx5_caps *caps); +int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev); +int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); +int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); + +#endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c new file mode 100644 index 000000000000..5b44e2e46daf --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include "mlx5_core.h" + +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + struct mlx5_create_mkey_mbox_in *in, int inlen) +{ + struct mlx5_create_mkey_mbox_out out; + int err; + u8 key; + + memset(&out, 0, sizeof(out)); + spin_lock(&dev->priv.mkey_lock); + key = dev->priv.mkey_key++; + spin_unlock(&dev->priv.mkey_lock); + in->seg.qpn_mkey7_0 |= cpu_to_be32(key); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) { + mlx5_core_dbg(dev, "cmd exec faile %d\n", err); + return err; + } + + if (out.hdr.status) { + mlx5_core_dbg(dev, "status %d\n", out.hdr.status); + return mlx5_cmd_status_to_err(&out.hdr); + } + + mr->key = mlx5_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key; + mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key); + + return err; +} +EXPORT_SYMBOL(mlx5_core_create_mkey); + +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) +{ + struct mlx5_destroy_mkey_mbox_in in; + struct mlx5_destroy_mkey_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); + in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_destroy_mkey); + +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + struct mlx5_query_mkey_mbox_out *out, int outlen) +{ + struct mlx5_destroy_mkey_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY); + in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_query_mkey); + +int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + u32 *mkey) +{ + struct mlx5_query_special_ctxs_mbox_in in; + struct mlx5_query_special_ctxs_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + *mkey = be32_to_cpu(out.dump_fill_mkey); + + return err; +} +EXPORT_SYMBOL(mlx5_core_dump_fill_mkey); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c new file mode 100644 index 000000000000..f0bf46339b28 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +enum { + MLX5_PAGES_CANT_GIVE = 0, + MLX5_PAGES_GIVE = 1, + MLX5_PAGES_TAKE = 2 +}; + +struct mlx5_pages_req { + struct mlx5_core_dev *dev; + u32 func_id; + s16 npages; + struct work_struct work; +}; + +struct fw_page { + struct rb_node rb_node; + u64 addr; + struct page *page; + u16 func_id; +}; + +struct mlx5_query_pages_inbox { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_query_pages_outbox { + struct mlx5_outbox_hdr hdr; + u8 reserved[2]; + __be16 func_id; + __be16 init_pages; + __be16 num_pages; +}; + +struct mlx5_manage_pages_inbox { + struct mlx5_inbox_hdr hdr; + __be16 rsvd0; + __be16 func_id; + __be16 rsvd1; + __be16 num_entries; + u8 rsvd2[16]; + __be64 pas[0]; +}; + +struct mlx5_manage_pages_outbox { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[2]; + __be16 num_entries; + u8 rsvd1[20]; + __be64 pas[0]; +}; + +static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) +{ + struct rb_root *root = &dev->priv.page_root; + struct rb_node **new = &root->rb_node; + struct rb_node *parent = NULL; + struct fw_page *nfp; + struct fw_page *tfp; + + while (*new) { + parent = *new; + tfp = rb_entry(parent, struct fw_page, rb_node); + if (tfp->addr < addr) + new = &parent->rb_left; + else if (tfp->addr > addr) + new = &parent->rb_right; + else + return -EEXIST; + } + + nfp = kmalloc(sizeof(*nfp), GFP_KERNEL); + if (!nfp) + return -ENOMEM; + + nfp->addr = addr; + nfp->page = page; + nfp->func_id = func_id; + + rb_link_node(&nfp->rb_node, parent, new); + rb_insert_color(&nfp->rb_node, root); + + return 0; +} + +static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) +{ + struct rb_root *root = &dev->priv.page_root; + struct rb_node *tmp = root->rb_node; + struct page *result = NULL; + struct fw_page *tfp; + + while (tmp) { + tfp = rb_entry(tmp, struct fw_page, rb_node); + if (tfp->addr < addr) { + tmp = tmp->rb_left; + } else if (tfp->addr > addr) { + tmp = tmp->rb_right; + } else { + rb_erase(&tfp->rb_node, root); + result = tfp->page; + kfree(tfp); + break; + } + } + + return result; +} + +static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, + s16 *pages, s16 *init_pages) +{ + struct mlx5_query_pages_inbox in; + 
struct mlx5_query_pages_outbox out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + if (pages) + *pages = be16_to_cpu(out.num_pages); + if (init_pages) + *init_pages = be16_to_cpu(out.init_pages); + *func_id = be16_to_cpu(out.func_id); + + return err; +} + +static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, + int notify_fail) +{ + struct mlx5_manage_pages_inbox *in; + struct mlx5_manage_pages_outbox out; + struct page *page; + int inlen; + u64 addr; + int err; + int i; + + inlen = sizeof(*in) + npages * sizeof(in->pas[0]); + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(dev, "vzalloc failed %d\n", inlen); + return -ENOMEM; + } + memset(&out, 0, sizeof(out)); + + for (i = 0; i < npages; i++) { + page = alloc_page(GFP_HIGHUSER); + if (!page) { + err = -ENOMEM; + mlx5_core_warn(dev, "failed to allocate page\n"); + goto out_alloc; + } + addr = dma_map_page(&dev->pdev->dev, page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&dev->pdev->dev, addr)) { + mlx5_core_warn(dev, "failed dma mapping page\n"); + __free_page(page); + err = -ENOMEM; + goto out_alloc; + } + err = insert_page(dev, addr, page, func_id); + if (err) { + mlx5_core_err(dev, "failed to track allocated page\n"); + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(page); + err = -ENOMEM; + goto out_alloc; + } + in->pas[i] = cpu_to_be64(addr); + } + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); + in->func_id = cpu_to_be16(func_id); + in->num_entries = cpu_to_be16(npages); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + mlx5_core_dbg(dev, "err %d\n", err); + if (err) { + mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); + goto out_alloc; + } + dev->priv.fw_pages += npages; + + if (out.hdr.status) { + err = mlx5_cmd_status_to_err(&out.hdr); + if (err) { + mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status); + goto out_alloc; + } + } + + mlx5_core_dbg(dev, "err %d\n", err); + + goto out_free; + +out_alloc: + if (notify_fail) { + memset(in, 0, inlen); + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); + if (mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out))) + mlx5_core_warn(dev, "\n"); + } + for (i--; i >= 0; i--) { + addr = be64_to_cpu(in->pas[i]); + page = remove_page(dev, addr); + if (!page) { + mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n", + addr); + continue; + } + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(page); + } + +out_free: + mlx5_vfree(in); + return err; +} + +static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, + int *nclaimed) +{ + struct mlx5_manage_pages_inbox in; + struct mlx5_manage_pages_outbox *out; + struct page *page; + int num_claimed; + int outlen; + u64 addr; + int err; + int i; + + memset(&in, 0, sizeof(in)); + outlen = sizeof(*out) + npages * sizeof(out->pas[0]); + out = mlx5_vzalloc(outlen); + if (!out) + return -ENOMEM; + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); + in.func_id = cpu_to_be16(func_id); + in.num_entries 
= cpu_to_be16(npages); + mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) { + mlx5_core_err(dev, "failed recliaming pages\n"); + goto out_free; + } + dev->priv.fw_pages -= npages; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out_free; + } + + num_claimed = be16_to_cpu(out->num_entries); + if (nclaimed) + *nclaimed = num_claimed; + + for (i = 0; i < num_claimed; i++) { + addr = be64_to_cpu(out->pas[i]); + page = remove_page(dev, addr); + if (!page) { + mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n", addr); + } else { + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(page); + } + } + +out_free: + mlx5_vfree(out); + return err; +} + +static void pages_work_handler(struct work_struct *work) +{ + struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work); + struct mlx5_core_dev *dev = req->dev; + int err = 0; + + if (req->npages < 0) + err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL); + else if (req->npages > 0) + err = give_pages(dev, req->func_id, req->npages, 1); + + if (err) + mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ? + "reclaim" : "give", err); + + kfree(req); +} + +void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, + s16 npages) +{ + struct mlx5_pages_req *req; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) { + mlx5_core_warn(dev, "failed to allocate pages request\n"); + return; + } + + req->dev = dev; + req->func_id = func_id; + req->npages = npages; + INIT_WORK(&req->work, pages_work_handler); + queue_work(dev->priv.pg_wq, &req->work); +} + +int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev) +{ + s16 uninitialized_var(init_pages); + u16 uninitialized_var(func_id); + int err; + + err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages); + if (err) + return err; + + mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id); + + return give_pages(dev, func_id, init_pages, 0); +} + +static int optimal_reclaimed_pages(void) +{ + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_layout *lay; + int ret; + + ret = (sizeof(lay->in) + sizeof(block->data) - + sizeof(struct mlx5_manage_pages_outbox)) / 8; + + return ret; +} + +int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) +{ + unsigned long end = jiffies + msecs_to_jiffies(5000); + struct fw_page *fwp; + struct rb_node *p; + int err; + + do { + p = rb_first(&dev->priv.page_root); + if (p) { + fwp = rb_entry(p, struct fw_page, rb_node); + err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL); + if (err) { + mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); + return err; + } + } + if (time_after(jiffies, end)) { + mlx5_core_warn(dev, "FW did not return all pages. 
giving up...\n"); + break; + } + } while (p); + + return 0; +} + +void mlx5_pagealloc_init(struct mlx5_core_dev *dev) +{ + dev->priv.page_root = RB_ROOT; +} + +void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) +{ + /* nothing */ +} + +int mlx5_pagealloc_start(struct mlx5_core_dev *dev) +{ + dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); + if (!dev->priv.pg_wq) + return -ENOMEM; + + return 0; +} + +void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) +{ + destroy_workqueue(dev->priv.pg_wq); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c new file mode 100644 index 000000000000..790da5c4ca4f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include "mlx5_core.h" + +struct mlx5_alloc_pd_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_alloc_pd_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; +}; + +struct mlx5_dealloc_pd_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; +}; + +struct mlx5_dealloc_pd_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn) +{ + struct mlx5_alloc_pd_mbox_in in; + struct mlx5_alloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + *pdn = be32_to_cpu(out.pdn) & 0xffffff; + return err; +} +EXPORT_SYMBOL(mlx5_core_alloc_pd); + +int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn) +{ + struct mlx5_dealloc_pd_mbox_in in; + struct mlx5_dealloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD); + in.pdn = cpu_to_be32(pdn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_dealloc_pd); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c new file mode 100644 index 000000000000..f6afe7b5a675 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include "mlx5_core.h" + +int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write) +{ + struct mlx5_access_reg_mbox_in *in = NULL; + struct mlx5_access_reg_mbox_out *out = NULL; + int err = -ENOMEM; + + in = mlx5_vzalloc(sizeof(*in) + size_in); + if (!in) + return -ENOMEM; + + out = mlx5_vzalloc(sizeof(*out) + size_out); + if (!out) + goto ex1; + + memcpy(in->data, data_in, size_in); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG); + in->hdr.opmod = cpu_to_be16(!write); + in->arg = cpu_to_be32(arg); + in->register_id = cpu_to_be16(reg_num); + err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out, + sizeof(out) + size_out); + if (err) + goto ex2; + + if (out->hdr.status) + err = mlx5_cmd_status_to_err(&out->hdr); + + if (!err) + memcpy(data_out, out->data, size_out); + +ex2: + mlx5_vfree(out); +ex1: + mlx5_vfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_access_reg); + + +struct mlx5_reg_pcap { + u8 rsvd0; + u8 port_num; + u8 rsvd1[2]; + __be32 caps_127_96; + __be32 caps_95_64; + __be32 caps_63_32; + __be32 caps_31_0; +}; + +int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps) +{ + struct mlx5_reg_pcap in; + struct mlx5_reg_pcap out; + int err; + + memset(&in, 0, sizeof(in)); + in.caps_127_96 = cpu_to_be32(caps); + in.port_num = port_num; + + err = mlx5_core_access_reg(dev, &in, sizeof(in), &out, + sizeof(out), MLX5_REG_PCAP, 0, 1); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_set_port_caps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c new file mode 100644 index 000000000000..54faf8bfcaf4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -0,0 +1,301 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + +#include +#include +#include +#include +#include + +#include "mlx5_core.h" + +void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + struct mlx5_core_qp *qp; + + spin_lock(&table->lock); + + qp = radix_tree_lookup(&table->tree, qpn); + if (qp) + atomic_inc(&qp->refcount); + + spin_unlock(&table->lock); + + if (!qp) { + mlx5_core_warn(dev, "Async event for bogus QP 0x%x\n", qpn); + return; + } + + qp->event(qp, event_type); + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); +} + +int mlx5_core_create_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp, + struct mlx5_create_qp_mbox_in *in, + int inlen) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + struct mlx5_create_qp_mbox_out out; + struct mlx5_destroy_qp_mbox_in din; + struct mlx5_destroy_qp_mbox_out dout; + int err; + + memset(&dout, 0, sizeof(dout)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); + + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) { + mlx5_core_warn(dev, "ret %d", err); + return err; + } + + if (out.hdr.status) { + pr_warn("current num of QPs 0x%x\n", atomic_read(&dev->num_qps)); + return mlx5_cmd_status_to_err(&out.hdr); + } + + qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; + mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, qp->qpn, qp); + spin_unlock_irq(&table->lock); + if (err) { + mlx5_core_warn(dev, "err %d", err); + goto err_cmd; + } + + err = mlx5_debug_qp_add(dev, qp); + if (err) + mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n", + qp->qpn); + + qp->pid = current->pid; + atomic_set(&qp->refcount, 1); + atomic_inc(&dev->num_qps); + init_completion(&qp->free); + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); + din.qpn = cpu_to_be32(qp->qpn); + mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout)); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_create_qp); + +int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp) +{ + struct mlx5_destroy_qp_mbox_in in; + struct mlx5_destroy_qp_mbox_out out; + struct mlx5_qp_table *table = &dev->priv.qp_table; + unsigned long flags; + int err; + + mlx5_debug_qp_remove(dev, qp); + + spin_lock_irqsave(&table->lock, flags); + radix_tree_delete(&table->tree, qp->qpn); + spin_unlock_irqrestore(&table->lock, flags); + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); + wait_for_completion(&qp->free); + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(qp->qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + atomic_dec(&dev->num_qps); + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp); + +int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, + enum mlx5_qp_state new_state, + struct mlx5_modify_qp_mbox_in *in, int sqd_event, + struct mlx5_core_qp *qp) +{ + static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = { + [MLX5_QP_STATE_RST] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP, + }, + [MLX5_QP_STATE_INIT] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = 
MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP, + [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP, + }, + [MLX5_QP_STATE_RTR] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP, + }, + [MLX5_QP_STATE_RTS] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, + [MLX5_QP_STATE_SQD] = MLX5_CMD_OP_RTS2SQD_QP, + }, + [MLX5_QP_STATE_SQD] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD2RTS_QP, + [MLX5_QP_STATE_SQD] = MLX5_CMD_OP_SQD2SQD_QP, + }, + [MLX5_QP_STATE_SQER] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP, + }, + [MLX5_QP_STATE_ERR] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + } + }; + + struct mlx5_modify_qp_mbox_out out; + int err = 0; + u16 op; + + if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE || + !optab[cur_state][new_state]) + return -EINVAL; + + memset(&out, 0, sizeof(out)); + op = optab[cur_state][new_state]; + in->hdr.opcode = cpu_to_be16(op); + in->qpn = cpu_to_be32(qp->qpn); + err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); + if (err) + return err; + + return mlx5_cmd_status_to_err(&out.hdr); +} +EXPORT_SYMBOL_GPL(mlx5_core_qp_modify); + +void mlx5_init_qp_table(struct mlx5_core_dev *dev) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); + mlx5_qp_debugfs_init(dev); +} + +void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev) +{ + mlx5_qp_debugfs_cleanup(dev); +} + +int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, + struct mlx5_query_qp_mbox_out *out, int outlen) +{ + struct mlx5_query_qp_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP); + in.qpn = cpu_to_be32(qp->qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_qp_query); + +int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) +{ + struct mlx5_alloc_xrcd_mbox_in in; + struct mlx5_alloc_xrcd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + else + *xrcdn = be32_to_cpu(out.xrcdn); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc); + +int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) +{ + struct mlx5_dealloc_xrcd_mbox_in in; + struct mlx5_dealloc_xrcd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD); + in.xrcdn = cpu_to_be32(xrcdn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c 
b/drivers/net/ethernet/mellanox/mlx5/core/srq.c new file mode 100644 index 000000000000..38bce93f8314 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) +{ + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_core_srq *srq; + + spin_lock(&table->lock); + + srq = radix_tree_lookup(&table->tree, srqn); + if (srq) + atomic_inc(&srq->refcount); + + spin_unlock(&table->lock); + + if (!srq) { + mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn); + return; + } + + srq->event(srq, event_type); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); +} + +struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) +{ + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_core_srq *srq; + + spin_lock(&table->lock); + + srq = radix_tree_lookup(&table->tree, srqn); + if (srq) + atomic_inc(&srq->refcount); + + spin_unlock(&table->lock); + + return srq; +} +EXPORT_SYMBOL(mlx5_core_get_srq); + +int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int inlen) +{ + struct mlx5_create_srq_mbox_out out; + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_destroy_srq_mbox_in din; + struct mlx5_destroy_srq_mbox_out dout; + int err; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + srq->srqn = be32_to_cpu(out.srqn) & 0xffffff; + + atomic_set(&srq->refcount, 1); + init_completion(&srq->free); + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, srq->srqn, srq); + spin_unlock_irq(&table->lock); + if (err) { + mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn); + goto err_cmd; + } + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); 
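+	/* The SRQ exists in firmware but could not be tracked in the radix
+	 * tree; destroy it again so driver and HCA state stay consistent.
+	 */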
+ din.srqn = cpu_to_be32(srq->srqn); + din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); + mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); + return err; +} +EXPORT_SYMBOL(mlx5_core_create_srq); + +int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) +{ + struct mlx5_destroy_srq_mbox_in in; + struct mlx5_destroy_srq_mbox_out out; + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_core_srq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, srq->srqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn); + return -EINVAL; + } + if (tmp != srq) { + mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn); + return -EINVAL; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); + wait_for_completion(&srq->free); + + return 0; +} +EXPORT_SYMBOL(mlx5_core_destroy_srq); + +int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out) +{ + struct mlx5_query_srq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, sizeof(*out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_query_srq); + +int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + u16 lwm, int is_srq) +{ + struct mlx5_arm_srq_mbox_in in; + struct mlx5_arm_srq_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); + in.hdr.opmod = cpu_to_be16(!!is_srq); + in.srqn = cpu_to_be32(srq->srqn); + in.lwm = cpu_to_be16(lwm); + + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_arm_srq); + +void mlx5_init_srq_table(struct mlx5_core_dev *dev) +{ + struct mlx5_srq_table *table = &dev->priv.srq_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); +} + +void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev) +{ + /* nothing */ +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c new file mode 100644 index 000000000000..71d4a3937200 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include "mlx5_core.h" + +enum { + NUM_DRIVER_UARS = 4, + NUM_LOW_LAT_UUARS = 4, +}; + + +struct mlx5_alloc_uar_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_alloc_uar_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 uarn; + u8 rsvd[4]; +}; + +struct mlx5_free_uar_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 uarn; + u8 rsvd[4]; +}; + +struct mlx5_free_uar_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) +{ + struct mlx5_alloc_uar_mbox_in in; + struct mlx5_alloc_uar_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + goto ex; + + if (out.hdr.status) { + err = mlx5_cmd_status_to_err(&out.hdr); + goto ex; + } + + *uarn = be32_to_cpu(out.uarn) & 0xffffff; + +ex: + return err; +} +EXPORT_SYMBOL(mlx5_cmd_alloc_uar); + +int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) +{ + struct mlx5_free_uar_mbox_in in; + struct mlx5_free_uar_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR); + in.uarn = cpu_to_be32(uarn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + goto ex; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + +ex: + return err; +} +EXPORT_SYMBOL(mlx5_cmd_free_uar); + +static int need_uuar_lock(int uuarn) +{ + int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; + + if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS) + return 0; + + return 1; +} + +int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) +{ + int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; + struct mlx5_bf *bf; + phys_addr_t addr; + int err; + int i; + + uuari->num_uars = NUM_DRIVER_UARS; + uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS; + + mutex_init(&uuari->lock); + uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL); + if (!uuari->uars) + return -ENOMEM; + + uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL); + if (!uuari->bfs) { + err = -ENOMEM; + goto out_uars; + } + + uuari->bitmap = 
kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap), + GFP_KERNEL); + if (!uuari->bitmap) { + err = -ENOMEM; + goto out_bfs; + } + + uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL); + if (!uuari->count) { + err = -ENOMEM; + goto out_bitmap; + } + + for (i = 0; i < uuari->num_uars; i++) { + err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index); + if (err) + goto out_count; + + addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT); + uuari->uars[i].map = ioremap(addr, PAGE_SIZE); + if (!uuari->uars[i].map) { + mlx5_cmd_free_uar(dev, uuari->uars[i].index); + goto out_count; + } + mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n", + uuari->uars[i].index, uuari->uars[i].map); + } + + for (i = 0; i < tot_uuars; i++) { + bf = &uuari->bfs[i]; + + bf->buf_size = dev->caps.bf_reg_size / 2; + bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; + bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; + bf->reg = NULL; /* Add WC support */ + bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.bf_reg_size + + MLX5_BF_OFFSET; + bf->need_lock = need_uuar_lock(i); + spin_lock_init(&bf->lock); + spin_lock_init(&bf->lock32); + bf->uuarn = i; + } + + return 0; + +out_count: + for (i--; i >= 0; i--) { + iounmap(uuari->uars[i].map); + mlx5_cmd_free_uar(dev, uuari->uars[i].index); + } + kfree(uuari->count); + +out_bitmap: + kfree(uuari->bitmap); + +out_bfs: + kfree(uuari->bfs); + +out_uars: + kfree(uuari->uars); + return err; +} + +int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) +{ + int i = uuari->num_uars; + + for (i--; i >= 0; i--) { + iounmap(uuari->uars[i].map); + mlx5_cmd_free_uar(dev, uuari->uars[i].index); + } + + kfree(uuari->count); + kfree(uuari->bitmap); + kfree(uuari->bfs); + kfree(uuari->uars); + + return 0; +} diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h new file mode 100644 index 000000000000..2826a4b6071e --- /dev/null +++ b/include/linux/mlx5/cmd.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX5_CMD_H +#define MLX5_CMD_H + +#include + +struct manage_pages_layout { + u64 ptr; + u32 reserved; + u16 num_entries; + u16 func_id; +}; + + +struct mlx5_cmd_alloc_uar_imm_out { + u32 rsvd[3]; + u32 uarn; +}; + +#endif /* MLX5_CMD_H */ diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h new file mode 100644 index 000000000000..3db67f73d96d --- /dev/null +++ b/include/linux/mlx5/cq.h @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX5_CORE_CQ_H +#define MLX5_CORE_CQ_H + +#include +#include + + +struct mlx5_core_cq { + u32 cqn; + int cqe_sz; + __be32 *set_ci_db; + __be32 *arm_db; + atomic_t refcount; + struct completion free; + unsigned vector; + int irqn; + void (*comp) (struct mlx5_core_cq *); + void (*event) (struct mlx5_core_cq *, enum mlx5_event); + struct mlx5_uar *uar; + u32 cons_index; + unsigned arm_sn; + struct mlx5_rsc_debug *dbg; + int pid; +}; + + +enum { + MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01, + MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02, + MLX5_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04, + MLX5_CQE_SYNDROME_WR_FLUSH_ERR = 0x05, + MLX5_CQE_SYNDROME_MW_BIND_ERR = 0x06, + MLX5_CQE_SYNDROME_BAD_RESP_ERR = 0x10, + MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11, + MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12, + MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13, + MLX5_CQE_SYNDROME_REMOTE_OP_ERR = 0x14, + MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15, + MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16, + MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22, +}; + +enum { + MLX5_CQE_OWNER_MASK = 1, + MLX5_CQE_REQ = 0, + MLX5_CQE_RESP_WR_IMM = 1, + MLX5_CQE_RESP_SEND = 2, + MLX5_CQE_RESP_SEND_IMM = 3, + MLX5_CQE_RESP_SEND_INV = 4, + MLX5_CQE_RESIZE_CQ = 0xff, /* TBD */ + MLX5_CQE_REQ_ERR = 13, + MLX5_CQE_RESP_ERR = 14, +}; + +enum { + MLX5_CQ_MODIFY_RESEIZE = 0, + MLX5_CQ_MODIFY_MODER = 1, + MLX5_CQ_MODIFY_MAPPING = 2, +}; + +struct mlx5_cq_modify_params { + int type; + union { + struct { + u32 page_offset; + u8 log_cq_size; + } resize; + + struct { + } moder; + + struct { + } mapping; + } params; +}; + +enum { + CQE_SIZE_64 = 0, + CQE_SIZE_128 = 1, +}; + +static inline int cqe_sz_to_mlx_sz(u8 size) +{ + return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128; +} + +static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq) +{ + *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff); +} + +enum { + MLX5_CQ_DB_REQ_NOT_SOL = 1 << 24, + MLX5_CQ_DB_REQ_NOT = 0 << 24 +}; + +static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, + void __iomem *uar_page, + spinlock_t *doorbell_lock) +{ + __be32 doorbell[2]; + u32 sn; + u32 ci; + + sn = cq->arm_sn & 3; + ci = cq->cons_index & 0xffffff; + + *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); + + /* Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. + */ + wmb(); + + doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci); + doorbell[1] = cpu_to_be32(cq->cqn); + + mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock); +} + +int mlx5_init_cq_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev); +int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_create_cq_mbox_in *in, int inlen); +int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); +int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_query_cq_mbox_out *out); +int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + int type, struct mlx5_cq_modify_params *params); +int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); +void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); + +#endif /* MLX5_CORE_CQ_H */ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h new file mode 100644 index 000000000000..51390915e538 --- /dev/null +++ b/include/linux/mlx5/device.h @@ -0,0 +1,893 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_DEVICE_H +#define MLX5_DEVICE_H + +#include +#include + +#if defined(__LITTLE_ENDIAN) +#define MLX5_SET_HOST_ENDIANNESS 0 +#elif defined(__BIG_ENDIAN) +#define MLX5_SET_HOST_ENDIANNESS 0x80 +#else +#error Host endianness not defined +#endif + +enum { + MLX5_MAX_COMMANDS = 32, + MLX5_CMD_DATA_BLOCK_SIZE = 512, + MLX5_PCI_CMD_XPORT = 7, +}; + +enum { + MLX5_EXTENDED_UD_AV = 0x80000000, +}; + +enum { + MLX5_CQ_STATE_ARMED = 9, + MLX5_CQ_STATE_ALWAYS_ARMED = 0xb, + MLX5_CQ_STATE_FIRED = 0xa, +}; + +enum { + MLX5_STAT_RATE_OFFSET = 5, +}; + +enum { + MLX5_INLINE_SEG = 0x80000000, +}; + +enum { + MLX5_PERM_LOCAL_READ = 1 << 2, + MLX5_PERM_LOCAL_WRITE = 1 << 3, + MLX5_PERM_REMOTE_READ = 1 << 4, + MLX5_PERM_REMOTE_WRITE = 1 << 5, + MLX5_PERM_ATOMIC = 1 << 6, + MLX5_PERM_UMR_EN = 1 << 7, +}; + +enum { + MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0, + MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2, + MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3, + MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6, + MLX5_PCIE_CTRL_TPH_MASK = 3 << 4, +}; + +enum { + MLX5_ACCESS_MODE_PA = 0, + MLX5_ACCESS_MODE_MTT = 1, + MLX5_ACCESS_MODE_KLM = 2 +}; + +enum { + MLX5_MKEY_REMOTE_INVAL = 1 << 24, + MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29, + MLX5_MKEY_BSF_EN = 1 << 30, + MLX5_MKEY_LEN64 = 1 << 31, +}; + +enum { + MLX5_EN_RD = (u64)1, + MLX5_EN_WR = (u64)2 +}; + +enum { + MLX5_BF_REGS_PER_PAGE = 4, + MLX5_MAX_UAR_PAGES = 1 << 8, + MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_BF_REGS_PER_PAGE, +}; + +enum { + MLX5_MKEY_MASK_LEN = 1ull << 0, + MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1, + MLX5_MKEY_MASK_START_ADDR = 1ull << 6, + MLX5_MKEY_MASK_PD = 1ull << 7, + MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8, + MLX5_MKEY_MASK_BSF_EN = 1ull << 12, + MLX5_MKEY_MASK_KEY = 1ull << 13, + MLX5_MKEY_MASK_QPN = 1ull << 14, + MLX5_MKEY_MASK_LR = 1ull << 17, + MLX5_MKEY_MASK_LW = 1ull << 18, + MLX5_MKEY_MASK_RR = 1ull << 19, + MLX5_MKEY_MASK_RW = 1ull << 20, + MLX5_MKEY_MASK_A = 1ull << 21, + MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23, + MLX5_MKEY_MASK_FREE = 1ull << 29, +}; + +enum mlx5_event { + MLX5_EVENT_TYPE_COMP = 0x0, + + MLX5_EVENT_TYPE_PATH_MIG = 0x01, + MLX5_EVENT_TYPE_COMM_EST = 0x02, + MLX5_EVENT_TYPE_SQ_DRAINED = 0x03, + MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13, + 
MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14, + + MLX5_EVENT_TYPE_CQ_ERROR = 0x04, + MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, + MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07, + MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, + MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, + MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, + + MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08, + MLX5_EVENT_TYPE_PORT_CHANGE = 0x09, + MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, + MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, + + MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, + MLX5_EVENT_TYPE_STALL_EVENT = 0x1b, + + MLX5_EVENT_TYPE_CMD = 0x0a, + MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb, +}; + +enum { + MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1, + MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4, + MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5, + MLX5_PORT_CHANGE_SUBTYPE_LID = 6, + MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7, + MLX5_PORT_CHANGE_SUBTYPE_GUID = 8, + MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9, +}; + +enum { + MLX5_DEV_CAP_FLAG_RC = 1LL << 0, + MLX5_DEV_CAP_FLAG_UC = 1LL << 1, + MLX5_DEV_CAP_FLAG_UD = 1LL << 2, + MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, + MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6, + MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, + MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, + MLX5_DEV_CAP_FLAG_APM = 1LL << 17, + MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, + MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, + MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, + MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, + MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, + MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, + MLX5_DEV_CAP_FLAG_DCT = 1LL << 41, + MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46, +}; + +enum { + MLX5_OPCODE_NOP = 0x00, + MLX5_OPCODE_SEND_INVAL = 0x01, + MLX5_OPCODE_RDMA_WRITE = 0x08, + MLX5_OPCODE_RDMA_WRITE_IMM = 0x09, + MLX5_OPCODE_SEND = 0x0a, + MLX5_OPCODE_SEND_IMM = 0x0b, + MLX5_OPCODE_RDMA_READ = 0x10, + MLX5_OPCODE_ATOMIC_CS = 0x11, + MLX5_OPCODE_ATOMIC_FA = 0x12, + MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14, + MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15, + MLX5_OPCODE_BIND_MW = 0x18, + MLX5_OPCODE_CONFIG_CMD = 0x1f, + + MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, + MLX5_RECV_OPCODE_SEND = 0x01, + MLX5_RECV_OPCODE_SEND_IMM = 0x02, + MLX5_RECV_OPCODE_SEND_INVAL = 0x03, + + MLX5_CQE_OPCODE_ERROR = 0x1e, + MLX5_CQE_OPCODE_RESIZE = 0x16, + + MLX5_OPCODE_SET_PSV = 0x20, + MLX5_OPCODE_GET_PSV = 0x21, + MLX5_OPCODE_CHECK_PSV = 0x22, + MLX5_OPCODE_RGET_PSV = 0x26, + MLX5_OPCODE_RCHECK_PSV = 0x27, + + MLX5_OPCODE_UMR = 0x25, + +}; + +enum { + MLX5_SET_PORT_RESET_QKEY = 0, + MLX5_SET_PORT_GUID0 = 16, + MLX5_SET_PORT_NODE_GUID = 17, + MLX5_SET_PORT_SYS_GUID = 18, + MLX5_SET_PORT_GID_TABLE = 19, + MLX5_SET_PORT_PKEY_TABLE = 20, +}; + +enum { + MLX5_MAX_PAGE_SHIFT = 31 +}; + +struct mlx5_inbox_hdr { + __be16 opcode; + u8 rsvd[4]; + __be16 opmod; +}; + +struct mlx5_outbox_hdr { + u8 status; + u8 rsvd[3]; + __be32 syndrome; +}; + +struct mlx5_cmd_query_adapter_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_cmd_query_adapter_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[24]; + u8 intapin; + u8 rsvd1[13]; + __be16 vsd_vendor_id; + u8 vsd[208]; + u8 vsd_psid[16]; +}; + +struct mlx5_hca_cap { + u8 rsvd1[16]; + u8 log_max_srq_sz; + u8 log_max_qp_sz; + u8 rsvd2; + u8 log_max_qp; + u8 log_max_strq_sz; + u8 log_max_srqs; + u8 rsvd4[2]; + u8 rsvd5; + u8 log_max_cq_sz; + u8 rsvd6; + u8 log_max_cq; + u8 log_max_eq_sz; + u8 log_max_mkey; + u8 rsvd7; + u8 log_max_eq; + u8 max_indirection; + u8 log_max_mrw_sz; + u8 log_max_bsf_list_sz; + u8 log_max_klm_list_sz; + u8 rsvd_8_0; + u8 log_max_ra_req_dc; + u8 rsvd_8_1; + u8 log_max_ra_res_dc; + u8 rsvd9; + u8 
log_max_ra_req_qp; + u8 rsvd10; + u8 log_max_ra_res_qp; + u8 rsvd11[4]; + __be16 max_qp_count; + __be16 rsvd12; + u8 rsvd13; + u8 local_ca_ack_delay; + u8 rsvd14; + u8 num_ports; + u8 log_max_msg; + u8 rsvd15[3]; + __be16 stat_rate_support; + u8 rsvd16[2]; + __be64 flags; + u8 rsvd17; + u8 uar_sz; + u8 rsvd18; + u8 log_pg_sz; + __be16 bf_log_bf_reg_size; + u8 rsvd19[4]; + __be16 max_desc_sz_sq; + u8 rsvd20[2]; + __be16 max_desc_sz_rq; + u8 rsvd21[2]; + __be16 max_desc_sz_sq_dc; + u8 rsvd22[4]; + __be16 max_qp_mcg; + u8 rsvd23; + u8 log_max_mcg; + u8 rsvd24; + u8 log_max_pd; + u8 rsvd25; + u8 log_max_xrcd; + u8 rsvd26[40]; + __be32 uar_page_sz; + u8 rsvd27[28]; + u8 log_msx_atomic_size_qp; + u8 rsvd28[2]; + u8 log_msx_atomic_size_dc; + u8 rsvd29[76]; +}; + + +struct mlx5_cmd_query_hca_cap_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + + +struct mlx5_cmd_query_hca_cap_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; + struct mlx5_hca_cap hca_cap; +}; + + +struct mlx5_cmd_set_hca_cap_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; + struct mlx5_hca_cap hca_cap; +}; + + +struct mlx5_cmd_set_hca_cap_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; +}; + + +struct mlx5_cmd_init_hca_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd0[2]; + __be16 profile; + u8 rsvd1[4]; +}; + +struct mlx5_cmd_init_hca_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_cmd_teardown_hca_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd0[2]; + __be16 profile; + u8 rsvd1[4]; +}; + +struct mlx5_cmd_teardown_hca_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_cmd_layout { + u8 type; + u8 rsvd0[3]; + __be32 inlen; + __be64 in_ptr; + __be32 in[4]; + __be32 out[4]; + __be64 out_ptr; + __be32 outlen; + u8 token; + u8 sig; + u8 rsvd1; + u8 status_own; +}; + + +struct health_buffer { + __be32 assert_var[5]; + __be32 rsvd0[3]; + __be32 assert_exit_ptr; + __be32 assert_callra; + __be32 rsvd1[2]; + __be32 fw_ver; + __be32 hw_id; + __be32 rsvd2; + u8 irisc_index; + u8 synd; + __be16 ext_sync; +}; + +struct mlx5_init_seg { + __be32 fw_rev; + __be32 cmdif_rev_fw_sub; + __be32 rsvd0[2]; + __be32 cmdq_addr_h; + __be32 cmdq_addr_l_sz; + __be32 cmd_dbell; + __be32 rsvd1[121]; + struct health_buffer health; + __be32 rsvd2[884]; + __be32 health_counter; + __be32 rsvd3[1023]; + __be64 ieee1588_clk; + __be32 ieee1588_clk_type; + __be32 clr_intx; +}; + +struct mlx5_eqe_comp { + __be32 reserved[6]; + __be32 cqn; +}; + +struct mlx5_eqe_qp_srq { + __be32 reserved[6]; + __be32 qp_srq_n; +}; + +struct mlx5_eqe_cq_err { + __be32 cqn; + u8 reserved1[7]; + u8 syndrome; +}; + +struct mlx5_eqe_dropped_packet { +}; + +struct mlx5_eqe_port_state { + u8 reserved0[8]; + u8 port; +}; + +struct mlx5_eqe_gpio { + __be32 reserved0[2]; + __be64 gpio_event; +}; + +struct mlx5_eqe_congestion { + u8 type; + u8 rsvd0; + u8 congestion_level; +}; + +struct mlx5_eqe_stall_vl { + u8 rsvd0[3]; + u8 port_vl; +}; + +struct mlx5_eqe_cmd { + __be32 vector; + __be32 rsvd[6]; +}; + +struct mlx5_eqe_page_req { + u8 rsvd0[2]; + __be16 func_id; + u8 rsvd1[2]; + __be16 num_pages; + __be32 rsvd2[5]; +}; + +union ev_data { + __be32 raw[7]; + struct mlx5_eqe_cmd cmd; + struct mlx5_eqe_comp comp; + struct mlx5_eqe_qp_srq qp_srq; + struct mlx5_eqe_cq_err cq_err; + struct mlx5_eqe_dropped_packet dp; + struct mlx5_eqe_port_state port; + struct mlx5_eqe_gpio gpio; + struct mlx5_eqe_congestion cong; + struct mlx5_eqe_stall_vl stall_vl; + struct mlx5_eqe_page_req req_pages; +} __packed; + +struct mlx5_eqe { + 
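+	/* Event queue entry as written by the HCA. 'type' selects which member
+	 * of the 'data' union is valid; the trailing 'owner' bit is what lets
+	 * software tell freshly written entries from stale ones as the EQ wraps.
+	 */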
u8 rsvd0; + u8 type; + u8 rsvd1; + u8 sub_type; + __be32 rsvd2[7]; + union ev_data data; + __be16 rsvd3; + u8 signature; + u8 owner; +} __packed; + +struct mlx5_cmd_prot_block { + u8 data[MLX5_CMD_DATA_BLOCK_SIZE]; + u8 rsvd0[48]; + __be64 next; + __be32 block_num; + u8 rsvd1; + u8 token; + u8 ctrl_sig; + u8 sig; +}; + +struct mlx5_err_cqe { + u8 rsvd0[32]; + __be32 srqn; + u8 rsvd1[18]; + u8 vendor_err_synd; + u8 syndrome; + __be32 s_wqe_opcode_qpn; + __be16 wqe_counter; + u8 signature; + u8 op_own; +}; + +struct mlx5_cqe64 { + u8 rsvd0[17]; + u8 ml_path; + u8 rsvd20[4]; + __be16 slid; + __be32 flags_rqpn; + u8 rsvd28[4]; + __be32 srqn; + __be32 imm_inval_pkey; + u8 rsvd40[4]; + __be32 byte_cnt; + __be64 timestamp; + __be32 sop_drop_qpn; + __be16 wqe_counter; + u8 signature; + u8 op_own; +}; + +struct mlx5_wqe_srq_next_seg { + u8 rsvd0[2]; + __be16 next_wqe_index; + u8 signature; + u8 rsvd1[11]; +}; + +union mlx5_ext_cqe { + struct ib_grh grh; + u8 inl[64]; +}; + +struct mlx5_cqe128 { + union mlx5_ext_cqe inl_grh; + struct mlx5_cqe64 cqe64; +}; + +struct mlx5_srq_ctx { + u8 state_log_sz; + u8 rsvd0[3]; + __be32 flags_xrcd; + __be32 pgoff_cqn; + u8 rsvd1[4]; + u8 log_pg_sz; + u8 rsvd2[7]; + __be32 pd; + __be16 lwm; + __be16 wqe_cnt; + u8 rsvd3[8]; + __be64 db_record; +}; + +struct mlx5_create_srq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 input_srqn; + u8 rsvd0[4]; + struct mlx5_srq_ctx ctx; + u8 rsvd1[208]; + __be64 pas[0]; +}; + +struct mlx5_create_srq_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 srqn; + u8 rsvd[4]; +}; + +struct mlx5_destroy_srq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 srqn; + u8 rsvd[4]; +}; + +struct mlx5_destroy_srq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_query_srq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 srqn; + u8 rsvd0[4]; +}; + +struct mlx5_query_srq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; + struct mlx5_srq_ctx ctx; + u8 rsvd1[32]; + __be64 pas[0]; +}; + +struct mlx5_arm_srq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 srqn; + __be16 rsvd; + __be16 lwm; +}; + +struct mlx5_arm_srq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_cq_context { + u8 status; + u8 cqe_sz_flags; + u8 st; + u8 rsvd3; + u8 rsvd4[6]; + __be16 page_offset; + __be32 log_sz_usr_page; + __be16 cq_period; + __be16 cq_max_count; + __be16 rsvd20; + __be16 c_eqn; + u8 log_pg_sz; + u8 rsvd25[7]; + __be32 last_notified_index; + __be32 solicit_producer_index; + __be32 consumer_counter; + __be32 producer_counter; + u8 rsvd48[8]; + __be64 db_record_addr; +}; + +struct mlx5_create_cq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 input_cqn; + u8 rsvdx[4]; + struct mlx5_cq_context ctx; + u8 rsvd6[192]; + __be64 pas[0]; +}; + +struct mlx5_create_cq_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 cqn; + u8 rsvd0[4]; +}; + +struct mlx5_destroy_cq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 cqn; + u8 rsvd0[4]; +}; + +struct mlx5_destroy_cq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; +}; + +struct mlx5_query_cq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 cqn; + u8 rsvd0[4]; +}; + +struct mlx5_query_cq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; + struct mlx5_cq_context ctx; + u8 rsvd6[16]; + __be64 pas[0]; +}; + +struct mlx5_eq_context { + u8 status; + u8 ec_oi; + u8 st; + u8 rsvd2[7]; + __be16 page_pffset; + __be32 log_sz_usr_page; + u8 rsvd3[7]; + u8 intr; + u8 log_page_size; + u8 rsvd4[15]; + __be32 consumer_counter; + __be32 produser_counter; + u8 rsvd5[16]; +}; + 
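+/* Command mailboxes for EQ management: all multi-byte fields are big-endian
+ * as seen by the HCA, and pas[0] is a flexible array carrying the DMA
+ * addresses of the pages backing the EQ buffer.
+ */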
+struct mlx5_create_eq_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd0[3]; + u8 input_eqn; + u8 rsvd1[4]; + struct mlx5_eq_context ctx; + u8 rsvd2[8]; + __be64 events_mask; + u8 rsvd3[176]; + __be64 pas[0]; +}; + +struct mlx5_create_eq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[3]; + u8 eq_number; + u8 rsvd1[4]; +}; + +struct mlx5_destroy_eq_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd0[3]; + u8 eqn; + u8 rsvd1[4]; +}; + +struct mlx5_destroy_eq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_map_eq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be64 mask; + u8 mu; + u8 rsvd0[2]; + u8 eqn; + u8 rsvd1[24]; +}; + +struct mlx5_map_eq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_query_eq_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd0[3]; + u8 eqn; + u8 rsvd1[4]; +}; + +struct mlx5_query_eq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; + struct mlx5_eq_context ctx; +}; + +struct mlx5_mkey_seg { + /* This is a two bit field occupying bits 31-30. + * bit 31 is always 0, + * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation + */ + u8 status; + u8 pcie_control; + u8 flags; + u8 version; + __be32 qpn_mkey7_0; + u8 rsvd1[4]; + __be32 flags_pd; + __be64 start_addr; + __be64 len; + __be32 bsfs_octo_size; + u8 rsvd2[16]; + __be32 xlt_oct_size; + u8 rsvd3[3]; + u8 log2_page_size; + u8 rsvd4[4]; +}; + +struct mlx5_query_special_ctxs_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_query_special_ctxs_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 dump_fill_mkey; + __be32 reserved_lkey; +}; + +struct mlx5_create_mkey_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 input_mkey_index; + u8 rsvd0[4]; + struct mlx5_mkey_seg seg; + u8 rsvd1[16]; + __be32 xlat_oct_act_size; + __be32 bsf_coto_act_size; + u8 rsvd2[168]; + __be64 pas[0]; +}; + +struct mlx5_create_mkey_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 mkey; + u8 rsvd[4]; +}; + +struct mlx5_destroy_mkey_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 mkey; + u8 rsvd[4]; +}; + +struct mlx5_destroy_mkey_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_query_mkey_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 mkey; +}; + +struct mlx5_query_mkey_mbox_out { + struct mlx5_outbox_hdr hdr; + __be64 pas[0]; +}; + +struct mlx5_modify_mkey_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 mkey; + __be64 pas[0]; +}; + +struct mlx5_modify_mkey_mbox_out { + struct mlx5_outbox_hdr hdr; +}; + +struct mlx5_dump_mkey_mbox_in { + struct mlx5_inbox_hdr hdr; +}; + +struct mlx5_dump_mkey_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 mkey; +}; + +struct mlx5_mad_ifc_mbox_in { + struct mlx5_inbox_hdr hdr; + __be16 remote_lid; + u8 rsvd0; + u8 port; + u8 rsvd1[4]; + u8 data[256]; +}; + +struct mlx5_mad_ifc_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; + u8 data[256]; +}; + +struct mlx5_access_reg_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd0[2]; + __be16 register_id; + __be32 arg; + __be32 data[0]; +}; + +struct mlx5_access_reg_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; + __be32 data[0]; +}; + +#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) + +enum { + MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0 +}; + +#endif /* MLX5_DEVICE_H */ diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h new file mode 100644 index 000000000000..163a818411e7 --- /dev/null +++ b/include/linux/mlx5/doorbell.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2013, Mellanox Technologies 
inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_DOORBELL_H +#define MLX5_DOORBELL_H + +#define MLX5_BF_OFFSET 0x800 +#define MLX5_CQ_DOORBELL 0x20 + +#if BITS_PER_LONG == 64 +/* Assume that we can just write a 64-bit doorbell atomically. s390 + * actually doesn't have writeq() but S/390 systems don't even have + * PCI so we won't worry about it. + */ + +#define MLX5_DECLARE_DOORBELL_LOCK(name) +#define MLX5_INIT_DOORBELL_LOCK(ptr) do { } while (0) +#define MLX5_GET_DOORBELL_LOCK(ptr) (NULL) + +static inline void mlx5_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + __raw_writeq(*(u64 *)val, dest); +} + +#else + +/* Just fall back to a spinlock to protect the doorbell if + * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit + * MMIO writes. + */ + +#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name; +#define MLX5_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr) +#define MLX5_GET_DOORBELL_LOCK(ptr) (ptr) + +static inline void mlx5_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + unsigned long flags; + + spin_lock_irqsave(doorbell_lock, flags); + __raw_writel((__force u32) val[0], dest); + __raw_writel((__force u32) val[1], dest + 4); + spin_unlock_irqrestore(doorbell_lock, flags); +} + +#endif + +#endif /* MLX5_DOORBELL_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h new file mode 100644 index 000000000000..e47f1e4c9b03 --- /dev/null +++ b/include/linux/mlx5/driver.h @@ -0,0 +1,769 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_DRIVER_H +#define MLX5_DRIVER_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + MLX5_BOARD_ID_LEN = 64, + MLX5_MAX_NAME_LEN = 16, +}; + +enum { + /* one minute for the sake of bringup. Generally, commands must always + * complete and we may need to increase this timeout value + */ + MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000, + MLX5_CMD_WQ_MAX_NAME = 32, +}; + +enum { + CMD_OWNER_SW = 0x0, + CMD_OWNER_HW = 0x1, + CMD_STATUS_SUCCESS = 0, +}; + +enum mlx5_sqp_t { + MLX5_SQP_SMI = 0, + MLX5_SQP_GSI = 1, + MLX5_SQP_IEEE_1588 = 2, + MLX5_SQP_SNIFFER = 3, + MLX5_SQP_SYNC_UMR = 4, +}; + +enum { + MLX5_MAX_PORTS = 2, +}; + +enum { + MLX5_EQ_VEC_PAGES = 0, + MLX5_EQ_VEC_CMD = 1, + MLX5_EQ_VEC_ASYNC = 2, + MLX5_EQ_VEC_COMP_BASE, +}; + +enum { + MLX5_MAX_EQ_NAME = 20 +}; + +enum { + MLX5_ATOMIC_MODE_IB_COMP = 1 << 16, + MLX5_ATOMIC_MODE_CX = 2 << 16, + MLX5_ATOMIC_MODE_8B = 3 << 16, + MLX5_ATOMIC_MODE_16B = 4 << 16, + MLX5_ATOMIC_MODE_32B = 5 << 16, + MLX5_ATOMIC_MODE_64B = 6 << 16, + MLX5_ATOMIC_MODE_128B = 7 << 16, + MLX5_ATOMIC_MODE_256B = 8 << 16, +}; + +enum { + MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, + MLX5_CMD_OP_QUERY_ADAPTER = 0x101, + MLX5_CMD_OP_INIT_HCA = 0x102, + MLX5_CMD_OP_TEARDOWN_HCA = 0x103, + MLX5_CMD_OP_QUERY_PAGES = 0x107, + MLX5_CMD_OP_MANAGE_PAGES = 0x108, + MLX5_CMD_OP_SET_HCA_CAP = 0x109, + + MLX5_CMD_OP_CREATE_MKEY = 0x200, + MLX5_CMD_OP_QUERY_MKEY = 0x201, + MLX5_CMD_OP_DESTROY_MKEY = 0x202, + MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, + + MLX5_CMD_OP_CREATE_EQ = 0x301, + MLX5_CMD_OP_DESTROY_EQ = 0x302, + MLX5_CMD_OP_QUERY_EQ = 0x303, + + MLX5_CMD_OP_CREATE_CQ = 0x400, + MLX5_CMD_OP_DESTROY_CQ = 0x401, + MLX5_CMD_OP_QUERY_CQ = 0x402, + MLX5_CMD_OP_MODIFY_CQ = 0x403, + + MLX5_CMD_OP_CREATE_QP = 0x500, + MLX5_CMD_OP_DESTROY_QP = 0x501, + MLX5_CMD_OP_RST2INIT_QP = 0x502, + MLX5_CMD_OP_INIT2RTR_QP = 0x503, + MLX5_CMD_OP_RTR2RTS_QP = 0x504, + MLX5_CMD_OP_RTS2RTS_QP = 0x505, + MLX5_CMD_OP_SQERR2RTS_QP = 0x506, + MLX5_CMD_OP_2ERR_QP = 0x507, + MLX5_CMD_OP_RTS2SQD_QP = 0x508, + MLX5_CMD_OP_SQD2RTS_QP = 0x509, + MLX5_CMD_OP_2RST_QP = 0x50a, + MLX5_CMD_OP_QUERY_QP = 0x50b, + MLX5_CMD_OP_CONF_SQP = 0x50c, + MLX5_CMD_OP_MAD_IFC = 0x50d, + MLX5_CMD_OP_INIT2INIT_QP = 0x50e, + MLX5_CMD_OP_SUSPEND_QP = 0x50f, + MLX5_CMD_OP_UNSUSPEND_QP = 0x510, + MLX5_CMD_OP_SQD2SQD_QP = 0x511, + MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512, + MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513, + MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514, + + MLX5_CMD_OP_CREATE_PSV = 0x600, + MLX5_CMD_OP_DESTROY_PSV = 0x601, + MLX5_CMD_OP_QUERY_PSV = 0x602, + MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603, + MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604, + + MLX5_CMD_OP_CREATE_SRQ = 0x700, + MLX5_CMD_OP_DESTROY_SRQ = 0x701, + MLX5_CMD_OP_QUERY_SRQ = 0x702, + 
MLX5_CMD_OP_ARM_RQ = 0x703, + MLX5_CMD_OP_RESIZE_SRQ = 0x704, + + MLX5_CMD_OP_ALLOC_PD = 0x800, + MLX5_CMD_OP_DEALLOC_PD = 0x801, + MLX5_CMD_OP_ALLOC_UAR = 0x802, + MLX5_CMD_OP_DEALLOC_UAR = 0x803, + + MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, + MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, + + + MLX5_CMD_OP_ALLOC_XRCD = 0x80e, + MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, + + MLX5_CMD_OP_ACCESS_REG = 0x805, + MLX5_CMD_OP_MAX = 0x810, +}; + +enum { + MLX5_REG_PCAP = 0x5001, + MLX5_REG_PMTU = 0x5003, + MLX5_REG_PTYS = 0x5004, + MLX5_REG_PAOS = 0x5006, + MLX5_REG_PMAOS = 0x5012, + MLX5_REG_PUDE = 0x5009, + MLX5_REG_PMPE = 0x5010, + MLX5_REG_PELC = 0x500e, + MLX5_REG_PMLP = 0, /* TBD */ + MLX5_REG_NODE_DESC = 0x6001, + MLX5_REG_HOST_ENDIANNESS = 0x7004, +}; + +enum dbg_rsc_type { + MLX5_DBG_RSC_QP, + MLX5_DBG_RSC_EQ, + MLX5_DBG_RSC_CQ, +}; + +struct mlx5_field_desc { + struct dentry *dent; + int i; +}; + +struct mlx5_rsc_debug { + struct mlx5_core_dev *dev; + void *object; + enum dbg_rsc_type type; + struct dentry *root; + struct mlx5_field_desc fields[0]; +}; + +enum mlx5_dev_event { + MLX5_DEV_EVENT_SYS_ERROR, + MLX5_DEV_EVENT_PORT_UP, + MLX5_DEV_EVENT_PORT_DOWN, + MLX5_DEV_EVENT_PORT_INITIALIZED, + MLX5_DEV_EVENT_LID_CHANGE, + MLX5_DEV_EVENT_PKEY_CHANGE, + MLX5_DEV_EVENT_GUID_CHANGE, + MLX5_DEV_EVENT_CLIENT_REREG, +}; + +struct mlx5_uuar_info { + struct mlx5_uar *uars; + int num_uars; + int num_low_latency_uuars; + unsigned long *bitmap; + unsigned int *count; + struct mlx5_bf *bfs; + + /* + * protect uuar allocation data structs + */ + struct mutex lock; +}; + +struct mlx5_bf { + void __iomem *reg; + void __iomem *regreg; + int buf_size; + struct mlx5_uar *uar; + unsigned long offset; + int need_lock; + /* protect blue flame buffer selection when needed + */ + spinlock_t lock; + + /* serialize 64 bit writes when done as two 32 bit accesses + */ + spinlock_t lock32; + int uuarn; +}; + +struct mlx5_cmd_first { + __be32 data[4]; +}; + +struct mlx5_cmd_msg { + struct list_head list; + struct cache_ent *cache; + u32 len; + struct mlx5_cmd_first first; + struct mlx5_cmd_mailbox *next; +}; + +struct mlx5_cmd_debug { + struct dentry *dbg_root; + struct dentry *dbg_in; + struct dentry *dbg_out; + struct dentry *dbg_outlen; + struct dentry *dbg_status; + struct dentry *dbg_run; + void *in_msg; + void *out_msg; + u8 status; + u16 inlen; + u16 outlen; +}; + +struct cache_ent { + /* protect block chain allocations + */ + spinlock_t lock; + struct list_head head; +}; + +struct cmd_msg_cache { + struct cache_ent large; + struct cache_ent med; + +}; + +struct mlx5_cmd_stats { + u64 sum; + u64 n; + struct dentry *root; + struct dentry *avg; + struct dentry *count; + /* protect command average calculations */ + spinlock_t lock; +}; + +struct mlx5_cmd { + void *cmd_buf; + dma_addr_t dma; + u16 cmdif_rev; + u8 log_sz; + u8 log_stride; + int max_reg_cmds; + int events; + u32 __iomem *vector; + + /* protect command queue allocations + */ + spinlock_t alloc_lock; + + /* protect token allocations + */ + spinlock_t token_lock; + u8 token; + unsigned long bitmask; + char wq_name[MLX5_CMD_WQ_MAX_NAME]; + struct workqueue_struct *wq; + struct semaphore sem; + struct semaphore pages_sem; + int mode; + struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; + struct pci_pool *pool; + struct mlx5_cmd_debug dbg; + struct cmd_msg_cache cache; + int checksum_disabled; + struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; +}; + +struct mlx5_port_caps { + int gid_table_len; + int pkey_table_len; +}; + +struct mlx5_caps { + u8 log_max_eq; + u8 log_max_cq; + 
u8 log_max_qp; + u8 log_max_mkey; + u8 log_max_pd; + u8 log_max_srq; + u32 max_cqes; + int max_wqes; + int max_sq_desc_sz; + int max_rq_desc_sz; + u64 flags; + u16 stat_rate_support; + int log_max_msg; + int num_ports; + int max_ra_res_qp; + int max_ra_req_qp; + int max_srq_wqes; + int bf_reg_size; + int bf_regs_per_page; + struct mlx5_port_caps port[MLX5_MAX_PORTS]; + u8 ext_port_cap[MLX5_MAX_PORTS]; + int max_vf; + u32 reserved_lkey; + u8 local_ca_ack_delay; + u8 log_max_mcg; + u16 max_qp_mcg; + int min_page_sz; +}; + +struct mlx5_cmd_mailbox { + void *buf; + dma_addr_t dma; + struct mlx5_cmd_mailbox *next; +}; + +struct mlx5_buf_list { + void *buf; + dma_addr_t map; +}; + +struct mlx5_buf { + struct mlx5_buf_list direct; + struct mlx5_buf_list *page_list; + int nbufs; + int npages; + int page_shift; + int size; +}; + +struct mlx5_eq { + struct mlx5_core_dev *dev; + __be32 __iomem *doorbell; + u32 cons_index; + struct mlx5_buf buf; + int size; + u8 irqn; + u8 eqn; + int nent; + u64 mask; + char name[MLX5_MAX_EQ_NAME]; + struct list_head list; + int index; + struct mlx5_rsc_debug *dbg; +}; + + +struct mlx5_core_mr { + u64 iova; + u64 size; + u32 key; + u32 pd; + u32 access; +}; + +struct mlx5_core_srq { + u32 srqn; + int max; + int max_gs; + int max_avail_gather; + int wqe_shift; + void (*event) (struct mlx5_core_srq *, enum mlx5_event); + + atomic_t refcount; + struct completion free; +}; + +struct mlx5_eq_table { + void __iomem *update_ci; + void __iomem *update_arm_ci; + struct list_head *comp_eq_head; + struct mlx5_eq pages_eq; + struct mlx5_eq async_eq; + struct mlx5_eq cmd_eq; + struct msix_entry *msix_arr; + int num_comp_vectors; + /* protect EQs list + */ + spinlock_t lock; +}; + +struct mlx5_uar { + u32 index; + struct list_head bf_list; + unsigned free_bf_bmap; + void __iomem *wc_map; + void __iomem *map; +}; + + +struct mlx5_core_health { + struct health_buffer __iomem *health; + __be32 __iomem *health_counter; + struct timer_list timer; + struct list_head list; + u32 prev; + int miss_counter; +}; + +struct mlx5_cq_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct mlx5_qp_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct mlx5_srq_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct mlx5_priv { + char name[MLX5_MAX_NAME_LEN]; + struct mlx5_eq_table eq_table; + struct mlx5_uuar_info uuari; + MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); + + /* pages stuff */ + struct workqueue_struct *pg_wq; + struct rb_root page_root; + int fw_pages; + int reg_pages; + + struct mlx5_core_health health; + + struct mlx5_srq_table srq_table; + + /* start: qp staff */ + struct mlx5_qp_table qp_table; + struct dentry *qp_debugfs; + struct dentry *eq_debugfs; + struct dentry *cq_debugfs; + struct dentry *cmdif_debugfs; + /* end: qp staff */ + + /* start: cq staff */ + struct mlx5_cq_table cq_table; + /* end: cq staff */ + + /* start: alloc staff */ + struct mutex pgdir_mutex; + struct list_head pgdir_list; + /* end: alloc staff */ + struct dentry *dbg_root; + + /* protect mkey key part */ + spinlock_t mkey_lock; + u8 mkey_key; +}; + +struct mlx5_core_dev { + struct pci_dev *pdev; + u8 rev_id; + char board_id[MLX5_BOARD_ID_LEN]; + struct mlx5_cmd cmd; + struct mlx5_caps caps; + phys_addr_t iseg_base; + struct mlx5_init_seg __iomem *iseg; + void (*event) (struct mlx5_core_dev *dev, + enum mlx5_dev_event event, + void *data); + struct mlx5_priv priv; + struct 
mlx5_profile *profile; + atomic_t num_qps; +}; + +struct mlx5_db { + __be32 *db; + union { + struct mlx5_db_pgdir *pgdir; + struct mlx5_ib_user_db_page *user_page; + } u; + dma_addr_t dma; + int index; +}; + +enum { + MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES, +}; + +enum { + MLX5_COMP_EQ_SIZE = 1024, +}; + +struct mlx5_db_pgdir { + struct list_head list; + DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); + __be32 *db_page; + dma_addr_t db_dma; +}; + +typedef void (*mlx5_cmd_cbk_t)(int status, void *context); + +struct mlx5_cmd_work_ent { + struct mlx5_cmd_msg *in; + struct mlx5_cmd_msg *out; + mlx5_cmd_cbk_t callback; + void *context; + int idx; + struct completion done; + struct mlx5_cmd *cmd; + struct work_struct work; + struct mlx5_cmd_layout *lay; + int ret; + int page_queue; + u8 status; + u8 token; + struct timespec ts1; + struct timespec ts2; +}; + +struct mlx5_pas { + u64 pa; + u8 log_sz; +}; + +static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) +{ + if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1)) + return buf->direct.buf + offset; + else + return buf->page_list[offset >> PAGE_SHIFT].buf + + (offset & (PAGE_SIZE - 1)); +} + +extern struct workqueue_struct *mlx5_core_wq; + +#define STRUCT_FIELD(header, field) \ + .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ + .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field + +struct ib_field { + size_t struct_offset_bytes; + size_t struct_size_bytes; + int offset_bits; + int size_bits; +}; + +static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev) +{ + return pci_get_drvdata(pdev); +} + +extern struct dentry *mlx5_debugfs_root; + +static inline u16 fw_rev_maj(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->fw_rev) & 0xffff; +} + +static inline u16 fw_rev_min(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->fw_rev) >> 16; +} + +static inline u16 fw_rev_sub(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff; +} + +static inline u16 cmdif_rev(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; +} + +static inline void *mlx5_vzalloc(unsigned long size) +{ + void *rtn; + + rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (!rtn) + rtn = vzalloc(size); + return rtn; +} + +static inline void mlx5_vfree(const void *addr) +{ + if (addr && is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + +int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev); +void mlx5_dev_cleanup(struct mlx5_core_dev *dev); +int mlx5_cmd_init(struct mlx5_core_dev *dev); +void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); +void mlx5_cmd_use_events(struct mlx5_core_dev *dev); +void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); +int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); +int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size); +int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); +int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); +int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); +int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); +void mlx5_health_cleanup(void); +void __init mlx5_health_init(void); +void mlx5_start_health_poll(struct mlx5_core_dev *dev); +void mlx5_stop_health_poll(struct mlx5_core_dev *dev); +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, + struct mlx5_buf *buf); +void mlx5_buf_free(struct 
mlx5_core_dev *dev, struct mlx5_buf *buf); +struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, + gfp_t flags, int npages); +void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, + struct mlx5_cmd_mailbox *head); +int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int inlen); +int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); +int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out); +int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + u16 lwm, int is_srq); +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + struct mlx5_create_mkey_mbox_in *in, int inlen); +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr); +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + struct mlx5_query_mkey_mbox_out *out, int outlen); +int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + u32 *mkey); +int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); +int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); +int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, + u16 opmod, int port); +void mlx5_pagealloc_init(struct mlx5_core_dev *dev); +void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); +int mlx5_pagealloc_start(struct mlx5_core_dev *dev); +void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); +void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, + s16 npages); +int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev); +int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); +void mlx5_register_debugfs(void); +void mlx5_unregister_debugfs(void); +int mlx5_eq_init(struct mlx5_core_dev *dev); +void mlx5_eq_cleanup(struct mlx5_core_dev *dev); +void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); +void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); +void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type); +void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); +struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); +void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); +int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, + int nent, u64 mask, const char *name, struct mlx5_uar *uar); +int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +int mlx5_start_eqs(struct mlx5_core_dev *dev); +int mlx5_stop_eqs(struct mlx5_core_dev *dev); +int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); +int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); + +int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write); +int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps); + +int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + struct mlx5_query_eq_mbox_out *out, int outlen); +int 
mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); +void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); + +typedef void (*health_handler_t)(struct pci_dev *pdev, void *buf, int size); +int mlx5_register_health_report_handler(health_handler_t handler); +void mlx5_unregister_health_report_handler(void); +const char *mlx5_command_str(int command); +int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); + +static inline u32 mlx5_mkey_to_idx(u32 mkey) +{ + return mkey >> 8; +} + +static inline u32 mlx5_idx_to_mkey(u32 mkey_idx) +{ + return mkey_idx << 8; +} + +enum { + MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, + MLX5_PROF_MASK_CMDIF_CSUM = (u64)1 << 1, + MLX5_PROF_MASK_MR_CACHE = (u64)1 << 2, +}; + +enum { + MAX_MR_CACHE_ENTRIES = 16, +}; + +struct mlx5_profile { + u64 mask; + u32 log_max_qp; + int cmdif_csum; + struct { + int size; + int limit; + } mr_cache[MAX_MR_CACHE_ENTRIES]; +}; + +#endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h new file mode 100644 index 000000000000..d9e3eacb3a7f --- /dev/null +++ b/include/linux/mlx5/qp.h @@ -0,0 +1,467 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX5_QP_H +#define MLX5_QP_H + +#include +#include + +#define MLX5_INVALID_LKEY 0x100 + +enum mlx5_qp_optpar { + MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, + MLX5_QP_OPTPAR_RRE = 1 << 1, + MLX5_QP_OPTPAR_RAE = 1 << 2, + MLX5_QP_OPTPAR_RWE = 1 << 3, + MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4, + MLX5_QP_OPTPAR_Q_KEY = 1 << 5, + MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, + MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, + MLX5_QP_OPTPAR_SRA_MAX = 1 << 8, + MLX5_QP_OPTPAR_RRA_MAX = 1 << 9, + MLX5_QP_OPTPAR_PM_STATE = 1 << 10, + MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12, + MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13, + MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, + MLX5_QP_OPTPAR_PRI_PORT = 1 << 16, + MLX5_QP_OPTPAR_SRQN = 1 << 18, + MLX5_QP_OPTPAR_CQN_RCV = 1 << 19, + MLX5_QP_OPTPAR_DC_HS = 1 << 20, + MLX5_QP_OPTPAR_DC_KEY = 1 << 21, +}; + +enum mlx5_qp_state { + MLX5_QP_STATE_RST = 0, + MLX5_QP_STATE_INIT = 1, + MLX5_QP_STATE_RTR = 2, + MLX5_QP_STATE_RTS = 3, + MLX5_QP_STATE_SQER = 4, + MLX5_QP_STATE_SQD = 5, + MLX5_QP_STATE_ERR = 6, + MLX5_QP_STATE_SQ_DRAINING = 7, + MLX5_QP_STATE_SUSPENDED = 9, + MLX5_QP_NUM_STATE +}; + +enum { + MLX5_QP_ST_RC = 0x0, + MLX5_QP_ST_UC = 0x1, + MLX5_QP_ST_UD = 0x2, + MLX5_QP_ST_XRC = 0x3, + MLX5_QP_ST_MLX = 0x4, + MLX5_QP_ST_DCI = 0x5, + MLX5_QP_ST_DCT = 0x6, + MLX5_QP_ST_QP0 = 0x7, + MLX5_QP_ST_QP1 = 0x8, + MLX5_QP_ST_RAW_ETHERTYPE = 0x9, + MLX5_QP_ST_RAW_IPV6 = 0xa, + MLX5_QP_ST_SNIFFER = 0xb, + MLX5_QP_ST_SYNC_UMR = 0xe, + MLX5_QP_ST_PTP_1588 = 0xd, + MLX5_QP_ST_REG_UMR = 0xc, + MLX5_QP_ST_MAX +}; + +enum { + MLX5_QP_PM_MIGRATED = 0x3, + MLX5_QP_PM_ARMED = 0x0, + MLX5_QP_PM_REARM = 0x1 +}; + +enum { + MLX5_NON_ZERO_RQ = 0 << 24, + MLX5_SRQ_RQ = 1 << 24, + MLX5_CRQ_RQ = 2 << 24, + MLX5_ZERO_LEN_RQ = 3 << 24 +}; + +enum { + /* params1 */ + MLX5_QP_BIT_SRE = 1 << 15, + MLX5_QP_BIT_SWE = 1 << 14, + MLX5_QP_BIT_SAE = 1 << 13, + /* params2 */ + MLX5_QP_BIT_RRE = 1 << 15, + MLX5_QP_BIT_RWE = 1 << 14, + MLX5_QP_BIT_RAE = 1 << 13, + MLX5_QP_BIT_RIC = 1 << 4, +}; + +enum { + MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2, + MLX5_WQE_CTRL_SOLICITED = 1 << 1, +}; + +enum { + MLX5_SEND_WQE_BB = 64, +}; + +enum { + MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27, + MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, + MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29, + MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30, + MLX5_WQE_FMR_PERM_ATOMIC = 1 << 31 +}; + +enum { + MLX5_FENCE_MODE_NONE = 0 << 5, + MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, + MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, + MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, +}; + +enum { + MLX5_QP_LAT_SENSITIVE = 1 << 28, + MLX5_QP_ENABLE_SIG = 1 << 31, +}; + +enum { + MLX5_RCV_DBR = 0, + MLX5_SND_DBR = 1, +}; + +struct mlx5_wqe_fmr_seg { + __be32 flags; + __be32 mem_key; + __be64 buf_list; + __be64 start_addr; + __be64 reg_len; + __be32 offset; + __be32 page_size; + u32 reserved[2]; +}; + +struct mlx5_wqe_ctrl_seg { + __be32 opmod_idx_opcode; + __be32 qpn_ds; + u8 signature; + u8 rsvd[2]; + u8 fm_ce_se; + __be32 imm; +}; + +struct mlx5_wqe_xrc_seg { + __be32 xrc_srqn; + u8 rsvd[12]; +}; + +struct mlx5_wqe_masked_atomic_seg { + __be64 swap_add; + __be64 compare; + __be64 swap_add_mask; + __be64 compare_mask; +}; + +struct mlx5_av { + union { + struct { + __be32 qkey; + __be32 reserved; + } qkey; + __be64 dc_key; + } key; + __be32 dqp_dct; + u8 stat_rate_sl; + u8 fl_mlid; + __be16 rlid; + u8 reserved0[10]; + u8 tclass; + u8 hop_limit; + __be32 grh_gid_fl; + u8 rgid[16]; +}; + +struct mlx5_wqe_datagram_seg { + struct mlx5_av av; +}; + +struct mlx5_wqe_raddr_seg { + __be64 raddr; + __be32 
rkey; + u32 reserved; +}; + +struct mlx5_wqe_atomic_seg { + __be64 swap_add; + __be64 compare; +}; + +struct mlx5_wqe_data_seg { + __be32 byte_count; + __be32 lkey; + __be64 addr; +}; + +struct mlx5_wqe_umr_ctrl_seg { + u8 flags; + u8 rsvd0[3]; + __be16 klm_octowords; + __be16 bsf_octowords; + __be64 mkey_mask; + u8 rsvd1[32]; +}; + +struct mlx5_seg_set_psv { + __be32 psv_num; + __be16 syndrome; + __be16 status; + __be32 transient_sig; + __be32 ref_tag; +}; + +struct mlx5_seg_get_psv { + u8 rsvd[19]; + u8 num_psv; + __be32 l_key; + __be64 va; + __be32 psv_index[4]; +}; + +struct mlx5_seg_check_psv { + u8 rsvd0[2]; + __be16 err_coalescing_op; + u8 rsvd1[2]; + __be16 xport_err_op; + u8 rsvd2[2]; + __be16 xport_err_mask; + u8 rsvd3[7]; + u8 num_psv; + __be32 l_key; + __be64 va; + __be32 psv_index[4]; +}; + +struct mlx5_rwqe_sig { + u8 rsvd0[4]; + u8 signature; + u8 rsvd1[11]; +}; + +struct mlx5_wqe_signature_seg { + u8 rsvd0[4]; + u8 signature; + u8 rsvd1[11]; +}; + +struct mlx5_wqe_inline_seg { + __be32 byte_count; +}; + +struct mlx5_core_qp { + void (*event) (struct mlx5_core_qp *, int); + int qpn; + atomic_t refcount; + struct completion free; + struct mlx5_rsc_debug *dbg; + int pid; +}; + +struct mlx5_qp_path { + u8 fl; + u8 rsvd3; + u8 free_ar; + u8 pkey_index; + u8 rsvd0; + u8 grh_mlid; + __be16 rlid; + u8 ackto_lt; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 tclass_flowlabel; + u8 rgid[16]; + u8 rsvd1[4]; + u8 sl; + u8 port; + u8 rsvd2[6]; +}; + +struct mlx5_qp_context { + __be32 flags; + __be32 flags_pd; + u8 mtu_msgmax; + u8 rq_size_stride; + __be16 sq_crq_size; + __be32 qp_counter_set_usr_page; + __be32 wire_qpn; + __be32 log_pg_sz_remote_qpn; + struct mlx5_qp_path pri_path; + struct mlx5_qp_path alt_path; + __be32 params1; + u8 reserved2[4]; + __be32 next_send_psn; + __be32 cqn_send; + u8 reserved3[8]; + __be32 last_acked_psn; + __be32 ssn; + __be32 params2; + __be32 rnr_nextrecvpsn; + __be32 xrcd; + __be32 cqn_recv; + __be64 db_rec_addr; + __be32 qkey; + __be32 rq_type_srqn; + __be32 rmsn; + __be16 hw_sq_wqe_counter; + __be16 sw_sq_wqe_counter; + __be16 hw_rcyclic_byte_counter; + __be16 hw_rq_counter; + __be16 sw_rcyclic_byte_counter; + __be16 sw_rq_counter; + u8 rsvd0[5]; + u8 cgs; + u8 cs_req; + u8 cs_res; + __be64 dc_access_key; + u8 rsvd1[24]; +}; + +struct mlx5_create_qp_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 input_qpn; + u8 rsvd0[4]; + __be32 opt_param_mask; + u8 rsvd1[4]; + struct mlx5_qp_context ctx; + u8 rsvd3[16]; + __be64 pas[0]; +}; + +struct mlx5_create_qp_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 qpn; + u8 rsvd0[4]; +}; + +struct mlx5_destroy_qp_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + u8 rsvd0[4]; +}; + +struct mlx5_destroy_qp_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; +}; + +struct mlx5_modify_qp_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + u8 rsvd1[4]; + __be32 optparam; + u8 rsvd0[4]; + struct mlx5_qp_context ctx; +}; + +struct mlx5_modify_qp_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; +}; + +struct mlx5_query_qp_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + u8 rsvd[4]; +}; + +struct mlx5_query_qp_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd1[8]; + __be32 optparam; + u8 rsvd0[4]; + struct mlx5_qp_context ctx; + u8 rsvd2[16]; + __be64 pas[0]; +}; + +struct mlx5_conf_sqp_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + u8 rsvd[3]; + u8 type; +}; + +struct mlx5_conf_sqp_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_alloc_xrcd_mbox_in { + 
struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_alloc_xrcd_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 xrcdn; + u8 rsvd[4]; +}; + +struct mlx5_dealloc_xrcd_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 xrcdn; + u8 rsvd[4]; +}; + +struct mlx5_dealloc_xrcd_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) +{ + return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); +} + +int mlx5_core_create_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp, + struct mlx5_create_qp_mbox_in *in, + int inlen); +int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, + enum mlx5_qp_state new_state, + struct mlx5_modify_qp_mbox_in *in, int sqd_event, + struct mlx5_core_qp *qp); +int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp); +int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, + struct mlx5_query_qp_mbox_out *out, int outlen); + +int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); +int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); +void mlx5_init_qp_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); +int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); +void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); + +#endif /* MLX5_QP_H */ diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h new file mode 100644 index 000000000000..e1a363a33663 --- /dev/null +++ b/include/linux/mlx5/srq.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_SRQ_H +#define MLX5_SRQ_H + +#include + +void mlx5_init_srq_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev); + +#endif /* MLX5_SRQ_H */ -- cgit v1.2.3 From 582c016e68dc5dfea4d3582512157f165a428149 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Mon, 8 Jul 2013 10:52:28 -0700 Subject: mlx5_core: Fixes for sparse warnings - use be32_to_cpu() instead of cpu_to_be32() where appropriate. - use proper accessors for pointers marked __iomem. 
Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 28 +++++++++++++++-------- 2 files changed, 21 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 5e9cf2b9aaf7..a550a8ef94a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -323,11 +323,11 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, param = 0; break; case QP_LOG_PG_SZ: - param = ((cpu_to_be32(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f); + param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f; param += 12; break; case QP_RQPN: - param = cpu_to_be32(ctx->log_pg_sz_remote_qpn) & 0xffffff; + param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff; break; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index ea4b9bca6d4a..748f10a155c4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -132,6 +132,16 @@ static const char *hsynd_str(u8 synd) } } +static u16 read_be16(__be16 __iomem *p) +{ + return swab16(readl((__force u16 __iomem *) p)); +} + +static u32 read_be32(__be32 __iomem *p) +{ + return swab32(readl((__force u32 __iomem *) p)); +} + static void print_health_info(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; @@ -139,15 +149,15 @@ static void print_health_info(struct mlx5_core_dev *dev) int i; for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) - pr_info("assert_var[%d] 0x%08x\n", i, be32_to_cpu(h->assert_var[i])); - - pr_info("assert_exit_ptr 0x%08x\n", be32_to_cpu(h->assert_exit_ptr)); - pr_info("assert_callra 0x%08x\n", be32_to_cpu(h->assert_callra)); - pr_info("fw_ver 0x%08x\n", be32_to_cpu(h->fw_ver)); - pr_info("hw_id 0x%08x\n", be32_to_cpu(h->hw_id)); - pr_info("irisc_index %d\n", h->irisc_index); - pr_info("synd 0x%x: %s\n", h->synd, hsynd_str(h->synd)); - pr_info("ext_sync 0x%04x\n", be16_to_cpu(h->ext_sync)); + pr_info("assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i)); + + pr_info("assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr)); + pr_info("assert_callra 0x%08x\n", read_be32(&h->assert_callra)); + pr_info("fw_ver 0x%08x\n", read_be32(&h->fw_ver)); + pr_info("hw_id 0x%08x\n", read_be32(&h->hw_id)); + pr_info("irisc_index %d\n", readb(&h->irisc_index)); + pr_info("synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd))); + pr_info("ext_sync 0x%04x\n", read_be16(&h->ext_sync)); } static void poll_health(unsigned long data) -- cgit v1.2.3 From 01276ed2424eb78c95461545410923d5da154d31 Mon Sep 17 00:00:00 2001 From: Jongsung Kim Date: Tue, 9 Jul 2013 17:36:00 +0900 Subject: net/cadence/macb: fix bug/typo in extracting gem_irq_read_clear bit Signed-off-by: Jongsung Kim Acked-by: Nicolas Ferre Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index f7e21f278e05..e866608d7d91 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1292,7 +1292,7 @@ static void macb_configure_dma(struct macb *bp) static void macb_configure_caps(struct macb *bp) { if (macb_is_gem(bp)) { - if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0) + if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0) bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; } } -- cgit v1.2.3 From 785bf6f7904352242d187ff6087d523a4ce1b3ac Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Mon, 8 Jul 2013 00:44:55 +0200 Subject: net: mv643xx_eth: do not use port number as platform device id The port number is only local to the ethernet block, not global, so there can be two ethernet blocks both using the same port, like kirkwood with both using port 0. Fix this by using the array index offset for the allocated platform devices as the id. Signed-off-by: Jonas Gorski Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 6495bea56ec8..c35db735958f 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2483,6 +2483,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, struct resource res; const char *mac_addr; int ret; + int dev_num = 0; memset(&ppd, 0, sizeof(ppd)); ppd.shared = pdev; @@ -2503,6 +2504,14 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, return -EINVAL; } + while (dev_num < 3 && port_platdev[dev_num]) + dev_num++; + + if (dev_num == 3) { + dev_err(&pdev->dev, "too many ports registered\n"); + return -EINVAL; + } + mac_addr = of_get_mac_address(pnp); if (mac_addr) memcpy(ppd.mac_addr, mac_addr, 6); @@ -2521,7 +2530,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, of_property_read_u32(pnp, "duplex", &ppd.duplex); } - ppdev = platform_device_alloc(MV643XX_ETH_NAME, ppd.port_number); + ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num); if (!ppdev) return -ENOMEM; ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); @@ -2538,7 +2547,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, if (ret) goto port_err; - port_platdev[ppd.port_number] = ppdev; + port_platdev[dev_num] = ppdev; return 0; -- cgit v1.2.3 From e057590b05bc120732967a04d63c15f2f82cdd2b Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Mon, 8 Jul 2013 11:22:51 +0400 Subject: drivers/net: enic: release rtnl_lock on error-path enic_change_mtu_work() must call rtnl_unlock() on all exiting paths. Signed-off-by: Konstantin Khlebnikov Cc: Christian Benvenuti Cc: Roopa Prabhu Cc: Neel Patel Cc: Nishank Trivedi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cisco/enic/enic_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 635f55992d7e..992ec2ee64d9 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1761,6 +1761,7 @@ static void enic_change_mtu_work(struct work_struct *work) enic_synchronize_irqs(enic); err = vnic_rq_disable(&enic->rq[0]); if (err) { + rtnl_unlock(); netdev_err(netdev, "Unable to disable RQ.\n"); return; } @@ -1773,6 +1774,7 @@ static void enic_change_mtu_work(struct work_struct *work) vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); /* Need at least one buffer on ring to get going */ if (vnic_rq_desc_used(&enic->rq[0]) == 0) { + rtnl_unlock(); netdev_err(netdev, "Unable to alloc receive buffers.\n"); return; } -- cgit v1.2.3 From aabb9875d02559ab9b928cd6f259a5cc4c21a589 Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Mon, 1 Jul 2013 16:49:22 -0500 Subject: sunvnet: vnet_port_remove must call unregister_netdev The missing call to unregister_netdev() leaves the interface active after the driver is unloaded by rmmod. Signed-off-by: Dave Kleikamp Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/sunvnet.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 1df0ff3839e8..3df56840a3b9 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1239,6 +1239,8 @@ static int vnet_port_remove(struct vio_dev *vdev) dev_set_drvdata(&vdev->dev, NULL); kfree(port); + + unregister_netdev(vp->dev); } return 0; } -- cgit v1.2.3 From 076bb0c82a44fbe46fe2c8527a5b5b64b69f679d Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Wed, 10 Jul 2013 17:13:17 +0300 Subject: net: rename include/net/ll_poll.h to include/net/busy_poll.h Rename the file and correct all the places where it is included. Signed-off-by: Eliezer Tamir Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2 +- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2 +- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2 +- fs/select.c | 2 +- include/net/busy_poll.h | 183 ++++++++++++++++++++++++ include/net/ll_poll.h | 183 ------------------------ net/core/datagram.c | 2 +- net/core/sock.c | 2 +- net/core/sysctl_net_core.c | 2 +- net/ipv4/tcp.c | 2 +- net/ipv4/tcp_ipv4.c | 2 +- net/ipv4/udp.c | 2 +- net/ipv6/tcp_ipv6.c | 2 +- net/ipv6/udp.c | 2 +- net/socket.c | 2 +- 16 files changed, 197 insertions(+), 197 deletions(-) create mode 100644 include/net/busy_poll.h delete mode 100644 include/net/ll_poll.h (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ec3aa1d451e8..05b6b4e8b073 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include "bnx2x_cmn.h" #include "bnx2x_init.h" diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index fb098b46c6a6..7be725cdfea8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -52,7 +52,7 @@ #include #endif -#include +#include #ifdef CONFIG_NET_LL_RX_POLL #define LL_EXTENDED_STATS diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index caf204770569..0fb2438dc2c7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 76997b93fdfe..90746d37ac9b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -31,7 +31,7 @@ * */ -#include +#include #include #include #include diff --git a/fs/select.c b/fs/select.c index f9f49c40cfd4..35d4adc749d9 100644 --- a/fs/select.c +++ b/fs/select.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h new file mode 100644 index 000000000000..76f034087743 --- /dev/null +++ b/include/net/busy_poll.h @@ -0,0 +1,183 @@ +/* + * Low Latency Sockets + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Author: Eliezer Tamir + * + * Contact Information: + * e1000-devel Mailing List + */ + +#ifndef _LINUX_NET_LL_POLL_H +#define _LINUX_NET_LL_POLL_H + +#include +#include + +#ifdef CONFIG_NET_LL_RX_POLL + +struct napi_struct; +extern unsigned int sysctl_net_ll_read __read_mostly; +extern unsigned int sysctl_net_ll_poll __read_mostly; + +/* return values from ndo_ll_poll */ +#define LL_FLUSH_FAILED -1 +#define LL_FLUSH_BUSY -2 + +static inline bool net_busy_loop_on(void) +{ + return sysctl_net_ll_poll; +} + +/* a wrapper to make debug_smp_processor_id() happy + * we can use sched_clock() because we don't care much about precision + * we only care that the average is bounded + */ +#ifdef CONFIG_DEBUG_PREEMPT +static inline u64 busy_loop_us_clock(void) +{ + u64 rc; + + preempt_disable_notrace(); + rc = sched_clock(); + preempt_enable_no_resched_notrace(); + + return rc >> 10; +} +#else /* CONFIG_DEBUG_PREEMPT */ +static inline u64 busy_loop_us_clock(void) +{ + return sched_clock() >> 10; +} +#endif /* CONFIG_DEBUG_PREEMPT */ + +static inline unsigned long sk_busy_loop_end_time(struct sock *sk) +{ + return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec); +} + +/* in poll/select we use the global sysctl_net_ll_poll value */ +static inline unsigned long busy_loop_end_time(void) +{ + return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_ll_poll); +} + +static inline bool sk_can_busy_loop(struct sock *sk) +{ + return sk->sk_ll_usec && sk->sk_napi_id && + !need_resched() && !signal_pending(current); +} + + +static inline bool busy_loop_timeout(unsigned long end_time) +{ + unsigned long now = busy_loop_us_clock(); + + return time_after(now, end_time); +} + +/* when used in sock_poll() nonblock is known at compile time to be true + * so the loop and end_time will be optimized out + */ +static inline bool sk_busy_loop(struct sock *sk, int nonblock) +{ + unsigned long end_time = !nonblock ? 
sk_busy_loop_end_time(sk) : 0; + const struct net_device_ops *ops; + struct napi_struct *napi; + int rc = false; + + /* + * rcu read lock for napi hash + * bh so we don't race with net_rx_action + */ + rcu_read_lock_bh(); + + napi = napi_by_id(sk->sk_napi_id); + if (!napi) + goto out; + + ops = napi->dev->netdev_ops; + if (!ops->ndo_ll_poll) + goto out; + + do { + rc = ops->ndo_ll_poll(napi); + + if (rc == LL_FLUSH_FAILED) + break; /* permanent failure */ + + if (rc > 0) + /* local bh are disabled so it is ok to use _BH */ + NET_ADD_STATS_BH(sock_net(sk), + LINUX_MIB_LOWLATENCYRXPACKETS, rc); + + } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && + !need_resched() && !busy_loop_timeout(end_time)); + + rc = !skb_queue_empty(&sk->sk_receive_queue); +out: + rcu_read_unlock_bh(); + return rc; +} + +/* used in the NIC receive handler to mark the skb */ +static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi) +{ + skb->napi_id = napi->napi_id; +} + +/* used in the protocol hanlder to propagate the napi_id to the socket */ +static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) +{ + sk->sk_napi_id = skb->napi_id; +} + +#else /* CONFIG_NET_LL_RX_POLL */ +static inline unsigned long net_busy_loop_on(void) +{ + return 0; +} + +static inline unsigned long busy_loop_end_time(void) +{ + return 0; +} + +static inline bool sk_can_busy_loop(struct sock *sk) +{ + return false; +} + +static inline bool sk_busy_poll(struct sock *sk, int nonblock) +{ + return false; +} + +static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi) +{ +} + +static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) +{ +} + +static inline bool busy_loop_timeout(unsigned long end_time) +{ + return true; +} + +#endif /* CONFIG_NET_LL_RX_POLL */ +#endif /* _LINUX_NET_LL_POLL_H */ diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h deleted file mode 100644 index 76f034087743..000000000000 --- a/include/net/ll_poll.h +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Low Latency Sockets - * Copyright(c) 2013 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Author: Eliezer Tamir - * - * Contact Information: - * e1000-devel Mailing List - */ - -#ifndef _LINUX_NET_LL_POLL_H -#define _LINUX_NET_LL_POLL_H - -#include -#include - -#ifdef CONFIG_NET_LL_RX_POLL - -struct napi_struct; -extern unsigned int sysctl_net_ll_read __read_mostly; -extern unsigned int sysctl_net_ll_poll __read_mostly; - -/* return values from ndo_ll_poll */ -#define LL_FLUSH_FAILED -1 -#define LL_FLUSH_BUSY -2 - -static inline bool net_busy_loop_on(void) -{ - return sysctl_net_ll_poll; -} - -/* a wrapper to make debug_smp_processor_id() happy - * we can use sched_clock() because we don't care much about precision - * we only care that the average is bounded - */ -#ifdef CONFIG_DEBUG_PREEMPT -static inline u64 busy_loop_us_clock(void) -{ - u64 rc; - - preempt_disable_notrace(); - rc = sched_clock(); - preempt_enable_no_resched_notrace(); - - return rc >> 10; -} -#else /* CONFIG_DEBUG_PREEMPT */ -static inline u64 busy_loop_us_clock(void) -{ - return sched_clock() >> 10; -} -#endif /* CONFIG_DEBUG_PREEMPT */ - -static inline unsigned long sk_busy_loop_end_time(struct sock *sk) -{ - return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec); -} - -/* in poll/select we use the global sysctl_net_ll_poll value */ -static inline unsigned long busy_loop_end_time(void) -{ - return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_ll_poll); -} - -static inline bool sk_can_busy_loop(struct sock *sk) -{ - return sk->sk_ll_usec && sk->sk_napi_id && - !need_resched() && !signal_pending(current); -} - - -static inline bool busy_loop_timeout(unsigned long end_time) -{ - unsigned long now = busy_loop_us_clock(); - - return time_after(now, end_time); -} - -/* when used in sock_poll() nonblock is known at compile time to be true - * so the loop and end_time will be optimized out - */ -static inline bool sk_busy_loop(struct sock *sk, int nonblock) -{ - unsigned long end_time = !nonblock ? 
sk_busy_loop_end_time(sk) : 0; - const struct net_device_ops *ops; - struct napi_struct *napi; - int rc = false; - - /* - * rcu read lock for napi hash - * bh so we don't race with net_rx_action - */ - rcu_read_lock_bh(); - - napi = napi_by_id(sk->sk_napi_id); - if (!napi) - goto out; - - ops = napi->dev->netdev_ops; - if (!ops->ndo_ll_poll) - goto out; - - do { - rc = ops->ndo_ll_poll(napi); - - if (rc == LL_FLUSH_FAILED) - break; /* permanent failure */ - - if (rc > 0) - /* local bh are disabled so it is ok to use _BH */ - NET_ADD_STATS_BH(sock_net(sk), - LINUX_MIB_LOWLATENCYRXPACKETS, rc); - - } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && - !need_resched() && !busy_loop_timeout(end_time)); - - rc = !skb_queue_empty(&sk->sk_receive_queue); -out: - rcu_read_unlock_bh(); - return rc; -} - -/* used in the NIC receive handler to mark the skb */ -static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi) -{ - skb->napi_id = napi->napi_id; -} - -/* used in the protocol hanlder to propagate the napi_id to the socket */ -static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) -{ - sk->sk_napi_id = skb->napi_id; -} - -#else /* CONFIG_NET_LL_RX_POLL */ -static inline unsigned long net_busy_loop_on(void) -{ - return 0; -} - -static inline unsigned long busy_loop_end_time(void) -{ - return 0; -} - -static inline bool sk_can_busy_loop(struct sock *sk) -{ - return false; -} - -static inline bool sk_busy_poll(struct sock *sk, int nonblock) -{ - return false; -} - -static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi) -{ -} - -static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) -{ -} - -static inline bool busy_loop_timeout(unsigned long end_time) -{ - return true; -} - -#endif /* CONFIG_NET_LL_RX_POLL */ -#endif /* _LINUX_NET_LL_POLL_H */ diff --git a/net/core/datagram.c b/net/core/datagram.c index 6e9ab31e457e..8ab48cd89559 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -56,7 +56,7 @@ #include #include #include -#include +#include /* * Is a socket 'connection oriented' ? 
diff --git a/net/core/sock.c b/net/core/sock.c index ab06b719f5b1..9bfe83f4d670 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -139,7 +139,7 @@ #include #endif -#include +#include static DEFINE_MUTEX(proto_list_mutex); static LIST_HEAD(proto_list); diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index afc677eadd93..1a298cb3daee 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include static int one = 1; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 15cbfa94bd8e..5423223e93c2 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -279,7 +279,7 @@ #include #include -#include +#include int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 35675e46aff8..3a261b41a00c 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -75,7 +75,7 @@ #include #include #include -#include +#include #include #include diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 6b270e53c207..bcc0ff2c16da 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -109,7 +109,7 @@ #include #include #include -#include +#include #include "udp_impl.h" struct udp_table udp_table __read_mostly; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5cffa5c3e6b8..345bd92d4ddb 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -63,7 +63,7 @@ #include #include #include -#include +#include #include diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index b6f31437a1f8..40e72034da07 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -46,7 +46,7 @@ #include #include #include -#include +#include #include #include diff --git a/net/socket.c b/net/socket.c index 45afa648364a..6a3e9a3f50ad 100644 --- a/net/socket.c +++ b/net/socket.c @@ -104,7 +104,7 @@ #include #include #include -#include +#include #ifdef CONFIG_NET_LL_RX_POLL unsigned int sysctl_net_ll_read __read_mostly; -- cgit v1.2.3 From 8b80cda536ea9bceec0364e897868a30ee13b992 Mon Sep 17 00:00:00 2001 From: Eliezer Tamir Date: Wed, 10 Jul 2013 17:13:26 +0300 Subject: net: rename ll methods to busy-poll Rename ndo_ll_poll to ndo_busy_poll. Rename sk_mark_ll to sk_mark_napi_id. Rename skb_mark_ll to skb_mark_napi_id. Correct all useres of these functions. Update comments and defines in include/net/busy_poll.h Signed-off-by: Eliezer Tamir Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2 +- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2 +- include/linux/netdevice.h | 2 +- include/net/busy_poll.h | 22 ++++++++++++---------- net/ipv4/tcp_ipv4.c | 2 +- net/ipv4/udp.c | 2 +- net/ipv6/tcp_ipv6.c | 2 +- net/ipv6/udp.c | 2 +- 11 files changed, 23 insertions(+), 21 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 05b6b4e8b073..3353efe79194 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -990,7 +990,7 @@ reuse_rx: __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), le16_to_cpu(cqe_fp->vlan_tag)); - skb_mark_ll(skb, &fp->napi); + skb_mark_napi_id(skb, &fp->napi); if (bnx2x_fp_ll_polling(fp)) netif_receive_skb(skb); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 15a528bda87c..e5da07858a2f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12027,7 +12027,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { #endif #ifdef CONFIG_NET_LL_RX_POLL - .ndo_ll_poll = bnx2x_low_latency_recv, + .ndo_busy_poll = bnx2x_low_latency_recv, #endif }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 047ebaaf0141..bad8f14b1941 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1978,7 +1978,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } #endif /* IXGBE_FCOE */ - skb_mark_ll(skb, &q_vector->napi); + skb_mark_napi_id(skb, &q_vector->napi); ixgbe_rx_skb(q_vector, skb); /* update budget accounting */ @@ -7228,7 +7228,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_poll_controller = ixgbe_netpoll, #endif #ifdef CONFIG_NET_LL_RX_POLL - .ndo_ll_poll = ixgbe_low_latency_recv, + .ndo_busy_poll = ixgbe_low_latency_recv, #endif #ifdef IXGBE_FCOE .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 0fb2438dc2c7..5eac871399d8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2141,7 +2141,7 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif #ifdef CONFIG_NET_LL_RX_POLL - .ndo_ll_poll = mlx4_en_low_latency_recv, + .ndo_busy_poll = mlx4_en_low_latency_recv, #endif }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 90746d37ac9b..dec455c8f627 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -767,7 +767,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud timestamp); } - skb_mark_ll(skb, &cq->napi); + skb_mark_napi_id(skb, &cq->napi); /* Push it up the stack */ netif_receive_skb(skb); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index bb82871b8494..0741a1e919a5 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -974,7 +974,7 @@ struct net_device_ops { void (*ndo_netpoll_cleanup)(struct 
net_device *dev); #endif #ifdef CONFIG_NET_LL_RX_POLL - int (*ndo_ll_poll)(struct napi_struct *dev); + int (*ndo_busy_poll)(struct napi_struct *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, int queue, u8 *mac); diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 76f034087743..4ff71908fd42 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -1,5 +1,5 @@ /* - * Low Latency Sockets + * net busy poll support * Copyright(c) 2013 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it @@ -21,8 +21,8 @@ * e1000-devel Mailing List */ -#ifndef _LINUX_NET_LL_POLL_H -#define _LINUX_NET_LL_POLL_H +#ifndef _LINUX_NET_BUSY_POLL_H +#define _LINUX_NET_BUSY_POLL_H #include #include @@ -110,11 +110,11 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock) goto out; ops = napi->dev->netdev_ops; - if (!ops->ndo_ll_poll) + if (!ops->ndo_busy_poll) goto out; do { - rc = ops->ndo_ll_poll(napi); + rc = ops->ndo_busy_poll(napi); if (rc == LL_FLUSH_FAILED) break; /* permanent failure */ @@ -134,13 +134,14 @@ out: } /* used in the NIC receive handler to mark the skb */ -static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi) +static inline void skb_mark_napi_id(struct sk_buff *skb, + struct napi_struct *napi) { skb->napi_id = napi->napi_id; } /* used in the protocol hanlder to propagate the napi_id to the socket */ -static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) +static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb) { sk->sk_napi_id = skb->napi_id; } @@ -166,11 +167,12 @@ static inline bool sk_busy_poll(struct sock *sk, int nonblock) return false; } -static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi) +static inline void skb_mark_napi_id(struct sk_buff *skb, + struct napi_struct *napi) { } -static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb) +static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb) { } @@ -180,4 +182,4 @@ static inline bool busy_loop_timeout(unsigned long end_time) } #endif /* CONFIG_NET_LL_RX_POLL */ -#endif /* _LINUX_NET_LL_POLL_H */ +#endif /* _LINUX_NET_BUSY_POLL_H */ diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3a261b41a00c..b299da5ff499 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1994,7 +1994,7 @@ process: if (sk_filter(sk, skb)) goto discard_and_relse; - sk_mark_ll(sk, skb); + sk_mark_napi_id(sk, skb); skb->dev = NULL; bh_lock_sock_nested(sk); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index bcc0ff2c16da..a0d7151ffbd9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1713,7 +1713,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (sk != NULL) { int ret; - sk_mark_ll(sk, skb); + sk_mark_napi_id(sk, skb); ret = udp_queue_rcv_skb(sk, skb); sock_put(sk); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 345bd92d4ddb..6e1649d58533 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1499,7 +1499,7 @@ process: if (sk_filter(sk, skb)) goto discard_and_relse; - sk_mark_ll(sk, skb); + sk_mark_napi_id(sk, skb); skb->dev = NULL; bh_lock_sock_nested(sk); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 40e72034da07..f4058150262b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -844,7 +844,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (sk != NULL) { int ret; - sk_mark_ll(sk, skb); + sk_mark_napi_id(sk, skb); ret = udpv6_queue_rcv_skb(sk, skb); sock_put(sk); -- cgit v1.2.3 
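For context on the rename above: a busy-poll capable driver tags every received skb with the NAPI context it arrived on, the protocol handler copies that id onto the socket, and the socket busy-poll loop later calls back into the driver through the net_device_ops hook. The fragment below is only an illustrative sketch of that flow using the renamed helpers; struct example_rx_queue and the example_* names are placeholders, not code from any driver touched by this series.

#include <linux/netdevice.h>
#include <net/busy_poll.h>

/* hypothetical per-RX-queue state; real drivers embed napi_struct in
 * their own ring/queue structures */
struct example_rx_queue {
	struct napi_struct napi;
};

/* NIC receive path: tag the skb with its NAPI id (was skb_mark_ll()) */
static void example_receive_skb(struct example_rx_queue *rxq,
				struct sk_buff *skb)
{
	skb_mark_napi_id(skb, &rxq->napi);
	netif_receive_skb(skb);
}

/* The protocol handler then calls sk_mark_napi_id(sk, skb) (was
 * sk_mark_ll()), and the driver advertises its poll routine through the
 * renamed hook in its net_device_ops:
 *
 *	.ndo_busy_poll = example_low_latency_recv,	(was .ndo_ll_poll)
 */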
From 45dd95c443c2d81d8d41e55afeafd4c61042fa76 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Mon, 8 Jul 2013 17:09:01 +0800 Subject: r8169: add a new chip for RTL8411 Add a new chip for RTL8411 series. Signed-off-by: Hayes Wang Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 67 ++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 393f961a013c..4106a743ca74 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -46,6 +46,7 @@ #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" #define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" #define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" @@ -144,6 +145,7 @@ enum mac_version { RTL_GIGA_MAC_VER_41, RTL_GIGA_MAC_VER_42, RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, RTL_GIGA_MAC_NONE = 0xff, }; @@ -276,6 +278,9 @@ static const struct { [RTL_GIGA_MAC_VER_43] = _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_44] = + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, + JUMBO_9K, false), }; #undef _R @@ -394,6 +399,7 @@ enum rtl8168_8101_registers { #define CSIAR_FUNC_CARD 0x00000000 #define CSIAR_FUNC_SDIO 0x00010000 #define CSIAR_FUNC_NIC 0x00020000 +#define CSIAR_FUNC_NIC2 0x00010000 PMCH = 0x6f, EPHYAR = 0x80, #define EPHYAR_FLAG 0x80000000 @@ -826,6 +832,7 @@ MODULE_FIRMWARE(FIRMWARE_8168F_1); MODULE_FIRMWARE(FIRMWARE_8168F_2); MODULE_FIRMWARE(FIRMWARE_8402_1); MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); MODULE_FIRMWARE(FIRMWARE_8106E_1); MODULE_FIRMWARE(FIRMWARE_8106E_2); MODULE_FIRMWARE(FIRMWARE_8168G_2); @@ -2051,6 +2058,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, int mac_version; } mac_info[] = { /* 8168G family. 
*/ + { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 }, { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 }, { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 }, { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 }, @@ -3651,6 +3659,7 @@ static void rtl_hw_phy_config(struct net_device *dev) break; case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: rtl8168g_2_hw_phy_config(tp); break; @@ -3863,6 +3872,7 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: ops->write = r8168g_mdio_write; ops->read = r8168g_mdio_read; break; @@ -3916,6 +3926,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; @@ -4178,6 +4189,7 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_44: ops->down = r8168_pll_power_down; ops->up = r8168_pll_power_up; break; @@ -4224,6 +4236,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); break; default: @@ -4384,6 +4397,7 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: default: ops->disable = NULL; ops->enable = NULL; @@ -4493,6 +4507,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) tp->mac_version == RTL_GIGA_MAC_VER_41 || tp->mac_version == RTL_GIGA_MAC_VER_42 || tp->mac_version == RTL_GIGA_MAC_VER_43 || + tp->mac_version == RTL_GIGA_MAC_VER_44 || tp->mac_version == RTL_GIGA_MAC_VER_38) { RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); @@ -4782,6 +4797,29 @@ static u32 r8402_csi_read(struct rtl8169_private *tp, int addr) RTL_R32(CSIDR) : ~0; } +static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | + CSIAR_FUNC_NIC2); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 r8411_csi_read(struct rtl8169_private *tp, int addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? 
+ RTL_R32(CSIDR) : ~0; +} + static void rtl_init_csi_ops(struct rtl8169_private *tp) { struct csi_ops *ops = &tp->csi_ops; @@ -4811,6 +4849,11 @@ static void rtl_init_csi_ops(struct rtl8169_private *tp) ops->read = r8402_csi_read; break; + case RTL_GIGA_MAC_VER_44: + ops->write = r8411_csi_write; + ops->read = r8411_csi_read; + break; + default: ops->write = r8169_csi_write; ops->read = r8169_csi_read; @@ -5255,6 +5298,25 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2)); } +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0020, 0x0000 }, + { 0x1e, 0x0000, 0x2000 } + }; + + rtl_hw_start_8168g_1(tp); + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); +} + static void rtl_hw_start_8168(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -5361,6 +5423,10 @@ static void rtl_hw_start_8168(struct net_device *dev) rtl_hw_start_8168g_2(tp); break; + case RTL_GIGA_MAC_VER_44: + rtl_hw_start_8411_2(tp); + break; + default: printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", dev->name, tp->mac_version); @@ -6877,6 +6943,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: rtl_hw_init_8168g(tp); break; -- cgit v1.2.3 From b41e6a51d57e231d2ed237f17f002cc536c0987c Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 10 Jul 2013 23:03:34 +0200 Subject: sh_eth: SH_ETH should depend on HAS_DMA If NO_DMA=y: drivers/built-in.o: In function `sh_eth_free_dma_buffer': drivers/net/ethernet/renesas/sh_eth.c:1103: undefined reference to `dma_free_coherent' drivers/net/ethernet/renesas/sh_eth.c:1110: undefined reference to `dma_free_coherent' drivers/built-in.o: In function `sh_eth_ring_init': drivers/net/ethernet/renesas/sh_eth.c:1065: undefined reference to `dma_alloc_coherent' drivers/net/ethernet/renesas/sh_eth.c:1086: undefined reference to `dma_free_coherent' drivers/built-in.o: In function `sh_eth_ring_format': drivers/net/ethernet/renesas/sh_eth.c:988: undefined reference to `dma_map_single' drivers/built-in.o: In function `sh_eth_txfree': drivers/net/ethernet/renesas/sh_eth.c:1220: undefined reference to `dma_unmap_single' drivers/built-in.o: In function `sh_eth_rx': drivers/net/ethernet/renesas/sh_eth.c:1323: undefined reference to `dma_map_single' drivers/built-in.o: In function `sh_eth_start_xmit': drivers/net/ethernet/renesas/sh_eth.c:1954: undefined reference to `dma_map_single' Signed-off-by: Geert Uytterhoeven Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 544514e66187..19a8a045e077 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -4,6 +4,7 @@ config SH_ETH tristate "Renesas SuperH Ethernet support" + depends on HAS_DMA select CRC32 select MII select MDIO_BITBANG -- cgit v1.2.3 From a8798a5c77c9981e88caef1373a3310bf8aed219 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 11 Jul 2013 15:53:21 +0200 Subject: alx: fix lockdep annotation Move spin_lock_init to be called before the spinlocks are used, preventing a lockdep splat. Signed-off-by: Maarten Lankhorst Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/alx/main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 0e0b242a9dd4..027398ebbba6 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1245,6 +1245,8 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(netdev, &pdev->dev); alx = netdev_priv(netdev); + spin_lock_init(&alx->hw.mdio_lock); + spin_lock_init(&alx->irq_lock); alx->dev = netdev; alx->hw.pdev = pdev; alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP | @@ -1327,9 +1329,6 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&alx->link_check_wk, alx_link_check); INIT_WORK(&alx->reset_wk, alx_reset); - spin_lock_init(&alx->hw.mdio_lock); - spin_lock_init(&alx->irq_lock); - netif_carrier_off(netdev); err = register_netdev(netdev); -- cgit v1.2.3 From 1b4fc0e249d61916b8a525b0e7b3c028232457c9 Mon Sep 17 00:00:00 2001 From: Dmitry Kravkov Date: Thu, 11 Jul 2013 15:48:21 +0300 Subject: bnx2x: fix tunneling CSUM calculation Since commit c957d09ffda417f6c8e3d1f10e2b05228607d6d7 "bnx2x: Remove sparse and coccinelle warnings" the driver provided a wrong partial csum for HW in tunneling scenarios. Reported-by: Eric Dumazet CC: Alexander Duyck CC: Pravin Shelar CC: Cong Wang Signed-off-by: Dmitry Kravkov Tested-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 3353efe79194..ee350bde1818 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3543,9 +3543,9 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, /* outer IP header info */ if (xmit_type & XMIT_CSUM_V4) { struct iphdr *iph = ip_hdr(skb); - u16 csum = (__force u16)(~iph->check) - - (__force u16)iph->tot_len - - (__force u16)iph->frag_off; + u32 csum = (__force u32)(~iph->check) - + (__force u32)iph->tot_len - + (__force u32)iph->frag_off; pbd2->fw_ip_csum_wo_len_flags_frag = bswab16(csum_fold((__force __wsum)csum)); -- cgit v1.2.3 From 288dde9f23b6726c1e8147bf635721372bf77b16 Mon Sep 17 00:00:00 2001 From: Moshe Lazer Date: Wed, 10 Jul 2013 14:31:03 +0300 Subject: mlx5_core: Adjust hca_cap.uar_page_sz to conform to Connect-IB spec Sparse reported an endianness bug in the assignment to hca_cap.uar_page_sz.
Fix the declaration of this field to be __be16 (which is what is in the firmware spec), renaming the field to log_uar_page_sz to conform to the spec, which fixes the endianness bug reported by sparse. Reported-by: Fengguang Wu Signed-off-by: Moshe Lazer Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +- include/linux/mlx5/device.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index f21cc397d1bc..12242de2b0e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -212,7 +212,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; memset(&set_out, 0, sizeof(set_out)); - set_ctx->hca_cap.uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12); + set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12); set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP); err = mlx5_cmd_exec(dev, set_ctx, sizeof(*set_ctx), &set_out, sizeof(set_out)); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 51390915e538..8de8d8f22384 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -317,8 +317,8 @@ struct mlx5_hca_cap { u8 log_max_pd; u8 rsvd25; u8 log_max_xrcd; - u8 rsvd26[40]; - __be32 uar_page_sz; + u8 rsvd26[42]; + __be16 log_uar_page_sz; u8 rsvd27[28]; u8 log_msx_atomic_size_qp; u8 rsvd28[2]; -- cgit v1.2.3 From 5e631a03af7eaa55b9ef7fa7611144c2c698c6c6 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 10 Jul 2013 13:58:59 +0300 Subject: mlx5: Return -EFAULT instead of -EPERM For copy_to/from_user() failure, the correct error code is -EFAULT, not -EPERM.
Signed-off-by: Dan Carpenter Acked-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mlx5/mr.c | 8 ++++---- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 10 +++++----- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 12 ++++-------- 3 files changed, 13 insertions(+), 17 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index e2daa8f02476..bd41df95b6f0 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -171,7 +171,7 @@ static ssize_t size_write(struct file *filp, const char __user *buf, int c; if (copy_from_user(lbuf, buf, sizeof(lbuf))) - return -EPERM; + return -EFAULT; c = order2idx(dev, ent->order); lbuf[sizeof(lbuf) - 1] = 0; @@ -208,7 +208,7 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count, return err; if (copy_to_user(buf, lbuf, err)) - return -EPERM; + return -EFAULT; *pos += err; @@ -233,7 +233,7 @@ static ssize_t limit_write(struct file *filp, const char __user *buf, int c; if (copy_from_user(lbuf, buf, sizeof(lbuf))) - return -EPERM; + return -EFAULT; c = order2idx(dev, ent->order); lbuf[sizeof(lbuf) - 1] = 0; @@ -270,7 +270,7 @@ static ssize_t limit_read(struct file *filp, char __user *buf, size_t count, return err; if (copy_to_user(buf, lbuf, err)) - return -EPERM; + return -EFAULT; *pos += err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index c1c0eef89694..205753a04cfc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -693,7 +693,7 @@ static ssize_t dbg_write(struct file *filp, const char __user *buf, return -ENOMEM; if (copy_from_user(lbuf, buf, sizeof(lbuf))) - return -EPERM; + return -EFAULT; lbuf[sizeof(lbuf) - 1] = 0; @@ -889,7 +889,7 @@ static ssize_t data_write(struct file *filp, const char __user *buf, return -ENOMEM; if (copy_from_user(ptr, buf, count)) { - err = -EPERM; + err = -EFAULT; goto out; } dbg->in_msg = ptr; @@ -919,7 +919,7 @@ static ssize_t data_read(struct file *filp, char __user *buf, size_t count, copy = min_t(int, count, dbg->outlen); if (copy_to_user(buf, dbg->out_msg, copy)) - return -EPERM; + return -EFAULT; *pos += copy; @@ -949,7 +949,7 @@ static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count, return err; if (copy_to_user(buf, &outlen, err)) - return -EPERM; + return -EFAULT; *pos += err; @@ -974,7 +974,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, dbg->outlen = 0; if (copy_from_user(outlen_str, buf, count)) - return -EPERM; + return -EFAULT; outlen_str[7] = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index a550a8ef94a1..4273c06e2e96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -148,7 +148,6 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count, struct mlx5_cmd_stats *stats; u64 field = 0; int ret; - int err; char tbuf[22]; if (*pos) @@ -161,9 +160,8 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count, spin_unlock(&stats->lock); ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); if (ret > 0) { - err = copy_to_user(buf, tbuf, ret); - if (err) - return err; + if (copy_to_user(buf, tbuf, ret)) + return -EFAULT; } *pos += ret; @@ -418,7 +416,6 @@ static ssize_t dbg_read(struct file *filp, char __user 
*buf, size_t count, char tbuf[18]; u64 field; int ret; - int err; if (*pos) return 0; @@ -445,9 +442,8 @@ static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); if (ret > 0) { - err = copy_to_user(buf, tbuf, ret); - if (err) - return err; + if (copy_to_user(buf, tbuf, ret)) + return -EFAULT; } *pos += ret; -- cgit v1.2.3 From 54842ec2b0644b3e04c6fcfb52bc94dfcf918a43 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Wed, 10 Jul 2013 16:57:43 +0100 Subject: drivers/net/ethernet/cadence: don't use devm_pinctrl_get_select_default() in probe Since commit ab78029 (drivers/pinctrl: grab default handles from device core), we can rely on device core for setting the default pins. Compile tested only. Acked-by: Linus Walleij (personally at LCE13) Signed-off-by: Wolfram Sang Acked-by: Nicolas Ferre Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/at91_ether.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c index 3f1957158a3b..bb5d63fb2e6d 100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c @@ -29,7 +29,6 @@ #include #include #include -#include #include "macb.h" @@ -309,7 +308,6 @@ static int __init at91ether_probe(struct platform_device *pdev) struct resource *regs; struct net_device *dev; struct phy_device *phydev; - struct pinctrl *pinctrl; struct macb *lp; int res; u32 reg; @@ -319,15 +317,6 @@ static int __init at91ether_probe(struct platform_device *pdev) if (!regs) return -ENOENT; - pinctrl = devm_pinctrl_get_select_default(&pdev->dev); - if (IS_ERR(pinctrl)) { - res = PTR_ERR(pinctrl); - if (res == -EPROBE_DEFER) - return res; - - dev_warn(&pdev->dev, "No pinctrl provided\n"); - } - dev = alloc_etherdev(sizeof(struct macb)); if (!dev) return -ENOMEM; -- cgit v1.2.3 From 352900b583b2852152a1e05ea0e8b579292e731e Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Fri, 12 Jul 2013 10:58:48 -0400 Subject: atl1e: fix dma mapping warnings Recently had this backtrace reported: WARNING: at lib/dma-debug.c:937 check_unmap+0x47d/0x930() Hardware name: System Product Name ATL1E 0000:02:00.0: DMA-API: device driver failed to check map error[device address=0x00000000cbfd1000] [size=90 bytes] [mapped as single] Modules linked in: xt_conntrack nf_conntrack ebtable_filter ebtables ip6table_filter ip6_tables snd_hda_codec_hdmi snd_hda_codec_realtek iTCO_wdt iTCO_vendor_support snd_hda_intel acpi_cpufreq mperf coretemp btrfs zlib_deflate snd_hda_codec snd_hwdep microcode raid6_pq libcrc32c snd_seq usblp serio_raw xor snd_seq_device joydev snd_pcm snd_page_alloc snd_timer snd lpc_ich i2c_i801 soundcore mfd_core atl1e asus_atk0110 ata_generic pata_acpi radeon i2c_algo_bit drm_kms_helper ttm drm i2c_core pata_marvell uinput Pid: 314, comm: systemd-journal Not tainted 3.9.0-0.rc6.git2.3.fc19.x86_64 #1 Call Trace: [] warn_slowpath_common+0x66/0x80 [] warn_slowpath_fmt+0x4c/0x50 [] check_unmap+0x47d/0x930 [] ? sched_clock_cpu+0xa8/0x100 [] debug_dma_unmap_page+0x5f/0x70 [] ? unmap_single+0x20/0x30 [] atl1e_intr+0x3a1/0x5b0 [atl1e] [] ? trace_hardirqs_off+0xd/0x10 [] handle_irq_event_percpu+0x56/0x390 [] handle_irq_event+0x3d/0x60 [] handle_fasteoi_irq+0x5a/0x100 [] handle_irq+0xbf/0x150 [] ? file_sb_list_del+0x3f/0x50 [] ? irq_enter+0x50/0xa0 [] do_IRQ+0x4d/0xc0 [] ? file_sb_list_del+0x3f/0x50 [] common_interrupt+0x72/0x72 [] ? 
lock_release+0xc2/0x310 [] lg_local_unlock_cpu+0x24/0x50 [] file_sb_list_del+0x3f/0x50 [] fput+0x2d/0xc0 [] filp_close+0x61/0x90 [] __close_fd+0x8d/0x150 [] sys_close+0x20/0x50 [] system_call_fastpath+0x16/0x1b The usual straightforward failure to check for dma_mapping_error after a map operation is completed. This patch should fix it; the reporter wandered off after filing this bz: https://bugzilla.redhat.com/show_bug.cgi?id=954170 and I don't have hardware to test, but the fix is pretty straightforward, so I figured I'd post it for review. Signed-off-by: Neil Horman CC: Jay Cliburn CC: Chris Snook CC: "David S. Miller" Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 28 ++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 895f5377ad1b..6d1a62a84c9d 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1665,8 +1665,8 @@ check_sum: return 0; } -static void atl1e_tx_map(struct atl1e_adapter *adapter, - struct sk_buff *skb, struct atl1e_tpd_desc *tpd) +static int atl1e_tx_map(struct atl1e_adapter *adapter, + struct sk_buff *skb, struct atl1e_tpd_desc *tpd) { struct atl1e_tpd_desc *use_tpd = NULL; struct atl1e_tx_buffer *tx_buffer = NULL; @@ -1677,6 +1677,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, u16 nr_frags; u16 f; int segment; + int ring_start = adapter->tx_ring.next_to_use; nr_frags = skb_shinfo(skb)->nr_frags; segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; @@ -1689,6 +1690,9 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, tx_buffer->length = map_len; tx_buffer->dma = pci_map_single(adapter->pdev, skb->data, hdr_len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) + return -ENOSPC; + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); mapped_len += map_len; use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); @@ -1715,6 +1719,13 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, tx_buffer->dma = pci_map_single(adapter->pdev, skb->data + mapped_len, map_len, PCI_DMA_TODEVICE); + + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { + /* Reset the tx rings next pointer */ + adapter->tx_ring.next_to_use = ring_start; + return -ENOSPC; + } + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); mapped_len += map_len; use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); @@ -1750,6 +1761,13 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, (i * MAX_TX_BUF_LEN), tx_buffer->length, DMA_TO_DEVICE); + + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { + /* Reset the ring next to use pointer */ + adapter->tx_ring.next_to_use = ring_start; + return -ENOSPC; + } + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE); use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | @@ -1767,6 +1785,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, /* The last buffer info contain the skb address, so it will be free after unmap */ tx_buffer->skb = skb; + return 0; } static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count, @@ -1834,10 +1853,13 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } - atl1e_tx_map(adapter, skb, tpd); + if (atl1e_tx_map(adapter, skb, tpd)) + goto out; + atl1e_tx_queue(adapter,
tpd_req, tpd); netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ +out: spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_OK; } -- cgit v1.2.3 From 9b4fe5fb0bdd8f31f24cbfe77e38ec8155c250c5 Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Fri, 12 Jul 2013 13:35:33 -0400 Subject: via-rhine: fix dma mapping errors This bug: https://bugzilla.redhat.com/show_bug.cgi?id=951695 reported a dma debug backtrace: WARNING: at lib/dma-debug.c:937 check_unmap+0x47d/0x930() Hardware name: To Be Filled By O.E.M. via-rhine 0000:00:12.0: DMA-API: device driver failed to check map error[device address=0x0000000075a837b2] [size=90 bytes] [mapped as single] Modules linked in: ip6_tables gspca_spca561 gspca_main videodev media snd_hda_codec_realtek snd_hda_intel i2c_viapro snd_hda_codec snd_hwdep snd_seq ppdev mperf via_rhine coretemp snd_pcm mii microcode snd_page_alloc snd_timer snd_mpu401 snd_mpu401_uart snd_rawmidi snd_seq_device snd soundcore parport_pc parport shpchp ata_generic pata_acpi radeon i2c_algo_bit drm_kms_helper ttm drm pata_via sata_via i2c_core uinput Pid: 295, comm: systemd-journal Not tainted 3.9.0-0.rc6.git2.1.fc20.x86_64 #1 Call Trace: [] warn_slowpath_common+0x70/0xa0 [] warn_slowpath_fmt+0x4c/0x50 [] check_unmap+0x47d/0x930 [] ? local_clock+0x5f/0x70 [] debug_dma_unmap_page+0x5f/0x70 [] ? rhine_ack_events.isra.14+0x3c/0x50 [via_rhine] [] rhine_napipoll+0x1d8/0xd80 [via_rhine] [] ? net_rx_action+0xa1/0x380 [] net_rx_action+0x172/0x380 [] __do_softirq+0xff/0x400 [] irq_exit+0xb5/0xc0 [] do_IRQ+0x56/0xc0 [] common_interrupt+0x72/0x72 [] ? __slab_alloc+0x4c2/0x526 [] ? mmap_region+0x2b0/0x5a0 [] ? __lock_is_held+0x57/0x80 [] ? mmap_region+0x2b0/0x5a0 [] kmem_cache_alloc+0x2df/0x360 [] mmap_region+0x2b0/0x5a0 [] do_mmap_pgoff+0x316/0x3d0 [] vm_mmap_pgoff+0x90/0xc0 [] sys_mmap_pgoff+0x4c/0x190 [] ? trace_hardirqs_on_thunk+0x3a/0x3f [] sys_mmap+0x22/0x30 [] system_call_fastpath+0x16/0x1b Usual problem with the usual fix: add the appropriate calls to dma_mapping_error where appropriate. Untested, as I don't have hardware, but it's pretty straightforward. Signed-off-by: Neil Horman CC: David S. Miller CC: Roger Luethi Signed-off-by: David S.
Miller --- drivers/net/ethernet/via/via-rhine.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index ca98acabf1b4..b75eb9e0e867 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -1171,7 +1171,11 @@ static void alloc_rbufs(struct net_device *dev) rp->rx_skbuff_dma[i] = pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz, PCI_DMA_FROMDEVICE); - + if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) { + rp->rx_skbuff_dma[i] = 0; + dev_kfree_skb(skb); + break; + } rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]); rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); } @@ -1687,6 +1691,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, rp->tx_skbuff_dma[entry] = pci_map_single(rp->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) { + dev_kfree_skb(skb); + rp->tx_skbuff_dma[entry] = 0; + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]); } @@ -1961,6 +1971,11 @@ static int rhine_rx(struct net_device *dev, int limit) pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz, PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) { + dev_kfree_skb(skb); + rp->rx_skbuff_dma[entry] = 0; + break; + } rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]); } rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); -- cgit v1.2.3
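The atl1e and via-rhine fixes above follow the same pattern: check every streaming DMA mapping with dma_mapping_error() before the address reaches a descriptor, and unwind (free the skb, restore ring indices, count a drop) when the mapping fails. Below is a minimal sketch of that pattern under the old PCI DMA API used by both drivers; example_map_tx_skb() is a placeholder helper, not code from either driver, and the -ENOSPC return merely mirrors the atl1e change.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map an skb for transmit and verify the mapping before handing the
 * address to the hardware. */
static int example_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			      dma_addr_t *dma)
{
	*dma = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&pdev->dev, *dma)) {
		/* never let an unchecked address reach the descriptor ring */
		dev_kfree_skb_any(skb);
		return -ENOSPC;
	}
	return 0;
}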