path: root/drivers/net/ethernet/broadcom/genet/bcmgenet.c
Diffstat (limited to 'drivers/net/ethernet/broadcom/genet/bcmgenet.c')
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c  264
1 file changed, 149 insertions(+), 115 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index ce455aed5a2f..da1a2500c91c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -191,8 +191,9 @@ enum dma_reg {
DMA_STATUS,
DMA_SCB_BURST_SIZE,
DMA_ARB_CTRL,
- DMA_PRIORITY,
- DMA_RING_PRIORITY,
+ DMA_PRIORITY_0,
+ DMA_PRIORITY_1,
+ DMA_PRIORITY_2,
};
static const u8 bcmgenet_dma_regs_v3plus[] = {
@@ -201,8 +202,9 @@ static const u8 bcmgenet_dma_regs_v3plus[] = {
[DMA_STATUS] = 0x08,
[DMA_SCB_BURST_SIZE] = 0x0C,
[DMA_ARB_CTRL] = 0x2C,
- [DMA_PRIORITY] = 0x30,
- [DMA_RING_PRIORITY] = 0x38,
+ [DMA_PRIORITY_0] = 0x30,
+ [DMA_PRIORITY_1] = 0x34,
+ [DMA_PRIORITY_2] = 0x38,
};
static const u8 bcmgenet_dma_regs_v2[] = {
@@ -211,8 +213,9 @@ static const u8 bcmgenet_dma_regs_v2[] = {
[DMA_STATUS] = 0x08,
[DMA_SCB_BURST_SIZE] = 0x0C,
[DMA_ARB_CTRL] = 0x30,
- [DMA_PRIORITY] = 0x34,
- [DMA_RING_PRIORITY] = 0x3C,
+ [DMA_PRIORITY_0] = 0x34,
+ [DMA_PRIORITY_1] = 0x38,
+ [DMA_PRIORITY_2] = 0x3C,
};
static const u8 bcmgenet_dma_regs_v1[] = {
@@ -220,8 +223,9 @@ static const u8 bcmgenet_dma_regs_v1[] = {
[DMA_STATUS] = 0x04,
[DMA_SCB_BURST_SIZE] = 0x0C,
[DMA_ARB_CTRL] = 0x30,
- [DMA_PRIORITY] = 0x34,
- [DMA_RING_PRIORITY] = 0x3C,
+ [DMA_PRIORITY_0] = 0x34,
+ [DMA_PRIORITY_1] = 0x38,
+ [DMA_PRIORITY_2] = 0x3C,
};
/* Set at runtime once bcmgenet version is known */
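
The register split above mirrors how ring priorities are packed: with only a few bits per ring, one 32-bit DMA_PRIORITY register cannot describe all 17 rings (16 priority queues plus the default ring), so three registers are needed. Below is a minimal sketch of helper macros in the spirit of the DMA_PRIO_REG_INDEX()/DMA_PRIO_REG_SHIFT() helpers used further down in this patch, assuming 5-bit priority fields and 6 rings per register (the authoritative definitions live in bcmgenet.h):

	/* Assumed packing: 5 priority bits per ring, 6 rings per 32-bit register */
	#define DMA_RING_BUF_PRIORITY_SHIFT	5
	#define DMA_PRIO_REG_INDEX(q)	((q) / 6)	/* selects DMA_PRIORITY_0/1/2 */
	#define DMA_PRIO_REG_SHIFT(q)	(((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT)
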
@@ -739,7 +743,6 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
/* Power down LED */
- bcmgenet_mii_reset(priv->dev);
if (priv->hw_params->flags & GENET_HAS_EXT) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= (EXT_PWR_DOWN_PHY |
@@ -779,7 +782,9 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- bcmgenet_mii_reset(priv->dev);
+
+ if (mode == GENET_POWER_PASSIVE)
+ bcmgenet_mii_reset(priv->dev);
}
/* ioctl handle special commands that are not present in ethtool. */
@@ -874,6 +879,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
int last_tx_cn, last_c_index, num_tx_bds;
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
+ unsigned int bds_compl;
unsigned int c_index;
/* Compute how many buffers are transmitted since last xmit call */
@@ -898,7 +904,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
/* Reclaim transmitted buffers */
while (last_tx_cn-- > 0) {
tx_cb_ptr = ring->cbs + last_c_index;
+ bds_compl = 0;
if (tx_cb_ptr->skb) {
+ bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
dev->stats.tx_bytes += tx_cb_ptr->skb->len;
dma_unmap_single(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -915,7 +923,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
}
dev->stats.tx_packets++;
- ring->free_bds += 1;
+ ring->free_bds += bds_compl;
last_c_index++;
last_c_index &= (num_tx_bds - 1);
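
The free_bds change above matters because one transmitted skb can occupy one descriptor for its linear data plus one per page fragment; crediting back a single descriptor per reclaimed skb would slowly shrink the usable ring. A minimal sketch of that accounting with a hypothetical helper (not a function from this driver):

	#include <linux/skbuff.h>

	/* Hypothetical helper: descriptors consumed by one transmitted skb */
	static unsigned int tx_bds_for_skb(struct sk_buff *skb)
	{
		/* one BD for the linear part plus one per paged fragment */
		return skb_shinfo(skb)->nr_frags + 1;
	}

	/* the reclaim loop then credits back exactly what was consumed:
	 *	ring->free_bds += tx_bds_for_skb(tx_cb_ptr->skb);
	 */
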
@@ -1050,7 +1058,8 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
/* Reallocate the SKB to put enough headroom in front of it and insert
* the transmit checksum offsets in the descriptors
*/
-static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
+static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
+ struct sk_buff *skb)
{
struct status_64 *status = NULL;
struct sk_buff *new_skb;
@@ -1068,7 +1077,7 @@ static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
if (!new_skb) {
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
- return -ENOMEM;
+ return NULL;
}
skb = new_skb;
}
@@ -1086,7 +1095,7 @@ static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
ip_proto = ipv6_hdr(skb)->nexthdr;
break;
default:
- return 0;
+ return skb;
}
offset = skb_checksum_start_offset(skb) - sizeof(*status);
@@ -1107,7 +1116,7 @@ static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
status->tx_csum_info = tx_csum_info;
}
- return 0;
+ return skb;
}
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1154,8 +1163,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
/* set the SKB transmit checksum */
if (priv->desc_64b_en) {
- ret = bcmgenet_put_tx_csum(dev, skb);
- if (ret) {
+ skb = bcmgenet_put_tx_csum(dev, skb);
+ if (!skb) {
ret = NETDEV_TX_OK;
goto out;
}
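
The two hunks above change the contract of bcmgenet_put_tx_csum(): because it may call skb_realloc_headroom() and end up with a different skb, returning only an error code would leave the caller holding the stale pointer. A hedged sketch of the calling convention, using a hypothetical helper name:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical helper that may reallocate the skb; returns NULL on failure */
	static struct sk_buff *prepare_tx_skb(struct net_device *dev,
					      struct sk_buff *skb)
	{
		/* stand-in body: a real helper might skb_realloc_headroom() here */
		return skb;
	}

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		skb = prepare_tx_skb(dev, skb);	/* may hand back a brand new skb */
		if (!skb)
			return NETDEV_TX_OK;	/* drop path: stats updated by the helper */

		/* map and queue 'skb' from here on, never the original pointer */
		return NETDEV_TX_OK;
	}
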
@@ -1273,12 +1282,24 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
while ((rxpktprocessed < rxpkttoprocess) &&
(rxpktprocessed < budget)) {
+ cb = &priv->rx_cbs[priv->rx_read_ptr];
+ skb = cb->skb;
+
+ /* We do not have a backing SKB, so we do not have a
+ * corresponding DMA mapping for this incoming packet since
+ * bcmgenet_rx_refill always either has both skb and mapping or
+ * none.
+ */
+ if (unlikely(!skb)) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+ goto refill;
+ }
+
/* Unmap the packet contents such that we can use the
* RSV from the 64 bytes descriptor when enabled and save
* a 32-bits register read
*/
- cb = &priv->rx_cbs[priv->rx_read_ptr];
- skb = cb->skb;
dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
priv->rx_buf_len, DMA_FROM_DEVICE);
@@ -1306,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
__func__, p_index, priv->rx_c_index,
priv->rx_read_ptr, dma_length_status);
- rxpktprocessed++;
-
- priv->rx_read_ptr++;
- priv->rx_read_ptr &= (priv->num_rx_bds - 1);
-
- /* out of memory, just drop packets at the hardware level */
- if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
- dev->stats.rx_errors++;
- goto refill;
- }
-
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
@@ -1390,6 +1399,10 @@ refill:
err = bcmgenet_rx_refill(priv, cb);
if (err)
netif_err(priv, rx_err, dev, "Rx refill failed\n");
+
+ rxpktprocessed++;
+ priv->rx_read_ptr++;
+ priv->rx_read_ptr &= (priv->num_rx_bds - 1);
}
return rxpktprocessed;
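
With the bookkeeping moved past the refill label, every descriptor that is examined, whether delivered or dropped, advances rx_read_ptr and counts toward the budget, so the software consumer index can no longer fall out of step with the hardware on the drop path. The wrap-around itself is the usual power-of-two mask; a tiny self-contained illustration of that index arithmetic, with an assumed ring size:

	#include <stdio.h>

	int main(void)
	{
		unsigned int num_rx_bds = 256;	/* assumption: ring size, power of two */
		unsigned int rx_read_ptr = 255;	/* last slot in the ring */

		/* advance and wrap exactly as done after the refill label above */
		rx_read_ptr++;
		rx_read_ptr &= (num_rx_bds - 1);

		printf("%u\n", rx_read_ptr);	/* prints 0: wrapped back to the start */
		return 0;
	}
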
@@ -1686,7 +1699,8 @@ static void bcmgenet_init_multiq(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned int i, dma_enable;
- u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0;
+ u32 reg, dma_ctrl, ring_cfg = 0;
+ u32 dma_priority[3] = {0, 0, 0};
if (!netif_is_multiqueue(dev)) {
netdev_warn(dev, "called with non multi queue aware HW\n");
@@ -1711,22 +1725,25 @@ static void bcmgenet_init_multiq(struct net_device *dev)
/* Configure ring as descriptor ring and setup priority */
ring_cfg |= 1 << i;
- dma_priority |= ((GENET_Q0_PRIORITY + i) <<
- (GENET_MAX_MQ_CNT + 1) * i);
dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
+
+ dma_priority[DMA_PRIO_REG_INDEX(i)] |=
+ ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
}
+ /* Set ring 16 priority and program the hardware registers */
+ dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
+ ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
+ DMA_PRIO_REG_SHIFT(DESC_INDEX));
+ bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
+ bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
+ bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
+
/* Enable rings */
reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
reg |= ring_cfg;
bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);
- /* Use configured rings priority and set ring #16 priority */
- reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY);
- reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20);
- reg |= dma_priority;
- bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY);
-
/* Configure ring as descriptor ring and re-enable DMA if enabled */
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
reg |= dma_ctrl;
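
As a sanity check, the new helpers reproduce the hard-coded shift removed above for the default ring: assuming DESC_INDEX is 16 and the 6-rings-per-register packing sketched earlier, ring 16 lands in DMA_PRIORITY_2 at bit 20, exactly where the old code OR'd it into DMA_RING_PRIORITY:

	#include <assert.h>

	int main(void)
	{
		unsigned int q = 16;	/* assumption: DESC_INDEX, the default ring */

		assert(q / 6 == 2);		/* DMA_PRIO_REG_INDEX(16) -> DMA_PRIORITY_2 */
		assert((q % 6) * 5 == 20);	/* DMA_PRIO_REG_SHIFT(16), same bit as "<< 20" */
		return 0;
	}
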
@@ -1735,13 +1752,63 @@ static void bcmgenet_init_multiq(struct net_device *dev)
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+ int ret = 0;
+ int timeout = 0;
+ u32 reg;
+
+ /* Disable TDMA to stop add more frames in TX DMA */
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check TDMA status register to confirm TDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout == DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ /* Wait 10ms for packet drain in both tx and rx dma */
+ usleep_range(10000, 20000);
+
+ /* Disable RDMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ timeout = 0;
+ /* Check RDMA status register to confirm RDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout == DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
int i;
/* disable DMA */
- bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
- bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
+ bcmgenet_dma_teardown(priv);
for (i = 0; i < priv->num_tx_bds; i++) {
if (priv->tx_cbs[i].skb != NULL) {
@@ -1958,18 +2025,6 @@ static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
-static int bcmgenet_wol_resume(struct bcmgenet_priv *priv)
-{
- /* From WOL-enabled suspend, switch to regular clock */
- clk_disable_unprepare(priv->clk_wol);
-
- phy_init_hw(priv->phydev);
- /* Speed settings must be restored */
- bcmgenet_mii_config(priv->dev);
-
- return 0;
-}
-
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
@@ -2085,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq0;
}
+ /* Re-configure the port multiplexer towards the PHY device */
+ bcmgenet_mii_config(priv->dev, false);
+
+ phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
+ priv->phy_interface);
+
bcmgenet_netif_start(dev);
return 0;
@@ -2099,57 +2160,6 @@ err_clk_disable:
return ret;
}
-static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
-{
- int ret = 0;
- int timeout = 0;
- u32 reg;
-
- /* Disable TDMA to stop add more frames in TX DMA */
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- /* Check TDMA status register to confirm TDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
- netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
- ret = -ETIMEDOUT;
- }
-
- /* Wait 10ms for packet drain in both tx and rx dma */
- usleep_range(10000, 20000);
-
- /* Disable RDMA */
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- timeout = 0;
- /* Check RDMA status register to confirm RDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
- netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
- ret = -ETIMEDOUT;
- }
-
- return ret;
-}
-
static void bcmgenet_netif_stop(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -2164,6 +2174,11 @@ static void bcmgenet_netif_stop(struct net_device *dev)
* disabled no new work will be scheduled.
*/
cancel_work_sync(&priv->bcmgenet_irq_work);
+
+ priv->old_link = -1;
+ priv->old_speed = -1;
+ priv->old_duplex = -1;
+ priv->old_pause = -1;
}
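
Clearing the cached old_link/old_speed/old_duplex/old_pause values on stop guarantees that the PHY library's adjust_link callback sees a change on the first link event after the interface is reopened and reprograms the MAC, instead of comparing against stale values and skipping the update. A hedged sketch of that common caching pattern (hypothetical names; the driver's real handler lives in bcmmii.c):

	#include <linux/netdevice.h>
	#include <linux/phy.h>

	struct example_priv {
		struct phy_device *phydev;
		int old_link, old_speed, old_duplex;
	};

	/* Sketch of the usual "only touch the MAC when something changed" handler */
	static void example_adjust_link(struct net_device *dev)
	{
		struct example_priv *priv = netdev_priv(dev);
		struct phy_device *phydev = priv->phydev;

		if (phydev->link != priv->old_link ||
		    phydev->speed != priv->old_speed ||
		    phydev->duplex != priv->old_duplex) {
			/* reprogram MAC speed/duplex/pause registers here */
			priv->old_link = phydev->link;
			priv->old_speed = phydev->speed;
			priv->old_duplex = phydev->duplex;
		}
		/* setting old_* to -1 in netif_stop forces this branch to run
		 * on the first adjust_link call after a reopen
		 */
	}
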
static int bcmgenet_close(struct net_device *dev)
@@ -2175,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev)
bcmgenet_netif_stop(dev);
+ /* Really kill the PHY state machine and disconnect from it */
+ phy_disconnect(priv->phydev);
+
/* Disable MAC receive */
umac_enable_set(priv, CMD_RX_EN, false);
@@ -2426,6 +2444,13 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
major, (reg >> 16) & 0x0f, reg & 0xffff);
+ /* Store the integrated PHY revision for the MDIO probing function
+ * to pass this information to the PHY driver. The PHY driver expects
+ * to find the PHY major revision in bits 15:8 while the GENET register
+ * stores that information in bits 7:0, account for that.
+ */
+ priv->gphy_rev = (reg & 0xffff) << 8;
+
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!(params->flags & GENET_HAS_40BITS))
pr_warn("GENET does not support 40-bits PA\n");
@@ -2533,6 +2558,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->pdev = pdev;
priv->version = (enum bcmgenet_version)of_id->data;
+ priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
+ if (IS_ERR(priv->clk))
+ dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
bcmgenet_set_hw_params(priv);
/* Mii wait queue */
@@ -2541,17 +2573,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->rx_buf_len = RX_BUF_LENGTH;
INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
- priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
- if (IS_ERR(priv->clk))
- dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
-
priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
if (IS_ERR(priv->clk_wol))
dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
- if (!IS_ERR(priv->clk))
- clk_prepare_enable(priv->clk);
-
err = reset_umac(priv);
if (err)
goto err_clk_disable;
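
Hoisting the "enet" clock lookup and clk_prepare_enable() above bcmgenet_set_hw_params() in the two hunks above ensures the block is clocked before its version and feature registers are read. A hedged sketch of the generic get-then-enable pattern using the common clk API (error handling illustrative, not the driver's exact flow):

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Acquire and gate on a named clock before touching the block's registers */
	static int example_enable_block_clock(struct device *dev, struct clk **out)
	{
		struct clk *clk;
		int ret;

		clk = devm_clk_get(dev, "enet");	/* same clock name the driver uses */
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare_enable(clk);		/* must precede any register access */
		if (ret)
			return ret;

		*out = clk;
		return 0;
	}
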
@@ -2611,6 +2636,8 @@ static int bcmgenet_suspend(struct device *d)
bcmgenet_netif_stop(dev);
+ phy_suspend(priv->phydev);
+
netif_device_detach(dev);
/* Disable MAC receive */
@@ -2661,11 +2688,13 @@ static int bcmgenet_resume(struct device *d)
if (ret)
goto out_clk_disable;
+ /* From WOL-enabled suspend, switch to regular clock */
if (priv->wolopts)
- ret = bcmgenet_wol_resume(priv);
+ clk_disable_unprepare(priv->clk_wol);
- if (ret)
- goto out_clk_disable;
+ phy_init_hw(priv->phydev);
+ /* Speed settings must be restored */
+ bcmgenet_mii_config(priv->dev, false);
/* disable ethernet MAC while updating its registers */
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
@@ -2678,6 +2707,9 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
+ if (priv->wolopts)
+ bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
+
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
@@ -2693,6 +2725,8 @@ static int bcmgenet_resume(struct device *d)
netif_device_attach(dev);
+ phy_resume(priv->phydev);
+
bcmgenet_netif_start(dev);
return 0;