Diffstat (limited to 'drivers/net/ethernet/mediatek/mtk_eth_soc.c')
-rw-r--r-- | drivers/net/ethernet/mediatek/mtk_eth_soc.c | 999 |
1 file changed, 721 insertions, 278 deletions
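What follows is the phylink conversion of this driver: the phylib adjust_link callback (mtk_phy_link_adjust) and mtk_phy_connect() are removed in favour of a phylink_mac_ops implementation (mtk_phylink_ops), and PHY attachment moves from driver init to ndo_open. The sketch below condenses that lifecycle under a few assumptions: it relies on the struct mtk_mac fields and the ops table added by the patch, the example_* names and the abbreviated struct are illustrative only, and error handling is shortened compared with the real code in the diff.

#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>

/* Abbreviated stand-in for the driver's struct mtk_mac (see mtk_eth_soc.h). */
struct mtk_mac {
	struct device_node	*of_node;
	struct phylink		*phylink;
	struct phylink_config	phylink_config;
};

/* In the real driver this table is static and sits next to its callbacks. */
extern const struct phylink_mac_ops mtk_phylink_ops;

/* Probe time (mtk_add_mac): create the phylink instance, no PHY attach yet. */
static int example_add_mac(struct mtk_mac *mac, struct net_device *ndev,
			   struct device_node *np)
{
	struct phylink *phylink;
	int phy_mode = of_get_phy_mode(np);	/* "phy-mode" DT property */

	if (phy_mode < 0)
		return -EINVAL;

	mac->of_node = np;
	mac->phylink_config.dev = &ndev->dev;
	mac->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&mac->phylink_config, of_fwnode_handle(np),
				 phy_mode, &mtk_phylink_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	mac->phylink = phylink;
	return 0;
}

/* ndo_open: attach the PHY/fixed-link and start the link state machine. */
static int example_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);

	if (err)
		return err;
	phylink_start(mac->phylink);		/* replaces phy_start() */
	netif_start_queue(dev);
	return 0;
}

/* ndo_stop: stop the state machine before detaching the PHY. */
static int example_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	phylink_stop(mac->phylink);		/* replaces phy_stop() */
	netif_tx_disable(dev);
	phylink_disconnect_phy(mac->phylink);
	return 0;
}

The per-interface setup that mtk_phy_connect() used to do once at connect time (RGMII/SGMII path selection, SYSCFG0 GE mode) now lives in mtk_mac_config(), so phylink re-applies it whenever the interface mode changes rather than only when the PHY is first attached.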
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c39d7f4ab1d4..c61069340f4f 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -18,6 +18,7 @@ #include <linux/tcp.h> #include <linux/interrupt.h> #include <linux/pinctrl/devinfo.h> +#include <linux/phylink.h> #include "mtk_eth_soc.h" @@ -186,165 +187,339 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed) mtk_w32(eth, val, TRGMII_TCK_CTRL); } -static void mtk_phy_link_adjust(struct net_device *dev) +static void mtk_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) { - struct mtk_mac *mac = netdev_priv(dev); - u16 lcl_adv = 0, rmt_adv = 0; - u8 flowctrl; - u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | - MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | - MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | - MAC_MCR_BACKPR_EN; + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + struct mtk_eth *eth = mac->hw; + u32 mcr_cur, mcr_new, sid; + int val, ge_mode, err; + + /* MT76x8 has no hardware settings between for the MAC */ + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && + mac->interface != state->interface) { + /* Setup soc pin functions */ + switch (state->interface) { + case PHY_INTERFACE_MODE_TRGMII: + if (mac->id) + goto err_phy; + if (!MTK_HAS_CAPS(mac->hw->soc->caps, + MTK_GMAC1_TRGMII)) + goto err_phy; + /* fall through */ + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_REVMII: + case PHY_INTERFACE_MODE_RMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { + err = mtk_gmac_rgmii_path_setup(eth, mac->id); + if (err) + goto init_err; + } + break; + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + case PHY_INTERFACE_MODE_SGMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { + err = mtk_gmac_sgmii_path_setup(eth, mac->id); + if (err) + goto init_err; + } + break; + case PHY_INTERFACE_MODE_GMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { + err = mtk_gmac_gephy_path_setup(eth, mac->id); + if (err) + goto init_err; + } + break; + default: + goto err_phy; + } - if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + /* Setup clock for 1st gmac */ + if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_8023z(state->interface) && + MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) { + if (MTK_HAS_CAPS(mac->hw->soc->caps, + MTK_TRGMII_MT7621_CLK)) { + if (mt7621_gmac0_rgmii_adjust(mac->hw, + state->interface)) + goto err_phy; + } else { + if (state->interface != + PHY_INTERFACE_MODE_TRGMII) + mtk_gmac0_rgmii_adjust(mac->hw, + state->speed); + } + } + + ge_mode = 0; + switch (state->interface) { + case PHY_INTERFACE_MODE_MII: + ge_mode = 1; + break; + case PHY_INTERFACE_MODE_REVMII: + ge_mode = 2; + break; + case PHY_INTERFACE_MODE_RMII: + if (mac->id) + goto err_phy; + ge_mode = 3; + break; + default: + break; + } + + /* put the gmac into the right mode */ + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); + val |= SYSCFG0_GE_MODE(ge_mode, mac->id); + regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); + + mac->interface = state->interface; + } + + /* SGMII */ + if (state->interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(state->interface)) { + /* The path 
GMAC to SGMII will be enabled once the SGMIISYS is + * being setup done. + */ + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_MASK, + ~(u32)SYSCFG0_SGMII_MASK); + + /* Decide how GMAC and SGMIISYS be mapped */ + sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? + 0 : mac->id; + + /* Setup SGMIISYS with the determined property */ + if (state->interface != PHY_INTERFACE_MODE_SGMII) + err = mtk_sgmii_setup_mode_force(eth->sgmii, sid, + state); + else if (phylink_autoneg_inband(mode)) + err = mtk_sgmii_setup_mode_an(eth->sgmii, sid); + + if (err) + goto init_err; + + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_MASK, val); + } else if (phylink_autoneg_inband(mode)) { + dev_err(eth->dev, + "In-band mode not supported in non SGMII mode!\n"); return; + } - switch (dev->phydev->speed) { + /* Setup gmac */ + mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + mcr_new = mcr_cur; + mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 | + MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC | + MAC_MCR_FORCE_RX_FC); + mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | + MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK; + + switch (state->speed) { + case SPEED_2500: case SPEED_1000: - mcr |= MAC_MCR_SPEED_1000; + mcr_new |= MAC_MCR_SPEED_1000; break; case SPEED_100: - mcr |= MAC_MCR_SPEED_100; + mcr_new |= MAC_MCR_SPEED_100; break; } - - if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) && !mac->id) { - if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) { - if (mt7621_gmac0_rgmii_adjust(mac->hw, - dev->phydev->interface)) - return; - } else { - if (!mac->trgmii) - mtk_gmac0_rgmii_adjust(mac->hw, - dev->phydev->speed); - } + if (state->duplex == DUPLEX_FULL) { + mcr_new |= MAC_MCR_FORCE_DPX; + if (state->pause & MLO_PAUSE_TX) + mcr_new |= MAC_MCR_FORCE_TX_FC; + if (state->pause & MLO_PAUSE_RX) + mcr_new |= MAC_MCR_FORCE_RX_FC; } - if (dev->phydev->link) - mcr |= MAC_MCR_FORCE_LINK; + /* Only update control register when needed! */ + if (mcr_new != mcr_cur) + mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); - if (dev->phydev->duplex) { - mcr |= MAC_MCR_FORCE_DPX; + return; - if (dev->phydev->pause) - rmt_adv = LPA_PAUSE_CAP; - if (dev->phydev->asym_pause) - rmt_adv |= LPA_PAUSE_ASYM; +err_phy: + dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, + mac->id, phy_modes(state->interface)); + return; - lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising); - flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); +init_err: + dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, + mac->id, phy_modes(state->interface), err); +} - if (flowctrl & FLOW_CTRL_TX) - mcr |= MAC_MCR_FORCE_TX_FC; - if (flowctrl & FLOW_CTRL_RX) - mcr |= MAC_MCR_FORCE_RX_FC; +static int mtk_mac_link_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id)); - netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n", - flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled", - flowctrl & FLOW_CTRL_TX ? 
"enabled" : "disabled"); + state->link = (pmsr & MAC_MSR_LINK); + state->duplex = (pmsr & MAC_MSR_DPX) >> 1; + + switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) { + case 0: + state->speed = SPEED_10; + break; + case MAC_MSR_SPEED_100: + state->speed = SPEED_100; + break; + case MAC_MSR_SPEED_1000: + state->speed = SPEED_1000; + break; + default: + state->speed = SPEED_UNKNOWN; + break; } - mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); + state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX); + if (pmsr & MAC_MSR_RX_FC) + state->pause |= MLO_PAUSE_RX; + if (pmsr & MAC_MSR_TX_FC) + state->pause |= MLO_PAUSE_TX; - if (!of_phy_is_fixed_link(mac->of_node)) - phy_print_status(dev->phydev); + return 1; } -static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac, - struct device_node *phy_node) +static void mtk_mac_an_restart(struct phylink_config *config) { - struct phy_device *phydev; - int phy_mode; - - phy_mode = of_get_phy_mode(phy_node); - if (phy_mode < 0) { - dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode); - return -EINVAL; - } + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); - phydev = of_phy_connect(eth->netdev[mac->id], phy_node, - mtk_phy_link_adjust, 0, phy_mode); - if (!phydev) { - dev_err(eth->dev, "could not connect to PHY\n"); - return -ENODEV; - } + mtk_sgmii_restart_an(mac->hw, mac->id); +} - dev_info(eth->dev, - "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n", - mac->id, phydev_name(phydev), phydev->phy_id, - phydev->drv->name); +static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - return 0; + mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN); + mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); } -static int mtk_phy_connect(struct net_device *dev) +static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode, + phy_interface_t interface, + struct phy_device *phy) { - struct mtk_mac *mac = netdev_priv(dev); - struct mtk_eth *eth; - struct device_node *np; - u32 val; - int err; + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - eth = mac->hw; - np = of_parse_phandle(mac->of_node, "phy-handle", 0); - if (!np && of_phy_is_fixed_link(mac->of_node)) - if (!of_phy_register_fixed_link(mac->of_node)) - np = of_node_get(mac->of_node); - if (!np) - return -ENODEV; + mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN; + mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); +} - err = mtk_setup_hw_path(eth, mac->id, of_get_phy_mode(np)); - if (err) - goto err_phy; +static void mtk_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_GMII && + !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) && + phy_interface_mode_is_rgmii(state->interface)) && + !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && + !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) && + !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) && + (state->interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(state->interface)))) { + linkmode_zero(supported); + return; + } + + 
phylink_set_port_modes(mask); + phylink_set(mask, Autoneg); - mac->ge_mode = 0; - switch (of_get_phy_mode(np)) { + switch (state->interface) { case PHY_INTERFACE_MODE_TRGMII: - mac->trgmii = true; - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_ID: + phylink_set(mask, 1000baseT_Full); + break; + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 2500baseX_Full); + break; + case PHY_INTERFACE_MODE_GMII: case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + phylink_set(mask, 1000baseT_Half); + /* fall through */ case PHY_INTERFACE_MODE_SGMII: - break; + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + /* fall through */ case PHY_INTERFACE_MODE_MII: - case PHY_INTERFACE_MODE_GMII: - mac->ge_mode = 1; - break; - case PHY_INTERFACE_MODE_REVMII: - mac->ge_mode = 2; - break; case PHY_INTERFACE_MODE_RMII: - if (!mac->id) - goto err_phy; - mac->ge_mode = 3; - break; + case PHY_INTERFACE_MODE_REVMII: + case PHY_INTERFACE_MODE_NA: default: - goto err_phy; + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + break; } - /* put the gmac into the right mode */ - regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); - val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); - val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id); - regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); - - /* couple phydev to net_device */ - if (mtk_phy_connect_node(eth, mac, np)) - goto err_phy; + if (state->interface == PHY_INTERFACE_MODE_NA) { + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 2500baseX_Full); + } + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + phylink_set(mask, 1000baseX_Full); + } + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + } - of_node_put(np); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); - return 0; + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); -err_phy: - if (of_phy_is_fixed_link(mac->of_node)) - of_phy_deregister_fixed_link(mac->of_node); - of_node_put(np); - dev_err(eth->dev, "%s: invalid phy\n", __func__); - return -EINVAL; + /* We can only operate at 2500BaseX or 1000BaseX. If requested + * to advertise both, only report advertising at 2500BaseX. 
+ */ + phylink_helper_basex_speed(state); } +static const struct phylink_mac_ops mtk_phylink_ops = { + .validate = mtk_validate, + .mac_link_state = mtk_mac_link_state, + .mac_an_restart = mtk_mac_an_restart, + .mac_config = mtk_mac_config, + .mac_link_down = mtk_mac_link_down, + .mac_link_up = mtk_mac_link_up, +}; + static int mtk_mdio_init(struct mtk_eth *eth) { struct device_node *mii_np; @@ -395,8 +570,8 @@ static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) u32 val; spin_lock_irqsave(ð->tx_irq_lock, flags); - val = mtk_r32(eth, MTK_QDMA_INT_MASK); - mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK); + val = mtk_r32(eth, eth->tx_int_mask_reg); + mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg); spin_unlock_irqrestore(ð->tx_irq_lock, flags); } @@ -406,8 +581,8 @@ static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) u32 val; spin_lock_irqsave(ð->tx_irq_lock, flags); - val = mtk_r32(eth, MTK_QDMA_INT_MASK); - mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK); + val = mtk_r32(eth, eth->tx_int_mask_reg); + mtk_w32(eth, val | mask, eth->tx_int_mask_reg); spin_unlock_irqrestore(ð->tx_irq_lock, flags); } @@ -437,6 +612,7 @@ static int mtk_set_mac_address(struct net_device *dev, void *p) { int ret = eth_mac_addr(dev, p); struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; const char *macaddr = dev->dev_addr; if (ret) @@ -446,11 +622,19 @@ static int mtk_set_mac_address(struct net_device *dev, void *p) return -EBUSY; spin_lock_bh(&mac->hw->page_lock); - mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], - MTK_GDMA_MAC_ADRH(mac->id)); - mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | - (macaddr[4] << 8) | macaddr[5], - MTK_GDMA_MAC_ADRL(mac->id)); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], + MT7628_SDM_MAC_ADRH); + mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | + (macaddr[4] << 8) | macaddr[5], + MT7628_SDM_MAC_ADRL); + } else { + mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], + MTK_GDMA_MAC_ADRH(mac->id)); + mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | + (macaddr[4] << 8) | macaddr[5], + MTK_GDMA_MAC_ADRL(mac->id)); + } spin_unlock_bh(&mac->hw->page_lock); return 0; @@ -626,19 +810,47 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, return &ring->buf[idx]; } +static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring, + struct mtk_tx_dma *dma) +{ + return ring->dma_pdma - ring->dma + dma; +} + +static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma) +{ + return ((void *)dma - (void *)ring->dma) / sizeof(*dma); +} + static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) { - if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { - dma_unmap_single(eth->dev, - dma_unmap_addr(tx_buf, dma_addr0), - dma_unmap_len(tx_buf, dma_len0), - DMA_TO_DEVICE); - } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { - dma_unmap_page(eth->dev, - dma_unmap_addr(tx_buf, dma_addr0), - dma_unmap_len(tx_buf, dma_len0), - DMA_TO_DEVICE); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { + dma_unmap_single(eth->dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { + dma_unmap_page(eth->dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + } else { + if (dma_unmap_len(tx_buf, dma_len0)) { + dma_unmap_page(eth->dev, + dma_unmap_addr(tx_buf, dma_addr0), 
+ dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + + if (dma_unmap_len(tx_buf, dma_len1)) { + dma_unmap_page(eth->dev, + dma_unmap_addr(tx_buf, dma_addr1), + dma_unmap_len(tx_buf, dma_len1), + DMA_TO_DEVICE); + } } + tx_buf->flags = 0; if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) @@ -646,19 +858,45 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) tx_buf->skb = NULL; } +static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, + struct mtk_tx_dma *txd, dma_addr_t mapped_addr, + size_t size, int idx) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, size); + } else { + if (idx & 1) { + txd->txd3 = mapped_addr; + txd->txd2 |= TX_DMA_PLEN1(size); + dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len1, size); + } else { + tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; + txd->txd1 = mapped_addr; + txd->txd2 = TX_DMA_PLEN0(size); + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, size); + } + } +} + static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, int tx_num, struct mtk_tx_ring *ring, bool gso) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; struct mtk_tx_dma *itxd, *txd; + struct mtk_tx_dma *itxd_pdma, *txd_pdma; struct mtk_tx_buf *itx_buf, *tx_buf; dma_addr_t mapped_addr; unsigned int nr_frags; int i, n_desc = 1; u32 txd4 = 0, fport; + int k = 0; itxd = ring->next_free; + itxd_pdma = qdma_to_pdma(ring, itxd); if (itxd == ring->last_free) return -ENOMEM; @@ -689,26 +927,37 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, itx_buf->flags |= MTK_TX_FLAGS_SINGLE0; itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1; - dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr); - dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb)); + setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb), + k++); /* TX SG offload */ txd = itxd; + txd_pdma = qdma_to_pdma(ring, txd); nr_frags = skb_shinfo(skb)->nr_frags; + for (i = 0; i < nr_frags; i++) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; unsigned int offset = 0; int frag_size = skb_frag_size(frag); while (frag_size) { bool last_frag = false; unsigned int frag_map_size; + bool new_desc = true; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) || + (i & 0x1)) { + txd = mtk_qdma_phys_to_virt(ring, txd->txd2); + txd_pdma = qdma_to_pdma(ring, txd); + if (txd == ring->last_free) + goto err_dma; + + n_desc++; + } else { + new_desc = false; + } - txd = mtk_qdma_phys_to_virt(ring, txd->txd2); - if (txd == ring->last_free) - goto err_dma; - n_desc++; frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, frag_map_size, @@ -727,14 +976,16 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, WRITE_ONCE(txd->txd4, fport); tx_buf = mtk_desc_to_tx_buf(ring, txd); - memset(tx_buf, 0, sizeof(*tx_buf)); + if (new_desc) + memset(tx_buf, 0, sizeof(*tx_buf)); tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; tx_buf->flags |= MTK_TX_FLAGS_PAGE0; tx_buf->flags |= (!mac->id) ? 
MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1; - dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); - dma_unmap_len_set(tx_buf, dma_len0, frag_map_size); + setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr, + frag_map_size, k++); + frag_size -= frag_map_size; offset += frag_map_size; } @@ -746,6 +997,12 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, WRITE_ONCE(itxd->txd4, txd4); WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | (!nr_frags * TX_DMA_LS0))); + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (k & 0x1) + txd_pdma->txd2 |= TX_DMA_LS0; + else + txd_pdma->txd2 |= TX_DMA_LS1; + } netdev_sent_queue(dev, skb->len); skb_tx_timestamp(skb); @@ -758,9 +1015,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, */ wmb(); - if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || - !netdev_xmit_more()) - mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || + !netdev_xmit_more()) + mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); + } else { + int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd), + ring->dma_size); + mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); + } return 0; @@ -772,7 +1035,11 @@ err_dma: mtk_tx_unmap(eth, tx_buf); itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + itxd_pdma->txd2 = TX_DMA_DESP2_DEF; + itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); + itxd_pdma = qdma_to_pdma(ring, itxd); } while (itxd != txd); return -ENOMEM; @@ -781,13 +1048,14 @@ err_dma: static inline int mtk_cal_txd_req(struct sk_buff *skb) { int i, nfrags; - struct skb_frag_struct *frag; + skb_frag_t *frag; nfrags = 1; if (skb_is_gso(skb)) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; - nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN); + nfrags += DIV_ROUND_UP(skb_frag_size(frag), + MTK_TX_DMA_BUF_LEN); } } else { nfrags += skb_shinfo(skb)->nr_frags; @@ -902,7 +1170,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { ring = ð->rx_ring[i]; - idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size); + idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); if (ring->dma[idx].rxd2 & RX_DMA_DONE) { ring->calc_idx_update = true; return ring; @@ -945,13 +1213,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, struct net_device *netdev; unsigned int pktlen; dma_addr_t dma_addr; - int mac = 0; + int mac; ring = mtk_get_rx_ring(eth); if (unlikely(!ring)) goto rx_done; - idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size); + idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); rxd = &ring->dma[idx]; data = ring->data[idx]; @@ -960,9 +1228,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, break; /* find out which mac the packet come from. 
values start at 1 */ - mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & - RX_DMA_FPORT_MASK; - mac--; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + mac = 0; + } else { + mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & + RX_DMA_FPORT_MASK; + mac--; + } if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || !eth->netdev[mac])) @@ -980,7 +1252,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto release_desc; } dma_addr = dma_map_single(eth->dev, - new_data + NET_SKB_PAD, + new_data + NET_SKB_PAD + + eth->ip_align, ring->buf_size, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { @@ -1003,7 +1276,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); skb->dev = netdev; skb_put(skb, pktlen); - if (trxd.rxd4 & RX_DMA_L4_VALID) + if (trxd.rxd4 & eth->rx_dma_l4_valid) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); @@ -1020,7 +1293,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, rxd->rxd1 = (unsigned int)dma_addr; release_desc: - rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + rxd->rxd2 = RX_DMA_LSO; + else + rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); ring->calc_idx = idx; @@ -1039,19 +1315,14 @@ rx_done: return done; } -static int mtk_poll_tx(struct mtk_eth *eth, int budget) +static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, + unsigned int *done, unsigned int *bytes) { struct mtk_tx_ring *ring = ð->tx_ring; struct mtk_tx_dma *desc; struct sk_buff *skb; struct mtk_tx_buf *tx_buf; - unsigned int done[MTK_MAX_DEVS]; - unsigned int bytes[MTK_MAX_DEVS]; u32 cpu, dma; - int total = 0, i; - - memset(done, 0, sizeof(done)); - memset(bytes, 0, sizeof(bytes)); cpu = mtk_r32(eth, MTK_QTX_CRX_PTR); dma = mtk_r32(eth, MTK_QTX_DRX_PTR); @@ -1089,6 +1360,62 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); + return budget; +} + +static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, + unsigned int *done, unsigned int *bytes) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + struct mtk_tx_dma *desc; + struct sk_buff *skb; + struct mtk_tx_buf *tx_buf; + u32 cpu, dma; + + cpu = ring->cpu_idx; + dma = mtk_r32(eth, MT7628_TX_DTX_IDX0); + + while ((cpu != dma) && budget) { + tx_buf = &ring->buf[cpu]; + skb = tx_buf->skb; + if (!skb) + break; + + if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) { + bytes[0] += skb->len; + done[0]++; + budget--; + } + + mtk_tx_unmap(eth, tx_buf); + + desc = &ring->dma[cpu]; + ring->last_free = desc; + atomic_inc(&ring->free_count); + + cpu = NEXT_DESP_IDX(cpu, ring->dma_size); + } + + ring->cpu_idx = cpu; + + return budget; +} + +static int mtk_poll_tx(struct mtk_eth *eth, int budget) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + unsigned int done[MTK_MAX_DEVS]; + unsigned int bytes[MTK_MAX_DEVS]; + int total = 0, i; + + memset(done, 0, sizeof(done)); + memset(bytes, 0, sizeof(bytes)); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + budget = mtk_poll_tx_qdma(eth, budget, done, bytes); + else + budget = mtk_poll_tx_pdma(eth, budget, done, bytes); + for (i = 0; i < MTK_MAC_COUNT; i++) { if (!eth->netdev[i] || !done[i]) continue; @@ -1120,13 +1447,14 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget) u32 status, mask; int tx_done = 0; - mtk_handle_status_irq(eth); - mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + mtk_handle_status_irq(eth); + mtk_w32(eth, MTK_TX_DONE_INT, 
eth->tx_int_status_reg); tx_done = mtk_poll_tx(eth, budget); if (unlikely(netif_msg_intr(eth))) { - status = mtk_r32(eth, MTK_QMTK_INT_STATUS); - mask = mtk_r32(eth, MTK_QDMA_INT_MASK); + status = mtk_r32(eth, eth->tx_int_status_reg); + mask = mtk_r32(eth, eth->tx_int_mask_reg); dev_info(eth->dev, "done tx %d, intr 0x%08x/0x%x\n", tx_done, status, mask); @@ -1135,7 +1463,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget) if (tx_done == budget) return budget; - status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + status = mtk_r32(eth, eth->tx_int_status_reg); if (status & MTK_TX_DONE_INT) return budget; @@ -1202,6 +1530,24 @@ static int mtk_tx_alloc(struct mtk_eth *eth) ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; } + /* On MT7688 (PDMA only) this driver uses the ring->dma structs + * only as the framework. The real HW descriptors are the PDMA + * descriptors in ring->dma_pdma. + */ + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, + &ring->phys_pdma, + GFP_ATOMIC); + if (!ring->dma_pdma) + goto no_tx_mem; + + for (i = 0; i < MTK_DMA_SIZE; i++) { + ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF; + ring->dma_pdma[i].txd4 = 0; + } + } + + ring->dma_size = MTK_DMA_SIZE; atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); ring->next_free = &ring->dma[0]; ring->last_free = &ring->dma[MTK_DMA_SIZE - 1]; @@ -1212,15 +1558,23 @@ static int mtk_tx_alloc(struct mtk_eth *eth) */ wmb(); - mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR); - mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR); - mtk_w32(eth, - ring->phys + ((MTK_DMA_SIZE - 1) * sz), - MTK_QTX_CRX_PTR); - mtk_w32(eth, - ring->phys + ((MTK_DMA_SIZE - 1) * sz), - MTK_QTX_DRX_PTR); - mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR); + mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR); + mtk_w32(eth, + ring->phys + ((MTK_DMA_SIZE - 1) * sz), + MTK_QTX_CRX_PTR); + mtk_w32(eth, + ring->phys + ((MTK_DMA_SIZE - 1) * sz), + MTK_QTX_DRX_PTR); + mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, + MTK_QTX_CFG(0)); + } else { + mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); + mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0); + mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); + mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX); + } return 0; @@ -1247,6 +1601,14 @@ static void mtk_tx_clean(struct mtk_eth *eth) ring->phys); ring->dma = NULL; } + + if (ring->dma_pdma) { + dma_free_coherent(eth->dev, + MTK_DMA_SIZE * sizeof(*ring->dma_pdma), + ring->dma_pdma, + ring->phys_pdma); + ring->dma_pdma = NULL; + } } static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) @@ -1294,14 +1656,17 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) for (i = 0; i < rx_dma_size; i++) { dma_addr_t dma_addr = dma_map_single(eth->dev, - ring->data[i] + NET_SKB_PAD, + ring->data[i] + NET_SKB_PAD + eth->ip_align, ring->buf_size, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(eth->dev, dma_addr))) return -ENOMEM; ring->dma[i].rxd1 = (unsigned int)dma_addr; - ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + ring->dma[i].rxd2 = RX_DMA_LSO; + else + ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size); } ring->dma_size = rx_dma_size; ring->calc_idx_update = false; @@ -1617,9 +1982,16 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth) unsigned long t_start = jiffies; while (1) { - if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) & - (MTK_RX_DMA_BUSY 
| MTK_TX_DMA_BUSY))) - return 0; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) & + (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) + return 0; + } else { + if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) & + (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) + return 0; + } + if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT)) break; } @@ -1636,20 +2008,24 @@ static int mtk_dma_init(struct mtk_eth *eth) if (mtk_dma_busy_wait(eth)) return -EBUSY; - /* QDMA needs scratch memory for internal reordering of the - * descriptors - */ - err = mtk_init_fq_dma(eth); - if (err) - return err; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + /* QDMA needs scratch memory for internal reordering of the + * descriptors + */ + err = mtk_init_fq_dma(eth); + if (err) + return err; + } err = mtk_tx_alloc(eth); if (err) return err; - err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); - if (err) - return err; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); + if (err) + return err; + } err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); if (err) @@ -1666,10 +2042,14 @@ static int mtk_dma_init(struct mtk_eth *eth) return err; } - /* Enable random early drop and set drop threshold automatically */ - mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN, - MTK_QDMA_FC_THRES); - mtk_w32(eth, 0x0, MTK_QDMA_HRED2); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + /* Enable random early drop and set drop threshold + * automatically + */ + mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | + FC_THRES_MIN, MTK_QDMA_FC_THRES); + mtk_w32(eth, 0x0, MTK_QDMA_HRED2); + } return 0; } @@ -1745,8 +2125,8 @@ static irqreturn_t mtk_handle_irq(int irq, void *_eth) if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT) mtk_handle_irq_rx(irq, _eth); } - if (mtk_r32(eth, MTK_QDMA_INT_MASK) & MTK_TX_DONE_INT) { - if (mtk_r32(eth, MTK_QMTK_INT_STATUS) & MTK_TX_DONE_INT) + if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) { + if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT) mtk_handle_irq_tx(irq, _eth); } @@ -1778,17 +2158,23 @@ static int mtk_start_dma(struct mtk_eth *eth) return err; } - mtk_w32(eth, - MTK_TX_WB_DDONE | MTK_TX_DMA_EN | - MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO | - MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | - MTK_RX_BT_32DWORDS, - MTK_QDMA_GLO_CFG); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + mtk_w32(eth, + MTK_TX_WB_DDONE | MTK_TX_DMA_EN | + MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO | + MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | + MTK_RX_BT_32DWORDS, + MTK_QDMA_GLO_CFG); - mtk_w32(eth, - MTK_RX_DMA_EN | rx_2b_offset | - MTK_RX_BT_32DWORDS | MTK_MULTI_EN, - MTK_PDMA_GLO_CFG); + mtk_w32(eth, + MTK_RX_DMA_EN | rx_2b_offset | + MTK_RX_BT_32DWORDS | MTK_MULTI_EN, + MTK_PDMA_GLO_CFG); + } else { + mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | + MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS, + MTK_PDMA_GLO_CFG); + } return 0; } @@ -1797,6 +2183,14 @@ static int mtk_open(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; + int err; + + err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); + if (err) { + netdev_err(dev, "%s: could not attach PHY: %d\n", __func__, + err); + return err; + } /* we run 2 netdevs on the same dma ring so we only bring it up once */ if (!refcount_read(ð->dma_refcnt)) { @@ -1814,9 +2208,8 @@ static int mtk_open(struct net_device *dev) else refcount_inc(ð->dma_refcnt); - phy_start(dev->phydev); + phylink_start(mac->phylink); netif_start_queue(dev); - return 0; } @@ 
-1848,8 +2241,11 @@ static int mtk_stop(struct net_device *dev) struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; + phylink_stop(mac->phylink); + netif_tx_disable(dev); - phy_stop(dev->phydev); + + phylink_disconnect_phy(mac->phylink); /* only shutdown DMA if this is the last user */ if (!refcount_dec_and_test(ð->dma_refcnt)) @@ -1860,7 +2256,8 @@ static int mtk_stop(struct net_device *dev) napi_disable(ð->tx_napi); napi_disable(ð->rx_napi); - mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); mtk_stop_dma(eth, MTK_PDMA_GLO_CFG); mtk_dma_free(eth); @@ -1922,17 +2319,26 @@ static int mtk_hw_init(struct mtk_eth *eth) if (ret) goto err_disable_pm; - ethsys_reset(eth, RSTCTRL_FE); - ethsys_reset(eth, RSTCTRL_PPE); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + ret = device_reset(eth->dev); + if (ret) { + dev_err(eth->dev, "MAC reset failed!\n"); + goto err_disable_pm; + } - regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); - for (i = 0; i < MTK_MAC_COUNT; i++) { - if (!eth->mac[i]) - continue; - val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id); - val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id); + /* enable interrupt delay for RX */ + mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT); + + /* disable delay and normal interrupt */ + mtk_tx_irq_disable(eth, ~0); + mtk_rx_irq_disable(eth, ~0); + + return 0; } - regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); + + /* Non-MT7628 handling... */ + ethsys_reset(eth, RSTCTRL_FE); + ethsys_reset(eth, RSTCTRL_PPE); if (eth->pctl) { /* Set GE2 driving and slew rate */ @@ -1946,11 +2352,11 @@ static int mtk_hw_init(struct mtk_eth *eth) } /* Set linkdown as the default for each GMAC. Its own MCR would be set - * up with the more appropriate value when mtk_phy_link_adjust call is - * being invoked. + * up with the more appropriate value when mtk_mac_config call is being + * invoked. */ for (i = 0; i < MTK_MAC_COUNT; i++) - mtk_w32(eth, 0, MTK_MAC_MCR(i)); + mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); /* Indicates CDM to parse the MTK special tag from CPU * which also is working out for untag packets. 
@@ -1978,7 +2384,7 @@ static int mtk_hw_init(struct mtk_eth *eth) mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2); mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); - for (i = 0; i < 2; i++) { + for (i = 0; i < MTK_MAC_COUNT; i++) { u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); /* setup the forward port to send frame to PDMA */ @@ -2030,7 +2436,7 @@ static int __init mtk_init(struct net_device *dev) dev->dev_addr); } - return mtk_phy_connect(dev); + return 0; } static void mtk_uninit(struct net_device *dev) @@ -2038,20 +2444,20 @@ static void mtk_uninit(struct net_device *dev) struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; - phy_disconnect(dev->phydev); - if (of_phy_is_fixed_link(mac->of_node)) - of_phy_deregister_fixed_link(mac->of_node); + phylink_disconnect_phy(mac->phylink); mtk_tx_irq_disable(eth, ~0); mtk_rx_irq_disable(eth, ~0); } static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { + struct mtk_mac *mac = netdev_priv(dev); + switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: - return phy_mii_ioctl(dev->phydev, ifr, cmd); + return phylink_mii_ioctl(mac->phylink, ifr, cmd); default: break; } @@ -2092,16 +2498,6 @@ static void mtk_pending_work(struct work_struct *work) eth->dev->pins->default_state); mtk_hw_init(eth); - for (i = 0; i < MTK_MAC_COUNT; i++) { - if (!eth->mac[i] || - of_phy_is_fixed_link(eth->mac[i]->of_node)) - continue; - err = phy_init_hw(eth->netdev[i]->phydev); - if (err) - dev_err(eth->dev, "%s: PHY init failed.\n", - eth->netdev[i]->name); - } - /* restart DMA and enable IRQs */ for (i = 0; i < MTK_MAC_COUNT; i++) { if (!test_bit(i, &restart)) @@ -2164,9 +2560,7 @@ static int mtk_get_link_ksettings(struct net_device *ndev, if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) return -EBUSY; - phy_ethtool_ksettings_get(ndev->phydev, cmd); - - return 0; + return phylink_ethtool_ksettings_get(mac->phylink, cmd); } static int mtk_set_link_ksettings(struct net_device *ndev, @@ -2177,7 +2571,7 @@ static int mtk_set_link_ksettings(struct net_device *ndev, if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) return -EBUSY; - return phy_ethtool_ksettings_set(ndev->phydev, cmd); + return phylink_ethtool_ksettings_set(mac->phylink, cmd); } static void mtk_get_drvinfo(struct net_device *dev, @@ -2211,22 +2605,10 @@ static int mtk_nway_reset(struct net_device *dev) if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) return -EBUSY; - return genphy_restart_aneg(dev->phydev); -} + if (!mac->phylink) + return -ENOTSUPP; -static u32 mtk_get_link(struct net_device *dev) -{ - struct mtk_mac *mac = netdev_priv(dev); - int err; - - if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) - return -EBUSY; - - err = genphy_update_link(dev->phydev); - if (err) - return ethtool_op_get_link(dev); - - return dev->phydev->link; + return phylink_ethtool_nway_reset(mac->phylink); } static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -2346,7 +2728,7 @@ static const struct ethtool_ops mtk_ethtool_ops = { .get_msglevel = mtk_get_msglevel, .set_msglevel = mtk_set_msglevel, .nway_reset = mtk_nway_reset, - .get_link = mtk_get_link, + .get_link = ethtool_op_get_link, .get_strings = mtk_get_strings, .get_sset_count = mtk_get_sset_count, .get_ethtool_stats = mtk_get_ethtool_stats, @@ -2374,9 +2756,10 @@ static const struct net_device_ops mtk_netdev_ops = { static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) { - struct mtk_mac *mac; const __be32 *_id = of_get_property(np, "reg", NULL); - int id, 
err; + struct phylink *phylink; + int phy_mode, id, err; + struct mtk_mac *mac; if (!_id) { dev_err(eth->dev, "missing mac id\n"); @@ -2420,18 +2803,44 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) u64_stats_init(&mac->hw_stats->syncp); mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; + /* phylink create */ + phy_mode = of_get_phy_mode(np); + if (phy_mode < 0) { + dev_err(eth->dev, "incorrect phy-mode\n"); + err = -EINVAL; + goto free_netdev; + } + + /* mac config is not set */ + mac->interface = PHY_INTERFACE_MODE_NA; + mac->mode = MLO_AN_PHY; + mac->speed = SPEED_UNKNOWN; + + mac->phylink_config.dev = ð->netdev[id]->dev; + mac->phylink_config.type = PHYLINK_NETDEV; + + phylink = phylink_create(&mac->phylink_config, + of_fwnode_handle(mac->of_node), + phy_mode, &mtk_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + goto free_netdev; + } + + mac->phylink = phylink; + SET_NETDEV_DEV(eth->netdev[id], eth->dev); eth->netdev[id]->watchdog_timeo = 5 * HZ; eth->netdev[id]->netdev_ops = &mtk_netdev_ops; eth->netdev[id]->base_addr = (unsigned long)eth->base; - eth->netdev[id]->hw_features = MTK_HW_FEATURES; + eth->netdev[id]->hw_features = eth->soc->hw_features; if (eth->hwlro) eth->netdev[id]->hw_features |= NETIF_F_LRO; - eth->netdev[id]->vlan_features = MTK_HW_FEATURES & + eth->netdev[id]->vlan_features = eth->soc->hw_features & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); - eth->netdev[id]->features |= MTK_HW_FEATURES; + eth->netdev[id]->features |= eth->soc->hw_features; eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; eth->netdev[id]->irq = eth->irq[0]; @@ -2446,11 +2855,9 @@ free_netdev: static int mtk_probe(struct platform_device *pdev) { - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct device_node *mac_np; struct mtk_eth *eth; - int err; - int i; + int err, i; eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); if (!eth) @@ -2459,19 +2866,36 @@ static int mtk_probe(struct platform_device *pdev) eth->soc = of_device_get_match_data(&pdev->dev); eth->dev = &pdev->dev; - eth->base = devm_ioremap_resource(&pdev->dev, res); + eth->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(eth->base)) return PTR_ERR(eth->base); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + eth->tx_int_mask_reg = MTK_QDMA_INT_MASK; + eth->tx_int_status_reg = MTK_QDMA_INT_STATUS; + } else { + eth->tx_int_mask_reg = MTK_PDMA_INT_MASK; + eth->tx_int_status_reg = MTK_PDMA_INT_STATUS; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA; + eth->ip_align = NET_IP_ALIGN; + } else { + eth->rx_dma_l4_valid = RX_DMA_L4_VALID; + } + spin_lock_init(ð->page_lock); spin_lock_init(ð->tx_irq_lock); spin_lock_init(ð->rx_irq_lock); - eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, - "mediatek,ethsys"); - if (IS_ERR(eth->ethsys)) { - dev_err(&pdev->dev, "no ethsys regmap found\n"); - return PTR_ERR(eth->ethsys); + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,ethsys"); + if (IS_ERR(eth->ethsys)) { + dev_err(&pdev->dev, "no ethsys regmap found\n"); + return PTR_ERR(eth->ethsys); + } } if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) { @@ -2572,9 +2996,12 @@ static int mtk_probe(struct platform_device *pdev) if (err) goto err_free_dev; - err = mtk_mdio_init(eth); - if (err) - goto err_free_dev; + /* No MT7628/88 support yet */ + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + err = 
mtk_mdio_init(eth); + if (err) + goto err_free_dev; + } for (i = 0; i < MTK_MAX_DEVS; i++) { if (!eth->netdev[i]) @@ -2616,6 +3043,7 @@ err_deinit_hw: static int mtk_remove(struct platform_device *pdev) { struct mtk_eth *eth = platform_get_drvdata(pdev); + struct mtk_mac *mac; int i; /* stop all devices to make sure that dma is properly shut down */ @@ -2623,6 +3051,8 @@ static int mtk_remove(struct platform_device *pdev) if (!eth->netdev[i]) continue; mtk_stop(eth->netdev[i]); + mac = netdev_priv(eth->netdev[i]); + phylink_disconnect_phy(mac->phylink); } mtk_hw_deinit(eth); @@ -2637,12 +3067,14 @@ static int mtk_remove(struct platform_device *pdev) static const struct mtk_soc_data mt2701_data = { .caps = MT7623_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, }; static const struct mtk_soc_data mt7621_data = { .caps = MT7621_CAPS, + .hw_features = MTK_HW_FEATURES, .required_clks = MT7621_CLKS_BITMAP, .required_pctl = false, }; @@ -2650,12 +3082,14 @@ static const struct mtk_soc_data mt7621_data = { static const struct mtk_soc_data mt7622_data = { .ana_rgc3 = 0x2028, .caps = MT7622_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, .required_clks = MT7622_CLKS_BITMAP, .required_pctl = false, }; static const struct mtk_soc_data mt7623_data = { .caps = MT7623_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, }; @@ -2663,16 +3097,25 @@ static const struct mtk_soc_data mt7623_data = { static const struct mtk_soc_data mt7629_data = { .ana_rgc3 = 0x128, .caps = MT7629_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, .required_clks = MT7629_CLKS_BITMAP, .required_pctl = false, }; +static const struct mtk_soc_data rt5350_data = { + .caps = MT7628_CAPS, + .hw_features = MTK_HW_FEATURES_MT7628, + .required_clks = MT7628_CLKS_BITMAP, + .required_pctl = false, +}; + const struct of_device_id of_mtk_match[] = { { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data}, { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data}, { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data}, { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data}, + { .compatible = "ralink,rt5350-eth", .data = &rt5350_data}, {}, }; MODULE_DEVICE_TABLE(of, of_mtk_match); |
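Beyond phylink, the patch above adds a PDMA-only TX path for MT7628/88. Each PDMA TX descriptor carries two buffer slots (txd1 with TX_DMA_PLEN0 and txd3 with TX_DMA_PLEN1, both lengths packed into txd2), so setup_tx_buf() fills the first slot on even indices and only uses the second slot when idx & 1 is set, letting two fragments share one hardware descriptor. The toy userspace model below illustrates just that pairing; the struct name, the flat addr/len fields and the main() driver are made up for the example and do not reflect the real descriptor bit layout.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flat view of one PDMA TX descriptor: two buffer slots. */
struct toy_pdma_txd {
	uint32_t addr0, len0;	/* first slot  (txd1 / TX_DMA_PLEN0 in the driver) */
	uint32_t addr1, len1;	/* second slot (txd3 / TX_DMA_PLEN1 in the driver) */
};

/* Mirrors the "idx & 1" branch in setup_tx_buf(): even k opens a fresh
 * descriptor's first slot, odd k fills the second slot of the same one.
 */
static void toy_setup_buf(struct toy_pdma_txd *txd, int k,
			  uint32_t addr, uint32_t len)
{
	if (k & 1) {
		txd->addr1 = addr;
		txd->len1 = len;
	} else {
		txd->addr0 = addr;
		txd->len0 = len;
	}
}

int main(void)
{
	struct toy_pdma_txd ring[4] = { 0 };
	uint32_t frags[3][2] = { { 0x1000, 64 }, { 0x2000, 128 }, { 0x3000, 32 } };
	int k;

	/* Three buffers consume only two descriptors: slots 0 and 1 share ring[0]. */
	for (k = 0; k < 3; k++)
		toy_setup_buf(&ring[k / 2], k, frags[k][0], frags[k][1]);

	for (k = 0; k < 2; k++)
		printf("desc %d: buf0=%#x/%u buf1=%#x/%u\n", k,
		       (unsigned)ring[k].addr0, (unsigned)ring[k].len0,
		       (unsigned)ring[k].addr1, (unsigned)ring[k].len1);
	return 0;
}

Completion handling mirrors this split: mtk_poll_tx_pdma() walks ring->cpu_idx against MT7628_TX_DTX_IDX0, while the QDMA-capable SoCs keep using the CRX/DRX pointer registers in mtk_poll_tx_qdma().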