Diffstat (limited to 'drivers/net/ethernet/freescale')
32 files changed, 2549 insertions, 1472 deletions
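The hunks that follow make two broad changes: the DPAA driver is converted from phylib (of_phy_connect() plus an adjust_link callback) to phylink, and the DPAA2 driver gains AF_XDP zero-copy support with per-channel buffer pools. As a rough, self-contained sketch of the phylink pattern adopted below, assuming illustrative names (the real driver keeps this state in struct mac_device inside the FMan MAC code):

#include <linux/netdevice.h>
#include <linux/phylink.h>
#include <linux/property.h>

/* Illustrative private struct; dpaa_eth keeps these in struct mac_device. */
struct example_mac {
	struct phylink *phylink;
	struct phylink_config phylink_config;
};

static void example_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* Program the MAC for the resolved interface mode. */
}

static void example_mac_link_up(struct phylink_config *config,
				struct phy_device *phy, unsigned int mode,
				phy_interface_t interface, int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	/* Apply speed/duplex/pause here; per the comment in
	 * dpaa_eth_cgr_init(), dpaa_eth_cgr_set_speed() must now be
	 * called from this callback rather than from adjust_link. */
}

static void example_mac_link_down(struct phylink_config *config,
				  unsigned int mode, phy_interface_t interface)
{
	/* Quiesce the MAC. */
}

static const struct phylink_mac_ops example_mac_ops = {
	.mac_config	= example_mac_config,
	.mac_link_up	= example_mac_link_up,
	.mac_link_down	= example_mac_link_down,
};

static int example_phylink_init(struct example_mac *mac, struct device *dev,
				struct net_device *net_dev)
{
	mac->phylink_config.dev = &net_dev->dev;
	mac->phylink_config.type = PHYLINK_NETDEV;
	mac->phylink_config.mac_capabilities = MAC_10 | MAC_100 | MAC_1000FD;
	__set_bit(PHY_INTERFACE_MODE_RGMII,
		  mac->phylink_config.supported_interfaces);

	mac->phylink = phylink_create(&mac->phylink_config, dev_fwnode(dev),
				      PHY_INTERFACE_MODE_RGMII,
				      &example_mac_ops);
	return PTR_ERR_OR_ZERO(mac->phylink);
}

At ndo_open/ndo_stop time the converted driver then pairs phylink_of_phy_connect() with phylink_start(), and phylink_stop() with phylink_disconnect_phy(), exactly as the dpaa_open()/dpaa_stop() hunks below show.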
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index ce866ae3df03..f1e80d6996ef 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -29,6 +29,7 @@ config FEC select CRC32 select PHYLIB select PAGE_POOL + select PAGE_POOL_STATS imply NET_SELFTESTS help Say Y here if you want to use the built-in 10/100 Fast ethernet diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig index 0e1439fd00bd..2b560661c82a 100644 --- a/drivers/net/ethernet/freescale/dpaa/Kconfig +++ b/drivers/net/ethernet/freescale/dpaa/Kconfig @@ -2,8 +2,8 @@ menuconfig FSL_DPAA_ETH tristate "DPAA Ethernet" depends on FSL_DPAA && FSL_FMAN - select PHYLIB - select FIXED_PHY + select PHYLINK + select PCS_LYNX help Data Path Acceleration Architecture Ethernet driver, supporting the Freescale QorIQ chips. diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index fc68a32ce2f7..3f8032947d86 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -264,8 +264,19 @@ static int dpaa_netdev_init(struct net_device *net_dev, net_dev->needed_headroom = priv->tx_headroom; net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout); - mac_dev->net_dev = net_dev; + /* The rest of the config is filled in by the mac device already */ + mac_dev->phylink_config.dev = &net_dev->dev; + mac_dev->phylink_config.type = PHYLINK_NETDEV; mac_dev->update_speed = dpaa_eth_cgr_set_speed; + mac_dev->phylink = phylink_create(&mac_dev->phylink_config, + dev_fwnode(mac_dev->dev), + mac_dev->phy_if, + mac_dev->phylink_ops); + if (IS_ERR(mac_dev->phylink)) { + err = PTR_ERR(mac_dev->phylink); + dev_err_probe(dev, err, "Could not create phylink\n"); + return err; + } /* start without the RUNNING flag, phylib controls it later */ netif_carrier_off(net_dev); @@ -273,6 +284,7 @@ static int dpaa_netdev_init(struct net_device *net_dev, err = register_netdev(net_dev); if (err < 0) { dev_err(dev, "register_netdev() = %d\n", err); + phylink_destroy(mac_dev->phylink); return err; } @@ -294,8 +306,7 @@ static int dpaa_stop(struct net_device *net_dev) */ msleep(200); - if (mac_dev->phy_dev) - phy_stop(mac_dev->phy_dev); + phylink_stop(mac_dev->phylink); mac_dev->disable(mac_dev->fman_mac); for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { @@ -304,8 +315,7 @@ static int dpaa_stop(struct net_device *net_dev) err = error; } - if (net_dev->phydev) - phy_disconnect(net_dev->phydev); + phylink_disconnect_phy(mac_dev->phylink); net_dev->phydev = NULL; msleep(200); @@ -833,10 +843,10 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv) /* Set different thresholds based on the configured MAC speed. * This may turn suboptimal if the MAC is reconfigured at another - * speed, so MACs must call dpaa_eth_cgr_set_speed in their adjust_link + * speed, so MACs must call dpaa_eth_cgr_set_speed in their link_up * callback. 
*/ - if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full) + if (priv->mac_dev->phylink_config.mac_capabilities & MAC_10000FD) cs_th = DPAA_CS_THRESHOLD_10G; else cs_th = DPAA_CS_THRESHOLD_1G; @@ -865,7 +875,7 @@ out_error: static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed) { - struct net_device *net_dev = mac_dev->net_dev; + struct net_device *net_dev = to_net_dev(mac_dev->phylink_config.dev); struct dpaa_priv *priv = netdev_priv(net_dev); struct qm_mcc_initcgr opts = { }; u32 cs_th; @@ -2904,58 +2914,6 @@ static void dpaa_eth_napi_disable(struct dpaa_priv *priv) } } -static void dpaa_adjust_link(struct net_device *net_dev) -{ - struct mac_device *mac_dev; - struct dpaa_priv *priv; - - priv = netdev_priv(net_dev); - mac_dev = priv->mac_dev; - mac_dev->adjust_link(mac_dev); -} - -/* The Aquantia PHYs are capable of performing rate adaptation */ -#define PHY_VEND_AQUANTIA 0x03a1b400 -#define PHY_VEND_AQUANTIA2 0x31c31c00 - -static int dpaa_phy_init(struct net_device *net_dev) -{ - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - struct mac_device *mac_dev; - struct phy_device *phy_dev; - struct dpaa_priv *priv; - u32 phy_vendor; - - priv = netdev_priv(net_dev); - mac_dev = priv->mac_dev; - - phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, - &dpaa_adjust_link, 0, - mac_dev->phy_if); - if (!phy_dev) { - netif_err(priv, ifup, net_dev, "init_phy() failed\n"); - return -ENODEV; - } - - phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10); - /* Unless the PHY is capable of rate adaptation */ - if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII || - (phy_vendor != PHY_VEND_AQUANTIA && - phy_vendor != PHY_VEND_AQUANTIA2)) { - /* remove any features not supported by the controller */ - ethtool_convert_legacy_u32_to_link_mode(mask, - mac_dev->if_support); - linkmode_and(phy_dev->supported, phy_dev->supported, mask); - } - - phy_support_asym_pause(phy_dev); - - mac_dev->phy_dev = phy_dev; - net_dev->phydev = phy_dev; - - return 0; -} - static int dpaa_open(struct net_device *net_dev) { struct mac_device *mac_dev; @@ -2966,7 +2924,8 @@ static int dpaa_open(struct net_device *net_dev) mac_dev = priv->mac_dev; dpaa_eth_napi_enable(priv); - err = dpaa_phy_init(net_dev); + err = phylink_of_phy_connect(mac_dev->phylink, + mac_dev->dev->of_node, 0); if (err) goto phy_init_failed; @@ -2981,7 +2940,7 @@ static int dpaa_open(struct net_device *net_dev) netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err); goto mac_start_failed; } - phy_start(priv->mac_dev->phy_dev); + phylink_start(mac_dev->phylink); netif_tx_start_all_queues(net_dev); @@ -2990,6 +2949,7 @@ static int dpaa_open(struct net_device *net_dev) mac_start_failed: for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) fman_port_disable(mac_dev->port[i]); + phylink_disconnect_phy(mac_dev->phylink); phy_init_failed: dpaa_eth_napi_disable(priv); @@ -3145,10 +3105,12 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) { int ret = -EINVAL; + struct dpaa_priv *priv = netdev_priv(net_dev); if (cmd == SIOCGMIIREG) { if (net_dev->phydev) - return phy_mii_ioctl(net_dev->phydev, rq, cmd); + return phylink_mii_ioctl(priv->mac_dev->phylink, rq, + cmd); } if (cmd == SIOCSHWTSTAMP) @@ -3551,6 +3513,7 @@ static int dpaa_remove(struct platform_device *pdev) dev_set_drvdata(dev, NULL); unregister_netdev(net_dev); + phylink_destroy(priv->mac_dev->phylink); err = dpaa_fq_free(dev, &priv->dpaa_fq_list); diff --git 
a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 769e936a263c..9c71cbbb13d8 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -54,27 +54,19 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = { static int dpaa_get_link_ksettings(struct net_device *net_dev, struct ethtool_link_ksettings *cmd) { - if (!net_dev->phydev) - return 0; + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; - phy_ethtool_ksettings_get(net_dev->phydev, cmd); - - return 0; + return phylink_ethtool_ksettings_get(mac_dev->phylink, cmd); } static int dpaa_set_link_ksettings(struct net_device *net_dev, const struct ethtool_link_ksettings *cmd) { - int err; - - if (!net_dev->phydev) - return -ENODEV; - - err = phy_ethtool_ksettings_set(net_dev->phydev, cmd); - if (err < 0) - netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err); + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; - return err; + return phylink_ethtool_ksettings_set(mac_dev->phylink, cmd); } static void dpaa_get_drvinfo(struct net_device *net_dev, @@ -99,80 +91,28 @@ static void dpaa_set_msglevel(struct net_device *net_dev, static int dpaa_nway_reset(struct net_device *net_dev) { - int err; - - if (!net_dev->phydev) - return -ENODEV; - - err = 0; - if (net_dev->phydev->autoneg) { - err = phy_start_aneg(net_dev->phydev); - if (err < 0) - netdev_err(net_dev, "phy_start_aneg() = %d\n", - err); - } + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; - return err; + return phylink_ethtool_nway_reset(mac_dev->phylink); } static void dpaa_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *epause) { - struct mac_device *mac_dev; - struct dpaa_priv *priv; - - priv = netdev_priv(net_dev); - mac_dev = priv->mac_dev; - - if (!net_dev->phydev) - return; + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; - epause->autoneg = mac_dev->autoneg_pause; - epause->rx_pause = mac_dev->rx_pause_active; - epause->tx_pause = mac_dev->tx_pause_active; + phylink_ethtool_get_pauseparam(mac_dev->phylink, epause); } static int dpaa_set_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *epause) { - struct mac_device *mac_dev; - struct phy_device *phydev; - bool rx_pause, tx_pause; - struct dpaa_priv *priv; - int err; - - priv = netdev_priv(net_dev); - mac_dev = priv->mac_dev; - - phydev = net_dev->phydev; - if (!phydev) { - netdev_err(net_dev, "phy device not initialized\n"); - return -ENODEV; - } - - if (!phy_validate_pause(phydev, epause)) - return -EINVAL; - - /* The MAC should know how to handle PAUSE frame autonegotiation before - * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE - * settings. - */ - mac_dev->autoneg_pause = !!epause->autoneg; - mac_dev->rx_pause_req = !!epause->rx_pause; - mac_dev->tx_pause_req = !!epause->tx_pause; - - /* Determine the sym/asym advertised PAUSE capabilities from the desired - * rx/tx pause settings. 
- */ - - phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); - - fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); - err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); - if (err < 0) - netdev_err(net_dev, "set_mac_active_pause() = %d\n", err); + struct dpaa_priv *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; - return err; + return phylink_ethtool_set_pauseparam(mac_dev->phylink, epause); } static int dpaa_get_sset_count(struct net_device *net_dev, int type) diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile index 3d9842af7f10..1b05ba8d1cbf 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Makefile +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o -fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o dpaa2-xsk.o fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c index 8356af4631fd..1af254caeb0d 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c @@ -98,14 +98,14 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) int i; seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name); - seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n", - "CHID", "CPU", "Deq busy", "Frames", "CDANs", + seq_printf(file, "%s %5s%16s%16s%16s%16s%16s%16s\n", + "IDX", "CHID", "CPU", "Deq busy", "Frames", "CDANs", "Avg Frm/CDAN", "Buf count"); for (i = 0; i < priv->num_channels; i++) { ch = priv->channel[i]; - seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n", - ch->ch_id, + seq_printf(file, "%3s%d%6d%16d%16llu%16llu%16llu%16llu%16d\n", + "CH#", i, ch->ch_id, ch->nctx.desired_cpu, ch->stats.dequeue_portal_busy, ch->stats.frames, @@ -119,6 +119,51 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch); +static int dpaa2_dbg_bp_show(struct seq_file *file, void *offset) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; + int i, j, num_queues, buf_cnt; + struct dpaa2_eth_bp *bp; + char ch_name[10]; + int err; + + /* Print out the header */ + seq_printf(file, "Buffer pool info for %s:\n", priv->net_dev->name); + seq_printf(file, "%s %10s%15s", "IDX", "BPID", "Buf count"); + num_queues = dpaa2_eth_queue_count(priv); + for (i = 0; i < num_queues; i++) { + snprintf(ch_name, sizeof(ch_name), "CH#%d", i); + seq_printf(file, "%10s", ch_name); + } + seq_printf(file, "\n"); + + /* For each buffer pool, print out its BPID, the number of buffers in + * that buffer pool and the channels which are using it. 
+ */ + for (i = 0; i < priv->num_bps; i++) { + bp = priv->bp[i]; + + err = dpaa2_io_query_bp_count(NULL, bp->bpid, &buf_cnt); + if (err) { + netdev_warn(priv->net_dev, "Buffer count query error %d\n", err); + return err; + } + + seq_printf(file, "%3s%d%10d%15d", "BP#", i, bp->bpid, buf_cnt); + for (j = 0; j < num_queues; j++) { + if (priv->channel[j]->bp == bp) + seq_printf(file, "%10s", "x"); + else + seq_printf(file, "%10s", ""); + } + seq_printf(file, "\n"); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_bp); + void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) { struct fsl_mc_device *dpni_dev; @@ -139,6 +184,10 @@ void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) /* per-fq stats file */ debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_fops); + + /* per buffer pool stats file */ + debugfs_create_file("bp_stats", 0444, dir, priv, &dpaa2_dbg_bp_fops); + } void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c index 7fefe1574b6a..76f808d38066 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c @@ -37,18 +37,9 @@ static int dpaa2_eth_dl_info_get(struct devlink *devlink, struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink); struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv; char buf[10]; - int err; - - err = devlink_info_driver_name_put(req, KBUILD_MODNAME); - if (err) - return err; scnprintf(buf, 10, "%d.%d", priv->dpni_ver_major, priv->dpni_ver_minor); - err = devlink_info_version_running_put(req, "dpni", buf); - if (err) - return err; - - return 0; + return devlink_info_version_running_put(req, "dpni", buf); } static struct dpaa2_eth_trap_item * @@ -226,25 +217,16 @@ int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv) { struct devlink_port *devlink_port = &priv->devlink_port; struct devlink_port_attrs attrs = {}; - int err; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; devlink_port_attrs_set(devlink_port, &attrs); - - err = devlink_port_register(priv->devlink, devlink_port, 0); - if (err) - return err; - - devlink_port_type_eth_set(devlink_port, priv->net_dev); - - return 0; + return devlink_port_register(priv->devlink, devlink_port, 0); } void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv) { struct devlink_port *devlink_port = &priv->devlink_port; - devlink_port_type_clear(devlink_port); devlink_port_unregister(devlink_port); } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h index 5fb5f14e01ec..9b43fadb9b11 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h @@ -73,6 +73,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd, TP_ARGS(netdev, fd) ); +/* Tx (egress) XSK fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_xsk_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + /* Rx fd */ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, TP_PROTO(struct net_device *netdev, @@ -81,6 +89,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, TP_ARGS(netdev, fd) ); +/* Rx XSK fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_xsk_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + /* Tx confirmation fd */ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, TP_PROTO(struct net_device *netdev, @@ -90,57 +106,81 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, ); 
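The new per-buffer-pool debugfs file above uses the kernel's single-show seq_file idiom, where DEFINE_SHOW_ATTRIBUTE() generates the file_operations around one show routine. A minimal sketch of that idiom with illustrative names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct example_priv {
	struct dentry *dbg_dir;
	int buf_count;
};

/* DEFINE_SHOW_ATTRIBUTE() wraps single_open() around the show routine
 * and emits example_stats_fops, which is all a read-only stats file
 * needs. The data pointer passed to debugfs_create_file() comes back
 * as file->private. */
static int example_stats_show(struct seq_file *file, void *offset)
{
	struct example_priv *priv = file->private;

	seq_printf(file, "buffers in pool: %d\n", priv->buf_count);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_stats);

static void example_dbg_add(struct example_priv *priv)
{
	priv->dbg_dir = debugfs_create_dir("example", NULL);
	debugfs_create_file("bp_stats", 0444, priv->dbg_dir, priv,
			    &example_stats_fops);
}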
/* Log data about raw buffers. Useful for tracing DPBP content. */ -TRACE_EVENT(dpaa2_eth_buf_seed, - /* Trace function prototype */ - TP_PROTO(struct net_device *netdev, - /* virtual address and size */ - void *vaddr, - size_t size, - /* dma map address and size */ - dma_addr_t dma_addr, - size_t map_size, - /* buffer pool id, if relevant */ - u16 bpid), - - /* Repeat argument list here */ - TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), - - /* A structure containing the relevant information we want - * to record. Declare name and type for each normal element, - * name, type and size for arrays. Use __string for variable - * length strings. - */ - TP_STRUCT__entry( - __field(void *, vaddr) - __field(size_t, size) - __field(dma_addr_t, dma_addr) - __field(size_t, map_size) - __field(u16, bpid) - __string(name, netdev->name) - ), - - /* The function that assigns values to the above declared - * fields - */ - TP_fast_assign( - __entry->vaddr = vaddr; - __entry->size = size; - __entry->dma_addr = dma_addr; - __entry->map_size = map_size; - __entry->bpid = bpid; - __assign_str(name, netdev->name); - ), - - /* This is what gets printed when the trace event is - * triggered. - */ - TP_printk(TR_BUF_FMT, - __get_str(name), - __entry->vaddr, - __entry->size, - &__entry->dma_addr, - __entry->map_size, - __entry->bpid) +DECLARE_EVENT_CLASS(dpaa2_eth_buf, + /* Trace function prototype */ + TP_PROTO(struct net_device *netdev, + /* virtual address and size */ + void *vaddr, + size_t size, + /* dma map address and size */ + dma_addr_t dma_addr, + size_t map_size, + /* buffer pool id, if relevant */ + u16 bpid), + + /* Repeat argument list here */ + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), + + /* A structure containing the relevant information we want + * to record. Declare name and type for each normal element, + * name, type and size for arrays. Use __string for variable + * length strings. + */ + TP_STRUCT__entry( + __field(void *, vaddr) + __field(size_t, size) + __field(dma_addr_t, dma_addr) + __field(size_t, map_size) + __field(u16, bpid) + __string(name, netdev->name) + ), + + /* The function that assigns values to the above declared + * fields + */ + TP_fast_assign( + __entry->vaddr = vaddr; + __entry->size = size; + __entry->dma_addr = dma_addr; + __entry->map_size = map_size; + __entry->bpid = bpid; + __assign_str(name, netdev->name); + ), + + /* This is what gets printed when the trace event is + * triggered. + */ + TP_printk(TR_BUF_FMT, + __get_str(name), + __entry->vaddr, + __entry->size, + &__entry->dma_addr, + __entry->map_size, + __entry->bpid) +); + +/* Main memory buff seeding */ +DEFINE_EVENT(dpaa2_eth_buf, dpaa2_eth_buf_seed, + TP_PROTO(struct net_device *netdev, + void *vaddr, + size_t size, + dma_addr_t dma_addr, + size_t map_size, + u16 bpid), + + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid) +); + +/* UMEM buff seeding on AF_XDP fast path */ +DEFINE_EVENT(dpaa2_eth_buf, dpaa2_xsk_buf_seed, + TP_PROTO(struct net_device *netdev, + void *vaddr, + size_t size, + dma_addr_t dma_addr, + size_t map_size, + u16 bpid), + + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid) ); /* If only one event of a certain type needs to be declared, use TRACE_EVENT(). 
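The trace hunk above is a mechanical TRACE_EVENT() to DECLARE_EVENT_CLASS()/DEFINE_EVENT() conversion: once a second event with the same prototype is needed (XSK buffer seeding alongside main-memory seeding), the record layout, assignment logic, and print format are declared once in a class and each named event is stamped out from it. A condensed sketch of the same shape, with illustrative names and the usual tracepoint-header boilerplate (TRACE_SYSTEM, CREATE_TRACE_POINTS) omitted:

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(example_buf,
	TP_PROTO(struct net_device *netdev, void *vaddr, size_t size),
	TP_ARGS(netdev, vaddr, size),
	TP_STRUCT__entry(
		__field(void *, vaddr)
		__field(size_t, size)
		__string(name, netdev->name)
	),
	TP_fast_assign(
		__entry->vaddr = vaddr;
		__entry->size = size;
		__assign_str(name, netdev->name);
	),
	TP_printk("%s: vaddr=%p size=%zu",
		  __get_str(name), __entry->vaddr, __entry->size)
);

/* Each event reuses the class; only the event name differs. */
DEFINE_EVENT(example_buf, example_buf_seed,
	TP_PROTO(struct net_device *netdev, void *vaddr, size_t size),
	TP_ARGS(netdev, vaddr, size)
);

DEFINE_EVENT(example_buf, example_xsk_buf_seed,
	TP_PROTO(struct net_device *netdev, void *vaddr, size_t size),
	TP_ARGS(netdev, vaddr, size)
);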
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 8d029addddad..0c35abb7d065 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright 2016-2020 NXP + * Copyright 2016-2022 NXP */ #include <linux/init.h> #include <linux/module.h> @@ -8,7 +8,6 @@ #include <linux/etherdevice.h> #include <linux/of_net.h> #include <linux/interrupt.h> -#include <linux/msi.h> #include <linux/kthread.h> #include <linux/iommu.h> #include <linux/fsl/mc.h> @@ -19,6 +18,7 @@ #include <net/pkt_cls.h> #include <net/sock.h> #include <net/tso.h> +#include <net/xdp_sock_drv.h> #include "dpaa2-eth.h" @@ -104,8 +104,8 @@ static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv) priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct; } -static void *dpaa2_iova_to_virt(struct iommu_domain *domain, - dma_addr_t iova_addr) +void *dpaa2_iova_to_virt(struct iommu_domain *domain, + dma_addr_t iova_addr) { phys_addr_t phys_addr; @@ -279,23 +279,33 @@ static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv, * be released in the pool */ static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, - int count) + int count, bool xsk_zc) { struct device *dev = priv->net_dev->dev.parent; + struct dpaa2_eth_swa *swa; + struct xdp_buff *xdp_buff; void *vaddr; int i; for (i = 0; i < count; i++) { vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); - dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, - DMA_BIDIRECTIONAL); - free_pages((unsigned long)vaddr, 0); + + if (!xsk_zc) { + dma_unmap_page(dev, buf_array[i], priv->rx_buf_size, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)vaddr, 0); + } else { + swa = (struct dpaa2_eth_swa *) + (vaddr + DPAA2_ETH_RX_HWA_SIZE); + xdp_buff = swa->xsk.xdp_buff; + xsk_buff_free(xdp_buff); + } } } -static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - dma_addr_t addr) +void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + dma_addr_t addr) { int retries = 0; int err; @@ -304,7 +314,7 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD) return; - while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid, + while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid, ch->recycled_bufs, ch->recycled_bufs_cnt)) == -EBUSY) { if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) @@ -313,7 +323,8 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv, } if (err) { - dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt); + dpaa2_eth_free_bufs(priv, ch->recycled_bufs, + ch->recycled_bufs_cnt, ch->xsk_zc); ch->buf_count -= ch->recycled_bufs_cnt; } @@ -377,10 +388,10 @@ static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv, fq->xdp_tx_fds.num = 0; } -static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - struct dpaa2_fd *fd, - void *buf_start, u16 queue_id) +void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_fd *fd, + void *buf_start, u16 queue_id) { struct dpaa2_faead *faead; struct dpaa2_fd *dest_fd; @@ -485,19 +496,15 @@ out: return xdp_act; } -static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel 
*ch, - const struct dpaa2_fd *fd, - void *fd_vaddr) +struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, u32 fd_length, + void *fd_vaddr) { u16 fd_offset = dpaa2_fd_get_offset(fd); - struct dpaa2_eth_priv *priv = ch->priv; - u32 fd_length = dpaa2_fd_get_len(fd); struct sk_buff *skb = NULL; unsigned int skb_len; - if (fd_length > priv->rx_copybreak) - return NULL; - skb_len = fd_length + dpaa2_eth_needed_headroom(NULL); skb = napi_alloc_skb(&ch->napi, skb_len); @@ -514,11 +521,66 @@ static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch, return skb; } +static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + void *fd_vaddr) +{ + struct dpaa2_eth_priv *priv = ch->priv; + u32 fd_length = dpaa2_fd_get_len(fd); + + if (fd_length > priv->rx_copybreak) + return NULL; + + return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr); +} + +void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, void *vaddr, + struct dpaa2_eth_fq *fq, + struct rtnl_link_stats64 *percpu_stats, + struct sk_buff *skb) +{ + struct dpaa2_fas *fas; + u32 status = 0; + + fas = dpaa2_get_fas(vaddr, false); + prefetch(fas); + prefetch(skb->data); + + /* Get the timestamp value */ + if (priv->rx_tstamp) { + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + __le64 *ts = dpaa2_get_ts(vaddr, false); + u64 ns; + + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + + ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); + shhwtstamps->hwtstamp = ns_to_ktime(ns); + } + + /* Check if we need to validate the L4 csum */ + if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { + status = le32_to_cpu(fas->status); + dpaa2_eth_validate_rx_csum(priv, status, skb); + } + + skb->protocol = eth_type_trans(skb, priv->net_dev); + skb_record_rx_queue(skb, fq->flowid); + + percpu_stats->rx_packets++; + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); + ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd); + + list_add_tail(&skb->list, ch->rx_list); +} + /* Main Rx frame processing routine */ -static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - const struct dpaa2_fd *fd, - struct dpaa2_eth_fq *fq) +void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct dpaa2_eth_fq *fq) { dma_addr_t addr = dpaa2_fd_get_addr(fd); u8 fd_format = dpaa2_fd_get_format(fd); @@ -527,9 +589,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, struct rtnl_link_stats64 *percpu_stats; struct dpaa2_eth_drv_stats *percpu_extras; struct device *dev = priv->net_dev->dev.parent; - struct dpaa2_fas *fas; void *buf_data; - u32 status = 0; u32 xdp_act; /* Tracing point */ @@ -539,8 +599,6 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); - fas = dpaa2_get_fas(vaddr, false); - prefetch(fas); buf_data = vaddr + dpaa2_fd_get_offset(fd); prefetch(buf_data); @@ -578,35 +636,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, if (unlikely(!skb)) goto err_build_skb; - prefetch(skb->data); - - /* Get the timestamp value */ - if (priv->rx_tstamp) { - struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); - __le64 *ts = dpaa2_get_ts(vaddr, false); - u64 ns; - - memset(shhwtstamps, 0, sizeof(*shhwtstamps)); - - ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); - shhwtstamps->hwtstamp = 
ns_to_ktime(ns); - } - - /* Check if we need to validate the L4 csum */ - if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { - status = le32_to_cpu(fas->status); - dpaa2_eth_validate_rx_csum(priv, status, skb); - } - - skb->protocol = eth_type_trans(skb, priv->net_dev); - skb_record_rx_queue(skb, fq->flowid); - - percpu_stats->rx_packets++; - percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); - ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd); - - list_add_tail(&skb->list, ch->rx_list); - + dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb); return; err_build_skb: @@ -827,7 +857,7 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv, } } -static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv) +void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv) { struct dpaa2_eth_sgt_cache *sgt_cache; void *sgt_buf = NULL; @@ -849,7 +879,7 @@ static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv) return sgt_buf; } -static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf) +void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf) { struct dpaa2_eth_sgt_cache *sgt_cache; @@ -1084,9 +1114,10 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv, * This can be called either from dpaa2_eth_tx_conf() or on the error path of * dpaa2_eth_tx(). */ -static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_fq *fq, - const struct dpaa2_fd *fd, bool in_napi) +void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *fq, + const struct dpaa2_fd *fd, bool in_napi) { struct device *dev = priv->net_dev->dev.parent; dma_addr_t fd_addr, sg_addr; @@ -1153,6 +1184,10 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, if (!swa->tso.is_last_fd) should_free_skb = 0; + } else if (swa->type == DPAA2_ETH_SWA_XSK) { + /* Unmap the SGT Buffer */ + dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size, + DMA_BIDIRECTIONAL); } else { skb = swa->single.skb; @@ -1170,6 +1205,12 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, return; } + if (swa->type == DPAA2_ETH_SWA_XSK) { + ch->xsk_tx_pkts_sent++; + dpaa2_eth_sgt_recycle(priv, buffer_start); + return; + } + if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) { fq->dq_frames++; fq->dq_bytes += fd_len; @@ -1344,7 +1385,7 @@ err_alloc_tso_hdr: err_sgt_get: /* Free all the other FDs that were already fully created */ for (i = 0; i < index; i++) - dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false); + dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false); return err; } @@ -1460,7 +1501,7 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb, if (unlikely(err < 0)) { percpu_stats->tx_errors++; /* Clean up everything, including freeing the skb */ - dpaa2_eth_free_tx_fd(priv, fq, fd, false); + dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false); netdev_tx_completed_queue(nq, 1, fd_len); } else { percpu_stats->tx_packets += total_enqueued; @@ -1553,7 +1594,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, /* Check frame errors in the FD field */ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; - dpaa2_eth_free_tx_fd(priv, fq, fd, true); + dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true); if (likely(!fd_errors)) return; @@ -1631,44 +1672,76 @@ static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) * to the specified buffer pool */ static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, u16 bpid) + struct dpaa2_eth_channel *ch) { + 
struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD]; struct device *dev = priv->net_dev->dev.parent; u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + struct dpaa2_eth_swa *swa; struct page *page; dma_addr_t addr; int retries = 0; - int i, err; - - for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { - /* Allocate buffer visible to WRIOP + skb shared info + - * alignment padding - */ - /* allocate one page for each Rx buffer. WRIOP sees - * the entire page except for a tailroom reserved for - * skb shared info + int i = 0, err; + u32 batch; + + /* Allocate buffers visible to WRIOP */ + if (!ch->xsk_zc) { + for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { + /* Also allocate skb shared info and alignment padding. + * There is one page for each Rx buffer. WRIOP sees + * the entire page except for a tailroom reserved for + * skb shared info + */ + page = dev_alloc_pages(0); + if (!page) + goto err_alloc; + + addr = dma_map_page(dev, page, 0, priv->rx_buf_size, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) + goto err_map; + + buf_array[i] = addr; + + /* tracing point */ + trace_dpaa2_eth_buf_seed(priv->net_dev, + page_address(page), + DPAA2_ETH_RX_BUF_RAW_SIZE, + addr, priv->rx_buf_size, + ch->bp->bpid); + } + } else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) { + /* Allocate XSK buffers for AF_XDP fast path in batches + * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot + * provide enough buffers at the moment */ - page = dev_alloc_pages(0); - if (!page) + batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs, + DPAA2_ETH_BUFS_PER_CMD); + if (!batch) goto err_alloc; - addr = dma_map_page(dev, page, 0, priv->rx_buf_size, - DMA_BIDIRECTIONAL); - if (unlikely(dma_mapping_error(dev, addr))) - goto err_map; + for (i = 0; i < batch; i++) { + swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start + + DPAA2_ETH_RX_HWA_SIZE); + swa->xsk.xdp_buff = xdp_buffs[i]; + + addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]); + if (unlikely(dma_mapping_error(dev, addr))) + goto err_map; - buf_array[i] = addr; + buf_array[i] = addr; - /* tracing point */ - trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page), - DPAA2_ETH_RX_BUF_RAW_SIZE, - addr, priv->rx_buf_size, - bpid); + trace_dpaa2_xsk_buf_seed(priv->net_dev, + xdp_buffs[i]->data_hard_start, + DPAA2_ETH_RX_BUF_RAW_SIZE, + addr, priv->rx_buf_size, + ch->bp->bpid); + } } release_bufs: /* In case the portal is busy, retry until successful */ - while ((err = dpaa2_io_service_release(ch->dpio, bpid, + while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid, buf_array, i)) == -EBUSY) { if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) break; @@ -1679,14 +1752,19 @@ release_bufs: * not much else we can do about it */ if (err) { - dpaa2_eth_free_bufs(priv, buf_array, i); + dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc); return 0; } return i; err_map: - __free_pages(page, 0); + if (!ch->xsk_zc) { + __free_pages(page, 0); + } else { + for (; i < batch; i++) + xsk_buff_free(xdp_buffs[i]); + } err_alloc: /* If we managed to allocate at least some buffers, * release them to hardware @@ -1697,39 +1775,64 @@ err_alloc: return 0; } -static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) +static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch) { - int i, j; + int i; int new_count; - for (j = 0; j < priv->num_channels; j++) { - for (i = 0; i < DPAA2_ETH_NUM_BUFS; - i += DPAA2_ETH_BUFS_PER_CMD) { - new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid); - 
priv->channel[j]->buf_count += new_count; + for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) { + new_count = dpaa2_eth_add_bufs(priv, ch); + ch->buf_count += new_count; - if (new_count < DPAA2_ETH_BUFS_PER_CMD) { - return -ENOMEM; - } - } + if (new_count < DPAA2_ETH_BUFS_PER_CMD) + return -ENOMEM; } return 0; } +static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv) +{ + struct net_device *net_dev = priv->net_dev; + struct dpaa2_eth_channel *channel; + int i, err = 0; + + for (i = 0; i < priv->num_channels; i++) { + channel = priv->channel[i]; + + err = dpaa2_eth_seed_pool(priv, channel); + + /* Not much to do; the buffer pool, though not filled up, + * may still contain some buffers which would enable us + * to limp on. + */ + if (err) + netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", + channel->bp->dev->obj_desc.id, + channel->bp->bpid); + } +} + /* - * Drain the specified number of buffers from the DPNI's private buffer pool. + * Drain the specified number of buffers from one of the DPNI's private buffer + * pools. * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD */ -static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count) +static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid, + int count) { u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + bool xsk_zc = false; int retries = 0; - int ret; + int i, ret; + + for (i = 0; i < priv->num_channels; i++) + if (priv->channel[i]->bp->bpid == bpid) + xsk_zc = priv->channel[i]->xsk_zc; do { - ret = dpaa2_io_service_acquire(NULL, priv->bpid, - buf_array, count); + ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count); if (ret < 0) { if (ret == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES) @@ -1737,28 +1840,40 @@ static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count) netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); return; } - dpaa2_eth_free_bufs(priv, buf_array, ret); + dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc); retries = 0; } while (ret); } -static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv) +static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid) { int i; - dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); - dpaa2_eth_drain_bufs(priv, 1); + /* Drain the buffer pool */ + dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD); + dpaa2_eth_drain_bufs(priv, bpid, 1); + /* Setup to zero the buffer count of all channels which were + * using this buffer pool. 
+ */ for (i = 0; i < priv->num_channels; i++) - priv->channel[i]->buf_count = 0; + if (priv->channel[i]->bp->bpid == bpid) + priv->channel[i]->buf_count = 0; +} + +static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_bps; i++) + dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid); } /* Function is called from softirq context only, so we don't need to guard * the access to percpu count */ static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - u16 bpid) + struct dpaa2_eth_channel *ch) { int new_count; @@ -1766,7 +1881,7 @@ static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv, return 0; do { - new_count = dpaa2_eth_add_bufs(priv, ch, bpid); + new_count = dpaa2_eth_add_bufs(priv, ch); if (unlikely(!new_count)) { /* Out of memory; abort for now, we'll try later on */ break; @@ -1830,6 +1945,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) struct dpaa2_eth_fq *fq, *txc_fq = NULL; struct netdev_queue *nq; int store_cleaned, work_done; + bool work_done_zc = false; struct list_head rx_list; int retries = 0; u16 flowid; @@ -1842,13 +1958,22 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) INIT_LIST_HEAD(&rx_list); ch->rx_list = &rx_list; + if (ch->xsk_zc) { + work_done_zc = dpaa2_xsk_tx(priv, ch); + /* If we reached the XSK Tx per NAPI threshold, we're done */ + if (work_done_zc) { + work_done = budget; + goto out; + } + } + do { err = dpaa2_eth_pull_channel(ch); if (unlikely(err)) break; /* Refill pool if appropriate */ - dpaa2_eth_refill_pool(priv, ch, priv->bpid); + dpaa2_eth_refill_pool(priv, ch); store_cleaned = dpaa2_eth_consume_frames(ch, &fq); if (store_cleaned <= 0) @@ -1894,6 +2019,11 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) out: netif_receive_skb_list(ch->rx_list); + if (ch->xsk_tx_pkts_sent) { + xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent); + ch->xsk_tx_pkts_sent = 0; + } + if (txc_fq && txc_fq->dq_frames) { nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); netdev_tx_completed_queue(nq, txc_fq->dq_frames, @@ -2017,8 +2147,11 @@ static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv) /* When we manage the MAC/PHY using phylink there is no need * to manually update the netif_carrier. + * We can avoid locking because we are called from the "link changed" + * IRQ handler, which is the same as the "endpoint changed" IRQ handler + * (the writer to priv->mac), so we cannot race with it. */ - if (dpaa2_eth_is_type_phy(priv)) + if (dpaa2_mac_is_type_phy(priv->mac)) goto out; /* Chech link state; speed / duplex changes are not treated yet */ @@ -2047,15 +2180,9 @@ static int dpaa2_eth_open(struct net_device *net_dev) struct dpaa2_eth_priv *priv = netdev_priv(net_dev); int err; - err = dpaa2_eth_seed_pool(priv, priv->bpid); - if (err) { - /* Not much to do; the buffer pool, though not filled up, - * may still contain some buffers which would enable us - * to limp on. 
- */ - netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", - priv->dpbp_dev->obj_desc.id, priv->bpid); - } + dpaa2_eth_seed_pools(priv); + + mutex_lock(&priv->mac_lock); if (!dpaa2_eth_is_type_phy(priv)) { /* We'll only start the txqs when the link is actually ready; @@ -2075,20 +2202,21 @@ static int dpaa2_eth_open(struct net_device *net_dev) err = dpni_enable(priv->mc_io, 0, priv->mc_token); if (err < 0) { + mutex_unlock(&priv->mac_lock); netdev_err(net_dev, "dpni_enable() failed\n"); goto enable_err; } - if (dpaa2_eth_is_type_phy(priv)) { + if (dpaa2_eth_is_type_phy(priv)) dpaa2_mac_start(priv->mac); - phylink_start(priv->mac->phylink); - } + + mutex_unlock(&priv->mac_lock); return 0; enable_err: dpaa2_eth_disable_ch_napi(priv); - dpaa2_eth_drain_pool(priv); + dpaa2_eth_drain_pools(priv); return err; } @@ -2155,14 +2283,17 @@ static int dpaa2_eth_stop(struct net_device *net_dev) int dpni_enabled = 0; int retries = 10; + mutex_lock(&priv->mac_lock); + if (dpaa2_eth_is_type_phy(priv)) { - phylink_stop(priv->mac->phylink); dpaa2_mac_stop(priv->mac); } else { netif_tx_stop_all_queues(net_dev); netif_carrier_off(net_dev); } + mutex_unlock(&priv->mac_lock); + /* On dpni_disable(), the MC firmware will: * - stop MAC Rx and wait for all Rx frames to be enqueued to software * - cut off WRIOP dequeues from egress FQs and wait until transmission @@ -2193,7 +2324,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev) dpaa2_eth_disable_ch_napi(priv); /* Empty the buffer pool */ - dpaa2_eth_drain_pool(priv); + dpaa2_eth_drain_pools(priv); /* Empty the Scatter-Gather Buffer cache */ dpaa2_eth_sgt_cache_drain(priv); @@ -2488,12 +2619,20 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct dpaa2_eth_priv *priv = netdev_priv(dev); + int err; if (cmd == SIOCSHWTSTAMP) return dpaa2_eth_ts_ioctl(dev, rq, cmd); - if (dpaa2_eth_is_type_phy(priv)) - return phylink_mii_ioctl(priv->mac->phylink, rq, cmd); + mutex_lock(&priv->mac_lock); + + if (dpaa2_eth_is_type_phy(priv)) { + err = phylink_mii_ioctl(priv->mac->phylink, rq, cmd); + mutex_unlock(&priv->mac_lock); + return err; + } + + mutex_unlock(&priv->mac_lock); return -EOPNOTSUPP; } @@ -2602,7 +2741,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog) need_update = (!!priv->xdp_prog != !!prog); if (up) - dpaa2_eth_stop(dev); + dev_close(dev); /* While in xdp mode, enforce a maximum Rx frame size based on MTU. 
* Also, when switching between xdp/non-xdp modes we need to reconfigure @@ -2630,7 +2769,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog) } if (up) { - err = dpaa2_eth_open(dev); + err = dev_open(dev, NULL); if (err) return err; } @@ -2641,7 +2780,7 @@ out_err: if (prog) bpf_prog_sub(prog, priv->num_channels); if (up) - dpaa2_eth_open(dev); + dev_open(dev, NULL); return err; } @@ -2651,6 +2790,8 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return dpaa2_eth_setup_xdp(dev, xdp->prog); + case XDP_SETUP_XSK_POOL: + return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id); default: return -EINVAL; } @@ -2881,6 +3022,7 @@ static const struct net_device_ops dpaa2_eth_ops = { .ndo_change_mtu = dpaa2_eth_change_mtu, .ndo_bpf = dpaa2_eth_xdp, .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, + .ndo_xsk_wakeup = dpaa2_xsk_wakeup, .ndo_setup_tc = dpaa2_eth_setup_tc, .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid, .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid @@ -2895,7 +3037,11 @@ static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx) /* Update NAPI statistics */ ch->stats.cdan++; - napi_schedule(&ch->napi); + /* NAPI can also be scheduled from the AF_XDP Tx path. Mark a missed + * so that it can be rescheduled again. + */ + if (!napi_if_scheduled_mark_missed(&ch->napi)) + napi_schedule(&ch->napi); } /* Allocate and configure a DPCON object */ @@ -2908,10 +3054,12 @@ static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv) err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPCON, &dpcon); if (err) { - if (err == -ENXIO) + if (err == -ENXIO) { + dev_dbg(dev, "Waiting for DPCON\n"); err = -EPROBE_DEFER; - else + } else { dev_info(dev, "Not enough DPCONs, will go on as-is\n"); + } return ERR_PTR(err); } @@ -3021,7 +3169,9 @@ static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv) channel = dpaa2_eth_alloc_channel(priv); if (IS_ERR_OR_NULL(channel)) { err = PTR_ERR_OR_ZERO(channel); - if (err != -EPROBE_DEFER) + if (err == -EPROBE_DEFER) + dev_dbg(dev, "waiting for affine channel\n"); + else dev_info(dev, "No affine channel for cpu %d and above\n", i); goto err_alloc_ch; @@ -3204,13 +3354,14 @@ static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv) dpaa2_eth_set_fq_affinity(priv); } -/* Allocate and configure one buffer pool for each interface */ -static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv) +/* Allocate and configure a buffer pool */ +struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv) { - int err; - struct fsl_mc_device *dpbp_dev; struct device *dev = priv->net_dev->dev.parent; + struct fsl_mc_device *dpbp_dev; struct dpbp_attr dpbp_attrs; + struct dpaa2_eth_bp *bp; + int err; err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, &dpbp_dev); @@ -3219,12 +3370,16 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv) err = -EPROBE_DEFER; else dev_err(dev, "DPBP device allocation failed\n"); - return err; + return ERR_PTR(err); } - priv->dpbp_dev = dpbp_dev; + bp = kzalloc(sizeof(*bp), GFP_KERNEL); + if (!bp) { + err = -ENOMEM; + goto err_alloc; + } - err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, + err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id, &dpbp_dev->mc_handle); if (err) { dev_err(dev, "dpbp_open() failed\n"); @@ -3249,9 +3404,11 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv) dev_err(dev, "dpbp_get_attributes() failed\n"); goto 
err_get_attr; } - priv->bpid = dpbp_attrs.bpid; - return 0; + bp->dev = dpbp_dev; + bp->bpid = dpbp_attrs.bpid; + + return bp; err_get_attr: dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); @@ -3259,17 +3416,58 @@ err_enable: err_reset: dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); err_open: + kfree(bp); +err_alloc: fsl_mc_object_free(dpbp_dev); - return err; + return ERR_PTR(err); } -static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv) +static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv) { - dpaa2_eth_drain_pool(priv); - dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); - dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); - fsl_mc_object_free(priv->dpbp_dev); + struct dpaa2_eth_bp *bp; + int i; + + bp = dpaa2_eth_allocate_dpbp(priv); + if (IS_ERR(bp)) + return PTR_ERR(bp); + + priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp; + priv->num_bps++; + + for (i = 0; i < priv->num_channels; i++) + priv->channel[i]->bp = bp; + + return 0; +} + +void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp) +{ + int idx_bp; + + /* Find the index at which this BP is stored */ + for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++) + if (priv->bp[idx_bp] == bp) + break; + + /* Drain the pool and disable the associated MC object */ + dpaa2_eth_drain_pool(priv, bp->bpid); + dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle); + dpbp_close(priv->mc_io, 0, bp->dev->mc_handle); + fsl_mc_object_free(bp->dev); + kfree(bp); + + /* Move the last in use DPBP over in this position */ + priv->bp[idx_bp] = priv->bp[priv->num_bps - 1]; + priv->num_bps--; +} + +static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_bps; i++) + dpaa2_eth_free_dpbp(priv, priv->bp[i]); } static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv) @@ -3610,7 +3808,7 @@ static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev) dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n", priv->dpni_ver_major, priv->dpni_ver_minor, DPNI_VER_MAJOR, DPNI_VER_MINOR); - err = -ENOTSUPP; + err = -EOPNOTSUPP; goto close; } @@ -4154,15 +4352,16 @@ out: */ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv) { + struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX]; struct net_device *net_dev = priv->net_dev; + struct dpni_pools_cfg pools_params = { 0 }; struct device *dev = net_dev->dev.parent; - struct dpni_pools_cfg pools_params; struct dpni_error_cfg err_cfg; int err = 0; int i; pools_params.num_dpbp = 1; - pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; + pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id; pools_params.pools[0].backup_pool = 0; pools_params.pools[0].buffer_size = priv->rx_buf_size; err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); @@ -4426,8 +4625,10 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent); dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0); - if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) + if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) { + netdev_dbg(priv->net_dev, "waiting for mac\n"); return PTR_ERR(dpmac_dev); + } if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) return 0; @@ -4443,22 +4644,29 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) err = dpaa2_mac_open(mac); if (err) goto err_free_mac; - priv->mac = mac; - if (dpaa2_eth_is_type_phy(priv)) { + if (dpaa2_mac_is_type_phy(mac)) { err = dpaa2_mac_connect(mac); - if (err && err != -EPROBE_DEFER) - 
netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe", - ERR_PTR(err)); - if (err) + if (err) { + if (err == -EPROBE_DEFER) + netdev_dbg(priv->net_dev, + "could not connect to MAC\n"); + else + netdev_err(priv->net_dev, + "Error connecting to the MAC endpoint: %pe", + ERR_PTR(err)); goto err_close_mac; + } } + mutex_lock(&priv->mac_lock); + priv->mac = mac; + mutex_unlock(&priv->mac_lock); + return 0; err_close_mac: dpaa2_mac_close(mac); - priv->mac = NULL; err_free_mac: kfree(mac); return err; @@ -4466,15 +4674,21 @@ err_free_mac: static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv) { - if (dpaa2_eth_is_type_phy(priv)) - dpaa2_mac_disconnect(priv->mac); + struct dpaa2_mac *mac; + + mutex_lock(&priv->mac_lock); + mac = priv->mac; + priv->mac = NULL; + mutex_unlock(&priv->mac_lock); - if (!dpaa2_eth_has_mac(priv)) + if (!mac) return; - dpaa2_mac_close(priv->mac); - kfree(priv->mac); - priv->mac = NULL; + if (dpaa2_mac_is_type_phy(mac)) + dpaa2_mac_disconnect(mac); + + dpaa2_mac_close(mac); + kfree(mac); } static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) @@ -4484,6 +4698,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); struct net_device *net_dev = dev_get_drvdata(dev); struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + bool had_mac; int err; err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, @@ -4500,12 +4715,15 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) dpaa2_eth_set_mac_addr(netdev_priv(net_dev)); dpaa2_eth_update_tx_fqids(priv); - rtnl_lock(); - if (dpaa2_eth_has_mac(priv)) + /* We can avoid locking because the "endpoint changed" IRQ + * handler is the only one who changes priv->mac at runtime, + * so we are not racing with anyone. 
+ */ + had_mac = !!priv->mac; + if (had_mac) dpaa2_eth_disconnect_mac(priv); else dpaa2_eth_connect_mac(priv); - rtnl_unlock(); } return IRQ_HANDLED; @@ -4601,6 +4819,9 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) priv = netdev_priv(net_dev); priv->net_dev = net_dev; + SET_NETDEV_DEVLINK_PORT(net_dev, &priv->devlink_port); + + mutex_init(&priv->mac_lock); priv->iommu_domain = iommu_get_domain_for_dev(dev); @@ -4623,10 +4844,12 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &priv->mc_io); if (err) { - if (err == -ENXIO) + if (err == -ENXIO) { + dev_dbg(dev, "waiting for MC portal\n"); err = -EPROBE_DEFER; - else + } else { dev_err(dev, "MC portal allocation failed\n"); + } goto err_portal_alloc; } @@ -4641,7 +4864,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) dpaa2_eth_setup_fqs(priv); - err = dpaa2_eth_setup_dpbp(priv); + err = dpaa2_eth_setup_default_dpbp(priv); if (err) goto err_dpbp_setup; @@ -4707,6 +4930,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) } #endif + err = dpaa2_eth_connect_mac(priv); + if (err) + goto err_connect_mac; + err = dpaa2_eth_setup_irqs(dpni_dev); if (err) { netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); @@ -4719,10 +4946,6 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) priv->do_link_poll = true; } - err = dpaa2_eth_connect_mac(priv); - if (err) - goto err_connect_mac; - err = dpaa2_eth_dl_alloc(priv); if (err) goto err_dl_register; @@ -4756,13 +4979,13 @@ err_dl_port_add: err_dl_trap_register: dpaa2_eth_dl_free(priv); err_dl_register: - dpaa2_eth_disconnect_mac(priv); -err_connect_mac: if (priv->do_link_poll) kthread_stop(priv->poll_thread); else fsl_mc_free_irqs(dpni_dev); err_poll_thread: + dpaa2_eth_disconnect_mac(priv); +err_connect_mac: dpaa2_eth_free_rings(priv); err_alloc_rings: err_csum: @@ -4777,7 +5000,7 @@ err_alloc_percpu_extras: err_alloc_percpu_stats: dpaa2_eth_del_ch_napi(priv); err_bind: - dpaa2_eth_free_dpbp(priv); + dpaa2_eth_free_dpbps(priv); err_dpbp_setup: dpaa2_eth_free_dpio(priv); err_dpio_setup: @@ -4810,9 +5033,6 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) #endif unregister_netdev(net_dev); - rtnl_lock(); - dpaa2_eth_disconnect_mac(priv); - rtnl_unlock(); dpaa2_eth_dl_port_del(priv); dpaa2_eth_dl_traps_unregister(priv); @@ -4823,6 +5043,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) else fsl_mc_free_irqs(ls_dev); + dpaa2_eth_disconnect_mac(priv); dpaa2_eth_free_rings(priv); free_percpu(priv->fd); free_percpu(priv->sgt_cache); @@ -4830,7 +5051,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) free_percpu(priv->percpu_extras); dpaa2_eth_del_ch_napi(priv); - dpaa2_eth_free_dpbp(priv); + dpaa2_eth_free_dpbps(priv); dpaa2_eth_free_dpio(priv); dpaa2_eth_free_dpni(priv); if (priv->onestep_reg_base) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 447718483ef4..d56d7a13262e 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ /* Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright 2016-2020 NXP + * Copyright 2016-2022 NXP */ #ifndef __DPAA2_ETH_H @@ -53,6 +53,12 @@ */ #define DPAA2_ETH_TXCONF_PER_NAPI 256 +/* Maximum number of Tx frames to be processed in a single NAPI + * call when AF_XDP is running. 
Bind it to DPAA2_ETH_TXCONF_PER_NAPI + * to maximize the throughput. + */ +#define DPAA2_ETH_TX_ZC_PER_NAPI DPAA2_ETH_TXCONF_PER_NAPI + /* Buffer qouta per channel. We want to keep in check number of ingress frames * in flight: for small sized frames, congestion group taildrop may kick in * first; for large sizes, Rx FQ taildrop threshold will ensure only a @@ -109,6 +115,14 @@ #define DPAA2_ETH_RX_BUF_ALIGN_REV1 256 #define DPAA2_ETH_RX_BUF_ALIGN 64 +/* The firmware allows assigning multiple buffer pools to a single DPNI - + * maximum 8 DPBP objects. By default, only the first DPBP (idx 0) is used for + * all queues. Thus, when enabling AF_XDP we must accommodate up to 9 DPBPs + * object: the default and 8 other distinct buffer pools, one for each queue. + */ +#define DPAA2_ETH_DEFAULT_BP_IDX 0 +#define DPAA2_ETH_MAX_BPS 9 + /* We are accommodating a skb backpointer and some S/G info * in the frame's software annotation. The hardware * options are either 0 or 64, so we choose the latter. @@ -122,6 +136,7 @@ enum dpaa2_eth_swa_type { DPAA2_ETH_SWA_SINGLE, DPAA2_ETH_SWA_SG, DPAA2_ETH_SWA_XDP, + DPAA2_ETH_SWA_XSK, DPAA2_ETH_SWA_SW_TSO, }; @@ -144,6 +159,10 @@ struct dpaa2_eth_swa { struct xdp_frame *xdpf; } xdp; struct { + struct xdp_buff *xdp_buff; + int sgt_size; + } xsk; + struct { struct sk_buff *skb; int num_sg; int sgt_size; @@ -421,12 +440,19 @@ enum dpaa2_eth_fq_type { }; struct dpaa2_eth_priv; +struct dpaa2_eth_channel; +struct dpaa2_eth_fq; struct dpaa2_eth_xdp_fds { struct dpaa2_fd fds[DEV_MAP_BULK_SIZE]; ssize_t num; }; +typedef void dpaa2_eth_consume_cb_t(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct dpaa2_eth_fq *fq); + struct dpaa2_eth_fq { u32 fqid; u32 tx_qdbin; @@ -439,10 +465,7 @@ struct dpaa2_eth_fq { struct dpaa2_eth_channel *channel; enum dpaa2_eth_fq_type type; - void (*consume)(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch, - const struct dpaa2_fd *fd, - struct dpaa2_eth_fq *fq); + dpaa2_eth_consume_cb_t *consume; struct dpaa2_eth_fq_stats stats; struct dpaa2_eth_xdp_fds xdp_redirect_fds; @@ -454,6 +477,11 @@ struct dpaa2_eth_ch_xdp { unsigned int res; }; +struct dpaa2_eth_bp { + struct fsl_mc_device *dev; + int bpid; +}; + struct dpaa2_eth_channel { struct dpaa2_io_notification_ctx nctx; struct fsl_mc_device *dpcon; @@ -472,6 +500,11 @@ struct dpaa2_eth_channel { /* Buffers to be recycled back in the buffer pool */ u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD]; int recycled_bufs_cnt; + + bool xsk_zc; + int xsk_tx_pkts_sent; + struct xsk_buff_pool *xsk_pool; + struct dpaa2_eth_bp *bp; }; struct dpaa2_eth_dist_fields { @@ -506,7 +539,7 @@ struct dpaa2_eth_trap_data { #define DPAA2_ETH_DEFAULT_COPYBREAK 512 -#define DPAA2_ETH_ENQUEUE_MAX_FDS 200 +#define DPAA2_ETH_ENQUEUE_MAX_FDS 256 struct dpaa2_eth_fds { struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS]; }; @@ -535,14 +568,16 @@ struct dpaa2_eth_priv { u8 ptp_correction_off; void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv, u32 offset, u8 udp); - struct fsl_mc_device *dpbp_dev; u16 rx_buf_size; - u16 bpid; struct iommu_domain *iommu_domain; enum hwtstamp_tx_types tx_tstamp_type; /* Tx timestamping type */ bool rx_tstamp; /* Rx timestamping enabled */ + /* Buffer pool management */ + struct dpaa2_eth_bp *bp[DPAA2_ETH_MAX_BPS]; + int num_bps; + u16 tx_qdid; struct fsl_mc_io *mc_io; /* Cores which have an affine DPIO/DPCON. 
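With a dpaa2_eth_bp now attached to each channel, a channel running in XSK zero-copy mode seeds its pool from the UMEM rather than from dev_alloc_pages(). A trimmed sketch of that batch-allocation path (error handling and the software-annotation back-pointer that dpaa2_eth_add_bufs() stores are omitted; names are illustrative):

#include <net/xdp_sock_drv.h>

/* Trimmed sketch of the zero-copy seeding path in dpaa2_eth_add_bufs():
 * buffers come out of the UMEM via the XSK pool API instead of
 * dev_alloc_pages(), and their frame DMA addresses are what gets
 * released to the hardware buffer pool. */
static u32 example_seed_xsk(struct xsk_buff_pool *pool,
			    struct xdp_buff **bufs, u64 *hw_addrs, u32 max)
{
	u32 i, batch;

	/* Bail out early if the UMEM cannot provide a full batch. */
	if (!xsk_buff_can_alloc(pool, max))
		return 0;

	batch = xsk_buff_alloc_batch(pool, bufs, max);
	for (i = 0; i < batch; i++)
		hw_addrs[i] = xsk_buff_xdp_get_frame_dma(bufs[i]);

	return batch;
}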
@@ -580,6 +615,8 @@ struct dpaa2_eth_priv {
 #endif
 	struct dpaa2_mac *mac;
+	/* Serializes changes to priv->mac */
+	struct mutex mac_lock;
 	struct workqueue_struct *dpaa2_ptp_wq;
 	struct work_struct tx_onestep_tstamp;
 	struct sk_buff_head tx_skbs;
@@ -733,16 +770,15 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
 
 static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
 {
-	if (priv->mac &&
-	    (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
-	     priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
-		return true;
+	lockdep_assert_held(&priv->mac_lock);
 
-	return false;
+	return dpaa2_mac_is_type_phy(priv->mac);
 }
 
 static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
 {
+	lockdep_assert_held(&priv->mac_lock);
+
 	return priv->mac ? true : false;
 }
@@ -771,4 +807,54 @@ void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv);
 
 struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
 						  struct dpaa2_fapr *fapr);
+
+struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp);
+
+struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
+				    struct dpaa2_eth_channel *ch,
+				    const struct dpaa2_fd *fd, u32 fd_length,
+				    void *fd_vaddr);
+
+void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
+			   struct dpaa2_eth_channel *ch,
+			   const struct dpaa2_fd *fd, void *vaddr,
+			   struct dpaa2_eth_fq *fq,
+			   struct rtnl_link_stats64 *percpu_stats,
+			   struct sk_buff *skb);
+
+void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+		  struct dpaa2_eth_channel *ch,
+		  const struct dpaa2_fd *fd,
+		  struct dpaa2_eth_fq *fq);
+
+void *dpaa2_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr);
+void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+			   struct dpaa2_eth_channel *ch,
+			   dma_addr_t addr);
+
+void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+			   struct dpaa2_eth_channel *ch,
+			   struct dpaa2_fd *fd,
+			   void *buf_start, u16 queue_id);
+
+int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);
+
+void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+			  struct dpaa2_eth_channel *ch,
+			  struct dpaa2_eth_fq *fq,
+			  const struct dpaa2_fd *fd, bool in_napi);
+bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
+		  struct dpaa2_eth_channel *ch);
+
+/* SGT (Scatter-Gather Table) cache management */
+void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf);
+
 #endif	/* __DPAA2_ETH_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index eea7d7a07c00..e80e9388c71f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP - * Copyright 2020 NXP + * Copyright 2016-2022 NXP */ #include <linux/net_tstamp.h> @@ -86,11 +85,16 @@ static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, static int dpaa2_eth_nway_reset(struct net_device *net_dev) { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err = -EOPNOTSUPP; + + mutex_lock(&priv->mac_lock); if (dpaa2_eth_is_type_phy(priv)) - return phylink_ethtool_nway_reset(priv->mac->phylink); + err = phylink_ethtool_nway_reset(priv->mac->phylink); + + mutex_unlock(&priv->mac_lock); - return -EOPNOTSUPP; + return err; } static int @@ -98,10 +102,18 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev, struct ethtool_link_ksettings *link_settings) { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err; - if (dpaa2_eth_is_type_phy(priv)) - return phylink_ethtool_ksettings_get(priv->mac->phylink, - link_settings); + mutex_lock(&priv->mac_lock); + + if (dpaa2_eth_is_type_phy(priv)) { + err = phylink_ethtool_ksettings_get(priv->mac->phylink, + link_settings); + mutex_unlock(&priv->mac_lock); + return err; + } + + mutex_unlock(&priv->mac_lock); link_settings->base.autoneg = AUTONEG_DISABLE; if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX)) @@ -116,11 +128,17 @@ dpaa2_eth_set_link_ksettings(struct net_device *net_dev, const struct ethtool_link_ksettings *link_settings) { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err = -EOPNOTSUPP; + + mutex_lock(&priv->mac_lock); + + if (dpaa2_eth_is_type_phy(priv)) + err = phylink_ethtool_ksettings_set(priv->mac->phylink, + link_settings); - if (!dpaa2_eth_is_type_phy(priv)) - return -ENOTSUPP; + mutex_unlock(&priv->mac_lock); - return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings); + return err; } static void dpaa2_eth_get_pauseparam(struct net_device *net_dev, @@ -129,11 +147,16 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev, struct dpaa2_eth_priv *priv = netdev_priv(net_dev); u64 link_options = priv->link_state.options; + mutex_lock(&priv->mac_lock); + if (dpaa2_eth_is_type_phy(priv)) { phylink_ethtool_get_pauseparam(priv->mac->phylink, pause); + mutex_unlock(&priv->mac_lock); return; } + mutex_unlock(&priv->mac_lock); + pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options); pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options); pause->autoneg = AUTONEG_DISABLE; @@ -152,9 +175,17 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev, return -EOPNOTSUPP; } - if (dpaa2_eth_is_type_phy(priv)) - return phylink_ethtool_set_pauseparam(priv->mac->phylink, - pause); + mutex_lock(&priv->mac_lock); + + if (dpaa2_eth_is_type_phy(priv)) { + err = phylink_ethtool_set_pauseparam(priv->mac->phylink, + pause); + mutex_unlock(&priv->mac_lock); + return err; + } + + mutex_unlock(&priv->mac_lock); + if (pause->autoneg) return -EOPNOTSUPP; @@ -186,7 +217,6 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev, static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { - struct dpaa2_eth_priv *priv = netdev_priv(netdev); u8 *p = data; int i; @@ -200,22 +230,17 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - if (dpaa2_eth_has_mac(priv)) - dpaa2_mac_get_strings(p); + dpaa2_mac_get_strings(p); break; } } static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) { - int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; - struct dpaa2_eth_priv *priv = 
netdev_priv(net_dev); - switch (sset) { case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ - if (dpaa2_eth_has_mac(priv)) - num_ss_stats += dpaa2_mac_get_sset_count(); - return num_ss_stats; + return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS + + dpaa2_mac_get_sset_count(); default: return -EOPNOTSUPP; } @@ -227,17 +252,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, struct ethtool_stats *stats, u64 *data) { - int i = 0; - int j, k, err; - int num_cnt; - union dpni_statistics dpni_stats; - u32 fcnt, bcnt; - u32 fcnt_rx_total = 0, fcnt_tx_total = 0; - u32 bcnt_rx_total = 0, bcnt_tx_total = 0; - u32 buf_cnt; struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - struct dpaa2_eth_drv_stats *extras; - struct dpaa2_eth_ch_stats *ch_stats; + union dpni_statistics dpni_stats; int dpni_stats_page_size[DPNI_STATISTICS_CNT] = { sizeof(dpni_stats.page_0), sizeof(dpni_stats.page_1), @@ -247,6 +263,13 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, sizeof(dpni_stats.page_5), sizeof(dpni_stats.page_6), }; + u32 fcnt_rx_total = 0, fcnt_tx_total = 0; + u32 bcnt_rx_total = 0, bcnt_tx_total = 0; + struct dpaa2_eth_ch_stats *ch_stats; + struct dpaa2_eth_drv_stats *extras; + u32 buf_cnt, buf_cnt_total = 0; + int j, k, err, num_cnt, i = 0; + u32 fcnt, bcnt; memset(data, 0, sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); @@ -308,15 +331,22 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, *(data + i++) = fcnt_tx_total; *(data + i++) = bcnt_tx_total; - err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt); - if (err) { - netdev_warn(net_dev, "Buffer count query error %d\n", err); - return; + for (j = 0; j < priv->num_bps; j++) { + err = dpaa2_io_query_bp_count(NULL, priv->bp[j]->bpid, &buf_cnt); + if (err) { + netdev_warn(net_dev, "Buffer count query error %d\n", err); + return; + } + buf_cnt_total += buf_cnt; } - *(data + i++) = buf_cnt; + *(data + i++) = buf_cnt_total; + + mutex_lock(&priv->mac_lock); if (dpaa2_eth_has_mac(priv)) dpaa2_mac_get_ethtool_stats(priv->mac, data + i); + + mutex_unlock(&priv->mac_lock); } static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask, @@ -876,6 +906,29 @@ restore_rx_usecs: return err; } +static void dpaa2_eth_get_channels(struct net_device *net_dev, + struct ethtool_channels *channels) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int queue_count = dpaa2_eth_queue_count(priv); + + channels->max_rx = queue_count; + channels->max_tx = queue_count; + channels->rx_count = queue_count; + channels->tx_count = queue_count; + + /* Tx confirmation and Rx error */ + channels->max_other = queue_count + 1; + channels->max_combined = channels->max_rx + + channels->max_tx + + channels->max_other; + /* Tx conf and Rx err */ + channels->other_count = queue_count + 1; + channels->combined_count = channels->rx_count + + channels->tx_count + + channels->other_count; +} + const struct ethtool_ops dpaa2_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, @@ -896,4 +949,5 @@ const struct ethtool_ops dpaa2_ethtool_ops = { .set_tunable = dpaa2_eth_set_tunable, .get_coalesce = dpaa2_eth_get_coalesce, .set_coalesce = dpaa2_eth_set_coalesce, + .get_channels = dpaa2_eth_get_channels, }; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c index 49ff85633783..c886f33f8c6f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c +++ 
b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c @@ -105,6 +105,7 @@ static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev, * thus the fwnode field is not yet set. Defer probe if we are * facing this situation. */ + dev_dbg(dev, "dprc not finished probing\n"); return ERR_PTR(-EPROBE_DEFER); } @@ -235,7 +236,6 @@ static void dpaa2_mac_link_down(struct phylink_config *config, } static const struct phylink_mac_ops dpaa2_mac_phylink_ops = { - .validate = phylink_generic_validate, .mac_select_pcs = dpaa2_mac_select_pcs, .mac_config = dpaa2_mac_config, .mac_link_up = dpaa2_mac_link_up, @@ -264,8 +264,10 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac, mdiodev = fwnode_mdio_find_device(node); fwnode_handle_put(node); - if (!mdiodev) + if (!mdiodev) { + netdev_dbg(mac->net_dev, "missing PCS device\n"); return -EPROBE_DEFER; + } mac->pcs = lynx_pcs_create(mdiodev); if (!mac->pcs) { @@ -336,12 +338,20 @@ static void dpaa2_mac_set_supported_interfaces(struct dpaa2_mac *mac) void dpaa2_mac_start(struct dpaa2_mac *mac) { + ASSERT_RTNL(); + if (mac->serdes_phy) phy_power_on(mac->serdes_phy); + + phylink_start(mac->phylink); } void dpaa2_mac_stop(struct dpaa2_mac *mac) { + ASSERT_RTNL(); + + phylink_stop(mac->phylink); + if (mac->serdes_phy) phy_power_off(mac->serdes_phy); } @@ -420,7 +430,9 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac) } mac->phylink = phylink; + rtnl_lock(); err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0); + rtnl_unlock(); if (err) { netdev_err(net_dev, "phylink_fwnode_phy_connect() = %d\n", err); goto err_phylink_destroy; @@ -438,10 +450,10 @@ err_pcs_destroy: void dpaa2_mac_disconnect(struct dpaa2_mac *mac) { - if (!mac->phylink) - return; - + rtnl_lock(); phylink_disconnect_phy(mac->phylink); + rtnl_unlock(); + phylink_destroy(mac->phylink); dpaa2_pcs_destroy(mac); of_phy_put(mac->serdes_phy); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h index a58cab188a99..c1ec9efd413a 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h @@ -30,8 +30,14 @@ struct dpaa2_mac { struct phy *serdes_phy; }; -bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev, - struct fsl_mc_io *mc_io); +static inline bool dpaa2_mac_is_type_phy(struct dpaa2_mac *mac) +{ + if (!mac) + return false; + + return mac->attr.link_type == DPMAC_LINK_TYPE_PHY || + mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE; +} int dpaa2_mac_open(struct dpaa2_mac *mac); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index c8cb541572ff..90d23ab1ce9d 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -8,7 +8,6 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/msi.h> #include <linux/fsl/mc.h> #include "dpaa2-ptp.h" diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c index 720c9230cab5..6bc1988be311 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c @@ -60,11 +60,18 @@ dpaa2_switch_get_link_ksettings(struct net_device *netdev, { struct ethsw_port_priv *port_priv = netdev_priv(netdev); struct dpsw_link_state state = {0}; - int err = 0; + int err; + + mutex_lock(&port_priv->mac_lock); + + if 
(dpaa2_switch_port_is_type_phy(port_priv)) { + err = phylink_ethtool_ksettings_get(port_priv->mac->phylink, + link_ksettings); + mutex_unlock(&port_priv->mac_lock); + return err; + } - if (dpaa2_switch_port_is_type_phy(port_priv)) - return phylink_ethtool_ksettings_get(port_priv->mac->phylink, - link_ksettings); + mutex_unlock(&port_priv->mac_lock); err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, port_priv->ethsw_data->dpsw_handle, @@ -99,9 +106,16 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev, bool if_running; int err = 0, ret; - if (dpaa2_switch_port_is_type_phy(port_priv)) - return phylink_ethtool_ksettings_set(port_priv->mac->phylink, - link_ksettings); + mutex_lock(&port_priv->mac_lock); + + if (dpaa2_switch_port_is_type_phy(port_priv)) { + err = phylink_ethtool_ksettings_set(port_priv->mac->phylink, + link_ksettings); + mutex_unlock(&port_priv->mac_lock); + return err; + } + + mutex_unlock(&port_priv->mac_lock); /* Interface needs to be down to change link settings */ if_running = netif_running(netdev); @@ -145,14 +159,9 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev, static int dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset) { - struct ethsw_port_priv *port_priv = netdev_priv(netdev); - int num_ss_stats = DPAA2_SWITCH_NUM_COUNTERS; - switch (sset) { case ETH_SS_STATS: - if (port_priv->mac) - num_ss_stats += dpaa2_mac_get_sset_count(); - return num_ss_stats; + return DPAA2_SWITCH_NUM_COUNTERS + dpaa2_mac_get_sset_count(); default: return -EOPNOTSUPP; } @@ -161,7 +170,6 @@ dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset) static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { - struct ethsw_port_priv *port_priv = netdev_priv(netdev); u8 *p = data; int i; @@ -172,8 +180,7 @@ static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - if (port_priv->mac) - dpaa2_mac_get_strings(p); + dpaa2_mac_get_strings(p); break; } } @@ -196,8 +203,12 @@ static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev, dpaa2_switch_ethtool_counters[i].name, err); } - if (port_priv->mac) + mutex_lock(&port_priv->mac_lock); + + if (dpaa2_switch_port_has_mac(port_priv)) dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i); + + mutex_unlock(&port_priv->mac_lock); } const struct ethtool_ops dpaa2_switch_port_ethtool_ops = { diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 2b5909fa93cf..f4ae4289c41a 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -10,7 +10,6 @@ #include <linux/module.h> #include <linux/interrupt.h> -#include <linux/msi.h> #include <linux/kthread.h> #include <linux/workqueue.h> #include <linux/iommu.h> @@ -603,8 +602,11 @@ static int dpaa2_switch_port_link_state_update(struct net_device *netdev) /* When we manage the MAC/PHY using phylink there is no need * to manually update the netif_carrier. + * We can avoid locking because we are called from the "link changed" + * IRQ handler, which is the same as the "endpoint changed" IRQ handler + * (the writer to port_priv->mac), so we cannot race with it. 
*/ - if (dpaa2_switch_port_is_type_phy(port_priv)) + if (dpaa2_mac_is_type_phy(port_priv->mac)) return 0; /* Interrupts are received even though no one issued an 'ifconfig up' @@ -684,6 +686,8 @@ static int dpaa2_switch_port_open(struct net_device *netdev) struct ethsw_core *ethsw = port_priv->ethsw_data; int err; + mutex_lock(&port_priv->mac_lock); + if (!dpaa2_switch_port_is_type_phy(port_priv)) { /* Explicitly set carrier off, otherwise * netif_carrier_ok() will return true and cause 'ip link show' @@ -697,16 +701,17 @@ static int dpaa2_switch_port_open(struct net_device *netdev) port_priv->ethsw_data->dpsw_handle, port_priv->idx); if (err) { + mutex_unlock(&port_priv->mac_lock); netdev_err(netdev, "dpsw_if_enable err %d\n", err); return err; } dpaa2_switch_enable_ctrl_if_napi(ethsw); - if (dpaa2_switch_port_is_type_phy(port_priv)) { + if (dpaa2_switch_port_is_type_phy(port_priv)) dpaa2_mac_start(port_priv->mac); - phylink_start(port_priv->mac->phylink); - } + + mutex_unlock(&port_priv->mac_lock); return 0; } @@ -717,14 +722,17 @@ static int dpaa2_switch_port_stop(struct net_device *netdev) struct ethsw_core *ethsw = port_priv->ethsw_data; int err; + mutex_lock(&port_priv->mac_lock); + if (dpaa2_switch_port_is_type_phy(port_priv)) { - phylink_stop(port_priv->mac->phylink); dpaa2_mac_stop(port_priv->mac); } else { netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); } + mutex_unlock(&port_priv->mac_lock); + err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, port_priv->ethsw_data->dpsw_handle, port_priv->idx); @@ -1453,9 +1461,8 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv) err = dpaa2_mac_open(mac); if (err) goto err_free_mac; - port_priv->mac = mac; - if (dpaa2_switch_port_is_type_phy(port_priv)) { + if (dpaa2_mac_is_type_phy(mac)) { err = dpaa2_mac_connect(mac); if (err) { netdev_err(port_priv->netdev, @@ -1465,11 +1472,14 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv) } } + mutex_lock(&port_priv->mac_lock); + port_priv->mac = mac; + mutex_unlock(&port_priv->mac_lock); + return 0; err_close_mac: dpaa2_mac_close(mac); - port_priv->mac = NULL; err_free_mac: kfree(mac); return err; @@ -1477,15 +1487,21 @@ err_free_mac: static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv) { - if (dpaa2_switch_port_is_type_phy(port_priv)) - dpaa2_mac_disconnect(port_priv->mac); + struct dpaa2_mac *mac; + + mutex_lock(&port_priv->mac_lock); + mac = port_priv->mac; + port_priv->mac = NULL; + mutex_unlock(&port_priv->mac_lock); - if (!dpaa2_switch_port_has_mac(port_priv)) + if (!mac) return; - dpaa2_mac_close(port_priv->mac); - kfree(port_priv->mac); - port_priv->mac = NULL; + if (dpaa2_mac_is_type_phy(mac)) + dpaa2_mac_disconnect(mac); + + dpaa2_mac_close(mac); + kfree(mac); } static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) @@ -1495,6 +1511,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) struct ethsw_port_priv *port_priv; u32 status = ~0; int err, if_id; + bool had_mac; err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, DPSW_IRQ_INDEX_IF, &status); @@ -1512,12 +1529,15 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) } if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) { - rtnl_lock(); - if (dpaa2_switch_port_has_mac(port_priv)) + /* We can avoid locking because the "endpoint changed" IRQ + * handler is the only one who changes priv->mac at runtime, + * so we are not racing with anyone. 
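+		 * Readers that can run concurrently with this handler
+		 * (ethtool operations, ndo callbacks) do take mac_lock
+		 * before looking at the mac pointer, along the lines of
+		 * the following pattern (an illustration matching the
+		 * ethtool hunks above, not an excerpt from the patch):
+		 *
+		 *   mutex_lock(&port_priv->mac_lock);
+		 *   if (dpaa2_switch_port_has_mac(port_priv))
+		 *           ... use port_priv->mac ...
+		 *   mutex_unlock(&port_priv->mac_lock);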
+ */ + had_mac = !!port_priv->mac; + if (had_mac) dpaa2_switch_port_disconnect_mac(port_priv); else dpaa2_switch_port_connect_mac(port_priv); - rtnl_unlock(); } out: @@ -2935,9 +2955,7 @@ static void dpaa2_switch_remove_port(struct ethsw_core *ethsw, { struct ethsw_port_priv *port_priv = ethsw->ports[port_idx]; - rtnl_lock(); dpaa2_switch_port_disconnect_mac(port_priv); - rtnl_unlock(); free_netdev(port_priv->netdev); ethsw->ports[port_idx] = NULL; } @@ -3256,6 +3274,8 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, port_priv->netdev = port_netdev; port_priv->ethsw_data = ethsw; + mutex_init(&port_priv->mac_lock); + port_priv->idx = port_idx; port_priv->stp_state = BR_STATE_FORWARDING; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h index 0002dca4d417..42b3ca73f55d 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h @@ -161,6 +161,8 @@ struct ethsw_port_priv { struct dpaa2_switch_filter_block *filter_block; struct dpaa2_mac *mac; + /* Protects against changes to port_priv->mac */ + struct mutex mac_lock; }; /* Switch data */ @@ -230,12 +232,7 @@ static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw) static inline bool dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv) { - if (port_priv->mac && - (port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY || - port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE)) - return true; - - return false; + return dpaa2_mac_is_type_phy(port_priv->mac); } static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c new file mode 100644 index 000000000000..051748b997f3 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c @@ -0,0 +1,454 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2022 NXP + */ +#include <linux/filter.h> +#include <linux/compiler.h> +#include <linux/bpf_trace.h> +#include <net/xdp.h> +#include <net/xdp_sock_drv.h> + +#include "dpaa2-eth.h" + +static void dpaa2_eth_setup_consume_func(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + enum dpaa2_eth_fq_type type, + dpaa2_eth_consume_cb_t *consume) +{ + struct dpaa2_eth_fq *fq; + int i; + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + + if (fq->type != type) + continue; + if (fq->channel != ch) + continue; + + fq->consume = consume; + } +} + +static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *rx_fq, + struct dpaa2_fd *fd, void *vaddr) +{ + dma_addr_t addr = dpaa2_fd_get_addr(fd); + struct bpf_prog *xdp_prog; + struct xdp_buff *xdp_buff; + struct dpaa2_eth_swa *swa; + u32 xdp_act = XDP_PASS; + int err; + + xdp_prog = READ_ONCE(ch->xdp.prog); + if (!xdp_prog) + goto out; + + swa = (struct dpaa2_eth_swa *)(vaddr + DPAA2_ETH_RX_HWA_SIZE + + ch->xsk_pool->umem->headroom); + xdp_buff = swa->xsk.xdp_buff; + + xdp_buff->data_hard_start = vaddr; + xdp_buff->data = vaddr + dpaa2_fd_get_offset(fd); + xdp_buff->data_end = xdp_buff->data + dpaa2_fd_get_len(fd); + xdp_set_data_meta_invalid(xdp_buff); + xdp_buff->rxq = &ch->xdp_rxq; + + xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool); + xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff); + + /* xdp.data pointer may have changed */ + dpaa2_fd_set_offset(fd, xdp_buff->data - vaddr); + 
dpaa2_fd_set_len(fd, xdp_buff->data_end - xdp_buff->data); + + if (likely(xdp_act == XDP_REDIRECT)) { + err = xdp_do_redirect(priv->net_dev, xdp_buff, xdp_prog); + if (unlikely(err)) { + ch->stats.xdp_drop++; + dpaa2_eth_recycle_buf(priv, ch, addr); + } else { + ch->buf_count--; + ch->stats.xdp_redirect++; + } + + goto xdp_redir; + } + + switch (xdp_act) { + case XDP_PASS: + break; + case XDP_TX: + dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid); + break; + default: + bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); + fallthrough; + case XDP_DROP: + dpaa2_eth_recycle_buf(priv, ch, addr); + ch->stats.xdp_drop++; + break; + } + +xdp_redir: + ch->xdp.res |= xdp_act; +out: + return xdp_act; +} + +/* Rx frame processing routine for the AF_XDP fast path */ +static void dpaa2_xsk_rx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct dpaa2_eth_fq *fq) +{ + dma_addr_t addr = dpaa2_fd_get_addr(fd); + u8 fd_format = dpaa2_fd_get_format(fd); + struct rtnl_link_stats64 *percpu_stats; + u32 fd_length = dpaa2_fd_get_len(fd); + struct sk_buff *skb; + void *vaddr; + u32 xdp_act; + + trace_dpaa2_rx_xsk_fd(priv->net_dev, fd); + + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); + percpu_stats = this_cpu_ptr(priv->percpu_stats); + + if (fd_format != dpaa2_fd_single) { + WARN_ON(priv->xdp_prog); + /* AF_XDP doesn't support any other formats */ + goto err_frame_format; + } + + xdp_act = dpaa2_xsk_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); + if (xdp_act != XDP_PASS) { + percpu_stats->rx_packets++; + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); + return; + } + + /* Build skb */ + skb = dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, vaddr); + if (!skb) + /* Nothing else we can do, recycle the buffer and + * drop the frame. 
+		 */
+		goto err_alloc_skb;
+
+	/* Send the skb to the Linux networking stack */
+	dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
+
+	return;
+
+err_alloc_skb:
+	dpaa2_eth_recycle_buf(priv, ch, addr);
+err_frame_format:
+	percpu_stats->rx_dropped++;
+}
+
+static void dpaa2_xsk_set_bp_per_qdbin(struct dpaa2_eth_priv *priv,
+				       struct dpni_pools_cfg *pools_params)
+{
+	int curr_bp = 0, i, j;
+
+	pools_params->pool_options = DPNI_POOL_ASSOC_QDBIN;
+	for (i = 0; i < priv->num_bps; i++) {
+		for (j = 0; j < priv->num_channels; j++)
+			if (priv->bp[i] == priv->channel[j]->bp)
+				pools_params->pools[curr_bp].priority_mask |= (1 << j);
+		if (!pools_params->pools[curr_bp].priority_mask)
+			continue;
+
+		pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;
+		pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;
+		pools_params->pools[curr_bp++].backup_pool = 0;
+	}
+	pools_params->num_dpbp = curr_bp;
+}
+
+static int dpaa2_xsk_disable_pool(struct net_device *dev, u16 qid)
+{
+	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid);
+	struct dpaa2_eth_priv *priv = netdev_priv(dev);
+	struct dpni_pools_cfg pools_params = { 0 };
+	struct dpaa2_eth_channel *ch;
+	int err;
+	bool up;
+
+	ch = priv->channel[qid];
+	if (!ch->xsk_pool)
+		return -EINVAL;
+
+	up = netif_running(dev);
+	if (up)
+		dev_close(dev);
+
+	xsk_pool_dma_unmap(pool, 0);
+	err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq,
+					 MEM_TYPE_PAGE_ORDER0, NULL);
+	if (err)
+		netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n",
+			   err);
+
+	dpaa2_eth_free_dpbp(priv, ch->bp);
+
+	ch->xsk_zc = false;
+	ch->xsk_pool = NULL;
+	ch->xsk_tx_pkts_sent = 0;
+	ch->bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
+
+	dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_eth_rx);
+
+	dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params);
+	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
+	if (err)
+		netdev_err(dev, "dpni_set_pools() failed\n");
+
+	if (up) {
+		err = dev_open(dev, NULL);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int dpaa2_xsk_enable_pool(struct net_device *dev,
+				 struct xsk_buff_pool *pool,
+				 u16 qid)
+{
+	struct dpaa2_eth_priv *priv = netdev_priv(dev);
+	struct dpni_pools_cfg pools_params = { 0 };
+	struct dpaa2_eth_channel *ch;
+	int err, err2;
+	bool up;
+
+	if (priv->dpni_attrs.wriop_version < DPAA2_WRIOP_VERSION(3, 0, 0)) {
+		netdev_err(dev, "AF_XDP zero-copy not supported on devices older than WRIOP(3, 0, 0)\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (priv->dpni_attrs.num_queues > 8) {
+		netdev_err(dev, "AF_XDP zero-copy not supported on DPNI with more than 8 queues\n");
+		return -EOPNOTSUPP;
+	}
+
+	up = netif_running(dev);
+	if (up)
+		dev_close(dev);
+
+	err = xsk_pool_dma_map(pool, priv->net_dev->dev.parent, 0);
+	if (err) {
+		netdev_err(dev, "xsk_pool_dma_map() failed (err = %d)\n",
+			   err);
+		goto err_dma_unmap;
+	}
+
+	ch = priv->channel[qid];
+	err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
+	if (err) {
+		netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", err);
+		goto err_mem_model;
+	}
+	xsk_pool_set_rxq_info(pool, &ch->xdp_rxq);
+
+	priv->bp[priv->num_bps] = dpaa2_eth_allocate_dpbp(priv);
+	if (IS_ERR(priv->bp[priv->num_bps])) {
+		err = PTR_ERR(priv->bp[priv->num_bps]);
+		goto err_bp_alloc;
+	}
+	ch->xsk_zc = true;
+	ch->xsk_pool = pool;
+	ch->bp = priv->bp[priv->num_bps++];
+
+	dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_xsk_rx);
+
+	dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params);
+	err =
dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); + if (err) { + netdev_err(dev, "dpni_set_pools() failed\n"); + goto err_set_pools; + } + + if (up) { + err = dev_open(dev, NULL); + if (err) + return err; + } + + return 0; + +err_set_pools: + err2 = dpaa2_xsk_disable_pool(dev, qid); + if (err2) + netdev_err(dev, "dpaa2_xsk_disable_pool() failed %d\n", err2); +err_bp_alloc: + err2 = xdp_rxq_info_reg_mem_model(&priv->channel[qid]->xdp_rxq, + MEM_TYPE_PAGE_ORDER0, NULL); + if (err2) + netdev_err(dev, "xsk_rxq_info_reg_mem_model() failed with %d)\n", err2); +err_mem_model: + xsk_pool_dma_unmap(pool, 0); +err_dma_unmap: + if (up) + dev_open(dev, NULL); + + return err; +} + +int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid) +{ + return pool ? dpaa2_xsk_enable_pool(dev, pool, qid) : + dpaa2_xsk_disable_pool(dev, qid); +} + +int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct dpaa2_eth_channel *ch = priv->channel[qid]; + + if (!priv->link_state.up) + return -ENETDOWN; + + if (!priv->xdp_prog) + return -EINVAL; + + if (!ch->xsk_zc) + return -EINVAL; + + /* We do not have access to a per channel SW interrupt, so instead we + * schedule a NAPI instance. + */ + if (!napi_if_scheduled_mark_missed(&ch->napi)) + napi_schedule(&ch->napi); + + return 0; +} + +static int dpaa2_xsk_tx_build_fd(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_fd *fd, + struct xdp_desc *xdp_desc) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpaa2_sg_entry *sgt; + struct dpaa2_eth_swa *swa; + void *sgt_buf = NULL; + dma_addr_t sgt_addr; + int sgt_buf_size; + dma_addr_t addr; + int err = 0; + + /* Prepare the HW SGT structure */ + sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry); + sgt_buf = dpaa2_eth_sgt_get(priv); + if (unlikely(!sgt_buf)) + return -ENOMEM; + sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); + + /* Get the address of the XSK Tx buffer */ + addr = xsk_buff_raw_get_dma(ch->xsk_pool, xdp_desc->addr); + xsk_buff_raw_dma_sync_for_device(ch->xsk_pool, addr, xdp_desc->len); + + /* Fill in the HW SGT structure */ + dpaa2_sg_set_addr(sgt, addr); + dpaa2_sg_set_len(sgt, xdp_desc->len); + dpaa2_sg_set_final(sgt, true); + + /* Store the necessary info in the SGT buffer */ + swa = (struct dpaa2_eth_swa *)sgt_buf; + swa->type = DPAA2_ETH_SWA_XSK; + swa->xsk.sgt_size = sgt_buf_size; + + /* Separately map the SGT buffer */ + sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, sgt_addr))) { + err = -ENOMEM; + goto sgt_map_failed; + } + + /* Initialize FD fields */ + memset(fd, 0, sizeof(struct dpaa2_fd)); + dpaa2_fd_set_offset(fd, priv->tx_data_offset); + dpaa2_fd_set_format(fd, dpaa2_fd_sg); + dpaa2_fd_set_addr(fd, sgt_addr); + dpaa2_fd_set_len(fd, xdp_desc->len); + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); + + return 0; + +sgt_map_failed: + dpaa2_eth_sgt_recycle(priv, sgt_buf); + + return err; +} + +bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch) +{ + struct xdp_desc *xdp_descs = ch->xsk_pool->tx_descs; + struct dpaa2_eth_drv_stats *percpu_extras; + struct rtnl_link_stats64 *percpu_stats; + int budget = DPAA2_ETH_TX_ZC_PER_NAPI; + int total_enqueued, enqueued; + int retries, max_retries; + struct dpaa2_eth_fq *fq; + struct dpaa2_fd *fds; + int batch, i, err; + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + percpu_extras = 
this_cpu_ptr(priv->percpu_extras); + fds = (this_cpu_ptr(priv->fd))->array; + + /* Use the FQ with the same idx as the affine CPU */ + fq = &priv->fq[ch->nctx.desired_cpu]; + + batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget); + if (!batch) + return false; + + /* Create a FD for each XSK frame to be sent */ + for (i = 0; i < batch; i++) { + err = dpaa2_xsk_tx_build_fd(priv, ch, &fds[i], &xdp_descs[i]); + if (err) { + batch = i; + break; + } + + trace_dpaa2_tx_xsk_fd(priv->net_dev, &fds[i]); + } + + /* Enqueue all the created FDs */ + max_retries = batch * DPAA2_ETH_ENQUEUE_RETRIES; + total_enqueued = 0; + enqueued = 0; + retries = 0; + while (total_enqueued < batch && retries < max_retries) { + err = priv->enqueue(priv, fq, &fds[total_enqueued], 0, + batch - total_enqueued, &enqueued); + if (err == -EBUSY) { + retries++; + continue; + } + + total_enqueued += enqueued; + } + percpu_extras->tx_portal_busy += retries; + + /* Update statistics */ + percpu_stats->tx_packets += total_enqueued; + for (i = 0; i < total_enqueued; i++) + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]); + for (i = total_enqueued; i < batch; i++) { + dpaa2_eth_free_tx_fd(priv, ch, fq, &fds[i], false); + percpu_stats->tx_errors++; + } + + xsk_tx_release(ch->xsk_pool); + + return total_enqueued == budget; +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h index 828f538097af..be9492b8d5dc 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h @@ -13,10 +13,12 @@ #define DPNI_VER_MINOR 0 #define DPNI_CMD_BASE_VERSION 1 #define DPNI_CMD_2ND_VERSION 2 +#define DPNI_CMD_3RD_VERSION 3 #define DPNI_CMD_ID_OFFSET 4 #define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION) #define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION) +#define DPNI_CMD_V3(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_3RD_VERSION) #define DPNI_CMDID_OPEN DPNI_CMD(0x801) #define DPNI_CMDID_CLOSE DPNI_CMD(0x800) @@ -39,7 +41,7 @@ #define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016) #define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017) -#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200) +#define DPNI_CMDID_SET_POOLS DPNI_CMD_V3(0x200) #define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B) #define DPNI_CMDID_GET_QDID DPNI_CMD(0x210) @@ -115,14 +117,19 @@ struct dpni_cmd_open { }; #define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) + +struct dpni_cmd_pool { + __le16 dpbp_id; + u8 priority_mask; + u8 pad; +}; + struct dpni_cmd_set_pools { - /* cmd word 0 */ u8 num_dpbp; u8 backup_pool_mask; - __le16 pad; - /* cmd word 0..4 */ - __le32 dpbp_id[DPNI_MAX_DPBP]; - /* cmd word 4..6 */ + u8 pad; + u8 pool_options; + struct dpni_cmd_pool pool[DPNI_MAX_DPBP]; __le16 buffer_size[DPNI_MAX_DPBP]; }; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c index 6c3b36f20fb8..02601a283b59 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c @@ -173,8 +173,12 @@ int dpni_set_pools(struct fsl_mc_io *mc_io, token); cmd_params = (struct dpni_cmd_set_pools *)cmd.params; cmd_params->num_dpbp = cfg->num_dpbp; + cmd_params->pool_options = cfg->pool_options; for (i = 0; i < DPNI_MAX_DPBP; i++) { - cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id); + cmd_params->pool[i].dpbp_id = + cpu_to_le16(cfg->pools[i].dpbp_id); + cmd_params->pool[i].priority_mask = + cfg->pools[i].priority_mask; 
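+		/* Illustration (not part of the patch): with
+		 * DPNI_POOL_ASSOC_QDBIN selected, a priority_mask of 0x05
+		 * ties this buffer pool to Rx queues 0 and 2. Per the dpni.h
+		 * documentation below, a mask of 0x00 is interpreted by the
+		 * MC firmware as 0xff, i.e. all queues.
+		 */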
cmd_params->buffer_size[i] = cpu_to_le16(cfg->pools[i].buffer_size); cmd_params->backup_pool_mask |= diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h index 6fffd519aa00..5c0a1d5ac934 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h @@ -92,19 +92,28 @@ int dpni_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); +#define DPNI_POOL_ASSOC_QPRI 0 +#define DPNI_POOL_ASSOC_QDBIN 1 + /** * struct dpni_pools_cfg - Structure representing buffer pools configuration * @num_dpbp: Number of DPBPs + * @pool_options: Buffer assignment options. + * This field is a combination of DPNI_POOL_ASSOC_flags * @pools: Array of buffer pools parameters; The number of valid entries * must match 'num_dpbp' value * @pools.dpbp_id: DPBP object ID + * @pools.priority: Priority mask that indicates TC's used with this buffer. + * If set to 0x00 MC will assume value 0xff. * @pools.buffer_size: Buffer size * @pools.backup_pool: Backup pool */ struct dpni_pools_cfg { u8 num_dpbp; + u8 pool_options; struct { int dpbp_id; + u8 priority_mask; u16 buffer_size; int backup_pool; } pools[DPNI_MAX_DPBP]; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index bdf94335ee99..9f6c4f5c0a6c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -1111,7 +1111,6 @@ static void enetc_pl_mac_link_down(struct phylink_config *config, } static const struct phylink_mac_ops enetc_mac_phylink_ops = { - .validate = phylink_generic_validate, .mac_select_pcs = enetc_pl_mac_select_pcs, .mac_config = enetc_pl_mac_config, .mac_link_up = enetc_pl_mac_link_up, diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 33f84a30e167..5ba1e0d71c68 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -348,7 +348,6 @@ struct bufdesc_ex { */ #define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM) - #define FEC_ENET_RX_PAGES 256 #define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) @@ -527,6 +526,19 @@ struct fec_enet_priv_txrx_info { struct sk_buff *skb; }; +enum { + RX_XDP_REDIRECT = 0, + RX_XDP_PASS, + RX_XDP_DROP, + RX_XDP_TX, + RX_XDP_TX_ERRORS, + TX_XDP_XMIT, + TX_XDP_XMIT_ERRORS, + + /* The following must be the last one */ + XDP_STATS_TOTAL, +}; + struct fec_enet_priv_tx_q { struct bufdesc_prop bd; unsigned char *tx_bounce[TX_RING_SIZE]; @@ -547,6 +559,7 @@ struct fec_enet_priv_rx_q { /* page_pool */ struct page_pool *page_pool; struct xdp_rxq_info xdp_rxq; + u32 stats[XDP_STATS_TOTAL]; /* rx queue number, in the range 0-7 */ u8 id; @@ -658,9 +671,14 @@ struct fec_enet_private { unsigned int reload_period; int pps_enable; unsigned int next_counter; + struct hrtimer perout_timer; + u64 perout_stime; struct imx_sc_ipc *ipc_handle; + /* XDP BPF Program */ + struct bpf_prog *xdp_prog; + u64 ethtool_stats[]; }; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 23e1a94b9ce4..5528b0af82ae 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -89,6 +89,11 @@ static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2}; #define FEC_ENET_OPD_V 0xFFF0 #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ +#define FEC_ENET_XDP_PASS 0 +#define FEC_ENET_XDP_CONSUMED BIT(0) +#define 
FEC_ENET_XDP_TX		BIT(1)
+#define FEC_ENET_XDP_REDIR	BIT(2)
+
 struct fec_devinfo {
 	u32 quirks;
 };
@@ -365,16 +370,6 @@ static void swap_buffer(void *bufaddr, int len)
 		swab32s(buf);
 }
 
-static void swap_buffer2(void *dst_buf, void *src_buf, int len)
-{
-	int i;
-	unsigned int *src = src_buf;
-	unsigned int *dst = dst_buf;
-
-	for (i = 0; i < len; i += 4, src++, dst++)
-		*dst = swab32p(src);
-}
-
 static void fec_dump(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
@@ -428,13 +423,14 @@ static int fec_enet_create_page_pool(struct fec_enet_private *fep,
 				     struct fec_enet_priv_rx_q *rxq, int size)
 {
+	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
 	struct page_pool_params pp_params = {
 		.order = 0,
 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 		.pool_size = size,
 		.nid = dev_to_node(&fep->pdev->dev),
 		.dev = &fep->pdev->dev,
-		.dma_dir = DMA_FROM_DEVICE,
+		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
 		.offset = FEC_ENET_XDP_HEADROOM,
 		.max_len = FEC_ENET_RX_FRSIZE,
 	};
@@ -1494,53 +1490,6 @@ static void fec_enet_tx(struct net_device *ndev)
 		fec_enet_tx_queue(ndev, i);
 }
 
-static int __maybe_unused
-fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
-{
-	struct fec_enet_private *fep = netdev_priv(ndev);
-	int off;
-
-	off = ((unsigned long)skb->data) & fep->rx_align;
-	if (off)
-		skb_reserve(skb, fep->rx_align + 1 - off);
-
-	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
-	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
-		if (net_ratelimit())
-			netdev_err(ndev, "Rx DMA memory map failed\n");
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static bool __maybe_unused
-fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
-		   struct bufdesc *bdp, u32 length, bool swap)
-{
-	struct fec_enet_private *fep = netdev_priv(ndev);
-	struct sk_buff *new_skb;
-
-	if (length > fep->rx_copybreak)
-		return false;
-
-	new_skb = netdev_alloc_skb(ndev, length);
-	if (!new_skb)
-		return false;
-
-	dma_sync_single_for_cpu(&fep->pdev->dev,
-				fec32_to_cpu(bdp->cbd_bufaddr),
-				FEC_ENET_RX_FRSIZE - fep->rx_align,
-				DMA_FROM_DEVICE);
-	if (!swap)
-		memcpy(new_skb->data, (*skb)->data, length);
-	else
-		swap_buffer2(new_skb->data, (*skb)->data, length);
-	*skb = new_skb;
-
-	return true;
-}
-
 static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
 				struct bufdesc *bdp, int index)
 {
@@ -1556,6 +1505,62 @@ static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
 	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
 }
 
+static u32
+fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
+		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
+{
+	unsigned int sync, len = xdp->data_end - xdp->data;
+	u32 ret = FEC_ENET_XDP_PASS;
+	struct page *page;
+	int err;
+	u32 act;
+
+	act = bpf_prog_run_xdp(prog, xdp);
+
+	/* Due to xdp_adjust_tail(), the for_device DMA sync must cover the
+	 * maximum length the CPU may have touched.
+	 */
+	sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM;
+	sync = max(sync, len);
+
+	switch (act) {
+	case XDP_PASS:
+		rxq->stats[RX_XDP_PASS]++;
+		ret = FEC_ENET_XDP_PASS;
+		break;
+
+	case XDP_REDIRECT:
+		rxq->stats[RX_XDP_REDIRECT]++;
+		err = xdp_do_redirect(fep->netdev, xdp, prog);
+		if (!err) {
+			ret = FEC_ENET_XDP_REDIR;
+		} else {
+			ret = FEC_ENET_XDP_CONSUMED;
+			page = virt_to_head_page(xdp->data);
+			page_pool_put_page(rxq->page_pool, page, sync, true);
+		}
+		break;
+
+	default:
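+		/* Descriptive note, not part of the original patch: a verdict
+		 * that lands in "default" is reported via
+		 * bpf_warn_invalid_xdp_action() and then falls through
+		 * XDP_TX (not implemented in this patch, so reported the same
+		 * way) and XDP_ABORTED down to XDP_DROP, where the page is
+		 * returned to the page pool.
+		 */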
bpf_warn_invalid_xdp_action(fep->netdev, prog, act); + fallthrough; + + case XDP_TX: + bpf_warn_invalid_xdp_action(fep->netdev, prog, act); + fallthrough; + + case XDP_ABORTED: + fallthrough; /* handle aborts by dropping packet */ + + case XDP_DROP: + rxq->stats[RX_XDP_DROP]++; + ret = FEC_ENET_XDP_CONSUMED; + page = virt_to_head_page(xdp->data); + page_pool_put_page(rxq->page_pool, page, sync, true); + break; + } + + return ret; +} + /* During a receive, the bd_rx.cur points to the current incoming buffer. * When we update through the ring, if the next incoming buffer has * not been given to the system, we just set the empty indicator, @@ -1577,7 +1582,22 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) u16 vlan_tag; int index = 0; bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; + struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); + u32 ret, xdp_result = FEC_ENET_XDP_PASS; + u32 data_start = FEC_ENET_XDP_HEADROOM; + struct xdp_buff xdp; struct page *page; + u32 sub_len = 4; + +#if !defined(CONFIG_M5272) + /*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of + * FEC_RACC_SHIFT16 is set by default in the probe function. + */ + if (fep->quirks & FEC_QUIRK_HAS_RACC) { + data_start += 2; + sub_len += 2; + } +#endif #ifdef CONFIG_M532x flush_cache_all(); @@ -1588,6 +1608,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) * These get messed up if we get called due to a busy condition. */ bdp = rxq->bd.cur; + xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq); while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { @@ -1637,23 +1658,31 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) prefetch(page_address(page)); fec_enet_update_cbd(rxq, bdp, index); + if (xdp_prog) { + xdp_buff_clear_frags_flag(&xdp); + /* subtract 16bit shift and FCS */ + xdp_prepare_buff(&xdp, page_address(page), + data_start, pkt_len - sub_len, false); + ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index); + xdp_result |= ret; + if (ret != FEC_ENET_XDP_PASS) + goto rx_processing_done; + } + /* The packet length includes FCS, but we don't want to * include that when passing upstream as it messes up * bridging applications. 
*/ skb = build_skb(page_address(page), PAGE_SIZE); - skb_reserve(skb, FEC_ENET_XDP_HEADROOM); - skb_put(skb, pkt_len - 4); + skb_reserve(skb, data_start); + skb_put(skb, pkt_len - sub_len); skb_mark_for_recycle(skb); - data = skb->data; - if (need_swap) + if (unlikely(need_swap)) { + data = page_address(page) + FEC_ENET_XDP_HEADROOM; swap_buffer(data, pkt_len); - -#if !defined(CONFIG_M5272) - if (fep->quirks & FEC_QUIRK_HAS_RACC) - data = skb_pull_inline(skb, 2); -#endif + } + data = skb->data; /* Extract the enhanced buffer descriptor */ ebdp = NULL; @@ -1732,6 +1761,10 @@ rx_processing_done: writel(0, rxq->bd.reg_desc_active); } rxq->bd.cur = bdp; + + if (xdp_result & FEC_ENET_XDP_REDIR) + xdp_do_flush_map(); + return pkt_received; } @@ -2226,7 +2259,7 @@ static int fec_enet_mii_probe(struct net_device *ndev) fep->link = 0; fep->full_duplex = 0; - phy_dev->mac_managed_pm = 1; + phy_dev->mac_managed_pm = true; phy_attached_info(phy_dev); @@ -2671,6 +2704,16 @@ static const struct fec_stat { #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64)) +static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = { + "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */ + "rx_xdp_pass", /* RX_XDP_PASS, */ + "rx_xdp_drop", /* RX_XDP_DROP, */ + "rx_xdp_tx", /* RX_XDP_TX, */ + "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */ + "tx_xdp_xmit", /* TX_XDP_XMIT, */ + "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */ +}; + static void fec_enet_update_ethtool_stats(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); @@ -2680,6 +2723,40 @@ static void fec_enet_update_ethtool_stats(struct net_device *dev) fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); } +static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data) +{ + u64 xdp_stats[XDP_STATS_TOTAL] = { 0 }; + struct fec_enet_priv_rx_q *rxq; + int i, j; + + for (i = fep->num_rx_queues - 1; i >= 0; i--) { + rxq = fep->rx_queue[i]; + + for (j = 0; j < XDP_STATS_TOTAL; j++) + xdp_stats[j] += rxq->stats[j]; + } + + memcpy(data, xdp_stats, sizeof(xdp_stats)); +} + +static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data) +{ + struct page_pool_stats stats = {}; + struct fec_enet_priv_rx_q *rxq; + int i; + + for (i = fep->num_rx_queues - 1; i >= 0; i--) { + rxq = fep->rx_queue[i]; + + if (!rxq->page_pool) + continue; + + page_pool_get_stats(rxq->page_pool, &stats); + } + + page_pool_ethtool_stats_get(data, &stats); +} + static void fec_enet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -2689,6 +2766,12 @@ static void fec_enet_get_ethtool_stats(struct net_device *dev, fec_enet_update_ethtool_stats(dev); memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); + data += FEC_STATS_SIZE / sizeof(u64); + + fec_enet_get_xdp_stats(fep, data); + data += XDP_STATS_TOTAL; + + fec_enet_page_pool_stats(fep, data); } static void fec_enet_get_strings(struct net_device *netdev, @@ -2697,9 +2780,16 @@ static void fec_enet_get_strings(struct net_device *netdev, int i; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(fec_stats); i++) - memcpy(data + i * ETH_GSTRING_LEN, - fec_stats[i].name, ETH_GSTRING_LEN); + for (i = 0; i < ARRAY_SIZE(fec_stats); i++) { + memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) { + strncpy(data, fec_xdp_stat_strs[i], ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + page_pool_ethtool_stats_get_strings(data); + break; case ETH_SS_TEST: 
 		net_selftest_get_strings(data);
@@ -2709,9 +2799,14 @@
 static int fec_enet_get_sset_count(struct net_device *dev, int sset)
 {
+	int count;
+
 	switch (sset) {
 	case ETH_SS_STATS:
-		return ARRAY_SIZE(fec_stats);
+		count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
+		count += page_pool_ethtool_stats_get_count();
+		return count;
+
 	case ETH_SS_TEST:
 		return net_selftest_get_count();
 	default:
@@ -2722,7 +2817,8 @@
 static void fec_enet_clear_ethtool_stats(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
-	int i;
+	struct fec_enet_priv_rx_q *rxq;
+	int i, j;
 
 	/* Disable MIB statistics counters */
 	writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
@@ -2730,6 +2826,12 @@
 	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
 		writel(0, fep->hwp + fec_stats[i].offset);
 
+	for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+		rxq = fep->rx_queue[i];
+		for (j = 0; j < XDP_STATS_TOTAL; j++)
+			rxq->stats[j] = 0;
+	}
+
 	/* Don't disable MIB statistics counters */
 	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
 }
@@ -3083,6 +3185,9 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 		for (i = 0; i < rxq->bd.ring_size; i++)
 			page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
 
+		for (i = 0; i < XDP_STATS_TOTAL; i++)
+			rxq->stats[i] = 0;
+
 		if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
 			xdp_rxq_info_unreg(&rxq->xdp_rxq);
 		page_pool_destroy(rxq->page_pool);
@@ -3562,6 +3667,150 @@ static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
 }
 
+static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	bool is_run = netif_running(dev);
+	struct bpf_prog *old_prog;
+
+	switch (bpf->command) {
+	case XDP_SETUP_PROG:
+		/* No need to support the SoCs that need to do the frame
+		 * swap, because the performance wouldn't be better than
+		 * in skb mode.
+ */ + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) + return -EOPNOTSUPP; + + if (is_run) { + napi_disable(&fep->napi); + netif_tx_disable(dev); + } + + old_prog = xchg(&fep->xdp_prog, bpf->prog); + fec_restart(dev); + + if (is_run) { + napi_enable(&fep->napi); + netif_tx_start_all_queues(dev); + } + + if (old_prog) + bpf_prog_put(old_prog); + + return 0; + + case XDP_SETUP_XSK_POOL: + return -EOPNOTSUPP; + + default: + return -EOPNOTSUPP; + } +} + +static int +fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index) +{ + if (unlikely(index < 0)) + return 0; + + return (index % fep->num_tx_queues); +} + +static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, + struct fec_enet_priv_tx_q *txq, + struct xdp_frame *frame) +{ + unsigned int index, status, estatus; + struct bufdesc *bdp, *last_bdp; + dma_addr_t dma_addr; + int entries_free; + + entries_free = fec_enet_get_free_txdesc_num(txq); + if (entries_free < MAX_SKB_FRAGS + 1) { + netdev_err(fep->netdev, "NOT enough BD for SG!\n"); + return NETDEV_TX_OK; + } + + /* Fill in a Tx ring entry */ + bdp = txq->bd.cur; + last_bdp = bdp; + status = fec16_to_cpu(bdp->cbd_sc); + status &= ~BD_ENET_TX_STATS; + + index = fec_enet_get_bd_index(bdp, &txq->bd); + + dma_addr = dma_map_single(&fep->pdev->dev, frame->data, + frame->len, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, dma_addr)) + return FEC_ENET_XDP_CONSUMED; + + status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); + if (fep->bufdesc_ex) + estatus = BD_ENET_TX_INT; + + bdp->cbd_bufaddr = cpu_to_fec32(dma_addr); + bdp->cbd_datlen = cpu_to_fec16(frame->len); + + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + + if (fep->quirks & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); + + ebdp->cbd_bdu = 0; + ebdp->cbd_esc = cpu_to_fec32(estatus); + } + + index = fec_enet_get_bd_index(last_bdp, &txq->bd); + txq->tx_skbuff[index] = NULL; + + /* Send it on its way. Tell FEC it's ready, interrupt when done, + * it's the last BD of the frame, and to put the CRC on the end. + */ + status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); + bdp->cbd_sc = cpu_to_fec16(status); + + /* If this was the last BD in the ring, start at the beginning again. */ + bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); + + txq->bd.cur = bdp; + + return 0; +} + +static int fec_enet_xdp_xmit(struct net_device *dev, + int num_frames, + struct xdp_frame **frames, + u32 flags) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_priv_tx_q *txq; + int cpu = smp_processor_id(); + struct netdev_queue *nq; + unsigned int queue; + int i; + + queue = fec_enet_xdp_get_tx_queue(fep, cpu); + txq = fep->tx_queue[queue]; + nq = netdev_get_tx_queue(fep->netdev, queue); + + __netif_tx_lock(nq, cpu); + + for (i = 0; i < num_frames; i++) + fec_enet_txq_xmit_frame(fep, txq, frames[i]); + + /* Make sure the update to bdp and tx_skbuff are performed. 
+	 */
+	wmb();
+
+	/* Trigger transmission start */
+	writel(0, txq->bd.reg_desc_active);
+
+	__netif_tx_unlock(nq);
+
+	return num_frames;
+}
+
 static const struct net_device_ops fec_netdev_ops = {
 	.ndo_open		= fec_enet_open,
 	.ndo_stop		= fec_enet_close,
@@ -3576,6 +3825,8 @@
 	.ndo_poll_controller	= fec_poll_controller,
 #endif
 	.ndo_set_features	= fec_set_features,
+	.ndo_bpf		= fec_enet_bpf,
+	.ndo_xdp_xmit		= fec_enet_xdp_xmit,
 };
 
 static const unsigned short offset_des_active_rxq[] = {
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index cffd9ad499dd..ab86bb8562ef 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -88,6 +88,9 @@
 #define FEC_CHANNLE_0 0
 #define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0
 
+#define FEC_PTP_MAX_NSEC_PERIOD		4000000000ULL
+#define FEC_PTP_MAX_NSEC_COUNTER	0x80000000ULL
+
 /**
  * fec_ptp_enable_pps
  * @fep: the fec_enet_private structure handle
@@ -198,6 +201,78 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
 	return 0;
 }
 
+static int fec_ptp_pps_perout(struct fec_enet_private *fep)
+{
+	u32 compare_val, ptp_hc, temp_val;
+	u64 curr_time;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+	/* Update time counter */
+	timecounter_read(&fep->tc);
+
+	/* Get the current ptp hardware time counter */
+	temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
+	temp_val |= FEC_T_CTRL_CAPTURE;
+	writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
+	if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+		udelay(1);
+
+	ptp_hc = readl(fep->hwp + FEC_ATIME);
+
+	/* Convert the ptp local counter to 1588 timestamp */
+	curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);
+
+	/* If the PPS start time is less than the current time plus 100ms,
+	 * just return, because the software might not be able to set the
+	 * comparison time into the FEC_TCCR register in time and would miss
+	 * the start time.
+	 */
+	if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
+		dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
+		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+		return -1;
+	}
+
+	compare_val = fep->perout_stime - curr_time + ptp_hc;
+	compare_val &= fep->cc.mask;
+
+	writel(compare_val, fep->hwp + FEC_TCCR(fep->pps_channel));
+	fep->next_counter = (compare_val + fep->reload_period) & fep->cc.mask;
+
+	/* Enable compare event when overflow */
+	temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
+	temp_val |= FEC_T_CTRL_PINPER;
+	writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
+
+	/* Compare channel setting. */
+	temp_val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+	temp_val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
+	temp_val &= ~(1 << FEC_T_TDRE_OFFSET);
+	temp_val &= ~(FEC_T_TMODE_MASK);
+	temp_val |= (FEC_TMODE_TOGGLE << FEC_T_TMODE_OFFSET);
+	writel(temp_val, fep->hwp + FEC_TCSR(fep->pps_channel));
+
+	/* Write the second compare event timestamp and calculate
+	 * the third timestamp. Refer to the TCCR register details
+	 * in the spec.
+	 */
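+	/* Worked example with illustrative numbers (not from the patch):
+	 * for a 1s period, reload_period is 5e8 ns, since the output pin
+	 * toggles on every compare match, i.e. once per half period. If the
+	 * requested start time lies 2e8 ns after curr_time and the captured
+	 * counter value is ptp_hc, the first edge fires at
+	 * compare_val = ptp_hc + 2e8 (masked to the 31-bit counter width)
+	 * and each later edge adds another reload_period.
+	 */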
+ */ + writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel)); + fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask; + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + return 0; +} + +static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer) +{ + struct fec_enet_private *fep = container_of(timer, + struct fec_enet_private, perout_timer); + + fec_ptp_pps_perout(fep); + + return HRTIMER_NORESTART; +} + /** * fec_ptp_read - read raw cycle counter (to be used by time counter) * @cc: the cyclecounter structure @@ -263,18 +338,21 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev) } /** - * fec_ptp_adjfreq - adjust ptp cycle frequency + * fec_ptp_adjfine - adjust ptp cycle frequency * @ptp: the ptp clock structure - * @ppb: parts per billion adjustment from base + * @scaled_ppm: scaled parts per million adjustment from base * * Adjust the frequency of the ptp cycle counter by the - * indicated ppb from the base frequency. + * indicated amount from the base frequency. + * + * Scaled parts per million is ppm with a 16-bit binary fractional field. * * Because ENET hardware frequency adjustment is complex, * a software method is used instead. */ -static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); unsigned long flags; int neg_adj = 0; u32 i, tmp; @@ -425,6 +503,17 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp, return 0; } +static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel) +{ + unsigned long flags; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + writel(0, fep->hwp + FEC_TCSR(channel)); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + return 0; +} + /** * fec_ptp_enable * @ptp: the ptp clock structure @@ -437,14 +526,84 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp, { struct fec_enet_private *fep = container_of(ptp, struct fec_enet_private, ptp_caps); + ktime_t timeout; + struct timespec64 start_time, period; + u64 curr_time, delta, period_ns; + unsigned long flags; int ret = 0; if (rq->type == PTP_CLK_REQ_PPS) { ret = fec_ptp_enable_pps(fep, on); return ret; + } else if (rq->type == PTP_CLK_REQ_PEROUT) { + /* Reject requests with unsupported flags */ + if (rq->perout.flags) + return -EOPNOTSUPP; + + if (rq->perout.index != DEFAULT_PPS_CHANNEL) + return -EOPNOTSUPP; + + fep->pps_channel = DEFAULT_PPS_CHANNEL; + period.tv_sec = rq->perout.period.sec; + period.tv_nsec = rq->perout.period.nsec; + period_ns = timespec64_to_ns(&period); + + /* The FEC PTP timer only has 31 bits, so a period longer + * than 4s is not supported.
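+ * The period is programmed as two half-period reloads (the output pin + * toggles on each compare event), and each half period must fit into the + * 31-bit counter: 2^31 ns is roughly 2.147 s, hence the 4000000000 ns cap + * in FEC_PTP_MAX_NSEC_PERIOD.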
+ */ + if (period_ns > FEC_PTP_MAX_NSEC_PERIOD) { + dev_err(&fep->pdev->dev, "The period must be equal to or less than 4s!\n"); + return -EOPNOTSUPP; + } + + fep->reload_period = div_u64(period_ns, 2); + if (on && fep->reload_period) { + /* Convert the 1588 timestamp to ns */ + start_time.tv_sec = rq->perout.start.sec; + start_time.tv_nsec = rq->perout.start.nsec; + fep->perout_stime = timespec64_to_ns(&start_time); + + mutex_lock(&fep->ptp_clk_mutex); + if (!fep->ptp_clk_on) { + dev_err(&fep->pdev->dev, "Error: PTP clock is closed!\n"); + mutex_unlock(&fep->ptp_clk_mutex); + return -EOPNOTSUPP; + } + spin_lock_irqsave(&fep->tmreg_lock, flags); + /* Read the current timestamp */ + curr_time = timecounter_read(&fep->tc); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); + + /* Calculate the time difference */ + delta = fep->perout_stime - curr_time; + + if (fep->perout_stime <= curr_time) { + dev_err(&fep->pdev->dev, "Start time must be larger than current time!\n"); + return -EINVAL; + } + + /* Because the FEC timer counter is only 31 bits wide, only the low + * 31 bits of the time comparison register FEC_TCCR can be set. If the + * start time of the PPS signal is more than 0x80000000 ns ahead of the + * current time, a software timer is used; it expires about 1 second + * before the start time so that FEC_TCCR can still be set in time. + */ + if (delta > FEC_PTP_MAX_NSEC_COUNTER) { + timeout = ns_to_ktime(delta - NSEC_PER_SEC); + hrtimer_start(&fep->perout_timer, timeout, HRTIMER_MODE_REL); + } else { + return fec_ptp_pps_perout(fep); + } + } else { + fec_ptp_pps_disable(fep, fep->pps_channel); + } + + return 0; + } else { + return -EOPNOTSUPP; } - return -EOPNOTSUPP; } /** @@ -583,10 +742,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx) fep->ptp_caps.max_adj = 250000000; fep->ptp_caps.n_alarm = 0; fep->ptp_caps.n_ext_ts = 0; - fep->ptp_caps.n_per_out = 0; + fep->ptp_caps.n_per_out = 1; fep->ptp_caps.n_pins = 0; fep->ptp_caps.pps = 1; - fep->ptp_caps.adjfreq = fec_ptp_adjfreq; + fep->ptp_caps.adjfine = fec_ptp_adjfine; fep->ptp_caps.adjtime = fec_ptp_adjtime; fep->ptp_caps.gettime64 = fec_ptp_gettime; fep->ptp_caps.settime64 = fec_ptp_settime; @@ -605,6 +764,9 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx) INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); + hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL); + fep->perout_timer.function = fec_ptp_pps_perout_handler; + irq = platform_get_irq_byname_optional(pdev, "pps"); if (irq < 0) irq = platform_get_irq_optional(pdev, irq_idx); @@ -634,6 +796,7 @@ void fec_ptp_stop(struct platform_device *pdev) struct fec_enet_private *fep = netdev_priv(ndev); cancel_delayed_work_sync(&fep->time_keep); + hrtimer_cancel(&fep->perout_timer); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); } diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig index 48bf8088795d..a55542c1ad65 100644 --- a/drivers/net/ethernet/freescale/fman/Kconfig +++ b/drivers/net/ethernet/freescale/fman/Kconfig @@ -3,7 +3,8 @@ config FSL_FMAN tristate "FMan support" depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST select GENERIC_ALLOCATOR - select PHYLIB + select PHYLINK + select PCS_LYNX select CRC32 default n help diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 6617932fd3fd..d00bae15a901 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++
b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -17,6 +17,7 @@ #include <linux/crc32.h> #include <linux/of_mdio.h> #include <linux/mii.h> +#include <linux/netdevice.h> /* TBI register addresses */ #define MII_TBICON 0x11 @@ -29,9 +30,6 @@ #define TBICON_CLK_SELECT 0x0020 /* Clock select */ #define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */ -#define TBIANA_SGMII 0x4001 -#define TBIANA_1000X 0x01a0 - /* Interrupt Mask Register (IMASK) */ #define DTSEC_IMASK_BREN 0x80000000 #define DTSEC_IMASK_RXCEN 0x40000000 @@ -92,9 +90,10 @@ #define DTSEC_ECNTRL_GMIIM 0x00000040 #define DTSEC_ECNTRL_TBIM 0x00000020 -#define DTSEC_ECNTRL_SGMIIM 0x00000002 #define DTSEC_ECNTRL_RPM 0x00000010 #define DTSEC_ECNTRL_R100M 0x00000008 +#define DTSEC_ECNTRL_RMM 0x00000004 +#define DTSEC_ECNTRL_SGMIIM 0x00000002 #define DTSEC_ECNTRL_QSGMIIM 0x00000001 #define TCTRL_TTSE 0x00000040 @@ -318,7 +317,8 @@ struct fman_mac { void *fm; struct fman_rev_info fm_rev_info; bool basex_if; - struct phy_device *tbiphy; + struct mdio_device *tbidev; + struct phylink_pcs pcs; }; static void set_dflts(struct dtsec_cfg *cfg) @@ -356,56 +356,14 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg, phy_interface_t iface, u16 iface_speed, u64 addr, u32 exception_mask, u8 tbi_addr) { - bool is_rgmii, is_sgmii, is_qsgmii; enet_addr_t eth_addr; - u32 tmp; + u32 tmp = 0; int i; /* Soft reset */ iowrite32be(MACCFG1_SOFT_RESET, ®s->maccfg1); iowrite32be(0, ®s->maccfg1); - /* dtsec_id2 */ - tmp = ioread32be(®s->tsec_id2); - - /* check RGMII support */ - if (iface == PHY_INTERFACE_MODE_RGMII || - iface == PHY_INTERFACE_MODE_RGMII_ID || - iface == PHY_INTERFACE_MODE_RGMII_RXID || - iface == PHY_INTERFACE_MODE_RGMII_TXID || - iface == PHY_INTERFACE_MODE_RMII) - if (tmp & DTSEC_ID2_INT_REDUCED_OFF) - return -EINVAL; - - if (iface == PHY_INTERFACE_MODE_SGMII || - iface == PHY_INTERFACE_MODE_MII) - if (tmp & DTSEC_ID2_INT_REDUCED_OFF) - return -EINVAL; - - is_rgmii = iface == PHY_INTERFACE_MODE_RGMII || - iface == PHY_INTERFACE_MODE_RGMII_ID || - iface == PHY_INTERFACE_MODE_RGMII_RXID || - iface == PHY_INTERFACE_MODE_RGMII_TXID; - is_sgmii = iface == PHY_INTERFACE_MODE_SGMII; - is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII; - - tmp = 0; - if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII) - tmp |= DTSEC_ECNTRL_GMIIM; - if (is_sgmii) - tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM); - if (is_qsgmii) - tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM | - DTSEC_ECNTRL_QSGMIIM); - if (is_rgmii) - tmp |= DTSEC_ECNTRL_RPM; - if (iface_speed == SPEED_100) - tmp |= DTSEC_ECNTRL_R100M; - - iowrite32be(tmp, ®s->ecntrl); - - tmp = 0; - if (cfg->tx_pause_time) tmp |= cfg->tx_pause_time; if (cfg->tx_pause_time_extd) @@ -446,17 +404,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg, tmp = 0; - if (iface_speed < SPEED_1000) - tmp |= MACCFG2_NIBBLE_MODE; - else if (iface_speed == SPEED_1000) - tmp |= MACCFG2_BYTE_MODE; - tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) & MACCFG2_PREAMBLE_LENGTH_MASK; if (cfg->tx_pad_crc) tmp |= MACCFG2_PAD_CRC_EN; - /* Full Duplex */ - tmp |= MACCFG2_FULL_DUPLEX; iowrite32be(tmp, ®s->maccfg2); tmp = (((cfg->non_back_to_back_ipg1 << @@ -525,10 +476,6 @@ static void set_bucket(struct dtsec_regs __iomem *regs, int bucket, static int check_init_parameters(struct fman_mac *dtsec) { - if (dtsec->max_speed >= SPEED_10000) { - pr_err("1G MAC driver supports 1G or lower speeds\n"); - return -EINVAL; - } if ((dtsec->dtsec_drv_param)->rx_prepend > 
MAX_PACKET_ALIGNMENT) { pr_err("packetAlignmentPadding can't be > than %d\n", @@ -630,22 +577,10 @@ static int get_exception_flag(enum fman_mac_exceptions exception) return bit_mask; } -static bool is_init_done(struct dtsec_cfg *dtsec_drv_params) -{ - /* Checks if dTSEC driver parameters were initialized */ - if (!dtsec_drv_params) - return true; - - return false; -} - static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec) { struct dtsec_regs __iomem *regs = dtsec->regs; - if (is_init_done(dtsec->dtsec_drv_param)) - return 0; - return (u16)ioread32be(®s->maxfrm); } @@ -682,6 +617,7 @@ static void dtsec_isr(void *handle) dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT); if (event & DTSEC_IMASK_XFUNEN) { /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */ + /* FIXME: This races with the rest of the driver! */ if (dtsec->fm_rev_info.major == 2) { u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i; /* a. Write 0x00E0_0C00 to DTSEC_ID @@ -814,6 +750,43 @@ static void free_init_resources(struct fman_mac *dtsec) dtsec->unicast_addr_hash = NULL; } +static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct fman_mac, pcs); +} + +static void dtsec_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct fman_mac *dtsec = pcs_to_dtsec(pcs); + + phylink_mii_c22_pcs_get_state(dtsec->tbidev, state); +} + +static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct fman_mac *dtsec = pcs_to_dtsec(pcs); + + return phylink_mii_c22_pcs_config(dtsec->tbidev, mode, interface, + advertising); +} + +static void dtsec_pcs_an_restart(struct phylink_pcs *pcs) +{ + struct fman_mac *dtsec = pcs_to_dtsec(pcs); + + phylink_mii_c22_pcs_an_restart(dtsec->tbidev); +} + +static const struct phylink_pcs_ops dtsec_pcs_ops = { + .pcs_get_state = dtsec_pcs_get_state, + .pcs_config = dtsec_pcs_config, + .pcs_an_restart = dtsec_pcs_an_restart, +}; + static void graceful_start(struct fman_mac *dtsec) { struct dtsec_regs __iomem *regs = dtsec->regs; @@ -854,36 +827,11 @@ static void graceful_stop(struct fman_mac *dtsec) static int dtsec_enable(struct fman_mac *dtsec) { - struct dtsec_regs __iomem *regs = dtsec->regs; - u32 tmp; - - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - - /* Enable */ - tmp = ioread32be(®s->maccfg1); - tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN; - iowrite32be(tmp, ®s->maccfg1); - - /* Graceful start - clear the graceful Rx/Tx stop bit */ - graceful_start(dtsec); - return 0; } static void dtsec_disable(struct fman_mac *dtsec) { - struct dtsec_regs __iomem *regs = dtsec->regs; - u32 tmp; - - WARN_ON_ONCE(!is_init_done(dtsec->dtsec_drv_param)); - - /* Graceful stop - Assert the graceful Rx/Tx stop bit */ - graceful_stop(dtsec); - - tmp = ioread32be(®s->maccfg1); - tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); - iowrite32be(tmp, ®s->maccfg1); } static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, @@ -894,11 +842,6 @@ static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, struct dtsec_regs __iomem *regs = dtsec->regs; u32 ptv = 0; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - - graceful_stop(dtsec); - if (pause_time) { /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */ if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) { @@ -919,8 +862,6 @@ static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, iowrite32be(ioread32be(®s->maccfg1) & ~MACCFG1_TX_FLOW, 
®s->maccfg1); - graceful_start(dtsec); - return 0; } @@ -929,11 +870,6 @@ static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en) struct dtsec_regs __iomem *regs = dtsec->regs; u32 tmp; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - - graceful_stop(dtsec); - tmp = ioread32be(®s->maccfg1); if (en) tmp |= MACCFG1_RX_FLOW; @@ -941,17 +877,124 @@ static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en) tmp &= ~MACCFG1_RX_FLOW; iowrite32be(tmp, ®s->maccfg1); + return 0; +} + +static struct phylink_pcs *dtsec_select_pcs(struct phylink_config *config, + phy_interface_t iface) +{ + struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac; + + switch (iface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + return &dtsec->pcs; + default: + return NULL; + } +} + +static void dtsec_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct mac_device *mac_dev = fman_config_to_mac(config); + struct dtsec_regs __iomem *regs = mac_dev->fman_mac->regs; + u32 tmp; + + switch (state->interface) { + case PHY_INTERFACE_MODE_RMII: + tmp = DTSEC_ECNTRL_RMM; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + tmp = DTSEC_ECNTRL_GMIIM | DTSEC_ECNTRL_RPM; + break; + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + tmp = DTSEC_ECNTRL_TBIM | DTSEC_ECNTRL_SGMIIM; + break; + default: + dev_warn(mac_dev->dev, "cannot configure dTSEC for %s\n", + phy_modes(state->interface)); + return; + } + + iowrite32be(tmp, ®s->ecntrl); +} + +static void dtsec_link_up(struct phylink_config *config, struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, bool rx_pause) +{ + struct mac_device *mac_dev = fman_config_to_mac(config); + struct fman_mac *dtsec = mac_dev->fman_mac; + struct dtsec_regs __iomem *regs = dtsec->regs; + u16 pause_time = tx_pause ? 
FSL_FM_PAUSE_TIME_ENABLE : + FSL_FM_PAUSE_TIME_DISABLE; + u32 tmp; + + dtsec_set_tx_pause_frames(dtsec, 0, pause_time, 0); + dtsec_accept_rx_pause_frames(dtsec, rx_pause); + + tmp = ioread32be(®s->ecntrl); + if (speed == SPEED_100) + tmp |= DTSEC_ECNTRL_R100M; + else + tmp &= ~DTSEC_ECNTRL_R100M; + iowrite32be(tmp, ®s->ecntrl); + + tmp = ioread32be(®s->maccfg2); + tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE | MACCFG2_FULL_DUPLEX); + if (speed >= SPEED_1000) + tmp |= MACCFG2_BYTE_MODE; + else + tmp |= MACCFG2_NIBBLE_MODE; + + if (duplex == DUPLEX_FULL) + tmp |= MACCFG2_FULL_DUPLEX; + + iowrite32be(tmp, ®s->maccfg2); + + mac_dev->update_speed(mac_dev, speed); + + /* Enable */ + tmp = ioread32be(®s->maccfg1); + tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN; + iowrite32be(tmp, ®s->maccfg1); + + /* Graceful start - clear the graceful Rx/Tx stop bit */ graceful_start(dtsec); +} - return 0; +static void dtsec_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac; + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + /* Graceful stop - Assert the graceful Rx/Tx stop bit */ + graceful_stop(dtsec); + + tmp = ioread32be(®s->maccfg1); + tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); + iowrite32be(tmp, ®s->maccfg1); } +static const struct phylink_mac_ops dtsec_mac_ops = { + .mac_select_pcs = dtsec_select_pcs, + .mac_config = dtsec_mac_config, + .mac_link_up = dtsec_link_up, + .mac_link_down = dtsec_link_down, +}; + static int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr) { - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - graceful_stop(dtsec); /* Initialize MAC Station Address registers (1 & 2) @@ -975,9 +1018,6 @@ static int dtsec_add_hash_mac_address(struct fman_mac *dtsec, u32 crc = 0xFFFFFFFF; bool mcast, ghtx; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - addr = ENET_ADDR_TO_UINT64(*eth_addr); ghtx = (bool)((ioread32be(®s->rctrl) & RCTRL_GHTX) ? true : false); @@ -1037,9 +1077,6 @@ static int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable) u32 tmp; struct dtsec_regs __iomem *regs = dtsec->regs; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - tmp = ioread32be(®s->rctrl); if (enable) tmp |= RCTRL_MPROM; @@ -1056,9 +1093,6 @@ static int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable) struct dtsec_regs __iomem *regs = dtsec->regs; u32 rctrl, tctrl; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - rctrl = ioread32be(®s->rctrl); tctrl = ioread32be(®s->tctrl); @@ -1087,9 +1121,6 @@ static int dtsec_del_hash_mac_address(struct fman_mac *dtsec, u32 crc = 0xFFFFFFFF; bool mcast, ghtx; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - addr = ENET_ADDR_TO_UINT64(*eth_addr); ghtx = (bool)((ioread32be(®s->rctrl) & RCTRL_GHTX) ? 
true : false); @@ -1153,9 +1184,6 @@ static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val) struct dtsec_regs __iomem *regs = dtsec->regs; u32 tmp; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - /* Set unicast promiscuous */ tmp = ioread32be(®s->rctrl); if (new_val) @@ -1177,90 +1205,12 @@ static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val) return 0; } -static int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed) -{ - struct dtsec_regs __iomem *regs = dtsec->regs; - u32 tmp; - - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - - graceful_stop(dtsec); - - tmp = ioread32be(®s->maccfg2); - - /* Full Duplex */ - tmp |= MACCFG2_FULL_DUPLEX; - - tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE); - if (speed < SPEED_1000) - tmp |= MACCFG2_NIBBLE_MODE; - else if (speed == SPEED_1000) - tmp |= MACCFG2_BYTE_MODE; - iowrite32be(tmp, ®s->maccfg2); - - tmp = ioread32be(®s->ecntrl); - if (speed == SPEED_100) - tmp |= DTSEC_ECNTRL_R100M; - else - tmp &= ~DTSEC_ECNTRL_R100M; - iowrite32be(tmp, ®s->ecntrl); - - graceful_start(dtsec); - - return 0; -} - -static int dtsec_restart_autoneg(struct fman_mac *dtsec) -{ - u16 tmp_reg16; - - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - - tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR); - - tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000); - tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART | - BMCR_FULLDPLX | BMCR_SPEED1000); - - phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16); - - return 0; -} - -static void adjust_link_dtsec(struct mac_device *mac_dev) -{ - struct phy_device *phy_dev = mac_dev->phy_dev; - struct fman_mac *fman_mac; - bool rx_pause, tx_pause; - int err; - - fman_mac = mac_dev->fman_mac; - if (!phy_dev->link) { - dtsec_restart_autoneg(fman_mac); - - return; - } - - dtsec_adjust_link(fman_mac, phy_dev->speed); - mac_dev->update_speed(mac_dev, phy_dev->speed); - fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); - err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); - if (err < 0) - dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n", - err); -} - static int dtsec_set_exception(struct fman_mac *dtsec, enum fman_mac_exceptions exception, bool enable) { struct dtsec_regs __iomem *regs = dtsec->regs; u32 bit_mask = 0; - if (!is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) { bit_mask = get_exception_flag(exception); if (bit_mask) { @@ -1310,12 +1260,9 @@ static int dtsec_init(struct fman_mac *dtsec) { struct dtsec_regs __iomem *regs = dtsec->regs; struct dtsec_cfg *dtsec_drv_param; - u16 max_frm_ln; + u16 max_frm_ln, tbicon; int err; - if (is_init_done(dtsec->dtsec_drv_param)) - return -EINVAL; - if (DEFAULT_RESET_ON_INIT && (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) { pr_err("Can't reset MAC!\n"); @@ -1330,38 +1277,19 @@ static int dtsec_init(struct fman_mac *dtsec) err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if, dtsec->max_speed, dtsec->addr, dtsec->exceptions, - dtsec->tbiphy->mdio.addr); + dtsec->tbidev->addr); if (err) { free_init_resources(dtsec); pr_err("DTSEC version doesn't support this i/f mode\n"); return err; } - if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) { - u16 tmp_reg16; - - /* Configure the TBI PHY Control Register */ - tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET; - phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16); + /* Configure the TBI PHY Control Register */ + tbicon = TBICON_CLK_SELECT | TBICON_SOFT_RESET; + mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon); - 
tmp_reg16 = TBICON_CLK_SELECT; - phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16); - - tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE | - BMCR_FULLDPLX | BMCR_SPEED1000); - phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16); - - if (dtsec->basex_if) - tmp_reg16 = TBIANA_1000X; - else - tmp_reg16 = TBIANA_SGMII; - phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16); - - tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART | - BMCR_FULLDPLX | BMCR_SPEED1000); - - phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16); - } + tbicon = TBICON_CLK_SELECT; + mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon); /* Max Frame Length */ max_frm_ln = (u16)ioread32be(®s->maxfrm); @@ -1406,6 +1334,8 @@ static int dtsec_free(struct fman_mac *dtsec) kfree(dtsec->dtsec_drv_param); dtsec->dtsec_drv_param = NULL; + if (!IS_ERR_OR_NULL(dtsec->tbidev)) + put_device(&dtsec->tbidev->dev); kfree(dtsec); return 0; @@ -1434,7 +1364,6 @@ static struct fman_mac *dtsec_config(struct mac_device *mac_dev, dtsec->regs = mac_dev->vaddr; dtsec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr); - dtsec->max_speed = params->max_speed; dtsec->phy_if = mac_dev->phy_if; dtsec->mac_id = params->mac_id; dtsec->exceptions = (DTSEC_IMASK_BREN | @@ -1457,7 +1386,6 @@ static struct fman_mac *dtsec_config(struct mac_device *mac_dev, dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en; dtsec->fm = params->fm; - dtsec->basex_if = params->basex_if; /* Save FMan revision */ fman_get_revision(dtsec->fm, &dtsec->fm_rev_info); @@ -1476,18 +1404,18 @@ int dtsec_initialization(struct mac_device *mac_dev, int err; struct fman_mac *dtsec; struct device_node *phy_node; + unsigned long capabilities; + unsigned long *supported; + mac_dev->phylink_ops = &dtsec_mac_ops; mac_dev->set_promisc = dtsec_set_promiscuous; mac_dev->change_addr = dtsec_modify_mac_address; mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address; mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address; - mac_dev->set_tx_pause = dtsec_set_tx_pause_frames; - mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames; mac_dev->set_exception = dtsec_set_exception; mac_dev->set_allmulti = dtsec_set_allmulti; mac_dev->set_tstamp = dtsec_set_tstamp; mac_dev->set_multi = fman_set_multi; - mac_dev->adjust_link = adjust_link_dtsec; mac_dev->enable = dtsec_enable; mac_dev->disable = dtsec_disable; @@ -1502,19 +1430,56 @@ int dtsec_initialization(struct mac_device *mac_dev, dtsec->dtsec_drv_param->tx_pad_crc = true; phy_node = of_parse_phandle(mac_node, "tbi-handle", 0); - if (!phy_node) { - pr_err("TBI PHY node is not available\n"); + if (!phy_node || of_device_is_available(phy_node)) { + of_node_put(phy_node); err = -EINVAL; + dev_err_probe(mac_dev->dev, err, + "TBI PCS node is not available\n"); goto _return_fm_mac_free; } - dtsec->tbiphy = of_phy_find_device(phy_node); - if (!dtsec->tbiphy) { - pr_err("of_phy_find_device (TBI PHY) failed\n"); - err = -EINVAL; + dtsec->tbidev = of_mdio_find_device(phy_node); + of_node_put(phy_node); + if (!dtsec->tbidev) { + err = -EPROBE_DEFER; + dev_err_probe(mac_dev->dev, err, + "could not find mdiodev for PCS\n"); goto _return_fm_mac_free; } - put_device(&dtsec->tbiphy->mdio.dev); + dtsec->pcs.ops = &dtsec_pcs_ops; + dtsec->pcs.poll = true; + + supported = mac_dev->phylink_config.supported_interfaces; + + /* FIXME: Can we use DTSEC_ID2_INT_FULL_OFF to determine if these are + * supported? If not, we can determine support via the phy if SerDes + * support is added. 
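+ * Until then, the supported serdes interface modes are derived from the + * phy-connection-type found in the device tree, as done below.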
+ */ + if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII || + mac_dev->phy_if == PHY_INTERFACE_MODE_1000BASEX) { + __set_bit(PHY_INTERFACE_MODE_SGMII, supported); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported); + } else if (mac_dev->phy_if == PHY_INTERFACE_MODE_2500BASEX) { + __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported); + } + + if (!(ioread32be(&dtsec->regs->tsec_id2) & DTSEC_ID2_INT_REDUCED_OFF)) { + phy_interface_set_rgmii(supported); + + /* DTSEC_ID2_INT_REDUCED_OFF indicates that the dTSEC supports + * RMII and RGMII. However, the only SoCs which support RMII + * are the P1017 and P1023. Avoid advertising this mode on + * other SoCs. This is a bit of a moot point, since there's no + * in-tree support for ethernet on these platforms... + */ + if (of_machine_is_compatible("fsl,P1023") || + of_machine_is_compatible("fsl,P1023RDB")) + __set_bit(PHY_INTERFACE_MODE_RMII, supported); + } + + capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + capabilities |= MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD; + mac_dev->phylink_config.mac_capabilities = capabilities; err = dtsec_init(dtsec); if (err < 0) diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h index 65887a3160d7..e5d6cddea731 100644 --- a/drivers/net/ethernet/freescale/fman/fman_mac.h +++ b/drivers/net/ethernet/freescale/fman/fman_mac.h @@ -170,20 +170,10 @@ struct fman_mac_params { * 0 - FM_MAX_NUM_OF_10G_MACS */ u8 mac_id; - /* Note that the speed should indicate the maximum rate that - * this MAC should support rather than the actual speed; - */ - u16 max_speed; /* A handle to the FM object this port related to */ void *fm; fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */ fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */ - /* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC - * and phy or backplane; Note: 1000BaseX auto-negotiation relates only - * to interface between MAC and phy/backplane, SGMII phy can still - * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps - */ - bool basex_if; }; struct eth_hash_t { diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 32d26cf17843..9349f841bd06 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -11,42 +11,12 @@ #include <linux/slab.h> #include <linux/io.h> +#include <linux/pcs-lynx.h> #include <linux/phy.h> #include <linux/phy_fixed.h> +#include <linux/phy/phy.h> #include <linux/of_mdio.h> -/* PCS registers */ -#define MDIO_SGMII_CR 0x00 -#define MDIO_SGMII_DEV_ABIL_SGMII 0x04 -#define MDIO_SGMII_LINK_TMR_L 0x12 -#define MDIO_SGMII_LINK_TMR_H 0x13 -#define MDIO_SGMII_IF_MODE 0x14 - -/* SGMII Control defines */ -#define SGMII_CR_AN_EN 0x1000 -#define SGMII_CR_RESTART_AN 0x0200 -#define SGMII_CR_FD 0x0100 -#define SGMII_CR_SPEED_SEL1_1G 0x0040 -#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \ - SGMII_CR_SPEED_SEL1_1G) - -/* SGMII Device Ability for SGMII defines */ -#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001 -#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0 - -/* Link timer define */ -#define LINK_TMR_L 0xa120 -#define LINK_TMR_H 0x0007 -#define LINK_TMR_L_BASEX 0xaf08 -#define LINK_TMR_H_BASEX 0x002f - -/* SGMII IF Mode defines */ -#define IF_MODE_USE_SGMII_AN 0x0002 -#define IF_MODE_SGMII_EN 0x0001 -#define IF_MODE_SGMII_SPEED_100M 0x0004 -#define IF_MODE_SGMII_SPEED_1G 0x0008 -#define IF_MODE_SGMII_DUPLEX_HALF 0x0010 - /* 
Num of additional exact match MAC adr regs */ #define MEMAC_NUM_OF_PADDRS 7 @@ -308,9 +278,6 @@ struct fman_mac { struct memac_regs __iomem *regs; /* MAC address of device */ u64 addr; - /* Ethernet physical interface */ - phy_interface_t phy_if; - u16 max_speed; struct mac_device *dev_id; /* device cookie used by the exception cbs */ fman_mac_exception_cb *exception_cb; fman_mac_exception_cb *event_cb; @@ -323,9 +290,12 @@ struct fman_mac { struct memac_cfg *memac_drv_param; void *fm; struct fman_rev_info fm_rev_info; - bool basex_if; - struct phy_device *pcsphy; + struct phy *serdes; + struct phylink_pcs *sgmii_pcs; + struct phylink_pcs *qsgmii_pcs; + struct phylink_pcs *xfi_pcs; bool allmulti_enabled; + bool rgmii_no_half_duplex; }; static void add_addr_in_paddr(struct memac_regs __iomem *regs, const u8 *adr, @@ -383,7 +353,6 @@ static void set_exception(struct memac_regs __iomem *regs, u32 val, } static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg, - phy_interface_t phy_if, u16 speed, bool slow_10g_if, u32 exceptions) { u32 tmp; @@ -411,41 +380,6 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg, iowrite32be((u32)cfg->pause_quanta, ®s->pause_quanta[0]); iowrite32be((u32)0, ®s->pause_thresh[0]); - /* IF_MODE */ - tmp = 0; - switch (phy_if) { - case PHY_INTERFACE_MODE_XGMII: - tmp |= IF_MODE_10G; - break; - case PHY_INTERFACE_MODE_MII: - tmp |= IF_MODE_MII; - break; - default: - tmp |= IF_MODE_GMII; - if (phy_if == PHY_INTERFACE_MODE_RGMII || - phy_if == PHY_INTERFACE_MODE_RGMII_ID || - phy_if == PHY_INTERFACE_MODE_RGMII_RXID || - phy_if == PHY_INTERFACE_MODE_RGMII_TXID) - tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO; - } - iowrite32be(tmp, ®s->if_mode); - - /* TX_FIFO_SECTIONS */ - tmp = 0; - if (phy_if == PHY_INTERFACE_MODE_XGMII) { - if (slow_10g_if) { - tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G | - TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G); - } else { - tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G | - TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G); - } - } else { - tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G | - TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G); - } - iowrite32be(tmp, ®s->tx_fifo_sections); - /* clear all pending events and set-up interrupts */ iowrite32be(0xffffffff, ®s->ievent); set_exception(regs, exceptions, true); @@ -485,93 +419,6 @@ static u32 get_mac_addr_hash_code(u64 eth_addr) return xor_val; } -static void setup_sgmii_internal_phy(struct fman_mac *memac, - struct fixed_phy_status *fixed_link) -{ - u16 tmp_reg16; - - if (WARN_ON(!memac->pcsphy)) - return; - - /* SGMII mode */ - tmp_reg16 = IF_MODE_SGMII_EN; - if (!fixed_link) - /* AN enable */ - tmp_reg16 |= IF_MODE_USE_SGMII_AN; - else { - switch (fixed_link->speed) { - case 10: - /* For 10M: IF_MODE[SPEED_10M] = 0 */ - break; - case 100: - tmp_reg16 |= IF_MODE_SGMII_SPEED_100M; - break; - case 1000: - default: - tmp_reg16 |= IF_MODE_SGMII_SPEED_1G; - break; - } - if (!fixed_link->duplex) - tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF; - } - phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16); - - /* Device ability according to SGMII specification */ - tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE; - phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16); - - /* Adjust link timer for SGMII - - * According to Cisco SGMII specification the timer should be 1.6 ms. - * The link_timer register is configured in units of the clock. - * - When running as 1G SGMII, Serdes clock is 125 MHz, so - * unit = 1 / (125*10^6 Hz) = 8 ns. 
- * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40 - * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so - * unit = 1 / (312.5*10^6 Hz) = 3.2 ns. - * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120. - * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII, - * we always set up here a value of 2.5 SGMII. - */ - phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H); - phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L); - - if (!fixed_link) - /* Restart AN */ - tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN; - else - /* AN disabled */ - tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN; - phy_write(memac->pcsphy, 0x0, tmp_reg16); -} - -static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac) -{ - u16 tmp_reg16; - - /* AN Device capability */ - tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE; - phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16); - - /* Adjust link timer for SGMII - - * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms. - * The link_timer register is configured in units of the clock. - * - When running as 1G SGMII, Serdes clock is 125 MHz, so - * unit = 1 / (125*10^6 Hz) = 8 ns. - * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0 - * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so - * unit = 1 / (312.5*10^6 Hz) = 3.2 ns. - * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08. - * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII, - * we always set up here a value of 2.5 SGMII. - */ - phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX); - phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX); - - /* Restart AN */ - tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN; - phy_write(memac->pcsphy, 0x0, tmp_reg16); -} - static int check_init_parameters(struct fman_mac *memac) { if (!memac->exception_cb) { @@ -677,41 +524,31 @@ static void free_init_resources(struct fman_mac *memac) memac->unicast_addr_hash = NULL; } -static bool is_init_done(struct memac_cfg *memac_drv_params) -{ - /* Checks if mEMAC driver parameters were initialized */ - if (!memac_drv_params) - return true; - - return false; -} - static int memac_enable(struct fman_mac *memac) { - struct memac_regs __iomem *regs = memac->regs; - u32 tmp; + int ret; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; + ret = phy_init(memac->serdes); + if (ret) { + dev_err(memac->dev_id->dev, + "could not initialize serdes: %pe\n", ERR_PTR(ret)); + return ret; + } - tmp = ioread32be(®s->command_config); - tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN; - iowrite32be(tmp, ®s->command_config); + ret = phy_power_on(memac->serdes); + if (ret) { + dev_err(memac->dev_id->dev, + "could not power on serdes: %pe\n", ERR_PTR(ret)); + phy_exit(memac->serdes); + } - return 0; + return ret; } static void memac_disable(struct fman_mac *memac) - { - struct memac_regs __iomem *regs = memac->regs; - u32 tmp; - - WARN_ON_ONCE(!is_init_done(memac->memac_drv_param)); - - tmp = ioread32be(®s->command_config); - tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN); - iowrite32be(tmp, ®s->command_config); + phy_power_off(memac->serdes); + phy_exit(memac->serdes); } static int memac_set_promiscuous(struct fman_mac *memac, bool new_val) @@ -719,9 +556,6 @@ static int memac_set_promiscuous(struct fman_mac *memac, bool new_val) struct memac_regs __iomem *regs = memac->regs; u32 tmp; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - tmp = ioread32be(®s->command_config); if (new_val) tmp |= 
CMD_CFG_PROMIS_EN; @@ -733,73 +567,12 @@ static int memac_set_promiscuous(struct fman_mac *memac, bool new_val) return 0; } -static int memac_adjust_link(struct fman_mac *memac, u16 speed) -{ - struct memac_regs __iomem *regs = memac->regs; - u32 tmp; - - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - - tmp = ioread32be(®s->if_mode); - - /* Set full duplex */ - tmp &= ~IF_MODE_HD; - - if (phy_interface_mode_is_rgmii(memac->phy_if)) { - /* Configure RGMII in manual mode */ - tmp &= ~IF_MODE_RGMII_AUTO; - tmp &= ~IF_MODE_RGMII_SP_MASK; - /* Full duplex */ - tmp |= IF_MODE_RGMII_FD; - - switch (speed) { - case SPEED_1000: - tmp |= IF_MODE_RGMII_1000; - break; - case SPEED_100: - tmp |= IF_MODE_RGMII_100; - break; - case SPEED_10: - tmp |= IF_MODE_RGMII_10; - break; - default: - break; - } - } - - iowrite32be(tmp, ®s->if_mode); - - return 0; -} - -static void adjust_link_memac(struct mac_device *mac_dev) -{ - struct phy_device *phy_dev = mac_dev->phy_dev; - struct fman_mac *fman_mac; - bool rx_pause, tx_pause; - int err; - - fman_mac = mac_dev->fman_mac; - memac_adjust_link(fman_mac, phy_dev->speed); - mac_dev->update_speed(mac_dev, phy_dev->speed); - - fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); - err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); - if (err < 0) - dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n", - err); -} - static int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, u16 pause_time, u16 thresh_time) { struct memac_regs __iomem *regs = memac->regs; u32 tmp; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - tmp = ioread32be(®s->tx_fifo_sections); GET_TX_EMPTY_DEFAULT_VALUE(tmp); @@ -834,9 +607,6 @@ static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en) struct memac_regs __iomem *regs = memac->regs; u32 tmp; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - tmp = ioread32be(®s->command_config); if (en) tmp &= ~CMD_CFG_PAUSE_IGNORE; @@ -848,12 +618,175 @@ static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en) return 0; } +static void memac_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct fman_mac *memac = fman_config_to_mac(config)->fman_mac; + unsigned long caps = config->mac_capabilities; + + if (phy_interface_mode_is_rgmii(state->interface) && + memac->rgmii_no_half_duplex) + caps &= ~(MAC_10HD | MAC_100HD); + + phylink_validate_mask_caps(supported, state, caps); +} + +/** + * memac_if_mode() - Convert an interface mode into an IF_MODE config + * @interface: A phy interface mode + * + * Return: A configuration word, suitable for programming into the lower bits + * of %IF_MODE. 
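+ * + * Note that SGMII, 1000BASE-X and QSGMII all map to plain %IF_MODE_GMII + * here; the differences between them are handled by the serdes and the + * Lynx PCS rather than by IF_MODE.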
+ */ +static u32 memac_if_mode(phy_interface_t interface) +{ + switch (interface) { + case PHY_INTERFACE_MODE_MII: + return IF_MODE_MII; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + return IF_MODE_GMII | IF_MODE_RGMII; + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_QSGMII: + return IF_MODE_GMII; + case PHY_INTERFACE_MODE_10GBASER: + return IF_MODE_10G; + default: + WARN_ON_ONCE(1); + return 0; + } +} + +static struct phylink_pcs *memac_select_pcs(struct phylink_config *config, + phy_interface_t iface) +{ + struct fman_mac *memac = fman_config_to_mac(config)->fman_mac; + + switch (iface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + return memac->sgmii_pcs; + case PHY_INTERFACE_MODE_QSGMII: + return memac->qsgmii_pcs; + case PHY_INTERFACE_MODE_10GBASER: + return memac->xfi_pcs; + default: + return NULL; + } +} + +static int memac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t iface) +{ + struct fman_mac *memac = fman_config_to_mac(config)->fman_mac; + + switch (iface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_QSGMII: + case PHY_INTERFACE_MODE_10GBASER: + return phy_set_mode_ext(memac->serdes, PHY_MODE_ETHERNET, + iface); + default: + return 0; + } +} + +static void memac_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct mac_device *mac_dev = fman_config_to_mac(config); + struct memac_regs __iomem *regs = mac_dev->fman_mac->regs; + u32 tmp = ioread32be(®s->if_mode); + + tmp &= ~(IF_MODE_MASK | IF_MODE_RGMII); + tmp |= memac_if_mode(state->interface); + if (phylink_autoneg_inband(mode)) + tmp |= IF_MODE_RGMII_AUTO; + iowrite32be(tmp, ®s->if_mode); +} + +static void memac_link_up(struct phylink_config *config, struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, bool rx_pause) +{ + struct mac_device *mac_dev = fman_config_to_mac(config); + struct fman_mac *memac = mac_dev->fman_mac; + struct memac_regs __iomem *regs = memac->regs; + u32 tmp = memac_if_mode(interface); + u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE : + FSL_FM_PAUSE_TIME_DISABLE; + + memac_set_tx_pause_frames(memac, 0, pause_time, 0); + memac_accept_rx_pause_frames(memac, rx_pause); + + if (duplex == DUPLEX_HALF) + tmp |= IF_MODE_HD; + + switch (speed) { + case SPEED_1000: + tmp |= IF_MODE_RGMII_1000; + break; + case SPEED_100: + tmp |= IF_MODE_RGMII_100; + break; + case SPEED_10: + tmp |= IF_MODE_RGMII_10; + break; + } + iowrite32be(tmp, ®s->if_mode); + + /* TODO: EEE? 
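+ * If EEE support is ever added, the low-power idle timers would + * need to be configured here once the link is up.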
*/ + + if (speed == SPEED_10000) { + if (memac->fm_rev_info.major == 6 && + memac->fm_rev_info.minor == 4) + tmp = TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G; + else + tmp = TX_FIFO_SECTIONS_TX_AVAIL_10G; + tmp |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G; + } else { + tmp = TX_FIFO_SECTIONS_TX_AVAIL_1G | + TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G; + } + iowrite32be(tmp, ®s->tx_fifo_sections); + + mac_dev->update_speed(mac_dev, speed); + + tmp = ioread32be(®s->command_config); + tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN; + iowrite32be(tmp, ®s->command_config); +} + +static void memac_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct fman_mac *memac = fman_config_to_mac(config)->fman_mac; + struct memac_regs __iomem *regs = memac->regs; + u32 tmp; + + /* TODO: graceful */ + tmp = ioread32be(®s->command_config); + tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN); + iowrite32be(tmp, ®s->command_config); +} + +static const struct phylink_mac_ops memac_mac_ops = { + .validate = memac_validate, + .mac_select_pcs = memac_select_pcs, + .mac_prepare = memac_prepare, + .mac_config = memac_mac_config, + .mac_link_up = memac_link_up, + .mac_link_down = memac_link_down, +}; + static int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr) { - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - add_addr_in_paddr(memac->regs, (const u8 *)(*enet_addr), 0); return 0; @@ -867,9 +800,6 @@ static int memac_add_hash_mac_address(struct fman_mac *memac, u32 hash; u64 addr; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - addr = ENET_ADDR_TO_UINT64(*eth_addr); if (!(addr & GROUP_ADDRESS)) { @@ -898,9 +828,6 @@ static int memac_set_allmulti(struct fman_mac *memac, bool enable) u32 entry; struct memac_regs __iomem *regs = memac->regs; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - if (enable) { for (entry = 0; entry < HASH_TABLE_SIZE; entry++) iowrite32be(entry | HASH_CTRL_MCAST_EN, @@ -930,9 +857,6 @@ static int memac_del_hash_mac_address(struct fman_mac *memac, u32 hash; u64 addr; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - addr = ENET_ADDR_TO_UINT64(*eth_addr); hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK; @@ -960,9 +884,6 @@ static int memac_set_exception(struct fman_mac *memac, { u32 bit_mask = 0; - if (!is_init_done(memac->memac_drv_param)) - return -EINVAL; - bit_mask = get_exception_flag(exception); if (bit_mask) { if (enable) @@ -981,25 +902,16 @@ static int memac_set_exception(struct fman_mac *memac, static int memac_init(struct fman_mac *memac) { struct memac_cfg *memac_drv_param; - u8 i; enet_addr_t eth_addr; - bool slow_10g_if = false; - struct fixed_phy_status *fixed_link = NULL; int err; u32 reg32 = 0; - if (is_init_done(memac->memac_drv_param)) - return -EINVAL; - err = check_init_parameters(memac); if (err) return err; memac_drv_param = memac->memac_drv_param; - if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4) - slow_10g_if = true; - /* First, reset the MAC if desired. 
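* (memac_initialization() sets reset_on_init unconditionally, so in * practice this soft reset always runs.)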
*/ if (memac_drv_param->reset_on_init) { err = reset(memac->regs); @@ -1015,10 +927,7 @@ static int memac_init(struct fman_mac *memac) add_addr_in_paddr(memac->regs, (const u8 *)eth_addr, 0); } - fixed_link = memac_drv_param->fixed_link; - - init(memac->regs, memac->memac_drv_param, memac->phy_if, - memac->max_speed, slow_10g_if, memac->exceptions); + init(memac->regs, memac->memac_drv_param, memac->exceptions); /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround * Exists only in FMan 6.0 and 6.3. @@ -1034,33 +943,6 @@ static int memac_init(struct fman_mac *memac) iowrite32be(reg32, &memac->regs->command_config); } - if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) { - /* Configure internal SGMII PHY */ - if (memac->basex_if) - setup_sgmii_internal_phy_base_x(memac); - else - setup_sgmii_internal_phy(memac, fixed_link); - } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) { - /* Configure 4 internal SGMII PHYs */ - for (i = 0; i < 4; i++) { - u8 qsmgii_phy_addr, phy_addr; - /* QSGMII PHY address occupies 3 upper bits of 5-bit - * phy_address; the lower 2 bits are used to extend - * register address space and access each one of 4 - * ports inside QSGMII. - */ - phy_addr = memac->pcsphy->mdio.addr; - qsmgii_phy_addr = (u8)((phy_addr << 2) | i); - memac->pcsphy->mdio.addr = qsmgii_phy_addr; - if (memac->basex_if) - setup_sgmii_internal_phy_base_x(memac); - else - setup_sgmii_internal_phy(memac, fixed_link); - - memac->pcsphy->mdio.addr = phy_addr; - } - } - /* Max Frame Length */ err = fman_set_mac_max_frame(memac->fm, memac->mac_id, memac_drv_param->max_frame_length); @@ -1089,19 +971,28 @@ static int memac_init(struct fman_mac *memac) fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id, FMAN_INTR_TYPE_NORMAL, memac_exception, memac); - kfree(memac_drv_param); - memac->memac_drv_param = NULL; - return 0; } +static void pcs_put(struct phylink_pcs *pcs) +{ + struct mdio_device *mdiodev; + + if (IS_ERR_OR_NULL(pcs)) + return; + + mdiodev = lynx_get_mdio_device(pcs); + lynx_pcs_destroy(pcs); + mdio_device_free(mdiodev); +} + static int memac_free(struct fman_mac *memac) { free_init_resources(memac); - if (memac->pcsphy) - put_device(&memac->pcsphy->mdio.dev); - + pcs_put(memac->sgmii_pcs); + pcs_put(memac->qsgmii_pcs); + pcs_put(memac->xfi_pcs); kfree(memac->memac_drv_param); kfree(memac); @@ -1134,8 +1025,6 @@ static struct fman_mac *memac_config(struct mac_device *mac_dev, memac->addr = ENET_ADDR_TO_UINT64(mac_dev->addr); memac->regs = mac_dev->vaddr; - memac->max_speed = params->max_speed; - memac->phy_if = mac_dev->phy_if; memac->mac_id = params->mac_id; memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER | MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI); @@ -1143,7 +1032,6 @@ static struct fman_mac *memac_config(struct mac_device *mac_dev, memac->event_cb = params->event_cb; memac->dev_id = mac_dev; memac->fm = params->fm; - memac->basex_if = params->basex_if; /* Save FMan revision */ fman_get_revision(memac->fm, &memac->fm_rev_info); @@ -1151,101 +1039,221 @@ static struct fman_mac *memac_config(struct mac_device *mac_dev, return memac; } +static struct phylink_pcs *memac_pcs_create(struct device_node *mac_node, + int index) +{ + struct device_node *node; + struct mdio_device *mdiodev = NULL; + struct phylink_pcs *pcs; + + node = of_parse_phandle(mac_node, "pcsphy-handle", index); + if (node && of_device_is_available(node)) + mdiodev = of_mdio_find_device(node); + of_node_put(node); + + if (!mdiodev) + return ERR_PTR(-EPROBE_DEFER); + + pcs = lynx_pcs_create(mdiodev); 
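+ + /* The mdiodev reference taken by of_mdio_find_device() above is kept + * for the lifetime of the PCS and released again in pcs_put(). + */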
+ return pcs; +} + +static bool memac_supports(struct mac_device *mac_dev, phy_interface_t iface) +{ + /* If there's no serdes device, assume that it's been configured for + * whatever the default interface mode is. + */ + if (!mac_dev->fman_mac->serdes) + return mac_dev->phy_if == iface; + /* Otherwise, ask the serdes */ + return !phy_validate(mac_dev->fman_mac->serdes, PHY_MODE_ETHERNET, + iface, NULL); +} + int memac_initialization(struct mac_device *mac_dev, struct device_node *mac_node, struct fman_mac_params *params) { int err; - struct device_node *phy_node; - struct fixed_phy_status *fixed_link; + struct device_node *fixed; + struct phylink_pcs *pcs; struct fman_mac *memac; + unsigned long capabilities; + unsigned long *supported; + mac_dev->phylink_ops = &memac_mac_ops; mac_dev->set_promisc = memac_set_promiscuous; mac_dev->change_addr = memac_modify_mac_address; mac_dev->add_hash_mac_addr = memac_add_hash_mac_address; mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address; - mac_dev->set_tx_pause = memac_set_tx_pause_frames; - mac_dev->set_rx_pause = memac_accept_rx_pause_frames; mac_dev->set_exception = memac_set_exception; mac_dev->set_allmulti = memac_set_allmulti; mac_dev->set_tstamp = memac_set_tstamp; mac_dev->set_multi = fman_set_multi; - mac_dev->adjust_link = adjust_link_memac; mac_dev->enable = memac_enable; mac_dev->disable = memac_disable; - if (params->max_speed == SPEED_10000) - mac_dev->phy_if = PHY_INTERFACE_MODE_XGMII; - mac_dev->fman_mac = memac_config(mac_dev, params); - if (!mac_dev->fman_mac) { - err = -EINVAL; - goto _return; - } + if (!mac_dev->fman_mac) + return -EINVAL; memac = mac_dev->fman_mac; memac->memac_drv_param->max_frame_length = fman_get_max_frm(); memac->memac_drv_param->reset_on_init = true; - if (memac->phy_if == PHY_INTERFACE_MODE_SGMII || - memac->phy_if == PHY_INTERFACE_MODE_QSGMII) { - phy_node = of_parse_phandle(mac_node, "pcsphy-handle", 0); - if (!phy_node) { - pr_err("PCS PHY node is not available\n"); - err = -EINVAL; + + err = of_property_match_string(mac_node, "pcs-handle-names", "xfi"); + if (err >= 0) { + memac->xfi_pcs = memac_pcs_create(mac_node, err); + if (IS_ERR(memac->xfi_pcs)) { + err = PTR_ERR(memac->xfi_pcs); + dev_err_probe(mac_dev->dev, err, "missing xfi pcs\n"); goto _return_fm_mac_free; } + } else if (err != -EINVAL && err != -ENODATA) { + goto _return_fm_mac_free; + } - memac->pcsphy = of_phy_find_device(phy_node); - if (!memac->pcsphy) { - pr_err("of_phy_find_device (PCS PHY) failed\n"); - err = -EINVAL; + err = of_property_match_string(mac_node, "pcs-handle-names", "qsgmii"); + if (err >= 0) { + memac->qsgmii_pcs = memac_pcs_create(mac_node, err); + if (IS_ERR(memac->qsgmii_pcs)) { + err = PTR_ERR(memac->qsgmii_pcs); + dev_err_probe(mac_dev->dev, err, + "missing qsgmii pcs\n"); goto _return_fm_mac_free; } + } else if (err != -EINVAL && err != -ENODATA) { + goto _return_fm_mac_free; } - if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) { - struct phy_device *phy; + /* For compatibility, if pcs-handle-names is missing, we assume this + * phy is the first one in pcsphy-handle + */ + err = of_property_match_string(mac_node, "pcs-handle-names", "sgmii"); + if (err == -EINVAL || err == -ENODATA) + pcs = memac_pcs_create(mac_node, 0); + else if (err < 0) + goto _return_fm_mac_free; + else + pcs = memac_pcs_create(mac_node, err); - err = of_phy_register_fixed_link(mac_node); - if (err) - goto _return_fm_mac_free; + if (IS_ERR(pcs)) { + err = PTR_ERR(pcs); + dev_err_probe(mac_dev->dev, err, "missing pcs\n"); + 
goto _return_fm_mac_free; + } - fixed_link = kzalloc(sizeof(*fixed_link), GFP_KERNEL); - if (!fixed_link) { - err = -ENOMEM; - goto _return_fm_mac_free; - } + /* If err is set here, it means that pcs-handle-names was missing above + * (and therefore that xfi_pcs cannot be set). If we are defaulting to + * XGMII, assume this is for XFI. Otherwise, assume it is for SGMII. + */ + if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII) + memac->xfi_pcs = pcs; + else + memac->sgmii_pcs = pcs; + + memac->serdes = devm_of_phy_get(mac_dev->dev, mac_node, "serdes"); + err = PTR_ERR(memac->serdes); + if (err == -ENODEV || err == -ENOSYS) { + dev_dbg(mac_dev->dev, "could not get (optional) serdes\n"); + memac->serdes = NULL; + } else if (IS_ERR(memac->serdes)) { + dev_err_probe(mac_dev->dev, err, "could not get serdes\n"); + goto _return_fm_mac_free; + } - mac_dev->phy_node = of_node_get(mac_node); - phy = of_phy_find_device(mac_dev->phy_node); - if (!phy) { - err = -EINVAL; - of_node_put(mac_dev->phy_node); - goto _return_fixed_link_free; - } + /* The internal connection to the serdes is XGMII, but this isn't + * really correct for the phy mode (which is the external connection). + * However, this is how all older device trees say that they want + * 10GBASE-R (aka XFI), so just convert it for them. + */ + if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII) + mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER; + + /* TODO: The following interface modes are supported by (some) hardware + * but not by this driver: + * - 1000BASE-KX + * - 10GBASE-KR + * - XAUI/HiGig + */ + supported = mac_dev->phylink_config.supported_interfaces; - fixed_link->link = phy->link; - fixed_link->speed = phy->speed; - fixed_link->duplex = phy->duplex; - fixed_link->pause = phy->pause; - fixed_link->asym_pause = phy->asym_pause; + /* Note that half duplex is only supported on 10/100M interfaces. */ - put_device(&phy->mdio.dev); - memac->memac_drv_param->fixed_link = fixed_link; + if (memac->sgmii_pcs && + (memac_supports(mac_dev, PHY_INTERFACE_MODE_SGMII) || + memac_supports(mac_dev, PHY_INTERFACE_MODE_1000BASEX))) { + __set_bit(PHY_INTERFACE_MODE_SGMII, supported); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported); } + if (memac->sgmii_pcs && + memac_supports(mac_dev, PHY_INTERFACE_MODE_2500BASEX)) + __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported); + + if (memac->qsgmii_pcs && + memac_supports(mac_dev, PHY_INTERFACE_MODE_QSGMII)) + __set_bit(PHY_INTERFACE_MODE_QSGMII, supported); + else if (mac_dev->phy_if == PHY_INTERFACE_MODE_QSGMII) + dev_warn(mac_dev->dev, "no QSGMII pcs specified\n"); + + if (memac->xfi_pcs && + memac_supports(mac_dev, PHY_INTERFACE_MODE_10GBASER)) { + __set_bit(PHY_INTERFACE_MODE_10GBASER, supported); + } else { + /* From what I can tell, no 10g macs support RGMII. */ + phy_interface_set_rgmii(supported); + __set_bit(PHY_INTERFACE_MODE_MII, supported); + } + + capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 | MAC_100; + capabilities |= MAC_1000FD | MAC_2500FD | MAC_10000FD; + + /* These SoCs don't support half duplex at all; there's no different + * FMan version or compatible, so we just have to check the machine + * compatible instead + */ + if (of_machine_is_compatible("fsl,ls1043a") || + of_machine_is_compatible("fsl,ls1046a") || + of_machine_is_compatible("fsl,B4QDS")) + capabilities &= ~(MAC_10HD | MAC_100HD); + + mac_dev->phylink_config.mac_capabilities = capabilities; + + /* The T2080 and T4240 don't support half duplex RGMII. 
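+ * (rgmii_no_half_duplex is consumed by memac_validate(), which masks + * MAC_10HD and MAC_100HD for the RGMII modes.)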
There is no + * other way to identify these SoCs, so just use the machine + * compatible. + */ + if (of_machine_is_compatible("fsl,T2080QDS") || + of_machine_is_compatible("fsl,T2080RDB") || + of_machine_is_compatible("fsl,T2081QDS") || + of_machine_is_compatible("fsl,T4240QDS") || + of_machine_is_compatible("fsl,T4240RDB")) + memac->rgmii_no_half_duplex = true; + + /* Most boards should use MLO_AN_INBAND, but existing boards don't have + * a managed property. Default to MLO_AN_INBAND if nothing else is + * specified. We need to be careful and not enable this if we have a + * fixed link or if we are using MII or RGMII, since those + * configurations modes don't use in-band autonegotiation. + */ + fixed = of_get_child_by_name(mac_node, "fixed-link"); + if (!fixed && !of_property_read_bool(mac_node, "fixed-link") && + !of_property_read_bool(mac_node, "managed") && + mac_dev->phy_if != PHY_INTERFACE_MODE_MII && + !phy_interface_mode_is_rgmii(mac_dev->phy_if)) + mac_dev->phylink_config.ovr_an_inband = true; + of_node_put(fixed); + err = memac_init(mac_dev->fman_mac); if (err < 0) - goto _return_fixed_link_free; + goto _return_fm_mac_free; dev_info(mac_dev->dev, "FMan MEMAC\n"); - goto _return; + return 0; -_return_fixed_link_free: - kfree(fixed_link); _return_fm_mac_free: memac_free(mac_dev->fman_mac); -_return: return err; } diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index 5a4be54ad459..c2261d26db5b 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -13,6 +13,7 @@ #include <linux/bitrev.h> #include <linux/io.h> #include <linux/crc32.h> +#include <linux/netdevice.h> /* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */ #define TGEC_TX_IPG_LENGTH_MASK 0x000003ff @@ -243,10 +244,6 @@ static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg, static int check_init_parameters(struct fman_mac *tgec) { - if (tgec->max_speed < SPEED_10000) { - pr_err("10G MAC driver only support 10G speed\n"); - return -EINVAL; - } if (!tgec->exception_cb) { pr_err("uninitialized exception_cb\n"); return -EINVAL; @@ -384,40 +381,13 @@ static void free_init_resources(struct fman_mac *tgec) tgec->unicast_addr_hash = NULL; } -static bool is_init_done(struct tgec_cfg *cfg) -{ - /* Checks if tGEC driver parameters were initialized */ - if (!cfg) - return true; - - return false; -} - static int tgec_enable(struct fman_mac *tgec) { - struct tgec_regs __iomem *regs = tgec->regs; - u32 tmp; - - if (!is_init_done(tgec->cfg)) - return -EINVAL; - - tmp = ioread32be(®s->command_config); - tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN; - iowrite32be(tmp, ®s->command_config); - return 0; } static void tgec_disable(struct fman_mac *tgec) { - struct tgec_regs __iomem *regs = tgec->regs; - u32 tmp; - - WARN_ON_ONCE(!is_init_done(tgec->cfg)); - - tmp = ioread32be(®s->command_config); - tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN); - iowrite32be(tmp, ®s->command_config); } static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val) @@ -425,9 +395,6 @@ static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val) struct tgec_regs __iomem *regs = tgec->regs; u32 tmp; - if (!is_init_done(tgec->cfg)) - return -EINVAL; - tmp = ioread32be(®s->command_config); if (new_val) tmp |= CMD_CFG_PROMIS_EN; @@ -444,9 +411,6 @@ static int tgec_set_tx_pause_frames(struct fman_mac *tgec, { struct tgec_regs __iomem *regs = tgec->regs; - if (!is_init_done(tgec->cfg)) - return -EINVAL; - 
@@ -490,9 +491,6 @@ static int tgec_add_hash_mac_address(struct fman_mac *tgec,
 	u32 crc = 0xFFFFFFFF, hash;
 	u64 addr;
 
-	if (!is_init_done(tgec->cfg))
-		return -EINVAL;
-
 	addr = ENET_ADDR_TO_UINT64(*eth_addr);
 
 	if (!(addr & GROUP_ADDRESS)) {
@@ -525,9 +523,6 @@ static int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
 	u32 entry;
 	struct tgec_regs __iomem *regs = tgec->regs;
 
-	if (!is_init_done(tgec->cfg))
-		return -EINVAL;
-
 	if (enable) {
 		for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++)
 			iowrite32be(entry | TGEC_HASH_MCAST_EN,
@@ -548,9 +543,6 @@ static int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
 	struct tgec_regs __iomem *regs = tgec->regs;
 	u32 tmp;
 
-	if (!is_init_done(tgec->cfg))
-		return -EINVAL;
-
 	tmp = ioread32be(&regs->command_config);
 
 	if (enable)
@@ -572,9 +564,6 @@ static int tgec_del_hash_mac_address(struct fman_mac *tgec,
 	u32 crc = 0xFFFFFFFF, hash;
 	u64 addr;
 
-	if (!is_init_done(tgec->cfg))
-		return -EINVAL;
-
 	addr = ((*(u64 *)eth_addr) >> 16);
 
 	/* CRC calculation */
@@ -601,22 +590,12 @@ static int tgec_del_hash_mac_address(struct fman_mac *tgec,
 	return 0;
 }
 
-static void tgec_adjust_link(struct mac_device *mac_dev)
-{
-	struct phy_device *phy_dev = mac_dev->phy_dev;
-
-	mac_dev->update_speed(mac_dev, phy_dev->speed);
-}
-
 static int tgec_set_exception(struct fman_mac *tgec,
 			      enum fman_mac_exceptions exception, bool enable)
 {
 	struct tgec_regs __iomem *regs = tgec->regs;
 	u32 bit_mask = 0;
 
-	if (!is_init_done(tgec->cfg))
-		return -EINVAL;
-
 	bit_mask = get_exception_flag(exception);
 	if (bit_mask) {
 		if (enable)
@@ -641,9 +620,6 @@ static int tgec_init(struct fman_mac *tgec)
 	enet_addr_t eth_addr;
 	int err;
 
-	if (is_init_done(tgec->cfg))
-		return -EINVAL;
-
 	if (DEFAULT_RESET_ON_INIT &&
 	    (fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) {
 		pr_err("Can't reset MAC!\n");
@@ -753,7 +729,6 @@ static struct fman_mac *tgec_config(struct mac_device *mac_dev,
 
 	tgec->regs = mac_dev->vaddr;
 	tgec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
-	tgec->max_speed = params->max_speed;
 	tgec->mac_id = params->mac_id;
 	tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT	|
 			    TGEC_IMASK_REM_FAULT	|
@@ -788,17 +763,15 @@ int tgec_initialization(struct mac_device *mac_dev,
 	int err;
 	struct fman_mac *tgec;
 
+	mac_dev->phylink_ops = &tgec_mac_ops;
 	mac_dev->set_promisc = tgec_set_promiscuous;
 	mac_dev->change_addr = tgec_modify_mac_address;
 	mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
 	mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
-	mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
-	mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
 	mac_dev->set_exception = tgec_set_exception;
 	mac_dev->set_allmulti = tgec_set_allmulti;
 	mac_dev->set_tstamp = tgec_set_tstamp;
 	mac_dev->set_multi = fman_set_multi;
-	mac_dev->adjust_link = tgec_adjust_link;
 	mac_dev->enable = tgec_enable;
 	mac_dev->disable = tgec_disable;
 
@@ -808,6 +781,19 @@ int tgec_initialization(struct mac_device *mac_dev,
 		goto _return;
 	}
 
+	/* The internal connection to the serdes is XGMII, but this isn't
+	 * really correct for the phy mode (which is the external connection).
+	 * However, this is how all older device trees say that they want
+	 * XAUI, so just convert it for them.
+	 */
+	if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+		mac_dev->phy_if = PHY_INTERFACE_MODE_XAUI;
+
+	__set_bit(PHY_INTERFACE_MODE_XAUI,
+		  mac_dev->phylink_config.supported_interfaces);
+	mac_dev->phylink_config.mac_capabilities =
+		MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10000FD;
+
 	tgec = mac_dev->fman_mac;
 	tgec->cfg->max_frame_length = fman_get_max_frm();
 	err = tgec_init(tgec);
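Both MACs apply the same legacy fixup: old device trees say "xgmii" when they really mean the external link (XAUI for the tgec above, 10GBASE-R for the memac earlier). A toy model of that normalization, with a hypothetical enum standing in for the kernel's phy_interface_t:

#include <stdio.h>

enum toy_phy_mode { TOY_XGMII, TOY_XAUI, TOY_10GBASER };

/* Hypothetical helper: tgec treats a legacy XGMII request as XAUI. */
static enum toy_phy_mode tgec_fixup_phy_mode(enum toy_phy_mode mode)
{
	return mode == TOY_XGMII ? TOY_XAUI : mode;
}

int main(void)
{
	printf("xgmii -> %s\n",
	       tgec_fixup_phy_mode(TOY_XGMII) == TOY_XAUI ? "xaui" : "?");
	return 0;
}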
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 13e67f2864be..43665806c590 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -15,6 +15,7 @@
 #include <linux/phy.h>
 #include <linux/netdevice.h>
 #include <linux/phy_fixed.h>
+#include <linux/phylink.h>
 #include <linux/etherdevice.h>
 #include <linux/libfdt_env.h>
 
@@ -93,130 +94,8 @@ int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
 	return 0;
 }
 
-/**
- * fman_set_mac_active_pause
- * @mac_dev: A pointer to the MAC device
- * @rx: Pause frame setting for RX
- * @tx: Pause frame setting for TX
- *
- * Set the MAC RX/TX PAUSE frames settings
- *
- * Avoid redundant calls to FMD, if the MAC driver already contains the desired
- * active PAUSE settings. Otherwise, the new active settings should be reflected
- * in FMan.
- *
- * Return: 0 on success; Error code otherwise.
- */
-int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
-{
-	struct fman_mac *fman_mac = mac_dev->fman_mac;
-	int err = 0;
-
-	if (rx != mac_dev->rx_pause_active) {
-		err = mac_dev->set_rx_pause(fman_mac, rx);
-		if (likely(err == 0))
-			mac_dev->rx_pause_active = rx;
-	}
-
-	if (tx != mac_dev->tx_pause_active) {
-		u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE :
-					 FSL_FM_PAUSE_TIME_DISABLE);
-
-		err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0);
-
-		if (likely(err == 0))
-			mac_dev->tx_pause_active = tx;
-	}
-
-	return err;
-}
-EXPORT_SYMBOL(fman_set_mac_active_pause);
-
-/**
- * fman_get_pause_cfg
- * @mac_dev: A pointer to the MAC device
- * @rx_pause: Return value for RX setting
- * @tx_pause: Return value for TX setting
- *
- * Determine the MAC RX/TX PAUSE frames settings based on PHY
- * autonegotiation or values set by eththool.
- *
- * Return: Pointer to FMan device.
- */
-void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
-			bool *tx_pause)
-{
-	struct phy_device *phy_dev = mac_dev->phy_dev;
-	u16 lcl_adv, rmt_adv;
-	u8 flowctrl;
-
-	*rx_pause = *tx_pause = false;
-
-	if (!phy_dev->duplex)
-		return;
-
-	/* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
-	 * are those set by ethtool.
-	 */
-	if (!mac_dev->autoneg_pause) {
-		*rx_pause = mac_dev->rx_pause_req;
-		*tx_pause = mac_dev->tx_pause_req;
-		return;
-	}
-
-	/* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
-	 * settings depend on the result of the link negotiation.
-	 */
-
-	/* get local capabilities */
-	lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising);
-
-	/* get link partner capabilities */
-	rmt_adv = 0;
-	if (phy_dev->pause)
-		rmt_adv |= LPA_PAUSE_CAP;
-	if (phy_dev->asym_pause)
-		rmt_adv |= LPA_PAUSE_ASYM;
-
-	/* Calculate TX/RX settings based on local and peer advertised
-	 * symmetric/asymmetric PAUSE capabilities.
-	 */
-	flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
-	if (flowctrl & FLOW_CTRL_RX)
-		*rx_pause = true;
-	if (flowctrl & FLOW_CTRL_TX)
-		*tx_pause = true;
-}
-EXPORT_SYMBOL(fman_get_pause_cfg);
-
-#define DTSEC_SUPPORTED \
-	(SUPPORTED_10baseT_Half \
-	| SUPPORTED_10baseT_Full \
-	| SUPPORTED_100baseT_Half \
-	| SUPPORTED_100baseT_Full \
-	| SUPPORTED_Autoneg \
-	| SUPPORTED_Pause \
-	| SUPPORTED_Asym_Pause \
-	| SUPPORTED_FIBRE \
-	| SUPPORTED_MII)
-
 static DEFINE_MUTEX(eth_lock);
 
-static const u16 phy2speed[] = {
-	[PHY_INTERFACE_MODE_MII]	= SPEED_100,
-	[PHY_INTERFACE_MODE_GMII]	= SPEED_1000,
-	[PHY_INTERFACE_MODE_SGMII]	= SPEED_1000,
-	[PHY_INTERFACE_MODE_TBI]	= SPEED_1000,
-	[PHY_INTERFACE_MODE_RMII]	= SPEED_100,
-	[PHY_INTERFACE_MODE_RGMII]	= SPEED_1000,
-	[PHY_INTERFACE_MODE_RGMII_ID]	= SPEED_1000,
-	[PHY_INTERFACE_MODE_RGMII_RXID]	= SPEED_1000,
-	[PHY_INTERFACE_MODE_RGMII_TXID]	= SPEED_1000,
-	[PHY_INTERFACE_MODE_RTBI]	= SPEED_1000,
-	[PHY_INTERFACE_MODE_QSGMII]	= SPEED_1000,
-	[PHY_INTERFACE_MODE_XGMII]	= SPEED_10000
-};
-
 static struct platform_device *dpaa_eth_add_device(int fman_id,
 						   struct mac_device *mac_dev)
 {
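The hunk above deletes the driver's own pause bookkeeping because phylink now resolves flow control itself and hands the result to mac_link_up() as tx_pause/rx_pause (see tgec_link_up() earlier). For reference, the resolution rule the deleted code delegated to mii_resolve_flowctrl_fdx() can be restated in a few lines; the PAUSE_* names below are stand-ins for the ADVERTISE_PAUSE_*/LPA_PAUSE_* bits in <linux/mii.h>, and resolve_pause() is a hypothetical helper:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAUSE_CAP	(1u << 0)	/* symmetric pause advertised */
#define PAUSE_ASYM	(1u << 1)	/* asymmetric pause advertised */

/* Resolve local and link-partner pause advertisements into the RX/TX
 * pause enables, following the standard full-duplex resolution table.
 */
static void resolve_pause(uint16_t lcl, uint16_t rmt, bool *rx, bool *tx)
{
	*rx = *tx = false;

	if (lcl & rmt & PAUSE_CAP) {
		*rx = *tx = true;
	} else if (lcl & rmt & PAUSE_ASYM) {
		if (lcl & PAUSE_CAP)
			*rx = true;	/* we pause, the partner doesn't */
		else if (rmt & PAUSE_CAP)
			*tx = true;	/* the partner pauses, we don't */
	}
}

int main(void)
{
	bool rx, tx;

	resolve_pause(PAUSE_CAP | PAUSE_ASYM, PAUSE_ASYM, &rx, &tx);
	printf("rx=%d tx=%d\n", rx, tx);
	return 0;
}

Symmetric agreement enables both directions; with only an asymmetric match, whichever side also advertised symmetric pause ends up as the receiver.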
@@ -263,8 +142,8 @@ no_mem:
 }
 
 static const struct of_device_id mac_match[] = {
-	{ .compatible	= "fsl,fman-dtsec", .data = dtsec_initialization },
-	{ .compatible	= "fsl,fman-xgec", .data = tgec_initialization },
+	{ .compatible = "fsl,fman-dtsec", .data = dtsec_initialization },
+	{ .compatible = "fsl,fman-xgec", .data = tgec_initialization },
 	{ .compatible = "fsl,fman-memac", .data = memac_initialization },
 	{}
 };
@@ -295,6 +174,7 @@ static int mac_probe(struct platform_device *_of_dev)
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
+	platform_set_drvdata(_of_dev, mac_dev);
 
 	/* Save private information */
 	mac_dev->priv = priv;
@@ -424,57 +304,21 @@ static int mac_probe(struct platform_device *_of_dev)
 	}
 	mac_dev->phy_if = phy_if;
 
-	priv->speed		= phy2speed[mac_dev->phy_if];
-	params.max_speed	= priv->speed;
-	mac_dev->if_support	= DTSEC_SUPPORTED;
-	/* We don't support half-duplex in SGMII mode */
-	if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII)
-		mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
-					 SUPPORTED_100baseT_Half);
-
-	/* Gigabit support (no half-duplex) */
-	if (params.max_speed == 1000)
-		mac_dev->if_support |= SUPPORTED_1000baseT_Full;
-
-	/* The 10G interface only supports one mode */
-	if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
-		mac_dev->if_support = SUPPORTED_10000baseT_Full;
-
-	/* Get the rest of the PHY information */
-	mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
-
-	params.basex_if		= false;
 	params.mac_id		= priv->cell_index;
 	params.fm		= (void *)priv->fman;
 	params.exception_cb	= mac_exception;
 	params.event_cb		= mac_exception;
 
 	err = init(mac_dev, mac_node, &params);
-	if (err < 0) {
-		dev_err(dev, "mac_dev->init() = %d\n", err);
-		of_node_put(mac_dev->phy_node);
-		return err;
-	}
-
-	/* pause frame autonegotiation enabled */
-	mac_dev->autoneg_pause = true;
-
-	/* By intializing the values to false, force FMD to enable PAUSE frames
-	 * on RX and TX
-	 */
-	mac_dev->rx_pause_req = true;
-	mac_dev->tx_pause_req = true;
-	mac_dev->rx_pause_active = false;
-	mac_dev->tx_pause_active = false;
-	err = fman_set_mac_active_pause(mac_dev, true, true);
 	if (err < 0)
-		dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
+		return err;
 
 	if (!is_zero_ether_addr(mac_dev->addr))
 		dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
 
 	priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
 	if (IS_ERR(priv->eth_dev)) {
+		err = PTR_ERR(priv->eth_dev);
 		dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
 			priv->cell_index);
 		priv->eth_dev = NULL;
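mac_probe() reaches the per-MAC init() through the mac_match table, with the OF core doing the compatible-string lookup and handing back the matched .data callback. A userspace model of that dispatch, with all names hypothetical:

#include <stdio.h>
#include <string.h>

struct toy_of_id {
	const char *compatible;
	int (*init)(void);	/* stands in for the .data callback */
};

static int init_dtsec(void) { puts("dTSEC init"); return 0; }
static int init_tgec(void)  { puts("tGEC init");  return 0; }
static int init_memac(void) { puts("mEMAC init"); return 0; }

static const struct toy_of_id toy_mac_match[] = {
	{ "fsl,fman-dtsec", init_dtsec },
	{ "fsl,fman-xgec",  init_tgec  },
	{ "fsl,fman-memac", init_memac },
	{ NULL, NULL },
};

/* Linear compatible-string match, as the OF core does conceptually. */
static int toy_probe(const char *compatible)
{
	const struct toy_of_id *id;

	for (id = toy_mac_match; id->compatible; id++)
		if (!strcmp(id->compatible, compatible))
			return id->init();
	return -1;
}

int main(void)
{
	return toy_probe("fsl,fman-memac");
}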
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 13b69ca5f00c..ad06f8d7924b 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -9,6 +9,7 @@
 #include <linux/device.h>
 #include <linux/if_ether.h>
 #include <linux/phy.h>
+#include <linux/phylink.h>
 #include <linux/list.h>
 
 #include "fman_port.h"
@@ -24,32 +25,22 @@ struct mac_device {
 	struct resource *res;
 	u8 addr[ETH_ALEN];
 	struct fman_port *port[2];
-	u32 if_support;
-	struct phy_device *phy_dev;
+	struct phylink *phylink;
+	struct phylink_config phylink_config;
 	phy_interface_t phy_if;
-	struct device_node *phy_node;
-	struct net_device *net_dev;
 
-	bool autoneg_pause;
-	bool rx_pause_req;
-	bool tx_pause_req;
-	bool rx_pause_active;
-	bool tx_pause_active;
 	bool promisc;
 	bool allmulti;
 
+	const struct phylink_mac_ops *phylink_ops;
 	int (*enable)(struct fman_mac *mac_dev);
 	void (*disable)(struct fman_mac *mac_dev);
-	void (*adjust_link)(struct mac_device *mac_dev);
 	int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
 	int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
 	int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
 	int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
 	int (*set_multi)(struct net_device *net_dev,
 			 struct mac_device *mac_dev);
-	int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
-	int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority,
-			    u16 pause_time, u16 thresh_time);
 	int (*set_exception)(struct fman_mac *mac_dev,
 			     enum fman_mac_exceptions exception, bool enable);
 	int (*add_hash_mac_addr)(struct fman_mac *mac_dev,
@@ -63,6 +54,12 @@ struct mac_device {
 	struct mac_priv_s *priv;
 };
 
phylink_config); +} + struct dpaa_eth_data { struct mac_device *mac_dev; int mac_hw_id; |