Diffstat (limited to 'drivers/net/ethernet/ti')
-rw-r--r--  drivers/net/ethernet/ti/Kconfig                   |   12
-rw-r--r--  drivers/net/ethernet/ti/Makefile                  |    3
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-ethtool.c       |   27
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c          |   11
-rw-r--r--  drivers/net/ethernet/ti/icssg/icss_iep.c          |  127
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_common.c      |   15
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth.c      |   98
-rw-r--r--  drivers/net/ethernet/ti/icssm/icssm_prueth.c      | 1746
-rw-r--r--  drivers/net/ethernet/ti/icssm/icssm_prueth.h      |  262
-rw-r--r--  drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h  |   85
-rw-r--r--  drivers/net/ethernet/ti/icssm/icssm_switch.h      |  257
11 files changed, 2577 insertions, 66 deletions
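
The new icssm driver added below (icssm/icssm_prueth.c) hands frames to the PRU firmware through per-queue rings of fixed-size buffer descriptors in OCMC RAM; icssm_prueth_tx_enqueue() derives the free space from the read/write block indices and splits the copy when it crosses the end of the ring. The standalone C sketch below restates only that index arithmetic for illustration; the block size and descriptor count are placeholder values, not taken from the firmware memory map in the patch.

/*
 * Standalone sketch of the TX ring free-space and wrap-around arithmetic
 * mirrored from icssm_prueth_tx_enqueue() in this patch. Constants are
 * illustrative placeholders only.
 */
#include <stdbool.h>
#include <stdio.h>

#define ICSS_BLOCK_SIZE   32   /* assumed payload bytes per buffer block */
#define BUFFER_DESC_COUNT 16   /* assumed number of BDs in the queue */

static int free_blocks(int write_block, int read_block)
{
	if (write_block > read_block)
		return (BUFFER_DESC_COUNT - write_block) + read_block;
	if (write_block < read_block)
		return read_block - write_block;
	return BUFFER_DESC_COUNT;	/* rd == wr: queue is empty */
}

int main(void)
{
	int write_block = 14, read_block = 3;	/* example ring state */
	int pktlen = 128;			/* example frame length */
	int pkt_blocks = (pktlen + ICSS_BLOCK_SIZE - 1) / ICSS_BLOCK_SIZE;
	bool wrapped = false;
	int update_block;

	if (pkt_blocks > free_blocks(write_block, read_block)) {
		puts("out of queue space (-ENOBUFS)");
		return 1;
	}

	update_block = write_block + pkt_blocks;
	if (update_block >= BUFFER_DESC_COUNT) {
		update_block %= BUFFER_DESC_COUNT;
		wrapped = true;	/* copy split: tail of ring, then head */
	}

	printf("copy %d blocks, new write index %d, wrapped=%d\n",
	       pkt_blocks, update_block, wrapped);
	return 0;
}

With the example state above (write index 14, read index 3, 16 descriptors), a 128-byte frame needs 4 blocks, fits in the 5 free blocks, and wraps the write index around to 2, which is when the driver performs the two-part memcpy into OCMC RAM.
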
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index a07c910c497a..a54d71155263 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -229,4 +229,16 @@ config TI_ICSS_IEP To compile this driver as a module, choose M here. The module will be called icss_iep. +config TI_PRUETH + tristate "TI PRU Ethernet EMAC driver" + depends on PRU_REMOTEPROC + depends on NET_SWITCHDEV + select TI_ICSS_IEP + imply PTP_1588_CLOCK + help + Some TI SoCs has Programmable Realtime Unit (PRU) cores which can + support Single or Dual Ethernet ports with the help of firmware code + running on PRU cores. This driver supports remoteproc based + communication to PRU firmware to expose Ethernet interface to Linux. + endif # NET_VENDOR_TI diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index cbcf44806924..93c0a4d0e33a 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -3,6 +3,9 @@ # Makefile for the TI network device drivers. # +obj-$(CONFIG_TI_PRUETH) += icssm-prueth.o +icssm-prueth-y := icssm/icssm_prueth.o + obj-$(CONFIG_TI_CPSW) += cpsw-common.o obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index 9032444435e9..c57497074ae6 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -694,17 +694,20 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) { struct am65_cpsw_common *common = am65_ndev_to_common(ndev); - unsigned int ptp_v2_filter; - - ptp_v2_filter = BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | - BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | - BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | - BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + unsigned int ptp_filter; + + ptp_filter = BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ); if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) return ethtool_op_get_ts_info(ndev, info); @@ -716,7 +719,7 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev, SOF_TIMESTAMPING_RAW_HARDWARE; info->phc_index = am65_cpts_phc_index(common->cpts); info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); - info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | ptp_v2_filter; + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | ptp_filter; return 0; } diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index ecd6ecac87bb..110eb2da8dbc 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1522,7 +1522,7 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common, } } - if (single_port) { + if (single_port && num_tx) { netif_txq = netdev_get_tx_queue(ndev, chn); 
netdev_tx_completed_queue(netif_txq, num_tx, total_bytes); am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq); @@ -1813,6 +1813,9 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, case HWTSTAMP_FILTER_NONE: port->rx_ts_enabled = false; break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: @@ -1823,7 +1826,7 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: port->rx_ts_enabled = true; - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: @@ -1884,8 +1887,8 @@ static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev, cfg.flags = 0; cfg.tx_type = port->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - cfg.rx_filter = port->rx_ts_enabled ? - HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE; + cfg.rx_filter = port->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT | + HWTSTAMP_FILTER_PTP_V1_L4_EVENT : HWTSTAMP_FILTER_NONE; return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; } diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c index 2a1c43316f46..ec085897edf0 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.c +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -621,7 +621,8 @@ exit: static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on) { - u32 val, cap, ret = 0; + u32 val, cap; + int ret = 0; mutex_lock(&iep->ptp_clk_mutex); @@ -685,11 +686,17 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx) struct platform_device *pdev; struct device_node *iep_np; struct icss_iep *iep; + int ret; iep_np = of_parse_phandle(np, "ti,iep", idx); - if (!iep_np || !of_device_is_available(iep_np)) + if (!iep_np) return ERR_PTR(-ENODEV); + if (!of_device_is_available(iep_np)) { + of_node_put(iep_np); + return ERR_PTR(-ENODEV); + } + pdev = of_find_device_by_node(iep_np); of_node_put(iep_np); @@ -698,21 +705,28 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx) return ERR_PTR(-EPROBE_DEFER); iep = platform_get_drvdata(pdev); - if (!iep) - return ERR_PTR(-EPROBE_DEFER); + if (!iep) { + ret = -EPROBE_DEFER; + goto err_put_pdev; + } device_lock(iep->dev); if (iep->client_np) { device_unlock(iep->dev); dev_err(iep->dev, "IEP is already acquired by %s", iep->client_np->name); - return ERR_PTR(-EBUSY); + ret = -EBUSY; + goto err_put_pdev; } iep->client_np = np; device_unlock(iep->dev); - get_device(iep->dev); return iep; + +err_put_pdev: + put_device(&pdev->dev); + + return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(icss_iep_get_idx); @@ -968,11 +982,112 @@ static const struct icss_iep_plat_data am654_icss_iep_plat_data = { .config = &am654_icss_iep_regmap_config, }; +static const struct icss_iep_plat_data am57xx_icss_iep_plat_data = { + .flags = ICSS_IEP_64BIT_COUNTER_SUPPORT | + ICSS_IEP_SLOW_COMPEN_REG_SUPPORT, + .reg_offs = { + [ICSS_IEP_GLOBAL_CFG_REG] = 0x00, + [ICSS_IEP_COMPEN_REG] = 0x08, + [ICSS_IEP_SLOW_COMPEN_REG] = 0x0c, + [ICSS_IEP_COUNT_REG0] = 0x10, + [ICSS_IEP_COUNT_REG1] = 0x14, + [ICSS_IEP_CAPTURE_CFG_REG] = 0x18, + [ICSS_IEP_CAPTURE_STAT_REG] = 0x1c, + + [ICSS_IEP_CAP6_RISE_REG0] = 0x50, + [ICSS_IEP_CAP6_RISE_REG1] = 0x54, + + [ICSS_IEP_CAP7_RISE_REG0] = 0x60, + 
[ICSS_IEP_CAP7_RISE_REG1] = 0x64, + + [ICSS_IEP_CMP_CFG_REG] = 0x70, + [ICSS_IEP_CMP_STAT_REG] = 0x74, + [ICSS_IEP_CMP0_REG0] = 0x78, + [ICSS_IEP_CMP0_REG1] = 0x7c, + [ICSS_IEP_CMP1_REG0] = 0x80, + [ICSS_IEP_CMP1_REG1] = 0x84, + + [ICSS_IEP_CMP8_REG0] = 0xc0, + [ICSS_IEP_CMP8_REG1] = 0xc4, + [ICSS_IEP_SYNC_CTRL_REG] = 0x180, + [ICSS_IEP_SYNC0_STAT_REG] = 0x188, + [ICSS_IEP_SYNC1_STAT_REG] = 0x18c, + [ICSS_IEP_SYNC_PWIDTH_REG] = 0x190, + [ICSS_IEP_SYNC0_PERIOD_REG] = 0x194, + [ICSS_IEP_SYNC1_DELAY_REG] = 0x198, + [ICSS_IEP_SYNC_START_REG] = 0x19c, + }, + .config = &am654_icss_iep_regmap_config, +}; + +static bool am335x_icss_iep_valid_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_CAPTURE_STAT_REG: + case ICSS_IEP_CAP6_RISE_REG0: + case ICSS_IEP_CMP_CFG_REG ... ICSS_IEP_CMP0_REG0: + case ICSS_IEP_CMP8_REG0 ... ICSS_IEP_SYNC_START_REG: + return true; + default: + return false; + } +} + +static const struct regmap_config am335x_icss_iep_regmap_config = { + .name = "icss iep", + .reg_stride = 1, + .reg_write = icss_iep_regmap_write, + .reg_read = icss_iep_regmap_read, + .writeable_reg = am335x_icss_iep_valid_reg, + .readable_reg = am335x_icss_iep_valid_reg, +}; + +static const struct icss_iep_plat_data am335x_icss_iep_plat_data = { + .flags = 0, + .reg_offs = { + [ICSS_IEP_GLOBAL_CFG_REG] = 0x00, + [ICSS_IEP_COMPEN_REG] = 0x08, + [ICSS_IEP_COUNT_REG0] = 0x0c, + [ICSS_IEP_CAPTURE_CFG_REG] = 0x10, + [ICSS_IEP_CAPTURE_STAT_REG] = 0x14, + + [ICSS_IEP_CAP6_RISE_REG0] = 0x30, + + [ICSS_IEP_CAP7_RISE_REG0] = 0x38, + + [ICSS_IEP_CMP_CFG_REG] = 0x40, + [ICSS_IEP_CMP_STAT_REG] = 0x44, + [ICSS_IEP_CMP0_REG0] = 0x48, + + [ICSS_IEP_CMP8_REG0] = 0x88, + [ICSS_IEP_SYNC_CTRL_REG] = 0x100, + [ICSS_IEP_SYNC0_STAT_REG] = 0x108, + [ICSS_IEP_SYNC1_STAT_REG] = 0x10c, + [ICSS_IEP_SYNC_PWIDTH_REG] = 0x110, + [ICSS_IEP_SYNC0_PERIOD_REG] = 0x114, + [ICSS_IEP_SYNC1_DELAY_REG] = 0x118, + [ICSS_IEP_SYNC_START_REG] = 0x11c, + }, + .config = &am335x_icss_iep_regmap_config, +}; + static const struct of_device_id icss_iep_of_match[] = { { .compatible = "ti,am654-icss-iep", .data = &am654_icss_iep_plat_data, }, + { + .compatible = "ti,am5728-icss-iep", + .data = &am57xx_icss_iep_plat_data, + }, + { + .compatible = "ti,am4376-icss-iep", + .data = &am335x_icss_iep_plat_data, + }, + { + .compatible = "ti,am3356-icss-iep", + .data = &am335x_icss_iep_plat_data, + }, {}, }; MODULE_DEVICE_TABLE(of, icss_iep_of_match); diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index 12f25cec6255..57e5f1c88f50 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -706,9 +706,9 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) struct page_pool *pool; struct sk_buff *skb; struct xdp_buff xdp; + int headroom, ret; u32 *psdata; void *pa; - int ret; *xdp_state = 0; pool = rx_chn->pg_pool; @@ -757,22 +757,23 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false); *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len); - if (*xdp_state == ICSSG_XDP_PASS) - skb = xdp_build_skb_from_buff(&xdp); - else + if (*xdp_state != ICSSG_XDP_PASS) goto requeue; + headroom = xdp.data - xdp.data_hard_start; + pkt_len = xdp.data_end - xdp.data; } else { - /* prepare skb and send to n/w stack */ - skb = napi_build_skb(pa, PAGE_SIZE); + headroom = PRUETH_HEADROOM; } + /* prepare 
skb and send to n/w stack */ + skb = napi_build_skb(pa, PAGE_SIZE); if (!skb) { ndev->stats.rx_dropped++; page_pool_recycle_direct(pool, page); goto requeue; } - skb_reserve(skb, PRUETH_HEADROOM); + skb_reserve(skb, headroom); skb_put(skb, pkt_len); skb->dev = ndev; diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 2b973d6e2341..e42d0fdefee1 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -50,6 +50,8 @@ /* CTRLMMR_ICSSG_RGMII_CTRL register bits */ #define ICSSG_CTRL_RGMII_ID_MODE BIT(24) +static void emac_adjust_link(struct net_device *ndev); + static int emac_get_tx_ts(struct prueth_emac *emac, struct emac_tx_ts_response *rsp) { @@ -201,6 +203,44 @@ static void prueth_emac_stop(struct prueth *prueth) } } +static void icssg_enable_fw_offload(struct prueth *prueth) +{ + struct prueth_emac *emac; + int mac; + + for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) { + emac = prueth->emac[mac]; + if (prueth->is_hsr_offload_mode) { + if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM) + icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE); + else + icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE); + } + + if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) { + if (netif_running(emac->ndev)) { + icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan, + ICSSG_FDB_ENTRY_P0_MEMBERSHIP | + ICSSG_FDB_ENTRY_P1_MEMBERSHIP | + ICSSG_FDB_ENTRY_P2_MEMBERSHIP | + ICSSG_FDB_ENTRY_BLOCK, + true); + icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID, + BIT(emac->port_id) | DEFAULT_PORT_MASK, + BIT(emac->port_id) | DEFAULT_UNTAG_MASK, + true); + if (prueth->is_hsr_offload_mode) + icssg_vtbl_modify(emac, DEFAULT_VID, + DEFAULT_PORT_MASK, + DEFAULT_UNTAG_MASK, true); + icssg_set_pvid(prueth, emac->port_vlan, emac->port_id); + if (prueth->is_switch_mode) + icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE); + } + } + } +} + static int prueth_emac_common_start(struct prueth *prueth) { struct prueth_emac *emac; @@ -229,6 +269,10 @@ static int prueth_emac_common_start(struct prueth *prueth) ret = icssg_config(prueth, emac, slice); if (ret) goto disable_class; + + mutex_lock(&emac->ndev->phydev->lock); + emac_adjust_link(emac->ndev); + mutex_unlock(&emac->ndev->phydev->lock); } ret = prueth_emac_start(prueth); @@ -610,7 +654,7 @@ static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac, static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr) { - struct net_device *real_dev; + struct net_device *real_dev, *port_dev; struct prueth_emac *emac; u8 vlan_id, i; @@ -619,11 +663,15 @@ static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr) if (is_hsr_master(real_dev)) { for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) { - emac = netdev_priv(hsr_get_port_ndev(real_dev, i)); - if (!emac) + port_dev = hsr_get_port_ndev(real_dev, i); + emac = netdev_priv(port_dev); + if (!emac) { + dev_put(port_dev); return -EINVAL; + } icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true); + dev_put(port_dev); } } else { emac = netdev_priv(real_dev); @@ -635,7 +683,7 @@ static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr) static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr) { - struct net_device *real_dev; + struct net_device *real_dev, *port_dev; struct prueth_emac *emac; u8 vlan_id, i; @@ -644,11 +692,15 @@ static int icssg_prueth_hsr_del_mcast(struct net_device 
*ndev, const u8 *addr) if (is_hsr_master(real_dev)) { for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) { - emac = netdev_priv(hsr_get_port_ndev(real_dev, i)); - if (!emac) + port_dev = hsr_get_port_ndev(real_dev, i); + emac = netdev_priv(port_dev); + if (!emac) { + dev_put(port_dev); return -EINVAL; + } icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false); + dev_put(port_dev); } } else { emac = netdev_priv(real_dev); @@ -747,6 +799,7 @@ static int emac_ndo_open(struct net_device *ndev) ret = prueth_emac_common_start(prueth); if (ret) goto free_rx_irq; + icssg_enable_fw_offload(prueth); } flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET; @@ -1354,8 +1407,7 @@ static int prueth_emac_restart(struct prueth *prueth) static void icssg_change_mode(struct prueth *prueth) { - struct prueth_emac *emac; - int mac, ret; + int ret; ret = prueth_emac_restart(prueth); if (ret) { @@ -1363,35 +1415,7 @@ static void icssg_change_mode(struct prueth *prueth) return; } - for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) { - emac = prueth->emac[mac]; - if (prueth->is_hsr_offload_mode) { - if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM) - icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE); - else - icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE); - } - - if (netif_running(emac->ndev)) { - icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan, - ICSSG_FDB_ENTRY_P0_MEMBERSHIP | - ICSSG_FDB_ENTRY_P1_MEMBERSHIP | - ICSSG_FDB_ENTRY_P2_MEMBERSHIP | - ICSSG_FDB_ENTRY_BLOCK, - true); - icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID, - BIT(emac->port_id) | DEFAULT_PORT_MASK, - BIT(emac->port_id) | DEFAULT_UNTAG_MASK, - true); - if (prueth->is_hsr_offload_mode) - icssg_vtbl_modify(emac, DEFAULT_VID, - DEFAULT_PORT_MASK, - DEFAULT_UNTAG_MASK, true); - icssg_set_pvid(prueth, emac->port_vlan, emac->port_id); - if (prueth->is_switch_mode) - icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE); - } - } + icssg_enable_fw_offload(prueth); } static int prueth_netdevice_port_link(struct net_device *ndev, diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth.c b/drivers/net/ethernet/ti/icssm/icssm_prueth.c new file mode 100644 index 000000000000..293b7af04263 --- /dev/null +++ b/drivers/net/ethernet/ti/icssm/icssm_prueth.c @@ -0,0 +1,1746 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Texas Instruments ICSSM Ethernet Driver + * + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#include <linux/etherdevice.h> +#include <linux/genalloc.h> +#include <linux/if_bridge.h> +#include <linux/if_hsr.h> +#include <linux/if_vlan.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/net_tstamp.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> +#include <linux/phy.h> +#include <linux/remoteproc/pruss.h> +#include <linux/ptp_classify.h> +#include <linux/regmap.h> +#include <linux/remoteproc.h> +#include <net/pkt_cls.h> + +#include "icssm_prueth.h" +#include "../icssg/icssg_mii_rt.h" +#include "../icssg/icss_iep.h" + +#define OCMC_RAM_SIZE (SZ_64K) + +#define TX_START_DELAY 0x40 +#define TX_CLK_DELAY_100M 0x6 +#define HR_TIMER_TX_DELAY_US 100 + +static void icssm_prueth_write_reg(struct prueth *prueth, + enum prueth_mem region, + unsigned int reg, u32 val) +{ + writel_relaxed(val, prueth->mem[region].va + reg); +} + +/* Below macro is for 1528 
Byte Frame support, to Allow even with + * Redundancy tag + */ +#define PRUSS_MII_RT_RX_FRMS_MAX_SUPPORT_EMAC (VLAN_ETH_FRAME_LEN + \ + ETH_FCS_LEN + \ + ICSSM_LRE_TAG_SIZE) + +/* ensure that order of PRUSS mem regions is same as enum prueth_mem */ +static enum pruss_mem pruss_mem_ids[] = { PRUSS_MEM_DRAM0, PRUSS_MEM_DRAM1, + PRUSS_MEM_SHRD_RAM2 }; + +static const struct prueth_queue_info queue_infos[][NUM_QUEUES] = { + [PRUETH_PORT_QUEUE_HOST] = { + [PRUETH_QUEUE1] = { + P0_Q1_BUFFER_OFFSET, + HOST_QUEUE_DESC_OFFSET, + P0_Q1_BD_OFFSET, + P0_Q1_BD_OFFSET + ((HOST_QUEUE_1_SIZE - 1) * BD_SIZE), + }, + [PRUETH_QUEUE2] = { + P0_Q2_BUFFER_OFFSET, + HOST_QUEUE_DESC_OFFSET + 8, + P0_Q2_BD_OFFSET, + P0_Q2_BD_OFFSET + ((HOST_QUEUE_2_SIZE - 1) * BD_SIZE), + }, + [PRUETH_QUEUE3] = { + P0_Q3_BUFFER_OFFSET, + HOST_QUEUE_DESC_OFFSET + 16, + P0_Q3_BD_OFFSET, + P0_Q3_BD_OFFSET + ((HOST_QUEUE_3_SIZE - 1) * BD_SIZE), + }, + [PRUETH_QUEUE4] = { + P0_Q4_BUFFER_OFFSET, + HOST_QUEUE_DESC_OFFSET + 24, + P0_Q4_BD_OFFSET, + P0_Q4_BD_OFFSET + ((HOST_QUEUE_4_SIZE - 1) * BD_SIZE), + }, + }, + [PRUETH_PORT_QUEUE_MII0] = { + [PRUETH_QUEUE1] = { + P1_Q1_BUFFER_OFFSET, + P1_Q1_BUFFER_OFFSET + ((QUEUE_1_SIZE - 1) * + ICSS_BLOCK_SIZE), + P1_Q1_BD_OFFSET, + P1_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE), + }, + [PRUETH_QUEUE2] = { + P1_Q2_BUFFER_OFFSET, + P1_Q2_BUFFER_OFFSET + ((QUEUE_2_SIZE - 1) * + ICSS_BLOCK_SIZE), + P1_Q2_BD_OFFSET, + P1_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE), + }, + [PRUETH_QUEUE3] = { + P1_Q3_BUFFER_OFFSET, + P1_Q3_BUFFER_OFFSET + ((QUEUE_3_SIZE - 1) * + ICSS_BLOCK_SIZE), + P1_Q3_BD_OFFSET, + P1_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE), + }, + [PRUETH_QUEUE4] = { + P1_Q4_BUFFER_OFFSET, + P1_Q4_BUFFER_OFFSET + ((QUEUE_4_SIZE - 1) * + ICSS_BLOCK_SIZE), + P1_Q4_BD_OFFSET, + P1_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE), + }, + }, + [PRUETH_PORT_QUEUE_MII1] = { + [PRUETH_QUEUE1] = { + P2_Q1_BUFFER_OFFSET, + P2_Q1_BUFFER_OFFSET + ((QUEUE_1_SIZE - 1) * + ICSS_BLOCK_SIZE), + P2_Q1_BD_OFFSET, + P2_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE), + }, + [PRUETH_QUEUE2] = { + P2_Q2_BUFFER_OFFSET, + P2_Q2_BUFFER_OFFSET + ((QUEUE_2_SIZE - 1) * + ICSS_BLOCK_SIZE), + P2_Q2_BD_OFFSET, + P2_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE), + }, + [PRUETH_QUEUE3] = { + P2_Q3_BUFFER_OFFSET, + P2_Q3_BUFFER_OFFSET + ((QUEUE_3_SIZE - 1) * + ICSS_BLOCK_SIZE), + P2_Q3_BD_OFFSET, + P2_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE), + }, + [PRUETH_QUEUE4] = { + P2_Q4_BUFFER_OFFSET, + P2_Q4_BUFFER_OFFSET + ((QUEUE_4_SIZE - 1) * + ICSS_BLOCK_SIZE), + P2_Q4_BD_OFFSET, + P2_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE), + }, + }, +}; + +static const struct prueth_queue_desc queue_descs[][NUM_QUEUES] = { + [PRUETH_PORT_QUEUE_HOST] = { + { .rd_ptr = P0_Q1_BD_OFFSET, .wr_ptr = P0_Q1_BD_OFFSET, }, + { .rd_ptr = P0_Q2_BD_OFFSET, .wr_ptr = P0_Q2_BD_OFFSET, }, + { .rd_ptr = P0_Q3_BD_OFFSET, .wr_ptr = P0_Q3_BD_OFFSET, }, + { .rd_ptr = P0_Q4_BD_OFFSET, .wr_ptr = P0_Q4_BD_OFFSET, }, + }, + [PRUETH_PORT_QUEUE_MII0] = { + { .rd_ptr = P1_Q1_BD_OFFSET, .wr_ptr = P1_Q1_BD_OFFSET, }, + { .rd_ptr = P1_Q2_BD_OFFSET, .wr_ptr = P1_Q2_BD_OFFSET, }, + { .rd_ptr = P1_Q3_BD_OFFSET, .wr_ptr = P1_Q3_BD_OFFSET, }, + { .rd_ptr = P1_Q4_BD_OFFSET, .wr_ptr = P1_Q4_BD_OFFSET, }, + }, + [PRUETH_PORT_QUEUE_MII1] = { + { .rd_ptr = P2_Q1_BD_OFFSET, .wr_ptr = P2_Q1_BD_OFFSET, }, + { .rd_ptr = P2_Q2_BD_OFFSET, .wr_ptr = P2_Q2_BD_OFFSET, }, + { .rd_ptr = P2_Q3_BD_OFFSET, .wr_ptr = P2_Q3_BD_OFFSET, }, + { .rd_ptr = P2_Q4_BD_OFFSET, .wr_ptr = P2_Q4_BD_OFFSET, 
}, + } +}; + +static void icssm_prueth_hostconfig(struct prueth *prueth) +{ + void __iomem *sram_base = prueth->mem[PRUETH_MEM_SHARED_RAM].va; + void __iomem *sram; + + /* queue size lookup table */ + sram = sram_base + HOST_QUEUE_SIZE_ADDR; + writew(HOST_QUEUE_1_SIZE, sram); + writew(HOST_QUEUE_2_SIZE, sram + 2); + writew(HOST_QUEUE_3_SIZE, sram + 4); + writew(HOST_QUEUE_4_SIZE, sram + 6); + + /* queue information table */ + sram = sram_base + HOST_Q1_RX_CONTEXT_OFFSET; + memcpy_toio(sram, queue_infos[PRUETH_PORT_QUEUE_HOST], + sizeof(queue_infos[PRUETH_PORT_QUEUE_HOST])); + + /* buffer offset table */ + sram = sram_base + HOST_QUEUE_OFFSET_ADDR; + writew(P0_Q1_BUFFER_OFFSET, sram); + writew(P0_Q2_BUFFER_OFFSET, sram + 2); + writew(P0_Q3_BUFFER_OFFSET, sram + 4); + writew(P0_Q4_BUFFER_OFFSET, sram + 6); + + /* buffer descriptor offset table*/ + sram = sram_base + HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR; + writew(P0_Q1_BD_OFFSET, sram); + writew(P0_Q2_BD_OFFSET, sram + 2); + writew(P0_Q3_BD_OFFSET, sram + 4); + writew(P0_Q4_BD_OFFSET, sram + 6); + + /* queue table */ + sram = sram_base + HOST_QUEUE_DESC_OFFSET; + memcpy_toio(sram, queue_descs[PRUETH_PORT_QUEUE_HOST], + sizeof(queue_descs[PRUETH_PORT_QUEUE_HOST])); +} + +static void icssm_prueth_mii_init(struct prueth *prueth) +{ + struct regmap *mii_rt; + u32 rxcfg_reg, rxcfg; + u32 txcfg_reg, txcfg; + + mii_rt = prueth->mii_rt; + + rxcfg = PRUSS_MII_RT_RXCFG_RX_ENABLE | + PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS | + PRUSS_MII_RT_RXCFG_RX_L2_EN | + PRUSS_MII_RT_RXCFG_RX_CUT_PREAMBLE | + PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS; + + /* Configuration of Port 0 Rx */ + rxcfg_reg = PRUSS_MII_RT_RXCFG0; + + regmap_write(mii_rt, rxcfg_reg, rxcfg); + + /* Configuration of Port 1 Rx */ + rxcfg_reg = PRUSS_MII_RT_RXCFG1; + + rxcfg |= PRUSS_MII_RT_RXCFG_RX_MUX_SEL; + + regmap_write(mii_rt, rxcfg_reg, rxcfg); + + txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE | + PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE | + PRUSS_MII_RT_TXCFG_TX_32_MODE_EN | + (TX_START_DELAY << PRUSS_MII_RT_TXCFG_TX_START_DELAY_SHIFT) | + (TX_CLK_DELAY_100M << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT); + + /* Configuration of Port 0 Tx */ + txcfg_reg = PRUSS_MII_RT_TXCFG0; + + regmap_write(mii_rt, txcfg_reg, txcfg); + + txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL; + + /* Configuration of Port 1 Tx */ + txcfg_reg = PRUSS_MII_RT_TXCFG1; + + regmap_write(mii_rt, txcfg_reg, txcfg); + + txcfg_reg = PRUSS_MII_RT_RX_FRMS0; + + /* Min frame length should be set to 64 to allow receive of standard + * Ethernet frames such as PTP, LLDP that will not have the tag/rct. + * Actual size written to register is size - 1 per TRM. This also + * includes CRC/FCS. + */ + txcfg = FIELD_PREP(PRUSS_MII_RT_RX_FRMS_MIN_FRM_MASK, + (PRUSS_MII_RT_RX_FRMS_MIN_FRM - 1)); + + /* For EMAC, set Max frame size to 1528 i.e size with VLAN. + * Actual size written to register is size - 1 as per TRM. + * Since driver support run time change of protocol, driver + * must overwrite the values based on Ethernet type. 
+ */ + txcfg |= FIELD_PREP(PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK, + (PRUSS_MII_RT_RX_FRMS_MAX_SUPPORT_EMAC - 1)); + + regmap_write(mii_rt, txcfg_reg, txcfg); + + txcfg_reg = PRUSS_MII_RT_RX_FRMS1; + + regmap_write(mii_rt, txcfg_reg, txcfg); +} + +static void icssm_prueth_clearmem(struct prueth *prueth, enum prueth_mem region) +{ + memset_io(prueth->mem[region].va, 0, prueth->mem[region].size); +} + +static void icssm_prueth_hostinit(struct prueth *prueth) +{ + /* Clear shared RAM */ + icssm_prueth_clearmem(prueth, PRUETH_MEM_SHARED_RAM); + + /* Clear OCMC RAM */ + icssm_prueth_clearmem(prueth, PRUETH_MEM_OCMC); + + /* Clear data RAMs */ + if (prueth->eth_node[PRUETH_MAC0]) + icssm_prueth_clearmem(prueth, PRUETH_MEM_DRAM0); + if (prueth->eth_node[PRUETH_MAC1]) + icssm_prueth_clearmem(prueth, PRUETH_MEM_DRAM1); + + /* Initialize host queues in shared RAM */ + icssm_prueth_hostconfig(prueth); + + /* Configure MII_RT */ + icssm_prueth_mii_init(prueth); +} + +/* This function initialize the driver in EMAC mode + * based on eth_type + */ +static void icssm_prueth_init_ethernet_mode(struct prueth *prueth) +{ + icssm_prueth_hostinit(prueth); +} + +static void icssm_prueth_port_enable(struct prueth_emac *emac, bool enable) +{ + struct prueth *prueth = emac->prueth; + void __iomem *port_ctrl; + void __iomem *ram; + + ram = prueth->mem[emac->dram].va; + port_ctrl = ram + PORT_CONTROL_ADDR; + writeb(!!enable, port_ctrl); +} + +static int icssm_prueth_emac_config(struct prueth_emac *emac) +{ + struct prueth *prueth = emac->prueth; + u32 sharedramaddr, ocmcaddr; + void __iomem *dram_base; + void __iomem *mac_addr; + void __iomem *dram; + void __iomem *sram; + + /* PRU needs local shared RAM address for C28 */ + sharedramaddr = ICSS_LOCAL_SHARED_RAM; + /* PRU needs real global OCMC address for C30*/ + ocmcaddr = (u32)prueth->mem[PRUETH_MEM_OCMC].pa; + sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va; + + /* Clear data RAM */ + icssm_prueth_clearmem(prueth, emac->dram); + + dram_base = prueth->mem[emac->dram].va; + + /* setup mac address */ + mac_addr = dram_base + PORT_MAC_ADDR; + memcpy_toio(mac_addr, emac->mac_addr, 6); + + /* queue information table */ + dram = dram_base + TX_CONTEXT_Q1_OFFSET_ADDR; + memcpy_toio(dram, queue_infos[emac->port_id], + sizeof(queue_infos[emac->port_id])); + + /* queue table */ + dram = dram_base + PORT_QUEUE_DESC_OFFSET; + memcpy_toio(dram, queue_descs[emac->port_id], + sizeof(queue_descs[emac->port_id])); + + emac->rx_queue_descs = sram + HOST_QUEUE_DESC_OFFSET; + emac->tx_queue_descs = dram; + + /* Set in constant table C28 of PRU0 to ICSS Shared memory */ + pru_rproc_set_ctable(emac->pru, PRU_C28, sharedramaddr); + + /* Set in constant table C30 of PRU0 to OCMC memory */ + pru_rproc_set_ctable(emac->pru, PRU_C30, ocmcaddr); + + return 0; +} + +/* called back by PHY layer if there is change in link state of hw port*/ +static void icssm_emac_adjust_link(struct net_device *ndev) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct phy_device *phydev = emac->phydev; + struct prueth *prueth = emac->prueth; + bool new_state = false; + enum prueth_mem region; + unsigned long flags; + u32 port_status = 0; + u32 txcfg, mask; + u32 delay; + + spin_lock_irqsave(&emac->lock, flags); + + if (phydev->link) { + /* check the mode of operation */ + if (phydev->duplex != emac->duplex) { + new_state = true; + emac->duplex = phydev->duplex; + } + if (phydev->speed != emac->speed) { + new_state = true; + emac->speed = phydev->speed; + } + if (!emac->link) { + new_state = true; + emac->link 
= 1; + } + } else if (emac->link) { + new_state = true; + emac->link = 0; + } + + if (new_state) { + phy_print_status(phydev); + region = emac->dram; + + /* update phy/port status information based on PHY values*/ + if (emac->link) { + port_status |= PORT_LINK_MASK; + + icssm_prueth_write_reg(prueth, region, PHY_SPEED_OFFSET, + emac->speed); + + delay = TX_CLK_DELAY_100M; + delay = delay << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT; + mask = PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_MASK; + + if (emac->port_id) + txcfg = PRUSS_MII_RT_TXCFG1; + else + txcfg = PRUSS_MII_RT_TXCFG0; + + regmap_update_bits(prueth->mii_rt, txcfg, mask, delay); + } + + writeb(port_status, prueth->mem[region].va + + PORT_STATUS_OFFSET); + } + + if (emac->link) { + /* reactivate the transmit queue if it is stopped */ + if (netif_running(ndev) && netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + } else { + if (!netif_queue_stopped(ndev)) + netif_stop_queue(ndev); + } + + spin_unlock_irqrestore(&emac->lock, flags); +} + +static unsigned int +icssm_get_buff_desc_count(const struct prueth_queue_info *queue) +{ + unsigned int buffer_desc_count; + + buffer_desc_count = queue->buffer_desc_end - + queue->buffer_desc_offset; + buffer_desc_count /= BD_SIZE; + buffer_desc_count++; + + return buffer_desc_count; +} + +static void icssm_get_block(struct prueth_queue_desc __iomem *queue_desc, + const struct prueth_queue_info *queue, + int *write_block, int *read_block) +{ + *write_block = (readw(&queue_desc->wr_ptr) - + queue->buffer_desc_offset) / BD_SIZE; + *read_block = (readw(&queue_desc->rd_ptr) - + queue->buffer_desc_offset) / BD_SIZE; +} + +/** + * icssm_emac_rx_irq - EMAC Rx interrupt handler + * @irq: interrupt number + * @dev_id: pointer to net_device + * + * EMAC Interrupt handler - we only schedule NAPI and not process any packets + * here. 
+ * + * Return: IRQ_HANDLED if the interrupt handled + */ +static irqreturn_t icssm_emac_rx_irq(int irq, void *dev_id) +{ + struct net_device *ndev = (struct net_device *)dev_id; + struct prueth_emac *emac = netdev_priv(ndev); + + if (likely(netif_running(ndev))) { + /* disable Rx system event */ + disable_irq_nosync(emac->rx_irq); + napi_schedule(&emac->napi); + } + + return IRQ_HANDLED; +} + +/** + * icssm_prueth_tx_enqueue - queue a packet to firmware for transmission + * + * @emac: EMAC data structure + * @skb: packet data buffer + * @queue_id: priority queue id + * + * Return: 0 (Success) + */ +static int icssm_prueth_tx_enqueue(struct prueth_emac *emac, + struct sk_buff *skb, + enum prueth_queue_id queue_id) +{ + struct prueth_queue_desc __iomem *queue_desc; + const struct prueth_queue_info *txqueue; + struct net_device *ndev = emac->ndev; + unsigned int buffer_desc_count; + int free_blocks, update_block; + bool buffer_wrapped = false; + int write_block, read_block; + void *src_addr, *dst_addr; + int pkt_block_size; + void __iomem *dram; + int txport, pktlen; + u16 update_wr_ptr; + u32 wr_buf_desc; + void *ocmc_ram; + + dram = emac->prueth->mem[emac->dram].va; + if (eth_skb_pad(skb)) { + if (netif_msg_tx_err(emac) && net_ratelimit()) + netdev_err(ndev, "packet pad failed\n"); + return -ENOMEM; + } + + /* which port to tx: MII0 or MII1 */ + txport = emac->tx_port_queue; + src_addr = skb->data; + pktlen = skb->len; + /* Get the tx queue */ + queue_desc = emac->tx_queue_descs + queue_id; + txqueue = &queue_infos[txport][queue_id]; + + buffer_desc_count = icssm_get_buff_desc_count(txqueue); + + /* the PRU firmware deals mostly in pointers already + * offset into ram, we would like to deal in indexes + * within the queue we are working with for code + * simplicity, calculate this here + */ + icssm_get_block(queue_desc, txqueue, &write_block, &read_block); + + if (write_block > read_block) { + free_blocks = buffer_desc_count - write_block; + free_blocks += read_block; + } else if (write_block < read_block) { + free_blocks = read_block - write_block; + } else { /* they are all free */ + free_blocks = buffer_desc_count; + } + + pkt_block_size = DIV_ROUND_UP(pktlen, ICSS_BLOCK_SIZE); + if (pkt_block_size > free_blocks) /* out of queue space */ + return -ENOBUFS; + + /* calculate end BD address post write */ + update_block = write_block + pkt_block_size; + + /* Check for wrap around */ + if (update_block >= buffer_desc_count) { + update_block %= buffer_desc_count; + buffer_wrapped = true; + } + + /* OCMC RAM is not cached and write order is not important */ + ocmc_ram = (__force void *)emac->prueth->mem[PRUETH_MEM_OCMC].va; + dst_addr = ocmc_ram + txqueue->buffer_offset + + (write_block * ICSS_BLOCK_SIZE); + + /* Copy the data from socket buffer(DRAM) to PRU buffers(OCMC) */ + if (buffer_wrapped) { /* wrapped around buffer */ + int bytes = (buffer_desc_count - write_block) * ICSS_BLOCK_SIZE; + int remaining; + + /* bytes is integral multiple of ICSS_BLOCK_SIZE but + * entire packet may have fit within the last BD + * if pkt_info.length is not integral multiple of + * ICSS_BLOCK_SIZE + */ + if (pktlen < bytes) + bytes = pktlen; + + /* copy non-wrapped part */ + memcpy(dst_addr, src_addr, bytes); + + /* copy wrapped part */ + src_addr += bytes; + remaining = pktlen - bytes; + dst_addr = ocmc_ram + txqueue->buffer_offset; + memcpy(dst_addr, src_addr, remaining); + } else { + memcpy(dst_addr, src_addr, pktlen); + } + + /* update first buffer descriptor */ + wr_buf_desc = (pktlen << 
PRUETH_BD_LENGTH_SHIFT) & + PRUETH_BD_LENGTH_MASK; + writel(wr_buf_desc, dram + readw(&queue_desc->wr_ptr)); + + /* update the write pointer in this queue descriptor, the firmware + * polls for this change so this will signal the start of transmission + */ + update_wr_ptr = txqueue->buffer_desc_offset + (update_block * BD_SIZE); + writew(update_wr_ptr, &queue_desc->wr_ptr); + + return 0; +} + +void icssm_parse_packet_info(struct prueth *prueth, u32 buffer_descriptor, + struct prueth_packet_info *pkt_info) +{ + pkt_info->shadow = !!(buffer_descriptor & PRUETH_BD_SHADOW_MASK); + pkt_info->port = (buffer_descriptor & PRUETH_BD_PORT_MASK) >> + PRUETH_BD_PORT_SHIFT; + pkt_info->length = (buffer_descriptor & PRUETH_BD_LENGTH_MASK) >> + PRUETH_BD_LENGTH_SHIFT; + pkt_info->broadcast = !!(buffer_descriptor & PRUETH_BD_BROADCAST_MASK); + pkt_info->error = !!(buffer_descriptor & PRUETH_BD_ERROR_MASK); + pkt_info->lookup_success = !!(buffer_descriptor & + PRUETH_BD_LOOKUP_SUCCESS_MASK); + pkt_info->flood = !!(buffer_descriptor & PRUETH_BD_SW_FLOOD_MASK); + pkt_info->timestamp = !!(buffer_descriptor & PRUETH_BD_TIMESTAMP_MASK); +} + +/** + * icssm_emac_rx_packet - EMAC Receive function + * + * @emac: EMAC data structure + * @bd_rd_ptr: Buffer descriptor read pointer + * @pkt_info: packet information structure + * @rxqueue: Receive queue information structure + * + * Get a packet from receive queue + * + * Return: 0 (Success) + */ +int icssm_emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr, + struct prueth_packet_info *pkt_info, + const struct prueth_queue_info *rxqueue) +{ + struct net_device *ndev = emac->ndev; + unsigned int buffer_desc_count; + int read_block, update_block; + unsigned int actual_pkt_len; + bool buffer_wrapped = false; + void *src_addr, *dst_addr; + struct sk_buff *skb; + int pkt_block_size; + void *ocmc_ram; + + /* the PRU firmware deals mostly in pointers already + * offset into ram, we would like to deal in indexes + * within the queue we are working with for code + * simplicity, calculate this here + */ + buffer_desc_count = icssm_get_buff_desc_count(rxqueue); + read_block = (*bd_rd_ptr - rxqueue->buffer_desc_offset) / BD_SIZE; + pkt_block_size = DIV_ROUND_UP(pkt_info->length, ICSS_BLOCK_SIZE); + + /* calculate end BD address post read */ + update_block = read_block + pkt_block_size; + + /* Check for wrap around */ + if (update_block >= buffer_desc_count) { + update_block %= buffer_desc_count; + if (update_block) + buffer_wrapped = true; + } + + /* calculate new pointer in ram */ + *bd_rd_ptr = rxqueue->buffer_desc_offset + (update_block * BD_SIZE); + + actual_pkt_len = pkt_info->length; + + /* Allocate a socket buffer for this packet */ + skb = netdev_alloc_skb_ip_align(ndev, actual_pkt_len); + if (!skb) { + if (netif_msg_rx_err(emac) && net_ratelimit()) + netdev_err(ndev, "failed rx buffer alloc\n"); + return -ENOMEM; + } + + dst_addr = skb->data; + + /* OCMC RAM is not cached and read order is not important */ + ocmc_ram = (__force void *)emac->prueth->mem[PRUETH_MEM_OCMC].va; + + /* Get the start address of the first buffer from + * the read buffer description + */ + src_addr = ocmc_ram + rxqueue->buffer_offset + + (read_block * ICSS_BLOCK_SIZE); + + /* Copy the data from PRU buffers(OCMC) to socket buffer(DRAM) */ + if (buffer_wrapped) { /* wrapped around buffer */ + int bytes = (buffer_desc_count - read_block) * ICSS_BLOCK_SIZE; + int remaining; + /* bytes is integral multiple of ICSS_BLOCK_SIZE but + * entire packet may have fit within the last BD + * if 
pkt_info.length is not integral multiple of + * ICSS_BLOCK_SIZE + */ + if (pkt_info->length < bytes) + bytes = pkt_info->length; + + /* copy non-wrapped part */ + memcpy(dst_addr, src_addr, bytes); + + /* copy wrapped part */ + dst_addr += bytes; + remaining = actual_pkt_len - bytes; + + src_addr = ocmc_ram + rxqueue->buffer_offset; + memcpy(dst_addr, src_addr, remaining); + src_addr += remaining; + } else { + memcpy(dst_addr, src_addr, actual_pkt_len); + src_addr += actual_pkt_len; + } + + skb_put(skb, actual_pkt_len); + + /* send packet up the stack */ + skb->protocol = eth_type_trans(skb, ndev); + netif_receive_skb(skb); + + /* update stats */ + emac->stats.rx_bytes += actual_pkt_len; + emac->stats.rx_packets++; + + return 0; +} + +static int icssm_emac_rx_packets(struct prueth_emac *emac, int budget) +{ + struct prueth_queue_desc __iomem *queue_desc; + const struct prueth_queue_info *rxqueue; + struct prueth *prueth = emac->prueth; + struct prueth_packet_info pkt_info; + int start_queue, end_queue; + void __iomem *shared_ram; + u16 bd_rd_ptr, bd_wr_ptr; + u16 update_rd_ptr; + u8 overflow_cnt; + u32 rd_buf_desc; + int used = 0; + int i, ret; + + shared_ram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va; + + start_queue = emac->rx_queue_start; + end_queue = emac->rx_queue_end; + + /* skip Rx if budget is 0 */ + if (!budget) + return 0; + + /* search host queues for packets */ + for (i = start_queue; i <= end_queue; i++) { + queue_desc = emac->rx_queue_descs + i; + rxqueue = &queue_infos[PRUETH_PORT_HOST][i]; + + overflow_cnt = readb(&queue_desc->overflow_cnt); + if (overflow_cnt > 0) { + emac->stats.rx_over_errors += overflow_cnt; + /* reset to zero */ + writeb(0, &queue_desc->overflow_cnt); + } + + bd_rd_ptr = readw(&queue_desc->rd_ptr); + bd_wr_ptr = readw(&queue_desc->wr_ptr); + + /* while packets are available in this queue */ + while (bd_rd_ptr != bd_wr_ptr) { + /* get packet info from the read buffer descriptor */ + rd_buf_desc = readl(shared_ram + bd_rd_ptr); + icssm_parse_packet_info(prueth, rd_buf_desc, &pkt_info); + + if (pkt_info.length <= 0) { + /* a packet length of zero will cause us to + * never move the read pointer ahead, locking + * the driver, so we manually have to move it + * to the write pointer, discarding all + * remaining packets in this queue. This should + * never happen. + */ + update_rd_ptr = bd_wr_ptr; + emac->stats.rx_length_errors++; + } else if (pkt_info.length > EMAC_MAX_FRM_SUPPORT) { + /* if the packet is too large we skip it but we + * still need to move the read pointer ahead + * and assume something is wrong with the read + * pointer as the firmware should be filtering + * these packets + */ + update_rd_ptr = bd_wr_ptr; + emac->stats.rx_length_errors++; + } else { + update_rd_ptr = bd_rd_ptr; + ret = icssm_emac_rx_packet(emac, &update_rd_ptr, + &pkt_info, rxqueue); + if (ret) + return used; + used++; + } + + /* after reading the buffer descriptor we clear it + * to prevent improperly moved read pointer errors + * from simply looking like old packets. + */ + writel(0, shared_ram + bd_rd_ptr); + + /* update read pointer in queue descriptor */ + writew(update_rd_ptr, &queue_desc->rd_ptr); + bd_rd_ptr = update_rd_ptr; + + /* all we have room for? 
*/ + if (used >= budget) + return used; + } + } + + return used; +} + +static int icssm_emac_napi_poll(struct napi_struct *napi, int budget) +{ + struct prueth_emac *emac = container_of(napi, struct prueth_emac, napi); + int num_rx; + + num_rx = icssm_emac_rx_packets(emac, budget); + + if (num_rx < budget && napi_complete_done(napi, num_rx)) + enable_irq(emac->rx_irq); + + return num_rx; +} + +static int icssm_emac_set_boot_pru(struct prueth_emac *emac, + struct net_device *ndev) +{ + const struct prueth_firmware *pru_firmwares; + struct prueth *prueth = emac->prueth; + const char *fw_name; + int ret; + + pru_firmwares = &prueth->fw_data->fw_pru[emac->port_id - 1]; + fw_name = pru_firmwares->fw_name[prueth->eth_type]; + if (!fw_name) { + netdev_err(ndev, "eth_type %d not supported\n", + prueth->eth_type); + return -ENODEV; + } + + ret = rproc_set_firmware(emac->pru, fw_name); + if (ret) { + netdev_err(ndev, "failed to set %s firmware: %d\n", + fw_name, ret); + return ret; + } + + ret = rproc_boot(emac->pru); + if (ret) { + netdev_err(ndev, "failed to boot %s firmware: %d\n", + fw_name, ret); + return ret; + } + return ret; +} + +static int icssm_emac_request_irqs(struct prueth_emac *emac) +{ + struct net_device *ndev = emac->ndev; + int ret; + + ret = request_irq(emac->rx_irq, icssm_emac_rx_irq, + IRQF_TRIGGER_HIGH, + ndev->name, ndev); + if (ret) { + netdev_err(ndev, "unable to request RX IRQ\n"); + return ret; + } + + return ret; +} + +static void icssm_ptp_dram_init(struct prueth_emac *emac) +{ + void __iomem *sram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va; + u64 temp64; + + writew(0, sram + MII_RX_CORRECTION_OFFSET); + writew(0, sram + MII_TX_CORRECTION_OFFSET); + + /* Initialize RCF to 1 (Linux N/A) */ + writel(1 * 1024, sram + TIMESYNC_TC_RCF_OFFSET); + + /* This flag will be set and cleared by firmware */ + /* Write Sync0 period for sync signal generation in PTP + * memory in shared RAM + */ + writel(200000000 / 50, sram + TIMESYNC_SYNC0_WIDTH_OFFSET); + + /* Write CMP1 period for sync signal generation in PTP + * memory in shared RAM + */ + temp64 = 1000000; + memcpy_toio(sram + TIMESYNC_CMP1_CMP_OFFSET, &temp64, sizeof(temp64)); + + /* Write Sync0 period for sync signal generation in PTP + * memory in shared RAM + */ + writel(1000000, sram + TIMESYNC_CMP1_PERIOD_OFFSET); + + /* Configures domainNumber list. Firmware supports 2 domains */ + writeb(0, sram + TIMESYNC_DOMAIN_NUMBER_LIST); + writeb(0, sram + TIMESYNC_DOMAIN_NUMBER_LIST + 1); + + /* Configure 1-step/2-step */ + writeb(1, sram + DISABLE_SWITCH_SYNC_RELAY_OFFSET); + + /* Configures the setting to Link local frame without HSR tag */ + writeb(0, sram + LINK_LOCAL_FRAME_HAS_HSR_TAG); + + /* Enable E2E/UDP PTP message timestamping */ + writeb(1, sram + PTP_IPV4_UDP_E2E_ENABLE); +} + +/** + * icssm_emac_ndo_open - EMAC device open + * @ndev: network adapter device + * + * Called when system wants to start the interface. 
+ * + * Return: 0 for a successful open, or appropriate error code + */ +static int icssm_emac_ndo_open(struct net_device *ndev) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + int ret; + + /* set h/w MAC as user might have re-configured */ + ether_addr_copy(emac->mac_addr, ndev->dev_addr); + + if (!prueth->emac_configured) + icssm_prueth_init_ethernet_mode(prueth); + + icssm_prueth_emac_config(emac); + + if (!prueth->emac_configured) { + icssm_ptp_dram_init(emac); + ret = icss_iep_init(prueth->iep, NULL, NULL, 0); + if (ret) { + netdev_err(ndev, "Failed to initialize iep: %d\n", ret); + goto iep_exit; + } + } + + ret = icssm_emac_set_boot_pru(emac, ndev); + if (ret) + goto iep_exit; + + ret = icssm_emac_request_irqs(emac); + if (ret) + goto rproc_shutdown; + + napi_enable(&emac->napi); + + /* start PHY */ + phy_start(emac->phydev); + + /* enable the port and vlan */ + icssm_prueth_port_enable(emac, true); + + prueth->emac_configured |= BIT(emac->port_id); + + if (netif_msg_drv(emac)) + dev_notice(&ndev->dev, "started\n"); + + return 0; + +rproc_shutdown: + rproc_shutdown(emac->pru); + +iep_exit: + if (!prueth->emac_configured) + icss_iep_exit(prueth->iep); + + return ret; +} + +/** + * icssm_emac_ndo_stop - EMAC device stop + * @ndev: network adapter device + * + * Called when system wants to stop or down the interface. + * + * Return: Always 0 (Success) + */ +static int icssm_emac_ndo_stop(struct net_device *ndev) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + + prueth->emac_configured &= ~BIT(emac->port_id); + + /* disable the mac port */ + icssm_prueth_port_enable(emac, false); + + /* stop PHY */ + phy_stop(emac->phydev); + + napi_disable(&emac->napi); + hrtimer_cancel(&emac->tx_hrtimer); + + /* stop the PRU */ + rproc_shutdown(emac->pru); + + /* free rx interrupts */ + free_irq(emac->rx_irq, ndev); + + if (netif_msg_drv(emac)) + dev_notice(&ndev->dev, "stopped\n"); + + return 0; +} + +/* VLAN-tag PCP to priority queue map for EMAC/Switch/HSR/PRP used by driver + * Index is PCP val / 2. 
+ * low - pcp 0..3 maps to Q4 for Host + * high - pcp 4..7 maps to Q3 for Host + * low - pcp 0..3 maps to Q2 (FWD Queue) for PRU-x + * where x = 1 for PRUETH_PORT_MII0 + * 0 for PRUETH_PORT_MII1 + * high - pcp 4..7 maps to Q1 (FWD Queue) for PRU-x + */ +static const unsigned short emac_pcp_tx_priority_queue_map[] = { + PRUETH_QUEUE4, PRUETH_QUEUE4, + PRUETH_QUEUE3, PRUETH_QUEUE3, + PRUETH_QUEUE2, PRUETH_QUEUE2, + PRUETH_QUEUE1, PRUETH_QUEUE1, +}; + +static u16 icssm_prueth_get_tx_queue_id(struct prueth *prueth, + struct sk_buff *skb) +{ + u16 vlan_tci, pcp; + int err; + + err = vlan_get_tag(skb, &vlan_tci); + if (likely(err)) + pcp = 0; + else + pcp = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + + /* Below code (pcp >>= 1) is made common for all + * protocols (i.e., EMAC, RSTP, HSR and PRP)* + * pcp value 0,1 will be updated to 0 mapped to QUEUE4 + * pcp value 2,3 will be updated to 1 mapped to QUEUE4 + * pcp value 4,5 will be updated to 2 mapped to QUEUE3 + * pcp value 6,7 will be updated to 3 mapped to QUEUE3 + */ + pcp >>= 1; + + return emac_pcp_tx_priority_queue_map[pcp]; +} + +/** + * icssm_emac_ndo_start_xmit - EMAC Transmit function + * @skb: SKB pointer + * @ndev: EMAC network adapter + * + * Called by the system to transmit a packet - we queue the packet in + * EMAC hardware transmit queue + * + * Return: enum netdev_tx + */ +static enum netdev_tx icssm_emac_ndo_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct prueth_emac *emac = netdev_priv(ndev); + int ret; + u16 qid; + + qid = icssm_prueth_get_tx_queue_id(emac->prueth, skb); + ret = icssm_prueth_tx_enqueue(emac, skb, qid); + if (ret) { + if (ret != -ENOBUFS && netif_msg_tx_err(emac) && + net_ratelimit()) + netdev_err(ndev, "packet queue failed: %d\n", ret); + goto fail_tx; + } + + emac->stats.tx_packets++; + emac->stats.tx_bytes += skb->len; + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; + +fail_tx: + if (ret == -ENOBUFS) { + netif_stop_queue(ndev); + hrtimer_start(&emac->tx_hrtimer, + us_to_ktime(HR_TIMER_TX_DELAY_US), + HRTIMER_MODE_REL_PINNED); + ret = NETDEV_TX_BUSY; + } else { + /* error */ + emac->stats.tx_dropped++; + ret = NET_XMIT_DROP; + } + + return ret; +} + +/** + * icssm_emac_ndo_get_stats64 - EMAC get statistics function + * @ndev: The EMAC network adapter + * @stats: rtnl_link_stats structure + * + * Called when system wants to get statistics from the device. 
+ * + */ +static void icssm_emac_ndo_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct prueth_emac *emac = netdev_priv(ndev); + + stats->rx_packets = emac->stats.rx_packets; + stats->rx_bytes = emac->stats.rx_bytes; + stats->tx_packets = emac->stats.tx_packets; + stats->tx_bytes = emac->stats.tx_bytes; + stats->tx_dropped = emac->stats.tx_dropped; + stats->rx_over_errors = emac->stats.rx_over_errors; + stats->rx_length_errors = emac->stats.rx_length_errors; +} + +static const struct net_device_ops emac_netdev_ops = { + .ndo_open = icssm_emac_ndo_open, + .ndo_stop = icssm_emac_ndo_stop, + .ndo_start_xmit = icssm_emac_ndo_start_xmit, + .ndo_get_stats64 = icssm_emac_ndo_get_stats64, +}; + +/* get emac_port corresponding to eth_node name */ +static int icssm_prueth_node_port(struct device_node *eth_node) +{ + u32 port_id; + int ret; + + ret = of_property_read_u32(eth_node, "reg", &port_id); + if (ret) + return ret; + + if (port_id == 0) + return PRUETH_PORT_MII0; + else if (port_id == 1) + return PRUETH_PORT_MII1; + else + return PRUETH_PORT_INVALID; +} + +/* get MAC instance corresponding to eth_node name */ +static int icssm_prueth_node_mac(struct device_node *eth_node) +{ + u32 port_id; + int ret; + + ret = of_property_read_u32(eth_node, "reg", &port_id); + if (ret) + return ret; + + if (port_id == 0) + return PRUETH_MAC0; + else if (port_id == 1) + return PRUETH_MAC1; + else + return PRUETH_MAC_INVALID; +} + +static enum hrtimer_restart icssm_emac_tx_timer_callback(struct hrtimer *timer) +{ + struct prueth_emac *emac = + container_of(timer, struct prueth_emac, tx_hrtimer); + + if (netif_queue_stopped(emac->ndev)) + netif_wake_queue(emac->ndev); + + return HRTIMER_NORESTART; +} + +static int icssm_prueth_netdev_init(struct prueth *prueth, + struct device_node *eth_node) +{ + struct prueth_emac *emac; + struct net_device *ndev; + enum prueth_port port; + enum prueth_mac mac; + int ret; + + port = icssm_prueth_node_port(eth_node); + if (port == PRUETH_PORT_INVALID) + return -EINVAL; + + mac = icssm_prueth_node_mac(eth_node); + if (mac == PRUETH_MAC_INVALID) + return -EINVAL; + + ndev = devm_alloc_etherdev(prueth->dev, sizeof(*emac)); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, prueth->dev); + emac = netdev_priv(ndev); + prueth->emac[mac] = emac; + emac->prueth = prueth; + emac->ndev = ndev; + emac->port_id = port; + + /* by default eth_type is EMAC */ + switch (port) { + case PRUETH_PORT_MII0: + emac->tx_port_queue = PRUETH_PORT_QUEUE_MII0; + + /* packets from MII0 are on queues 1 through 2 */ + emac->rx_queue_start = PRUETH_QUEUE1; + emac->rx_queue_end = PRUETH_QUEUE2; + + emac->dram = PRUETH_MEM_DRAM0; + emac->pru = prueth->pru0; + break; + case PRUETH_PORT_MII1: + emac->tx_port_queue = PRUETH_PORT_QUEUE_MII1; + + /* packets from MII1 are on queues 3 through 4 */ + emac->rx_queue_start = PRUETH_QUEUE3; + emac->rx_queue_end = PRUETH_QUEUE4; + + emac->dram = PRUETH_MEM_DRAM1; + emac->pru = prueth->pru1; + break; + default: + return -EINVAL; + } + + emac->rx_irq = of_irq_get_byname(eth_node, "rx"); + if (emac->rx_irq < 0) { + ret = emac->rx_irq; + if (ret != -EPROBE_DEFER) + dev_err(prueth->dev, "could not get rx irq\n"); + goto free; + } + + /* get mac address from DT and set private and netdev addr */ + ret = of_get_ethdev_address(eth_node, ndev); + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n", + port, ndev->dev_addr); + } + ether_addr_copy(emac->mac_addr, 
ndev->dev_addr); + + /* connect PHY */ + emac->phydev = of_phy_get_and_connect(ndev, eth_node, + icssm_emac_adjust_link); + if (!emac->phydev) { + dev_dbg(prueth->dev, "PHY connection failed\n"); + ret = -ENODEV; + goto free; + } + + /* remove unsupported modes */ + phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); + + phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + + phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Pause_BIT); + phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT); + + ndev->dev.of_node = eth_node; + ndev->netdev_ops = &emac_netdev_ops; + + netif_napi_add(ndev, &emac->napi, icssm_emac_napi_poll); + + hrtimer_setup(&emac->tx_hrtimer, &icssm_emac_tx_timer_callback, + CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); + + return 0; +free: + emac->ndev = NULL; + prueth->emac[mac] = NULL; + + return ret; +} + +static void icssm_prueth_netdev_exit(struct prueth *prueth, + struct device_node *eth_node) +{ + struct prueth_emac *emac; + enum prueth_mac mac; + + mac = icssm_prueth_node_mac(eth_node); + if (mac == PRUETH_MAC_INVALID) + return; + + emac = prueth->emac[mac]; + if (!emac) + return; + + phy_disconnect(emac->phydev); + + netif_napi_del(&emac->napi); + prueth->emac[mac] = NULL; +} + +static int icssm_prueth_probe(struct platform_device *pdev) +{ + struct device_node *eth0_node = NULL, *eth1_node = NULL; + struct device_node *eth_node, *eth_ports_node; + enum pruss_pru_id pruss_id0, pruss_id1; + struct device *dev = &pdev->dev; + struct device_node *np; + struct prueth *prueth; + struct pruss *pruss; + int i, ret; + + np = dev->of_node; + if (!np) + return -ENODEV; /* we don't support non DT */ + + prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL); + if (!prueth) + return -ENOMEM; + + platform_set_drvdata(pdev, prueth); + prueth->dev = dev; + prueth->fw_data = device_get_match_data(dev); + + eth_ports_node = of_get_child_by_name(np, "ethernet-ports"); + if (!eth_ports_node) + return -ENOENT; + + for_each_child_of_node(eth_ports_node, eth_node) { + u32 reg; + + if (strcmp(eth_node->name, "ethernet-port")) + continue; + ret = of_property_read_u32(eth_node, "reg", ®); + if (ret < 0) { + dev_err(dev, "%pOF error reading port_id %d\n", + eth_node, ret); + of_node_put(eth_node); + return ret; + } + + of_node_get(eth_node); + + if (reg == 0 && !eth0_node) { + eth0_node = eth_node; + if (!of_device_is_available(eth0_node)) { + of_node_put(eth0_node); + eth0_node = NULL; + } + } else if (reg == 1 && !eth1_node) { + eth1_node = eth_node; + if (!of_device_is_available(eth1_node)) { + of_node_put(eth1_node); + eth1_node = NULL; + } + } else { + if (reg == 0 || reg == 1) + dev_err(dev, "duplicate port reg value: %d\n", + reg); + else + dev_err(dev, "invalid port reg value: %d\n", + reg); + + of_node_put(eth_node); + } + } + + of_node_put(eth_ports_node); + + /* At least one node must be present and available else we fail */ + if (!eth0_node && !eth1_node) { + dev_err(dev, "neither port0 nor port1 node available\n"); + return -ENODEV; + } + + prueth->eth_node[PRUETH_MAC0] = eth0_node; + prueth->eth_node[PRUETH_MAC1] = eth1_node; + + prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt"); + if (IS_ERR(prueth->mii_rt)) { + dev_err(dev, "couldn't get mii-rt syscon regmap\n"); + ret = PTR_ERR(prueth->mii_rt); + goto put_eth; + } + + if (eth0_node) { + prueth->pru0 = pru_rproc_get(np, 0, &pruss_id0); + if (IS_ERR(prueth->pru0)) { + ret = 
PTR_ERR(prueth->pru0); + dev_err_probe(dev, ret, "unable to get PRU0"); + goto put_eth; + } + } + + if (eth1_node) { + prueth->pru1 = pru_rproc_get(np, 1, &pruss_id1); + if (IS_ERR(prueth->pru1)) { + ret = PTR_ERR(prueth->pru1); + dev_err_probe(dev, ret, "unable to get PRU1"); + goto put_pru0; + } + } + + pruss = pruss_get(prueth->pru0 ? prueth->pru0 : prueth->pru1); + if (IS_ERR(pruss)) { + ret = PTR_ERR(pruss); + dev_err(dev, "unable to get pruss handle\n"); + goto put_pru1; + } + prueth->pruss = pruss; + + /* Configure PRUSS */ + if (eth0_node) + pruss_cfg_gpimode(pruss, pruss_id0, PRUSS_GPI_MODE_MII); + if (eth1_node) + pruss_cfg_gpimode(pruss, pruss_id1, PRUSS_GPI_MODE_MII); + pruss_cfg_miirt_enable(pruss, true); + pruss_cfg_xfr_enable(pruss, PRU_TYPE_PRU, true); + + /* Get PRUSS mem resources */ + /* OCMC is system resource which we get separately */ + for (i = 0; i < ARRAY_SIZE(pruss_mem_ids); i++) { + /* skip appropriate DRAM if not required */ + if (!eth0_node && i == PRUETH_MEM_DRAM0) + continue; + + if (!eth1_node && i == PRUETH_MEM_DRAM1) + continue; + + ret = pruss_request_mem_region(pruss, pruss_mem_ids[i], + &prueth->mem[i]); + if (ret) { + dev_err(dev, "unable to get PRUSS resource %d: %d\n", + i, ret); + goto put_mem; + } + } + + prueth->sram_pool = of_gen_pool_get(np, "sram", 0); + if (!prueth->sram_pool) { + dev_err(dev, "unable to get SRAM pool\n"); + ret = -ENODEV; + goto put_mem; + } + + prueth->ocmc_ram_size = OCMC_RAM_SIZE; + /* Decreased by 8KB to address the reserved region for AM33x */ + if (prueth->fw_data->driver_data == PRUSS_AM33XX) + prueth->ocmc_ram_size = (SZ_64K - SZ_8K); + + prueth->mem[PRUETH_MEM_OCMC].va = + (void __iomem *)gen_pool_alloc(prueth->sram_pool, + prueth->ocmc_ram_size); + if (!prueth->mem[PRUETH_MEM_OCMC].va) { + dev_err(dev, "unable to allocate OCMC resource\n"); + ret = -ENOMEM; + goto put_mem; + } + prueth->mem[PRUETH_MEM_OCMC].pa = gen_pool_virt_to_phys + (prueth->sram_pool, (unsigned long) + prueth->mem[PRUETH_MEM_OCMC].va); + prueth->mem[PRUETH_MEM_OCMC].size = prueth->ocmc_ram_size; + dev_dbg(dev, "ocmc: pa %pa va %p size %#zx\n", + &prueth->mem[PRUETH_MEM_OCMC].pa, + prueth->mem[PRUETH_MEM_OCMC].va, + prueth->mem[PRUETH_MEM_OCMC].size); + + /* setup netdev interfaces */ + if (eth0_node) { + ret = icssm_prueth_netdev_init(prueth, eth0_node); + if (ret) { + if (ret != -EPROBE_DEFER) { + dev_err(dev, "netdev init %s failed: %d\n", + eth0_node->name, ret); + } + goto free_pool; + } + } + + if (eth1_node) { + ret = icssm_prueth_netdev_init(prueth, eth1_node); + if (ret) { + if (ret != -EPROBE_DEFER) { + dev_err(dev, "netdev init %s failed: %d\n", + eth1_node->name, ret); + } + goto netdev_exit; + } + } + + prueth->iep = icss_iep_get(np); + if (IS_ERR(prueth->iep)) { + ret = PTR_ERR(prueth->iep); + dev_err(dev, "unable to get IEP\n"); + goto netdev_exit; + } + + /* register the network devices */ + if (eth0_node) { + ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev); + if (ret) { + dev_err(dev, "can't register netdev for port MII0"); + goto iep_put; + } + + prueth->registered_netdevs[PRUETH_MAC0] = + prueth->emac[PRUETH_MAC0]->ndev; + } + + if (eth1_node) { + ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev); + if (ret) { + dev_err(dev, "can't register netdev for port MII1"); + goto netdev_unregister; + } + + prueth->registered_netdevs[PRUETH_MAC1] = + prueth->emac[PRUETH_MAC1]->ndev; + } + + dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n", + (!eth0_node || !eth1_node) ? 
"single" : "dual"); + + if (eth1_node) + of_node_put(eth1_node); + if (eth0_node) + of_node_put(eth0_node); + return 0; + +netdev_unregister: + for (i = 0; i < PRUETH_NUM_MACS; i++) { + if (!prueth->registered_netdevs[i]) + continue; + unregister_netdev(prueth->registered_netdevs[i]); + } + +iep_put: + icss_iep_put(prueth->iep); + prueth->iep = NULL; + +netdev_exit: + for (i = 0; i < PRUETH_NUM_MACS; i++) { + eth_node = prueth->eth_node[i]; + if (!eth_node) + continue; + + icssm_prueth_netdev_exit(prueth, eth_node); + } + +free_pool: + gen_pool_free(prueth->sram_pool, + (unsigned long)prueth->mem[PRUETH_MEM_OCMC].va, + prueth->ocmc_ram_size); + +put_mem: + for (i = PRUETH_MEM_DRAM0; i < PRUETH_MEM_OCMC; i++) { + if (prueth->mem[i].va) + pruss_release_mem_region(pruss, &prueth->mem[i]); + } + pruss_put(prueth->pruss); + +put_pru1: + if (eth1_node) + pru_rproc_put(prueth->pru1); +put_pru0: + if (eth0_node) + pru_rproc_put(prueth->pru0); +put_eth: + of_node_put(eth1_node); + of_node_put(eth0_node); + + return ret; +} + +static void icssm_prueth_remove(struct platform_device *pdev) +{ + struct prueth *prueth = platform_get_drvdata(pdev); + struct device_node *eth_node; + int i; + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + if (!prueth->registered_netdevs[i]) + continue; + unregister_netdev(prueth->registered_netdevs[i]); + } + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + eth_node = prueth->eth_node[i]; + if (!eth_node) + continue; + + icssm_prueth_netdev_exit(prueth, eth_node); + of_node_put(eth_node); + } + + gen_pool_free(prueth->sram_pool, + (unsigned long)prueth->mem[PRUETH_MEM_OCMC].va, + prueth->ocmc_ram_size); + + for (i = PRUETH_MEM_DRAM0; i < PRUETH_MEM_OCMC; i++) { + if (prueth->mem[i].va) + pruss_release_mem_region(prueth->pruss, + &prueth->mem[i]); + } + + icss_iep_put(prueth->iep); + prueth->iep = NULL; + + pruss_put(prueth->pruss); + + if (prueth->eth_node[PRUETH_MAC0]) + pru_rproc_put(prueth->pru0); + if (prueth->eth_node[PRUETH_MAC1]) + pru_rproc_put(prueth->pru1); +} + +#ifdef CONFIG_PM_SLEEP +static int icssm_prueth_suspend(struct device *dev) +{ + struct prueth *prueth = dev_get_drvdata(dev); + struct net_device *ndev; + int i, ret; + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + ndev = prueth->registered_netdevs[i]; + + if (!ndev) + continue; + + if (netif_running(ndev)) { + netif_device_detach(ndev); + ret = icssm_emac_ndo_stop(ndev); + if (ret < 0) { + netdev_err(ndev, "failed to stop: %d", ret); + return ret; + } + } + } + + return 0; +} + +static int icssm_prueth_resume(struct device *dev) +{ + struct prueth *prueth = dev_get_drvdata(dev); + struct net_device *ndev; + int i, ret; + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + ndev = prueth->registered_netdevs[i]; + + if (!ndev) + continue; + + if (netif_running(ndev)) { + ret = icssm_emac_ndo_open(ndev); + if (ret < 0) { + netdev_err(ndev, "failed to start: %d", ret); + return ret; + } + netif_device_attach(ndev); + } + } + + return 0; +} + +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops prueth_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(icssm_prueth_suspend, icssm_prueth_resume) +}; + +/* AM335x SoC-specific firmware data */ +static struct prueth_private_data am335x_prueth_pdata = { + .driver_data = PRUSS_AM33XX, + .fw_pru[PRUSS_PRU0] = { + .fw_name[PRUSS_ETHTYPE_EMAC] = + "ti-pruss/am335x-pru0-prueth-fw.elf", + }, + .fw_pru[PRUSS_PRU1] = { + .fw_name[PRUSS_ETHTYPE_EMAC] = + "ti-pruss/am335x-pru1-prueth-fw.elf", + }, +}; + +/* AM437x SoC-specific firmware data */ +static struct prueth_private_data 
am437x_prueth_pdata = { + .driver_data = PRUSS_AM43XX, + .fw_pru[PRUSS_PRU0] = { + .fw_name[PRUSS_ETHTYPE_EMAC] = + "ti-pruss/am437x-pru0-prueth-fw.elf", + }, + .fw_pru[PRUSS_PRU1] = { + .fw_name[PRUSS_ETHTYPE_EMAC] = + "ti-pruss/am437x-pru1-prueth-fw.elf", + }, +}; + +/* AM57xx SoC-specific firmware data */ +static struct prueth_private_data am57xx_prueth_pdata = { + .driver_data = PRUSS_AM57XX, + .fw_pru[PRUSS_PRU0] = { + .fw_name[PRUSS_ETHTYPE_EMAC] = + "ti-pruss/am57xx-pru0-prueth-fw.elf", + }, + .fw_pru[PRUSS_PRU1] = { + .fw_name[PRUSS_ETHTYPE_EMAC] = + "ti-pruss/am57xx-pru1-prueth-fw.elf", + }, +}; + +static const struct of_device_id prueth_dt_match[] = { + { .compatible = "ti,am57-prueth", .data = &am57xx_prueth_pdata, }, + { .compatible = "ti,am4376-prueth", .data = &am437x_prueth_pdata, }, + { .compatible = "ti,am3359-prueth", .data = &am335x_prueth_pdata, }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, prueth_dt_match); + +static struct platform_driver prueth_driver = { + .probe = icssm_prueth_probe, + .remove = icssm_prueth_remove, + .driver = { + .name = "prueth", + .of_match_table = prueth_dt_match, + .pm = &prueth_dev_pm_ops, + }, +}; +module_platform_driver(prueth_driver); + +MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>"); +MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>"); +MODULE_DESCRIPTION("PRUSS ICSSM Ethernet Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth.h b/drivers/net/ethernet/ti/icssm/icssm_prueth.h new file mode 100644 index 000000000000..8e7e0af08144 --- /dev/null +++ b/drivers/net/ethernet/ti/icssm/icssm_prueth.h @@ -0,0 +1,262 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Texas Instruments ICSSM Ethernet driver + * + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#ifndef __NET_TI_PRUETH_H +#define __NET_TI_PRUETH_H + +#include <linux/phy.h> +#include <linux/types.h> +#include <linux/pruss_driver.h> +#include <linux/remoteproc/pruss.h> + +#include "icssm_switch.h" +#include "icssm_prueth_ptp.h" + +/* ICSSM size of redundancy tag */ +#define ICSSM_LRE_TAG_SIZE 6 + +/* PRUSS local memory map */ +#define ICSS_LOCAL_SHARED_RAM 0x00010000 +#define EMAC_MAX_PKTLEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) +/* Below macro is for 1528 Byte Frame support, to Allow even with + * Redundancy tag + */ +#define EMAC_MAX_FRM_SUPPORT (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN + \ + ICSSM_LRE_TAG_SIZE) + +/* PRU Ethernet Type - Ethernet functionality (protocol + * implemented) provided by the PRU firmware being loaded. + */ +enum pruss_ethtype { + PRUSS_ETHTYPE_EMAC = 0, + PRUSS_ETHTYPE_HSR, + PRUSS_ETHTYPE_PRP, + PRUSS_ETHTYPE_SWITCH, + PRUSS_ETHTYPE_MAX, +}; + +#define PRUETH_IS_EMAC(p) ((p)->eth_type == PRUSS_ETHTYPE_EMAC) +#define PRUETH_IS_SWITCH(p) ((p)->eth_type == PRUSS_ETHTYPE_SWITCH) + +/** + * struct prueth_queue_desc - Queue descriptor + * @rd_ptr: Read pointer, points to a buffer descriptor in Shared PRU RAM. + * @wr_ptr: Write pointer, points to a buffer descriptor in Shared PRU RAM. + * @busy_s: Slave queue busy flag, set by slave(us) to request access from + * master(PRU). + * @status: Bit field status register, Bits: + * 0: Master queue busy flag. + * 1: Packet has been placed in collision queue. + * 2: Packet has been discarded due to overflow. + * @max_fill_level: Maximum queue usage seen. + * @overflow_cnt: Count of queue overflows. + * + * Each port has up to 4 queues with variable length. The queue is processed + * as ring buffer with read and write pointers. 
Both pointers are address + * pointers and increment by 4 for each buffer descriptor position. Queue has + * a length defined in constants and a status. + */ +struct prueth_queue_desc { + u16 rd_ptr; + u16 wr_ptr; + u8 busy_s; + u8 status; + u8 max_fill_level; + u8 overflow_cnt; +}; + +/** + * struct prueth_queue_info - Information about a queue in memory + * @buffer_offset: buffer offset in OCMC RAM + * @queue_desc_offset: queue descriptor offset in Shared RAM + * @buffer_desc_offset: buffer descriptors offset in Shared RAM + * @buffer_desc_end: end address of buffer descriptors in Shared RAM + */ +struct prueth_queue_info { + u16 buffer_offset; + u16 queue_desc_offset; + u16 buffer_desc_offset; + u16 buffer_desc_end; +}; + +/** + * struct prueth_packet_info - Info about a packet in buffer + * @shadow: this packet is stored in the collision queue + * @port: port packet is on + * @length: length of packet + * @broadcast: this packet is a broadcast packet + * @error: this packet has an error + * @lookup_success: src mac found in FDB + * @flood: packet is to be flooded + * @timestamp: Specifies if timestamp is appended to the packet + */ +struct prueth_packet_info { + bool shadow; + unsigned int port; + unsigned int length; + bool broadcast; + bool error; + bool lookup_success; + bool flood; + bool timestamp; +}; + +/* In switch mode there are 3 real ports i.e. 3 mac addrs. + * however Linux sees only the host side port. The other 2 ports + * are the switch ports. + * In emac mode there are 2 real ports i.e. 2 mac addrs. + * Linux sees both the ports. + */ +enum prueth_port { + PRUETH_PORT_HOST = 0, /* host side port */ + PRUETH_PORT_MII0, /* physical port MII 0 */ + PRUETH_PORT_MII1, /* physical port MII 1 */ + PRUETH_PORT_INVALID, /* Invalid prueth port */ +}; + +enum prueth_mac { + PRUETH_MAC0 = 0, + PRUETH_MAC1, + PRUETH_NUM_MACS, + PRUETH_MAC_INVALID, +}; + +/* In both switch & emac modes there are 3 port queues + * EMAC mode: + * RX packets for both MII0 & MII1 ports come on + * QUEUE_HOST. + * TX packets for MII0 go on QUEUE_MII0, TX packets + * for MII1 go on QUEUE_MII1. + * Switch mode: + * Host port RX packets come on QUEUE_HOST + * TX packets might have to go on MII0 or MII1 or both. + * MII0 TX queue is QUEUE_MII0 and MII1 TX queue is + * QUEUE_MII1. 
+ */ +enum prueth_port_queue_id { + PRUETH_PORT_QUEUE_HOST = 0, + PRUETH_PORT_QUEUE_MII0, + PRUETH_PORT_QUEUE_MII1, + PRUETH_PORT_QUEUE_MAX, +}; + +/* Each port queue has 4 queues and 1 collision queue */ +enum prueth_queue_id { + PRUETH_QUEUE1 = 0, + PRUETH_QUEUE2, + PRUETH_QUEUE3, + PRUETH_QUEUE4, + PRUETH_COLQUEUE, /* collision queue */ +}; + +/** + * struct prueth_firmware - PRU Ethernet FW data + * @fw_name: firmware names of firmware to run on PRU + */ +struct prueth_firmware { + const char *fw_name[PRUSS_ETHTYPE_MAX]; +}; + +/* PRUeth memory range identifiers */ +enum prueth_mem { + PRUETH_MEM_DRAM0 = 0, + PRUETH_MEM_DRAM1, + PRUETH_MEM_SHARED_RAM, + PRUETH_MEM_OCMC, + PRUETH_MEM_MAX, +}; + +enum pruss_device { + PRUSS_AM57XX = 0, + PRUSS_AM43XX, + PRUSS_AM33XX, + PRUSS_K2G +}; + +/** + * struct prueth_private_data - PRU Ethernet private data + * @driver_data: PRU Ethernet device name + * @fw_pru: firmware names to be used for PRUSS ethernet usecases + */ +struct prueth_private_data { + enum pruss_device driver_data; + const struct prueth_firmware fw_pru[PRUSS_NUM_PRUS]; +}; + +struct prueth_emac_stats { + u64 tx_packets; + u64 tx_dropped; + u64 tx_bytes; + u64 rx_packets; + u64 rx_bytes; + u64 rx_length_errors; + u64 rx_over_errors; +}; + +/* data for each emac port */ +struct prueth_emac { + struct prueth *prueth; + struct net_device *ndev; + struct napi_struct napi; + + struct rproc *pru; + struct phy_device *phydev; + struct prueth_queue_desc __iomem *rx_queue_descs; + struct prueth_queue_desc __iomem *tx_queue_descs; + + int link; + int speed; + int duplex; + int rx_irq; + + enum prueth_port_queue_id tx_port_queue; + enum prueth_queue_id rx_queue_start; + enum prueth_queue_id rx_queue_end; + enum prueth_port port_id; + enum prueth_mem dram; + const char *phy_id; + u32 msg_enable; + u8 mac_addr[6]; + phy_interface_t phy_if; + + /* spin lock used to protect + * during link configuration + */ + spinlock_t lock; + + struct hrtimer tx_hrtimer; + struct prueth_emac_stats stats; +}; + +struct prueth { + struct device *dev; + struct pruss *pruss; + struct rproc *pru0, *pru1; + struct pruss_mem_region mem[PRUETH_MEM_MAX]; + struct gen_pool *sram_pool; + struct regmap *mii_rt; + struct icss_iep *iep; + + const struct prueth_private_data *fw_data; + struct prueth_fw_offsets *fw_offsets; + + struct device_node *eth_node[PRUETH_NUM_MACS]; + struct prueth_emac *emac[PRUETH_NUM_MACS]; + struct net_device *registered_netdevs[PRUETH_NUM_MACS]; + + unsigned int eth_type; + size_t ocmc_ram_size; + u8 emac_configured; +}; + +void icssm_parse_packet_info(struct prueth *prueth, u32 buffer_descriptor, + struct prueth_packet_info *pkt_info); +int icssm_emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr, + struct prueth_packet_info *pkt_info, + const struct prueth_queue_info *rxqueue); + +#endif /* __NET_TI_PRUETH_H */ diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h b/drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h new file mode 100644 index 000000000000..e0bf692beda1 --- /dev/null +++ b/drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com + */ +#ifndef PRUETH_PTP_H +#define PRUETH_PTP_H + +#define RX_SYNC_TIMESTAMP_OFFSET_P1 0x8 /* 8 bytes */ +#define RX_PDELAY_REQ_TIMESTAMP_OFFSET_P1 0x14 /* 12 bytes */ + +#define DISABLE_PTP_FRAME_FORWARDING_CTRL_OFFSET 0x14 /* 1 byte */ + +#define RX_PDELAY_RESP_TIMESTAMP_OFFSET_P1 0x20 /* 12 bytes */ +#define 
RX_SYNC_TIMESTAMP_OFFSET_P2 0x2c /* 12 bytes */ +#define RX_PDELAY_REQ_TIMESTAMP_OFFSET_P2 0x38 /* 12 bytes */ +#define RX_PDELAY_RESP_TIMESTAMP_OFFSET_P2 0x44 /* 12 bytes */ +#define TIMESYNC_DOMAIN_NUMBER_LIST 0x50 /* 2 bytes */ +#define P1_SMA_LINE_DELAY_OFFSET 0x52 /* 4 bytes */ +#define P2_SMA_LINE_DELAY_OFFSET 0x56 /* 4 bytes */ +#define TIMESYNC_SECONDS_COUNT_OFFSET 0x5a /* 6 bytes */ +#define TIMESYNC_TC_RCF_OFFSET 0x60 /* 4 bytes */ +#define DUT_IS_MASTER_OFFSET 0x64 /* 1 byte */ +#define MASTER_PORT_NUM_OFFSET 0x65 /* 1 byte */ +#define SYNC_MASTER_MAC_OFFSET 0x66 /* 6 bytes */ +#define TX_TS_NOTIFICATION_OFFSET_SYNC_P1 0x6c /* 1 byte */ +#define TX_TS_NOTIFICATION_OFFSET_PDEL_REQ_P1 0x6d /* 1 byte */ +#define TX_TS_NOTIFICATION_OFFSET_PDEL_RES_P1 0x6e /* 1 byte */ +#define TX_TS_NOTIFICATION_OFFSET_SYNC_P2 0x6f /* 1 byte */ +#define TX_TS_NOTIFICATION_OFFSET_PDEL_REQ_P2 0x70 /* 1 byte */ +#define TX_TS_NOTIFICATION_OFFSET_PDEL_RES_P2 0x71 /* 1 byte */ +#define TX_SYNC_TIMESTAMP_OFFSET_P1 0x72 /* 12 bytes */ +#define TX_PDELAY_REQ_TIMESTAMP_OFFSET_P1 0x7e /* 12 bytes */ +#define TX_PDELAY_RESP_TIMESTAMP_OFFSET_P1 0x8a /* 12 bytes */ +#define TX_SYNC_TIMESTAMP_OFFSET_P2 0x96 /* 12 bytes */ +#define TX_PDELAY_REQ_TIMESTAMP_OFFSET_P2 0xa2 /* 12 bytes */ +#define TX_PDELAY_RESP_TIMESTAMP_OFFSET_P2 0xae /* 12 bytes */ +#define TIMESYNC_CTRL_VAR_OFFSET 0xba /* 1 byte */ +#define DISABLE_SWITCH_SYNC_RELAY_OFFSET 0xbb /* 1 byte */ +#define MII_RX_CORRECTION_OFFSET 0xbc /* 2 bytes */ +#define MII_TX_CORRECTION_OFFSET 0xbe /* 2 bytes */ +#define TIMESYNC_CMP1_CMP_OFFSET 0xc0 /* 8 bytes */ +#define TIMESYNC_SYNC0_CMP_OFFSET 0xc8 /* 8 bytes */ +#define TIMESYNC_CMP1_PERIOD_OFFSET 0xd0 /* 4 bytes */ +#define TIMESYNC_SYNC0_WIDTH_OFFSET 0xd4 /* 4 bytes */ +#define SINGLE_STEP_IEP_OFFSET_P1 0xd8 /* 8 bytes */ +#define SINGLE_STEP_SECONDS_OFFSET_P1 0xe0 /* 8 bytes */ +#define SINGLE_STEP_IEP_OFFSET_P2 0xe8 /* 8 bytes */ +#define SINGLE_STEP_SECONDS_OFFSET_P2 0xf0 /* 8 bytes */ +#define LINK_LOCAL_FRAME_HAS_HSR_TAG 0xf8 /* 1 bytes */ +#define PTP_PREV_TX_TIMESTAMP_P1 0xf9 /* 8 bytes */ +#define PTP_PREV_TX_TIMESTAMP_P2 0x101 /* 8 bytes */ +#define PTP_CLK_IDENTITY_OFFSET 0x109 /* 8 bytes */ +#define PTP_SCRATCH_MEM 0x111 /* 16 byte */ +#define PTP_IPV4_UDP_E2E_ENABLE 0x121 /* 1 byte */ + +enum { + PRUETH_PTP_SYNC, + PRUETH_PTP_DLY_REQ, + PRUETH_PTP_DLY_RESP, + PRUETH_PTP_TS_EVENTS, +}; + +#define PRUETH_PTP_TS_SIZE 12 +#define PRUETH_PTP_TS_NOTIFY_SIZE 1 +#define PRUETH_PTP_TS_NOTIFY_MASK 0xff + +/* Bit definitions for TIMESYNC_CTRL */ +#define TIMESYNC_CTRL_BG_ENABLE BIT(0) +#define TIMESYNC_CTRL_FORCED_2STEP BIT(1) + +static inline u32 icssm_prueth_tx_ts_offs_get(u8 port, u8 event) +{ + return TX_SYNC_TIMESTAMP_OFFSET_P1 + port * + PRUETH_PTP_TS_EVENTS * PRUETH_PTP_TS_SIZE + + event * PRUETH_PTP_TS_SIZE; +} + +static inline u32 icssm_prueth_tx_ts_notify_offs_get(u8 port, u8 event) +{ + return TX_TS_NOTIFICATION_OFFSET_SYNC_P1 + + PRUETH_PTP_TS_EVENTS * PRUETH_PTP_TS_NOTIFY_SIZE * port + + event * PRUETH_PTP_TS_NOTIFY_SIZE; +} + +#endif /* PRUETH_PTP_H */ diff --git a/drivers/net/ethernet/ti/icssm/icssm_switch.h b/drivers/net/ethernet/ti/icssm/icssm_switch.h new file mode 100644 index 000000000000..8b494ffdcde7 --- /dev/null +++ b/drivers/net/ethernet/ti/icssm/icssm_switch.h @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com + */ + +#ifndef __ICSS_SWITCH_H +#define __ICSS_SWITCH_H + +/* Basic Switch 
Parameters + * Used to auto compute offset addresses on L3 OCMC RAM. Do not modify these + * without changing firmware accordingly + */ +#define SWITCH_BUFFER_SIZE (64 * 1024) /* L3 buffer */ +#define ICSS_BLOCK_SIZE 32 /* data bytes per BD */ +#define BD_SIZE 4 /* byte buffer descriptor */ +#define NUM_QUEUES 4 /* Queues on Port 0/1/2 */ + +#define PORT_LINK_MASK 0x1 +#define PORT_IS_HD_MASK 0x2 + +/* Physical Port queue size (number of BDs). Same for both ports */ +#define QUEUE_1_SIZE 97 /* Network Management high */ +#define QUEUE_2_SIZE 97 /* Network Management low */ +#define QUEUE_3_SIZE 97 /* Protocol specific */ +#define QUEUE_4_SIZE 97 /* NRT (IP,ARP, ICMP) */ + +/* Host queue size (number of BDs). Each BD points to data buffer of 32 bytes. + * HOST PORT QUEUES can buffer up to 4 full sized frames per queue + */ +#define HOST_QUEUE_1_SIZE 194 /* Protocol and VLAN priority 7 & 6 */ +#define HOST_QUEUE_2_SIZE 194 /* Protocol mid */ +#define HOST_QUEUE_3_SIZE 194 /* Protocol low */ +#define HOST_QUEUE_4_SIZE 194 /* NRT (IP, ARP, ICMP) */ + +#define COL_QUEUE_SIZE 0 + +/* NRT Buffer descriptor definition + * Each buffer descriptor points to a max 32 byte block and has 32 bit in size + * to have atomic operation. + * PRU can address bytewise into memory. + * Definition of 32 bit descriptor is as follows + * + * Bits Name Meaning + * ============================================================================= + * 0..7 Index points to index in buffer queue, max 256 x 32 + * byte blocks can be addressed + * 6 LookupSuccess For switch, FDB lookup was successful (source + * MAC address found in FDB). + * For RED, NodeTable lookup was successful. + * 7 Flood Packet should be flooded (destination MAC + * address found in FDB). For switch only. + * 8..12 Block_length number of valid bytes in this specific block. + * Will be <=32 bytes on last block of packet + * 13 More "More" bit indicating that there are more blocks + * 14 Shadow indicates that "index" is pointing into shadow + * buffer + * 15 TimeStamp indicates that this packet has time stamp in + * separate buffer - only needed if PTP runs on + * host + * 16..17 Port different meaning for ingress and egress, + * Ingress: Port = 0 indicates phy port 1 and + * Port = 1 indicates phy port 2. + * Egress: 0 sends on phy port 1 and 1 sends on + * phy port 2. 
Port = 2 goes over MAC table + * look-up + * 18..28 Length 11 bit of total packet length which is put into + * first BD only so that host access only one BD + * 29 VlanTag indicates that packet has Length/Type field of + * 0x08100 with VLAN tag in following byte + * 30 Broadcast indicates that packet goes out on both physical + * ports, there will be two bd but only one buffer + * 31 Error indicates there was an error in the packet + */ +#define PRUETH_BD_START_FLAG_MASK BIT(0) +#define PRUETH_BD_START_FLAG_SHIFT 0 + +#define PRUETH_BD_HSR_FRAME_MASK BIT(4) +#define PRUETH_BD_HSR_FRAME_SHIFT 4 + +#define PRUETH_BD_SUP_HSR_FRAME_MASK BIT(5) +#define PRUETH_BD_SUP_HSR_FRAME_SHIFT 5 + +#define PRUETH_BD_LOOKUP_SUCCESS_MASK BIT(6) +#define PRUETH_BD_LOOKUP_SUCCESS_SHIFT 6 + +#define PRUETH_BD_SW_FLOOD_MASK BIT(7) +#define PRUETH_BD_SW_FLOOD_SHIFT 7 + +#define PRUETH_BD_SHADOW_MASK BIT(14) +#define PRUETH_BD_SHADOW_SHIFT 14 + +#define PRUETH_BD_TIMESTAMP_MASK BIT(15) +#define PRUETH_BD_TIMESTAMP_SHIFT 15 + +#define PRUETH_BD_PORT_MASK GENMASK(17, 16) +#define PRUETH_BD_PORT_SHIFT 16 + +#define PRUETH_BD_LENGTH_MASK GENMASK(28, 18) +#define PRUETH_BD_LENGTH_SHIFT 18 + +#define PRUETH_BD_BROADCAST_MASK BIT(30) +#define PRUETH_BD_BROADCAST_SHIFT 30 + +#define PRUETH_BD_ERROR_MASK BIT(31) +#define PRUETH_BD_ERROR_SHIFT 31 + +/* The following offsets indicate which sections of the memory are used + * for EMAC internal tasks + */ +#define DRAM_START_OFFSET 0x1E98 +#define SRAM_START_OFFSET 0x400 + +/* General Purpose Statistics + * These are present on both PRU0 and PRU1 DRAM + */ +/* base statistics offset */ +#define STATISTICS_OFFSET 0x1F00 +#define STAT_SIZE 0x98 + +/* Offset for storing + * 1. Storm Prevention Params + * 2. PHY Speed Offset + * 3. Port Status Offset + * These are present on both PRU0 and PRU1 + */ +/* 4 bytes */ +#define STORM_PREVENTION_OFFSET_BC (STATISTICS_OFFSET + STAT_SIZE) +/* 4 bytes */ +#define PHY_SPEED_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 4) +/* 1 byte */ +#define PORT_STATUS_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 8) +/* 1 byte */ +#define COLLISION_COUNTER (STATISTICS_OFFSET + STAT_SIZE + 9) +/* 4 bytes */ +#define RX_PKT_SIZE_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 10) +/* 4 bytes */ +#define PORT_CONTROL_ADDR (STATISTICS_OFFSET + STAT_SIZE + 14) +/* 6 bytes */ +#define PORT_MAC_ADDR (STATISTICS_OFFSET + STAT_SIZE + 18) +/* 1 byte */ +#define RX_INT_STATUS_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 24) +/* 4 bytes */ +#define STORM_PREVENTION_OFFSET_MC (STATISTICS_OFFSET + STAT_SIZE + 25) +/* 4 bytes */ +#define STORM_PREVENTION_OFFSET_UC (STATISTICS_OFFSET + STAT_SIZE + 29) +/* 4 bytes ? 
*/ +#define STP_INVALID_STATE_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 33) + +/* DRAM Offsets for EMAC + * Present on Both DRAM0 and DRAM1 + */ + +/* 4 queue descriptors for port tx = 32 bytes */ +#define TX_CONTEXT_Q1_OFFSET_ADDR (PORT_QUEUE_DESC_OFFSET + 32) +#define PORT_QUEUE_DESC_OFFSET (ICSS_EMAC_TTS_CYC_TX_SOF + 8) + +/* EMAC Time Triggered Send Offsets */ +#define ICSS_EMAC_TTS_CYC_TX_SOF (ICSS_EMAC_TTS_PREV_TX_SOF + 8) +#define ICSS_EMAC_TTS_PREV_TX_SOF \ + (ICSS_EMAC_TTS_MISSED_CYCLE_CNT_OFFSET + 4) +#define ICSS_EMAC_TTS_MISSED_CYCLE_CNT_OFFSET (ICSS_EMAC_TTS_STATUS_OFFSET \ + + 4) +#define ICSS_EMAC_TTS_STATUS_OFFSET (ICSS_EMAC_TTS_CFG_TIME_OFFSET + 4) +#define ICSS_EMAC_TTS_CFG_TIME_OFFSET (ICSS_EMAC_TTS_CYCLE_PERIOD_OFFSET + 4) +#define ICSS_EMAC_TTS_CYCLE_PERIOD_OFFSET \ + (ICSS_EMAC_TTS_CYCLE_START_OFFSET + 8) +#define ICSS_EMAC_TTS_CYCLE_START_OFFSET ICSS_EMAC_TTS_BASE_OFFSET +#define ICSS_EMAC_TTS_BASE_OFFSET DRAM_START_OFFSET + +/* Shared RAM offsets for EMAC */ + +/* Queue Descriptors */ + +/* 4 queue descriptors for port 0 (host receive). 32 bytes */ +#define HOST_QUEUE_DESC_OFFSET (HOST_QUEUE_SIZE_ADDR + 16) + +/* table offset for queue size: + * 3 ports * 4 Queues * 1 byte offset = 12 bytes + */ +#define HOST_QUEUE_SIZE_ADDR (HOST_QUEUE_OFFSET_ADDR + 8) +/* table offset for queue: + * 4 Queues * 2 byte offset = 8 bytes + */ +#define HOST_QUEUE_OFFSET_ADDR (HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR + 8) +/* table offset for Host queue descriptors: + * 1 ports * 4 Queues * 2 byte offset = 8 bytes + */ +#define HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR (HOST_Q4_RX_CONTEXT_OFFSET + 8) + +/* Host Port Rx Context */ +#define HOST_Q4_RX_CONTEXT_OFFSET (HOST_Q3_RX_CONTEXT_OFFSET + 8) +#define HOST_Q3_RX_CONTEXT_OFFSET (HOST_Q2_RX_CONTEXT_OFFSET + 8) +#define HOST_Q2_RX_CONTEXT_OFFSET (HOST_Q1_RX_CONTEXT_OFFSET + 8) +#define HOST_Q1_RX_CONTEXT_OFFSET (EMAC_PROMISCUOUS_MODE_OFFSET + 4) + +/* Promiscuous mode control */ +#define EMAC_P1_PROMISCUOUS_BIT BIT(0) +#define EMAC_P2_PROMISCUOUS_BIT BIT(1) +#define EMAC_PROMISCUOUS_MODE_OFFSET (EMAC_RESERVED + 4) +#define EMAC_RESERVED EOF_48K_BUFFER_BD + +/* allow for max 48k buffer which spans the descriptors up to 0x1800 6kB */ +#define EOF_48K_BUFFER_BD (P0_BUFFER_DESC_OFFSET + HOST_BD_SIZE + \ + PORT_BD_SIZE) + +#define HOST_BD_SIZE ((HOST_QUEUE_1_SIZE + \ + HOST_QUEUE_2_SIZE + HOST_QUEUE_3_SIZE + \ + HOST_QUEUE_4_SIZE) * BD_SIZE) +#define PORT_BD_SIZE ((QUEUE_1_SIZE + QUEUE_2_SIZE + \ + QUEUE_3_SIZE + QUEUE_4_SIZE) * 2 * BD_SIZE) + +#define END_OF_BD_POOL (P2_Q4_BD_OFFSET + QUEUE_4_SIZE * BD_SIZE) +#define P2_Q4_BD_OFFSET (P2_Q3_BD_OFFSET + QUEUE_3_SIZE * BD_SIZE) +#define P2_Q3_BD_OFFSET (P2_Q2_BD_OFFSET + QUEUE_2_SIZE * BD_SIZE) +#define P2_Q2_BD_OFFSET (P2_Q1_BD_OFFSET + QUEUE_1_SIZE * BD_SIZE) +#define P2_Q1_BD_OFFSET (P1_Q4_BD_OFFSET + QUEUE_4_SIZE * BD_SIZE) +#define P1_Q4_BD_OFFSET (P1_Q3_BD_OFFSET + QUEUE_3_SIZE * BD_SIZE) +#define P1_Q3_BD_OFFSET (P1_Q2_BD_OFFSET + QUEUE_2_SIZE * BD_SIZE) +#define P1_Q2_BD_OFFSET (P1_Q1_BD_OFFSET + QUEUE_1_SIZE * BD_SIZE) +#define P1_Q1_BD_OFFSET (P0_Q4_BD_OFFSET + HOST_QUEUE_4_SIZE * BD_SIZE) +#define P0_Q4_BD_OFFSET (P0_Q3_BD_OFFSET + HOST_QUEUE_3_SIZE * BD_SIZE) +#define P0_Q3_BD_OFFSET (P0_Q2_BD_OFFSET + HOST_QUEUE_2_SIZE * BD_SIZE) +#define P0_Q2_BD_OFFSET (P0_Q1_BD_OFFSET + HOST_QUEUE_1_SIZE * BD_SIZE) +#define P0_Q1_BD_OFFSET P0_BUFFER_DESC_OFFSET +#define P0_BUFFER_DESC_OFFSET SRAM_START_OFFSET + +/* Memory Usage of L3 OCMC RAM */ + +/* L3 64KB Memory - mainly buffer Pool */ +#define 
END_OF_BUFFER_POOL (P2_Q4_BUFFER_OFFSET + QUEUE_4_SIZE * \ + ICSS_BLOCK_SIZE) +#define P2_Q4_BUFFER_OFFSET (P2_Q3_BUFFER_OFFSET + QUEUE_3_SIZE * \ + ICSS_BLOCK_SIZE) +#define P2_Q3_BUFFER_OFFSET (P2_Q2_BUFFER_OFFSET + QUEUE_2_SIZE * \ + ICSS_BLOCK_SIZE) +#define P2_Q2_BUFFER_OFFSET (P2_Q1_BUFFER_OFFSET + QUEUE_1_SIZE * \ + ICSS_BLOCK_SIZE) +#define P2_Q1_BUFFER_OFFSET (P1_Q4_BUFFER_OFFSET + QUEUE_4_SIZE * \ + ICSS_BLOCK_SIZE) +#define P1_Q4_BUFFER_OFFSET (P1_Q3_BUFFER_OFFSET + QUEUE_3_SIZE * \ + ICSS_BLOCK_SIZE) +#define P1_Q3_BUFFER_OFFSET (P1_Q2_BUFFER_OFFSET + QUEUE_2_SIZE * \ + ICSS_BLOCK_SIZE) +#define P1_Q2_BUFFER_OFFSET (P1_Q1_BUFFER_OFFSET + QUEUE_1_SIZE * \ + ICSS_BLOCK_SIZE) +#define P1_Q1_BUFFER_OFFSET (P0_Q4_BUFFER_OFFSET + HOST_QUEUE_4_SIZE * \ + ICSS_BLOCK_SIZE) +#define P0_Q4_BUFFER_OFFSET (P0_Q3_BUFFER_OFFSET + HOST_QUEUE_3_SIZE * \ + ICSS_BLOCK_SIZE) +#define P0_Q3_BUFFER_OFFSET (P0_Q2_BUFFER_OFFSET + HOST_QUEUE_2_SIZE * \ + ICSS_BLOCK_SIZE) +#define P0_Q2_BUFFER_OFFSET (P0_Q1_BUFFER_OFFSET + HOST_QUEUE_1_SIZE * \ + ICSS_BLOCK_SIZE) +#define P0_COL_BUFFER_OFFSET 0xEE00 +#define P0_Q1_BUFFER_OFFSET 0x0000 + +#endif /* __ICSS_SWITCH_H */
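
The NRT buffer descriptor layout documented in icssm_switch.h maps a single 32-bit word onto the fields of struct prueth_packet_info from icssm_prueth.h. The snippet below is a minimal, self-contained userspace sketch of how such a word could be unpacked, assuming masks that mirror the PRUETH_BD_* definitions in the patch; decode_bd(), struct pkt_info and the BD_* names are hypothetical illustrations, not the driver's icssm_parse_packet_info() implementation.

/*
 * Sketch: decode one 32-bit NRT buffer descriptor word according to the
 * bit layout documented in icssm_switch.h. Mask values mirror the
 * PRUETH_BD_* definitions above; names here are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BD_LOOKUP_SUCCESS_BIT	(1u << 6)	/* PRUETH_BD_LOOKUP_SUCCESS_MASK */
#define BD_FLOOD_BIT		(1u << 7)	/* PRUETH_BD_SW_FLOOD_MASK */
#define BD_SHADOW_BIT		(1u << 14)	/* PRUETH_BD_SHADOW_MASK */
#define BD_TIMESTAMP_BIT	(1u << 15)	/* PRUETH_BD_TIMESTAMP_MASK */
#define BD_PORT_SHIFT		16		/* PRUETH_BD_PORT_SHIFT */
#define BD_PORT_MASK		(0x3u << BD_PORT_SHIFT)
#define BD_LENGTH_SHIFT		18		/* PRUETH_BD_LENGTH_SHIFT */
#define BD_LENGTH_MASK		(0x7ffu << BD_LENGTH_SHIFT)
#define BD_BROADCAST_BIT	(1u << 30)	/* PRUETH_BD_BROADCAST_MASK */
#define BD_ERROR_BIT		(1u << 31)	/* PRUETH_BD_ERROR_MASK */

/* mirrors the fields of struct prueth_packet_info */
struct pkt_info {
	bool shadow, broadcast, error, lookup_success, flood, timestamp;
	unsigned int port, length;
};

static void decode_bd(uint32_t bd, struct pkt_info *info)
{
	info->lookup_success = bd & BD_LOOKUP_SUCCESS_BIT;
	info->flood          = bd & BD_FLOOD_BIT;
	info->shadow         = bd & BD_SHADOW_BIT;
	info->timestamp      = bd & BD_TIMESTAMP_BIT;
	info->port           = (bd & BD_PORT_MASK) >> BD_PORT_SHIFT;
	info->length         = (bd & BD_LENGTH_MASK) >> BD_LENGTH_SHIFT;
	info->broadcast      = bd & BD_BROADCAST_BIT;
	info->error          = bd & BD_ERROR_BIT;
}

int main(void)
{
	/* example: 64-byte frame, ingress Port = 1 (phy port 2), shadow buffer */
	uint32_t bd = (64u << BD_LENGTH_SHIFT) | (1u << BD_PORT_SHIFT) |
		      BD_SHADOW_BIT;
	struct pkt_info info;

	decode_bd(bd, &info);
	printf("port %u, length %u, shadow %d, error %d\n",
	       info.port, info.length, info.shadow, info.error);
	return 0;
}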