Diffstat (limited to 'drivers/net/ethernet/marvell')
88 files changed, 5096 insertions, 1138 deletions
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 837295fecd17..50f7c59e8a04 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -34,7 +34,6 @@ config MV643XX_ETH config MVMDIO tristate "Marvell MDIO interface support" depends on HAS_IOMEM - select MDIO_DEVRES select PHYLIB help This driver supports the MDIO interface found in the network diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index a06048719e84..0ab52c57c648 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1333,7 +1333,8 @@ static void mib_counters_update(struct mv643xx_eth_private *mp) static void mib_counters_timer_wrapper(struct timer_list *t) { - struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer); + struct mv643xx_eth_private *mp = timer_container_of(mp, t, + mib_counters_timer); mib_counters_update(mp); mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); } @@ -2247,7 +2248,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) if (unlikely(mp->oom)) { mp->oom = 0; - del_timer(&mp->rx_oom); + timer_delete(&mp->rx_oom); } work_done = 0; @@ -2306,7 +2307,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) static inline void oom_timer_wrapper(struct timer_list *t) { - struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom); + struct mv643xx_eth_private *mp = timer_container_of(mp, t, rx_oom); napi_schedule(&mp->napi); } @@ -2521,7 +2522,7 @@ static int mv643xx_eth_stop(struct net_device *dev) napi_disable(&mp->napi); - del_timer_sync(&mp->rx_oom); + timer_delete_sync(&mp->rx_oom); netif_carrier_off(dev); if (dev->phydev) @@ -2531,7 +2532,7 @@ static int mv643xx_eth_stop(struct net_device *dev) port_reset(mp); mv643xx_eth_get_stats(dev); mib_counters_update(mp); - del_timer_sync(&mp->mib_counters_timer); + timer_delete_sync(&mp->mib_counters_timer); for (i = 0; i < mp->rxq_count; i++) rxq_deinit(mp->rxq + i); @@ -2704,9 +2705,15 @@ static struct platform_device *port_platdev[3]; static void mv643xx_eth_shared_of_remove(void) { + struct mv643xx_eth_platform_data *pd; int n; for (n = 0; n < 3; n++) { + if (!port_platdev[n]) + continue; + pd = dev_get_platdata(&port_platdev[n]->dev); + if (pd) + of_node_put(pd->phy_node); platform_device_del(port_platdev[n]); port_platdev[n] = NULL; } @@ -2769,8 +2776,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, } ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num); - if (!ppdev) - return -ENOMEM; + if (!ppdev) { + ret = -ENOMEM; + goto put_err; + } ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); ppdev->dev.of_node = pnp; @@ -2792,6 +2801,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, port_err: platform_device_put(ppdev); +put_err: + of_node_put(ppd.phy_node); return ret; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 1fb285fa0bdb..476e73e502fe 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -284,8 +284,12 @@ MVNETA_TXQ_BUCKET_REFILL_PERIOD)) #define MVNETA_LPI_CTRL_0 0x2cc0 +#define MVNETA_LPI_CTRL_0_TS (0xff << 8) #define MVNETA_LPI_CTRL_1 0x2cc4 -#define MVNETA_LPI_REQUEST_ENABLE BIT(0) +#define MVNETA_LPI_CTRL_1_REQUEST_ENABLE BIT(0) +#define MVNETA_LPI_CTRL_1_REQUEST_FORCE BIT(1) +#define MVNETA_LPI_CTRL_1_MANUAL_MODE BIT(2) +#define MVNETA_LPI_CTRL_1_TW (0xfff << 4) 
#define MVNETA_LPI_CTRL_2 0x2cc8 #define MVNETA_LPI_STATUS 0x2ccc @@ -541,10 +545,6 @@ struct mvneta_port { struct mvneta_bm_pool *pool_short; int bm_win_id; - bool eee_enabled; - bool eee_active; - bool tx_lpi_enabled; - u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; @@ -2342,7 +2342,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, prefetch(data); xdp_buff_clear_frags_flag(xdp); xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE, - data_len, false); + data_len, true); } static void @@ -2396,6 +2396,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, struct xdp_buff *xdp, u32 desc_status) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + u32 metasize = xdp->data - xdp->data_meta; struct sk_buff *skb; u8 num_frags; @@ -2410,6 +2411,8 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); + if (metasize) + skb_metadata_set(skb, metasize); skb->ip_summed = mvneta_rx_csum(pp, desc_status); if (unlikely(xdp_buff_has_frags(xdp))) @@ -3960,23 +3963,30 @@ static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs) return container_of(pcs, struct mvneta_port, phylink_pcs); } -static int mvneta_pcs_validate(struct phylink_pcs *pcs, - unsigned long *supported, - const struct phylink_link_state *state) +static unsigned int mvneta_pcs_inband_caps(struct phylink_pcs *pcs, + phy_interface_t interface) { - /* We only support QSGMII, SGMII, 802.3z and RGMII modes. - * When in 802.3z mode, we must have AN enabled: + /* When operating in an 802.3z mode, we must have AN enabled: * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... * When <PortType> = 1 (1000BASE-X) this field must be set to 1." + * Therefore, inband is "required". */ - if (phy_interface_mode_is_8023z(state->interface) && - !phylink_test(state->advertising, Autoneg)) - return -EINVAL; + if (phy_interface_mode_is_8023z(interface)) + return LINK_INBAND_ENABLE; - return 0; + /* QSGMII, SGMII and RGMII can be configured to use inband + * signalling of the AN result. Indicate these as "possible". + */ + if (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_QSGMII || + phy_interface_mode_is_rgmii(interface)) + return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; + + /* For any other modes, indicate that inband is not supported. 
*/ + return LINK_INBAND_DISABLE; } -static void mvneta_pcs_get_state(struct phylink_pcs *pcs, +static void mvneta_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, struct phylink_link_state *state) { struct mvneta_port *pp = mvneta_pcs_to_port(pcs); @@ -4071,7 +4081,7 @@ static void mvneta_pcs_an_restart(struct phylink_pcs *pcs) } static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = { - .pcs_validate = mvneta_pcs_validate, + .pcs_inband_caps = mvneta_pcs_inband_caps, .pcs_get_state = mvneta_pcs_get_state, .pcs_config = mvneta_pcs_config, .pcs_an_restart = mvneta_pcs_an_restart, @@ -4206,18 +4216,6 @@ static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode, return 0; } -static void mvneta_set_eee(struct mvneta_port *pp, bool enable) -{ - u32 lpi_ctl1; - - lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); - if (enable) - lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE; - else - lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE; - mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); -} - static void mvneta_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { @@ -4233,9 +4231,6 @@ static void mvneta_mac_link_down(struct phylink_config *config, val |= MVNETA_GMAC_FORCE_LINK_DOWN; mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); } - - pp->eee_active = false; - mvneta_set_eee(pp, false); } static void mvneta_mac_link_up(struct phylink_config *config, @@ -4284,11 +4279,56 @@ static void mvneta_mac_link_up(struct phylink_config *config, } mvneta_port_up(pp); +} + +static void mvneta_mac_disable_tx_lpi(struct phylink_config *config) +{ + struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev)); + u32 lpi1; + + lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); + lpi1 &= ~(MVNETA_LPI_CTRL_1_REQUEST_ENABLE | + MVNETA_LPI_CTRL_1_REQUEST_FORCE | + MVNETA_LPI_CTRL_1_MANUAL_MODE); + mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1); +} + +static int mvneta_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, + bool tx_clk_stop) +{ + struct mvneta_port *pp = netdev_priv(to_net_dev(config->dev)); + u32 ts, tw, lpi0, lpi1, status; - if (phy && pp->eee_enabled) { - pp->eee_active = phy_init_eee(phy, false) >= 0; - mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); + status = mvreg_read(pp, MVNETA_GMAC_STATUS); + if (status & MVNETA_GMAC_SPEED_1000) { + /* At 1G speeds, the timer resolution are 1us, and + * 802.3 says tw is 16.5us. Round up to 17us. + */ + tw = 17; + ts = timer; + } else { + /* At 100M speeds, the timer resolutions are 10us, and + * 802.3 says tw is 30us. 
+ */ + tw = 3; + ts = DIV_ROUND_UP(timer, 10); } + + if (ts > 255) + ts = 255; + + /* Configure ts */ + lpi0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); + lpi0 = u32_replace_bits(lpi0, ts, MVNETA_LPI_CTRL_0_TS); + mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi0); + + /* Configure tw and enable LPI generation */ + lpi1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); + lpi1 = u32_replace_bits(lpi1, tw, MVNETA_LPI_CTRL_1_TW); + lpi1 |= MVNETA_LPI_CTRL_1_REQUEST_ENABLE; + mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi1); + + return 0; } static const struct phylink_mac_ops mvneta_phylink_ops = { @@ -4298,6 +4338,8 @@ static const struct phylink_mac_ops mvneta_phylink_ops = { .mac_finish = mvneta_mac_finish, .mac_link_down = mvneta_mac_link_down, .mac_link_up = mvneta_mac_link_up, + .mac_disable_tx_lpi = mvneta_mac_disable_tx_lpi, + .mac_enable_tx_lpi = mvneta_mac_enable_tx_lpi, }; static int mvneta_mdio_probe(struct mvneta_port *pp) @@ -4385,6 +4427,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) if (pp->neta_armada3700) return 0; + netdev_lock(port->napi.dev); spin_lock(&pp->lock); /* * Configuring the driver for a new CPU while the driver is @@ -4392,6 +4435,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) */ if (pp->is_stopped) { spin_unlock(&pp->lock); + netdev_unlock(port->napi.dev); return 0; } netif_tx_stop_all_queues(pp->dev); @@ -4411,7 +4455,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) /* Mask all ethernet port interrupts */ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); - napi_enable(&port->napi); + napi_enable_locked(&port->napi); /* * Enable per-CPU interrupts on the CPU that is @@ -4432,6 +4476,8 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) MVNETA_CAUSE_LINK_CHANGE); netif_tx_start_all_queues(pp->dev); spin_unlock(&pp->lock); + netdev_unlock(port->napi.dev); + return 0; } @@ -4564,7 +4610,7 @@ static int mvneta_stop(struct net_device *dev) /* Inform that we are stopping so we don't want to setup the * driver for new CPUs in the notifiers. The code of the * notifier for CPU online is protected by the same spinlock, - * so when we get the lock, the notifer work is done. + * so when we get the lock, the notifier work is done. */ spin_lock(&pp->lock); pp->is_stopped = true; @@ -4968,8 +5014,6 @@ static int mvneta_ethtool_get_rxnfc(struct net_device *dev, case ETHTOOL_GRXRINGS: info->data = rxq_number; return 0; - case ETHTOOL_GRXFH: - return -EOPNOTSUPP; default: return -EOPNOTSUPP; } @@ -5099,14 +5143,6 @@ static int mvneta_ethtool_get_eee(struct net_device *dev, struct ethtool_keee *eee) { struct mvneta_port *pp = netdev_priv(dev); - u32 lpi_ctl0; - - lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); - - eee->eee_enabled = pp->eee_enabled; - eee->eee_active = pp->eee_active; - eee->tx_lpi_enabled = pp->tx_lpi_enabled; - eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale; return phylink_ethtool_get_eee(pp->phylink, eee); } @@ -5115,7 +5151,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev, struct ethtool_keee *eee) { struct mvneta_port *pp = netdev_priv(dev); - u32 lpi_ctl0; /* The Armada 37x documents do not give limits for this other than * it being an 8-bit register. 
@@ -5123,16 +5158,6 @@ static int mvneta_ethtool_set_eee(struct net_device *dev, if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) return -EINVAL; - lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); - lpi_ctl0 &= ~(0xff << 8); - lpi_ctl0 |= eee->tx_lpi_timer << 8; - mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0); - - pp->eee_enabled = eee->eee_enabled; - pp->tx_lpi_enabled = eee->tx_lpi_enabled; - - mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); - return phylink_ethtool_set_eee(pp->phylink, eee); } @@ -5446,6 +5471,9 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) !phy_interface_mode_is_rgmii(phy_mode)) return -EINVAL; + /* Ensure LPI is disabled */ + mvneta_mac_disable_tx_lpi(&pp->phylink_config); + return 0; } @@ -5530,13 +5558,19 @@ static int mvneta_probe(struct platform_device *pdev) clk_prepare_enable(pp->clk_bus); pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops; - pp->phylink_pcs.neg_mode = true; pp->phylink_config.dev = &dev->dev; pp->phylink_config.type = PHYLINK_NETDEV; pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD; + /* Setup EEE. Choose 250us idle. Only supported in SGMII modes. */ + __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.lpi_interfaces); + __set_bit(PHY_INTERFACE_MODE_SGMII, pp->phylink_config.lpi_interfaces); + pp->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD; + pp->phylink_config.lpi_timer_default = 250; + pp->phylink_config.eee_enabled_default = true; + phy_interface_set_rgmii(pp->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_QSGMII, pp->phylink_config.supported_interfaces); diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h index e47783ce77e0..57ac039df6f7 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.h +++ b/drivers/net/ethernet/marvell/mvneta_bm.h @@ -115,7 +115,7 @@ struct mvneta_bm_pool { /* Packet size */ int pkt_size; - /* Size of the buffer acces through DMA*/ + /* Size of the buffer access through DMA */ u32 buf_size; /* BPPE virtual base address */ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 9e02e4367bec..061fcd444d50 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -481,6 +481,11 @@ #define MVPP22_GMAC_INT_SUM_MASK 0xa4 #define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1) #define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2) +#define MVPP2_GMAC_LPI_CTRL0 0xc0 +#define MVPP2_GMAC_LPI_CTRL0_TS_MASK GENMASK(15, 8) +#define MVPP2_GMAC_LPI_CTRL1 0xc4 +#define MVPP2_GMAC_LPI_CTRL1_REQ_EN BIT(0) +#define MVPP2_GMAC_LPI_CTRL1_TW_MASK GENMASK(15, 4) /* Per-port XGMAC registers. PPv2.2 and PPv2.3, only for GOP port 0, * relative to port->base. 
@@ -1108,6 +1113,9 @@ struct mvpp2 { /* Spinlocks for CM3 shared memory configuration */ spinlock_t mss_spinlock; + + /* Spinlock for shared PRS parser memory and shadow table */ + spinlock_t prs_spinlock; }; struct mvpp2_pcpu_stats { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 1641791a2d5b..44b201817d94 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = { MVPP2_PRS_RI_VLAN_MASK), /* Non IP flow, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG, - MVPP22_CLS_HEK_OPT_VLAN, + MVPP22_CLS_HEK_TAGGED, 0, 0), }; @@ -1618,7 +1618,8 @@ int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx, return 0; } -int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, + const struct ethtool_rxfh_fields *info) { u16 hash_opts = 0; u32 flow_type; @@ -1656,7 +1657,8 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info) return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts); } -int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info) +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, + struct ethtool_rxfh_fields *info) { unsigned long hash_opts; u32 flow_type; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h index 85c9c6e80678..caadf3aea95d 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -272,8 +272,10 @@ int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx, int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx, u32 *indir); -int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info); -int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info); +int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, + struct ethtool_rxfh_fields *info); +int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, + const struct ethtool_rxfh_fields *info); void mvpp2_cls_init(struct mvpp2 *priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 571631a30320..8ebb985d2573 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3915,13 +3915,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, while (rx_done < rx_todo) { struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); + u32 rx_status, timestamp, metasize = 0; struct mvpp2_bm_pool *bm_pool; struct page_pool *pp = NULL; struct sk_buff *skb; unsigned int frag_size; dma_addr_t dma_addr; phys_addr_t phys_addr; - u32 rx_status, timestamp; int pool, rx_bytes, err, ret; struct page *page; void *data; @@ -3983,7 +3983,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq); xdp_prepare_buff(&xdp, data, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM, - rx_bytes, false); + rx_bytes, true); ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps); @@ -3999,6 +3999,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, ps.rx_bytes += rx_bytes; continue; } + + metasize = xdp.data - xdp.data_meta; } if (frag_size) @@ -4038,6 +4040,8 @@ static int mvpp2_rx(struct mvpp2_port *port, 
struct napi_struct *napi, skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); skb_put(skb, rx_bytes); + if (metasize) + skb_metadata_set(skb, metasize); skb->ip_summed = mvpp2_rx_csum(port, rx_status); skb->protocol = eth_type_trans(skb, dev); @@ -5169,38 +5173,40 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_dropped = dev->stats.tx_dropped; } -static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) +static int mvpp2_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { - struct hwtstamp_config config; + struct mvpp2_port *port = netdev_priv(dev); void __iomem *ptp; u32 gcr, int_mask; - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; + if (!port->hwtstamp) + return -EOPNOTSUPP; - if (config.tx_type != HWTSTAMP_TX_OFF && - config.tx_type != HWTSTAMP_TX_ON) + if (config->tx_type != HWTSTAMP_TX_OFF && + config->tx_type != HWTSTAMP_TX_ON) return -ERANGE; ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); int_mask = gcr = 0; - if (config.tx_type != HWTSTAMP_TX_OFF) { + if (config->tx_type != HWTSTAMP_TX_OFF) { gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET; int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 | MVPP22_PTP_INT_MASK_QUEUE0; } /* It seems we must also release the TX reset when enabling the TSU */ - if (config.rx_filter != HWTSTAMP_FILTER_NONE) + if (config->rx_filter != HWTSTAMP_FILTER_NONE) gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET | MVPP22_PTP_GCR_TX_RESET; if (gcr & MVPP22_PTP_GCR_TSU_ENABLE) mvpp22_tai_start(port->priv->tai); - if (config.rx_filter != HWTSTAMP_FILTER_NONE) { - config.rx_filter = HWTSTAMP_FILTER_ALL; + if (config->rx_filter != HWTSTAMP_FILTER_NONE) { + config->rx_filter = HWTSTAMP_FILTER_ALL; mvpp2_modify(ptp + MVPP22_PTP_GCR, MVPP22_PTP_GCR_RX_RESET | MVPP22_PTP_GCR_TX_RESET | @@ -5221,26 +5227,22 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE)) mvpp22_tai_stop(port->priv->tai); - port->tx_hwtstamp_type = config.tx_type; - - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; + port->tx_hwtstamp_type = config->tx_type; return 0; } -static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr) +static int mvpp2_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *config) { - struct hwtstamp_config config; - - memset(&config, 0, sizeof(config)); + struct mvpp2_port *port = netdev_priv(dev); - config.tx_type = port->tx_hwtstamp_type; - config.rx_filter = port->rx_hwtstamp ? - HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + if (!port->hwtstamp) + return -EOPNOTSUPP; - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; + config->tx_type = port->tx_hwtstamp_type; + config->rx_filter = port->rx_hwtstamp ? 
HWTSTAMP_FILTER_ALL : + HWTSTAMP_FILTER_NONE; return 0; } @@ -5270,18 +5272,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mvpp2_port *port = netdev_priv(dev); - switch (cmd) { - case SIOCSHWTSTAMP: - if (port->hwtstamp) - return mvpp2_set_ts_config(port, ifr); - break; - - case SIOCGHWTSTAMP: - if (port->hwtstamp) - return mvpp2_get_ts_config(port, ifr); - break; - } - if (!port->phylink) return -ENOTSUPP; @@ -5598,9 +5588,6 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, return -EOPNOTSUPP; switch (info->cmd) { - case ETHTOOL_GRXFH: - ret = mvpp2_ethtool_rxfh_get(port, info); - break; case ETHTOOL_GRXRINGS: info->data = port->nrxqs; break; @@ -5638,9 +5625,6 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, return -EOPNOTSUPP; switch (info->cmd) { - case ETHTOOL_SRXFH: - ret = mvpp2_ethtool_rxfh_set(port, info); - break; case ETHTOOL_SRXCLSRLINS: ret = mvpp2_ethtool_cls_rule_ins(port, info); break; @@ -5757,6 +5741,51 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev, return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack); } +static int mvpp2_ethtool_get_rxfh_fields(struct net_device *dev, + struct ethtool_rxfh_fields *info) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!mvpp22_rss_is_supported(port)) + return -EOPNOTSUPP; + + return mvpp2_ethtool_rxfh_get(port, info); +} + +static int mvpp2_ethtool_set_rxfh_fields(struct net_device *dev, + const struct ethtool_rxfh_fields *info, + struct netlink_ext_ack *extack) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!mvpp22_rss_is_supported(port)) + return -EOPNOTSUPP; + + return mvpp2_ethtool_rxfh_set(port, info); +} + +static int mvpp2_ethtool_get_eee(struct net_device *dev, + struct ethtool_keee *eee) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -EOPNOTSUPP; + + return phylink_ethtool_get_eee(port->phylink, eee); +} + +static int mvpp2_ethtool_set_eee(struct net_device *dev, + struct ethtool_keee *eee) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -EOPNOTSUPP; + + return phylink_ethtool_set_eee(port->phylink, eee); +} + /* Device ops */ static const struct net_device_ops mvpp2_netdev_ops = { @@ -5773,6 +5802,8 @@ static const struct net_device_ops mvpp2_netdev_ops = { .ndo_set_features = mvpp2_set_features, .ndo_bpf = mvpp2_xdp, .ndo_xdp_xmit = mvpp2_xdp_xmit, + .ndo_hwtstamp_get = mvpp2_hwtstamp_get, + .ndo_hwtstamp_set = mvpp2_hwtstamp_set, }; static const struct ethtool_ops mvpp2_eth_tool_ops = { @@ -5799,9 +5830,13 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, .get_rxfh = mvpp2_ethtool_get_rxfh, .set_rxfh = mvpp2_ethtool_set_rxfh, + .get_rxfh_fields = mvpp2_ethtool_get_rxfh_fields, + .set_rxfh_fields = mvpp2_ethtool_set_rxfh_fields, .create_rxfh_context = mvpp2_create_rxfh_context, .modify_rxfh_context = mvpp2_modify_rxfh_context, .remove_rxfh_context = mvpp2_remove_rxfh_context, + .get_eee = mvpp2_ethtool_get_eee, + .set_eee = mvpp2_ethtool_set_eee, }; /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that @@ -6188,6 +6223,7 @@ static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs) } static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs, + unsigned int neg_mode, struct phylink_link_state *state) { struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs); @@ -6224,22 +6260,30 @@ static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { .pcs_config = 
mvpp2_xlg_pcs_config, }; -static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs, - unsigned long *supported, - const struct phylink_link_state *state) +static unsigned int mvpp2_gmac_pcs_inband_caps(struct phylink_pcs *pcs, + phy_interface_t interface) { - /* When in 802.3z mode, we must have AN enabled: + /* When operating in an 802.3z mode, we must have AN enabled: * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... * When <PortType> = 1 (1000BASE-X) this field must be set to 1. + * Therefore, inband is "required". */ - if (phy_interface_mode_is_8023z(state->interface) && - !phylink_test(state->advertising, Autoneg)) - return -EINVAL; + if (phy_interface_mode_is_8023z(interface)) + return LINK_INBAND_ENABLE; - return 0; + /* SGMII and RGMII can be configured to use inband signalling of the + * AN result. Indicate these as "possible". + */ + if (interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_rgmii(interface)) + return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; + + /* For any other modes, indicate that inband is not supported. */ + return LINK_INBAND_DISABLE; } static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, + unsigned int neg_mode, struct phylink_link_state *state) { struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); @@ -6343,7 +6387,7 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) } static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = { - .pcs_validate = mvpp2_gmac_pcs_validate, + .pcs_inband_caps = mvpp2_gmac_pcs_inband_caps, .pcs_get_state = mvpp2_gmac_pcs_get_state, .pcs_config = mvpp2_gmac_pcs_config, .pcs_an_restart = mvpp2_gmac_pcs_an_restart, @@ -6665,6 +6709,55 @@ static void mvpp2_mac_link_down(struct phylink_config *config, mvpp2_port_disable(port); } +static void mvpp2_mac_disable_tx_lpi(struct phylink_config *config) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + + mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL1, + MVPP2_GMAC_LPI_CTRL1_REQ_EN, 0); +} + +static int mvpp2_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, + bool tx_clk_stop) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + u32 ts, tw, lpi1, status; + + status = readl(port->base + MVPP2_GMAC_STATUS0); + if (status & MVPP2_GMAC_STATUS0_GMII_SPEED) { + /* At 1G speeds, the timer resolution are 1us, and + * 802.3 says tw is 16.5us. Round up to 17us. + */ + tw = 17; + ts = timer; + } else { + /* At 100M speeds, the timer resolutions are 10us, and + * 802.3 says tw is 30us. 
+ */ + tw = 3; + ts = DIV_ROUND_UP(timer, 10); + } + + if (ts > 255) + ts = 255; + + /* Configure ts */ + mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL0, + MVPP2_GMAC_LPI_CTRL0_TS_MASK, + FIELD_PREP(MVPP2_GMAC_LPI_CTRL0_TS_MASK, ts)); + + lpi1 = readl(port->base + MVPP2_GMAC_LPI_CTRL1); + + /* Configure tw */ + lpi1 = u32_replace_bits(lpi1, tw, MVPP2_GMAC_LPI_CTRL1_TW_MASK); + + /* Enable LPI generation */ + writel(lpi1 | MVPP2_GMAC_LPI_CTRL1_REQ_EN, + port->base + MVPP2_GMAC_LPI_CTRL1); + + return 0; +} + static const struct phylink_mac_ops mvpp2_phylink_ops = { .mac_select_pcs = mvpp2_select_pcs, .mac_prepare = mvpp2_mac_prepare, @@ -6672,6 +6765,8 @@ static const struct phylink_mac_ops mvpp2_phylink_ops = { .mac_finish = mvpp2_mac_finish, .mac_link_up = mvpp2_mac_link_up, .mac_link_down = mvpp2_mac_link_down, + .mac_enable_tx_lpi = mvpp2_mac_enable_tx_lpi, + .mac_disable_tx_lpi = mvpp2_mac_disable_tx_lpi, }; /* Work-around for ACPI */ @@ -6901,9 +6996,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, for (thread = 0; thread < priv->nthreads; thread++) { port_pcpu = per_cpu_ptr(port->pcpu, thread); - hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL_PINNED_SOFT); - port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; + hrtimer_setup(&port_pcpu->tx_done_timer, mvpp2_hr_timer_cb, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED_SOFT); port_pcpu->timer_scheduled = false; port_pcpu->dev = dev; } @@ -6940,9 +7034,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->dev_port = port->id; port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; - port->pcs_gmac.neg_mode = true; port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; - port->pcs_xlg.neg_mode = true; if (!mvpp2_use_acpi_compat_mode(port_fwnode)) { port->phylink_config.dev = &dev->dev; @@ -6950,6 +7042,15 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->phylink_config.mac_capabilities = MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10; + __set_bit(PHY_INTERFACE_MODE_SGMII, + port->phylink_config.lpi_interfaces); + + port->phylink_config.lpi_capabilities = MAC_1000FD | MAC_100FD; + + /* Setup EEE. Choose 250us idle. */ + port->phylink_config.lpi_timer_default = 250; + port->phylink_config.eee_enabled_default = true; + if (port->priv->global_tx_fc) port->phylink_config.mac_capabilities |= MAC_SYM_PAUSE | MAC_ASYM_PAUSE; @@ -7024,6 +7125,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, goto err_free_port_pcpu; } port->phylink = phylink; + + mvpp2_mac_disable_tx_lpi(&port->phylink_config); } else { dev_warn(&pdev->dev, "Use link irqs for port#%d. 
FW update required\n", port->id); port->phylink = NULL; @@ -7627,8 +7730,9 @@ static int mvpp2_probe(struct platform_device *pdev) if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23) priv->hw_version = MVPP23; - /* Init mss lock */ + /* Init locks for shared packet processor resources */ spin_lock_init(&priv->mss_spinlock); + spin_lock_init(&priv->prs_spinlock); /* Initialize network controller */ err = mvpp2_init(pdev, priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 9af22f497a40..93e978bdf303 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -23,6 +23,8 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) { int i; + lockdep_assert_held(&priv->prs_spinlock); + if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; @@ -43,11 +45,13 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) } /* Initialize tcam entry from hw */ -int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, - int tid) +static int __mvpp2_prs_init_from_hw(struct mvpp2 *priv, + struct mvpp2_prs_entry *pe, int tid) { int i; + lockdep_assert_held(&priv->prs_spinlock); + if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; @@ -73,6 +77,18 @@ int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, return 0; } +int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid) +{ + int err; + + spin_lock_bh(&priv->prs_spinlock); + err = __mvpp2_prs_init_from_hw(priv, pe, tid); + spin_unlock_bh(&priv->prs_spinlock); + + return err; +} + /* Invalidate tcam hw entry */ static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) { @@ -374,7 +390,7 @@ static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); bits = mvpp2_prs_sram_ai_get(&pe); /* Sram store classification lookup ID in AI bits [5:0] */ @@ -441,7 +457,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); + __mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -469,14 +485,17 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) } /* Set port to unicast or multicast promiscuous mode */ -void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, - enum mvpp2_prs_l2_cast l2_cast, bool add) +static void __mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, + bool add) { struct mvpp2_prs_entry pe; unsigned char cast_match; unsigned int ri; int tid; + lockdep_assert_held(&priv->prs_spinlock); + if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { cast_match = MVPP2_PRS_UCAST_VAL; tid = MVPP2_PE_MAC_UC_PROMISCUOUS; @@ -489,7 +508,7 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, /* promiscuous mode - Accept unknown unicast or multicast packets */ if (priv->prs_shadow[tid].valid) { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -522,6 +541,14 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, 
mvpp2_prs_hw_write(priv, &pe); } +void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add) +{ + spin_lock_bh(&priv->prs_spinlock); + __mvpp2_prs_mac_promisc_set(priv, port, l2_cast, add); + spin_unlock_bh(&priv->prs_spinlock); +} + /* Set entry for dsa packets */ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, bool tagged, bool extend) @@ -539,7 +566,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -610,7 +637,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -673,7 +700,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); if (!match) continue; @@ -726,7 +753,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_DOUBLE) @@ -760,7 +787,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ mvpp2_prs_tcam_port_map_set(&pe, port_map); @@ -800,7 +827,7 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) && mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2); @@ -849,7 +876,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || @@ -880,7 +907,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ @@ -1213,8 +1240,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv) /* Create dummy entries for drop all and promiscuous modes */ mvpp2_prs_drop_fc(priv); mvpp2_prs_mac_drop_all_set(priv, 0, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); } /* Set default entries 
for various types of dsa packets */ @@ -1533,12 +1560,6 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) struct mvpp2_prs_entry pe; int err; - priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), - MVPP2_PRS_DBL_VLANS_MAX, - GFP_KERNEL); - if (!priv->prs_double_vlans) - return -ENOMEM; - /* Double VLAN: 0x88A8, 0x8100 */ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q, MVPP2_PRS_PORT_MASK); @@ -1941,7 +1962,7 @@ static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask) port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) continue; - mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid); mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); @@ -1970,6 +1991,8 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + /* Scan TCAM and see if entry with this <vid,port> already exist */ tid = mvpp2_prs_vid_range_find(port, vid, mask); @@ -1988,8 +2011,10 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) MVPP2_PRS_VLAN_FILT_MAX_ENTRY); /* There isn't room for a new VID filter */ - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&priv->prs_spinlock); return tid; + } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); pe.index = tid; @@ -1997,7 +2022,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Enable the current port */ @@ -2019,6 +2044,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); + spin_unlock_bh(&priv->prs_spinlock); return 0; } @@ -2028,15 +2054,16 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) struct mvpp2 *priv = port->priv; int tid; - /* Scan TCAM and see if entry with this <vid,port> already exist */ - tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + spin_lock_bh(&priv->prs_spinlock); - /* No such entry */ - if (tid < 0) - return; + /* Invalidate TCAM entry with this <vid,port>, if it exists */ + tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + if (tid >= 0) { + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; + } - mvpp2_prs_hw_inv(priv, tid); - priv->prs_shadow[tid].valid = false; + spin_unlock_bh(&priv->prs_spinlock); } /* Remove all existing VID filters on this port */ @@ -2045,6 +2072,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) struct mvpp2 *priv = port->priv; int tid; + spin_lock_bh(&priv->prs_spinlock); + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { if (priv->prs_shadow[tid].valid) { @@ -2052,6 +2081,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) priv->prs_shadow[tid].valid = false; } } + + spin_unlock_bh(&priv->prs_spinlock); } /* Remove VID filering entry for this port */ @@ -2060,10 +2091,14 @@ void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); struct mvpp2 *priv = port->priv; + spin_lock_bh(&priv->prs_spinlock); + /* Invalidate the guard entry */ mvpp2_prs_hw_inv(priv, tid); priv->prs_shadow[tid].valid = false; + + spin_unlock_bh(&priv->prs_spinlock); } /* Add guard entry that drops packets when no VID is matched on this port */ @@ 
-2079,6 +2114,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + pe.index = tid; reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); @@ -2111,6 +2148,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); + + spin_unlock_bh(&priv->prs_spinlock); } /* Parser default initialization */ @@ -2118,6 +2157,20 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) { int err, index, i; + priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, + sizeof(*priv->prs_shadow), + GFP_KERNEL); + if (!priv->prs_shadow) + return -ENOMEM; + + priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), + MVPP2_PRS_DBL_VLANS_MAX, + GFP_KERNEL); + if (!priv->prs_double_vlans) + return -ENOMEM; + + spin_lock_bh(&priv->prs_spinlock); + /* Enable tcam table */ mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); @@ -2136,12 +2189,6 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) mvpp2_prs_hw_inv(priv, index); - priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, - sizeof(*priv->prs_shadow), - GFP_KERNEL); - if (!priv->prs_shadow) - return -ENOMEM; - /* Always start from lookup = 0 */ for (index = 0; index < MVPP2_MAX_PORTS; index++) mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, @@ -2158,26 +2205,13 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) mvpp2_prs_vid_init(priv); err = mvpp2_prs_etype_init(priv); - if (err) - return err; - - err = mvpp2_prs_vlan_init(pdev, priv); - if (err) - return err; - - err = mvpp2_prs_pppoe_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip6_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip4_init(priv); - if (err) - return err; + err = err ? : mvpp2_prs_vlan_init(pdev, priv); + err = err ? : mvpp2_prs_pppoe_init(priv); + err = err ? : mvpp2_prs_ip6_init(priv); + err = err ? 
: mvpp2_prs_ip4_init(priv); - return 0; + spin_unlock_bh(&priv->prs_spinlock); + return err; } /* Compare MAC DA with tcam entry data */ @@ -2217,7 +2251,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, (priv->prs_shadow[tid].udf != udf_type)) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); if (mvpp2_prs_mac_range_equals(&pe, da, mask) && @@ -2229,7 +2263,8 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, } /* Update parser's mac da entry */ -int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +static int __mvpp2_prs_mac_da_accept(struct mvpp2_port *port, + const u8 *da, bool add) { unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; struct mvpp2 *priv = port->priv; @@ -2261,7 +2296,7 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -2317,6 +2352,17 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) return 0; } +int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +{ + int err; + + spin_lock_bh(&port->priv->prs_spinlock); + err = __mvpp2_prs_mac_da_accept(port, da, add); + spin_unlock_bh(&port->priv->prs_spinlock); + + return err; +} + int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) { struct mvpp2_port *port = netdev_priv(dev); @@ -2345,6 +2391,8 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) unsigned long pmap; int index, tid; + spin_lock_bh(&priv->prs_spinlock); + for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; @@ -2354,7 +2402,7 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); pmap = mvpp2_prs_tcam_port_map_get(&pe); @@ -2375,14 +2423,17 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) continue; /* Remove entry from TCAM */ - mvpp2_prs_mac_da_accept(port, da, false); + __mvpp2_prs_mac_da_accept(port, da, false); } + + spin_unlock_bh(&priv->prs_spinlock); } int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) { switch (type) { case MVPP2_TAG_TYPE_EDSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to EDSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); @@ -2393,9 +2444,11 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + spin_unlock_bh(&priv->prs_spinlock); break; case MVPP2_TAG_TYPE_DSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); @@ -2406,10 +2459,12 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break; case MVPP2_TAG_TYPE_MH: case MVPP2_TAG_TYPE_NONE: + spin_lock_bh(&priv->prs_spinlock); /* Remove port form EDSA and DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_TAGGED, 
MVPP2_PRS_DSA); @@ -2419,6 +2474,7 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break; default: @@ -2437,11 +2493,15 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&priv->prs_spinlock); return tid; + } pe.index = tid; @@ -2461,6 +2521,7 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); mvpp2_prs_hw_write(priv, &pe); + spin_unlock_bh(&priv->prs_spinlock); return 0; } @@ -2472,6 +2533,8 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&port->priv->prs_spinlock); + tid = mvpp2_prs_flow_find(port->priv, port->id); /* Such entry not exist */ @@ -2480,8 +2543,10 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) tid = mvpp2_prs_tcam_first_free(port->priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&port->priv->prs_spinlock); return tid; + } pe.index = tid; @@ -2492,13 +2557,14 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); } else { - mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid); } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); mvpp2_prs_hw_write(port->priv, &pe); + spin_unlock_bh(&port->priv->prs_spinlock); return 0; } @@ -2509,11 +2575,14 @@ int mvpp2_prs_hits(struct mvpp2 *priv, int index) if (index > MVPP2_PRS_TCAM_SRAM_SIZE) return -EINVAL; + spin_lock_bh(&priv->prs_spinlock); + mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index); val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG); val &= MVPP2_PRS_TCAM_HIT_CNT_MASK; + spin_unlock_bh(&priv->prs_spinlock); return val; } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c index 4f4d58189118..a88c006ea65b 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c @@ -150,17 +150,14 @@ octep_get_ethtool_stats(struct net_device *netdev, iface_rx_stats, iface_tx_stats); - for (q = 0; q < oct->num_oqs; q++) { - struct octep_iq *iq = oct->iq[q]; - struct octep_oq *oq = oct->oq[q]; - - tx_packets += iq->stats.instr_completed; - tx_bytes += iq->stats.bytes_sent; - tx_busy_errors += iq->stats.tx_busy; - - rx_packets += oq->stats.packets; - rx_bytes += oq->stats.bytes; - rx_alloc_errors += oq->stats.alloc_failures; + for (q = 0; q < OCTEP_MAX_QUEUES; q++) { + tx_packets += oct->stats_iq[q].instr_completed; + tx_bytes += oct->stats_iq[q].bytes_sent; + tx_busy_errors += oct->stats_iq[q].tx_busy; + + rx_packets += oct->stats_oq[q].packets; + rx_bytes += oct->stats_oq[q].bytes; + rx_alloc_errors += oct->stats_oq[q].alloc_failures; } i = 0; data[i++] = rx_packets; @@ -198,22 +195,18 @@ octep_get_ethtool_stats(struct net_device *netdev, data[i++] = iface_rx_stats->err_pkts; /* Per Tx Queue stats */ - for (q = 0; q < oct->num_iqs; q++) { - struct octep_iq *iq = oct->iq[q]; - - data[i++] = iq->stats.instr_posted; - data[i++] = 
iq->stats.instr_completed; - data[i++] = iq->stats.bytes_sent; - data[i++] = iq->stats.tx_busy; + for (q = 0; q < OCTEP_MAX_QUEUES; q++) { + data[i++] = oct->stats_iq[q].instr_posted; + data[i++] = oct->stats_iq[q].instr_completed; + data[i++] = oct->stats_iq[q].bytes_sent; + data[i++] = oct->stats_iq[q].tx_busy; } /* Per Rx Queue stats */ - for (q = 0; q < oct->num_oqs; q++) { - struct octep_oq *oq = oct->oq[q]; - - data[i++] = oq->stats.packets; - data[i++] = oq->stats.bytes; - data[i++] = oq->stats.alloc_failures; + for (q = 0; q < OCTEP_MAX_QUEUES; q++) { + data[i++] = oct->stats_oq[q].packets; + data[i++] = oct->stats_oq[q].bytes; + data[i++] = oct->stats_oq[q].alloc_failures; } } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 549436efc204..24499bb36c00 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -822,7 +822,7 @@ static inline int octep_iq_full_check(struct octep_iq *iq) if (unlikely(IQ_INSTR_SPACE(iq) > OCTEP_WAKE_QUEUE_THRESHOLD)) { netif_start_subqueue(iq->netdev, iq->q_no); - iq->stats.restart_cnt++; + iq->stats->restart_cnt++; return 0; } @@ -960,7 +960,7 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb, wmb(); /* Ring Doorbell to notify the NIC of new packets */ writel(iq->fill_cnt, iq->doorbell_reg); - iq->stats.instr_posted += iq->fill_cnt; + iq->stats->instr_posted += iq->fill_cnt; iq->fill_cnt = 0; return NETDEV_TX_OK; @@ -991,37 +991,24 @@ dma_map_err: static void octep_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { - u64 tx_packets, tx_bytes, rx_packets, rx_bytes; struct octep_device *oct = netdev_priv(netdev); + u64 tx_packets, tx_bytes, rx_packets, rx_bytes; int q; - if (netif_running(netdev)) - octep_ctrl_net_get_if_stats(oct, - OCTEP_CTRL_NET_INVALID_VFID, - &oct->iface_rx_stats, - &oct->iface_tx_stats); - tx_packets = 0; tx_bytes = 0; rx_packets = 0; rx_bytes = 0; - for (q = 0; q < oct->num_oqs; q++) { - struct octep_iq *iq = oct->iq[q]; - struct octep_oq *oq = oct->oq[q]; - - tx_packets += iq->stats.instr_completed; - tx_bytes += iq->stats.bytes_sent; - rx_packets += oq->stats.packets; - rx_bytes += oq->stats.bytes; + for (q = 0; q < OCTEP_MAX_QUEUES; q++) { + tx_packets += oct->stats_iq[q].instr_completed; + tx_bytes += oct->stats_iq[q].bytes_sent; + rx_packets += oct->stats_oq[q].packets; + rx_bytes += oct->stats_oq[q].bytes; } stats->tx_packets = tx_packets; stats->tx_bytes = tx_bytes; stats->rx_packets = rx_packets; stats->rx_bytes = rx_bytes; - stats->multicast = oct->iface_rx_stats.mcast_pkts; - stats->rx_errors = oct->iface_rx_stats.err_pkts; - stats->collisions = oct->iface_tx_stats.xscol; - stats->tx_fifo_errors = oct->iface_tx_stats.undflw; } /** @@ -1137,6 +1124,43 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features return err; } +static int octep_get_vf_config(struct net_device *dev, int vf, + struct ifla_vf_info *ivi) +{ + struct octep_device *oct = netdev_priv(dev); + + ivi->vf = vf; + ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr); + ivi->spoofchk = true; + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + ivi->trusted = false; + + return 0; +} + +static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +{ + struct octep_device *oct = netdev_priv(dev); + int err; + + if (!is_valid_ether_addr(mac)) { + dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac); + return -EADDRNOTAVAIL; + } + + 
dev_dbg(&oct->pdev->dev, "set vf-%d mac to %pM\n", vf, mac); + ether_addr_copy(oct->vf_info[vf].mac_addr, mac); + oct->vf_info[vf].flags |= OCTEON_PFVF_FLAG_MAC_SET_BY_PF; + + err = octep_ctrl_net_set_mac_addr(oct, vf, mac, true); + if (err) + dev_err(&oct->pdev->dev, + "Set VF%d MAC address failed via host control Mbox\n", + vf); + + return err; +} + static const struct net_device_ops octep_netdev_ops = { .ndo_open = octep_open, .ndo_stop = octep_stop, @@ -1146,6 +1170,8 @@ static const struct net_device_ops octep_netdev_ops = { .ndo_set_mac_address = octep_set_mac, .ndo_change_mtu = octep_change_mtu, .ndo_set_features = octep_set_features, + .ndo_get_vf_config = octep_get_vf_config, + .ndo_set_vf_mac = octep_set_vf_mac }; /** @@ -1197,7 +1223,7 @@ static void octep_hb_timeout_task(struct work_struct *work) miss_cnt); rtnl_lock(); if (netif_running(oct->netdev)) - octep_stop(oct->netdev); + dev_close(oct->netdev); rtnl_unlock(); } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h index fee59e0e0138..81ac4267811c 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h @@ -220,6 +220,7 @@ struct octep_iface_link_info { /* The Octeon VF device specific info data structure.*/ struct octep_pfvf_info { u8 mac_addr[ETH_ALEN]; + u32 flags; u32 mbox_version; }; @@ -257,11 +258,17 @@ struct octep_device { /* Pointers to Octeon Tx queues */ struct octep_iq *iq[OCTEP_MAX_IQ]; + /* Per iq stats */ + struct octep_iq_stats stats_iq[OCTEP_MAX_IQ]; + /* Rx queues (OQ: Output Queue) */ u16 num_oqs; /* Pointers to Octeon Rx queues */ struct octep_oq *oq[OCTEP_MAX_OQ]; + /* Per oq stats */ + struct octep_oq_stats stats_oq[OCTEP_MAX_OQ]; + /* Hardware port number of the PCIe interface */ u16 pcie_port; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c index e6eb98d70f3c..ebecdd29f3bd 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c @@ -156,12 +156,23 @@ static void octep_pfvf_set_mac_addr(struct octep_device *oct, u32 vf_id, { int err; + if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) { + dev_err(&oct->pdev->dev, + "VF%d attempted to override administrative set MAC address\n", + vf_id); + rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK; + return; + } + err = octep_ctrl_net_set_mac_addr(oct, vf_id, cmd.s_set_mac.mac_addr, true); if (err) { rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK; - dev_err(&oct->pdev->dev, "Set VF MAC address failed via host control Mbox\n"); + dev_err(&oct->pdev->dev, "Set VF%d MAC address failed via host control Mbox\n", + vf_id); return; } + + ether_addr_copy(oct->vf_info[vf_id].mac_addr, cmd.s_set_mac.mac_addr); rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; } @@ -171,10 +182,18 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id, { int err; + if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) { + dev_dbg(&oct->pdev->dev, "VF%d MAC address set by PF\n", vf_id); + ether_addr_copy(rsp->s_set_mac.mac_addr, + oct->vf_info[vf_id].mac_addr); + rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; + return; + } err = octep_ctrl_net_get_mac_addr(oct, vf_id, rsp->s_set_mac.mac_addr); if (err) { rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK; - dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n"); + 
dev_err(&oct->pdev->dev, "Get VF%d MAC address failed via host control Mbox\n", + vf_id); return; } rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h index 0dc6eead292a..386a095a99bc 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h @@ -8,8 +8,6 @@ #ifndef _OCTEP_PFVF_MBOX_H_ #define _OCTEP_PFVF_MBOX_H_ -/* VF flags */ -#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT_ULL(0) /* PF has set VF MAC address */ #define OCTEON_SDP_16K_HW_FRS 16380UL #define OCTEON_SDP_64K_HW_FRS 65531UL @@ -23,6 +21,10 @@ enum octep_pfvf_mbox_version { #define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2 +/* VF flags */ +/* PF has set VF MAC address */ +#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT(0) + enum octep_pfvf_mbox_opcode { OCTEP_PFVF_MBOX_CMD_VERSION, OCTEP_PFVF_MBOX_CMD_SET_MTU, diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c index 8af75cb37c3e..82b6b19e76b4 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c @@ -87,7 +87,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq) page = dev_alloc_page(); if (unlikely(!page)) { dev_err(oq->dev, "refill: rx buffer alloc failed\n"); - oq->stats.alloc_failures++; + oq->stats->alloc_failures++; break; } @@ -98,7 +98,7 @@ static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq) "OQ-%d buffer refill: DMA mapping error!\n", oq->q_no); put_page(page); - oq->stats.alloc_failures++; + oq->stats->alloc_failures++; break; } oq->buff_info[refill_idx].page = page; @@ -134,6 +134,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no) oq->netdev = oct->netdev; oq->dev = &oct->pdev->dev; oq->q_no = q_no; + oq->stats = &oct->stats_oq[q_no]; oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf); oq->ring_size_mask = oq->max_count - 1; oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf); @@ -443,7 +444,7 @@ static int __octep_oq_process_rx(struct octep_device *oct, if (!skb) { octep_oq_drop_rx(oq, buff_info, &read_idx, &desc_used); - oq->stats.alloc_failures++; + oq->stats->alloc_failures++; continue; } skb_reserve(skb, data_offset); @@ -494,8 +495,8 @@ static int __octep_oq_process_rx(struct octep_device *oct, oq->host_read_idx = read_idx; oq->refill_count += desc_used; - oq->stats.packets += pkt; - oq->stats.bytes += rx_bytes; + oq->stats->packets += pkt; + oq->stats->bytes += rx_bytes; return pkt; } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h index 3b08e2d560dc..b4696c93d0e6 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h @@ -186,8 +186,8 @@ struct octep_oq { */ u8 __iomem *pkts_sent_reg; - /* Statistics for this OQ. */ - struct octep_oq_stats stats; + /* Pointer to statistics for this OQ. 
*/ + struct octep_oq_stats *stats; /* Packets pending to be processed */ u32 pkts_pending; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c index 06851b78aa28..08ee90013fef 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c @@ -81,9 +81,9 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget) } iq->pkts_processed += compl_pkts; - iq->stats.instr_completed += compl_pkts; - iq->stats.bytes_sent += compl_bytes; - iq->stats.sgentry_sent += compl_sg; + iq->stats->instr_completed += compl_pkts; + iq->stats->bytes_sent += compl_bytes; + iq->stats->sgentry_sent += compl_sg; iq->flush_index = fi; netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes); @@ -187,6 +187,7 @@ static int octep_setup_iq(struct octep_device *oct, int q_no) iq->netdev = oct->netdev; iq->dev = &oct->pdev->dev; iq->q_no = q_no; + iq->stats = &oct->stats_iq[q_no]; iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf); iq->ring_size_mask = iq->max_count - 1; iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h index 875a2c34091f..58fb39dda977 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h @@ -170,8 +170,8 @@ struct octep_iq { */ u16 flush_index; - /* Statistics for this input queue. */ - struct octep_iq_stats stats; + /* Pointer to statistics for this input queue. */ + struct octep_iq_stats *stats; /* Pointer to the Virtual Base addr of the input ring. */ struct octep_tx_desc_hw *desc_ring; diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c index 7b21439a315f..d60441928ba9 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c @@ -114,12 +114,9 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev, iface_tx_stats = &oct->iface_tx_stats; iface_rx_stats = &oct->iface_rx_stats; - for (q = 0; q < oct->num_oqs; q++) { - struct octep_vf_iq *iq = oct->iq[q]; - struct octep_vf_oq *oq = oct->oq[q]; - - tx_busy_errors += iq->stats.tx_busy; - rx_alloc_errors += oq->stats.alloc_failures; + for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) { + tx_busy_errors += oct->stats_iq[q].tx_busy; + rx_alloc_errors += oct->stats_oq[q].alloc_failures; } i = 0; data[i++] = rx_alloc_errors; @@ -134,22 +131,18 @@ static void octep_vf_get_ethtool_stats(struct net_device *netdev, data[i++] = iface_rx_stats->dropped_octets_fifo_full; /* Per Tx Queue stats */ - for (q = 0; q < oct->num_iqs; q++) { - struct octep_vf_iq *iq = oct->iq[q]; - - data[i++] = iq->stats.instr_posted; - data[i++] = iq->stats.instr_completed; - data[i++] = iq->stats.bytes_sent; - data[i++] = iq->stats.tx_busy; + for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) { + data[i++] = oct->stats_iq[q].instr_posted; + data[i++] = oct->stats_iq[q].instr_completed; + data[i++] = oct->stats_iq[q].bytes_sent; + data[i++] = oct->stats_iq[q].tx_busy; } /* Per Rx Queue stats */ for (q = 0; q < oct->num_oqs; q++) { - struct octep_vf_oq *oq = oct->oq[q]; - - data[i++] = oq->stats.packets; - data[i++] = oq->stats.bytes; - data[i++] = oq->stats.alloc_failures; + data[i++] = oct->stats_oq[q].packets; + data[i++] = oct->stats_oq[q].bytes; + data[i++] = oct->stats_oq[q].alloc_failures; } } diff --git 
a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c index 7e6771c9cdbb..420c3f4cf741 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c @@ -18,8 +18,6 @@ #include "octep_vf_config.h" #include "octep_vf_main.h" -struct workqueue_struct *octep_vf_wq; - /* Supported Devices */ static const struct pci_device_id octep_vf_pci_id_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)}, @@ -574,7 +572,7 @@ static int octep_vf_iq_full_check(struct octep_vf_iq *iq) * caused queues to get re-enabled after * being stopped */ - iq->stats.restart_cnt++; + iq->stats->restart_cnt++; fallthrough; case 1: /* Queue left enabled, since IQ is not yet full*/ return 0; @@ -731,7 +729,7 @@ ring_dbell: /* Flush the hw descriptors before writing to doorbell */ smp_wmb(); writel(iq->fill_cnt, iq->doorbell_reg); - iq->stats.instr_posted += iq->fill_cnt; + iq->stats->instr_posted += iq->fill_cnt; iq->fill_cnt = 0; return NETDEV_TX_OK; } @@ -786,27 +784,16 @@ static void octep_vf_get_stats64(struct net_device *netdev, tx_bytes = 0; rx_packets = 0; rx_bytes = 0; - for (q = 0; q < oct->num_oqs; q++) { - struct octep_vf_iq *iq = oct->iq[q]; - struct octep_vf_oq *oq = oct->oq[q]; - - tx_packets += iq->stats.instr_completed; - tx_bytes += iq->stats.bytes_sent; - rx_packets += oq->stats.packets; - rx_bytes += oq->stats.bytes; + for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) { + tx_packets += oct->stats_iq[q].instr_completed; + tx_bytes += oct->stats_iq[q].bytes_sent; + rx_packets += oct->stats_oq[q].packets; + rx_bytes += oct->stats_oq[q].bytes; } stats->tx_packets = tx_packets; stats->tx_bytes = tx_bytes; stats->rx_packets = rx_packets; stats->rx_bytes = rx_bytes; - if (!octep_vf_get_if_stats(oct)) { - stats->multicast = oct->iface_rx_stats.mcast_pkts; - stats->rx_errors = oct->iface_rx_stats.err_pkts; - stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full + - oct->iface_rx_stats.err_pkts; - stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full; - stats->tx_dropped = oct->iface_tx_stats.dropped; - } } /** @@ -846,7 +833,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue) struct octep_vf_device *oct = netdev_priv(netdev); netdev_hold(netdev, NULL, GFP_ATOMIC); - schedule_work(&oct->tx_timeout_task); + if (!schedule_work(&oct->tx_timeout_task)) + netdev_put(netdev, NULL); + } static int octep_vf_set_mac(struct net_device *netdev, void *p) diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h index 5769f62545cd..b9f13506f462 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h @@ -246,11 +246,17 @@ struct octep_vf_device { /* Pointers to Octeon Tx queues */ struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ]; + /* Per iq stats */ + struct octep_vf_iq_stats stats_iq[OCTEP_VF_MAX_IQ]; + /* Rx queues (OQ: Output Queue) */ u16 num_oqs; /* Pointers to Octeon Rx queues */ struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ]; + /* Per oq stats */ + struct octep_vf_oq_stats stats_oq[OCTEP_VF_MAX_OQ]; + /* Hardware port number of the PCIe interface */ u16 pcie_port; @@ -314,8 +320,6 @@ static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct) #define octep_vf_read_csr64(octep_vf_dev, reg_off) \ readq((octep_vf_dev)->mmio.hw_addr + (reg_off)) -extern struct workqueue_struct 
*octep_vf_wq; - int octep_vf_device_setup(struct octep_vf_device *oct); int octep_vf_setup_iqs(struct octep_vf_device *oct); void octep_vf_free_iqs(struct octep_vf_device *oct); diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c index 82821bc28634..d70c8be3cfc4 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c @@ -87,7 +87,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o page = dev_alloc_page(); if (unlikely(!page)) { dev_err(oq->dev, "refill: rx buffer alloc failed\n"); - oq->stats.alloc_failures++; + oq->stats->alloc_failures++; break; } @@ -98,7 +98,7 @@ static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *o "OQ-%d buffer refill: DMA mapping error!\n", oq->q_no); put_page(page); - oq->stats.alloc_failures++; + oq->stats->alloc_failures++; break; } oq->buff_info[refill_idx].page = page; @@ -134,6 +134,7 @@ static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no) oq->netdev = oct->netdev; oq->dev = &oct->pdev->dev; oq->q_no = q_no; + oq->stats = &oct->stats_oq[q_no]; oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf); oq->ring_size_mask = oq->max_count - 1; oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf); @@ -458,8 +459,8 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct, oq->host_read_idx = read_idx; oq->refill_count += desc_used; - oq->stats.packets += pkt; - oq->stats.bytes += rx_bytes; + oq->stats->packets += pkt; + oq->stats->bytes += rx_bytes; return pkt; } diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h index fe46838b5200..9e296b7d7e34 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h @@ -187,7 +187,7 @@ struct octep_vf_oq { u8 __iomem *pkts_sent_reg; /* Statistics for this OQ. 
*/ - struct octep_vf_oq_stats stats; + struct octep_vf_oq_stats *stats; /* Packets pending to be processed */ u32 pkts_pending; diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c index 47a5c054fdb6..8180e5ce3d7e 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c @@ -82,9 +82,9 @@ int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget) } iq->pkts_processed += compl_pkts; - iq->stats.instr_completed += compl_pkts; - iq->stats.bytes_sent += compl_bytes; - iq->stats.sgentry_sent += compl_sg; + iq->stats->instr_completed += compl_pkts; + iq->stats->bytes_sent += compl_bytes; + iq->stats->sgentry_sent += compl_sg; iq->flush_index = fi; netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts, @@ -186,6 +186,7 @@ static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no) iq->netdev = oct->netdev; iq->dev = &oct->pdev->dev; iq->q_no = q_no; + iq->stats = &oct->stats_iq[q_no]; iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf); iq->ring_size_mask = iq->max_count - 1; iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf); diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h index f338b975103c..1cede90e3a5f 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h @@ -129,7 +129,7 @@ struct octep_vf_iq { u16 flush_index; /* Statistics for this input queue. */ - struct octep_vf_iq_stats stats; + struct octep_vf_iq_stats *stats; /* Pointer to the Virtual Base addr of the input ring. */ struct octep_vf_tx_desc_hw *desc_ring; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index ccea37847df8..532813d8d028 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -12,4 +12,4 @@ rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \ - rvu_rep.o + rvu_rep.o cn20k/mbox_init.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 8216f843a7cd..4ff19a04b23e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -66,8 +66,18 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en); /* Supported devices */ static const struct pci_device_id cgx_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) }, - { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) }, - { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_A) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_A) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_B) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_B) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CN20KA) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM, + PCI_ANY_ID, 
PCI_SUBSYS_DEVID_CNF20KA) }, { 0, } /* end of table */ }; @@ -707,6 +717,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat) if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; + + /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */ + if (idx >= CGX_RX_STAT_GLOBAL_INDEX) + lmac_id = 0; + *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8)); return 0; } @@ -1167,17 +1182,25 @@ static int cgx_link_usertable_index_map(int speed) static void set_mod_args(struct cgx_set_link_mode_args *args, u32 speed, u8 duplex, u8 autoneg, u64 mode) { - /* Fill default values incase of user did not pass - * valid parameters + int mode_baseidx; + u8 cgx_mode; + + if (args->multimode) { + args->mode |= mode; + return; + } + + /* Derive mode_base_idx and mode fields based + * on cgx_mode value */ - if (args->duplex == DUPLEX_UNKNOWN) - args->duplex = duplex; - if (args->speed == SPEED_UNKNOWN) - args->speed = speed; - if (args->an == AUTONEG_UNKNOWN) - args->an = autoneg; + cgx_mode = find_first_bit((unsigned long *)&mode, + CGX_MODE_MAX); args->mode = mode; - args->ports = 0; + mode_baseidx = cgx_mode - 41; + if (mode_baseidx > 0) { + args->mode_baseidx = 1; + args->mode = BIT_ULL(mode_baseidx); + } } static void otx2_map_ethtool_link_modes(u64 bitmask, @@ -1185,16 +1208,16 @@ static void otx2_map_ethtool_link_modes(u64 bitmask, { switch (bitmask) { case ETHTOOL_LINK_MODE_10baseT_Half_BIT: - set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII)); + set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII_10M_BIT)); break; case ETHTOOL_LINK_MODE_10baseT_Full_BIT: - set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII)); + set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII_10M_BIT)); break; case ETHTOOL_LINK_MODE_100baseT_Half_BIT: - set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII)); + set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII_100M_BIT)); break; case ETHTOOL_LINK_MODE_100baseT_Full_BIT: - set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII)); + set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII_100M_BIT)); break; case ETHTOOL_LINK_MODE_1000baseT_Half_BIT: set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII)); @@ -1466,25 +1489,36 @@ int cgx_get_fwdata_base(u64 *base) } int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args, + struct cgx_lmac_fwdata_s *linkmodes, int cgx_id, int lmac_id) { struct cgx *cgx = cgxd; u64 req = 0, resp; + u8 bit; if (!cgx) return -ENODEV; - if (args.mode) - otx2_map_ethtool_link_modes(args.mode, &args); - if (!args.speed && args.duplex && !args.an) - return -EINVAL; + for_each_set_bit(bit, args.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS) + otx2_map_ethtool_link_modes(bit, &args); + + if (args.multimode) { + if (linkmodes->advertised_link_modes_own != CGX_CMD_OWN_NS) + return -EBUSY; + + linkmodes->advertised_link_modes = args.mode; + /* Update ownership */ + linkmodes->advertised_link_modes_own = CGX_CMD_OWN_FIRMWARE; + args.mode = GENMASK_ULL(41, 0); + } req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req); req = FIELD_SET(CMDMODECHANGE_SPEED, cgx_link_usertable_index_map(args.speed), req); req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req); req = FIELD_SET(CMDMODECHANGE_AN, args.an, req); - req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req); + req = FIELD_SET(CMDMODECHANGE_MODE_BASEIDX, args.mode_baseidx, req); req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req); return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); @@ -1670,9 +1704,11 @@ unsigned long cgx_get_lmac_bmap(void *cgxd) static int cgx_lmac_init(struct cgx 
*cgx) { + u8 max_dmac_filters; struct lmac *lmac; + int err, filter; + unsigned int i; u64 lmac_list; - int i, err; /* lmac_list specifies which lmacs are enabled * when bit n is set to 1, LMAC[n] is enabled @@ -1698,7 +1734,7 @@ static int cgx_lmac_init(struct cgx *cgx) err = -ENOMEM; goto err_lmac_free; } - sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); + sprintf(lmac->name, "cgx_fwi_%u_%u", cgx->cgx_id, i); if (cgx->mac_ops->non_contiguous_serdes_lane) { lmac->lmac_id = __ffs64(lmac_list); lmac_list &= ~BIT_ULL(lmac->lmac_id); @@ -1711,6 +1747,8 @@ static int cgx_lmac_init(struct cgx *cgx) cgx->mac_ops->dmac_filter_count / cgx->lmac_count; + max_dmac_filters = lmac->mac_to_index_bmap.max; + err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap); if (err) goto err_name_free; @@ -1740,6 +1778,15 @@ static int cgx_lmac_init(struct cgx *cgx) set_bit(lmac->lmac_id, &cgx->lmac_bmap); cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true); lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id); + + /* Disable stale DMAC filters for sane state */ + for (filter = 0; filter < max_dmac_filters; filter++) + cgx_lmac_addr_del(cgx->cgx_id, lmac->lmac_id, filter); + + /* As cgx_lmac_addr_del does not clear entry for index 0 + * so it needs to be done explicitly + */ + cgx_lmac_addr_reset(cgx->cgx_id, lmac->lmac_id); } /* Start X2P reset on given MAC block */ @@ -1917,6 +1964,12 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_disable_device; } + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "DMA mask config failed, abort\n"); + goto err_release_regions; + } + /* MAP configuration registers */ cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); if (!cgx->reg_base) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 1cf12e5c7da8..950231e7ea71 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -171,6 +171,7 @@ int cgx_set_fec(u64 fec, int cgx_id, int lmac_id); int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp); int cgx_get_phy_fec_stats(void *cgxd, int lmac_id); int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args, + struct cgx_lmac_fwdata_s *linkmodes, int cgx_id, int lmac_id); u64 cgx_features_get(void *cgxd); struct mac_ops *get_mac_ops(void *cgxd); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h index d4a27c882a5b..39352d451cc3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h @@ -95,7 +95,31 @@ enum CGX_MODE_ { CGX_MODE_100G_C2M, CGX_MODE_100G_CR4, CGX_MODE_100G_KR4, - CGX_MODE_MAX /* = 29 */ + CGX_MODE_LAUI_2_C2C_BIT, + CGX_MODE_LAUI_2_C2M_BIT, + CGX_MODE_50GBASE_CR2_C_BIT, + CGX_MODE_50GBASE_KR2_C_BIT, /* = 30 */ + CGX_MODE_100GAUI_2_C2C_BIT, + CGX_MODE_100GAUI_2_C2M_BIT, + CGX_MODE_100GBASE_CR2_BIT, + CGX_MODE_100GBASE_KR2_BIT, + CGX_MODE_SFI_1G_BIT, + CGX_MODE_25GBASE_CR_C_BIT, + CGX_MODE_25GBASE_KR_C_BIT, + CGX_MODE_SGMII_10M_BIT, + CGX_MODE_SGMII_100M_BIT, /* = 39 */ + CGX_MODE_2500_BASEX_BIT = 42, /* Mode group 1 */ + CGX_MODE_5000_BASEX_BIT, + CGX_MODE_O_USGMII_BIT, + CGX_MODE_Q_USGMII_BIT, + CGX_MODE_2_5G_USXGMII_BIT, + CGX_MODE_5G_USXGMII_BIT, + CGX_MODE_10G_SXGMII_BIT, + CGX_MODE_10G_DXGMII_BIT, + CGX_MODE_10G_QXGMII_BIT, + CGX_MODE_TP_BIT, + CGX_MODE_FIBER_BIT, + 
CGX_MODE_MAX /* = 53 */ }; /* REQUEST ID types. Input to firmware */ enum cgx_cmd_id { @@ -258,7 +282,12 @@ struct cgx_lnk_sts { #define CMDMODECHANGE_SPEED GENMASK_ULL(11, 8) #define CMDMODECHANGE_DUPLEX GENMASK_ULL(12, 12) #define CMDMODECHANGE_AN GENMASK_ULL(13, 13) -#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14) +/* this field categorize the mode ID(FLAGS) range to accommodate + * more modes. + * To specify mode ID range of 0 - 41, this field will be 0. + * To specify mode ID range of 42 - 83, this field will be 1. + */ +#define CMDMODECHANGE_MODE_BASEIDX GENMASK_ULL(21, 20) #define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22) /* LINK_BRING_UP command timeout */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h new file mode 100644 index 000000000000..4285b5d6a6a2 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#ifndef CN20K_API_H +#define CN20K_API_H + +#include "../rvu.h" + +struct ng_rvu { + struct mbox_ops *rvu_mbox_ops; + struct qmem *pf_mbox_addr; + struct qmem *vf_mbox_addr; +}; + +/* Mbox related APIs */ +int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int num); +int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, + int num, int type, unsigned long *pf_bmap); +void cn20k_free_mbox_memory(struct rvu *rvu); +int cn20k_register_afpf_mbox_intr(struct rvu *rvu); +int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start); +void cn20k_rvu_enable_mbox_intr(struct rvu *rvu); +void cn20k_rvu_unregister_interrupts(struct rvu *rvu); +int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev, + void *reg_base, int direction, int ndevs); +void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs); +void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs); +#endif /* CN20K_API_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c new file mode 100644 index 000000000000..bd3aab7770dd --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#include <linux/interrupt.h> +#include <linux/irq.h> + +#include "rvu_trace.h" +#include "mbox.h" +#include "reg.h" +#include "api.h" + +static irqreturn_t cn20k_afvf_mbox_intr_handler(int irq, void *rvu_irq) +{ + struct rvu_irq_data *rvu_irq_data = rvu_irq; + struct rvu *rvu = rvu_irq_data->rvu; + u64 intr; + + /* Sync with mbox memory region */ + rmb(); + + /* Clear interrupts */ + intr = rvupf_read64(rvu, rvu_irq_data->intr_status); + rvupf_write64(rvu, rvu_irq_data->intr_status, intr); + + if (intr) + trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr); + + rvu_irq_data->afvf_queue_work_hdlr(&rvu->afvf_wq_info, rvu_irq_data->start, + rvu_irq_data->mdevs, intr); + + return IRQ_HANDLED; +} + +int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start) +{ + struct rvu_irq_data *irq_data; + int intr_vec, offset, vec = 0; + int err; + + /* irq data for 4 VFPF intr vectors */ + irq_data = devm_kcalloc(rvu->dev, 4, + sizeof(struct rvu_irq_data), GFP_KERNEL); + if (!irq_data) + return -ENOMEM; + + for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <= + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; + intr_vec++, vec++) { + switch (intr_vec) { + case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF_INTX(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF_INTX(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF1_INTX(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1: + irq_data[vec].intr_status = RVU_MBOX_PF_VFPF1_INTX(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 64; + break; + } + irq_data[vec].afvf_queue_work_hdlr = + rvu_queue_work; + offset = pf_vec_start + intr_vec; + irq_data[vec].vec_num = offset; + irq_data[vec].rvu = rvu; + + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAF VFAF%d Mbox%d", + vec / 2, vec % 2); + err = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], + &irq_data[vec]); + if (err) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for AFVF mbox irq\n"); + return err; + } + rvu->irq_allocated[offset] = true; + } + + return 0; +} + +/* CN20K mbox PFx => AF irq handler */ +static irqreturn_t cn20k_mbox_pf_common_intr_handler(int irq, void *rvu_irq) +{ + struct rvu_irq_data *rvu_irq_data = rvu_irq; + struct rvu *rvu = rvu_irq_data->rvu; + u64 intr; + + /* Clear interrupts */ + intr = rvu_read64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status); + rvu_write64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status, intr); + + if (intr) + trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr); + + /* Sync with mbox memory region */ + rmb(); + + rvu_irq_data->rvu_queue_work_hdlr(&rvu->afpf_wq_info, + rvu_irq_data->start, + rvu_irq_data->mdevs, intr); + + return IRQ_HANDLED; +} + +void cn20k_rvu_enable_mbox_intr(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + + /* Clear spurious irqs, if any */ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFAF_INT(0), INTR_MASK(hw->total_pfs)); + + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFAF_INT(1), INTR_MASK(hw->total_pfs - 64)); + + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFAF1_INT(0), INTR_MASK(hw->total_pfs)); + + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFAF1_INT(1), INTR_MASK(hw->total_pfs - 64)); + + /* Enable 
mailbox interrupt for all PFs except PF0 i.e AF itself */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(0), + INTR_MASK(hw->total_pfs) & ~1ULL); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(1), + INTR_MASK(hw->total_pfs - 64)); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(0), + INTR_MASK(hw->total_pfs) & ~1ULL); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(1), + INTR_MASK(hw->total_pfs - 64)); +} + +void cn20k_rvu_unregister_interrupts(struct rvu *rvu) +{ + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(0), + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(1), + INTR_MASK(rvu->hw->total_pfs - 64)); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(0), + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(1), + INTR_MASK(rvu->hw->total_pfs - 64)); +} + +int cn20k_register_afpf_mbox_intr(struct rvu *rvu) +{ + struct rvu_irq_data *irq_data; + int intr_vec, ret, vec = 0; + + /* irq data for 4 PF intr vectors */ + irq_data = devm_kcalloc(rvu->dev, 4, + sizeof(struct rvu_irq_data), GFP_KERNEL); + if (!irq_data) + return -ENOMEM; + + for (intr_vec = RVU_AF_CN20K_INT_VEC_PFAF_MBOX0; intr_vec <= + RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1; intr_vec++, + vec++) { + switch (intr_vec) { + case RVU_AF_CN20K_INT_VEC_PFAF_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_AF_PFAF_INT(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_AF_CN20K_INT_VEC_PFAF_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_AF_PFAF_INT(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 96; + break; + case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_AF_PFAF1_INT(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_AF_PFAF1_INT(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 96; + break; + } + irq_data[vec].rvu_queue_work_hdlr = rvu_queue_work; + irq_data[vec].vec_num = intr_vec; + irq_data[vec].rvu = rvu; + + /* Register mailbox interrupt handler */ + sprintf(&rvu->irq_name[intr_vec * NAME_SIZE], + "RVUAF PFAF%d Mbox%d", + vec / 2, vec % 2); + ret = request_irq(pci_irq_vector(rvu->pdev, intr_vec), + rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0, + &rvu->irq_name[intr_vec * NAME_SIZE], + &irq_data[vec]); + if (ret) + return ret; + + rvu->irq_allocated[intr_vec] = true; + } + + return 0; +} + +int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, + int num, int type, unsigned long *pf_bmap) +{ + int region; + u64 bar; + + if (type == TYPE_AFVF) { + for (region = 0; region < num; region++) { + if (!test_bit(region, pf_bmap)) + continue; + + bar = (u64)phys_to_virt((u64)rvu->ng_rvu->vf_mbox_addr->base); + bar += region * MBOX_SIZE; + mbox_addr[region] = (void *)bar; + + if (!mbox_addr[region]) + return -ENOMEM; + } + return 0; + } + + for (region = 0; region < num; region++) { + if (!test_bit(region, pf_bmap)) + continue; + + bar = (u64)phys_to_virt((u64)rvu->ng_rvu->pf_mbox_addr->base); + bar += region * MBOX_SIZE; + + mbox_addr[region] = (void *)bar; + + if (!mbox_addr[region]) + return -ENOMEM; + } + return 0; +} + +static int rvu_alloc_mbox_memory(struct rvu *rvu, int type, + int ndevs, int mbox_size) +{ + struct qmem *mbox_addr; + dma_addr_t iova; + int pf, err; + + /* Allocate contiguous memory for mailbox communication. 
+ * eg: AF <=> PFx mbox memory + * This allocated memory is split into chunks of MBOX_SIZE + * and setup into each of the RVU PFs. In HW this memory will + * get aliased to an offset within BAR2 of those PFs. + * + * AF will access mbox memory using direct physical addresses + * and PFs will access the same shared memory from BAR2. + * + * PF <=> VF mbox memory also works in the same fashion. + * AFPF, PFVF requires IOVA to be used to maintain the mailbox msgs + */ + + err = qmem_alloc(rvu->dev, &mbox_addr, ndevs, mbox_size); + if (err) + return -ENOMEM; + + switch (type) { + case TYPE_AFPF: + rvu->ng_rvu->pf_mbox_addr = mbox_addr; + iova = (u64)mbox_addr->iova; + for (pf = 0; pf < ndevs; pf++) { + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFX_ADDR(pf), + (u64)iova); + iova += mbox_size; + } + break; + case TYPE_AFVF: + rvu->ng_rvu->vf_mbox_addr = mbox_addr; + rvupf_write64(rvu, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova); + break; + default: + return 0; + } + + return 0; +} + +static struct mbox_ops cn20k_mbox_ops = { + .pf_intr_handler = cn20k_mbox_pf_common_intr_handler, + .afvf_intr_handler = cn20k_afvf_mbox_intr_handler, +}; + +int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int ndevs) +{ + int dev; + + if (!is_cn20k(rvu->pdev)) + return 0; + + rvu->ng_rvu->rvu_mbox_ops = &cn20k_mbox_ops; + + if (type == TYPE_AFVF) { + rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_PF_VF_CFG, ilog2(MBOX_SIZE)); + } else { + for (dev = 0; dev < ndevs; dev++) + rvu_write64(rvu, BLKADDR_RVUM, + RVU_MBOX_AF_PFX_CFG(dev), ilog2(MBOX_SIZE)); + } + + return rvu_alloc_mbox_memory(rvu, type, ndevs, MBOX_SIZE); +} + +void cn20k_free_mbox_memory(struct rvu *rvu) +{ + if (!is_cn20k(rvu->pdev)) + return; + + qmem_free(rvu->dev, rvu->ng_rvu->pf_mbox_addr); + qmem_free(rvu->dev, rvu->ng_rvu->vf_mbox_addr); +} + +void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs) +{ + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); + + if (vfs <= 64) + return; + + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); +} + +void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs) +{ + /* Clear any pending interrupts and enable AF VF interrupts for + * the first 64 VFs. + */ + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(vfs)); + + /* FLR */ + rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs)); + + /* Same for remaining VFs, if any. 
*/ + if (vfs <= 64) + return; + + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); + + rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); +} + +int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf, + int blkaddr, int nixlf) +{ + int qints, hwctx_size, err; + u64 cfg, ctx_cfg; + + if (is_rvu_otx2(rvu) || is_cn20k(rvu->pdev)) + return 0; + + ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3); + /* Alloc memory for CQINT's HW contexts */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + qints = (cfg >> 24) & 0xFFF; + hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size); + if (err) + return -ENOMEM; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf), + (u64)pfvf->cq_ints_ctx->iova); + + /* Alloc memory for QINT's HW contexts */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + qints = (cfg >> 12) & 0xFFF; + hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF); + err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size); + if (err) + return -ENOMEM; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf), + (u64)pfvf->nix_qints_ctx->iova); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h new file mode 100644 index 000000000000..affb39803120 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#ifndef RVU_MBOX_REG_H +#define RVU_MBOX_REG_H +#include "../rvu.h" +#include "../rvu_reg.h" + +/* RVUM block registers */ +#define RVU_PF_DISC (0x0) +#define RVU_PRIV_PFX_DISC(a) (0x8000208 | (a) << 16) +#define RVU_PRIV_HWVFX_DISC(a) (0xD000000 | (a) << 12) + +/* Mbox Registers */ +/* RVU AF BAR0 Mbox registers for AF => PFx */ +#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4) +#define RVU_MBOX_AF_PFX_CFG(a) (0x6000 | (a) << 4) +#define RVU_MBOX_AF_AFPFX_TRIGX(a) (0x9000 | (a) << 3) +#define RVU_MBOX_AF_PFAF_INT(a) (0x2980 | (a) << 6) +#define RVU_MBOX_AF_PFAF_INT_W1S(a) (0x2988 | (a) << 6) +#define RVU_MBOX_AF_PFAF_INT_ENA_W1S(a) (0x2990 | (a) << 6) +#define RVU_MBOX_AF_PFAF_INT_ENA_W1C(a) (0x2998 | (a) << 6) +#define RVU_MBOX_AF_PFAF1_INT(a) (0x29A0 | (a) << 6) +#define RVU_MBOX_AF_PFAF1_INT_W1S(a) (0x29A8 | (a) << 6) +#define RVU_MBOX_AF_PFAF1_INT_ENA_W1S(a) (0x29B0 | (a) << 6) +#define RVU_MBOX_AF_PFAF1_INT_ENA_W1C(a) (0x29B8 | (a) << 6) + +/* RVU PF => AF mbox registers */ +#define RVU_MBOX_PF_PFAF_TRIGX(a) (0xC00 | (a) << 3) +#define RVU_MBOX_PF_INT (0xC20) +#define RVU_MBOX_PF_INT_W1S (0xC28) +#define RVU_MBOX_PF_INT_ENA_W1S (0xC30) +#define RVU_MBOX_PF_INT_ENA_W1C (0xC38) + +#define RVU_AF_BAR2_SEL (0x9000000) +#define RVU_AF_BAR2_PFID (0x16400) +#define NIX_CINTX_INT_W1S(a) (0xd30 | (a) << 12) +#define NIX_QINTX_CNT(a) (0xc00 | (a) << 12) + +#define RVU_MBOX_AF_VFAF_INT(a) (0x3000 | (a) << 6) +#define RVU_MBOX_AF_VFAF_INT_W1S(a) (0x3008 | (a) << 6) +#define RVU_MBOX_AF_VFAF_INT_ENA_W1S(a) (0x3010 | (a) << 6) +#define RVU_MBOX_AF_VFAF_INT_ENA_W1C(a) (0x3018 | (a) << 6) +#define RVU_MBOX_AF_VFAF_INT_ENA_W1C(a) (0x3018 | (a) << 6) +#define RVU_MBOX_AF_VFAF1_INT(a) (0x3020 | (a) << 6) +#define RVU_MBOX_AF_VFAF1_INT_W1S(a) (0x3028 | (a) << 6) +#define RVU_MBOX_AF_VFAF1_IN_ENA_W1S(a) (0x3030 | (a) << 6) +#define RVU_MBOX_AF_VFAF1_IN_ENA_W1C(a) (0x3038 | (a) << 6) + +#define RVU_MBOX_AF_AFVFX_TRIG(a, b) (0x10000 | (a) << 4 | (b) << 3) +#define RVU_MBOX_AF_VFX_ADDR(a) (0x20000 | (a) << 4) +#define RVU_MBOX_AF_VFX_CFG(a) (0x28000 | (a) << 4) + +#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3) + +#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3) + +#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3) + +#define RVU_MBOX_PF_VF_ADDR (0xC40) +#define RVU_MBOX_PF_LMTLINE_ADDR (0xC48) +#define RVU_MBOX_PF_VF_CFG (0xC60) + +#define RVU_MBOX_VF_VFPF_TRIGX(a) (0x3000 | (a) << 3) +#define RVU_MBOX_VF_INT (0x20) +#define RVU_MBOX_VF_INT_W1S (0x28) +#define RVU_MBOX_VF_INT_ENA_W1S (0x30) +#define RVU_MBOX_VF_INT_ENA_W1C (0x38) + +#define RVU_MBOX_VF_VFAF_TRIGX(a) (0x2000 | (a) << 3) +#endif /* RVU_MBOX_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h new file mode 100644 index 000000000000..76ce3ec6da9c --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#ifndef STRUCT_H +#define STRUCT_H + +/* + * CN20k RVU PF MBOX Interrupt Vector Enumeration + * + * Vectors 0 - 3 are compatible with pre cn20k and hence + * existing macros are being reused. + */ +enum rvu_mbox_pf_int_vec_e { + RVU_MBOX_PF_INT_VEC_VFPF_MBOX0 = 0x4, + RVU_MBOX_PF_INT_VEC_VFPF_MBOX1 = 0x5, + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0 = 0x6, + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1 = 0x7, + RVU_MBOX_PF_INT_VEC_AFPF_MBOX = 0x8, + RVU_MBOX_PF_INT_VEC_CNT = 0x9, +}; + +/* RVU Admin function Interrupt Vector Enumeration */ +enum rvu_af_cn20k_int_vec_e { + RVU_AF_CN20K_INT_VEC_POISON = 0x0, + RVU_AF_CN20K_INT_VEC_PFFLR0 = 0x1, + RVU_AF_CN20K_INT_VEC_PFFLR1 = 0x2, + RVU_AF_CN20K_INT_VEC_PFME0 = 0x3, + RVU_AF_CN20K_INT_VEC_PFME1 = 0x4, + RVU_AF_CN20K_INT_VEC_GEN = 0x5, + RVU_AF_CN20K_INT_VEC_PFAF_MBOX0 = 0x6, + RVU_AF_CN20K_INT_VEC_PFAF_MBOX1 = 0x7, + RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0 = 0x8, + RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1 = 0x9, + RVU_AF_CN20K_INT_VEC_CNT = 0xa, +}; +#endif diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index 5d84386ed22d..8a08bebf08c2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -39,7 +39,7 @@ struct qmem { void *base; dma_addr_t iova; int alloc_sz; - u16 entry_sz; + u32 entry_sz; u8 align; u32 qsize; }; @@ -159,6 +159,7 @@ enum nix_scheduler { #define SDP_HW_MIN_FRS 16 #define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */ #define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */ +#define SDP_LINK_CREDIT 0x320202 /* NIX RX action operation*/ #define NIX_RX_ACTIONOP_DROP (0x0ull) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c index 1e5aa5397504..75872d257eca 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -10,8 +10,11 @@ #include <linux/pci.h> #include "rvu_reg.h" +#include "cn20k/reg.h" +#include "cn20k/api.h" #include "mbox.h" #include "rvu_trace.h" +#include "rvu.h" static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); @@ -28,8 +31,10 @@ void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid) mdev->rsp_size = 0; tx_hdr->num_msgs = 0; tx_hdr->msg_size = 0; + tx_hdr->sig = 0; rx_hdr->num_msgs = 0; rx_hdr->msg_size = 0; + rx_hdr->sig = 0; } EXPORT_SYMBOL(__otx2_mbox_reset); @@ -53,9 +58,98 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox) } EXPORT_SYMBOL(otx2_mbox_destroy); +int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev, + void *reg_base, int direction, int ndevs) +{ + switch (direction) { + case MBOX_DIR_AFPF: + case MBOX_DIR_PFVF: + mbox->tx_start = MBOX_DOWN_TX_START; + mbox->rx_start = MBOX_DOWN_RX_START; + mbox->tx_size = MBOX_DOWN_TX_SIZE; + mbox->rx_size = MBOX_DOWN_RX_SIZE; + break; + case MBOX_DIR_PFAF: + case MBOX_DIR_VFPF: + mbox->tx_start = MBOX_DOWN_RX_START; + mbox->rx_start = MBOX_DOWN_TX_START; + mbox->tx_size = MBOX_DOWN_RX_SIZE; + mbox->rx_size = MBOX_DOWN_TX_SIZE; + break; + case MBOX_DIR_AFPF_UP: + case MBOX_DIR_PFVF_UP: + mbox->tx_start = MBOX_UP_TX_START; + mbox->rx_start = MBOX_UP_RX_START; + mbox->tx_size = MBOX_UP_TX_SIZE; + mbox->rx_size = MBOX_UP_RX_SIZE; + break; + case MBOX_DIR_PFAF_UP: + case MBOX_DIR_VFPF_UP: + mbox->tx_start = MBOX_UP_RX_START; + mbox->rx_start = MBOX_UP_TX_START; + mbox->tx_size = MBOX_UP_RX_SIZE; + mbox->rx_size = MBOX_UP_TX_SIZE; + break; + default: + return -ENODEV; + } + + switch (direction) { + 
case MBOX_DIR_AFPF: + mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(1); + mbox->tr_shift = 4; + break; + case MBOX_DIR_AFPF_UP: + mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(0); + mbox->tr_shift = 4; + break; + case MBOX_DIR_PFAF: + mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(0); + mbox->tr_shift = 0; + break; + case MBOX_DIR_PFAF_UP: + mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(1); + mbox->tr_shift = 0; + break; + case MBOX_DIR_PFVF: + mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(1); + mbox->tr_shift = 4; + break; + case MBOX_DIR_PFVF_UP: + mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(0); + mbox->tr_shift = 4; + break; + case MBOX_DIR_VFPF: + mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(0); + mbox->tr_shift = 0; + break; + case MBOX_DIR_VFPF_UP: + mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(1); + mbox->tr_shift = 0; + break; + default: + return -ENODEV; + } + mbox->reg_base = reg_base; + mbox->pdev = pdev; + + mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL); + if (!mbox->dev) { + otx2_mbox_destroy(mbox); + return -ENOMEM; + } + mbox->ndevs = ndevs; + + return 0; +} + static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev, void *reg_base, int direction, int ndevs) { + if (is_cn20k(pdev)) + return cn20k_mbox_setup(mbox, pdev, reg_base, + direction, ndevs); + switch (direction) { case MBOX_DIR_AFPF: case MBOX_DIR_PFVF: @@ -188,14 +282,13 @@ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid) { unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT); struct otx2_mbox_dev *mdev = &mbox->dev[devid]; - struct device *sender = &mbox->pdev->dev; while (!time_after(jiffies, timeout)) { if (mdev->num_msgs == mdev->msgs_acked) return 0; usleep_range(800, 1000); } - dev_dbg(sender, "timed out while waiting for rsp\n"); + trace_otx2_msg_wait_rsp(mbox->pdev); return -EIO; } EXPORT_SYMBOL(otx2_mbox_wait_for_rsp); @@ -219,6 +312,7 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_hdr *tx_hdr, *rx_hdr; void *hw_mbase = mdev->hwbase; + struct mbox_msghdr *msg; u64 intr_val; tx_hdr = hw_mbase + mbox->tx_start; @@ -234,7 +328,10 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) spin_lock(&mdev->mbox_lock); - tx_hdr->msg_size = mdev->msg_size; + if (!tx_hdr->sig) { + tx_hdr->msg_size = mdev->msg_size; + tx_hdr->num_msgs = mdev->num_msgs; + } /* Reset header for next messages */ mdev->msg_size = 0; @@ -248,10 +345,12 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) * messages. So this should be written after writing all the messages * to the shared memory. 
*/ - tx_hdr->num_msgs = mdev->num_msgs; rx_hdr->num_msgs = 0; - trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size); + msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset); + + trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size, + msg->id, msg->pcifunc); spin_unlock(&mdev->mbox_lock); @@ -306,6 +405,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, { struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_msghdr *msghdr = NULL; + struct mbox_hdr *mboxhdr = NULL; spin_lock(&mdev->mbox_lock); size = ALIGN(size, MBOX_MSG_ALIGN); @@ -329,6 +429,11 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, mdev->msg_size += size; mdev->rsp_size += size_rsp; msghdr->next_msgoff = mdev->msg_size + msgs_offset; + + mboxhdr = mdev->mbase + mbox->tx_start; + /* Clear the msg header region */ + memset(mboxhdr, 0, msgs_offset); + exit: spin_unlock(&mdev->mbox_lock); @@ -445,6 +550,14 @@ const char *otx2_mbox_id2name(u16 id) #define M(_name, _id, _1, _2, _3) case _id: return # _name; MBOX_MESSAGES #undef M + +#define M(_name, _id, _1, _2, _3) case _id: return # _name; + MBOX_UP_CGX_MESSAGES +#undef M + +#define M(_name, _id, _1, _2, _3) case _id: return # _name; + MBOX_UP_CPT_MESSAGES +#undef M default: return "INVALID ID"; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 62c07407eb94..933073cd2280 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -10,9 +10,11 @@ #include <linux/etherdevice.h> #include <linux/sizes.h> +#include <linux/ethtool.h> #include "rvu_struct.h" #include "common.h" +#include "cn20k/struct.h" #define MBOX_SIZE SZ_64K @@ -50,6 +52,11 @@ #define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */ #define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */ +enum { + TYPE_AFVF, + TYPE_AFPF, +}; + struct otx2_mbox_dev { void *mbase; /* This dev's mbox region */ void *hwbase; @@ -78,6 +85,8 @@ struct otx2_mbox { struct mbox_hdr { u64 msg_size; /* Total msgs size embedded */ u16 num_msgs; /* No of msgs embedded */ + u16 opt_msg; + u8 sig; }; /* Header which precedes every msg and is also part of it */ @@ -313,6 +322,10 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \ msg_rsp) \ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \ nix_bandprof_get_hwinfo_rsp) \ +M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \ + nix_bp_cfg_rsp) \ +M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \ + msg_rsp) \ M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \ msg_req, nix_inline_ipsec_cfg) \ M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \ @@ -520,6 +533,8 @@ struct get_hw_cap_rsp { u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ u8 nix_shaping; /* Is shaping and coloring supported */ u8 npc_hash_extract; /* Is hash extract supported */ +#define HW_CAP_MACSEC BIT_ULL(1) + u64 hw_caps; }; /* CGX mbox message formats */ @@ -644,11 +659,17 @@ struct cgx_lmac_fwdata_s { u64 supported_link_modes; /* only applicable if AN is supported */ u64 advertised_fec; - u64 advertised_link_modes; + u64 advertised_link_modes_own:1; /* CGX_CMD_OWN */ + u64 advertised_link_modes:63; /* Only applicable if SFP/QSFP slot is present */ struct sfp_eeprom_s sfp_eeprom; struct phy_s phy; -#define LMAC_FWDATA_RESERVED_MEM 1021 + u32 lmac_type; + u32 
portm_idx; + u64 mgmt_port:1; + u64 advertised_an:1; + u64 port; +#define LMAC_FWDATA_RESERVED_MEM 1018 u64 reserved[LMAC_FWDATA_RESERVED_MEM]; }; @@ -661,12 +682,13 @@ struct cgx_set_link_mode_args { u32 speed; u8 duplex; u8 an; - u8 ports; + u8 mode_baseidx; + u8 multimode; u64 mode; + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); }; struct cgx_set_link_mode_req { -#define AUTONEG_UNKNOWN 0xff struct mbox_msghdr hdr; struct cgx_set_link_mode_args args; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c index d39d86e694cc..d7030dfa5dad 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c @@ -97,7 +97,7 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event) if (pcifunc & RVU_PFVF_FUNC_MASK) pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)]; else - pfvf = &mcs->pf[rvu_get_pf(pcifunc)]; + pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)]; event->intr_mask &= pfvf->intr_mask; @@ -123,7 +123,7 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu) struct mcs_intr_info *req; int pf; - pf = rvu_get_pf(event->pcifunc); + pf = rvu_get_pf(rvu->pdev, event->pcifunc); mutex_lock(&rvu->mbox_lock); @@ -143,6 +143,8 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu) otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); + mutex_unlock(&rvu->mbox_lock); return 0; @@ -191,7 +193,7 @@ int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu, if (pcifunc & RVU_PFVF_FUNC_MASK) pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)]; else - pfvf = &mcs->pf[rvu_get_pf(pcifunc)]; + pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)]; mcs->pf_map[0] = pcifunc; pfvf->intr_mask = req->intr_mask; @@ -925,7 +927,6 @@ void rvu_mcs_exit(struct rvu *rvu) if (!rvu->mcs_intr_wq) return; - flush_workqueue(rvu->mcs_intr_wq); destroy_workqueue(rvu->mcs_intr_wq); rvu->mcs_intr_wq = NULL; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index bcc96eed2481..66749b3649c1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -545,8 +545,7 @@ static int ptp_probe(struct pci_dev *pdev, spin_lock_init(&ptp->ptp_lock); if (cn10k_ptp_errata(ptp)) { ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec; - hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - ptp->hrtimer.function = ptp_reset_thresh; + hrtimer_setup(&ptp->hrtimer, ptp_reset_thresh, CLOCK_MONOTONIC, HRTIMER_MODE_REL); } else { ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index cd0d7b7774f1..c6bb3aaa8e0d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -20,6 +20,8 @@ #include "rvu_trace.h" #include "rvu_npc_hash.h" +#include "cn20k/reg.h" +#include "cn20k/api.h" #define DRV_NAME "rvu_af" #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" @@ -34,10 +36,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, int type, int num, void (mbox_handler)(struct work_struct *), void (mbox_up_handler)(struct work_struct *)); -enum { - TYPE_AFVF, - TYPE_AFPF, -}; +static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq); +static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq); /* 
Supported devices */ static const struct pci_device_id rvu_id_table[] = { @@ -294,7 +294,7 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) devnum = rvu_get_hwvf(rvu, pcifunc); } else { is_pf = true; - devnum = rvu_get_pf(pcifunc); + devnum = rvu_get_pf(rvu->pdev, pcifunc); } /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or @@ -359,7 +359,7 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, devnum = rvu_get_hwvf(rvu, pcifunc); } else { is_pf = true; - devnum = rvu_get_pf(pcifunc); + devnum = rvu_get_pf(rvu->pdev, pcifunc); } block->fn_map[lf] = attach ? pcifunc : 0; @@ -400,11 +400,6 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs); } -inline int rvu_get_pf(u16 pcifunc) -{ - return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; -} - void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) { u64 cfg; @@ -422,7 +417,7 @@ int rvu_get_hwvf(struct rvu *rvu, int pcifunc) int pf, func; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); func = pcifunc & RVU_PFVF_FUNC_MASK; /* Get first HWVF attached to this PF */ @@ -437,7 +432,7 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc) if (pcifunc & RVU_PFVF_FUNC_MASK) return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)]; else - return &rvu->pf[rvu_get_pf(pcifunc)]; + return &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)]; } static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) @@ -445,7 +440,7 @@ static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) int pf, vf, nvfs; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (pf >= rvu->hw->total_pfs) return false; @@ -760,6 +755,11 @@ static void rvu_free_hw_resources(struct rvu *rvu) rvu_reset_msix(rvu); mutex_destroy(&rvu->rsrc_lock); + + /* Free the QINT/CINT memory */ + pfvf = &rvu->pf[RVU_AFPF]; + qmem_free(rvu->dev, pfvf->nix_qints_ctx); + qmem_free(rvu->dev, pfvf->cq_ints_ctx); } static void rvu_setup_pfvf_macaddress(struct rvu *rvu) @@ -1393,8 +1393,6 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) if (blkaddr < 0) return; - if (blktype == BLKTYPE_NIX) - rvu_nix_reset_mac(pfvf, pcifunc); block = &hw->block[blkaddr]; @@ -1407,6 +1405,10 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) if (lf < 0) /* This should never happen */ continue; + if (blktype == BLKTYPE_NIX) { + rvu_nix_reset_mac(pfvf, pcifunc); + rvu_npc_clear_ucast_entry(rvu, pcifunc, lf); + } /* Disable the LF */ rvu_write64(rvu, blkaddr, block->lfcfg_reg | (lf << block->lfshift), 0x00ULL); @@ -1485,7 +1487,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); /* All CGX mapped PFs are set with assigned NIX block during init */ - if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { + if (is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) { blkaddr = pf->nix_blkaddr; } else if (is_lbk_vf(rvu, pcifunc)) { vf = pcifunc - 1; @@ -1499,7 +1501,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) } /* if SDP1 then the blkaddr is NIX1 */ - if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1) + if (is_sdp_pfvf(rvu, pcifunc) && pf->sdp_info->node_id == 1) blkaddr = BLKADDR_NIX1; switch (blkaddr) { @@ -2004,7 +2006,7 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, vf = pcifunc & RVU_PFVF_FUNC_MASK; cfg = rvu_read64(rvu, BLKADDR_RVUM, - RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc))); + 
RVU_PRIV_PFX_CFG(rvu_get_pf(rvu->pdev, pcifunc))); numvfs = (cfg >> 12) & 0xFF; if (vf && vf <= numvfs) @@ -2031,6 +2033,9 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, rsp->nix_shaping = hw->cap.nix_shaping; rsp->npc_hash_extract = hw->cap.npc_hash_extract; + if (rvu->mcs_blk_cnt) + rsp->hw_caps = HW_CAP_MACSEC; + return 0; } @@ -2173,7 +2178,7 @@ static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, if (rsp && err) \ rsp->hdr.rc = err; \ \ - trace_otx2_msg_process(mbox->pdev, _id, err); \ + trace_otx2_msg_process(mbox->pdev, _id, err, req->pcifunc); \ return rsp ? err : -ENOMEM; \ } MBOX_MESSAGES @@ -2218,15 +2223,30 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll) offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); + if (req_hdr->sig && !(is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))) { + req_hdr->opt_msg = mw->mbox_wrk[devid].num_msgs; + rvu_write64(rvu, BLKADDR_NIX0, RVU_AF_BAR2_SEL, + RVU_AF_BAR2_PFID); + if (type == TYPE_AFPF) + rvu_write64(rvu, BLKADDR_NIX0, + AF_BAR2_ALIASX(0, NIX_CINTX_INT_W1S(devid)), + 0x1); + else + rvu_write64(rvu, BLKADDR_NIX0, + AF_BAR2_ALIASX(0, NIX_QINTX_CNT(devid)), + 0x1); + usleep_range(5000, 6000); + goto done; + } + for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) { msg = mdev->mbase + offset; /* Set which PF/VF sent this message based on mbox IRQ */ switch (type) { case TYPE_AFPF: - msg->pcifunc &= - ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT); - msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT); + msg->pcifunc &= rvu_pcifunc_pf_mask(rvu->pdev); + msg->pcifunc |= rvu_make_pcifunc(rvu->pdev, devid, 0); break; case TYPE_AFVF: msg->pcifunc &= @@ -2244,16 +2264,17 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll) if (msg->pcifunc & RVU_PFVF_FUNC_MASK) dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n", err, otx2_mbox_id2name(msg->id), - msg->id, rvu_get_pf(msg->pcifunc), + msg->id, rvu_get_pf(rvu->pdev, msg->pcifunc), (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1); else dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n", err, otx2_mbox_id2name(msg->id), msg->id, devid); } +done: mw->mbox_wrk[devid].num_msgs = 0; - if (poll) + if (!is_cn20k(mbox->pdev) && poll) otx2_mbox_wait_for_zero(mbox, devid); /* Send mbox responses to VF/PF */ @@ -2359,13 +2380,21 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work) __rvu_mbox_up_handler(mwork, TYPE_AFVF); } -static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, +static int rvu_get_mbox_regions(struct rvu *rvu, void __iomem **mbox_addr, int num, int type, unsigned long *pf_bmap) { struct rvu_hwinfo *hw = rvu->hw; int region; u64 bar4; + /* For cn20k platform AF mailbox region is allocated by software + * and the corresponding IOVA is programmed in hardware unlike earlier + * silicons where software uses the hardware region after ioremap. + */ + if (is_cn20k(rvu->pdev)) + return cn20k_rvu_get_mbox_regions(rvu, (void *)mbox_addr, + num, type, pf_bmap); + /* For cn10k platform VF mailbox regions of a PF follows after the * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from * RVU_PF_VF_BAR4_ADDR register. 
@@ -2384,7 +2413,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); bar4 += region * MBOX_SIZE; } - mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); + mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE); if (!mbox_addr[region]) goto error; } @@ -2407,7 +2436,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, RVU_AF_PF_BAR4_ADDR); bar4 += region * MBOX_SIZE; } - mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); + mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE); if (!mbox_addr[region]) goto error; } @@ -2415,20 +2444,26 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, error: while (region--) - iounmap((void __iomem *)mbox_addr[region]); + iounmap(mbox_addr[region]); return -ENOMEM; } +static struct mbox_ops rvu_mbox_ops = { + .pf_intr_handler = rvu_mbox_pf_intr_handler, + .afvf_intr_handler = rvu_mbox_intr_handler, +}; + static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, int type, int num, void (mbox_handler)(struct work_struct *), void (mbox_up_handler)(struct work_struct *)) { - int err = -EINVAL, i, dir, dir_up; + void __iomem **mbox_regions; + struct ng_rvu *ng_rvu_mbox; + int err, i, dir, dir_up; void __iomem *reg_base; struct rvu_work *mwork; unsigned long *pf_bmap; - void **mbox_regions; const char *name; u64 cfg; @@ -2436,6 +2471,12 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, if (!pf_bmap) return -ENOMEM; + ng_rvu_mbox = kzalloc(sizeof(*ng_rvu_mbox), GFP_KERNEL); + if (!ng_rvu_mbox) { + err = -ENOMEM; + goto free_bitmap; + } + /* RVU VFs */ if (type == TYPE_AFVF) bitmap_set(pf_bmap, 0, num); @@ -2449,12 +2490,20 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, } } + rvu->ng_rvu = ng_rvu_mbox; + + rvu->ng_rvu->rvu_mbox_ops = &rvu_mbox_ops; + + err = cn20k_rvu_mbox_init(rvu, type, num); + if (err) + goto free_mem; + mutex_init(&rvu->mbox_lock); - mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL); + mbox_regions = kcalloc(num, sizeof(void __iomem *), GFP_KERNEL); if (!mbox_regions) { err = -ENOMEM; - goto free_bitmap; + goto free_qmem; } switch (type) { @@ -2477,11 +2526,12 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, goto free_regions; break; default: + err = -EINVAL; goto free_regions; } mw->mbox_wq = alloc_workqueue("%s", - WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, + WQ_HIGHPRI | WQ_MEM_RECLAIM, num, name); if (!mw->mbox_wq) { err = -ENOMEM; @@ -2524,7 +2574,11 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, mwork->rvu = rvu; INIT_WORK(&mwork->work, mbox_up_handler); } - goto free_regions; + + kfree(mbox_regions); + bitmap_free(pf_bmap); + + return 0; exit: destroy_workqueue(mw->mbox_wq); @@ -2533,6 +2587,10 @@ unmap_regions: iounmap((void __iomem *)mbox_regions[num]); free_regions: kfree(mbox_regions); +free_qmem: + cn20k_free_mbox_memory(rvu); +free_mem: + kfree(rvu->ng_rvu); free_bitmap: bitmap_free(pf_bmap); return err; @@ -2559,8 +2617,8 @@ static void rvu_mbox_destroy(struct mbox_wq_info *mw) otx2_mbox_destroy(&mw->mbox_up); } -static void rvu_queue_work(struct mbox_wq_info *mw, int first, - int mdevs, u64 intr) +void rvu_queue_work(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr) { struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; @@ -2634,7 +2692,7 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr); rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr); - vfs -= 64; + 
vfs = 64; } intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0)); @@ -2651,6 +2709,11 @@ static void rvu_enable_mbox_intr(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; + if (is_cn20k(rvu->pdev)) { + cn20k_rvu_enable_mbox_intr(rvu); + return; + } + /* Clear spurious irqs, if any */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs)); @@ -2768,7 +2831,7 @@ static void rvu_flr_handler(struct work_struct *work) cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); numvfs = (cfg >> 12) & 0xFF; - pcifunc = pf << RVU_PFVF_PF_SHIFT; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); for (vf = 0; vf < numvfs; vf++) __rvu_flr_handler(rvu, (pcifunc | (vf + 1))); @@ -2904,9 +2967,12 @@ static void rvu_unregister_interrupts(struct rvu *rvu) rvu_cpt_unregister_interrupts(rvu); - /* Disable the Mbox interrupt */ - rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, - INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + if (!is_cn20k(rvu->pdev)) + /* Disable the Mbox interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + else + cn20k_rvu_unregister_interrupts(rvu); /* Disable the PF FLR interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, @@ -2939,6 +3005,10 @@ static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu) * VF interrupts can be handled. Offset equal to zero means * that PF vectors are not configured and overlapping AF vectors. */ + if (is_cn20k(rvu->pdev)) + return (pfvf->msix.max >= RVU_AF_CN20K_INT_VEC_CNT + + RVU_MBOX_PF_INT_VEC_CNT) && offset; + return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) && offset; } @@ -2969,18 +3039,30 @@ static int rvu_register_interrupts(struct rvu *rvu) return ret; } - /* Register mailbox interrupt handler */ - sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); - ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX), - rvu_mbox_pf_intr_handler, 0, - &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); - if (ret) { - dev_err(rvu->dev, - "RVUAF: IRQ registration failed for mbox irq\n"); - goto fail; - } + if (!is_cn20k(rvu->pdev)) { + /* Register mailbox interrupt handler */ + sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], + "RVUAF Mbox"); + ret = request_irq(pci_irq_vector + (rvu->pdev, RVU_AF_INT_VEC_MBOX), + rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0, + &rvu->irq_name[RVU_AF_INT_VEC_MBOX * + NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for mbox\n"); + goto fail; + } - rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; + rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; + } else { + ret = cn20k_register_afpf_mbox_intr(rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for mbox\n"); + goto fail; + } + } /* Enable mailbox interrupts from all PFs */ rvu_enable_mbox_intr(rvu); @@ -3035,34 +3117,40 @@ static int rvu_register_interrupts(struct rvu *rvu) /* Get PF MSIX vectors offset. */ pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; + if (!is_cn20k(rvu->pdev)) { + /* Register MBOX0 interrupt. */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], + rvu); + if (ret) + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for Mbox0\n"); - /* Register MBOX0 interrupt. 
*/ - offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0; - sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0"); - ret = request_irq(pci_irq_vector(rvu->pdev, offset), - rvu_mbox_intr_handler, 0, - &rvu->irq_name[offset * NAME_SIZE], - rvu); - if (ret) - dev_err(rvu->dev, - "RVUAF: IRQ registration failed for Mbox0\n"); - - rvu->irq_allocated[offset] = true; + rvu->irq_allocated[offset] = true; - /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so - * simply increment current offset by 1. - */ - offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1; - sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1"); - ret = request_irq(pci_irq_vector(rvu->pdev, offset), - rvu_mbox_intr_handler, 0, - &rvu->irq_name[offset * NAME_SIZE], - rvu); - if (ret) - dev_err(rvu->dev, - "RVUAF: IRQ registration failed for Mbox1\n"); + /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so + * simply increment current offset by 1. + */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], + rvu); + if (ret) + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for Mbox1\n"); - rvu->irq_allocated[offset] = true; + rvu->irq_allocated[offset] = true; + } else { + ret = cn20k_register_afvf_mbox_intr(rvu, pf_vec_start); + if (ret) + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for Mbox\n"); + } /* Register FLR interrupt handler for AF's VFs */ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0; @@ -3173,6 +3261,9 @@ static void rvu_disable_afvf_intr(struct rvu *rvu) { int vfs = rvu->vfs; + if (is_cn20k(rvu->pdev)) + return cn20k_rvu_disable_afvf_intr(rvu, vfs); + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs)); rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); @@ -3189,6 +3280,9 @@ static void rvu_enable_afvf_intr(struct rvu *rvu) { int vfs = rvu->vfs; + if (is_cn20k(rvu->pdev)) + return cn20k_rvu_enable_afvf_intr(rvu, vfs); + /* Clear any pending interrupts and enable AF VF interrupts for * the first 64 VFs. 
*/ @@ -3433,6 +3527,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate, rvu->fwdata->ptp_ext_tstamp); + /* Alloc CINT and QINT memory */ + rvu_alloc_cint_qint_mem(rvu, &rvu->pf[RVU_AFPF], BLKADDR_NIX0, + (rvu->hw->block[BLKADDR_NIX0].lf.max)); return 0; err_dl: rvu_unregister_dl(rvu); @@ -3484,6 +3581,9 @@ static void rvu_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); devm_kfree(&pdev->dev, rvu->hw); + if (is_cn20k(rvu->pdev)) + cn20k_free_mbox_memory(rvu); + kfree(rvu->ng_rvu); devm_kfree(&pdev->dev, rvu); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index a383b5ef5b2d..7ee1fdeb5295 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -10,6 +10,7 @@ #include <linux/pci.h> #include <net/devlink.h> +#include <linux/soc/marvell/silicons.h> #include "rvu_struct.h" #include "rvu_devlink.h" @@ -30,6 +31,8 @@ #define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00 #define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00 #define PCI_SUBSYS_DEVID_CN10K_B 0xBD00 +#define PCI_SUBSYS_DEVID_CN20KA 0xC220 +#define PCI_SUBSYS_DEVID_CNF20KA 0xC320 /* PCI BAR nos */ #define PCI_AF_REG_BAR_NUM 0 @@ -41,12 +44,39 @@ #define MAX_CPT_BLKS 2 /* PF_FUNC */ -#define RVU_PFVF_PF_SHIFT 10 -#define RVU_PFVF_PF_MASK 0x3F -#define RVU_PFVF_FUNC_SHIFT 0 -#define RVU_PFVF_FUNC_MASK 0x3FF +#define RVU_OTX2_PFVF_PF_SHIFT 10 +#define RVU_OTX2_PFVF_PF_MASK 0x3F +#define RVU_PFVF_FUNC_SHIFT 0 +#define RVU_PFVF_FUNC_MASK 0x3FF +#define RVU_CN20K_PFVF_PF_SHIFT 9 +#define RVU_CN20K_PFVF_PF_MASK 0x7F + +static inline u16 rvu_make_pcifunc(struct pci_dev *pdev, int pf, int func) +{ + if (is_cn20k(pdev)) + return ((pf & RVU_CN20K_PFVF_PF_MASK) << + RVU_CN20K_PFVF_PF_SHIFT) | + ((func & RVU_PFVF_FUNC_MASK) << + RVU_PFVF_FUNC_SHIFT); + else + return ((pf & RVU_OTX2_PFVF_PF_MASK) << + RVU_OTX2_PFVF_PF_SHIFT) | + ((func & RVU_PFVF_FUNC_MASK) << + RVU_PFVF_FUNC_SHIFT); +} + +static inline int rvu_pcifunc_pf_mask(struct pci_dev *pdev) +{ + if (is_cn20k(pdev)) + return ~(RVU_CN20K_PFVF_PF_MASK << RVU_CN20K_PFVF_PF_SHIFT); + else + return ~(RVU_OTX2_PFVF_PF_MASK << RVU_OTX2_PFVF_PF_SHIFT); +} + +#define RVU_AFPF 25 #ifdef CONFIG_DEBUG_FS + struct dump_ctx { int lf; int id; @@ -444,6 +474,23 @@ struct mbox_wq_info { struct workqueue_struct *mbox_wq; }; +struct rvu_irq_data { + u64 intr_status; + void (*rvu_queue_work_hdlr)(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr); + void (*afvf_queue_work_hdlr)(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr); + struct rvu *rvu; + int vec_num; + int start; + int mdevs; +}; + +struct mbox_ops { + irqreturn_t (*pf_intr_handler)(int irq, void *rvu_irq); + irqreturn_t (*afvf_intr_handler)(int irq, void *rvu_irq); +}; + struct channel_fwdata { struct sdp_node_info info; u8 valid; @@ -609,6 +656,8 @@ struct rvu { struct list_head rep_evtq_head; /* Representor event lock */ spinlock_t rep_evtq_lock; + + struct ng_rvu *ng_rvu; }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -834,7 +883,6 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc); void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start); bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc); u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr); -int rvu_get_pf(u16 pcifunc); struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc); void 
rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf); bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr); @@ -863,8 +911,8 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq); /* SDP APIs */ int rvu_sdp_init(struct rvu *rvu); -bool is_sdp_pfvf(u16 pcifunc); -bool is_sdp_pf(u16 pcifunc); +bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc); +bool is_sdp_pf(struct rvu *rvu, u16 pcifunc); bool is_sdp_vf(struct rvu *rvu, u16 pcifunc); static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc) @@ -875,11 +923,21 @@ static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc) return false; } +static inline int rvu_get_pf(struct pci_dev *pdev, u16 pcifunc) +{ + if (is_cn20k(pdev)) + return (pcifunc >> RVU_CN20K_PFVF_PF_SHIFT) & + RVU_CN20K_PFVF_PF_MASK; + else + return (pcifunc >> RVU_OTX2_PFVF_PF_SHIFT) & + RVU_OTX2_PFVF_PF_MASK; +} + /* CGX APIs */ static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) { return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) && - !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT); + !is_sdp_pf(rvu, rvu_make_pcifunc(rvu->pdev, pf, 0)); } static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) @@ -891,7 +949,7 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc) { return ((pcifunc & RVU_PFVF_FUNC_MASK) && - is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))); + is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))); } #define M(_name, _id, fn_name, req, rsp) \ @@ -899,6 +957,10 @@ int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *); MBOX_MESSAGES #undef M +/* Mbox APIs */ +void rvu_queue_work(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr); + int rvu_cgx_init(struct rvu *rvu); int rvu_cgx_exit(struct rvu *rvu); void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu); @@ -953,7 +1015,8 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx, u16 mcam_index); void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc); - +int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf, + int blkaddr, int nixlf); /* NPC APIs */ void rvu_npc_freemem(struct rvu *rvu); int rvu_npc_get_pkind(struct rvu *rvu, u16 pf); @@ -967,8 +1030,6 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable); void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan); -void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, - bool enable); void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan); void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, @@ -994,6 +1055,8 @@ void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, int blkaddr, int *alloc_cnt, int *enable_cnt); +void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf); + bool is_npc_intf_tx(u8 intf); bool is_npc_intf_rx(u8 intf); bool is_npc_interface_valid(struct rvu *rvu, u8 intf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 992fa0b82e8d..3303c475414a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -34,7 +34,7 @@ static struct _req_type __maybe_unused \ return NULL; \ req->hdr.sig = OTX2_MBOX_REQ_SIG; \ req->hdr.id = 
_id; \ - trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \ + trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req), 0); \ return req; \ } @@ -272,6 +272,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid); + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid); + mutex_unlock(&rvu->mbox_lock); } while (pfmap); } @@ -455,7 +457,7 @@ int rvu_cgx_exit(struct rvu *rvu) inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) { if ((pcifunc & RVU_PFVF_FUNC_MASK) || - !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) return false; return true; } @@ -482,7 +484,7 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable) int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -499,7 +501,7 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -524,7 +526,7 @@ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable) void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); int i = 0, lmac_count = 0; struct mac_ops *mac_ops; u8 max_dmac_filters; @@ -575,7 +577,7 @@ int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req, static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req, void *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops; int stat = 0, err = 0; u64 tx_stat, rx_stat; @@ -631,7 +633,7 @@ int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req, int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct rvu_pfvf *parent_pf; struct mac_ops *mac_ops; u8 cgx_idx, lmac; @@ -661,7 +663,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu, struct msg_req *req, struct cgx_fec_stats_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops; u8 cgx_idx, lmac; void *cgxd; @@ -679,17 +681,20 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); + struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; - if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) - return -EPERM; + if (!is_pf_cgxmapped(rvu, pf)) + return LMAC_AF_ERR_PF_NOT_MAPPED; if (rvu_npc_exact_has_match_table(rvu)) return rvu_npc_exact_mac_addr_set(rvu, req, rsp); rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + pfvf = &rvu->pf[pf]; + ether_addr_copy(pfvf->mac_addr, req->mac_addr); cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr); return 0; @@ -699,7 +704,7 @@ int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu, struct cgx_mac_addr_add_req *req, struct cgx_mac_addr_add_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; int rc = 0; @@ -723,7 +728,7 @@ int 
rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu, struct cgx_mac_addr_del_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -741,7 +746,7 @@ int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu, struct cgx_max_dmac_entries_get_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; /* If msg is received from PFs(which are not mapped to CGX LMACs) @@ -767,20 +772,12 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); - u8 cgx_id, lmac_id; - int rc = 0; - u64 cfg; + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); - if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) - return -EPERM; - - rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc))) + return LMAC_AF_ERR_PF_NOT_MAPPED; - rsp->hdr.rc = rc; - cfg = cgx_lmac_addr_get(cgx_id, lmac_id); - /* copy 48 bit mac address to req->mac_addr */ - u64_to_ether_addr(cfg, rsp->mac_addr); + ether_addr_copy(rsp->mac_addr, pfvf->mac_addr); return 0; } @@ -788,7 +785,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -807,7 +804,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -826,7 +823,7 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -862,7 +859,7 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { - if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc))) + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc))) return -EPERM; return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true); @@ -876,7 +873,7 @@ int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req, static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, pcifunc)) @@ -915,7 +912,7 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req, u8 cgx_id, lmac_id; int pf, err; - pf = rvu_get_pf(req->hdr.pcifunc); + pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); if (!is_pf_cgxmapped(rvu, pf)) return -ENODEV; @@ -931,7 +928,7 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu, struct msg_req *req, struct cgx_features_info_msg *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, 
req->hdr.pcifunc); u8 cgx_idx, lmac; void *cgxd; @@ -973,7 +970,7 @@ u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac) static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; @@ -1003,7 +1000,7 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req, int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 rx_pfc = 0, tx_pfc = 0; struct mac_ops *mac_ops; u8 cgx_id, lmac_id; @@ -1044,7 +1041,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu, struct cgx_pause_frm_cfg *req, struct cgx_pause_frm_cfg *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; int err = 0; @@ -1071,7 +1068,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu, int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_pf_cgxmapped(rvu, pf)) @@ -1104,7 +1101,7 @@ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, /* Assumes LF of a PF and all of its VF belongs to the same * NIX block */ - pcifunc = pf << RVU_PFVF_PF_SHIFT; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return 0; @@ -1131,10 +1128,10 @@ int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start) struct rvu_pfvf *parent_pf, *pfvf; int cgx_users, err = 0; - if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) return 0; - parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; + parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)]; pfvf = rvu_get_pfvf(rvu, pcifunc); mutex_lock(&rvu->cgx_cfg_lock); @@ -1177,7 +1174,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu, struct fec_mode *req, struct fec_mode *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_pf_cgxmapped(rvu, pf)) @@ -1193,7 +1190,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu, int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req, struct cgx_fw_data *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!rvu->fwdata) @@ -1220,7 +1217,8 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, struct cgx_set_link_mode_req *req, struct cgx_set_link_mode_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); + struct cgx_lmac_fwdata_s *linkmodes; u8 cgx_idx, lmac; void *cgxd; @@ -1229,14 +1227,20 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); cgxd = rvu_cgx_pdata(cgx_idx, rvu); - rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac); + if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX) + linkmodes = &rvu->fwdata->cgx_fw_data_usx[cgx_idx][lmac]; + else + linkmodes = &rvu->fwdata->cgx_fw_data[cgx_idx][lmac]; + + rsp->status = cgx_set_link_mode(cgxd, req->args, linkmodes, + cgx_idx, lmac); return 0; } int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, 
struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -1254,7 +1258,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, struct cgx_mac_addr_update_req *req, struct cgx_mac_addr_update_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) @@ -1270,7 +1274,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause, u16 pfc_en) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 rx_8023 = 0, tx_8023 = 0; struct mac_ops *mac_ops; u8 cgx_id, lmac_id; @@ -1308,7 +1312,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, struct cgx_pfc_cfg *req, struct cgx_pfc_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -1333,7 +1337,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, void rvu_mac_reset(struct rvu *rvu, u16 pcifunc) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; struct cgx *cgxd; u8 cgx, lmac; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 7fa98aeb3663..d2163da28d18 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -13,19 +13,26 @@ /* RVU LMTST */ #define LMT_TBL_OP_READ 0 #define LMT_TBL_OP_WRITE 1 -#define LMT_MAP_TABLE_SIZE (128 * 1024) #define LMT_MAPTBL_ENTRY_SIZE 16 +#define LMT_MAX_VFS 256 + +#define LMT_MAP_ENTRY_ENA BIT_ULL(20) +#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16) /* Function to perform operations (read/write) on lmtst map table */ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, int lmt_tbl_op) { void __iomem *lmt_map_base; - u64 tbl_base; + u64 tbl_base, cfg; + int pfs, vfs; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + vfs = 1 << (cfg & 0xF); + pfs = 1 << ((cfg >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); + lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); return -ENOMEM; @@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, *val = readq(lmt_map_base + index); } else { writeq((*val), (lmt_map_base + index)); + + cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1); + /* 2048 LMTLINES */ + cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6); + + writeq(cfg, (lmt_map_base + (index + 8))); + /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S * changes effective. Write 1 for flush and read is being used as a * barrier and sets up a data dependency. 
Write to 0 after a write @@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, #define LMT_MAP_TBL_W1_OFF 8 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) { - return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + + return ((rvu_get_pf(rvu->pdev, pcifunc) * LMT_MAX_VFS) + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; } @@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, mutex_lock(&rvu->rsrc_lock); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); - pf = rvu_get_pf(pcifunc) & 0x1F; + pf = rvu_get_pf(rvu->pdev, pcifunc) & RVU_OTX2_PFVF_PF_MASK; val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); @@ -141,7 +155,7 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, int err = 0; u64 val; - /* Check if PF_FUNC wants to use it's own local memory as LMTLINE + /* Check if PF_FUNC wants to use its own local memory as LMTLINE * region, if so, convert that IOVA to physical address and * populate LMT table with that address */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 3c5bbaf12e59..f404117bf6c8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -410,7 +410,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc) { int cpt_pf_num = rvu->cpt_pf_num; - if (rvu_get_pf(pcifunc) != cpt_pf_num) + if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num) return false; if (pcifunc & RVU_PFVF_FUNC_MASK) return false; @@ -422,7 +422,7 @@ static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc) { int cpt_pf_num = rvu->cpt_pf_num; - if (rvu_get_pf(pcifunc) != cpt_pf_num) + if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num) return false; if (!(pcifunc & RVU_PFVF_FUNC_MASK)) return false; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 148144f5b61d..8375f18c8e07 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -553,6 +553,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, u64 lmt_addr, val, tbl_base; int pf, vf, num_vfs, hw_vfs; void __iomem *lmt_map_base; + int apr_pfs, apr_vfs; int buf_size = 10240; size_t off = 0; int index = 0; @@ -568,8 +569,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, return -ENOMEM; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + apr_vfs = 1 << (val & 0xF); + apr_pfs = 1 << ((val >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, 128 * 1024); + lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs * + LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); kfree(buf); @@ -591,7 +596,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t", pf); - index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE; + index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE; off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", (tbl_base + index)); lmt_addr = readq(lmt_map_base + index); @@ -604,7 +609,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, /* Reading num of VFs per PF */ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs); for (vf = 
0; vf < num_vfs; vf++) { - index = (pf * rvu->hw->total_vfs * 16) + + index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) + ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE); off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d:VF%d \t\t", pf, vf); @@ -683,7 +688,7 @@ static int get_max_column_width(struct rvu *rvu) for (pf = 0; pf < rvu->hw->total_pfs; pf++) { for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { - pcifunc = pf << 10 | vf; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf); if (!pcifunc) continue; @@ -754,7 +759,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { off = 0; flag = 0; - pcifunc = pf << 10 | vf; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf); if (!pcifunc) continue; @@ -837,7 +842,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) cgx[0] = 0; lmac[0] = 0; - pcifunc = pf << 10; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); pfvf = rvu_get_pfvf(rvu, pcifunc); if (pfvf->nix_blkaddr == BLKADDR_NIX0) @@ -862,6 +867,71 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL); +static int rvu_dbg_rvu_fwdata_display(struct seq_file *s, void *unused) +{ + struct rvu *rvu = s->private; + struct rvu_fwdata *fwdata; + u8 mac[ETH_ALEN]; + int count = 0, i; + + if (!rvu->fwdata) + return -EAGAIN; + + fwdata = rvu->fwdata; + seq_puts(s, "\nRVU Firmware Data:\n"); + seq_puts(s, "\n\t\tPTP INFORMATION\n"); + seq_puts(s, "\t\t===============\n"); + seq_printf(s, "\t\texternal clockrate \t :%x\n", + fwdata->ptp_ext_clk_rate); + seq_printf(s, "\t\texternal timestamp \t :%x\n", + fwdata->ptp_ext_tstamp); + seq_puts(s, "\n"); + + seq_puts(s, "\n\t\tSDP CHANNEL INFORMATION\n"); + seq_puts(s, "\t\t=======================\n"); + seq_printf(s, "\t\tValid \t\t\t :%x\n", fwdata->channel_data.valid); + seq_printf(s, "\t\tNode ID \t\t :%x\n", + fwdata->channel_data.info.node_id); + seq_printf(s, "\t\tNumber of VFs \t\t :%x\n", + fwdata->channel_data.info.max_vfs); + seq_printf(s, "\t\tNumber of PF-Rings \t :%x\n", + fwdata->channel_data.info.num_pf_rings); + seq_printf(s, "\t\tPF SRN \t\t\t :%x\n", + fwdata->channel_data.info.pf_srn); + seq_puts(s, "\n"); + + seq_puts(s, "\n\t\tPF-INDEX MACADDRESS\n"); + seq_puts(s, "\t\t====================\n"); + for (i = 0; i < PF_MACNUM_MAX; i++) { + u64_to_ether_addr(fwdata->pf_macs[i], mac); + if (!is_zero_ether_addr(mac)) { + seq_printf(s, "\t\t %d %pM\n", i, mac); + count++; + } + } + + if (!count) + seq_puts(s, "\t\tNo valid address found\n"); + + seq_puts(s, "\n\t\tVF-INDEX MACADDRESS\n"); + seq_puts(s, "\t\t====================\n"); + count = 0; + for (i = 0; i < VF_MACNUM_MAX; i++) { + u64_to_ether_addr(fwdata->vf_macs[i], mac); + if (!is_zero_ether_addr(mac)) { + seq_printf(s, "\t\t %d %pM\n", i, mac); + count++; + } + } + + if (!count) + seq_puts(s, "\t\tNo valid address found\n"); + + return 0; +} + +RVU_DEBUG_SEQ_FOPS(rvu_fwdata, rvu_fwdata_display, NULL); + static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf, u16 *pcifunc) { @@ -917,19 +987,18 @@ static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf) /* The 'qsize' entry dumps current Aura/Pool context Qsize * and each context's current enable/disable status in a bitmap. 
*/ -static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, +static int rvu_dbg_qsize_display(struct seq_file *s, void *unsused, int blktype) { - void (*print_qsize)(struct seq_file *filp, + void (*print_qsize)(struct seq_file *s, struct rvu_pfvf *pfvf) = NULL; - struct dentry *current_dir; struct rvu_pfvf *pfvf; struct rvu *rvu; int qsize_id; u16 pcifunc; int blkaddr; - rvu = filp->private; + rvu = s->private; switch (blktype) { case BLKTYPE_NPA: qsize_id = rvu->rvu_dbg.npa_qsize_id; @@ -945,32 +1014,28 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, return -EINVAL; } - if (blktype == BLKTYPE_NPA) { + if (blktype == BLKTYPE_NPA) blkaddr = BLKADDR_NPA; - } else { - current_dir = filp->file->f_path.dentry->d_parent; - blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? - BLKADDR_NIX1 : BLKADDR_NIX0); - } + else + blkaddr = debugfs_get_aux_num(s->file); if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); - print_qsize(filp, pfvf); + print_qsize(s, pfvf); return 0; } -static ssize_t rvu_dbg_qsize_write(struct file *filp, +static ssize_t rvu_dbg_qsize_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos, int blktype) { char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix"; - struct seq_file *seqfile = filp->private_data; + struct seq_file *seqfile = file->private_data; char *cmd_buf, *cmd_buf_tmp, *subtoken; struct rvu *rvu = seqfile->private; - struct dentry *current_dir; int blkaddr; u16 pcifunc; int ret, lf; @@ -996,13 +1061,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp, goto qsize_write_done; } - if (blktype == BLKTYPE_NPA) { + if (blktype == BLKTYPE_NPA) blkaddr = BLKADDR_NPA; - } else { - current_dir = filp->f_path.dentry->d_parent; - blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? 
- BLKADDR_NIX1 : BLKADDR_NIX0); - } + else + blkaddr = debugfs_get_aux_num(file); if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) { ret = -EINVAL; @@ -2626,10 +2688,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused) pcifunc = ipolicer->pfvf_map[idx]; if (!(pcifunc & RVU_PFVF_FUNC_MASK)) seq_printf(m, "Allocated to :: PF %d\n", - rvu_get_pf(pcifunc)); + rvu_get_pf(rvu->pdev, pcifunc)); else seq_printf(m, "Allocated to :: PF %d VF %d\n", - rvu_get_pf(pcifunc), + rvu_get_pf(rvu->pdev, pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK) - 1); print_band_prof_ctx(m, &aq_rsp.prof); } @@ -2704,8 +2766,8 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) &rvu_dbg_nix_ndc_tx_hits_miss_fops); debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_ndc_rx_hits_miss_fops); - debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu, - &rvu_dbg_nix_qsize_fops); + debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu, + blkaddr, &rvu_dbg_nix_qsize_fops); debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_band_prof_ctx_fops); debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw, @@ -2854,28 +2916,14 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) return err; } -static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id) +static int rvu_dbg_derive_lmacid(struct seq_file *s) { - struct dentry *current_dir; - char *buf; - - current_dir = filp->file->f_path.dentry->d_parent; - buf = strrchr(current_dir->d_name.name, 'c'); - if (!buf) - return -EINVAL; - - return kstrtoint(buf + 1, 10, lmac_id); + return debugfs_get_aux_num(s->file); } -static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused) { - int lmac_id, err; - - err = rvu_dbg_derive_lmacid(filp, &lmac_id); - if (!err) - return cgx_print_stats(filp, lmac_id); - - return err; + return cgx_print_stats(s, rvu_dbg_derive_lmacid(s)); } RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); @@ -2933,18 +2981,103 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id) return 0; } -static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused) +static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused) +{ + return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s)); +} + +RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); + +static int cgx_print_fwdata(struct seq_file *s, int lmac_id) { - int err, lmac_id; + struct cgx_lmac_fwdata_s *fwdata; + void *cgxd = s->private; + struct phy_s *phy; + struct rvu *rvu; + int cgx_id, i; - err = rvu_dbg_derive_lmacid(filp, &lmac_id); - if (!err) - return cgx_print_dmac_flt(filp, lmac_id); + rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); + if (!rvu) + return -ENODEV; - return err; + if (!rvu->fwdata) + return -EAGAIN; + + cgx_id = cgx_get_cgxid(cgxd); + + if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX) + fwdata = &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id]; + else + fwdata = &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id]; + + seq_puts(s, "\nFIRMWARE SHARED:\n"); + seq_puts(s, "\t\tSUPPORTED LINK INFORMATION\t\t\n"); + seq_puts(s, "\t\t==========================\n"); + seq_printf(s, "\t\t Link modes \t\t :%llx\n", + fwdata->supported_link_modes); + seq_printf(s, "\t\t Autoneg \t\t :%llx\n", fwdata->supported_an); + seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->supported_fec); + seq_puts(s, 
"\n"); + + seq_puts(s, "\t\tADVERTISED LINK INFORMATION\t\t\n"); + seq_puts(s, "\t\t==========================\n"); + seq_printf(s, "\t\t Link modes \t\t :%llx\n", + (u64)fwdata->advertised_link_modes); + seq_printf(s, "\t\t Autoneg \t\t :%x\n", fwdata->advertised_an); + seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->advertised_fec); + seq_puts(s, "\n"); + + seq_puts(s, "\t\tLMAC CONFIG\t\t\n"); + seq_puts(s, "\t\t============\n"); + seq_printf(s, "\t\t rw_valid \t\t :%x\n", fwdata->rw_valid); + seq_printf(s, "\t\t lmac_type \t\t :%x\n", fwdata->lmac_type); + seq_printf(s, "\t\t portm_idx \t\t :%x\n", fwdata->portm_idx); + seq_printf(s, "\t\t mgmt_port \t\t :%x\n", fwdata->mgmt_port); + seq_printf(s, "\t\t Link modes own \t :%llx\n", + (u64)fwdata->advertised_link_modes_own); + seq_puts(s, "\n"); + + seq_puts(s, "\n\t\tEEPROM DATA\n"); + seq_puts(s, "\t\t===========\n"); + seq_printf(s, "\t\t sff_id \t\t :%x\n", fwdata->sfp_eeprom.sff_id); + seq_puts(s, "\t\t data \t\t\t :\n"); + seq_puts(s, "\t\t"); + for (i = 0; i < SFP_EEPROM_SIZE; i++) { + seq_printf(s, "%x", fwdata->sfp_eeprom.buf[i]); + if ((i + 1) % 16 == 0) { + seq_puts(s, "\n"); + seq_puts(s, "\t\t"); + } + } + seq_puts(s, "\n"); + + phy = &fwdata->phy; + seq_puts(s, "\n\t\tPHY INFORMATION\n"); + seq_puts(s, "\t\t===============\n"); + seq_printf(s, "\t\t Mod type configurable \t\t :%x\n", + phy->misc.can_change_mod_type); + seq_printf(s, "\t\t Mod type \t\t\t :%x\n", phy->misc.mod_type); + seq_printf(s, "\t\t Support FEC \t\t\t :%x\n", phy->misc.has_fec_stats); + seq_printf(s, "\t\t RSFEC corrected words \t\t :%x\n", + phy->fec_stats.rsfec_corr_cws); + seq_printf(s, "\t\t RSFEC uncorrected words \t :%x\n", + phy->fec_stats.rsfec_uncorr_cws); + seq_printf(s, "\t\t BRFEC corrected words \t\t :%x\n", + phy->fec_stats.brfec_corr_blks); + seq_printf(s, "\t\t BRFEC uncorrected words \t :%x\n", + phy->fec_stats.brfec_uncorr_blks); + seq_puts(s, "\n"); + + return 0; } -RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); +static int rvu_dbg_cgx_fwdata_display(struct seq_file *s, void *unused) +{ + return cgx_print_fwdata(s, rvu_dbg_derive_lmacid(s)); +} + +RVU_DEBUG_SEQ_FOPS(cgx_fwdata, cgx_fwdata_display, NULL); static void rvu_dbg_cgx_init(struct rvu *rvu) { @@ -2980,11 +3113,14 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) rvu->rvu_dbg.lmac = debugfs_create_dir(dname, rvu->rvu_dbg.cgx); - debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac, - cgx, &rvu_dbg_cgx_stat_fops); - debugfs_create_file("mac_filter", 0600, - rvu->rvu_dbg.lmac, cgx, + debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac, + cgx, lmac_id, &rvu_dbg_cgx_stat_fops); + debugfs_create_file_aux_num("mac_filter", 0600, + rvu->rvu_dbg.lmac, cgx, lmac_id, &rvu_dbg_cgx_dmac_flt_fops); + debugfs_create_file("fwdata", 0600, + rvu->rvu_dbg.lmac, cgx, + &rvu_dbg_cgx_fwdata_fops); } } } @@ -3006,10 +3142,10 @@ static void rvu_print_npc_mcam_info(struct seq_file *s, if (!(pcifunc & RVU_PFVF_FUNC_MASK)) seq_printf(s, "\n\t\t Device \t\t: PF%d\n", - rvu_get_pf(pcifunc)); + rvu_get_pf(rvu->pdev, pcifunc)); else seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n", - rvu_get_pf(pcifunc), + rvu_get_pf(rvu->pdev, pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK) - 1); if (entry_acnt) { @@ -3072,13 +3208,13 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued) seq_puts(filp, "\n\t\t Current allocation\n"); seq_puts(filp, "\t\t====================\n"); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { - pcifunc = (pf << RVU_PFVF_PF_SHIFT); + pcifunc = 
rvu_make_pcifunc(rvu->pdev, pf, 0); rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); numvfs = (cfg >> 12) & 0xFF; for (vf = 0; vf < numvfs; vf++) { - pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1)); rvu_print_npc_mcam_info(filp, pcifunc, blkaddr); } } @@ -3349,7 +3485,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) mutex_lock(&mcam->lock); list_for_each_entry(iter, &mcam->mcam_rules, list) { - pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; + pf = rvu_get_pf(rvu->pdev, iter->owner); seq_printf(s, "\n\tInstalled by: PF%d ", pf); if (iter->owner & RVU_PFVF_FUNC_MASK) { @@ -3367,7 +3503,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) rvu_dbg_npc_mcam_show_flows(s, iter); if (is_npc_intf_rx(iter->intf)) { target = iter->rx_action.pf_func; - pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; + pf = rvu_get_pf(rvu->pdev, target); seq_printf(s, "\tForward to: PF%d ", pf); if (target & RVU_PFVF_FUNC_MASK) { @@ -3831,6 +3967,9 @@ void rvu_dbg_init(struct rvu *rvu) debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root, rvu, &rvu_dbg_lmtst_map_table_fops); + debugfs_create_file("rvu_fwdata", 0444, rvu->rvu_dbg.root, rvu, + &rvu_dbg_rvu_fwdata_fops); + if (!cgx_get_cgxcnt_max()) goto create; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index dab4deca893f..27c3a2daaaa9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -207,7 +207,7 @@ static void rvu_nix_unregister_interrupts(struct rvu *rvu) rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false; } - for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++) + for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++) if (rvu->irq_allocated[offs + i]) { free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl); rvu->irq_allocated[offs + i] = false; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 5d5a01dbbca1..60db1f616cc8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -315,7 +315,8 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, if (lvl >= hw->cap.nix_tx_aggr_lvl) { if ((nix_get_tx_link(rvu, map_func) != nix_get_tx_link(rvu, pcifunc)) && - (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))) + (rvu_get_pf(rvu->pdev, map_func) != + rvu_get_pf(rvu->pdev, pcifunc))) return false; else return true; @@ -339,7 +340,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, bool from_vf; int err; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && type != NIX_INTF_TYPE_SDP) return 0; @@ -416,7 +417,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, break; case NIX_INTF_TYPE_SDP: from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); - parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; + parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)]; sdp_info = parent_pf->sdp_info; if (!sdp_info) { dev_err(rvu->dev, "Invalid sdp_info pointer\n"); @@ -569,9 +570,17 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc) mutex_unlock(&rvu->rsrc_lock); } -int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, - struct nix_bp_cfg_req *req, 
- struct msg_rsp *rsp) +static u16 nix_get_channel(u16 chan, bool cpt_link) +{ + /* CPT channel for a given link channel is always + * assumed to be BIT(11) set in link channel. + */ + return cpt_link ? chan | BIT(11) : chan; +} + +static int nix_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp, bool cpt_link) { u16 pcifunc = req->hdr.pcifunc; int blkaddr, pf, type, err; @@ -579,16 +588,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, struct rvu_pfvf *pfvf; struct nix_hw *nix_hw; struct nix_bp *bp; + u16 chan_v; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) return 0; - if (is_sdp_pfvf(pcifunc)) + if (is_sdp_pfvf(rvu, pcifunc)) type = NIX_INTF_TYPE_SDP; + if (cpt_link && !rvu->hw->cpt_links) + return 0; + pfvf = rvu_get_pfvf(rvu, pcifunc); err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); if (err) @@ -597,8 +610,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, bp = &nix_hw->bp; chan_base = pfvf->rx_chan_base + req->chan_base; for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { - cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); - rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), + chan_v = nix_get_channel(chan, cpt_link); + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v)); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v), cfg & ~BIT_ULL(16)); if (type == NIX_INTF_TYPE_LBK) { @@ -617,6 +631,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, return 0; } +int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp) +{ + return nix_bp_disable(rvu, req, rsp, false); +} + +int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp) +{ + return nix_bp_disable(rvu, req, rsp, true); +} + static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, int type, int chan_id) { @@ -696,20 +724,22 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, return bpid; } -int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, - struct nix_bp_cfg_req *req, - struct nix_bp_cfg_rsp *rsp) +static int nix_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp, + bool cpt_link) { int blkaddr, pf, type, chan_id = 0; u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; u16 chan_base, chan; s16 bpid, bpid_base; + u16 chan_v; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); type = is_lbk_vf(rvu, pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; - if (is_sdp_pfvf(pcifunc)) + if (is_sdp_pfvf(rvu, pcifunc)) type = NIX_INTF_TYPE_SDP; /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ @@ -717,6 +747,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, type != NIX_INTF_TYPE_SDP) return 0; + if (cpt_link && !rvu->hw->cpt_links) + return 0; + pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); @@ -730,9 +763,11 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, return -EINVAL; } - cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); + chan_v = nix_get_channel(chan, cpt_link); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v)); cfg &= ~GENMASK_ULL(8, 0); - rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v), cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); chan_id++; bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); @@ -750,6 +785,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, return 0; } +int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp) +{ + return nix_bp_enable(rvu, req, rsp, false); +} + +int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp) +{ + return nix_bp_enable(rvu, req, rsp, true); +} + static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, u64 format, bool v4, u64 *fidx) { @@ -1626,7 +1675,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, } intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; - if (is_sdp_pfvf(pcifunc)) + if (is_sdp_pfvf(rvu, pcifunc)) intf = NIX_INTF_TYPE_SDP; err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, @@ -1750,7 +1799,8 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); if (rc < 0) { dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", - rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); + rvu_get_pf(rvu->pdev, pcifunc), + pcifunc & RVU_PFVF_FUNC_MASK); return NIX_AF_ERR_MARK_CFG_FAIL; } @@ -2002,7 +2052,7 @@ static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) { struct rvu_hwinfo *hw = rvu->hw; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id = 0, lmac_id = 0; if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */ @@ -2020,7 +2070,7 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, int link, int *start, int *end) { struct rvu_hwinfo *hw = rvu->hw; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); /* LBK links */ if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) { @@ -2378,7 +2428,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, { struct nix_smq_flush_ctx *smq_flush_ctx; int err, restore_tx_en = 0, i; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id = 0, lmac_id = 0; u16 tl2_tl3_link_schq; u8 link, link_level; @@ -2772,7 +2822,7 @@ void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, { struct rvu_hwinfo *hw = rvu->hw; int lbk_link_start, lbk_links; - u8 pf = rvu_get_pf(pcifunc); + u8 pf = rvu_get_pf(rvu->pdev, pcifunc); int schq; u64 cfg; @@ -3142,7 +3192,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); if (err) { dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", - 
rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); + rvu_get_pf(rvu->pdev, pcifunc), + pcifunc & RVU_PFVF_FUNC_MASK); return err; } return 0; @@ -3410,7 +3461,7 @@ int nix_update_mce_list(struct rvu *rvu, u16 pcifunc, dev_err(rvu->dev, "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n", __func__, idx, mce_list->max, - pcifunc >> RVU_PFVF_PF_SHIFT); + rvu_get_pf(rvu->pdev, pcifunc)); return -EINVAL; } @@ -3462,7 +3513,8 @@ void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type, struct rvu_pfvf *pfvf; if (!hw->cap.nix_rx_multicast || - !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) { + !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, + pcifunc & ~RVU_PFVF_FUNC_MASK))) { *mce_list = NULL; *mce_idx = 0; return; @@ -3496,13 +3548,13 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, int pf; /* skip multicast pkt replication for AF's VFs & SDP links */ - if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(rvu, pcifunc)) return 0; if (!hw->cap.nix_rx_multicast) return 0; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (!is_pf_cgxmapped(rvu, pf)) return 0; @@ -3571,7 +3623,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw) for (idx = 0; idx < (numvfs + 1); idx++) { /* idx-0 is for PF, followed by VFs */ - pcifunc = (pf << RVU_PFVF_PF_SHIFT); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); pcifunc |= idx; /* Add dummy entries now, so that we don't have to check * for whether AQ_OP should be INIT/WRITE later on. @@ -4506,7 +4558,7 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, static void nix_find_link_frs(struct rvu *rvu, struct nix_frs_cfg *req, u16 pcifunc) { - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct rvu_pfvf *pfvf; int maxlen, minlen; int numvfs, hwvf; @@ -4553,7 +4605,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); int blkaddr, link = -1; struct nix_hw *nix_hw; struct rvu_pfvf *pfvf; @@ -4672,6 +4724,9 @@ static void nix_link_config(struct rvu *rvu, int blkaddr, rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); + /* Set SDP link credit */ + rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT); + /* Set default min/max packet lengths allowed on NIX Rx links. * * With HW reset minlen value of 60byte, HW will treat ARP pkts @@ -4995,7 +5050,7 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) (ltdefs->rx_apad1.ltype_match << 4) | ltdefs->rx_apad1.ltype_mask); - /* Receive ethertype defination register defines layer + /* Receive ethertype definition register defines layer * information in NPC_RESULT_S to identify the Ethertype * location in L2 header. Used for Ethertype overwriting * in inline IPsec flow. 
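Editor's note: in the rvu_nix.c hunks above, nix_bp_enable() and nix_bp_disable() become thin wrappers around one implementation with a cpt_link flag: the CPT alias of a link channel is the channel number with bit 11 set, and backpressure is enabled by writing the BPID into bits [8:0] of NIX_AF_RX_CHANX_CFG together with enable bit 16 (and disabled by clearing bit 16). A small user-space sketch of just that bit manipulation follows; the example channel value and the absence of real register I/O are assumptions for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CPT_CHAN_BIT	(1u << 11)	/* CPT alias of a NIX link channel */
#define BP_BPID_MASK	0x1FFULL	/* NIX_AF_RX_CHANX_CFG[8:0] */
#define BP_ENA_BIT	(1ULL << 16)	/* backpressure enable */

/* Mirror of nix_get_channel(): CPT channel = link channel | BIT(11) */
static uint16_t nix_get_channel(uint16_t chan, bool cpt_link)
{
	return cpt_link ? (chan | CPT_CHAN_BIT) : chan;
}

/* New register value when enabling backpressure with a given BPID */
static uint64_t bp_enable_cfg(uint64_t old_cfg, uint16_t bpid)
{
	old_cfg &= ~BP_BPID_MASK;	/* clear any previous BPID */
	return old_cfg | (bpid & BP_BPID_MASK) | BP_ENA_BIT;
}

/* New register value when disabling backpressure */
static uint64_t bp_disable_cfg(uint64_t old_cfg)
{
	return old_cfg & ~BP_ENA_BIT;
}

int main(void)
{
	uint16_t chan_v = nix_get_channel(0x100, true);	/* example channel */
	uint64_t cfg = bp_enable_cfg(0, 5);

	printf("chan_v=0x%x enable_cfg=0x%llx disable_cfg=0x%llx\n",
	       chan_v, (unsigned long long)cfg,
	       (unsigned long long)bp_disable_cfg(cfg));
	return 0;
}
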
@@ -5200,7 +5255,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, rvu_switch_update_rules(rvu, pcifunc, true); - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) rvu_rep_notify_pfvf_state(rvu, pcifunc, true); @@ -5233,7 +5288,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, rvu_switch_update_rules(rvu, pcifunc, false); rvu_cgx_tx_enable(rvu, pcifunc, true); - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) rvu_rep_notify_pfvf_state(rvu, pcifunc, false); return 0; @@ -5245,7 +5300,7 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct hwctx_disable_req ctx_req; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); struct mac_ops *mac_ops; u8 cgx_id, lmac_id; u64 sa_base; @@ -5334,7 +5389,7 @@ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) int nixlf; u64 cfg; - pf = rvu_get_pf(pcifunc); + pf = rvu_get_pf(rvu->pdev, pcifunc); if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 821fe242f821..c7c70429eb6c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -147,7 +147,9 @@ static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type) { - int pf = rvu_get_pf(pcifunc); + struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam); + struct rvu *rvu = hw->rvu; + int pf = rvu_get_pf(rvu->pdev, pcifunc); int index; /* Check if this is for a PF */ @@ -698,7 +700,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, /* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list && - is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { + is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) { *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_MCAST; pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); @@ -820,24 +822,6 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } -void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, - bool enable) -{ - struct npc_mcam *mcam = &rvu->hw->mcam; - int blkaddr, index; - - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); - if (blkaddr < 0) - return; - - /* Get 'pcifunc' of PF device */ - pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; - - index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, - NIXLF_BCAST_ENTRY); - npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); -} - void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan) { @@ -1125,6 +1109,7 @@ void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf, bool enable) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; int index, blkaddr; @@ -1133,9 +1118,12 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, return; /* Ucast MCAM match entry of this PF/VF */ - index = npc_get_nixlf_mcam_index(mcam, pcifunc, - nixlf, NIXLF_UCAST_ENTRY); - npc_enable_mcam_entry(rvu, mcam, 
blkaddr, index, enable); + if (npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), + pfvf->nix_rx_intf)) { + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); + } /* Nothing to do for VFs, on platforms where pkt replication * is not supported @@ -3448,7 +3436,7 @@ int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int blkaddr, nixlf, rc, intf_mode; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u64 rxpkind, txpkind; u8 cgx_id, lmac_id; @@ -3588,3 +3576,33 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, return 0; } + +void rvu_npc_clear_ucast_entry(struct rvu *rvu, int pcifunc, int nixlf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_npc_mcam_rule *rule; + int ucast_idx, blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + + npc_enable_mcam_entry(rvu, mcam, blkaddr, ucast_idx, false); + + npc_set_mcam_action(rvu, mcam, blkaddr, ucast_idx, 0); + + npc_clear_mcam_entry(rvu, mcam, blkaddr, ucast_idx); + + mutex_lock(&mcam->lock); + list_for_each_entry(rule, &mcam->mcam_rules, list) { + if (rule->entry == ucast_idx) { + list_del(&rule->list); + kfree(rule); + break; + } + } + mutex_unlock(&mcam->lock); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index da69e454662a..1b765045aa63 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -1452,23 +1452,21 @@ process_flow: * hence modify pcifunc accordingly. 
*/ - /* AF installing for a PF/VF */ - if (!req->hdr.pcifunc) + if (!req->hdr.pcifunc) { + /* AF installing for a PF/VF */ target = req->vf; - - /* PF installing for its VF */ - if (!from_vf && req->vf && !from_rep_dev) { + } else if (!from_vf && req->vf && !from_rep_dev) { + /* PF installing for its VF */ target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf; pf_set_vfs_mac = req->default_rule && (req->features & BIT_ULL(NPC_DMAC)); - } - - /* Representor device installing for a representee */ - if (from_rep_dev && req->vf) + } else if (from_rep_dev && req->vf) { + /* Representor device installing for a representee */ target = req->vf; - else + } else { /* msg received from PF/VF */ target = req->hdr.pcifunc; + } /* ignore chan_mask in case pf func is not AF, revisit later */ if (!is_pffunc_af(req->hdr.pcifunc)) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c index d2661e7fabdb..999f6d93c7fe 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c @@ -1465,7 +1465,7 @@ static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) { struct npc_exact_table *table; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id, lmac_id; u32 drop_mcam_idx; bool *promisc; @@ -1512,7 +1512,7 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc) int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) { struct npc_exact_table *table; - int pf = rvu_get_pf(pcifunc); + int pf = rvu_get_pf(rvu->pdev, pcifunc); u8 cgx_id, lmac_id; u32 drop_mcam_idx; bool *promisc; @@ -1560,7 +1560,7 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc) int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u32 seq_id = req->index; struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; @@ -1593,7 +1593,7 @@ int rvu_npc_exact_mac_addr_update(struct rvu *rvu, struct cgx_mac_addr_update_req *req, struct cgx_mac_addr_update_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct npc_exact_table_entry *entry; struct npc_exact_table *table; struct rvu_pfvf *pfvf; @@ -1675,7 +1675,7 @@ int rvu_npc_exact_mac_addr_add(struct rvu *rvu, struct cgx_mac_addr_add_req *req, struct cgx_mac_addr_add_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; int rc = 0; @@ -1711,7 +1711,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu, struct cgx_mac_addr_del_req *req, struct msg_rsp *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); int rc; rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index); @@ -1736,7 +1736,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu, int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { - int pf = rvu_get_pf(req->hdr.pcifunc); + int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc); u32 seq_id = req->index; struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; @@ -2001,7 +2001,7 @@ int rvu_npc_exact_init(struct rvu *rvu) } /* Filter rules are only for PF */ - pcifunc = RVU_PFFUNC(i, 0); + pcifunc = RVU_PFFUNC(rvu->pdev, i, 0); 
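The restructured branch in rvu_mbox_handler_npc_install_flow() (rvu_npc_fs.c, earlier in this patch) decides which pcifunc a flow rule is installed for. Restated as a stand-alone helper purely for readability, with resolve_flow_target() being a hypothetical name and RVU_PFVF_FUNC_MASK the driver's existing function mask:

#include <linux/types.h>

/* Sketch of the target-selection rules from the hunk above; not driver API. */
static u16 resolve_flow_target(u16 hdr_pcifunc, u16 req_vf,
			       bool from_vf, bool from_rep_dev)
{
	if (!hdr_pcifunc)			/* AF installing for a PF/VF */
		return req_vf;
	if (!from_vf && req_vf && !from_rep_dev) /* PF installing for its VF */
		return (hdr_pcifunc & ~RVU_PFVF_FUNC_MASK) | req_vf;
	if (from_rep_dev && req_vf)		/* representor for representee */
		return req_vf;
	return hdr_pcifunc;			/* message from the PF/VF itself */
}

In the second case the original code additionally latches pf_set_vfs_mac when the request is a default rule carrying NPC_DMAC, so the VF's MAC can be programmed by its PF.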
dev_dbg(rvu->dev, "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n", diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h index 57a09328d46b..cb25cf478f1f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h @@ -139,9 +139,7 @@ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = { #define NPC_MCAM_DROP_RULE_MAX 30 #define NPC_MCAM_SDP_DROP_RULE_IDX 0 -#define RVU_PFFUNC(pf, func) \ - ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \ - (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT)) +#define RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func) enum npc_exact_opc_type { NPC_EXACT_OPC_MEM, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c index 052ae5923e3a..03099bc570bd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c @@ -39,7 +39,7 @@ static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event) struct rep_event *msg; int pf; - pf = rvu_get_pf(event->pcifunc); + pf = rvu_get_pf(rvu->pdev, event->pcifunc); if (event->event & RVU_EVENT_MAC_ADDR_CHANGE) ether_addr_copy(pfvf->mac_addr, event->evt_data.mac); @@ -60,6 +60,8 @@ static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event) otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); + mutex_unlock(&rvu->mbox_lock); return 0; } @@ -112,10 +114,10 @@ int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable) struct rep_event *req; int pf; - if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) return 0; - pf = rvu_get_pf(rvu->rep_pcifunc); + pf = rvu_get_pf(rvu->pdev, rvu->rep_pcifunc); mutex_lock(&rvu->mbox_lock); req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf); @@ -323,7 +325,7 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu) if (!is_pf_cgxmapped(rvu, pf)) continue; - pcifunc = pf << RVU_PFVF_PF_SHIFT; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); rvu_get_nix_blkaddr(rvu, pcifunc); rep = true; for (i = 0; i < 2; i++) { @@ -343,8 +345,7 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu) rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); for (vf = 0; vf < numvfs; vf++) { - pcifunc = pf << RVU_PFVF_PF_SHIFT | - ((vf + 1) & RVU_PFVF_FUNC_MASK); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf + 1); rvu_get_nix_blkaddr(rvu, pcifunc); /* Skip installimg rules if nixlf is not attached */ @@ -452,7 +453,7 @@ int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req, for (pf = 0; pf < rvu->hw->total_pfs; pf++) { if (!is_pf_cgxmapped(rvu, pf)) continue; - pcifunc = pf << RVU_PFVF_PF_SHIFT; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); rvu->rep2pfvf_map[rep] = pcifunc; rsp->rep_pf_map[rep] = pcifunc; rep++; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c index 38cfe148f4b7..e4a5f9fa6fd4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c @@ -17,9 +17,9 @@ /* SDP PF number */ static int sdp_pf_num[MAX_SDP] = {-1, -1}; -bool is_sdp_pfvf(u16 pcifunc) +bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc) { - u16 pf = rvu_get_pf(pcifunc); + u16 pf = rvu_get_pf(rvu->pdev, pcifunc); u32 found = 0, i = 0; while (i < MAX_SDP) 
{ @@ -34,9 +34,9 @@ bool is_sdp_pfvf(u16 pcifunc) return true; } -bool is_sdp_pf(u16 pcifunc) +bool is_sdp_pf(struct rvu *rvu, u16 pcifunc) { - return (is_sdp_pfvf(pcifunc) && + return (is_sdp_pfvf(rvu, pcifunc) && !(pcifunc & RVU_PFVF_FUNC_MASK)); } @@ -46,7 +46,7 @@ bool is_sdp_vf(struct rvu *rvu, u16 pcifunc) if (!(pcifunc & ~RVU_PFVF_FUNC_MASK)) return (rvu->vf_devid == RVU_SDP_VF_DEVID); - return (is_sdp_pfvf(pcifunc) && + return (is_sdp_pfvf(rvu, pcifunc) && !!(pcifunc & RVU_PFVF_FUNC_MASK)); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 77ac94cb2ec4..0596a3ac4c12 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -33,7 +33,8 @@ enum rvu_block_addr_e { BLKADDR_NDC_NIX1_RX = 0x10ULL, BLKADDR_NDC_NIX1_TX = 0x11ULL, BLKADDR_APR = 0x16ULL, - BLK_COUNT = 0x17ULL, + BLKADDR_MBOX = 0x1bULL, + BLK_COUNT = 0x1cULL, }; /* RVU Block Type Enumeration */ @@ -49,7 +50,8 @@ enum rvu_block_type_e { BLKTYPE_TIM = 0x8, BLKTYPE_CPT = 0x9, BLKTYPE_NDC = 0xa, - BLKTYPE_MAX = 0xa, + BLKTYPE_MBOX = 0x13, + BLKTYPE_MAX = 0x13, }; /* RVU Admin function Interrupt Vector Enumeration */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c index 268efb7c1c15..49ce38685a7e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -93,7 +93,7 @@ static int rvu_switch_install_rules(struct rvu *rvu) if (!is_pf_cgxmapped(rvu, pf)) continue; - pcifunc = pf << 10; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); /* rvu_get_nix_blkaddr sets up the corresponding NIX block * address and NIX RX and TX interfaces for a pcifunc. 
* Generally it is called during attach call of a pcifunc but it @@ -126,7 +126,7 @@ static int rvu_switch_install_rules(struct rvu *rvu) rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); for (vf = 0; vf < numvfs; vf++) { - pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1)); rvu_get_nix_blkaddr(rvu, pcifunc); err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); @@ -236,7 +236,7 @@ void rvu_switch_disable(struct rvu *rvu) if (!is_pf_cgxmapped(rvu, pf)) continue; - pcifunc = pf << 10; + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0); err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); if (err) dev_err(rvu->dev, @@ -248,7 +248,7 @@ void rvu_switch_disable(struct rvu *rvu) rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); for (vf = 0; vf < numvfs; vf++) { - pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1)); err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); if (err) dev_err(rvu->dev, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c index 775fd4c35794..19e0d16b12f6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c @@ -11,3 +11,5 @@ EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc); EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt); EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process); +EXPORT_TRACEPOINT_SYMBOL(otx2_msg_status); +EXPORT_TRACEPOINT_SYMBOL(otx2_parse_dump); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h index 5704520f9b02..4cd0fc4b0d20 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h @@ -18,33 +18,42 @@ #include "mbox.h" TRACE_EVENT(otx2_msg_alloc, - TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size), - TP_ARGS(pdev, id, size), + TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size, u16 pcifunc), + TP_ARGS(pdev, id, size, pcifunc), TP_STRUCT__entry(__string(dev, pci_name(pdev)) __field(u16, id) __field(u64, size) + __field(u16, pcifunc) ), TP_fast_assign(__assign_str(dev); __entry->id = id; __entry->size = size; + __entry->pcifunc = pcifunc; ), - TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev), - otx2_mbox_id2name(__entry->id), __entry->size) + TP_printk("[%s] msg:(%s) size:%lld pcifunc:0x%x\n", __get_str(dev), + otx2_mbox_id2name(__entry->id), __entry->size, + __entry->pcifunc) ); TRACE_EVENT(otx2_msg_send, - TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size), - TP_ARGS(pdev, num_msgs, msg_size), + TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size, + u16 id, u16 pcifunc), + TP_ARGS(pdev, num_msgs, msg_size, id, pcifunc), TP_STRUCT__entry(__string(dev, pci_name(pdev)) __field(u16, num_msgs) __field(u64, msg_size) + __field(u16, id) + __field(u16, pcifunc) ), TP_fast_assign(__assign_str(dev); __entry->num_msgs = num_msgs; __entry->msg_size = msg_size; + __entry->id = id; + __entry->pcifunc = pcifunc; ), - TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev), - __entry->num_msgs, __entry->msg_size) + TP_printk("[%s] sent %d msg(s) of size:%lld msg:(%s) pcifunc:0x%x\n", + __get_str(dev), __entry->num_msgs, __entry->msg_size, + otx2_mbox_id2name(__entry->id), __entry->pcifunc) ); TRACE_EVENT(otx2_msg_check, @@ -81,18 +90,73 @@ TRACE_EVENT(otx2_msg_interrupt, ); TRACE_EVENT(otx2_msg_process, - TP_PROTO(const struct pci_dev *pdev, u16 id, int err), - TP_ARGS(pdev, id, err), + 
TP_PROTO(const struct pci_dev *pdev, u16 id, int err, u16 pcifunc), + TP_ARGS(pdev, id, err, pcifunc), TP_STRUCT__entry(__string(dev, pci_name(pdev)) __field(u16, id) __field(int, err) + __field(u16, pcifunc) ), TP_fast_assign(__assign_str(dev); __entry->id = id; __entry->err = err; + __entry->pcifunc = pcifunc; + ), + TP_printk("[%s] msg:(%s) error:%d pcifunc:0x%x\n", __get_str(dev), + otx2_mbox_id2name(__entry->id), + __entry->err, __entry->pcifunc) +); + +TRACE_EVENT(otx2_msg_wait_rsp, + TP_PROTO(const struct pci_dev *pdev), + TP_ARGS(pdev), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + ), + TP_fast_assign(__assign_str(dev) + ), + TP_printk("[%s] timed out while waiting for response\n", + __get_str(dev)) +); + +TRACE_EVENT(otx2_msg_status, + TP_PROTO(const struct pci_dev *pdev, const char *msg, u16 num_msgs), + TP_ARGS(pdev, msg, num_msgs), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __string(str, msg) + __field(u16, num_msgs) + ), + TP_fast_assign(__assign_str(dev); + __assign_str(str); + __entry->num_msgs = num_msgs; + ), + TP_printk("[%s] %s num_msgs:%d\n", __get_str(dev), + __get_str(str), __entry->num_msgs) +); + +TRACE_EVENT(otx2_parse_dump, + TP_PROTO(const struct pci_dev *pdev, char *msg, u64 *word), + TP_ARGS(pdev, msg, word), + TP_STRUCT__entry(__string(dev, pci_name(pdev)) + __string(str, msg) + __field(u64, w0) + __field(u64, w1) + __field(u64, w2) + __field(u64, w3) + __field(u64, w4) + __field(u64, w5) + ), + TP_fast_assign(__assign_str(dev); + __assign_str(str); + __entry->w0 = *(word + 0); + __entry->w1 = *(word + 1); + __entry->w2 = *(word + 2); + __entry->w3 = *(word + 3); + __entry->w4 = *(word + 4); + __entry->w5 = *(word + 5); ), - TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev), - otx2_mbox_id2name(__entry->id), __entry->err) + TP_printk("[%s] nix parse %s W0:%#llx W1:%#llx W2:%#llx W3:%#llx W4:%#llx W5:%#llx\n", + __get_str(dev), __get_str(str), __entry->w0, __entry->w1, __entry->w2, + __entry->w3, __entry->w4, __entry->w5) ); #endif /* __RVU_TRACE_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index dbc971266865..883e9f4d601c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -8,12 +8,13 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ - otx2_devlink.o qos_sq.o qos.o + otx2_flows.o otx2_tc.o cn10k.o cn20k.o otx2_dmac_flt.o \ + otx2_devlink.o qos_sq.o qos.o otx2_xsk.o rvu_nicvf-y := otx2_vf.o rvu_rep-y := rep.o rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o +rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index a15cc86635d6..bec7d5b4d7cc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -14,6 +14,7 @@ static struct dev_hw_ops otx2_hw_ops = { .sqe_flush = otx2_sqe_flush, .aura_freeptr = otx2_aura_freeptr, .refill_pool_ptrs = otx2_refill_pool_ptrs, + .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, }; static struct dev_hw_ops cn10k_hw_ops = { @@ -21,8 +22,20 @@ static struct dev_hw_ops cn10k_hw_ops = { .sqe_flush = cn10k_sqe_flush, 
.aura_freeptr = cn10k_aura_freeptr, .refill_pool_ptrs = cn10k_refill_pool_ptrs, + .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler, }; +void otx2_init_hw_ops(struct otx2_nic *pfvf) +{ + if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { + pfvf->hw_ops = &otx2_hw_ops; + return; + } + + pfvf->hw_ops = &cn10k_hw_ops; +} +EXPORT_SYMBOL(otx2_init_hw_ops); + int cn10k_lmtst_init(struct otx2_nic *pfvf) { @@ -30,12 +43,9 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf) struct otx2_lmt_info *lmt_info; int err, cpu; - if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { - pfvf->hw_ops = &otx2_hw_ops; + if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) return 0; - } - pfvf->hw_ops = &cn10k_hw_ops; /* Total LMTLINES = num_online_cpus() * 32 (For Burst flush).*/ pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE); pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info); @@ -112,9 +122,12 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) struct otx2_nic *pfvf = dev; int cnt = cq->pool_ptrs; u64 ptrs[NPA_MAX_BURST]; + struct otx2_pool *pool; dma_addr_t bufptr; int num_ptrs = 1; + pool = &pfvf->qset.pool[cq->cq_idx]; + /* Refill pool with new buffers */ while (cq->pool_ptrs) { if (otx2_alloc_buffer(pfvf, cq, &bufptr)) { @@ -124,7 +137,9 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) break; } cq->pool_ptrs--; - ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM; + ptrs[num_ptrs] = pool->xsk_pool ? + (u64)bufptr : (u64)bufptr + OTX2_HEAD_ROOM; + num_ptrs++; if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) { __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs, @@ -352,9 +367,12 @@ int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf) mutex_lock(&pfvf->mbox.lock); /* Remove RQ's policer mapping */ - for (qidx = 0; qidx < hw->rx_queues; qidx++) - cn10k_map_unmap_rq_policer(pfvf, qidx, - hw->matchall_ipolicer, false); + for (qidx = 0; qidx < hw->rx_queues; qidx++) { + rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false); + if (rc) + dev_warn(pfvf->dev, "Failed to unmap RQ %d's policer (error %d).", + qidx, rc); + } rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index e3f0bce9908f..945ab10bd4ed 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -39,4 +39,5 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf); int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile, u32 burst, u64 rate, bool pps); int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf); +void otx2_init_hw_ops(struct otx2_nic *pfvf); #endif /* CN10K_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c new file mode 100644 index 000000000000..c691f0722154 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c @@ -0,0 +1,1041 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell IPSEC offload driver + * + * Copyright (C) 2024 Marvell. 
+ */ + +#include <net/xfrm.h> +#include <linux/netdevice.h> +#include <linux/bitfield.h> +#include <crypto/aead.h> +#include <crypto/gcm.h> + +#include "otx2_common.h" +#include "otx2_struct.h" +#include "cn10k_ipsec.h" + +static bool is_dev_support_ipsec_offload(struct pci_dev *pdev) +{ + return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev); +} + +static bool cn10k_cpt_device_set_inuse(struct otx2_nic *pf) +{ + enum cn10k_cpt_hw_state_e state; + + while (true) { + state = atomic_cmpxchg(&pf->ipsec.cpt_state, + CN10K_CPT_HW_AVAILABLE, + CN10K_CPT_HW_IN_USE); + if (state == CN10K_CPT_HW_AVAILABLE) + return true; + if (state == CN10K_CPT_HW_UNAVAILABLE) + return false; + + mdelay(1); + } +} + +static void cn10k_cpt_device_set_available(struct otx2_nic *pf) +{ + atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_AVAILABLE); +} + +static void cn10k_cpt_device_set_unavailable(struct otx2_nic *pf) +{ + atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_UNAVAILABLE); +} + +static int cn10k_outb_cptlf_attach(struct otx2_nic *pf) +{ + struct rsrc_attach *attach; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + /* Get memory to put this msg */ + attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox); + if (!attach) + goto unlock; + + attach->cptlfs = true; + attach->modify = true; + + /* Send attach request to AF */ + ret = otx2_sync_mbox_msg(&pf->mbox); + +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static int cn10k_outb_cptlf_detach(struct otx2_nic *pf) +{ + struct rsrc_detach *detach; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox); + if (!detach) + goto unlock; + + detach->partial = true; + detach->cptlfs = true; + + /* Send detach request to AF */ + ret = otx2_sync_mbox_msg(&pf->mbox); + +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf) +{ + struct cpt_lf_alloc_req_msg *req; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox); + if (!req) + goto unlock; + + /* PF function */ + req->nix_pf_func = pf->pcifunc; + /* Enable SE-IE Engine Group */ + req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP; + + ret = otx2_sync_mbox_msg(&pf->mbox); + +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static void cn10k_outb_cptlf_free(struct otx2_nic *pf) +{ + mutex_lock(&pf->mbox.lock); + otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox); + otx2_sync_mbox_msg(&pf->mbox); + mutex_unlock(&pf->mbox.lock); +} + +static int cn10k_outb_cptlf_config(struct otx2_nic *pf) +{ + struct cpt_inline_ipsec_cfg_msg *req; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox); + if (!req) + goto unlock; + + req->dir = CPT_INLINE_OUTBOUND; + req->enable = 1; + req->nix_pf_func = pf->pcifunc; + ret = otx2_sync_mbox_msg(&pf->mbox); +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf) +{ + u64 reg_val; + + /* Set Execution Enable of instruction queue */ + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + reg_val |= BIT_ULL(16); + otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val); + + /* Set iqueue's enqueuing */ + reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL); + reg_val |= BIT_ULL(0); + otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val); +} + +static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf) +{ + u32 inflight, grb_cnt, gwb_cnt; + u32 nq_ptr, dq_ptr; + int timeout = 20; + u64 reg_val; + int cnt; + + /* Disable 
instructions enqueuing */ + otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull); + + /* Wait for instruction queue to become empty. + * CPT_LF_INPROG.INFLIGHT count is zero + */ + do { + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val); + if (!inflight) + break; + + usleep_range(10000, 20000); + if (timeout-- < 0) { + netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n"); + break; + } + } while (1); + + /* Disable executions in the LF's queue, + * the queue should be empty at this point + */ + reg_val &= ~BIT_ULL(16); + otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val); + + /* Wait for instruction queue to become empty */ + cnt = 0; + do { + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + if (reg_val & BIT_ULL(31)) + cnt = 0; + else + cnt++; + reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR); + nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val); + dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val); + } while ((cnt < 10) && (nq_ptr != dq_ptr)); + + cnt = 0; + do { + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val); + grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val); + gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val); + if (inflight == 0 && gwb_cnt < 40 && + (grb_cnt == 0 || grb_cnt == 40)) + cnt++; + else + cnt = 0; + } while (cnt < 10); +} + +/* Allocate memory for CPT outbound Instruction queue. + * Instruction queue memory format is: + * ----------------------------- + * | Instruction Group memory | + * | (CPT_LF_Q_SIZE[SIZE_DIV40] | + * | x 16 Bytes) | + * | | + * ----------------------------- <-- CPT_LF_Q_BASE[ADDR] + * | Flow Control (128 Bytes) | + * | | + * ----------------------------- + * | Instruction Memory | + * | (CPT_LF_Q_SIZE[SIZE_DIV40] | + * | × 40 × 64 bytes) | + * | | + * ----------------------------- + */ +static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf) +{ + struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq; + + iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN + + CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN; + + iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size, + &iq->real_dma_addr, GFP_KERNEL); + if (!iq->real_vaddr) + return -ENOMEM; + + /* iq->vaddr/dma_addr points to Flow Control location */ + iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES; + iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES; + + /* Align pointers */ + iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN); + iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN); + return 0; +} + +static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf) +{ + struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq; + + if (iq->real_vaddr) + dma_free_coherent(pf->dev, iq->size, iq->real_vaddr, + iq->real_dma_addr); + + iq->real_vaddr = NULL; + iq->vaddr = NULL; +} + +static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf) +{ + u64 reg_val; + int ret; + + /* Allocate Memory for CPT IQ */ + ret = cn10k_outb_cptlf_iq_alloc(pf); + if (ret) + return ret; + + /* Disable IQ */ + cn10k_outb_cptlf_iq_disable(pf); + + /* Set IQ base address */ + otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr); + + /* Set IQ size */ + reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 + + CN10K_CPT_EXTRA_SIZE_DIV40); + otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val); + + return 0; +} + +static int cn10k_outb_cptlf_init(struct otx2_nic *pf) +{ + int ret; + + /* Initialize CPTLF Instruction Queue (IQ) */ + ret = cn10k_outb_cptlf_iq_init(pf); + if (ret) + return ret; + + /* Configure CPTLF 
for outbound ipsec offload */ + ret = cn10k_outb_cptlf_config(pf); + if (ret) + goto iq_clean; + + /* Enable CPTLF IQ */ + cn10k_outb_cptlf_iq_enable(pf); + return 0; +iq_clean: + cn10k_outb_cptlf_iq_free(pf); + return ret; +} + +static int cn10k_outb_cpt_init(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + int ret; + + /* Attach a CPT LF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_attach(pf); + if (ret) + return ret; + + /* Allocate a CPT LF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_alloc(pf); + if (ret) + goto detach; + + /* Initialize the CPTLF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_init(pf); + if (ret) + goto lf_free; + + pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf, + CN10K_CPT_LF_NQX(0)); + + /* Set ipsec offload enabled for this device */ + pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED; + + cn10k_cpt_device_set_available(pf); + return 0; + +lf_free: + cn10k_outb_cptlf_free(pf); +detach: + cn10k_outb_cptlf_detach(pf); + return ret; +} + +static int cn10k_outb_cpt_clean(struct otx2_nic *pf) +{ + int ret; + + if (!cn10k_cpt_device_set_inuse(pf)) { + netdev_err(pf->netdev, "CPT LF device unavailable\n"); + return -ENODEV; + } + + /* Set ipsec offload disabled for this device */ + pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED; + + /* Disable CPTLF Instruction Queue (IQ) */ + cn10k_outb_cptlf_iq_disable(pf); + + /* Set IQ base address and size to 0 */ + otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0); + otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0); + + /* Free CPTLF IQ */ + cn10k_outb_cptlf_iq_free(pf); + + /* Free and detach CPT LF */ + cn10k_outb_cptlf_free(pf); + ret = cn10k_outb_cptlf_detach(pf); + if (ret) + netdev_err(pf->netdev, "Failed to detach CPT LF\n"); + + cn10k_cpt_device_set_unavailable(pf); + return ret; +} + +static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst, + u64 size) +{ + struct otx2_lmt_info *lmt_info; + u64 val = 0, tar_addr = 0; + + lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id()); + /* FIXME: val[0:10] LMT_ID. + * [12:15] no of LMTST - 1 in the burst. + * [19:63] data size of each LMTST in the burst except first. + */ + val = (lmt_info->lmt_id & 0x7FF); + /* Target address for LMTST flush tells HW how many 128bit + * words are present. + * tar_addr[6:4] size of first LMTST - 1 in units of 128b. 
+ */ + tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4; + dma_wmb(); + memcpy((u64 *)lmt_info->lmt_addr, inst, size); + cn10k_lmt_flush(val, tar_addr); +} + +static int cn10k_wait_for_cpt_respose(struct otx2_nic *pf, + struct cpt_res_s *res) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(100); + u64 *completion_ptr = (u64 *)res; + + do { + if (time_after(jiffies, timeout)) { + netdev_err(pf->netdev, "CPT response timeout\n"); + return -EBUSY; + } + } while ((READ_ONCE(*completion_ptr) & CN10K_CPT_COMP_E_MASK) == + CN10K_CPT_COMP_E_NOTDONE); + + if (!(res->compcode == CN10K_CPT_COMP_E_GOOD || + res->compcode == CN10K_CPT_COMP_E_WARN) || res->uc_compcode) { + netdev_err(pf->netdev, "compcode=%x doneint=%x\n", + res->compcode, res->doneint); + netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n", + res->uc_compcode, (u64)res->uc_info, res->esn); + } + return 0; +} + +static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info) +{ + dma_addr_t res_iova, dptr_iova, sa_iova; + struct cn10k_tx_sa_s *sa_dptr; + struct cpt_inst_s inst = {}; + struct cpt_res_s *res; + u32 sa_size, off; + u64 *sptr, *dptr; + u64 reg_val; + int ret; + + sa_iova = sa_info->iova; + if (!sa_iova) + return -EINVAL; + + res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s), + &res_iova, GFP_ATOMIC); + if (!res) + return -ENOMEM; + + sa_size = sizeof(struct cn10k_tx_sa_s); + sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC); + if (!sa_dptr) { + dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, + res_iova); + return -ENOMEM; + } + + sptr = (__force u64 *)sa_info->base; + dptr = (__force u64 *)sa_dptr; + for (off = 0; off < (sa_size / 8); off++) + *(dptr + off) = (__force u64)cpu_to_be64(*(sptr + off)); + + res->compcode = CN10K_CPT_COMP_E_NOTDONE; + inst.res_addr = res_iova; + inst.dptr = (u64)dptr_iova; + inst.param2 = sa_size >> 3; + inst.dlen = sa_size; + inst.opcode_major = CN10K_IPSEC_MAJOR_OP_WRITE_SA; + inst.opcode_minor = CN10K_IPSEC_MINOR_OP_WRITE_SA; + inst.cptr = sa_iova; + inst.ctx_val = 1; + inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP; + + /* Check if CPT-LF available */ + if (!cn10k_cpt_device_set_inuse(pf)) { + ret = -ENODEV; + goto free_mem; + } + + cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s)); + dma_wmb(); + ret = cn10k_wait_for_cpt_respose(pf, res); + if (ret) + goto set_available; + + /* Trigger CTX flush to write dirty data back to DRAM */ + reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH_CPTR, sa_iova >> 7); + otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val); + +set_available: + cn10k_cpt_device_set_available(pf); +free_mem: + dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova); + dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova); + return ret; +} + +static int cn10k_ipsec_get_hw_ctx_offset(void) +{ + /* Offset on Hardware-context offset in word */ + return (offsetof(struct cn10k_tx_sa_s, hw_ctx) / sizeof(u64)) & 0x7F; +} + +static int cn10k_ipsec_get_ctx_push_size(void) +{ + /* Context push size is round up and in multiple of 8 Byte */ + return (roundup(offsetof(struct cn10k_tx_sa_s, hw_ctx), 8) / 8) & 0x7F; +} + +static int cn10k_ipsec_get_aes_key_len(int key_len) +{ + /* key_len is aes key length in bytes */ + switch (key_len) { + case 16: + return CN10K_IPSEC_SA_AES_KEY_LEN_128; + case 24: + return CN10K_IPSEC_SA_AES_KEY_LEN_192; + default: + return CN10K_IPSEC_SA_AES_KEY_LEN_256; + } +} + +static void cn10k_outb_prepare_sa(struct xfrm_state *x, + struct cn10k_tx_sa_s *sa_entry) +{ + int key_len = 
(x->aead->alg_key_len + 7) / 8; + struct net_device *netdev = x->xso.dev; + u8 *key = x->aead->alg_key; + struct otx2_nic *pf; + u32 *tmp_salt; + u64 *tmp_key; + int idx; + + memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s)); + + /* context size, 128 Byte aligned up */ + pf = netdev_priv(netdev); + sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF; + sa_entry->hw_ctx_off = cn10k_ipsec_get_hw_ctx_offset(); + sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size(); + + /* Ucode to skip two words of CPT_CTX_HW_S */ + sa_entry->ctx_hdr_size = 1; + + /* Allow Atomic operation (AOP) */ + sa_entry->aop_valid = 1; + + /* Outbound, ESP TRANSPORT/TUNNEL Mode, AES-GCM with */ + sa_entry->sa_dir = CN10K_IPSEC_SA_DIR_OUTB; + sa_entry->ipsec_protocol = CN10K_IPSEC_SA_IPSEC_PROTO_ESP; + sa_entry->enc_type = CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM; + sa_entry->iv_src = CN10K_IPSEC_SA_IV_SRC_PACKET; + if (x->props.mode == XFRM_MODE_TUNNEL) + sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL; + else + sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT; + + /* Last 4 bytes are salt */ + key_len -= 4; + sa_entry->aes_key_len = cn10k_ipsec_get_aes_key_len(key_len); + memcpy(sa_entry->cipher_key, key, key_len); + tmp_key = (u64 *)sa_entry->cipher_key; + + for (idx = 0; idx < key_len / 8; idx++) + tmp_key[idx] = (__force u64)cpu_to_be64(tmp_key[idx]); + + memcpy(&sa_entry->iv_gcm_salt, key + key_len, 4); + tmp_salt = (u32 *)&sa_entry->iv_gcm_salt; + *tmp_salt = (__force u32)cpu_to_be32(*tmp_salt); + + /* Write SA context data to memory before enabling */ + wmb(); + + /* Enable SA */ + sa_entry->sa_valid = 1; +} + +static int cn10k_ipsec_validate_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + if (x->props.aalgo != SADB_AALG_NONE) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload authenticated xfrm states"); + return -EINVAL; + } + if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) { + NL_SET_ERR_MSG_MOD(extack, + "Only AES-GCM-ICV16 xfrm state may be offloaded"); + return -EINVAL; + } + if (x->props.calgo != SADB_X_CALG_NONE) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload compressed xfrm states"); + return -EINVAL; + } + if (x->props.flags & XFRM_STATE_ESN) { + NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states"); + return -EINVAL; + } + if (x->props.family != AF_INET && x->props.family != AF_INET6) { + NL_SET_ERR_MSG_MOD(extack, + "Only IPv4/v6 xfrm states may be offloaded"); + return -EINVAL; + } + if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload other than crypto-mode"); + return -EINVAL; + } + if (x->props.mode != XFRM_MODE_TRANSPORT && + x->props.mode != XFRM_MODE_TUNNEL) { + NL_SET_ERR_MSG_MOD(extack, + "Only tunnel/transport xfrm states may be offloaded"); + return -EINVAL; + } + if (x->id.proto != IPPROTO_ESP) { + NL_SET_ERR_MSG_MOD(extack, + "Only ESP xfrm state may be offloaded"); + return -EINVAL; + } + if (x->encap) { + NL_SET_ERR_MSG_MOD(extack, + "Encapsulated xfrm state may not be offloaded"); + return -EINVAL; + } + if (!x->aead) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states without aead"); + return -EINVAL; + } + + if (x->aead->alg_icv_len != 128) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with AEAD ICV length other than 128bit"); + return -EINVAL; + } + if (x->aead->alg_key_len != 128 + 32 && + x->aead->alg_key_len != 192 + 32 && + x->aead->alg_key_len != 256 + 32) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with AEAD key length other than 128/192/256bit"); 
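The three accepted alg_key_len values (128+32, 192+32, 256+32 bits, checked just below) reflect an AES key followed by a 4-byte salt, consistent with the RFC 4106 AES-GCM key blob convention; this is why cn10k_outb_prepare_sa() above does key_len -= 4 before programming the cipher key and copies the trailing bytes into iv_gcm_salt. A minimal restatement of that split, with split_gcm_key() being a hypothetical helper and not a driver function:

#include <linux/types.h>

/* Sketch: AEAD key blob = AES key material followed by a 4-byte GCM salt. */
static void split_gcm_key(const u8 *blob, int alg_key_len_bits,
			  const u8 **aes_key, int *aes_key_len,
			  const u8 **salt)
{
	int total = alg_key_len_bits / 8;	/* e.g. (256 + 32) / 8 = 36 */

	*aes_key = blob;
	*aes_key_len = total - 4;		/* 16, 24 or 32 key bytes */
	*salt = blob + *aes_key_len;		/* last 4 bytes are the salt */
}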
+ return -EINVAL; + } + if (x->tfcpad) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with tfc padding"); + return -EINVAL; + } + if (!x->geniv) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states without geniv"); + return -EINVAL; + } + if (strcmp(x->geniv, "seqiv")) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with geniv other than seqiv"); + return -EINVAL; + } + return 0; +} + +static int cn10k_ipsec_inb_add_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + NL_SET_ERR_MSG_MOD(extack, "xfrm inbound offload not supported"); + return -EOPNOTSUPP; +} + +static int cn10k_ipsec_outb_add_state(struct net_device *dev, + struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + struct cn10k_tx_sa_s *sa_entry; + struct qmem *sa_info; + struct otx2_nic *pf; + int err; + + err = cn10k_ipsec_validate_state(x, extack); + if (err) + return err; + + pf = netdev_priv(dev); + + err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN); + if (err) + return err; + + sa_entry = (struct cn10k_tx_sa_s *)sa_info->base; + cn10k_outb_prepare_sa(x, sa_entry); + + err = cn10k_outb_write_sa(pf, sa_info); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Error writing outbound SA"); + qmem_free(pf->dev, sa_info); + return err; + } + + x->xso.offload_handle = (unsigned long)sa_info; + /* Enable static branch when first SA setup */ + if (!pf->ipsec.outb_sa_count) + static_branch_enable(&cn10k_ipsec_sa_enabled); + pf->ipsec.outb_sa_count++; + return 0; +} + +static int cn10k_ipsec_add_state(struct net_device *dev, + struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) + return cn10k_ipsec_inb_add_state(x, extack); + else + return cn10k_ipsec_outb_add_state(dev, x, extack); +} + +static void cn10k_ipsec_del_state(struct net_device *dev, struct xfrm_state *x) +{ + struct cn10k_tx_sa_s *sa_entry; + struct qmem *sa_info; + struct otx2_nic *pf; + int err; + + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) + return; + + pf = netdev_priv(dev); + + sa_info = (struct qmem *)x->xso.offload_handle; + sa_entry = (struct cn10k_tx_sa_s *)sa_info->base; + memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s)); + /* Disable SA in CPT h/w */ + sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size(); + sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF; + sa_entry->aop_valid = 1; + + err = cn10k_outb_write_sa(pf, sa_info); + if (err) + netdev_err(dev, "Error (%d) deleting SA\n", err); + + x->xso.offload_handle = 0; + qmem_free(pf->dev, sa_info); + + /* If no more SA's then update netdev feature for potential change + * in NETIF_F_HW_ESP. 
+ */ + if (!--pf->ipsec.outb_sa_count) + queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work); +} + +static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = { + .xdo_dev_state_add = cn10k_ipsec_add_state, + .xdo_dev_state_delete = cn10k_ipsec_del_state, +}; + +static void cn10k_ipsec_sa_wq_handler(struct work_struct *work) +{ + struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec, + sa_work); + struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec); + + /* Disable static branch when no more SA enabled */ + static_branch_disable(&cn10k_ipsec_sa_enabled); + rtnl_lock(); + netdev_update_features(pf->netdev); + rtnl_unlock(); +} + +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable) +{ + struct otx2_nic *pf = netdev_priv(netdev); + + /* IPsec offload supported on cn10k */ + if (!is_dev_support_ipsec_offload(pf->pdev)) + return -EOPNOTSUPP; + + /* Initialize CPT for outbound ipsec offload */ + if (enable) + return cn10k_outb_cpt_init(netdev); + + /* Don't do CPT cleanup if SA installed */ + if (pf->ipsec.outb_sa_count) { + netdev_err(pf->netdev, "SA installed on this device\n"); + return -EBUSY; + } + + return cn10k_outb_cpt_clean(pf); +} + +int cn10k_ipsec_init(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + u32 sa_size; + + if (!is_dev_support_ipsec_offload(pf->pdev)) + return 0; + + /* Each SA entry size is 128 Byte round up in size */ + sa_size = sizeof(struct cn10k_tx_sa_s) % OTX2_ALIGN ? + (sizeof(struct cn10k_tx_sa_s) / OTX2_ALIGN + 1) * + OTX2_ALIGN : sizeof(struct cn10k_tx_sa_s); + pf->ipsec.sa_size = sa_size; + + INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler); + pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0); + if (!pf->ipsec.sa_workq) { + netdev_err(pf->netdev, "SA alloc workqueue failed\n"); + return -ENOMEM; + } + + /* Set xfrm device ops */ + netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops; + netdev->hw_features |= NETIF_F_HW_ESP; + netdev->hw_enc_features |= NETIF_F_HW_ESP; + + cn10k_cpt_device_set_unavailable(pf); + return 0; +} +EXPORT_SYMBOL(cn10k_ipsec_init); + +void cn10k_ipsec_clean(struct otx2_nic *pf) +{ + if (!is_dev_support_ipsec_offload(pf->pdev)) + return; + + if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED)) + return; + + if (pf->ipsec.sa_workq) { + destroy_workqueue(pf->ipsec.sa_workq); + pf->ipsec.sa_workq = NULL; + } + + cn10k_outb_cpt_clean(pf); +} +EXPORT_SYMBOL(cn10k_ipsec_clean); + +static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x, + struct sk_buff *skb) +{ + struct ipv6hdr *ipv6h; + struct iphdr *iph; + u8 *src; + + src = (u8 *)skb->data + ETH_HLEN; + + if (x->props.family == AF_INET) { + iph = (struct iphdr *)src; + return ntohs(iph->tot_len); + } + + ipv6h = (struct ipv6hdr *)src; + return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr); +} + +/* Prepare CPT and NIX SQE scatter/gather subdescriptor structure. + * SG of NIX and CPT are same in size. 
+ * Layout of a NIX SQE and CPT SG entry: + * ----------------------------- + * | CPT Scatter Gather | + * | (SQE SIZE) | + * | | + * ----------------------------- + * | NIX SQE | + * | (SQE SIZE) | + * | | + * ----------------------------- + */ +bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset) +{ + struct cpt_sg_s *cpt_sg = NULL; + struct nix_sqe_sg_s *sg = NULL; + u64 dma_addr, *iova = NULL; + u64 *cpt_iova = NULL; + u16 *sg_lens = NULL; + int seg, len; + + sq->sg[sq->head].num_segs = 0; + cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size); + + for (seg = 0; seg < num_segs; seg++) { + if ((seg % MAX_SEGS_PER_SG) == 0) { + sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); + sg->ld_type = NIX_SEND_LDTYPE_LDD; + sg->subdc = NIX_SUBDC_SG; + sg->segs = 0; + sg_lens = (void *)sg; + iova = (void *)sg + sizeof(*sg); + /* Next subdc always starts at a 16byte boundary. + * So if sg->segs is whether 2 or 3, offset += 16bytes. + */ + if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1)) + *offset += sizeof(*sg) + (3 * sizeof(u64)); + else + *offset += sizeof(*sg) + sizeof(u64); + + cpt_sg += (seg / MAX_SEGS_PER_SG) * 4; + cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg); + } + dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); + if (dma_mapping_error(pfvf->dev, dma_addr)) + return false; + + sg_lens[seg % MAX_SEGS_PER_SG] = len; + sg->segs++; + *iova++ = dma_addr; + *cpt_iova++ = dma_addr; + + /* Save DMA mapping info for later unmapping */ + sq->sg[sq->head].dma_addr[seg] = dma_addr; + sq->sg[sq->head].size[seg] = len; + sq->sg[sq->head].num_segs++; + + *cpt_sg = *(struct cpt_sg_s *)sg; + cpt_sg->rsvd_63_50 = 0; + } + + sq->sg[sq->head].skb = (u64)skb; + return true; +} + +static u16 cn10k_ipsec_get_param1(u8 iv_offset) +{ + u16 param1_val; + + /* Set Crypto mode, disable L3/L4 checksum */ + param1_val = CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM | + CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM; + param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT; + return param1_val; +} + +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, + int num_segs, int size) +{ + struct cpt_inst_s inst; + struct cpt_res_s *res; + struct xfrm_state *x; + struct qmem *sa_info; + dma_addr_t dptr_iova; + struct sec_path *sp; + u8 encap_offset; + u8 auth_offset; + u8 gthr_size; + u8 iv_offset; + u16 dlen; + + /* Check for IPSEC offload enabled */ + if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED)) + goto drop; + + sp = skb_sec_path(skb); + if (unlikely(!sp->len)) + goto drop; + + x = xfrm_input_state(skb); + if (unlikely(!x)) + goto drop; + + if (x->props.mode != XFRM_MODE_TRANSPORT && + x->props.mode != XFRM_MODE_TUNNEL) + goto drop; + + dlen = cn10k_ipsec_get_ip_data_len(x, skb); + if (dlen == 0 && netif_msg_tx_err(pf)) { + netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n"); + goto drop; + } + + /* Check for valid SA context */ + sa_info = (struct qmem *)x->xso.offload_handle; + if (!sa_info) + goto drop; + + memset(&inst, 0, sizeof(struct cpt_inst_s)); + + /* Get authentication offset */ + if (x->props.family == AF_INET) + auth_offset = sizeof(struct iphdr); + else + auth_offset = sizeof(struct ipv6hdr); + + /* IV offset is after ESP header */ + iv_offset = auth_offset + sizeof(struct ip_esp_hdr); + /* Encap will start after IV */ + encap_offset = iv_offset + GCM_RFC4106_IV_SIZE; + + /* CPT Instruction word-1 */ + res = (struct cpt_res_s 
*)(sq->cpt_resp->base + (64 * sq->head)); + res->compcode = 0; + inst.res_addr = sq->cpt_resp->iova + (64 * sq->head); + + /* CPT Instruction word-2 */ + inst.rvu_pf_func = pf->pcifunc; + + /* CPT Instruction word-3: + * Set QORD to force CPT_RES_S write completion + */ + inst.qord = 1; + + /* CPT Instruction word-4 */ + /* inst.dlen should not include ICV length */ + inst.dlen = dlen + ETH_HLEN - (x->aead->alg_icv_len / 8); + inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC; + inst.param1 = cn10k_ipsec_get_param1(iv_offset); + + inst.param2 = encap_offset << + CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT; + inst.param2 |= (u16)auth_offset << + CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT; + + /* CPT Instruction word-5 */ + gthr_size = num_segs / MAX_SEGS_PER_SG; + gthr_size = (num_segs % MAX_SEGS_PER_SG) ? gthr_size + 1 : gthr_size; + + gthr_size &= 0xF; + dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2))); + inst.dptr = dptr_iova | ((u64)gthr_size << 60); + + /* CPT Instruction word-6 */ + inst.rptr = inst.dptr; + + /* CPT Instruction word-7 */ + inst.cptr = sa_info->iova; + inst.ctx_val = 1; + inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP; + + /* CPT Instruction word-0 */ + inst.nixtxl = (size / 16) - 1; + inst.dat_offset = ETH_HLEN; + inst.nixtx_offset = sq->sqe_size; + + netdev_tx_sent_queue(txq, skb->len); + + /* Finally Flush the CPT instruction */ + sq->head++; + sq->head &= (sq->sqe_cnt - 1); + cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s)); + return true; +drop: + dev_kfree_skb_any(skb); + return false; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h new file mode 100644 index 000000000000..43fbce0d6039 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h @@ -0,0 +1,265 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell IPSEC offload driver + * + * Copyright (C) 2024 Marvell. + */ + +#ifndef CN10K_IPSEC_H +#define CN10K_IPSEC_H + +#include <linux/types.h> + +DECLARE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled); + +/* CPT instruction size in bytes */ +#define CN10K_CPT_INST_SIZE 64 + +/* CPT instruction (CPT_INST_S) queue length */ +#define CN10K_CPT_INST_QLEN 8200 + +/* CPT instruction queue size passed to HW is in units of + * 40*CPT_INST_S messages. 
+ */ +#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40) + +/* CPT needs 320 free entries */ +#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE) +#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40) + +/* CPT instruction queue length in bytes */ +#define CN10K_CPT_INST_QLEN_BYTES \ + ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \ + CN10K_CPT_INST_QLEN_EXTRA_BYTES) + +/* CPT instruction group queue length in bytes */ +#define CN10K_CPT_INST_GRP_QLEN_BYTES \ + ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16) + +/* CPT FC length in bytes */ +#define CN10K_CPT_Q_FC_LEN 128 + +/* Default CPT engine group for ipsec offload */ +#define CN10K_DEF_CPT_IPSEC_EGRP 1 + +/* CN10K CPT LF registers */ +#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT) +#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10) +#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40) +#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0) +#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100) +#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110) +#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120) +#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3) +#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510) + +/* IPSEC Instruction opcodes */ +#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL +#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL +#define CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC 0x2AUL + +enum cn10k_cpt_comp_e { + CN10K_CPT_COMP_E_NOTDONE = 0x00, + CN10K_CPT_COMP_E_GOOD = 0x01, + CN10K_CPT_COMP_E_FAULT = 0x02, + CN10K_CPT_COMP_E_HWERR = 0x04, + CN10K_CPT_COMP_E_INSTERR = 0x05, + CN10K_CPT_COMP_E_WARN = 0x06, + CN10K_CPT_COMP_E_MASK = 0x3F +}; + +struct cn10k_cpt_inst_queue { + u8 *vaddr; + u8 *real_vaddr; + dma_addr_t dma_addr; + dma_addr_t real_dma_addr; + u32 size; +}; + +enum cn10k_cpt_hw_state_e { + CN10K_CPT_HW_UNAVAILABLE, + CN10K_CPT_HW_AVAILABLE, + CN10K_CPT_HW_IN_USE +}; + +struct cn10k_ipsec { + /* Outbound CPT */ + u64 io_addr; + atomic_t cpt_state; + struct cn10k_cpt_inst_queue iq; + + /* SA info */ + u32 sa_size; + u32 outb_sa_count; + struct work_struct sa_work; + struct workqueue_struct *sa_workq; +}; + +/* CN10K IPSEC Security Association (SA) */ +/* SA direction */ +#define CN10K_IPSEC_SA_DIR_INB 0 +#define CN10K_IPSEC_SA_DIR_OUTB 1 +/* SA protocol */ +#define CN10K_IPSEC_SA_IPSEC_PROTO_AH 0 +#define CN10K_IPSEC_SA_IPSEC_PROTO_ESP 1 +/* SA Encryption Type */ +#define CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM 5 +/* SA IPSEC mode Transport/Tunnel */ +#define CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT 0 +#define CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL 1 +/* SA AES Key Length */ +#define CN10K_IPSEC_SA_AES_KEY_LEN_128 1 +#define CN10K_IPSEC_SA_AES_KEY_LEN_192 2 +#define CN10K_IPSEC_SA_AES_KEY_LEN_256 3 +/* IV Source */ +#define CN10K_IPSEC_SA_IV_SRC_COUNTER 0 +#define CN10K_IPSEC_SA_IV_SRC_PACKET 3 + +struct cn10k_tx_sa_s { + u64 esn_en : 1; /* W0 */ + u64 rsvd_w0_1_8 : 8; + u64 hw_ctx_off : 7; + u64 ctx_id : 16; + u64 rsvd_w0_32_47 : 16; + u64 ctx_push_size : 7; + u64 rsvd_w0_55 : 1; + u64 ctx_hdr_size : 2; + u64 aop_valid : 1; + u64 rsvd_w0_59 : 1; + u64 ctx_size : 4; + u64 w1; /* W1 */ + u64 sa_valid : 1; /* W2 */ + u64 sa_dir : 1; + u64 rsvd_w2_2_3 : 2; + u64 ipsec_mode : 1; + u64 ipsec_protocol : 1; + u64 aes_key_len : 2; + u64 enc_type : 3; + u64 rsvd_w2_11_19 : 9; + u64 iv_src : 2; + u64 rsvd_w2_22_31 : 10; + u64 rsvd_w2_32_63 : 32; + u64 w3; /* W3 */ + u8 cipher_key[32]; /* W4 - W7 */ + u32 rsvd_w8_0_31; /* W8 : IV */ + u32 iv_gcm_salt; + u64 rsvd_w9_w30[22]; /* W9 - W30 */ + u64 hw_ctx[6]; /* W31 - W36 */ +}; + +/* CPT 
instruction parameter-1 */ +#define CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM 0x1 +#define CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM 0x2 +#define CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE 0x20 +#define CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT 8 + +/* CPT instruction parameter-2 */ +#define CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT 0 +#define CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT 8 + +/* CPT Instruction Structure */ +struct cpt_inst_s { + u64 nixtxl : 3; /* W0 */ + u64 doneint : 1; + u64 rsvd_w0_4_15 : 12; + u64 dat_offset : 8; + u64 ext_param1 : 8; + u64 nixtx_offset : 20; + u64 rsvd_w0_52_63 : 12; + u64 res_addr; /* W1 */ + u64 tag : 32; /* W2 */ + u64 tt : 2; + u64 grp : 10; + u64 rsvd_w2_44_47 : 4; + u64 rvu_pf_func : 16; + u64 qord : 1; /* W3 */ + u64 rsvd_w3_1_2 : 2; + u64 wqe_ptr : 61; + u64 dlen : 16; /* W4 */ + u64 param2 : 16; + u64 param1 : 16; + u64 opcode_major : 8; + u64 opcode_minor : 8; + u64 dptr; /* W5 */ + u64 rptr; /* W6 */ + u64 cptr : 60; /* W7 */ + u64 ctx_val : 1; + u64 egrp : 3; +}; + +/* CPT Instruction Result Structure */ +struct cpt_res_s { + u64 compcode : 7; /* W0 */ + u64 doneint : 1; + u64 uc_compcode : 8; + u64 uc_info : 48; + u64 esn; /* W1 */ +}; + +/* CPT SG structure */ +struct cpt_sg_s { + u64 seg1_size : 16; + u64 seg2_size : 16; + u64 seg3_size : 16; + u64 segs : 2; + u64 rsvd_63_50 : 14; +}; + +/* CPT LF_INPROG Register */ +#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0) +#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32) +#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40) + +/* CPT LF_Q_GRP_PTR Register */ +#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0) +#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32) + +/* CPT LF_Q_SIZE Register */ +#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7) + +/* CPT LF_Q_SIZE Register */ +#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0) + +/* CPT LF CTX Flush Register */ +#define CPT_LF_CTX_FLUSH_CPTR GENMASK_ULL(45, 0) + +#ifdef CONFIG_XFRM_OFFLOAD +int cn10k_ipsec_init(struct net_device *netdev); +void cn10k_ipsec_clean(struct otx2_nic *pf); +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable); +bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset); +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, + int num_segs, int size); +#else +static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev) +{ + return 0; +} + +static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf) +{ +} + +static inline __maybe_unused +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable) +{ + return 0; +} + +static inline bool __maybe_unused +otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset) +{ + return true; +} + +static inline bool __maybe_unused +cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, + int num_segs, int size) +{ + return true; +} +#endif +#endif // CN10K_IPSEC_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index 6cc7a78968fc..4c7e0f345cb5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -133,9 +133,7 @@ static const char *rsrc_name(enum mcs_rsrc_type rsrc_type) return "SA"; default: return "Unknown"; - }; - - return 
"Unknown"; + } } static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir, @@ -533,7 +531,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf, if (sw_tx_sc->encrypt) sectag_tci |= (MCS_TCI_E | MCS_TCI_C); - policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu); + policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, + pfvf->netdev->mtu + OTX2_ETH_HLEN); /* Write SecTag excluding AN bits(1..0) */ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2); policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c new file mode 100644 index 000000000000..ec8cde98076d --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include "otx2_common.h" +#include "otx2_reg.h" +#include "otx2_struct.h" +#include "cn10k.h" + +static struct dev_hw_ops cn20k_hw_ops = { + .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler, + .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler, + .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler, +}; + +void cn20k_init(struct otx2_nic *pfvf) +{ + pfvf->hw_ops = &cn20k_hw_ops; +} +EXPORT_SYMBOL(cn20k_init); +/* CN20K mbox AF => PFx irq handler */ +irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq) +{ + struct otx2_nic *pf = pf_irq; + struct mbox *mw = &pf->mbox; + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + u64 pf_trig_val; + + pf_trig_val = otx2_read64(pf, RVU_PF_INT) & 0x3ULL; + + /* Clear the IRQ */ + otx2_write64(pf, RVU_PF_INT, pf_trig_val); + + if (pf_trig_val & BIT_ULL(0)) { + mbox = &mw->mbox_up; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(pf->mbox_wq, &mw->mbox_up_wrk); + + trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF", + BIT_ULL(0)); + } + + if (pf_trig_val & BIT_ULL(1)) { + mbox = &mw->mbox; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(pf->mbox_wq, &mw->mbox_wrk); + trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF", + BIT_ULL(1)); + } + + return IRQ_HANDLED; +} + +irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq) +{ + struct otx2_nic *vf = vf_irq; + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + u64 vf_trig_val; + + vf_trig_val = otx2_read64(vf, RVU_VF_INT) & 0x3ULL; + /* Clear the IRQ */ + otx2_write64(vf, RVU_VF_INT, vf_trig_val); + + /* Read latest mbox data */ + smp_rmb(); + + if (vf_trig_val & BIT_ULL(1)) { + /* Check for PF => VF response messages */ + mbox = &vf->mbox.mbox; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk); + + trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF0 to VF", + BIT_ULL(1)); + } + + if (vf_trig_val & BIT_ULL(0)) { + /* Check for PF => VF notification messages */ + mbox = &vf->mbox.mbox_up; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk); + + trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF0 to VF", + 
BIT_ULL(0)); + } + + return IRQ_HANDLED; +} + +void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) +{ + /* Clear PF <=> VF mailbox IRQ */ + otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull); + + /* Enable PF <=> VF mailbox IRQ */ + otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(numvfs)); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(numvfs)); + if (numvfs > 64) { + numvfs -= 64; + otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1), + INTR_MASK(numvfs)); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1), + INTR_MASK(numvfs)); + } +} + +void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) +{ + int vector, intr_vec, vec = 0; + + /* Disable PF <=> VF mailbox IRQ */ + otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), ~0ull); + + otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull); + + if (numvfs > 64) { + otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull); + otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull); + } + + for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <= + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) { + vector = pci_irq_vector(pf->pdev, intr_vec); + free_irq(vector, pf->hw.pfvf_irq_devid[vec]); + } +} + +irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq) +{ + struct pf_irq_data *irq_data = pf_irq; + struct otx2_nic *pf = irq_data->pf; + struct mbox *mbox; + u64 intr; + + /* Sync with mbox memory region */ + rmb(); + + /* Clear interrupts */ + intr = otx2_read64(pf, irq_data->intr_status); + otx2_write64(pf, irq_data->intr_status, intr); + mbox = pf->mbox_pfvf; + + if (intr) + trace_otx2_msg_interrupt(pf->pdev, "VF(s) to PF", intr); + + irq_data->pf_queue_work_hdlr(mbox, pf->mbox_pfvf_wq, irq_data->start, + irq_data->mdevs, intr); + + return IRQ_HANDLED; +} + +int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) +{ + struct otx2_hw *hw = &pf->hw; + struct pf_irq_data *irq_data; + int intr_vec, ret, vec = 0; + char *irq_name; + + /* irq data for 4 PF intr vectors */ + irq_data = devm_kcalloc(pf->dev, 4, + sizeof(struct pf_irq_data), GFP_KERNEL); + if (!irq_data) + return -ENOMEM; + + for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <= + RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) { + switch (intr_vec) { + case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF_INTX(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF_INTX(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 96; + break; + case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF1_INTX(0); + irq_data[vec].start = 0; + irq_data[vec].mdevs = 64; + break; + case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1: + irq_data[vec].intr_status = + RVU_MBOX_PF_VFPF1_INTX(1); + irq_data[vec].start = 64; + irq_data[vec].mdevs = 96; + break; + } + irq_data[vec].pf_queue_work_hdlr = otx2_queue_vf_work; + irq_data[vec].vec_num = intr_vec; + irq_data[vec].pf = pf; + + /* Register mailbox interrupt handler */ + irq_name = &hw->irq_name[intr_vec * NAME_SIZE]; + if (pf->pcifunc) + 
snprintf(irq_name, NAME_SIZE, + "RVUPF%d_VF%d Mbox%d", rvu_get_pf(pf->pdev, + pf->pcifunc), vec / 2, vec % 2); + else + snprintf(irq_name, NAME_SIZE, "RVUPF_VF%d Mbox%d", + vec / 2, vec % 2); + + hw->pfvf_irq_devid[vec] = &irq_data[vec]; + ret = request_irq(pci_irq_vector(pf->pdev, intr_vec), + pf->hw_ops->pfvf_mbox_intr_handler, 0, + irq_name, + &irq_data[vec]); + if (ret) { + dev_err(pf->dev, + "RVUPF: IRQ registration failed for PFVF mbox0 irq\n"); + return ret; + } + } + + cn20k_enable_pfvf_mbox_intr(pf, numvfs); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h new file mode 100644 index 000000000000..832adaf8c57f --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#ifndef CN20K_H +#define CN20K_H + +#include "otx2_common.h" + +void cn20k_init(struct otx2_nic *pfvf); +int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs); +void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs); +void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs); +#endif /* CN20K_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 523ecb798a7a..f674729124e6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -10,22 +10,30 @@ #include <net/page_pool/helpers.h> #include <net/tso.h> #include <linux/bitfield.h> +#include <linux/dcbnl.h> +#include <net/xfrm.h> #include "otx2_reg.h" #include "otx2_common.h" #include "otx2_struct.h" #include "cn10k.h" +#include "otx2_xsk.h" + +static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf) +{ + return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en; +} static void otx2_nix_rq_op_stats(struct queue_stats *stats, struct otx2_nic *pfvf, int qidx) { u64 incr = (u64)qidx << 32; - u64 *ptr; + void __iomem *ptr; - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS); stats->bytes = otx2_atomic64_add(incr, ptr); - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS); stats->pkts = otx2_atomic64_add(incr, ptr); } @@ -33,12 +41,12 @@ static void otx2_nix_sq_op_stats(struct queue_stats *stats, struct otx2_nic *pfvf, int qidx) { u64 incr = (u64)qidx << 32; - u64 *ptr; + void __iomem *ptr; - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS); stats->bytes = otx2_atomic64_add(incr, ptr); - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS); stats->pkts = otx2_atomic64_add(incr, ptr); } @@ -310,19 +318,22 @@ fail: return err; } -int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id) +int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id, const u32 *ind_tbl) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; const int index = rss->rss_size * ctx_id; struct mbox *mbox = &pfvf->mbox; - struct otx2_rss_ctx *rss_ctx; struct nix_aq_enq_req *aq; int idx, err; mutex_lock(&mbox->lock); - rss_ctx = rss->rss_ctx[ctx_id]; + ind_tbl = ind_tbl ?: rss->ind_tbl; /* Get memory to put this msg */ for (idx = 0; idx < rss->rss_size; idx++) { + /* Ignore the queue if AF_XDP zero copy is enabled */ + if (test_bit(ind_tbl[idx], pfvf->af_xdp_zc_qidx)) + continue; + aq 
= otx2_mbox_alloc_msg_nix_aq_enq(mbox); if (!aq) { /* The shared memory buffer can be full. @@ -340,7 +351,7 @@ int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id) } } - aq->rss.rq = rss_ctx->ind_tbl[idx]; + aq->rss.rq = ind_tbl[idx]; /* Fill AQ info */ aq->qidx = index + idx; @@ -378,30 +389,22 @@ void otx2_set_rss_key(struct otx2_nic *pfvf) int otx2_rss_init(struct otx2_nic *pfvf) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; - struct otx2_rss_ctx *rss_ctx; int idx, ret = 0; - rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]); + rss->rss_size = sizeof(*rss->ind_tbl); /* Init RSS key if it is not setup already */ if (!rss->enable) netdev_rss_key_fill(rss->key, sizeof(rss->key)); otx2_set_rss_key(pfvf); - if (!netif_is_rxfh_configured(pfvf->netdev)) { - /* Set RSS group 0 as default indirection table */ - rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size, - GFP_KERNEL); - if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]) - return -ENOMEM; - - rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]; + if (!netif_is_rxfh_configured(pfvf->netdev)) for (idx = 0; idx < rss->rss_size; idx++) - rss_ctx->ind_tbl[idx] = + rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx, pfvf->hw.rx_queues); - } - ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP); + + ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP, NULL); if (ret) return ret; @@ -542,10 +545,13 @@ static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool, } static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, - dma_addr_t *dma) + dma_addr_t *dma, int qidx, int idx) { u8 *buf; + if (pool->xsk_pool) + return otx2_xsk_pool_alloc_buf(pfvf, pool, dma, idx); + if (pool->page_pool) return otx2_alloc_pool_buf(pfvf, pool, dma); @@ -564,12 +570,12 @@ static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, } int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, - dma_addr_t *dma) + dma_addr_t *dma, int qidx, int idx) { int ret; local_bh_disable(); - ret = __otx2_alloc_rbuf(pfvf, pool, dma); + ret = __otx2_alloc_rbuf(pfvf, pool, dma, qidx, idx); local_bh_enable(); return ret; } @@ -577,7 +583,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, dma_addr_t *dma) { - if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) + if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma, + cq->cq_idx, cq->pool_ptrs - 1))) return -ENOMEM; return 0; } @@ -844,9 +851,10 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) { int qidx, sqe_tail, sqe_head; struct otx2_snd_queue *sq; - u64 incr, *ptr, val; + void __iomem *ptr; + u64 incr, val; - ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { sq = &pfvf->qset.sq[qidx]; if (!sq->sqb_ptrs) @@ -877,7 +885,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) #define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */ #define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */ -static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) +int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) { struct otx2_qset *qset = &pfvf->qset; struct nix_aq_enq_req *aq; @@ -964,6 +972,29 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) if (err) return err; + /* Allocate memory for NIX SQE (which includes NIX SG) and CPT SG. 
+ * SG of NIX and CPT are same in size. Allocate memory for CPT SG + * same as NIX SQE for base address alignment. + * Layout of a NIX SQE and CPT SG entry: + * ----------------------------- + * | CPT Scatter Gather | + * | (SQE SIZE) | + * | | + * ----------------------------- + * | NIX SQE | + * | (SQE SIZE) | + * | | + * ----------------------------- + */ + err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt, + sq->sqe_size * 2); + if (err) + return err; + + err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64); + if (err) + return err; + if (qidx < pfvf->hw.tx_queues) { err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, TSO_HEADER_SIZE); @@ -998,6 +1029,10 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) sq->stats.bytes = 0; sq->stats.pkts = 0; + /* Attach XSK_BUFF_POOL to XDP queue */ + if (qidx > pfvf->hw.xdp_queues) + otx2_attach_xsk_buff(pfvf, sq, (qidx - pfvf->hw.xdp_queues)); + chan_offset = qidx % pfvf->hw.tx_chan_cnt; err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura); @@ -1011,12 +1046,13 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) } -static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) +int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) { struct otx2_qset *qset = &pfvf->qset; int err, pool_id, non_xdp_queues; struct nix_aq_enq_req *aq; struct otx2_cq_queue *cq; + struct otx2_pool *pool; cq = &qset->cq[qidx]; cq->cq_idx = qidx; @@ -1025,8 +1061,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) cq->cq_type = CQ_RX; cq->cint_idx = qidx; cq->cqe_cnt = qset->rqe_cnt; - if (pfvf->xdp_prog) + if (pfvf->xdp_prog) { xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0); + pool = &qset->pool[qidx]; + if (pool->xsk_pool) { + xdp_rxq_info_reg_mem_model(&cq->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL); + xsk_pool_set_rxq_info(pool->xsk_pool, &cq->xdp_rxq); + } else if (pool->page_pool) { + xdp_rxq_info_reg_mem_model(&cq->xdp_rxq, + MEM_TYPE_PAGE_POOL, + pool->page_pool); + } + } } else if (qidx < non_xdp_queues) { cq->cq_type = CQ_TX; cq->cint_idx = qidx - pfvf->hw.rx_queues; @@ -1245,9 +1293,10 @@ void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); page = virt_to_head_page(phys_to_virt(pa)); - if (pool->page_pool) { page_pool_put_full_page(pool->page_pool, page, true); + } else if (pool->xsk_pool) { + /* Note: No way of identifying xdp_buff */ } else { dma_unmap_page_attrs(pfvf->dev, iova, size, DMA_FROM_DEVICE, @@ -1262,6 +1311,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) int pool_id, pool_start = 0, pool_end = 0, size = 0; struct otx2_pool *pool; u64 iova; + int idx; if (type == AURA_NIX_SQ) { pool_start = otx2_get_pool_idx(pfvf, type, 0); @@ -1276,16 +1326,21 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) /* Free SQB and RQB pointers from the aura pool */ for (pool_id = pool_start; pool_id < pool_end; pool_id++) { - iova = otx2_aura_allocptr(pfvf, pool_id); pool = &pfvf->qset.pool[pool_id]; + iova = otx2_aura_allocptr(pfvf, pool_id); while (iova) { if (type == AURA_NIX_RQ) iova -= OTX2_HEAD_ROOM; - otx2_free_bufs(pfvf, pool, iova, size); - iova = otx2_aura_allocptr(pfvf, pool_id); } + + for (idx = 0 ; idx < pool->xdp_cnt; idx++) { + if (!pool->xdp[idx]) + continue; + + xsk_buff_free(pool->xdp[idx]); + } } } @@ -1302,7 +1357,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf) qmem_free(pfvf->dev, pool->stack); qmem_free(pfvf->dev, pool->fc_addr); page_pool_destroy(pool->page_pool); - pool->page_pool = 
NULL; + devm_kfree(pfvf->dev, pool->xdp); + pool->xsk_pool = NULL; } devm_kfree(pfvf->dev, pfvf->qset.pool); pfvf->qset.pool = NULL; @@ -1389,6 +1445,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, int stack_pages, int numptrs, int buf_size, int type) { struct page_pool_params pp_params = { 0 }; + struct xsk_buff_pool *xsk_pool; struct npa_aq_enq_req *aq; struct otx2_pool *pool; int err; @@ -1432,21 +1489,35 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, aq->ctype = NPA_AQ_CTYPE_POOL; aq->op = NPA_AQ_INSTOP_INIT; - if (type != AURA_NIX_RQ) { - pool->page_pool = NULL; + if (type != AURA_NIX_RQ) + return 0; + + if (!test_bit(pool_id, pfvf->af_xdp_zc_qidx)) { + pp_params.order = get_order(buf_size); + pp_params.flags = PP_FLAG_DMA_MAP; + pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); + pp_params.nid = NUMA_NO_NODE; + pp_params.dev = pfvf->dev; + pp_params.dma_dir = DMA_FROM_DEVICE; + pool->page_pool = page_pool_create(&pp_params); + if (IS_ERR(pool->page_pool)) { + netdev_err(pfvf->netdev, "Creation of page pool failed\n"); + return PTR_ERR(pool->page_pool); + } return 0; } - pp_params.order = get_order(buf_size); - pp_params.flags = PP_FLAG_DMA_MAP; - pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); - pp_params.nid = NUMA_NO_NODE; - pp_params.dev = pfvf->dev; - pp_params.dma_dir = DMA_FROM_DEVICE; - pool->page_pool = page_pool_create(&pp_params); - if (IS_ERR(pool->page_pool)) { - netdev_err(pfvf->netdev, "Creation of page pool failed\n"); - return PTR_ERR(pool->page_pool); + /* Set XSK pool to support AF_XDP zero-copy */ + xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, pool_id); + if (xsk_pool) { + pool->xsk_pool = xsk_pool; + pool->xdp_cnt = numptrs; + pool->xdp = devm_kcalloc(pfvf->dev, + numptrs, sizeof(struct xdp_buff *), GFP_KERNEL); + if (IS_ERR(pool->xdp)) { + netdev_err(pfvf->netdev, "Creation of xsk pool failed\n"); + return PTR_ERR(pool->xdp); + } } return 0; @@ -1507,9 +1578,18 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) } for (ptr = 0; ptr < num_sqbs; ptr++) { - err = otx2_alloc_rbuf(pfvf, pool, &bufptr); - if (err) + err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr); + if (err) { + if (pool->xsk_pool) { + ptr--; + while (ptr >= 0) { + xsk_buff_free(pool->xdp[ptr]); + ptr--; + } + } goto err_mem; + } + pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr); sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; } @@ -1559,11 +1639,19 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf) /* Allocate pointers and free them to aura/pool */ for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { pool = &pfvf->qset.pool[pool_id]; + for (ptr = 0; ptr < num_ptrs; ptr++) { - err = otx2_alloc_rbuf(pfvf, pool, &bufptr); - if (err) + err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr); + if (err) { + if (pool->xsk_pool) { + while (ptr) + xsk_buff_free(pool->xdp[--ptr]); + } return -ENOMEM; + } + pfvf->hw_ops->aura_freeptr(pfvf, pool_id, + pool->xsk_pool ? bufptr : bufptr + OTX2_HEAD_ROOM); } } @@ -1722,18 +1810,43 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) return -ENOMEM; req->chan_base = 0; -#ifdef CONFIG_DCB - req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1; - req->bpid_per_chan = pfvf->pfc_en ? 
1 : 0; -#else - req->chan_cnt = 1; - req->bpid_per_chan = 0; -#endif + if (otx2_is_pfc_enabled(pfvf)) { + req->chan_cnt = IEEE_8021QAZ_MAX_TCS; + req->bpid_per_chan = 1; + } else { + req->chan_cnt = pfvf->hw.rx_chan_cnt; + req->bpid_per_chan = 0; + } return otx2_sync_mbox_msg(&pfvf->mbox); } EXPORT_SYMBOL(otx2_nix_config_bp); +int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable) +{ + struct nix_bp_cfg_req *req; + + if (enable) + req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox); + else + req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox); + + if (!req) + return -ENOMEM; + + req->chan_base = 0; + if (otx2_is_pfc_enabled(pfvf)) { + req->chan_cnt = IEEE_8021QAZ_MAX_TCS; + req->bpid_per_chan = 1; + } else { + req->chan_cnt = pfvf->hw.rx_chan_cnt; + req->bpid_per_chan = 0; + } + + return otx2_sync_mbox_msg(&pfvf->mbox); +} +EXPORT_SYMBOL(otx2_nix_cpt_config_bp); + /* Mbox message handlers */ void mbox_handler_cgx_stats(struct otx2_nic *pfvf, struct cgx_stats_rsp *rsp) @@ -1934,6 +2047,43 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t } EXPORT_SYMBOL(otx2_handle_ntuple_tc_features); +int otx2_set_hw_capabilities(struct otx2_nic *pfvf) +{ + struct mbox *mbox = &pfvf->mbox; + struct otx2_hw *hw = &pfvf->hw; + struct get_hw_cap_rsp *rsp; + struct msg_req *req; + int ret = -ENOMEM; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_get_hw_cap(mbox); + if (!req) + goto fail; + + ret = otx2_sync_mbox_msg(mbox); + if (ret) + goto fail; + + rsp = (struct get_hw_cap_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, + 0, &req->hdr); + if (IS_ERR(rsp)) { + ret = -EINVAL; + goto fail; + } + + if (rsp->hw_caps & HW_CAP_MACSEC) + __set_bit(CN10K_HW_MACSEC, &hw->cap_flag); + + mutex_unlock(&mbox->lock); + + return 0; +fail: + dev_err(pfvf->dev, "Cannot get MACSEC capability from AF\n"); + mutex_unlock(&mbox->lock); + return ret; +} + #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ int __weak \ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ @@ -1947,3 +2097,48 @@ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name); MBOX_UP_CGX_MESSAGES MBOX_UP_MCS_MESSAGES #undef M + +dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, + struct sk_buff *skb, int seg, int *len) +{ + enum dma_data_direction dir = DMA_TO_DEVICE; + const skb_frag_t *frag; + struct page *page; + int offset; + + /* Crypto hardware need write permission for ipsec crypto offload */ + if (unlikely(xfrm_offload(skb))) { + dir = DMA_BIDIRECTIONAL; + skb = skb_unshare(skb, GFP_ATOMIC); + } + + /* First segment is always skb->data */ + if (!seg) { + page = virt_to_page(skb->data); + offset = offset_in_page(skb->data); + *len = skb_headlen(skb); + } else { + frag = &skb_shinfo(skb)->frags[seg - 1]; + page = skb_frag_page(frag); + offset = skb_frag_off(frag); + *len = skb_frag_size(frag); + } + return otx2_dma_map_page(pfvf, page, offset, *len, dir); +} + +void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) +{ + enum dma_data_direction dir = DMA_TO_DEVICE; + struct sk_buff *skb = NULL; + int seg; + + skb = (struct sk_buff *)sg->skb; + if (unlikely(xfrm_offload(skb))) + dir = DMA_BIDIRECTIONAL; + + for (seg = 0; seg < sg->num_segs; seg++) { + otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], + sg->size[seg], dir); + } + sg->num_segs = 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 566848663fea..e3765b73c434 100644 --- 
a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -21,15 +21,19 @@ #include <linux/time64.h> #include <linux/dim.h> #include <uapi/linux/if_macsec.h> +#include <net/page_pool/helpers.h> #include <mbox.h> #include <npc.h> #include "otx2_reg.h" #include "otx2_txrx.h" #include "otx2_devlink.h" +#include <rvu.h> #include <rvu_trace.h> #include "qos.h" #include "rep.h" +#include "cn10k_ipsec.h" +#include "cn20k.h" /* IPv4 flag more fragment bit */ #define IPV4_FLAG_MORE 0x20 @@ -40,6 +44,7 @@ #define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8 #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200 +#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900 #define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00 #define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7 @@ -55,6 +60,15 @@ #define NIX_PF_PFC_PRIO_MAX 8 #endif +/* Number of segments per SG structure */ +#define MAX_SEGS_PER_SG 3 + +irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq); +irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq); +irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq); +irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq); +irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq); + enum arua_mapped_qtypes { AURA_NIX_RQ, AURA_NIX_SQ, @@ -79,10 +93,6 @@ struct otx2_lmt_info { u64 lmt_addr; u16 lmt_id; }; -/* RSS configuration */ -struct otx2_rss_ctx { - u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE]; -}; struct otx2_rss_info { u8 enable; @@ -90,7 +100,7 @@ struct otx2_rss_info { u16 rss_size; #define RSS_HASH_KEY_SIZE 44 /* 352 bit key */ u8 key[RSS_HASH_KEY_SIZE]; - struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS]; + u32 ind_tbl[MAX_RSS_INDIR_TBL_SIZE]; }; /* NIX (or NPC) RX errors */ @@ -123,6 +133,12 @@ enum otx2_errcodes_re { ERRCODE_IL4_CSUM = 0x22, }; +enum otx2_xdp_action { + OTX2_XDP_TX = BIT(0), + OTX2_XDP_REDIRECT = BIT(1), + OTX2_AF_XDP_FRAME = BIT(2), +}; + struct otx2_dev_stats { u64 rx_bytes; u64 rx_frames; @@ -233,6 +249,7 @@ struct otx2_hw { u16 nix_msixoff; /* Offset of NIX vectors */ char *irq_name; cpumask_var_t *affinity_mask; + struct pf_irq_data *pfvf_irq_devid[4]; /* Stats */ struct otx2_dev_stats dev_stats; @@ -344,6 +361,7 @@ struct otx2_flow_config { struct list_head flow_list_tc; u8 ucast_flt_cnt; bool ntuple; + u16 ntuple_cnt; }; struct dev_hw_ops { @@ -353,6 +371,9 @@ struct dev_hw_ops { int size, int qidx); int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq); void (*aura_freeptr)(void *dev, int aura, u64 buf); + irqreturn_t (*pfaf_mbox_intr_handler)(int irq, void *pf_irq); + irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq); + irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq); }; #define CN10K_MCS_SA_PER_SC 4 @@ -420,6 +441,16 @@ struct cn10k_mcs_cfg { struct list_head rxsc_list; }; +struct pf_irq_data { + u64 intr_status; + void (*pf_queue_work_hdlr)(struct mbox *mb, struct workqueue_struct *mw, + int first, int mdevs, u64 intr); + struct otx2_nic *pf; + int vec_num; + int start; + int mdevs; +}; + struct otx2_nic { void __iomem *reg_base; struct net_device *netdev; @@ -448,6 +479,7 @@ struct otx2_nic { #define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17) #define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18) #define OTX2_FLAG_PORT_UP BIT_ULL(19) +#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED BIT_ULL(20) u64 flags; u64 *cq_op_addr; @@ -462,6 +494,7 @@ struct otx2_nic { struct mbox *mbox_pfvf; struct workqueue_struct *mbox_wq; struct workqueue_struct *mbox_pfvf_wq; + struct qmem *pfvf_mbox_addr; u8 total_vfs; 
u16 pcifunc; /* RVU PF_FUNC */ @@ -499,9 +532,9 @@ struct otx2_nic { /* Devlink */ struct otx2_devlink *dl; -#ifdef CONFIG_DCB /* PFC */ u8 pfc_en; +#ifdef CONFIG_DCB u8 *queue_to_pfc_map; u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX]; @@ -522,6 +555,11 @@ struct otx2_nic { u16 rep_pf_map[RVU_MAX_REP]; u16 esw_mode; #endif + + /* Inline ipsec */ + struct cn10k_ipsec ipsec; + /* af_xdp zero-copy */ + unsigned long *af_xdp_zc_qidx; }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) @@ -572,6 +610,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev) return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF; } +static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev) +{ + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF && + (pdev->revision & 0xFF) == 0x54) + return true; + + return false; +} + static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) { struct otx2_hw *hw = &pfvf->hw; @@ -604,9 +651,6 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag); __set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag); } - - if (is_dev_cn10kb(pfvf->pdev)) - __set_bit(CN10K_HW_MACSEC, &hw->cap_flag); } /* Register read/write APIs */ @@ -621,6 +665,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset) case BLKTYPE_NPA: blkaddr = BLKADDR_NPA; break; + case BLKTYPE_CPT: + blkaddr = BLKADDR_CPT0; + break; default: blkaddr = BLKADDR_RVUM; break; @@ -702,8 +749,9 @@ static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr) ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr)); } -static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr) +static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr) { + u64 __iomem *ptr = addr; u64 result; __asm__ volatile(".cpu generic+lse\n" @@ -716,7 +764,11 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr) #else #define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr) -#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; }) + +static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr) +{ + return 0; +} #endif static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura, @@ -766,7 +818,7 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf) /* Alloc pointer from pool/aura */ static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura) { - u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0)); + void __iomem *ptr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0)); u64 incr = (u64)aura | BIT_ULL(63); return otx2_atomic64_add(incr, ptr); @@ -841,6 +893,7 @@ static struct _req_type __maybe_unused \ *otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \ { \ struct _req_type *req; \ + u16 pcifunc = mbox->pfvf->pcifunc; \ \ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ &mbox->mbox, 0, sizeof(struct _req_type), \ @@ -849,7 +902,8 @@ static struct _req_type __maybe_unused \ return NULL; \ req->hdr.sig = OTX2_MBOX_REQ_SIG; \ req->hdr.id = _id; \ - trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \ + req->hdr.pcifunc = pcifunc; \ + trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \ return req; \ } @@ -869,21 +923,11 @@ MBOX_UP_MCS_MESSAGES /* Time to wait before watchdog kicks off */ #define OTX2_TX_TIMEOUT (100 * HZ) -#define RVU_PFVF_PF_SHIFT 10 -#define RVU_PFVF_PF_MASK 0x3F -#define RVU_PFVF_FUNC_SHIFT 0 -#define RVU_PFVF_FUNC_MASK 0x3FF - static inline bool is_otx2_vf(u16 
pcifunc) { return !!(pcifunc & RVU_PFVF_FUNC_MASK); } -static inline int rvu_get_pf(u16 pcifunc) -{ - return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; -} - static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf, struct page *page, size_t offset, size_t size, @@ -981,10 +1025,11 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq); void otx2_free_pending_sqe(struct otx2_nic *pfvf); void otx2_sqb_flush(struct otx2_nic *pfvf); int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, - dma_addr_t *dma); + dma_addr_t *dma, int qidx, int idx); int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable); void otx2_ctx_disable(struct mbox *mbox, int type, bool npa); int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable); +int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable); void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx); void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura); @@ -1010,12 +1055,15 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf); void otx2_disable_mbox_intr(struct otx2_nic *pf); void otx2_disable_napi(struct otx2_nic *pf); irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq); +int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura); +int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx); +int otx2_set_hw_capabilities(struct otx2_nic *pfvf); /* RSS configuration APIs*/ int otx2_rss_init(struct otx2_nic *pfvf); int otx2_set_flowkey_cfg(struct otx2_nic *pfvf); void otx2_set_rss_key(struct otx2_nic *pfvf); -int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id); +int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id, const u32 *ind_tbl); /* Mbox handlers */ void mbox_handler_msix_offset(struct otx2_nic *pfvf, @@ -1072,7 +1120,10 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac); int otx2_add_macfilter(struct net_device *netdev, const u8 *mac); int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); -bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx); +bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf, + u64 iova, int len, u16 qidx, u16 flags); +void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf, + u64 dma_addr, int len, int *offset, u16 flags); u16 otx2_get_max_mtu(struct otx2_nic *pfvf); int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features); @@ -1149,4 +1200,11 @@ static inline int mcam_entry_cmp(const void *a, const void *b) { return *(u16 *)a - *(u16 *)b; } + +dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, + struct sk_buff *skb, int seg, int *len); +void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg); +int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx); +void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq, + int first, int mdevs, u64 intr); #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c index 294fba58b670..f110dfa42360 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c @@ -435,6 +435,9 @@ process_pfc: return err; } + /* Default disable backpressure on NIX-CPT */ + otx2_nix_cpt_config_bp(pfvf, false); + /* Request Per channel Bpids */ if (pfc->pfc_en) 
otx2_nix_config_bp(pfvf, true); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 33ec9a7f7c03..e13ae5484c19 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -41,6 +41,7 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id, if (!pfvf->flow_cfg) return 0; + pfvf->flow_cfg->ntuple_cnt = ctx->val.vu16; otx2_alloc_mcam_entries(pfvf, ctx->val.vu16); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 2d53dc77ef1e..998c734ff839 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -15,6 +15,7 @@ #include "otx2_common.h" #include "otx2_ptp.h" +#include <cgx_fw_if.h> #define DRV_NAME "rvu-nicpf" #define DRV_VF_NAME "rvu-nicvf" @@ -315,7 +316,7 @@ static void otx2_get_pauseparam(struct net_device *netdev, struct otx2_nic *pfvf = netdev_priv(netdev); struct cgx_pause_frm_cfg *req, *rsp; - if (is_otx2_lbkvf(pfvf->pdev)) + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return; mutex_lock(&pfvf->mbox.lock); @@ -347,7 +348,7 @@ static int otx2_set_pauseparam(struct net_device *netdev, if (pause->autoneg) return -EOPNOTSUPP; - if (is_otx2_lbkvf(pfvf->pdev)) + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return -EOPNOTSUPP; if (pause->rx_pause) @@ -559,10 +560,13 @@ static int otx2_set_coalesce(struct net_device *netdev, return 0; } -static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf, - struct ethtool_rxnfc *nfc) +static int otx2_get_rss_hash_opts(struct net_device *dev, + struct ethtool_rxfh_fields *nfc) { - struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct otx2_nic *pfvf = netdev_priv(dev); + struct otx2_rss_info *rss; + + rss = &pfvf->hw.rss_info; if (!(rss->flowkey_cfg & (NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))) @@ -609,12 +613,17 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf, return 0; } -static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf, - struct ethtool_rxnfc *nfc) +static int otx2_set_rss_hash_opts(struct net_device *dev, + const struct ethtool_rxfh_fields *nfc, + struct netlink_ext_ack *extack) { - struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct otx2_nic *pfvf = netdev_priv(dev); u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3; - u32 rss_cfg = rss->flowkey_cfg; + struct otx2_rss_info *rss; + u32 rss_cfg; + + rss = &pfvf->hw.rss_info; + rss_cfg = rss->flowkey_cfg; if (!rss->enable) { netdev_err(pfvf->netdev, @@ -743,8 +752,6 @@ static int otx2_get_rxnfc(struct net_device *dev, if (netif_running(dev) && ntuple) ret = otx2_get_all_flows(pfvf, nfc, rules); break; - case ETHTOOL_GRXFH: - return otx2_get_rss_hash_opts(pfvf, nfc); default: break; } @@ -759,9 +766,6 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) pfvf->flow_cfg->ntuple = ntuple; switch (nfc->cmd) { - case ETHTOOL_SRXFH: - ret = otx2_set_rss_hash_opts(pfvf, nfc); - break; case ETHTOOL_SRXCLSRLINS: if (netif_running(dev) && ntuple) ret = otx2_add_flow(pfvf, nfc); @@ -792,60 +796,91 @@ static u32 otx2_get_rxfh_indir_size(struct net_device *dev) return MAX_RSS_INDIR_TBL_SIZE; } -static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id) +static int otx2_create_rxfh(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + 
struct netlink_ext_ack *extack) { - struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct otx2_nic *pfvf = netdev_priv(dev); + struct otx2_rss_info *rss; + unsigned int queues; + u32 *ind_tbl; + int idx; + + rss = &pfvf->hw.rss_info; + queues = pfvf->hw.rx_queues; - otx2_rss_ctx_flow_del(pfvf, ctx_id); - kfree(rss->rss_ctx[ctx_id]); - rss->rss_ctx[ctx_id] = NULL; + if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + ctx->hfunc = ETH_RSS_HASH_TOP; + if (!rss->enable) { + netdev_err(dev, "RSS is disabled, cannot change settings\n"); + return -EIO; + } + + ind_tbl = rxfh->indir; + if (!ind_tbl) { + ind_tbl = ethtool_rxfh_context_indir(ctx); + for (idx = 0; idx < rss->rss_size; idx++) + ind_tbl[idx] = ethtool_rxfh_indir_default(idx, queues); + } + + otx2_set_rss_table(pfvf, rxfh->rss_context, ind_tbl); return 0; } -static int otx2_rss_ctx_create(struct otx2_nic *pfvf, - u32 *rss_context) +static int otx2_modify_rxfh(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { - struct otx2_rss_info *rss = &pfvf->hw.rss_info; - u8 ctx; + struct otx2_nic *pfvf = netdev_priv(dev); - for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) { - if (!rss->rss_ctx[ctx]) - break; + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!pfvf->hw.rss_info.enable) { + netdev_err(dev, "RSS is disabled, cannot change settings\n"); + return -EIO; } - if (ctx == MAX_RSS_GROUPS) - return -EINVAL; - rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL); - if (!rss->rss_ctx[ctx]) - return -ENOMEM; - *rss_context = ctx; + if (rxfh->indir) + otx2_set_rss_table(pfvf, rxfh->rss_context, rxfh->indir); return 0; } +static int otx2_remove_rxfh(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + u32 rss_context, + struct netlink_ext_ack *extack) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + + if (!pfvf->hw.rss_info.enable) { + netdev_err(dev, "RSS is disabled, cannot change settings\n"); + return -EIO; + } + + otx2_rss_ctx_flow_del(pfvf, rss_context); + return 0; +} + /* Configure RSS table and hash key */ static int otx2_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack) { - u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP; struct otx2_nic *pfvf = netdev_priv(dev); - struct otx2_rss_ctx *rss_ctx; struct otx2_rss_info *rss; - int ret, idx; + int idx; if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (rxfh->rss_context) - rss_context = rxfh->rss_context; - - if (rss_context != ETH_RXFH_CONTEXT_ALLOC && - rss_context >= MAX_RSS_GROUPS) - return -EINVAL; - rss = &pfvf->hw.rss_info; if (!rss->enable) { @@ -857,21 +892,12 @@ static int otx2_set_rxfh(struct net_device *dev, memcpy(rss->key, rxfh->key, sizeof(rss->key)); otx2_set_rss_key(pfvf); } - if (rxfh->rss_delete) - return otx2_rss_ctx_delete(pfvf, rss_context); - - if (rss_context == ETH_RXFH_CONTEXT_ALLOC) { - ret = otx2_rss_ctx_create(pfvf, &rss_context); - rxfh->rss_context = rss_context; - if (ret) - return ret; - } + if (rxfh->indir) { - rss_ctx = rss->rss_ctx[rss_context]; for (idx = 0; idx < rss->rss_size; idx++) - rss_ctx->ind_tbl[idx] = rxfh->indir[idx]; + rss->ind_tbl[idx] = rxfh->indir[idx]; } - otx2_set_rss_table(pfvf, rss_context); + otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP, NULL); return 0; } @@ -880,9 +906,7 @@ static int otx2_set_rxfh(struct net_device *dev, static int 
otx2_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh) { - u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP; struct otx2_nic *pfvf = netdev_priv(dev); - struct otx2_rss_ctx *rss_ctx; struct otx2_rss_info *rss; u32 *indir = rxfh->indir; int idx, rx_queues; @@ -890,28 +914,21 @@ static int otx2_get_rxfh(struct net_device *dev, rss = &pfvf->hw.rss_info; rxfh->hfunc = ETH_RSS_HASH_TOP; - if (rxfh->rss_context) - rss_context = rxfh->rss_context; - if (!indir) return 0; - if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) { + if (!rss->enable) { rx_queues = pfvf->hw.rx_queues; for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++) indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues); return 0; } - if (rss_context >= MAX_RSS_GROUPS) - return -ENOENT; - rss_ctx = rss->rss_ctx[rss_context]; - if (!rss_ctx) - return -ENOENT; - - if (indir) { - for (idx = 0; idx < rss->rss_size; idx++) - indir[idx] = rss_ctx->ind_tbl[idx]; + for (idx = 0; idx < rss->rss_size; idx++) { + /* Ignore if the rx queue is AF_XDP zero copy enabled */ + if (test_bit(rss->ind_tbl[idx], pfvf->af_xdp_zc_qidx)) + continue; + indir[idx] = rss->ind_tbl[idx]; } if (rxfh->key) memcpy(rxfh->key, rss->key, sizeof(rss->key)); @@ -937,8 +954,8 @@ static u32 otx2_get_link(struct net_device *netdev) { struct otx2_nic *pfvf = netdev_priv(netdev); - /* LBK link is internal and always UP */ - if (is_otx2_lbkvf(pfvf->pdev)) + /* LBK and SDP links are internal and always UP */ + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return 1; return pfvf->linfo.link_up; } @@ -1119,17 +1136,9 @@ static void otx2_get_link_mode_info(u64 link_mode_bmap, *link_ksettings) { __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, }; - const int otx2_sgmii_features[6] = { - ETHTOOL_LINK_MODE_10baseT_Half_BIT, - ETHTOOL_LINK_MODE_10baseT_Full_BIT, - ETHTOOL_LINK_MODE_100baseT_Half_BIT, - ETHTOOL_LINK_MODE_100baseT_Full_BIT, - ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - }; /* CGX link modes to Ethtool link mode mapping */ - const int cgx_link_mode[27] = { - 0, /* SGMII Mode */ + const int cgx_link_mode[CGX_MODE_MAX] = { + 0, /* SGMII 1000baseT */ ETHTOOL_LINK_MODE_1000baseX_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, @@ -1159,14 +1168,19 @@ static void otx2_get_link_mode_info(u64 link_mode_bmap, }; u8 bit; - for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) { - /* SGMII mode is set */ - if (bit == 0) - linkmode_set_bit_array(otx2_sgmii_features, - ARRAY_SIZE(otx2_sgmii_features), - otx2_link_modes); - else + for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, ARRAY_SIZE(cgx_link_mode)) { + if (bit == CGX_MODE_SGMII_10M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, otx2_link_modes); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, otx2_link_modes); + } else if (bit == CGX_MODE_SGMII_100M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, otx2_link_modes); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, otx2_link_modes); + } else if (bit == CGX_MODE_SGMII) { + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, otx2_link_modes); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, otx2_link_modes); + } else { linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes); + } } if (req_mode == OTX2_MODE_ADVERTISED) @@ -1207,23 +1221,10 @@ static int otx2_get_link_ksettings(struct net_device *netdev, return 0; } -static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd, - u64 
*mode) -{ - u32 bit_pos; - - /* Firmware does not support requesting multiple advertised modes - * return first set bit - */ - bit_pos = find_first_bit(cmd->link_modes.advertising, - __ETHTOOL_LINK_MODE_MASK_NBITS); - if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS) - *mode = bit_pos; -} - static int otx2_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct otx2_nic *pf = netdev_priv(netdev); struct ethtool_link_ksettings cur_ks; struct cgx_set_link_mode_req *req; @@ -1260,7 +1261,20 @@ static int otx2_set_link_ksettings(struct net_device *netdev, */ req->args.duplex = cmd->base.duplex ^ 0x1; req->args.an = cmd->base.autoneg; - otx2_get_advertised_mode(cmd, &req->args.mode); + /* Mask unsupported modes and send message to AF */ + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mask); + + linkmode_copy(req->args.advertising, + cmd->link_modes.advertising); + linkmode_andnot(req->args.advertising, + req->args.advertising, mask); + + /* inform AF that we need parse this differently */ + if (bitmap_weight(req->args.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS) >= 2) + req->args.multimode = true; err = otx2_sync_mbox_msg(&pf->mbox); end: @@ -1302,12 +1316,12 @@ static void otx2_get_fec_stats(struct net_device *netdev, } static const struct ethtool_ops otx2_ethtool_ops = { - .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE, .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN | ETHTOOL_RING_USE_CQE_SIZE, + .rxfh_max_num_contexts = MAX_RSS_GROUPS, .get_link = otx2_get_link, .get_drvinfo = otx2_get_drvinfo, .get_strings = otx2_get_strings, @@ -1325,6 +1339,11 @@ static const struct ethtool_ops otx2_ethtool_ops = { .get_rxfh_indir_size = otx2_get_rxfh_indir_size, .get_rxfh = otx2_get_rxfh, .set_rxfh = otx2_set_rxfh, + .get_rxfh_fields = otx2_get_rss_hash_opts, + .set_rxfh_fields = otx2_set_rss_hash_opts, + .create_rxfh_context = otx2_create_rxfh, + .modify_rxfh_context = otx2_modify_rxfh, + .remove_rxfh_context = otx2_remove_rxfh, .get_msglevel = otx2_get_msglevel, .set_msglevel = otx2_set_msglevel, .get_pauseparam = otx2_get_pauseparam, @@ -1409,7 +1428,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev, { struct otx2_nic *pfvf = netdev_priv(netdev); - if (is_otx2_lbkvf(pfvf->pdev)) { + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) { cmd->base.duplex = DUPLEX_FULL; cmd->base.speed = SPEED_100000; } else { @@ -1419,12 +1438,12 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev, } static const struct ethtool_ops otx2vf_ethtool_ops = { - .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE, .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN | ETHTOOL_RING_USE_CQE_SIZE, + .rxfh_max_num_contexts = MAX_RSS_GROUPS, .get_link = otx2_get_link, .get_drvinfo = otx2vf_get_drvinfo, .get_strings = otx2vf_get_strings, @@ -1438,6 +1457,11 @@ static const struct ethtool_ops otx2vf_ethtool_ops = { .get_rxfh_indir_size = otx2_get_rxfh_indir_size, .get_rxfh = otx2_get_rxfh, .set_rxfh = otx2_set_rxfh, + .get_rxfh_fields = otx2_get_rss_hash_opts, + .set_rxfh_fields = otx2_set_rss_hash_opts, + .create_rxfh_context = otx2_create_rxfh, + .modify_rxfh_context = otx2_modify_rxfh, + 
.remove_rxfh_context = otx2_remove_rxfh, .get_ringparam = otx2_get_ringparam, .set_ringparam = otx2_set_ringparam, .get_coalesce = otx2_get_coalesce, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 47bfd1fb37d4..64c6d9162ef6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -247,7 +247,7 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf) mutex_unlock(&pfvf->mbox.lock); /* Allocate entries for Ntuple filters */ - count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT); + count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt); if (count <= 0) { otx2_clear_ntuple_flow_info(pfvf, flow_cfg); return 0; @@ -307,6 +307,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc); pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS; + pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT; /* Allocate bare minimum number of MCAM entries needed for * unicast and ntuple filters. diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index e310f99b1736..b23585c5e5c2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -26,6 +26,8 @@ #include "cn10k.h" #include "qos.h" #include <rvu_trace.h> +#include "cn10k_ipsec.h" +#include "otx2_xsk.h" #define DRV_NAME "rvu_nicpf" #define DRV_STRING "Marvell RVU NIC Physical Function Driver" @@ -204,7 +206,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) /* Register ME interrupt handler*/ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE]; - snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc)); + snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", + rvu_get_pf(pf->pdev, pf->pcifunc)); ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0), otx2_pf_me_intr_handler, 0, irq_name, pf); if (ret) { @@ -214,7 +217,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) /* Register FLR interrupt handler */ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE]; - snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc)); + snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", + rvu_get_pf(pf->pdev, pf->pcifunc)); ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0), otx2_pf_flr_intr_handler, 0, irq_name, pf); if (ret) { @@ -226,7 +230,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) if (numvfs > 64) { irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE]; snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1", - rvu_get_pf(pf->pcifunc)); + rvu_get_pf(pf->pdev, pf->pcifunc)); ret = request_irq(pci_irq_vector (pf->pdev, RVU_PF_INT_VEC_VFME1), otx2_pf_me_intr_handler, 0, irq_name, pf); @@ -236,7 +240,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs) } irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE]; snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1", - rvu_get_pf(pf->pcifunc)); + rvu_get_pf(pf->pdev, pf->pcifunc)); ret = request_irq(pci_irq_vector (pf->pdev, RVU_PF_INT_VEC_VFFLR1), otx2_pf_flr_intr_handler, 0, irq_name, pf); @@ -292,8 +296,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs) return 0; } -static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq, - int first, int mdevs, u64 intr) +void otx2_queue_vf_work(struct mbox *mw, struct 
workqueue_struct *mbox_wq, + int first, int mdevs, u64 intr) { struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; @@ -463,6 +467,9 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work) offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); + trace_otx2_msg_status(pf->pdev, "PF-VF down queue handler(forwarding)", + vf_mbox->num_msgs); + for (id = 0; id < vf_mbox->num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start + offset); @@ -471,7 +478,7 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work) goto inval_msg; /* Set VF's number in each of the msg */ - msg->pcifunc &= RVU_PFVF_FUNC_MASK; + msg->pcifunc &= ~RVU_PFVF_FUNC_MASK; msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK; offset = msg->next_msgoff; } @@ -501,6 +508,9 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work) offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); + trace_otx2_msg_status(pf->pdev, "PF-VF up queue handler(response)", + vf_mbox->up_num_msgs); + for (id = 0; id < vf_mbox->up_num_msgs; id++) { msg = mdev->mbase + offset; @@ -537,7 +547,7 @@ end: } } -static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) +irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) { struct otx2_nic *pf = (struct otx2_nic *)(pf_irq); int vfs = pf->total_vfs; @@ -566,6 +576,23 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) return IRQ_HANDLED; } +static void *cn20k_pfvf_mbox_alloc(struct otx2_nic *pf, int numvfs) +{ + struct qmem *mbox_addr; + int err; + + err = qmem_alloc(&pf->pdev->dev, &mbox_addr, numvfs, MBOX_SIZE); + if (err) { + dev_err(pf->dev, "qmem alloc fail\n"); + return ERR_PTR(-ENOMEM); + } + + otx2_write64(pf, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova); + pf->pfvf_mbox_addr = mbox_addr; + + return mbox_addr->base; +} + static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) { void __iomem *hwbase; @@ -587,20 +614,27 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) if (!pf->mbox_pfvf_wq) return -ENOMEM; - /* On CN10K platform, PF <-> VF mailbox region follows after - * PF <-> AF mailbox region. + /* For CN20K, PF allocates mbox memory in DRAM and writes PF/VF + * regions/offsets in RVU_PF_VF_MBOX_ADDR, the RVU_PFX_FUNC_PFAF_MBOX + * gives the aliased address to access PF/VF mailbox regions. */ - if (test_bit(CN10K_MBOX, &pf->hw.cap_flag)) - base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + - MBOX_SIZE; - else - base = readq((void __iomem *)((u64)pf->reg_base + - RVU_PF_VF_BAR4_ADDR)); + if (is_cn20k(pf->pdev)) { + hwbase = (void __iomem *)cn20k_pfvf_mbox_alloc(pf, numvfs); + } else { + /* On CN10K platform, PF <-> VF mailbox region follows after + * PF <-> AF mailbox region. 
+ */ + if (test_bit(CN10K_MBOX, &pf->hw.cap_flag)) + base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + + MBOX_SIZE; + else + base = readq(pf->reg_base + RVU_PF_VF_BAR4_ADDR); - hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); - if (!hwbase) { - err = -ENOMEM; - goto free_wq; + hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs); + if (!hwbase) { + err = -ENOMEM; + goto free_wq; + } } mbox = &pf->mbox_pfvf[0]; @@ -624,7 +658,7 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) return 0; free_iomem: - if (hwbase) + if (hwbase && !(is_cn20k(pf->pdev))) iounmap(hwbase); free_wq: destroy_workqueue(pf->mbox_pfvf_wq); @@ -643,8 +677,10 @@ static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf) pf->mbox_pfvf_wq = NULL; } - if (mbox->mbox.hwbase) + if (mbox->mbox.hwbase && !is_cn20k(pf->pdev)) iounmap(mbox->mbox.hwbase); + else + qmem_free(&pf->pdev->dev, pf->pfvf_mbox_addr); otx2_mbox_destroy(&mbox->mbox); } @@ -668,6 +704,9 @@ static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) { int vector; + if (is_cn20k(pf->pdev)) + return cn20k_disable_pfvf_mbox_intr(pf, numvfs); + /* Disable PF <=> VF mailbox IRQ */ otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull); otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull); @@ -689,11 +728,14 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) char *irq_name; int err; + if (is_cn20k(pf->pdev)) + return cn20k_register_pfvf_mbox_intr(pf, numvfs); + /* Register MBOX0 interrupt handler */ irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE]; if (pf->pcifunc) snprintf(irq_name, NAME_SIZE, - "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc)); + "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pdev, pf->pcifunc)); else snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0"); err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0), @@ -709,7 +751,8 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs) irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE]; if (pf->pcifunc) snprintf(irq_name, NAME_SIZE, - "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc)); + "RVUPF%d_VF Mbox1", + rvu_get_pf(pf->pdev, pf->pcifunc)); else snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1"); err = request_irq(pci_irq_vector(pf->pdev, @@ -817,6 +860,9 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work) offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); pf = af_mbox->pfvf; + trace_otx2_msg_status(pf->pdev, "PF-AF down queue handler(response)", + num_msgs); + for (id = 0; id < num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); otx2_process_pfaf_mbox_msg(pf, msg); @@ -972,6 +1018,9 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work) offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + trace_otx2_msg_status(pf->pdev, "PF-AF up queue handler(notification)", + num_msgs); + for (id = 0; id < num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); @@ -992,7 +1041,7 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work) otx2_mbox_msg_send(mbox, 0); } -static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) +irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) { struct otx2_nic *pf = (struct otx2_nic *)pf_irq; struct mbox *mw = &pf->mbox; @@ -1021,6 +1070,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF", BIT_ULL(0)); + + trace_otx2_msg_status(pf->pdev, "PF-AF up work queued(interrupt)", + 
hdr->num_msgs); } if (mbox_data & MBOX_DOWN_MSG) { @@ -1037,6 +1089,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF", BIT_ULL(0)); + + trace_otx2_msg_status(pf->pdev, "PF-AF down work queued(interrupt)", + hdr->num_msgs); } return IRQ_HANDLED; @@ -1044,10 +1099,18 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) void otx2_disable_mbox_intr(struct otx2_nic *pf) { - int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX); + int vector; /* Disable AF => PF mailbox IRQ */ - otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); + if (!is_cn20k(pf->pdev)) { + vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX); + otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); + } else { + vector = pci_irq_vector(pf->pdev, + RVU_MBOX_PF_INT_VEC_AFPF_MBOX); + otx2_write64(pf, RVU_PF_INT_ENA_W1C, + BIT_ULL(0) | BIT_ULL(1)); + } free_irq(vector, pf); } EXPORT_SYMBOL(otx2_disable_mbox_intr); @@ -1060,10 +1123,24 @@ int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) int err; /* Register mailbox interrupt handler */ - irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE]; - snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox"); - err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX), - otx2_pfaf_mbox_intr_handler, 0, irq_name, pf); + if (!is_cn20k(pf->pdev)) { + irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox", + rvu_get_pf(pf->pdev, pf->pcifunc)); + err = request_irq(pci_irq_vector + (pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX), + pf->hw_ops->pfaf_mbox_intr_handler, + 0, irq_name, pf); + } else { + irq_name = &hw->irq_name[RVU_MBOX_PF_INT_VEC_AFPF_MBOX * + NAME_SIZE]; + snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox", + rvu_get_pf(pf->pdev, pf->pcifunc)); + err = request_irq(pci_irq_vector + (pf->pdev, RVU_MBOX_PF_INT_VEC_AFPF_MBOX), + pf->hw_ops->pfaf_mbox_intr_handler, + 0, irq_name, pf); + } if (err) { dev_err(pf->dev, "RVUPF: IRQ registration failed for PFAF mbox irq\n"); @@ -1073,8 +1150,14 @@ int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) /* Enable mailbox interrupt for msgs coming from AF. * First clear to avoid spurious interrupts, if any. */ - otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); - otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0)); + if (!is_cn20k(pf->pdev)) { + otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); + otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0)); + } else { + otx2_write64(pf, RVU_PF_INT, BIT_ULL(0) | BIT_ULL(1)); + otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0) | + BIT_ULL(1)); + } if (!probe_af) return 0; @@ -1105,7 +1188,7 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) pf->mbox_wq = NULL; } - if (mbox->mbox.hwbase) + if (mbox->mbox.hwbase && !is_cn20k(pf->pdev)) iounmap((void __iomem *)mbox->mbox.hwbase); otx2_mbox_destroy(&mbox->mbox); @@ -1125,12 +1208,20 @@ int otx2_pfaf_mbox_init(struct otx2_nic *pf) if (!pf->mbox_wq) return -ENOMEM; - /* Mailbox is a reserved memory (in RAM) region shared between - * admin function (i.e AF) and this PF, shouldn't be mapped as - * device memory to allow unaligned accesses. + /* For CN20K, AF allocates mbox memory in DRAM and writes PF + * regions/offsets in RVU_MBOX_AF_PFX_ADDR, the RVU_PFX_FUNC_PFAF_MBOX + * gives the aliased address to access AF/PF mailbox regions. 
*/ - hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM), - MBOX_SIZE); + if (is_cn20k(pf->pdev)) + hwbase = pf->reg_base + RVU_PFX_FUNC_PFAF_MBOX + + ((u64)BLKADDR_MBOX << RVU_FUNC_BLKADDR_SHIFT); + else + /* Mailbox is a reserved memory (in RAM) region shared between + * admin function (i.e AF) and this PF, shouldn't be mapped as + * device memory to allow unaligned accesses. + */ + hwbase = ioremap_wc(pci_resource_start + (pf->pdev, PCI_MBOX_BAR_NUM), MBOX_SIZE); if (!hwbase) { dev_err(pf->dev, "Unable to map PFAF mailbox region\n"); err = -ENOMEM; @@ -1303,8 +1394,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) { struct otx2_nic *pf = data; struct otx2_snd_queue *sq; - u64 val, *ptr; - u64 qidx = 0; + void __iomem *ptr; + u64 val, qidx = 0; /* CQ */ for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) { @@ -1484,6 +1575,8 @@ static void otx2_free_sq_res(struct otx2_nic *pf) if (!sq->sqe) continue; qmem_free(pf->dev, sq->sqe); + qmem_free(pf->dev, sq->sqe_ring); + qmem_free(pf->dev, sq->cpt_resp); qmem_free(pf->dev, sq->tso_hdrs); kfree(sq->sg); kfree(sq->sqb_ptrs); @@ -1551,6 +1644,9 @@ int otx2_init_hw_resources(struct otx2_nic *pf) if (err) goto err_free_npa_lf; + /* Default disable backpressure on NIX-CPT */ + otx2_nix_cpt_config_bp(pf, false); + /* Enable backpressure for CGX mapped PF/VFs */ if (!is_otx2_lbkvf(pf->pdev)) otx2_nix_config_bp(pf, true); @@ -1656,9 +1752,7 @@ void otx2_free_hw_resources(struct otx2_nic *pf) struct nix_lf_free_req *free_req; struct mbox *mbox = &pf->mbox; struct otx2_cq_queue *cq; - struct otx2_pool *pool; struct msg_req *req; - int pool_id; int qidx; /* Ensure all SQE are processed */ @@ -1699,13 +1793,6 @@ void otx2_free_hw_resources(struct otx2_nic *pf) /* Free RQ buffer pointers*/ otx2_free_aura_ptr(pf, AURA_NIX_RQ); - for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) { - pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx); - pool = &pf->qset.pool[pool_id]; - page_pool_destroy(pool->page_pool); - pool->page_pool = NULL; - } - otx2_free_cq_res(pf); /* Free all ingress bandwidth profiles allocated */ @@ -1956,7 +2043,7 @@ int otx2_open(struct net_device *netdev) if (err) { dev_err(pf->dev, "RVUPF%d: IRQ registration failed for QERR\n", - rvu_get_pf(pf->pcifunc)); + rvu_get_pf(pf->pdev, pf->pcifunc)); goto err_disable_napi; } @@ -1974,7 +2061,7 @@ int otx2_open(struct net_device *netdev) if (name_len >= NAME_SIZE) { dev_err(pf->dev, "RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n", - rvu_get_pf(pf->pcifunc), qidx); + rvu_get_pf(pf->pdev, pf->pcifunc), qidx); err = -EINVAL; goto err_free_cints; } @@ -1985,7 +2072,7 @@ int otx2_open(struct net_device *netdev) if (err) { dev_err(pf->dev, "RVUPF%d: IRQ registration failed for CQ%d\n", - rvu_get_pf(pf->pcifunc), qidx); + rvu_get_pf(pf->pdev, pf->pcifunc), qidx); goto err_free_cints; } vec++; @@ -2071,7 +2158,6 @@ int otx2_stop(struct net_device *netdev) struct otx2_nic *pf = netdev_priv(netdev); struct otx2_cq_poll *cq_poll = NULL; struct otx2_qset *qset = &pf->qset; - struct otx2_rss_info *rss; int qidx, vec, wrk; /* If the DOWN flag is set resources are already freed */ @@ -2089,10 +2175,7 @@ int otx2_stop(struct net_device *netdev) otx2_rxtx_enable(pf, false); /* Clear RSS enable flag */ - rss = &pf->hw.rss_info; - rss->enable = false; - if (!netif_is_rxfh_configured(netdev)) - kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]); + pf->hw.rss_info.enable = false; /* Cleanup Queue IRQ */ vec = pci_irq_vector(pf->pdev, @@ -2273,6 +2356,10 @@ static int 
otx2_set_features(struct net_device *netdev, return otx2_enable_rxvlan(pf, features & NETIF_F_HW_VLAN_CTAG_RX); + if (changed & NETIF_F_HW_ESP) + return cn10k_ipsec_ethtool_init(netdev, + features & NETIF_F_HW_ESP); + return otx2_handle_ntuple_tc_features(netdev, features); } @@ -2681,7 +2768,6 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf, static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf, int qidx) { - struct page *page; u64 dma_addr; int err = 0; @@ -2691,11 +2777,11 @@ static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf, if (dma_mapping_error(pf->dev, dma_addr)) return -ENOMEM; - err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx); + err = otx2_xdp_sq_append_pkt(pf, xdpf, dma_addr, xdpf->len, + qidx, OTX2_XDP_REDIRECT); if (!err) { otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE); - page = virt_to_page(xdpf->data); - put_page(page); + xdp_return_frame(xdpf); return -ENOMEM; } return 0; @@ -2779,6 +2865,8 @@ static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return otx2_xdp_setup(pf, xdp->prog); + case XDP_SETUP_XSK_POOL: + return otx2_xsk_pool_setup(pf, xdp->xsk.pool, xdp->xsk.queue_id); default: return -EINVAL; } @@ -2856,6 +2944,7 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_get_vf_config = otx2_get_vf_config, .ndo_bpf = otx2_xdp, + .ndo_xsk_wakeup = otx2_xsk_wakeup, .ndo_xdp_xmit = otx2_xdp_xmit, .ndo_setup_tc = otx2_setup_tc, .ndo_set_vf_trust = otx2_ndo_set_vf_trust, @@ -2976,8 +3065,13 @@ int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf) if (err) return err; - err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT, - RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX); + if (!is_cn20k(pf->pdev)) + err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT, + RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX); + else + err = pci_alloc_irq_vectors(hw->pdev, RVU_MBOX_PF_INT_VEC_CNT, + RVU_MBOX_PF_INT_VEC_CNT, + PCI_IRQ_MSIX); if (err < 0) { dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n", __func__, num_vec); @@ -2986,6 +3080,11 @@ int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf) otx2_setup_dev_hw_settings(pf); + if (is_cn20k(pf->pdev)) + cn20k_init(pf); + else + otx2_init_hw_ops(pf); + /* Init PF <=> AF mailbox stuff */ err = otx2_pfaf_mbox_init(pf); if (err) @@ -3044,7 +3143,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - err = pci_request_regions(pdev, DRV_NAME); + err = pcim_request_all_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); return err; @@ -3053,7 +3152,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "DMA mask config failed, abort\n"); - goto err_release_regions; + return err; } pci_set_master(pdev); @@ -3063,10 +3162,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES); netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount); - if (!netdev) { - err = -ENOMEM; - goto err_release_regions; - } + if (!netdev) + return -ENOMEM; pci_set_drvdata(pdev, netdev); SET_NETDEV_DEV(netdev, &pdev->dev); @@ -3124,6 +3221,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_ptp_destroy; + otx2_set_hw_capabilities(pf); + err = cn10k_mcs_init(pf); if (err) 
goto err_del_mcam_entries; @@ -3162,10 +3261,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* reset CGX/RPM MAC stats */ otx2_reset_mac_stats(pf); + err = cn10k_ipsec_init(netdev); + if (err) + goto err_mcs_free; + err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_mcs_free; + goto err_ipsec_clean; } err = otx2_wq_init(pf); @@ -3190,22 +3293,36 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Enable link notifications */ otx2_cgx_config_linkevents(pf, true); + pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL); + if (!pf->af_xdp_zc_qidx) { + err = -ENOMEM; + goto err_sriov_cleannup; + } + #ifdef CONFIG_DCB err = otx2_dcbnl_set_ops(netdev); if (err) - goto err_pf_sriov_init; + goto err_free_zc_bmap; #endif otx2_qos_init(pf, qos_txqs); return 0; +#ifdef CONFIG_DCB +err_free_zc_bmap: + bitmap_free(pf->af_xdp_zc_qidx); +#endif +err_sriov_cleannup: + otx2_sriov_vfcfg_cleanup(pf); err_pf_sriov_init: otx2_shutdown_tc(pf); err_mcam_flow_del: otx2_mcam_flow_del(pf); err_unreg_netdev: unregister_netdev(netdev); +err_ipsec_clean: + cn10k_ipsec_clean(pf); err_mcs_free: cn10k_mcs_free(pf); err_del_mcam_entries: @@ -3224,8 +3341,6 @@ err_detach_rsrc: err_free_netdev: pci_set_drvdata(pdev, NULL); free_netdev(netdev); -err_release_regions: - pci_release_regions(pdev); return err; } @@ -3267,6 +3382,7 @@ static void otx2_vf_link_event_task(struct work_struct *work) req = (struct cgx_link_info_msg *)msghdr; req->hdr.id = MBOX_MSG_CGX_LINK_EVENT; req->hdr.sig = OTX2_MBOX_REQ_SIG; + req->hdr.pcifunc = pf->pcifunc; memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info)); otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx); @@ -3403,6 +3519,7 @@ static void otx2_remove(struct pci_dev *pdev) otx2_unregister_dl(pf); unregister_netdev(netdev); + cn10k_ipsec_clean(pf); cn10k_mcs_free(pf); otx2_sriov_disable(pf->pdev); otx2_sriov_vfcfg_cleanup(pf); @@ -3424,8 +3541,6 @@ static void otx2_remove(struct pci_dev *pdev) pci_free_irq_vectors(pf->pdev); pci_set_drvdata(pdev, NULL); free_netdev(netdev); - - pci_release_regions(pdev); } static struct pci_driver otx2_pf_driver = { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 63130ba37e9d..e52cc6b1a26c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -193,7 +193,7 @@ static int ptp_pps_on(struct otx2_ptp *ptp, int on, u64 period) return otx2_sync_mbox_msg(&ptp->nic->mbox); } -static u64 ptp_cc_read(const struct cyclecounter *cc) +static u64 ptp_cc_read(struct cyclecounter *cc) { struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h index e3aee6e36215..1cd576fd09c5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h @@ -44,6 +44,17 @@ #define RVU_PF_VF_MBOX_ADDR (0xC40) #define RVU_PF_LMTLINE_ADDR (0xC48) +#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3) +#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3) + +#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3) +#define 
RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3) +#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3) + /* RVU VF registers */ #define RVU_VF_VFPF_MBOX0 (0x00000) #define RVU_VF_VFPF_MBOX1 (0x00008) @@ -58,6 +69,11 @@ #define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3) #define RVU_VF_MBOX_REGION (0xC0000) +/* CN20K RVU_MBOX_E: RVU PF/VF MBOX Address Range Enumeration */ +#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4) +#define RVU_PFX_FUNC_PFAF_MBOX (0x80000) +#define RVU_PFX_FUNCX_VFAF_MBOX (0x40000) + #define RVU_FUNC_BLKADDR_SHIFT 20 #define RVU_FUNC_BLKADDR_MASK 0x1FULL @@ -138,39 +154,12 @@ #define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12) #define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12) -/* NIX AF transmit scheduler registers */ -#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16) -#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16) -#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16) -#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16) -#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16) -#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16) -#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16) -#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16) -#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16) -#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16) -#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16) -#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16) -#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16) -#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16) -#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16) -#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16) -#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16) -#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16) -#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16) -#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16) -#define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16) -#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16) -#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16) -#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16) -#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16) -#define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16) -#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16) -#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3) - /* LMT LF registers */ #define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT) #define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12) #define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400) +/* CN20K registers */ +#define RVU_PF_DISC (0x0) + #endif /* OTX2_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 9a226ca74425..5f80b23c5335 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -467,7 +467,8 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, target = act->dev; if (target->dev.parent) { priv = netdev_priv(target); - if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { + if (rvu_get_pf(nic->pdev, nic->pcifunc) != + rvu_get_pf(nic->pdev, priv->pcifunc)) { NL_SET_ERR_MSG_MOD(extack, "can't redirect to other pf/vf"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 04bc06a80e23..625bb5a05344 100644 --- 
a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -11,6 +11,8 @@ #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <net/ip6_checksum.h> +#include <net/xfrm.h> +#include <net/xdp.h> #include "otx2_reg.h" #include "otx2_common.h" @@ -18,6 +20,7 @@ #include "otx2_txrx.h" #include "otx2_ptp.h" #include "cn10k.h" +#include "otx2_xsk.h" #define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx))) #define PTP_PORT 0x13F @@ -26,11 +29,30 @@ */ #define PTP_SYNC_SEC_OFFSET 34 +DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled); + +static int otx2_get_free_sqe(struct otx2_snd_queue *sq) +{ + return (sq->cons_head - sq->head - 1 + sq->sqe_cnt) + & (sq->sqe_cnt - 1); +} + static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct bpf_prog *prog, struct nix_cqe_rx_s *cqe, struct otx2_cq_queue *cq, - bool *need_xdp_flush); + u32 *metasize, bool *need_xdp_flush); + +static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq, + struct sk_buff *skb) +{ + if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) && + (xfrm_offload(skb))) + sq->sqe_base = sq->sqe_ring->base + sq->sqe_size + + (sq->head * (sq->sqe_size * 2)); + else + sq->sqe_base = sq->sqe->base; +} static int otx2_nix_cq_op_status(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) @@ -80,54 +102,24 @@ static unsigned int frag_num(unsigned int i) #endif } -static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, - struct sk_buff *skb, int seg, int *len) -{ - const skb_frag_t *frag; - struct page *page; - int offset; - - /* First segment is always skb->data */ - if (!seg) { - page = virt_to_page(skb->data); - offset = offset_in_page(skb->data); - *len = skb_headlen(skb); - } else { - frag = &skb_shinfo(skb)->frags[seg - 1]; - page = skb_frag_page(frag); - offset = skb_frag_off(frag); - *len = skb_frag_size(frag); - } - return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE); -} - -static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) -{ - int seg; - - for (seg = 0; seg < sg->num_segs; seg++) { - otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], - sg->size[seg], DMA_TO_DEVICE); - } - sg->num_segs = 0; -} - static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, - struct nix_cqe_tx_s *cqe) + struct nix_cqe_tx_s *cqe, + int *xsk_frames) { struct nix_send_comp_s *snd_comp = &cqe->comp; struct sg_list *sg; - struct page *page; - u64 pa; sg = &sq->sg[snd_comp->sqe_id]; + if (sg->flags & OTX2_AF_XDP_FRAME) { + (*xsk_frames)++; + return; + } - pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]); - otx2_dma_unmap_page(pfvf, sg->dma_addr[0], - sg->size[0], DMA_TO_DEVICE); - page = virt_to_page(phys_to_virt(pa)); - put_page(page); + if (sg->flags & OTX2_XDP_REDIRECT) + otx2_dma_unmap_page(pfvf, sg->dma_addr[0], sg->size[0], DMA_TO_DEVICE); + xdp_return_frame((struct xdp_frame *)sg->skb); + sg->skb = (u64)NULL; } static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, @@ -343,18 +335,24 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, struct nix_rx_parse_s *parse = &cqe->parse; struct nix_rx_sg_s *sg = &cqe->sg; struct sk_buff *skb = NULL; + u64 *word = (u64 *)parse; void *end, *start; + u32 metasize = 0; u64 *seg_addr; u16 *seg_size; int seg; if (unlikely(parse->errlev || parse->errcode)) { - if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) + if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) { + trace_otx2_parse_dump(pfvf->pdev, "Err:", word); return; + } } + 
trace_otx2_parse_dump(pfvf->pdev, "", word); if (pfvf->xdp_prog) - if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush)) + if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, + &metasize, need_xdp_flush)) return; skb = napi_get_frags(napi); @@ -386,6 +384,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, skb->mark = parse->match_id; skb_mark_for_recycle(skb); + if (metasize) + skb_metadata_set(skb, metasize); napi_gro_frags(napi); } @@ -449,6 +449,18 @@ int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) return cnt - cq->pool_ptrs; } +static void otx2_zc_submit_pkts(struct otx2_nic *pfvf, struct xsk_buff_pool *xsk_pool, + int *xsk_frames, int qidx, int budget) +{ + if (*xsk_frames) + xsk_tx_completed(xsk_pool, *xsk_frames); + + if (xsk_uses_need_wakeup(xsk_pool)) + xsk_set_tx_need_wakeup(xsk_pool); + + otx2_zc_napi_handler(pfvf, xsk_pool, qidx, budget); +} + static int otx2_tx_napi_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int budget) { @@ -457,16 +469,22 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, struct nix_cqe_tx_s *cqe; struct net_device *ndev; int processed_cqe = 0; + int xsk_frames = 0; + + qidx = cq->cq_idx - pfvf->hw.rx_queues; + sq = &pfvf->qset.sq[qidx]; if (cq->pend_cqe >= budget) goto process_cqe; - if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) { + if (sq->xsk_pool) + otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames, + qidx, budget); return 0; + } process_cqe: - qidx = cq->cq_idx - pfvf->hw.rx_queues; - sq = &pfvf->qset.sq[qidx]; while (likely(processed_cqe < budget) && cq->pend_cqe) { cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq); @@ -476,10 +494,8 @@ process_cqe: break; } - qidx = cq->cq_idx - pfvf->hw.rx_queues; - if (cq->cq_type == CQ_XDP) - otx2_xdp_snd_pkt_handler(pfvf, sq, cqe); + otx2_xdp_snd_pkt_handler(pfvf, sq, cqe, &xsk_frames); else otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx], cqe, budget, &tx_pkts, &tx_bytes); @@ -520,6 +536,10 @@ process_cqe: netif_carrier_ok(ndev)) netif_tx_wake_queue(txq); } + + if (sq->xsk_pool) + otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames, qidx, budget); + return 0; } @@ -545,9 +565,10 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p int otx2_napi_handler(struct napi_struct *napi, int budget) { struct otx2_cq_queue *rx_cq = NULL; + struct otx2_cq_queue *cq = NULL; + struct otx2_pool *pool = NULL; struct otx2_cq_poll *cq_poll; int workdone = 0, cq_idx, i; - struct otx2_cq_queue *cq; struct otx2_qset *qset; struct otx2_nic *pfvf; int filled_cnt = -1; @@ -572,6 +593,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) if (rx_cq && rx_cq->pool_ptrs) filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); + /* Clear the IRQ */ otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); @@ -584,20 +606,31 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) otx2_adjust_adaptive_coalese(pfvf, cq_poll); + if (likely(cq)) + pool = &pfvf->qset.pool[cq->cq_idx]; + if (unlikely(!filled_cnt)) { struct refill_work *work; struct delayed_work *dwork; - work = &pfvf->refill_wrk[cq->cq_idx]; - dwork = &work->pool_refill_work; - /* Schedule a task if no other task is running */ - if (!cq->refill_task_sched) { - work->napi = napi; - cq->refill_task_sched = true; - schedule_delayed_work(dwork, - msecs_to_jiffies(100)); + if (likely(cq)) { + work = &pfvf->refill_wrk[cq->cq_idx]; 
+ dwork = &work->pool_refill_work; + /* Schedule a task if no other task is running */ + if (!cq->refill_task_sched) { + work->napi = napi; + cq->refill_task_sched = true; + schedule_delayed_work(dwork, + msecs_to_jiffies(100)); + } + /* Request a wake-up since buffers could not be filled */ + if (pool->xsk_pool) + xsk_set_rx_need_wakeup(pool->xsk_pool); } } else { + /* Clear wake-up, since buffers are filled successfully */ + if (pool && pool->xsk_pool) + xsk_clear_rx_need_wakeup(pool->xsk_pool); /* Re-enable interrupts */ otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), @@ -625,7 +658,6 @@ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, sq->head &= (sq->sqe_cnt - 1); } -#define MAX_SEGS_PER_SG 3 /* Add SQE scatter/gather subdescriptor structure */ static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct sk_buff *skb, int num_segs, int *offset) @@ -1161,11 +1193,12 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, int offset, num_segs, free_desc; struct nix_sqe_hdr_s *sqe_hdr; struct otx2_nic *pfvf = dev; + bool ret; /* Check if there is enough room between producer * and consumer index. */ - free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1); + free_desc = otx2_get_free_sqe(sq); if (free_desc < sq->sqe_thresh) return false; @@ -1177,6 +1210,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, /* If SKB doesn't fit in a single SQE, linearize it. * TODO: Consider adding JUMP descriptor instead. */ + if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) { if (__skb_linearize(skb)) { dev_kfree_skb_any(skb); @@ -1196,6 +1230,9 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, return true; } + /* Set sqe base address */ + otx2_sq_set_sqe_base(sq, skb); + /* Set SQE's SEND_HDR. * Do not clear the first 64bit as it contains constant info.
*/ @@ -1208,7 +1245,13 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, otx2_sqe_add_ext(pfvf, sq, skb, &offset); /* Add SG subdesc with data frags */ - if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) { + if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) && + (xfrm_offload(skb))) + ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset); + else + ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset); + + if (!ret) { otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]); return false; } @@ -1217,11 +1260,15 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, sqe_hdr->sizem1 = (offset / 16) - 1; + if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) && + (xfrm_offload(skb))) + return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs, + offset); + netdev_tx_sent_queue(txq, skb->len); /* Flush SQE to HW */ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); - return true; } EXPORT_SYMBOL(otx2_sq_append_skb); @@ -1234,15 +1281,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q u16 pool_id; u64 iova; - if (pfvf->xdp_prog) + pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); + pool = &pfvf->qset.pool[pool_id]; + + if (pfvf->xdp_prog) { + if (pool->page_pool) + xdp_rxq_info_unreg_mem_model(&cq->xdp_rxq); + xdp_rxq_info_unreg(&cq->xdp_rxq); + } if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) return; - pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); - pool = &pfvf->qset.pool[pool_id]; - while (cq->pend_cqe) { cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq); processed_cqe++; @@ -1363,8 +1414,8 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf) } } -static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr, - int len, int *offset) +void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf, + u64 dma_addr, int len, int *offset, u16 flags) { struct nix_sqe_sg_s *sg = NULL; u64 *iova = NULL; @@ -1381,16 +1432,34 @@ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr, sq->sg[sq->head].dma_addr[0] = dma_addr; sq->sg[sq->head].size[0] = len; sq->sg[sq->head].num_segs = 1; + sq->sg[sq->head].flags = flags; + sq->sg[sq->head].skb = (u64)xdpf; +} + +int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx) +{ + struct otx2_snd_queue *sq; + int free_sqe; + + sq = &pfvf->qset.sq[qidx]; + free_sqe = otx2_get_free_sqe(sq); + if (free_sqe < sq->sqe_thresh) { + netdev_warn(pfvf->netdev, "No free sqe for Send queue%d\n", qidx); + return 0; + } + + return free_sqe - sq->sqe_thresh; } -bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) +bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf, + u64 iova, int len, u16 qidx, u16 flags) { struct nix_sqe_hdr_s *sqe_hdr; struct otx2_snd_queue *sq; int offset, free_sqe; sq = &pfvf->qset.sq[qidx]; - free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb; + free_sqe = otx2_get_free_sqe(sq); if (free_sqe < sq->sqe_thresh) return false; @@ -1409,7 +1478,7 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) offset = sizeof(*sqe_hdr); - otx2_xdp_sqe_add_sg(sq, iova, len, &offset); + otx2_xdp_sqe_add_sg(sq, xdpf, iova, len, &offset, flags); sqe_hdr->sizem1 = (offset / 16) - 1; pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); @@ -1420,16 +1489,30 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct bpf_prog *prog, struct nix_cqe_rx_s *cqe, struct otx2_cq_queue *cq, - bool *need_xdp_flush) + u32 *metasize, bool *need_xdp_flush) { + struct 
xdp_buff xdp, *xsk_buff = NULL; unsigned char *hard_start; + struct otx2_pool *pool; + struct xdp_frame *xdpf; int qidx = cq->cq_idx; - struct xdp_buff xdp; struct page *page; u64 iova, pa; u32 act; int err; + pool = &pfvf->qset.pool[qidx]; + + if (pool->xsk_pool) { + xsk_buff = pool->xdp[--cq->rbpool->xdp_top]; + if (!xsk_buff) + return false; + + xsk_buff->data_end = xsk_buff->data + cqe->sg.seg_size; + act = bpf_prog_run_xdp(prog, xsk_buff); + goto handle_xdp_verdict; + } + iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); page = virt_to_page(phys_to_virt(pa)); @@ -1438,41 +1521,63 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, hard_start = (unsigned char *)phys_to_virt(pa); xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM, - cqe->sg.seg_size, false); + cqe->sg.seg_size, true); act = bpf_prog_run_xdp(prog, &xdp); +handle_xdp_verdict: switch (act) { case XDP_PASS: + *metasize = xdp.data - xdp.data_meta; break; case XDP_TX: qidx += pfvf->hw.tx_queues; cq->pool_ptrs++; - return otx2_xdp_sq_append_pkt(pfvf, iova, - cqe->sg.seg_size, qidx); + xdpf = xdp_convert_buff_to_frame(&xdp); + return otx2_xdp_sq_append_pkt(pfvf, xdpf, + cqe->sg.seg_addr, + cqe->sg.seg_size, + qidx, OTX2_XDP_TX); case XDP_REDIRECT: cq->pool_ptrs++; - err = xdp_do_redirect(pfvf->netdev, &xdp, prog); + if (xsk_buff) { + err = xdp_do_redirect(pfvf->netdev, xsk_buff, prog); + if (!err) { + *need_xdp_flush = true; + return true; + } + return false; + } - otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, - DMA_FROM_DEVICE); + err = xdp_do_redirect(pfvf->netdev, &xdp, prog); if (!err) { *need_xdp_flush = true; return true; } - put_page(page); + + otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, + DMA_FROM_DEVICE); + xdpf = xdp_convert_buff_to_frame(&xdp); + xdp_return_frame(xdpf); break; default: bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act); - break; + fallthrough; case XDP_ABORTED: - trace_xdp_exception(pfvf->netdev, prog, act); - break; + if (act == XDP_ABORTED) + trace_xdp_exception(pfvf->netdev, prog, act); + fallthrough; case XDP_DROP: - otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, - DMA_FROM_DEVICE); - put_page(page); cq->pool_ptrs++; + if (xsk_buff) { + xsk_buff_free(xsk_buff); + } else if (pp_page_to_nmdesc(page)->pp) { + page_pool_recycle_direct(pool->page_pool, page); + } else { + otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, + DMA_FROM_DEVICE); + put_page(page); + } return true; } return false; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index e1db5f961877..acf259d72008 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -12,6 +12,7 @@ #include <linux/iommu.h> #include <linux/if_vlan.h> #include <net/xdp.h> +#include <net/xdp_sock_drv.h> #define LBK_CHAN_BASE 0x000 #define SDP_CHAN_BASE 0x700 @@ -76,6 +77,7 @@ struct otx2_rcv_queue { struct sg_list { u16 num_segs; + u16 flags; u64 skb; u64 size[OTX2_MAX_FRAGS_IN_SQE]; u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE]; @@ -101,6 +103,11 @@ struct otx2_snd_queue { struct queue_stats stats; u16 sqb_count; u64 *sqb_ptrs; + /* SQE ring and CPT response queue for Inline IPSEC */ + struct qmem *sqe_ring; + struct qmem *cpt_resp; + /* Buffer pool for af_xdp zero-copy */ + struct xsk_buff_pool *xsk_pool; } ____cacheline_aligned_in_smp; enum cq_type { @@ -124,7 +131,11 @@ struct otx2_pool { struct qmem *stack; struct qmem *fc_addr; struct page_pool *page_pool; + struct 
xsk_buff_pool *xsk_pool; + struct xdp_buff **xdp; + u16 xdp_cnt; u16 rbsize; + u16 xdp_top; }; struct otx2_cq_queue { @@ -141,6 +152,7 @@ struct otx2_cq_queue { void *cqe_base; struct qmem *cqe; struct otx2_pool *rbpool; + bool xsk_zc_en; struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 839fc77c11b2..5589fccd370b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -14,6 +14,7 @@ #include "otx2_reg.h" #include "otx2_ptp.h" #include "cn10k.h" +#include "cn10k_ipsec.h" #define DRV_NAME "rvu_nicvf" #define DRV_STRING "Marvell RVU NIC Virtual Function Driver" @@ -135,7 +136,7 @@ static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf, rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT; rsp->hdr.sig = OTX2_MBOX_RSP_SIG; - rsp->hdr.pcifunc = 0; + rsp->hdr.pcifunc = req->pcifunc; rsp->hdr.rc = 0; err = otx2_mbox_up_handler_cgx_link_event( vf, (struct cgx_link_info_msg *)req, rsp); @@ -239,6 +240,10 @@ static void otx2vf_disable_mbox_intr(struct otx2_nic *vf) /* Disable VF => PF mailbox IRQ */ otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0)); + + if (is_cn20k(vf->pdev)) + otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0) | BIT_ULL(1)); + free_irq(vector, vf); } @@ -251,9 +256,18 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf) /* Register mailbox interrupt handler */ irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE]; - snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox"); - err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX), - otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf); + snprintf(irq_name, NAME_SIZE, "RVUVF%d AFVF Mbox", ((vf->pcifunc & + RVU_PFVF_FUNC_MASK) - 1)); + + if (!is_cn20k(vf->pdev)) { + err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX), + otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf); + } else { + err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX), + vf->hw_ops->vfaf_mbox_intr_handler, 0, irq_name, + vf); + } + if (err) { dev_err(vf->dev, "RVUPF: IRQ registration failed for VFAF mbox irq\n"); @@ -263,8 +277,15 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf) /* Enable mailbox interrupt for msgs coming from PF. * First clear to avoid spurious interrupts, if any. */ - otx2_write64(vf, RVU_VF_INT, BIT_ULL(0)); - otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0)); + if (!is_cn20k(vf->pdev)) { + otx2_write64(vf, RVU_VF_INT, BIT_ULL(0)); + otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0)); + } else { + otx2_write64(vf, RVU_VF_INT, BIT_ULL(0) | BIT_ULL(1) | + BIT_ULL(2) | BIT_ULL(3)); + otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0) | + BIT_ULL(1) | BIT_ULL(2) | BIT_ULL(3)); + } if (!probe_pf) return 0; @@ -314,7 +335,13 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf) if (!vf->mbox_wq) return -ENOMEM; - if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) { + /* For cn20k platform, VF mailbox region is in dram aliased from AF + * VF MBOX ADDR, MBOX is a separate RVU block. 
+ */ + if (is_cn20k(vf->pdev)) { + hwbase = vf->reg_base + RVU_VF_MBOX_REGION + ((u64)BLKADDR_MBOX << + RVU_FUNC_BLKADDR_SHIFT); + } else if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) { /* For cn10k platform, VF mailbox region is in its BAR2 * register space */ @@ -547,7 +574,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - err = pci_request_regions(pdev, DRV_NAME); + err = pcim_request_all_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); return err; @@ -556,7 +583,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "DMA mask config failed, abort\n"); - goto err_release_regions; + return err; } pci_set_master(pdev); @@ -564,10 +591,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) qcount = num_online_cpus(); qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES); netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount); - if (!netdev) { - err = -ENOMEM; - goto err_release_regions; - } + if (!netdev) + return -ENOMEM; pci_set_drvdata(pdev, netdev); SET_NETDEV_DEV(netdev, &pdev->dev); @@ -617,6 +642,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) } otx2_setup_dev_hw_settings(vf); + + if (is_cn20k(vf->pdev)) + cn20k_init(vf); + else + otx2_init_hw_ops(vf); + /* Init VF <=> PF mailbox stuff */ err = otx2vf_vfaf_mbox_init(vf); if (err) @@ -693,10 +724,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) pdev->bus->number, n); } + err = cn10k_ipsec_init(netdev); + if (err) + goto err_ptp_destroy; + err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_ptp_destroy; + goto err_ipsec_clean; } err = otx2_vf_wq_init(vf); @@ -717,19 +752,36 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_shutdown_tc; + vf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL); + if (!vf->af_xdp_zc_qidx) { + err = -ENOMEM; + goto err_unreg_devlink; + } + #ifdef CONFIG_DCB - err = otx2_dcbnl_set_ops(netdev); - if (err) - goto err_shutdown_tc; + /* Priority flow control is not supported for LBK and SDP vf(s) */ + if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) { + err = otx2_dcbnl_set_ops(netdev); + if (err) + goto err_free_zc_bmap; + } #endif otx2_qos_init(vf, qos_txqs); return 0; +#ifdef CONFIG_DCB +err_free_zc_bmap: + bitmap_free(vf->af_xdp_zc_qidx); +#endif +err_unreg_devlink: + otx2_unregister_dl(vf); err_shutdown_tc: otx2_shutdown_tc(vf); err_unreg_netdev: unregister_netdev(netdev); +err_ipsec_clean: + cn10k_ipsec_clean(vf); err_ptp_destroy: otx2_ptp_destroy(vf); err_detach_rsrc: @@ -746,8 +798,6 @@ err_free_irq_vectors: err_free_netdev: pci_set_drvdata(pdev, NULL); free_netdev(netdev); -err_release_regions: - pci_release_regions(pdev); return err; } @@ -782,6 +832,7 @@ static void otx2vf_remove(struct pci_dev *pdev) unregister_netdev(netdev); if (vf->otx2_wq) destroy_workqueue(vf->otx2_wq); + cn10k_ipsec_clean(vf); otx2_ptp_destroy(vf); otx2_mcam_flow_del(vf); otx2_shutdown_tc(vf); @@ -795,8 +846,6 @@ static void otx2vf_remove(struct pci_dev *pdev) pci_free_irq_vectors(vf->pdev); pci_set_drvdata(pdev, NULL); free_netdev(netdev); - - pci_release_regions(pdev); } static struct pci_driver otx2vf_driver = { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c new file mode 100644 index 000000000000..7d67b4cbaf71 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include <linux/bpf_trace.h> +#include <linux/stringify.h> +#include <net/xdp_sock_drv.h> +#include <net/xdp.h> + +#include "otx2_common.h" +#include "otx2_struct.h" +#include "otx2_xsk.h" + +int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool, + dma_addr_t *dma, int idx) +{ + struct xdp_buff *xdp; + int delta; + + xdp = xsk_buff_alloc(pool->xsk_pool); + if (!xdp) + return -ENOMEM; + + pool->xdp[pool->xdp_top++] = xdp; + *dma = OTX2_DATA_ALIGN(xsk_buff_xdp_get_dma(xdp)); + /* Adjust xdp->data for unaligned addresses */ + delta = *dma - xsk_buff_xdp_get_dma(xdp); + xdp->data += delta; + + return 0; +} + +static int otx2_xsk_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id) +{ + struct nix_cn10k_aq_enq_req *cn10k_rq_aq; + struct npa_aq_enq_req *aura_aq; + struct npa_aq_enq_req *pool_aq; + struct nix_aq_enq_req *rq_aq; + + if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { + cn10k_rq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); + if (!cn10k_rq_aq) + return -ENOMEM; + cn10k_rq_aq->qidx = qidx; + cn10k_rq_aq->rq.ena = 0; + cn10k_rq_aq->rq_mask.ena = 1; + cn10k_rq_aq->ctype = NIX_AQ_CTYPE_RQ; + cn10k_rq_aq->op = NIX_AQ_INSTOP_WRITE; + } else { + rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); + if (!rq_aq) + return -ENOMEM; + rq_aq->qidx = qidx; + rq_aq->sq.ena = 0; + rq_aq->sq_mask.ena = 1; + rq_aq->ctype = NIX_AQ_CTYPE_RQ; + rq_aq->op = NIX_AQ_INSTOP_WRITE; + } + + aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); + if (!aura_aq) + goto fail; + + aura_aq->aura_id = aura_id; + aura_aq->aura.ena = 0; + aura_aq->aura_mask.ena = 1; + aura_aq->ctype = NPA_AQ_CTYPE_AURA; + aura_aq->op = NPA_AQ_INSTOP_WRITE; + + pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); + if (!pool_aq) + goto fail; + + pool_aq->aura_id = aura_id; + pool_aq->pool.ena = 0; + pool_aq->pool_mask.ena = 1; + + pool_aq->ctype = NPA_AQ_CTYPE_POOL; + pool_aq->op = NPA_AQ_INSTOP_WRITE; + + return otx2_sync_mbox_msg(&pfvf->mbox); + +fail: + otx2_mbox_reset(&pfvf->mbox.mbox, 0); + return -ENOMEM; +} + +static void otx2_clean_up_rq(struct otx2_nic *pfvf, int qidx) +{ + struct otx2_qset *qset = &pfvf->qset; + struct otx2_cq_queue *cq; + struct otx2_pool *pool; + u64 iova; + + /* If the DOWN flag is set SQs are already freed */ + if (pfvf->flags & OTX2_FLAG_INTF_DOWN) + return; + + cq = &qset->cq[qidx]; + if (cq) + otx2_cleanup_rx_cqes(pfvf, cq, qidx); + + pool = &pfvf->qset.pool[qidx]; + iova = otx2_aura_allocptr(pfvf, qidx); + while (iova) { + iova -= OTX2_HEAD_ROOM; + otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize); + iova = otx2_aura_allocptr(pfvf, qidx); + } + + mutex_lock(&pfvf->mbox.lock); + otx2_xsk_ctx_disable(pfvf, qidx, qidx); + mutex_unlock(&pfvf->mbox.lock); +} + +int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx) +{ + u16 rx_queues = pf->hw.rx_queues; + u16 tx_queues = pf->hw.tx_queues; + int err; + + if (qidx >= rx_queues || qidx >= tx_queues) + return -EINVAL; + + err = xsk_pool_dma_map(pool, pf->dev, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + if (err) + return err; + + set_bit(qidx, pf->af_xdp_zc_qidx); + otx2_clean_up_rq(pf, qidx); + /* Reconfigure RSS table as 'qidx' cannot be part of RSS now */ + 
otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP, NULL); + /* Kick start the NAPI context so that receiving will start */ + return otx2_xsk_wakeup(pf->netdev, qidx, XDP_WAKEUP_RX); +} + +int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qidx) +{ + struct net_device *netdev = pf->netdev; + struct xsk_buff_pool *pool; + struct otx2_snd_queue *sq; + + pool = xsk_get_pool_from_qid(netdev, qidx); + if (!pool) + return -EINVAL; + + sq = &pf->qset.sq[qidx + pf->hw.tx_queues]; + sq->xsk_pool = NULL; + otx2_clean_up_rq(pf, qidx); + clear_bit(qidx, pf->af_xdp_zc_qidx); + xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + /* Reconfigure RSS table as 'qidx' needs to be part of RSS again */ + otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP, NULL); + + return 0; +} + +int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx) +{ + if (pool) + return otx2_xsk_pool_enable(pf, pool, qidx); + + return otx2_xsk_pool_disable(pf, qidx); +} + +int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + struct otx2_nic *pf = netdev_priv(dev); + struct otx2_cq_poll *cq_poll = NULL; + struct otx2_qset *qset = &pf->qset; + + if (pf->flags & OTX2_FLAG_INTF_DOWN) + return -ENETDOWN; + + if (queue_id >= pf->hw.rx_queues || queue_id >= pf->hw.tx_queues) + return -EINVAL; + + cq_poll = &qset->napi[queue_id]; + if (!cq_poll) + return -EINVAL; + + /* Trigger interrupt */ + if (!napi_if_scheduled_mark_missed(&cq_poll->napi)) { + otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0)); + otx2_write64(pf, NIX_LF_CINTX_INT_W1S(cq_poll->cint_idx), BIT_ULL(0)); + } + + return 0; +} + +void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx) +{ + if (test_bit(qidx, pfvf->af_xdp_zc_qidx)) + sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx); +} + +static void otx2_xsk_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, + u16 qidx) +{ + struct nix_sqe_hdr_s *sqe_hdr; + struct otx2_snd_queue *sq; + int offset; + + sq = &pfvf->qset.sq[qidx]; + memset(sq->sqe_base + 8, 0, sq->sqe_size - 8); + + sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); + + if (!sqe_hdr->total) { + sqe_hdr->aura = sq->aura_id; + sqe_hdr->df = 1; + sqe_hdr->sq = qidx; + sqe_hdr->pnc = 1; + } + sqe_hdr->total = len; + sqe_hdr->sqe_id = sq->head; + + offset = sizeof(*sqe_hdr); + + otx2_xdp_sqe_add_sg(sq, NULL, iova, len, &offset, OTX2_AF_XDP_FRAME); + sqe_hdr->sizem1 = (offset / 16) - 1; + pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); +} + +void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool, + int queue, int budget) +{ + struct xdp_desc *xdp_desc = pool->tx_descs; + int i, batch; + + budget = min(budget, otx2_read_free_sqe(pfvf, queue)); + batch = xsk_tx_peek_release_desc_batch(pool, budget); + if (!batch) + return; + + for (i = 0; i < batch; i++) { + dma_addr_t dma_addr; + + dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr); + otx2_xsk_sq_append_pkt(pfvf, dma_addr, xdp_desc[i].len, queue); + } +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h new file mode 100644 index 000000000000..8047fafee8fe --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU PF/VF Netdev Devlink + * + * Copyright (C) 2024 Marvell.
+ * + */ + +#ifndef OTX2_XSK_H +#define OTX2_XSK_H + +struct otx2_nic; +struct xsk_buff_pool; + +int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid); +int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid); +int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qid); +int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool, + dma_addr_t *dma, int idx); +int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); +void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool, + int queue, int budget); +void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx); + +#endif /* OTX2_XSK_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c index 0f844c14485a..5765bac119f0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c @@ -165,6 +165,11 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf, otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); } else if (level == NIX_TXSCH_LVL_TL2) { + /* configure parent txschq */ + cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq); + cfg->regval[num_regs] = (u64)hw->tx_link << 16; + num_regs++; + /* configure link cfg */ if (level == pfvf->qos.link_cfg_lvl) { cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link); @@ -1633,6 +1638,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force if (!node->is_static) dwrr_del_node = true; + WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); /* destroy the leaf node */ otx2_qos_disable_sq(pfvf, qid); otx2_qos_destroy_node(pfvf, node); @@ -1677,9 +1683,6 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force } kfree(new_cfg); - /* update tx_real_queues */ - otx2_qos_update_tx_netdev_queues(pfvf); - return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c index 9d887bfc3108..2872adabc830 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c @@ -82,7 +82,7 @@ static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx) } for (ptr = 0; ptr < num_sqbs; ptr++) { - err = otx2_alloc_rbuf(pfvf, pool, &bufptr); + err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr); if (err) goto sqb_free; pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr); @@ -151,9 +151,10 @@ static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx) static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx) { int sqe_tail, sqe_head; - u64 incr, *ptr, val; + void __iomem *ptr; + u64 incr, val; - ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); + ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); incr = (u64)qidx << 32; val = otx2_atomic64_add(incr, ptr); sqe_head = (val >> 20) & 0x3F; @@ -256,6 +257,26 @@ out: return err; } +static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf) +{ + struct ndc_sync_op *req; + int rc; + + mutex_lock(&pfvf->mbox.lock); + + req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->nix_lf_tx_sync = true; + req->npa_lf_sync = true; + rc = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return rc; +} + void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx) { struct otx2_qset *qset = &pfvf->qset; @@ -285,6 +306,8 @@ void 
otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx) otx2_qos_sqb_flush(pfvf, sq_idx); otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx)); + /* NIX/NPA NDC sync */ + otx2_qos_nix_npa_ndc_sync(pfvf); otx2_cleanup_tx_cqes(pfvf, cq); mutex_lock(&pfvf->mbox.lock); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c index 232b10740c13..25af98034e2e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c @@ -67,6 +67,8 @@ static int rvu_rep_mcam_flow_init(struct rep_dev *rep) rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp (&priv->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) + goto exit; for (ent = 0; ent < rsp->count; ent++) rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent]; @@ -242,10 +244,10 @@ static int rvu_rep_devlink_port_register(struct rep_dev *rep) if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) { attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; - attrs.phys.port_number = rvu_get_pf(rep->pcifunc); + attrs.phys.port_number = rvu_get_pf(priv->pdev, rep->pcifunc); } else { attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; - attrs.pci_vf.pf = rvu_get_pf(rep->pcifunc); + attrs.pci_vf.pf = rvu_get_pf(priv->pdev, rep->pcifunc); attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK; } @@ -670,7 +672,8 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack) rep->pcifunc = pcifunc; snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d", - rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK)); + rvu_get_pf(priv->pdev, pcifunc), + (pcifunc & RVU_PFVF_FUNC_MASK)); ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | @@ -680,14 +683,17 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack) ndev->features |= ndev->hw_features; eth_hw_addr_random(ndev); err = rvu_rep_devlink_port_register(rep); - if (err) + if (err) { + free_netdev(ndev); goto exit; + } SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port); err = register_netdev(ndev); if (err) { NL_SET_ERR_MSG_MOD(extack, "PFVF representor registration failed"); + rvu_rep_devlink_port_unregister(rep); free_netdev(ndev); goto exit; } @@ -760,7 +766,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - err = pci_request_regions(pdev, DRV_NAME); + err = pcim_request_all_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); return err; @@ -769,7 +775,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "DMA mask config failed, abort\n"); - goto err_release_regions; + goto err_set_drv_data; } pci_set_master(pdev); @@ -777,7 +783,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) { err = -ENOMEM; - goto err_release_regions; + goto err_set_drv_data; } pci_set_drvdata(pdev, priv); @@ -794,7 +800,7 @@ static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = otx2_init_rsrc(pdev, priv); if (err) - goto err_release_regions; + goto err_set_drv_data; priv->iommu_domain = iommu_get_domain_for_dev(dev); @@ -817,9 +823,8 @@ err_detach_rsrc: otx2_disable_mbox_intr(priv); otx2_pfaf_mbox_destroy(priv); pci_free_irq_vectors(pdev); -err_release_regions: +err_set_drv_data: pci_set_drvdata(pdev, NULL); - pci_release_regions(pdev); return err; } @@ -839,7 
+844,6 @@ static void rvu_rep_remove(struct pci_dev *pdev) otx2_pfaf_mbox_destroy(priv); pci_free_irq_vectors(priv->pdev); pci_set_drvdata(pdev, NULL); - pci_release_regions(pdev); } static struct pci_driver rvu_rep_driver = { diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c index 4cd53a2dae46..634f4543c1d7 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_counter.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c @@ -336,8 +336,7 @@ prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx) static void prestera_counter_stats_work(struct work_struct *work) { - struct delayed_work *dl_work = - container_of(work, struct delayed_work, work); + struct delayed_work *dl_work = to_delayed_work(work); struct prestera_counter *counter = container_of(dl_work, struct prestera_counter, stats_dw); struct prestera_counter_block *block; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 22ca6ee9665e..71ffb55d1fc4 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -280,6 +280,7 @@ prestera_mac_select_pcs(struct phylink_config *config, } static void prestera_pcs_get_state(struct phylink_pcs *pcs, + unsigned int neg_mode, struct phylink_link_state *state) { struct prestera_port *port = container_of(pcs, struct prestera_port, @@ -395,7 +396,6 @@ static int prestera_port_sfp_bind(struct prestera_port *port) continue; port->phylink_pcs.ops = &prestera_pcs_ops; - port->phylink_pcs.neg_mode = true; port->phy_config.dev = &port->dev->dev; port->phy_config.type = PHYLINK_NETDEV; @@ -634,7 +634,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id) goto err_dl_port_register; dev->features |= NETIF_F_HW_TC; - dev->netns_local = true; + dev->netns_immutable = true; dev->netdev_ops = &prestera_netdev_ops; dev->ethtool_ops = &prestera_ethtool_ops; SET_NETDEV_DEV(dev, sw->dev->dev); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c index 35857dc19542..c45d108b2f6d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c @@ -845,9 +845,9 @@ static int prestera_pci_probe(struct pci_dev *pdev, goto err_pci_enable_device; } - err = pci_request_regions(pdev, driver_name); + err = pcim_request_all_regions(pdev, driver_name); if (err) { - dev_err(&pdev->dev, "pci_request_regions failed\n"); + dev_err(&pdev->dev, "pcim_request_all_regions failed\n"); goto err_pci_request_regions; } @@ -938,7 +938,6 @@ err_pci_dev_alloc: err_pp_ioremap: err_mem_ioremap: err_dma_mask: - pci_release_regions(pdev); err_pci_request_regions: err_pci_enable_device: return err; @@ -953,7 +952,6 @@ static void prestera_pci_remove(struct pci_dev *pdev) pci_free_irq_vectors(pdev); destroy_workqueue(fw->wq); prestera_fw_uninit(fw); - pci_release_regions(pdev); } static const struct pci_device_id prestera_pci_devices[] = { diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 2bf426cea6dd..68f8a1e36aa6 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -353,7 +353,7 @@ static void rxq_refill(struct net_device *dev) static inline void rxq_refill_timer_wrapper(struct timer_list *t) { - struct pxa168_eth_private *pep = 
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 2bf426cea6dd..68f8a1e36aa6 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -353,7 +353,7 @@ static void rxq_refill(struct net_device *dev)
 
 static inline void rxq_refill_timer_wrapper(struct timer_list *t)
 {
-	struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
+	struct pxa168_eth_private *pep = timer_container_of(pep, t, timeout);
 	napi_schedule(&pep->napi);
 }
 
@@ -1175,7 +1175,7 @@ static int pxa168_eth_stop(struct net_device *dev)
 	/* Write to ICR to clear interrupts. */
 	wrl(pep, INT_W_CLEAR, 0);
 	napi_disable(&pep->napi);
-	del_timer_sync(&pep->timeout);
+	timer_delete_sync(&pep->timeout);
 	netif_carrier_off(dev);
 	free_irq(dev->irq, dev);
 	rxq_deinit(dev);
@@ -1229,9 +1229,9 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget)
 	int work_done = 0;
 
 	/*
-	 * We call txq_reclaim every time since in NAPI interupts are disabled
-	 * and due to this we miss the TX_DONE interrupt,which is not updated in
-	 * interrupt status register.
+	 * We call txq_reclaim every time since in NAPI interrupts are disabled
+	 * and due to this we miss the TX_DONE interrupt, which is not updated
+	 * in interrupt status register.
 	 */
 	txq_reclaim(dev, 0);
 	if (netif_queue_stopped(dev)
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 25bf6ec44289..05349a0b2db1 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -1494,7 +1494,7 @@ static int xm_check_link(struct net_device *dev)
  */
 static void xm_link_timer(struct timer_list *t)
 {
-	struct skge_port *skge = from_timer(skge, t, link_timer);
+	struct skge_port *skge = timer_container_of(skge, t, link_timer);
 	struct net_device *dev = skge->netdev;
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
@@ -2662,7 +2662,7 @@ static int skge_down(struct net_device *dev)
 	netif_tx_disable(dev);
 
 	if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
-		del_timer_sync(&skge->link_timer);
+		timer_delete_sync(&skge->link_timer);
 
 	napi_disable(&skge->napi);
 	netif_carrier_off(dev);
@@ -3742,10 +3742,7 @@ static int skge_device_event(struct notifier_block *unused,
 	skge = netdev_priv(dev);
 	switch (event) {
 	case NETDEV_CHANGENAME:
-		if (skge->debugfs)
-			skge->debugfs = debugfs_rename(skge_debug,
-						       skge->debugfs,
-						       skge_debug, dev->name);
+		debugfs_change_name(skge->debugfs, "%s", dev->name);
 		break;
 
 	case NETDEV_GOING_DOWN:
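The pxa168 and skge hunks above (and the sky2 hunks below) are part of the tree-wide rename of the timer helpers: from_timer() becomes timer_container_of() and del_timer_sync() becomes timer_delete_sync(), with unchanged semantics. A minimal sketch with illustrative names, not taken from any of these drivers:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_port {			/* illustrative, not from these drivers */
	struct timer_list link_timer;
	bool link_up;
};

static void my_link_timer(struct timer_list *t)
{
	/* timer_container_of() resolves the enclosing structure from the
	 * timer_list pointer, exactly as from_timer() used to.
	 */
	struct my_port *port = timer_container_of(port, t, link_timer);

	port->link_up = false;
	mod_timer(&port->link_timer, jiffies + HZ);
}

static void my_port_stop(struct my_port *port)
{
	/* timer_delete_sync() is the renamed del_timer_sync(). */
	timer_delete_sync(&port->link_timer);
}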
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 3914cd9210d4..3831f533b9db 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -130,6 +130,7 @@ static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
@@ -2960,7 +2961,7 @@ static int sky2_rx_hung(struct net_device *dev)
 
 static void sky2_watchdog(struct timer_list *t)
 {
-	struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);
+	struct sky2_hw *hw = timer_container_of(hw, t, watchdog_timer);
 
 	/* Check for lost IRQ once a second */
 	if (sky2_read32(hw, B0_ISRC)) {
@@ -4493,10 +4494,7 @@ static int sky2_device_event(struct notifier_block *unused,
 
 	switch (event) {
 	case NETDEV_CHANGENAME:
-		if (sky2->debugfs) {
-			sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
-						       sky2_debug, dev->name);
-		}
+		debugfs_change_name(sky2->debugfs, "%s", dev->name);
 		break;
 
 	case NETDEV_GOING_DOWN:
@@ -5054,7 +5052,7 @@ static int sky2_suspend(struct device *dev)
 	if (!hw)
 		return 0;
 
-	del_timer_sync(&hw->watchdog_timer);
+	timer_delete_sync(&hw->watchdog_timer);
 	cancel_work_sync(&hw->restart_work);
 
 	rtnl_lock();
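Both NETDEV_CHANGENAME handlers (skge above and sky2 here) now call debugfs_change_name(), which renames the existing debugfs entry in place under its current parent instead of going through debugfs_rename(); the dropped NULL checks suggest it also copes with a dentry that was never created. A sketch of the call with illustrative types, not taken from these drivers:

#include <linux/debugfs.h>

struct my_netdev_priv {			/* illustrative, not from these drivers */
	struct dentry *debugfs;
};

static void my_handle_changename(struct my_netdev_priv *priv, const char *new_name)
{
	/* Rename priv->debugfs to new_name under its current parent.
	 * Assumed (from the hunks above) to be safe to call even when
	 * the dentry is missing; the return value is ignored here,
	 * as it is in both drivers.
	 */
	debugfs_change_name(priv->debugfs, "%s", new_name);
}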