diff options
Diffstat (limited to 'drivers/net/ethernet/marvell')
31 files changed, 2941 insertions, 996 deletions
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index bb14fa2241a3..105247582684 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1638,7 +1638,9 @@ static int mv643xx_eth_set_coalesce(struct net_device *dev, } static void -mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) +mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er, + struct kernel_ethtool_ringparam *kernel_er, + struct netlink_ext_ack *extack) { struct mv643xx_eth_private *mp = netdev_priv(dev); @@ -1650,7 +1652,9 @@ mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) } static int -mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) +mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er, + struct kernel_ethtool_ringparam *kernel_er, + struct netlink_ext_ack *extack) { struct mv643xx_eth_private *mp = netdev_priv(dev); @@ -3197,7 +3201,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) dev->hw_features = dev->features; dev->priv_flags |= IFF_UNICAST_FLT; - dev->gso_max_segs = MV643XX_MAX_TSO_SEGS; + netif_set_gso_max_segs(dev, MV643XX_MAX_TSO_SEGS); /* MTU range: 64 - 9500 */ dev->min_mtu = 64; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 5a7bdca22a63..83c8908f0cc7 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -38,6 +38,7 @@ #include <net/ipv6.h> #include <net/tso.h> #include <net/page_pool.h> +#include <net/pkt_cls.h> #include <linux/bpf_trace.h> /* Registers */ @@ -247,12 +248,39 @@ #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000 #define MVNETA_PORT_TX_RESET 0x3cf0 #define MVNETA_PORT_TX_DMA_RESET BIT(0) +#define MVNETA_TXQ_CMD1_REG 0x3e00 +#define MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 BIT(3) +#define MVNETA_TXQ_CMD1_BW_LIM_EN BIT(0) +#define 
MVNETA_REFILL_NUM_CLK_REG 0x3e08 +#define MVNETA_REFILL_MAX_NUM_CLK 0x0000ffff #define MVNETA_TX_MTU 0x3e0c #define MVNETA_TX_TOKEN_SIZE 0x3e14 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff +#define MVNETA_TXQ_BUCKET_REFILL_REG(q) (0x3e20 + ((q) << 2)) +#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK 0x3ff00000 +#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT 20 +#define MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX 0x0007ffff #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2)) #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff +/* The values of the bucket refill base period and refill period are taken from + * the reference manual, and adds up to a base resolution of 10Kbps. This allows + * to cover all rate-limit values from 10Kbps up to 5Gbps + */ + +/* Base period for the rate limit algorithm */ +#define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS 100 + +/* Number of Base Period to wait between each bucket refill */ +#define MVNETA_TXQ_BUCKET_REFILL_PERIOD 1000 + +/* The base resolution for rate limiting, in bps. Any max_rate value should be + * a multiple of that value. 
+ */ +#define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \ + (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \ + MVNETA_TXQ_BUCKET_REFILL_PERIOD)) + #define MVNETA_LPI_CTRL_0 0x2cc0 #define MVNETA_LPI_CTRL_1 0x2cc4 #define MVNETA_LPI_REQUEST_ENABLE BIT(0) @@ -492,13 +520,13 @@ struct mvneta_port { u8 mcast_count[256]; u16 tx_ring_size; u16 rx_ring_size; - u8 prio_tc_map[8]; phy_interface_t phy_interface; struct device_node *dn; unsigned int tx_csum_limit; struct phylink *phylink; struct phylink_config phylink_config; + struct phylink_pcs phylink_pcs; struct phy *comphy; struct mvneta_bm *bm_priv; @@ -2212,7 +2240,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(pp->dev, prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(pp->dev, prog, act); @@ -3819,58 +3847,31 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr) return 0; } -static void mvneta_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) +static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + return container_of(pcs, struct mvneta_port, phylink_pcs); +} +static int mvneta_pcs_validate(struct phylink_pcs *pcs, + unsigned long *supported, + const struct phylink_link_state *state) +{ /* We only support QSGMII, SGMII, 802.3z and RGMII modes. * When in 802.3z mode, we must have AN enabled: * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... * When <PortType> = 1 (1000BASE-X) this field must be set to 1." 
*/ if (phy_interface_mode_is_8023z(state->interface) && - !phylink_test(state->advertising, Autoneg)) { - linkmode_zero(supported); - return; - } - - /* Allow all the expected bits */ - phylink_set(mask, Autoneg); - phylink_set_port_modes(mask); - - /* Asymmetric pause is unsupported */ - phylink_set(mask, Pause); - - /* Half-duplex at speeds higher than 100Mbit is unsupported */ - if (state->interface != PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - } - - if (state->interface == PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - } - - if (!phy_interface_mode_is_8023z(state->interface)) { - /* 10M and 100M are only supported in non-802.3z mode */ - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - } + !phylink_test(state->advertising, Autoneg)) + return -EINVAL; - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); + return 0; } -static void mvneta_mac_pcs_get_state(struct phylink_config *config, - struct phylink_link_state *state) +static void mvneta_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) { - struct net_device *ndev = to_net_dev(config->dev); - struct mvneta_port *pp = netdev_priv(ndev); + struct mvneta_port *pp = mvneta_pcs_to_port(pcs); u32 gmac_stat; gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); @@ -3888,17 +3889,71 @@ static void mvneta_mac_pcs_get_state(struct phylink_config *config, state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); - state->pause = 0; if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE) state->pause |= MLO_PAUSE_RX; if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE) state->pause |= MLO_PAUSE_TX; } -static void mvneta_mac_an_restart(struct phylink_config *config) +static int 
mvneta_pcs_config(struct phylink_pcs *pcs, + unsigned int mode, phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) { - struct net_device *ndev = to_net_dev(config->dev); - struct mvneta_port *pp = netdev_priv(ndev); + struct mvneta_port *pp = mvneta_pcs_to_port(pcs); + u32 mask, val, an, old_an, changed; + + mask = MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_INBAND_RESTART_AN | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_FLOW_CTRL_EN | + MVNETA_GMAC_AN_DUPLEX_EN; + + if (phylink_autoneg_inband(mode)) { + mask |= MVNETA_GMAC_CONFIG_MII_SPEED | + MVNETA_GMAC_CONFIG_GMII_SPEED | + MVNETA_GMAC_CONFIG_FULL_DUPLEX; + val = MVNETA_GMAC_INBAND_AN_ENABLE; + + if (interface == PHY_INTERFACE_MODE_SGMII) { + /* SGMII mode receives the speed and duplex from PHY */ + val |= MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN; + } else { + /* 802.3z mode has fixed speed and duplex */ + val |= MVNETA_GMAC_CONFIG_GMII_SPEED | + MVNETA_GMAC_CONFIG_FULL_DUPLEX; + + /* The FLOW_CTRL_EN bit selects either the hardware + * automatically or the CONFIG_FLOW_CTRL manually + * controls the GMAC pause mode. 
+ */ + if (permit_pause_to_mac) + val |= MVNETA_GMAC_AN_FLOW_CTRL_EN; + + /* Update the advertisement bits */ + mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; + if (phylink_test(advertising, Pause)) + val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; + } + } else { + /* Phy or fixed speed - disable in-band AN modes */ + val = 0; + } + + old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + an = (an & ~mask) | val; + changed = old_an ^ an; + if (changed) + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an); + + /* We are only interested in the advertisement bits changing */ + return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL); +} + +static void mvneta_pcs_an_restart(struct phylink_pcs *pcs) +{ + struct mvneta_port *pp = mvneta_pcs_to_port(pcs); u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, @@ -3907,6 +3962,47 @@ static void mvneta_mac_an_restart(struct phylink_config *config) gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN); } +static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = { + .pcs_validate = mvneta_pcs_validate, + .pcs_get_state = mvneta_pcs_get_state, + .pcs_config = mvneta_pcs_config, + .pcs_an_restart = mvneta_pcs_an_restart, +}; + +static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct mvneta_port *pp = netdev_priv(ndev); + u32 val; + + if (pp->phy_interface != interface || + phylink_autoneg_inband(mode)) { + /* Force the link down when changing the interface or if in + * in-band mode. According to Armada 370 documentation, we + * can only change the port mode and in-band enable when the + * link is down. 
+ */ + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_FORCE_LINK_PASS; + val |= MVNETA_GMAC_FORCE_LINK_DOWN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + } + + if (pp->phy_interface != interface) + WARN_ON(phy_power_off(pp->comphy)); + + /* Enable the 1ms clock */ + if (phylink_autoneg_inband(mode)) { + unsigned long rate = clk_get_rate(pp->clk); + + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, + MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000)); + } + + return 0; +} + static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { @@ -3915,20 +4011,11 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); - u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); - u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | MVNETA_GMAC2_PORT_RESET); new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE); - new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE; - new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_INBAND_RESTART_AN | - MVNETA_GMAC_AN_SPEED_EN | - MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL | - MVNETA_GMAC_AN_FLOW_CTRL_EN | - MVNETA_GMAC_AN_DUPLEX_EN); /* Even though it might look weird, when we're configured in * SGMII or QSGMII mode, the RGMII bit needs to be set. 
@@ -3940,9 +4027,6 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, phy_interface_mode_is_8023z(state->interface)) new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE; - if (phylink_test(state->advertising, Pause)) - new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; - if (!phylink_autoneg_inband(mode)) { /* Phy or fixed speed - nothing to do, leave the * configured speed, duplex and flow control as-is. @@ -3950,66 +4034,23 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { /* SGMII mode receives the state from the PHY */ new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE; - new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; - new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | - MVNETA_GMAC_FORCE_LINK_PASS | - MVNETA_GMAC_CONFIG_MII_SPEED | - MVNETA_GMAC_CONFIG_GMII_SPEED | - MVNETA_GMAC_CONFIG_FULL_DUPLEX)) | - MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_AN_SPEED_EN | - MVNETA_GMAC_AN_DUPLEX_EN; } else { /* 802.3z negotiation - only 1000base-X */ new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X; - new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; - new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | - MVNETA_GMAC_FORCE_LINK_PASS | - MVNETA_GMAC_CONFIG_MII_SPEED)) | - MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_CONFIG_GMII_SPEED | - /* The MAC only supports FD mode */ - MVNETA_GMAC_CONFIG_FULL_DUPLEX; - - if (state->pause & MLO_PAUSE_AN && state->an_enabled) - new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN; - } - - /* Armada 370 documentation says we can only change the port mode - * and in-band enable when the link is down, so force it down - * while making these changes. 
We also do this for GMAC_CTRL2 - */ - if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X || - (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE || - (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) { - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, - (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) | - MVNETA_GMAC_FORCE_LINK_DOWN); } - /* When at 2.5G, the link partner can send frames with shortened * preambles. */ if (state->interface == PHY_INTERFACE_MODE_2500BASEX) new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; - if (pp->phy_interface != state->interface) { - if (pp->comphy) - WARN_ON(phy_power_off(pp->comphy)); - WARN_ON(mvneta_config_interface(pp, state->interface)); - } - if (new_ctrl0 != gmac_ctrl0) mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); if (new_ctrl2 != gmac_ctrl2) mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); if (new_ctrl4 != gmac_ctrl4) mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4); - if (new_clk != gmac_clk) - mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk); - if (new_an != gmac_an) - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an); if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) { while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & @@ -4018,6 +4059,36 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, } } +static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct mvneta_port *pp = netdev_priv(ndev); + u32 val, clk; + + /* Disable 1ms clock if not in in-band mode */ + if (!phylink_autoneg_inband(mode)) { + clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); + clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk); + } + + if (pp->phy_interface != interface) + /* Enable the Serdes PHY */ + WARN_ON(mvneta_config_interface(pp, interface)); + + /* Allow the link to come up if in in-band mode, otherwise the + * link is forced via mac_link_down()/mac_link_up() + */ + if 
(phylink_autoneg_inband(mode)) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + } + + return 0; +} + static void mvneta_set_eee(struct mvneta_port *pp, bool enable) { u32 lpi_ctl1; @@ -4104,10 +4175,10 @@ static void mvneta_mac_link_up(struct phylink_config *config, } static const struct phylink_mac_ops mvneta_phylink_ops = { - .validate = mvneta_validate, - .mac_pcs_get_state = mvneta_mac_pcs_get_state, - .mac_an_restart = mvneta_mac_an_restart, + .validate = phylink_generic_validate, + .mac_prepare = mvneta_mac_prepare, .mac_config = mvneta_mac_config, + .mac_finish = mvneta_mac_finish, .mac_link_down = mvneta_mac_link_down, .mac_link_up = mvneta_mac_link_up, }; @@ -4539,8 +4610,11 @@ static void mvneta_ethtool_get_drvinfo(struct net_device *dev, } -static void mvneta_ethtool_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) +static void +mvneta_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(netdev); @@ -4550,8 +4624,11 @@ static void mvneta_ethtool_get_ringparam(struct net_device *netdev, ring->tx_pending = pp->tx_ring_size; } -static int mvneta_ethtool_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) +static int +mvneta_ethtool_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(dev); @@ -4919,43 +4996,144 @@ static void mvneta_clear_rx_prio_map(struct mvneta_port *pp) mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0); } -static void mvneta_setup_rx_prio_map(struct mvneta_port *pp) +static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq) { - u32 val = 0; - int i; + u32 val = mvreg_read(pp, 
MVNETA_VLAN_PRIO_TO_RXQ); - for (i = 0; i < rxq_number; i++) - val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]); + val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7); + val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq); mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val); } +static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp) +{ + unsigned long core_clk_rate; + u32 refill_cycles; + u32 val; + + core_clk_rate = clk_get_rate(pp->clk); + if (!core_clk_rate) + return -EINVAL; + + refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS / + (NSEC_PER_SEC / core_clk_rate); + + if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK) + return -EINVAL; + + /* Enable bw limit algorithm version 3 */ + val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); + val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); + mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); + + /* Set the base refill rate */ + mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles); + + return 0; +} + +static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp) +{ + u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); + + val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); + mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val); +} + +static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue, + u64 min_rate, u64 max_rate) +{ + u32 refill_val, rem; + u32 val = 0; + + /* Convert to from Bps to bps */ + max_rate *= 8; + + if (min_rate) + return -EINVAL; + + refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION, + &rem); + + if (rem || !refill_val || + refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX) + return -EINVAL; + + val = refill_val; + val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD << + MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT); + + mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val); + + return 0; +} + static int mvneta_setup_mqprio(struct net_device *dev, - struct tc_mqprio_qopt *qopt) + struct tc_mqprio_qopt_offload *mqprio) { struct mvneta_port *pp = netdev_priv(dev); + int 
rxq, txq, tc, ret; u8 num_tc; - int i; - qopt->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - num_tc = qopt->num_tc; + if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) + return 0; + + num_tc = mqprio->qopt.num_tc; if (num_tc > rxq_number) return -EINVAL; + mvneta_clear_rx_prio_map(pp); + if (!num_tc) { - mvneta_clear_rx_prio_map(pp); + mvneta_disable_per_queue_rate_limit(pp); netdev_reset_tc(dev); return 0; } - memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map)); + netdev_set_num_tc(dev, mqprio->qopt.num_tc); + + for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { + netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc], + mqprio->qopt.offset[tc]); + + for (rxq = mqprio->qopt.offset[tc]; + rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; + rxq++) { + if (rxq >= rxq_number) + return -EINVAL; - mvneta_setup_rx_prio_map(pp); + mvneta_map_vlan_prio_to_rxq(pp, tc, rxq); + } + } - netdev_set_num_tc(dev, qopt->num_tc); - for (i = 0; i < qopt->num_tc; i++) - netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]); + if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) { + mvneta_disable_per_queue_rate_limit(pp); + return 0; + } + + if (mqprio->qopt.num_tc > txq_number) + return -EINVAL; + + ret = mvneta_enable_per_queue_rate_limit(pp); + if (ret) + return ret; + + for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { + for (txq = mqprio->qopt.offset[tc]; + txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; + txq++) { + if (txq >= txq_number) + return -EINVAL; + + ret = mvneta_setup_queue_rates(pp, txq, + mqprio->min_rate[tc], + mqprio->max_rate[tc]); + if (ret) + return ret; + } + } return 0; } @@ -5166,6 +5344,9 @@ static int mvneta_probe(struct platform_device *pdev) pp->phylink_config.dev = &dev->dev; pp->phylink_config.type = PHYLINK_NETDEV; + pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | + MAC_100 | MAC_1000FD | MAC_2500FD; + phy_interface_set_rgmii(pp->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_QSGMII, 
pp->phylink_config.supported_interfaces); @@ -5237,6 +5418,9 @@ static int mvneta_probe(struct platform_device *pdev) goto err_clk; } + pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops; + phylink_set_pcs(phylink, &pp->phylink_pcs); + /* Alloc per-cpu port structure */ pp->ports = alloc_percpu(struct mvneta_pcpu_port); if (!pp->ports) { @@ -5355,7 +5539,7 @@ static int mvneta_probe(struct platform_device *pdev) dev->hw_features |= dev->features; dev->vlan_features |= dev->features; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; + netif_set_gso_max_segs(dev, MVNETA_MAX_TSO_SEGS); /* MTU range: 68 - 9676 */ dev->min_mtu = ETH_MIN_MTU; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index cf8acabb90ac..ad73a488fc5f 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1239,7 +1239,8 @@ struct mvpp2_port { phy_interface_t phy_interface; struct phylink *phylink; struct phylink_config phylink_config; - struct phylink_pcs phylink_pcs; + struct phylink_pcs pcs_gmac; + struct phylink_pcs pcs_xlg; struct phy *comphy; struct mvpp2_bm_pool *pool_long; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 6da8a595026b..7cdbf8b8bbf6 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1488,6 +1488,7 @@ static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port) static bool mvpp2_is_xlg(phy_interface_t interface) { return interface == PHY_INTERFACE_MODE_10GBASER || + interface == PHY_INTERFACE_MODE_5GBASER || interface == PHY_INTERFACE_MODE_XAUI; } @@ -1627,6 +1628,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface) case PHY_INTERFACE_MODE_2500BASEX: mvpp22_gop_init_sgmii(port); break; + case PHY_INTERFACE_MODE_5GBASER: case PHY_INTERFACE_MODE_10GBASER: if 
(!mvpp2_port_supports_xlg(port)) goto invalid_conf; @@ -2186,6 +2188,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port, xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); switch (interface) { + case PHY_INTERFACE_MODE_5GBASER: case PHY_INTERFACE_MODE_10GBASER: val = readl(mpcs + MVPP22_MPCS_CLK_RESET); val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | @@ -3820,7 +3823,7 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog, } break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(port->dev, prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(port->dev, prog, act); @@ -5139,9 +5142,6 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - if (config.flags) - return -EINVAL; - if (config.tx_type != HWTSTAMP_TX_OFF && config.tx_type != HWTSTAMP_TX_ON) return -ERANGE; @@ -5433,8 +5433,11 @@ static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, sizeof(drvinfo->bus_info)); } -static void mvpp2_ethtool_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) +static void +mvpp2_ethtool_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct mvpp2_port *port = netdev_priv(dev); @@ -5444,8 +5447,11 @@ static void mvpp2_ethtool_get_ringparam(struct net_device *dev, ring->tx_pending = port->tx_ring_size; } -static int mvpp2_ethtool_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) +static int +mvpp2_ethtool_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct mvpp2_port *port = netdev_priv(dev); u16 prev_rx_ring_size = port->rx_ring_size; @@ -6109,18 +6115,26 @@ static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config) return 
container_of(config, struct mvpp2_port, phylink_config); } -static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs) +static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs) { - return container_of(pcs, struct mvpp2_port, phylink_pcs); + return container_of(pcs, struct mvpp2_port, pcs_xlg); +} + +static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct mvpp2_port, pcs_gmac); } static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { - struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs); u32 val; - state->speed = SPEED_10000; + if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER) + state->speed = SPEED_5000; + else + state->speed = SPEED_10000; state->duplex = 1; state->an_complete = 1; @@ -6149,10 +6163,25 @@ static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { .pcs_config = mvpp2_xlg_pcs_config, }; +static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs, + unsigned long *supported, + const struct phylink_link_state *state) +{ + /* When in 802.3z mode, we must have AN enabled: + * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... + * When <PortType> = 1 (1000BASE-X) this field must be set to 1. 
+ */ + if (phy_interface_mode_is_8023z(state->interface) && + !phylink_test(state->advertising, Autoneg)) + return -EINVAL; + + return 0; +} + static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { - struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); u32 val; val = readl(port->base + MVPP2_GMAC_STATUS0); @@ -6189,7 +6218,7 @@ static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode, const unsigned long *advertising, bool permit_pause_to_mac) { - struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); u32 mask, val, an, old_an, changed; mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | @@ -6243,7 +6272,7 @@ static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode, static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) { - struct mvpp2_port *port = mvpp2_pcs_to_port(pcs); + struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, @@ -6253,78 +6282,12 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) } static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = { + .pcs_validate = mvpp2_gmac_pcs_validate, .pcs_get_state = mvpp2_gmac_pcs_get_state, .pcs_config = mvpp2_gmac_pcs_config, .pcs_an_restart = mvpp2_gmac_pcs_an_restart, }; -static void mvpp2_phylink_validate(struct phylink_config *config, - unsigned long *supported, - struct phylink_link_state *state) -{ - struct mvpp2_port *port = mvpp2_phylink_to_port(config); - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - - /* When in 802.3z mode, we must have AN enabled: - * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... - * When <PortType> = 1 (1000BASE-X) this field must be set to 1. 
- */ - if (phy_interface_mode_is_8023z(state->interface) && - !phylink_test(state->advertising, Autoneg)) - goto empty_set; - - phylink_set(mask, Autoneg); - phylink_set_port_modes(mask); - - if (port->priv->global_tx_fc) { - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); - } - - switch (state->interface) { - case PHY_INTERFACE_MODE_10GBASER: - case PHY_INTERFACE_MODE_XAUI: - if (mvpp2_port_supports_xlg(port)) { - phylink_set_10g_modes(mask); - phylink_set(mask, 10000baseKR_Full); - } - break; - - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_SGMII: - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - break; - - case PHY_INTERFACE_MODE_1000BASEX: - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - break; - - case PHY_INTERFACE_MODE_2500BASEX: - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - break; - - default: - goto empty_set; - } - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); - return; - -empty_set: - linkmode_zero(supported); -} - static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, const struct phylink_link_state *state) { @@ -6404,8 +6367,23 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); } -static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode, - phy_interface_t interface) +static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct mvpp2_port *port = mvpp2_phylink_to_port(config); + + /* Select the appropriate PCS operations depending on the + * configured interface mode. 
We will only switch to a mode + * that the validate() checks have already passed. + */ + if (mvpp2_is_xlg(interface)) + return &port->pcs_xlg; + else + return &port->pcs_gmac; +} + +static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) { struct mvpp2_port *port = mvpp2_phylink_to_port(config); @@ -6454,31 +6432,9 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode, } } - /* Select the appropriate PCS operations depending on the - * configured interface mode. We will only switch to a mode - * that the validate() checks have already passed. - */ - if (mvpp2_is_xlg(interface)) - port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops; - else - port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops; - return 0; } -static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode, - phy_interface_t interface) -{ - struct mvpp2_port *port = mvpp2_phylink_to_port(config); - int ret; - - ret = mvpp2__mac_prepare(config, mode, interface); - if (ret == 0) - phylink_set_pcs(port->phylink, &port->phylink_pcs); - - return ret; -} - static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { @@ -6649,7 +6605,8 @@ static void mvpp2_mac_link_down(struct phylink_config *config, } static const struct phylink_mac_ops mvpp2_phylink_ops = { - .validate = mvpp2_phylink_validate, + .validate = phylink_generic_validate, + .mac_select_pcs = mvpp2_select_pcs, .mac_prepare = mvpp2_mac_prepare, .mac_config = mvpp2_mac_config, .mac_finish = mvpp2_mac_finish, @@ -6667,12 +6624,15 @@ static void mvpp2_acpi_start(struct mvpp2_port *port) struct phylink_link_state state = { .interface = port->phy_interface, }; - mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND, - port->phy_interface); + struct phylink_pcs *pcs; + + pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface); + + mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND, + 
port->phy_interface); mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); - port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND, - port->phy_interface, - state.advertising, false); + pcs->ops->pcs_config(pcs, MLO_AN_INBAND, port->phy_interface, + state.advertising, false); mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND, port->phy_interface); mvpp2_mac_link_up(&port->phylink_config, NULL, @@ -6901,7 +6861,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, mvpp2_set_hw_csum(port, port->pool_long->id); dev->vlan_features |= features; - dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; + netif_set_gso_max_segs(dev, MVPP2_MAX_TSO_SEGS); dev->priv_flags |= IFF_UNICAST_FLT; /* MTU range: 68 - 9704 */ @@ -6913,12 +6873,44 @@ static int mvpp2_port_probe(struct platform_device *pdev, if (!mvpp2_use_acpi_compat_mode(port_fwnode)) { port->phylink_config.dev = &dev->dev; port->phylink_config.type = PHYLINK_NETDEV; + port->phylink_config.mac_capabilities = + MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10; + + if (port->priv->global_tx_fc) + port->phylink_config.mac_capabilities |= + MAC_SYM_PAUSE | MAC_ASYM_PAUSE; if (mvpp2_port_supports_xlg(port)) { - __set_bit(PHY_INTERFACE_MODE_10GBASER, - port->phylink_config.supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_XAUI, - port->phylink_config.supported_interfaces); + /* If a COMPHY is present, we can support any of + * the serdes modes and switch between them. 
+ */ + if (comphy) { + __set_bit(PHY_INTERFACE_MODE_5GBASER, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_10GBASER, + port->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_XAUI, + port->phylink_config.supported_interfaces); + } else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) { + __set_bit(PHY_INTERFACE_MODE_5GBASER, + port->phylink_config.supported_interfaces); + } else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) { + __set_bit(PHY_INTERFACE_MODE_10GBASER, + port->phylink_config.supported_interfaces); + } else if (phy_mode == PHY_INTERFACE_MODE_XAUI) { + __set_bit(PHY_INTERFACE_MODE_XAUI, + port->phylink_config.supported_interfaces); + } + + if (comphy) + port->phylink_config.mac_capabilities |= + MAC_10000FD | MAC_5000FD; + else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) + port->phylink_config.mac_capabilities |= + MAC_5000FD; + else + port->phylink_config.mac_capabilities |= + MAC_10000FD; } if (mvpp2_port_supports_rgmii(port)) @@ -6948,6 +6940,9 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->phylink_config.supported_interfaces); } + port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; + port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; + phylink = phylink_create(&port->phylink_config, port_fwnode, phy_mode, &mvpp2_phylink_ops); if (IS_ERR(phylink)) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index d6321de3cc17..e682b7bfde64 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -60,6 +60,8 @@ struct ptp *ptp_get(void) /* Check driver is bound to PTP block */ if (!ptp) ptp = ERR_PTR(-EPROBE_DEFER); + else + pci_dev_get(ptp->pdev); return ptp; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 45357deecabb..a73a8017e0ee 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -172,14 +172,13 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off) { struct rvu *rvu = block->rvu; int blkaddr = block->addr; - char irq_name[16]; int i, ret; for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) { - snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i); + sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i); ret = rvu_cpt_do_register_interrupt(block, off + i, rvu_cpt_af_flt_intr_handler, - irq_name); + &rvu->irq_name[(off + i) * NAME_SIZE]); if (ret) goto err; rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 70bacd38a6d9..d0ab8f233a02 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -41,7 +41,7 @@ static bool rvu_common_request_irq(struct rvu *rvu, int offset, struct rvu_devlink *rvu_dl = rvu->rvu_dl; int rc; - sprintf(&rvu->irq_name[offset * NAME_SIZE], name); + sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name); rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0, &rvu->irq_name[offset * NAME_SIZE], rvu_dl); if (rc) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 80d4ce61f442..d85db90632d6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -360,7 +360,9 @@ static int otx2_set_pauseparam(struct net_device *netdev, } static void otx2_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_qset *qs = &pfvf->qset; @@ -372,7 +374,9 @@ static void 
otx2_get_ringparam(struct net_device *netdev, } static int otx2_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); bool if_up = netif_running(netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 1e0d0c9c1dac..6080ebd9bd94 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -2002,10 +2002,6 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - switch (config.tx_type) { case HWTSTAMP_TX_OFF: otx2_config_hw_tx_tstamp(pfvf, false); @@ -2741,7 +2737,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL; - netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; + netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS); netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2_netdev_ops; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 0cc6353254bf..7c4068c5d1ac 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -1198,7 +1198,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, put_page(page); break; default: - bpf_warn_invalid_xdp_action(act); + bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act); break; case XDP_ABORTED: trace_xdp_exception(pfvf->netdev, prog, act); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 
78944ad3492f..925b74ebb8b0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -663,7 +663,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) netdev->hw_features |= NETIF_F_NTUPLE; netdev->hw_features |= NETIF_F_RXALL; - netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; + netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS); netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2vf_netdev_ops; @@ -684,7 +684,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_detach_rsrc; + goto err_ptp_destroy; } err = otx2_wq_init(vf); @@ -709,6 +709,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_unreg_netdev: unregister_netdev(netdev); +err_ptp_destroy: + otx2_ptp_destroy(vf); err_detach_rsrc: if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) qmem_free(vf->dev, vf->dync_lmt); @@ -742,6 +744,7 @@ static void otx2vf_remove(struct pci_dev *pdev) unregister_netdev(netdev); if (vf->otx2_wq) destroy_workqueue(vf->otx2_wq); + otx2_ptp_destroy(vf); otx2vf_disable_mbox_intr(vf); otx2_detach_resources(&vf->mbox); if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile index 0609df8b913d..d395f4131648 100644 --- a/drivers/net/ethernet/marvell/prestera/Makefile +++ b/drivers/net/ethernet/marvell/prestera/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_PRESTERA) += prestera.o prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \ prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \ prestera_switchdev.o prestera_acl.o prestera_flow.o \ - prestera_flower.o prestera_span.o + prestera_flower.o prestera_span.o prestera_counter.o \ + prestera_router.o prestera_router_hw.o obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o diff --git 
a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h index 2a4c14c704c0..a0a5a8e6bd8c 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera.h +++ b/drivers/net/ethernet/marvell/prestera/prestera.h @@ -225,6 +225,29 @@ struct prestera_event { }; }; +enum prestera_if_type { + /* the interface is of port type (dev,port) */ + PRESTERA_IF_PORT_E = 0, + + /* the interface is of lag type (lag-id) */ + PRESTERA_IF_LAG_E = 1, + + /* the interface is of Vid type (vlan-id) */ + PRESTERA_IF_VID_E = 3, +}; + +struct prestera_iface { + enum prestera_if_type type; + struct { + u32 hw_dev_num; + u32 port_num; + } dev_port; + u32 hw_dev_num; + u16 vr_id; + u16 lag_id; + u16 vlan_id; +}; + struct prestera_switchdev; struct prestera_span; struct prestera_rxtx; @@ -247,11 +270,22 @@ struct prestera_switch { u32 mtu_min; u32 mtu_max; u8 id; + struct prestera_router *router; struct prestera_lag *lags; + struct prestera_counter *counter; u8 lag_member_max; u8 lag_max; }; +struct prestera_router { + struct prestera_switch *sw; + struct list_head vr_list; + struct list_head rif_entry_list; + struct notifier_block inetaddr_nb; + struct notifier_block inetaddr_valid_nb; + bool aborted; +}; + struct prestera_rxtx_params { bool use_sdma; u32 map_addr; @@ -279,6 +313,9 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw, int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes); +int prestera_router_init(struct prestera_switch *sw); +void prestera_router_fini(struct prestera_switch *sw); + struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id); int prestera_port_cfg_mac_read(struct prestera_port *port, @@ -293,6 +330,8 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid); bool prestera_netdev_check(const struct net_device *dev); +int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr); + bool prestera_port_is_lag_member(const struct 
prestera_port *port); struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c index 83c75ffb1a1c..f0d9f592173b 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c @@ -1,35 +1,72 @@ // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 -/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */ +/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved */ #include <linux/rhashtable.h> -#include "prestera.h" -#include "prestera_hw.h" #include "prestera_acl.h" -#include "prestera_span.h" +#include "prestera_flow.h" +#include "prestera_hw.h" +#include "prestera.h" + +#define ACL_KEYMASK_SIZE \ + (sizeof(__be32) * __PRESTERA_ACL_RULE_MATCH_TYPE_MAX) struct prestera_acl { struct prestera_switch *sw; + struct list_head vtcam_list; struct list_head rules; + struct rhashtable ruleset_ht; + struct rhashtable acl_rule_entry_ht; + struct idr uid; +}; + +struct prestera_acl_ruleset_ht_key { + struct prestera_flow_block *block; +}; + +struct prestera_acl_rule_entry { + struct rhash_head ht_node; + struct prestera_acl_rule_entry_key key; + u32 hw_id; + u32 vtcam_id; + struct { + struct { + u8 valid:1; + } accept, drop, trap; + struct { + u32 id; + struct prestera_counter_block *block; + } counter; + }; }; struct prestera_acl_ruleset { + struct rhash_head ht_node; /* Member of acl HT */ + struct prestera_acl_ruleset_ht_key ht_key; struct rhashtable rule_ht; - struct prestera_switch *sw; - u16 id; + struct prestera_acl *acl; + unsigned long rule_count; + refcount_t refcount; + void *keymask; + u32 vtcam_id; + u16 pcl_id; + bool offload; }; -struct prestera_acl_rule { - struct rhash_head ht_node; +struct prestera_acl_vtcam { struct list_head list; - struct list_head match_list; - struct list_head action_list; - struct prestera_flow_block *block; 
- unsigned long cookie; - u32 priority; - u8 n_actions; - u8 n_matches; + __be32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + refcount_t refcount; u32 id; + bool is_keymask_set; + u8 lookup; +}; + +static const struct rhashtable_params prestera_acl_ruleset_ht_params = { + .key_len = sizeof(struct prestera_acl_ruleset_ht_key), + .key_offset = offsetof(struct prestera_acl_ruleset, ht_key), + .head_offset = offsetof(struct prestera_acl_ruleset, ht_node), + .automatic_shrinking = true, }; static const struct rhashtable_params prestera_acl_rule_ht_params = { @@ -39,28 +76,48 @@ static const struct rhashtable_params prestera_acl_rule_ht_params = { .automatic_shrinking = true, }; +static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = { + .key_offset = offsetof(struct prestera_acl_rule_entry, key), + .head_offset = offsetof(struct prestera_acl_rule_entry, ht_node), + .key_len = sizeof(struct prestera_acl_rule_entry_key), + .automatic_shrinking = true, +}; + static struct prestera_acl_ruleset * -prestera_acl_ruleset_create(struct prestera_switch *sw) +prestera_acl_ruleset_create(struct prestera_acl *acl, + struct prestera_flow_block *block) { struct prestera_acl_ruleset *ruleset; + u32 uid = 0; int err; ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL); if (!ruleset) return ERR_PTR(-ENOMEM); + ruleset->acl = acl; + ruleset->ht_key.block = block; + refcount_set(&ruleset->refcount, 1); + err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params); if (err) goto err_rhashtable_init; - err = prestera_hw_acl_ruleset_create(sw, &ruleset->id); + err = idr_alloc_u32(&acl->uid, NULL, &uid, U8_MAX, GFP_KERNEL); if (err) goto err_ruleset_create; - ruleset->sw = sw; + /* make pcl-id based on uid */ + ruleset->pcl_id = (u8)uid; + err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node, + prestera_acl_ruleset_ht_params); + if (err) + goto err_ruleset_ht_insert; return ruleset; +err_ruleset_ht_insert: + idr_remove(&acl->uid, uid); 
err_ruleset_create: rhashtable_destroy(&ruleset->rule_ht); err_rhashtable_init: @@ -68,117 +125,164 @@ err_rhashtable_init: return ERR_PTR(err); } -static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset) +void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, + void *keymask) { - prestera_hw_acl_ruleset_del(ruleset->sw, ruleset->id); - rhashtable_destroy(&ruleset->rule_ht); - kfree(ruleset); + ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL); } -struct prestera_flow_block * -prestera_acl_block_create(struct prestera_switch *sw, struct net *net) +int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset) { - struct prestera_flow_block *block; + u32 vtcam_id; + int err; - block = kzalloc(sizeof(*block), GFP_KERNEL); - if (!block) - return NULL; - INIT_LIST_HEAD(&block->binding_list); - block->net = net; - block->sw = sw; - - block->ruleset = prestera_acl_ruleset_create(sw); - if (IS_ERR(block->ruleset)) { - kfree(block); - return NULL; - } + if (ruleset->offload) + return -EEXIST; + + err = prestera_acl_vtcam_id_get(ruleset->acl, 0, + ruleset->keymask, &vtcam_id); + if (err) + return err; - return block; + ruleset->vtcam_id = vtcam_id; + ruleset->offload = true; + return 0; } -void prestera_acl_block_destroy(struct prestera_flow_block *block) +static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset) { - prestera_acl_ruleset_destroy(block->ruleset); - WARN_ON(!list_empty(&block->binding_list)); - kfree(block); + struct prestera_acl *acl = ruleset->acl; + u8 uid = ruleset->pcl_id & PRESTERA_ACL_KEYMASK_PCL_ID_USER; + + rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, + prestera_acl_ruleset_ht_params); + + if (ruleset->offload) + WARN_ON(prestera_acl_vtcam_id_put(acl, ruleset->vtcam_id)); + + idr_remove(&acl->uid, uid); + + rhashtable_destroy(&ruleset->rule_ht); + kfree(ruleset->keymask); + kfree(ruleset); } -static struct prestera_flow_block_binding * 
-prestera_acl_block_lookup(struct prestera_flow_block *block, - struct prestera_port *port) +static struct prestera_acl_ruleset * +__prestera_acl_ruleset_lookup(struct prestera_acl *acl, + struct prestera_flow_block *block) { - struct prestera_flow_block_binding *binding; + struct prestera_acl_ruleset_ht_key ht_key; - list_for_each_entry(binding, &block->binding_list, list) - if (binding->port == port) - return binding; - - return NULL; + memset(&ht_key, 0, sizeof(ht_key)); + ht_key.block = block; + return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, + prestera_acl_ruleset_ht_params); } -int prestera_acl_block_bind(struct prestera_flow_block *block, - struct prestera_port *port) +struct prestera_acl_ruleset * +prestera_acl_ruleset_lookup(struct prestera_acl *acl, + struct prestera_flow_block *block) { - struct prestera_flow_block_binding *binding; - int err; + struct prestera_acl_ruleset *ruleset; - if (WARN_ON(prestera_acl_block_lookup(block, port))) - return -EEXIST; + ruleset = __prestera_acl_ruleset_lookup(acl, block); + if (!ruleset) + return ERR_PTR(-ENOENT); - binding = kzalloc(sizeof(*binding), GFP_KERNEL); - if (!binding) - return -ENOMEM; - binding->span_id = PRESTERA_SPAN_INVALID_ID; - binding->port = port; + refcount_inc(&ruleset->refcount); + return ruleset; +} - err = prestera_hw_acl_port_bind(port, block->ruleset->id); - if (err) - goto err_rules_bind; +struct prestera_acl_ruleset * +prestera_acl_ruleset_get(struct prestera_acl *acl, + struct prestera_flow_block *block) +{ + struct prestera_acl_ruleset *ruleset; - list_add(&binding->list, &block->binding_list); - return 0; + ruleset = __prestera_acl_ruleset_lookup(acl, block); + if (ruleset) { + refcount_inc(&ruleset->refcount); + return ruleset; + } -err_rules_bind: - kfree(binding); - return err; + return prestera_acl_ruleset_create(acl, block); } -int prestera_acl_block_unbind(struct prestera_flow_block *block, - struct prestera_port *port) +void prestera_acl_ruleset_put(struct 
prestera_acl_ruleset *ruleset) { - struct prestera_flow_block_binding *binding; + if (!refcount_dec_and_test(&ruleset->refcount)) + return; - binding = prestera_acl_block_lookup(block, port); - if (!binding) - return -ENOENT; - - list_del(&binding->list); + prestera_acl_ruleset_destroy(ruleset); +} - prestera_hw_acl_port_unbind(port, block->ruleset->id); +int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset, + struct prestera_port *port) +{ + struct prestera_acl_iface iface = { + .type = PRESTERA_ACL_IFACE_TYPE_PORT, + .port = port + }; - kfree(binding); - return 0; + return prestera_hw_vtcam_iface_bind(port->sw, &iface, ruleset->vtcam_id, + ruleset->pcl_id); } -struct prestera_acl_ruleset * -prestera_acl_block_ruleset_get(struct prestera_flow_block *block) +int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset, + struct prestera_port *port) { - return block->ruleset; + struct prestera_acl_iface iface = { + .type = PRESTERA_ACL_IFACE_TYPE_PORT, + .port = port + }; + + return prestera_hw_vtcam_iface_unbind(port->sw, &iface, + ruleset->vtcam_id); } -u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule) +static int prestera_acl_ruleset_block_bind(struct prestera_acl_ruleset *ruleset, + struct prestera_flow_block *block) { - return rule->block->ruleset->id; + struct prestera_flow_block_binding *binding; + int err; + + block->ruleset_zero = ruleset; + list_for_each_entry(binding, &block->binding_list, list) { + err = prestera_acl_ruleset_bind(ruleset, binding->port); + if (err) + goto rollback; + } + return 0; + +rollback: + list_for_each_entry_continue_reverse(binding, &block->binding_list, + list) + err = prestera_acl_ruleset_unbind(ruleset, binding->port); + block->ruleset_zero = NULL; + + return err; } -struct net *prestera_acl_block_net(struct prestera_flow_block *block) +static void +prestera_acl_ruleset_block_unbind(struct prestera_acl_ruleset *ruleset, + struct prestera_flow_block *block) { - return block->net; + 
struct prestera_flow_block_binding *binding; + + list_for_each_entry(binding, &block->binding_list, list) + prestera_acl_ruleset_unbind(ruleset, binding->port); + block->ruleset_zero = NULL; } -struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block) +void +prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, u16 pcl_id) { - return block->sw; + struct prestera_acl_match *r_match = &rule->re_key.match; + __be16 pcl_id_mask = htons(PRESTERA_ACL_KEYMASK_PCL_ID); + __be16 pcl_id_key = htons(pcl_id); + + rule_match_set(r_match->key, PCL_ID, pcl_id_key); + rule_match_set(r_match->mask, PCL_ID, pcl_id_mask); } struct prestera_acl_rule * @@ -189,8 +293,13 @@ prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset, prestera_acl_rule_ht_params); } +bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset) +{ + return ruleset->offload; +} + struct prestera_acl_rule * -prestera_acl_rule_create(struct prestera_flow_block *block, +prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset, unsigned long cookie) { struct prestera_acl_rule *rule; @@ -199,178 +308,436 @@ prestera_acl_rule_create(struct prestera_flow_block *block, if (!rule) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&rule->match_list); - INIT_LIST_HEAD(&rule->action_list); + rule->ruleset = ruleset; rule->cookie = cookie; - rule->block = block; + + refcount_inc(&ruleset->refcount); return rule; } -struct list_head * -prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule) +void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, + u32 priority) { - return &rule->match_list; + rule->priority = priority; } -struct list_head * -prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule) +void prestera_acl_rule_destroy(struct prestera_acl_rule *rule) { - return &rule->action_list; + prestera_acl_ruleset_put(rule->ruleset); + kfree(rule); } -int prestera_acl_rule_action_add(struct prestera_acl_rule *rule, - struct 
prestera_acl_rule_action_entry *entry) +int prestera_acl_rule_add(struct prestera_switch *sw, + struct prestera_acl_rule *rule) { - struct prestera_acl_rule_action_entry *a_entry; + int err; + struct prestera_acl_ruleset *ruleset = rule->ruleset; + struct prestera_flow_block *block = ruleset->ht_key.block; - a_entry = kmalloc(sizeof(*a_entry), GFP_KERNEL); - if (!a_entry) - return -ENOMEM; + /* try to add rule to hash table first */ + err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node, + prestera_acl_rule_ht_params); + if (err) + goto err_ht_insert; - memcpy(a_entry, entry, sizeof(*entry)); - list_add(&a_entry->list, &rule->action_list); + prestera_acl_rule_keymask_pcl_id_set(rule, ruleset->pcl_id); + rule->re_arg.vtcam_id = ruleset->vtcam_id; + rule->re_key.prio = rule->priority; - rule->n_actions++; + /* setup counter */ + rule->re_arg.count.valid = true; + rule->re_arg.count.client = PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0; + + rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key); + err = WARN_ON(rule->re) ? -EEXIST : 0; + if (err) + goto err_rule_add; + + rule->re = prestera_acl_rule_entry_create(sw->acl, &rule->re_key, + &rule->re_arg); + err = !rule->re ? 
-EINVAL : 0; + if (err) + goto err_rule_add; + + /* bind the block (all ports) to chain index 0 */ + if (!ruleset->rule_count) { + err = prestera_acl_ruleset_block_bind(ruleset, block); + if (err) + goto err_acl_block_bind; + } + + list_add_tail(&rule->list, &sw->acl->rules); + ruleset->rule_count++; return 0; + +err_acl_block_bind: + prestera_acl_rule_entry_destroy(sw->acl, rule->re); +err_rule_add: + rule->re = NULL; + rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, + prestera_acl_rule_ht_params); +err_ht_insert: + return err; } -u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule) +void prestera_acl_rule_del(struct prestera_switch *sw, + struct prestera_acl_rule *rule) { - return rule->n_actions; + struct prestera_acl_ruleset *ruleset = rule->ruleset; + struct prestera_flow_block *block = ruleset->ht_key.block; + + rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, + prestera_acl_rule_ht_params); + ruleset->rule_count--; + list_del(&rule->list); + + prestera_acl_rule_entry_destroy(sw->acl, rule->re); + + /* unbind block (all ports) */ + if (!ruleset->rule_count) + prestera_acl_ruleset_block_unbind(ruleset, block); } -u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule) +int prestera_acl_rule_get_stats(struct prestera_acl *acl, + struct prestera_acl_rule *rule, + u64 *packets, u64 *bytes, u64 *last_use) { - return rule->priority; + u64 current_packets; + u64 current_bytes; + int err; + + err = prestera_counter_stats_get(acl->sw->counter, + rule->re->counter.block, + rule->re->counter.id, + ¤t_packets, ¤t_bytes); + if (err) + return err; + + *packets = current_packets; + *bytes = current_bytes; + *last_use = jiffies; + + return 0; } -void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, - u32 priority) +struct prestera_acl_rule_entry * +prestera_acl_rule_entry_find(struct prestera_acl *acl, + struct prestera_acl_rule_entry_key *key) { - rule->priority = priority; + return 
rhashtable_lookup_fast(&acl->acl_rule_entry_ht, key, + __prestera_acl_rule_entry_ht_params); } -int prestera_acl_rule_match_add(struct prestera_acl_rule *rule, - struct prestera_acl_rule_match_entry *entry) +static int __prestera_acl_rule_entry2hw_del(struct prestera_switch *sw, + struct prestera_acl_rule_entry *e) { - struct prestera_acl_rule_match_entry *m_entry; + return prestera_hw_vtcam_rule_del(sw, e->vtcam_id, e->hw_id); +} - m_entry = kmalloc(sizeof(*m_entry), GFP_KERNEL); - if (!m_entry) - return -ENOMEM; +static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw, + struct prestera_acl_rule_entry *e) +{ + struct prestera_acl_hw_action_info act_hw[PRESTERA_ACL_RULE_ACTION_MAX]; + int act_num; - memcpy(m_entry, entry, sizeof(*entry)); - list_add(&m_entry->list, &rule->match_list); + memset(&act_hw, 0, sizeof(act_hw)); + act_num = 0; - rule->n_matches++; - return 0; + /* accept */ + if (e->accept.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_ACCEPT; + act_num++; + } + /* drop */ + if (e->drop.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_DROP; + act_num++; + } + /* trap */ + if (e->trap.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP; + act_num++; + } + /* counter */ + if (e->counter.block) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_COUNT; + act_hw[act_num].count.id = e->counter.id; + act_num++; + } + + return prestera_hw_vtcam_rule_add(sw, e->vtcam_id, e->key.prio, + e->key.match.key, e->key.match.mask, + act_hw, act_num, &e->hw_id); } -u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule) +static void +__prestera_acl_rule_entry_act_destruct(struct prestera_switch *sw, + struct prestera_acl_rule_entry *e) { - return rule->n_matches; + /* counter */ + prestera_counter_put(sw->counter, e->counter.block, e->counter.id); } -void prestera_acl_rule_destroy(struct prestera_acl_rule *rule) +void prestera_acl_rule_entry_destroy(struct prestera_acl *acl, + struct prestera_acl_rule_entry *e) { - struct 
prestera_acl_rule_action_entry *a_entry; - struct prestera_acl_rule_match_entry *m_entry; - struct list_head *pos, *n; + int ret; - list_for_each_safe(pos, n, &rule->match_list) { - m_entry = list_entry(pos, typeof(*m_entry), list); - list_del(pos); - kfree(m_entry); - } + rhashtable_remove_fast(&acl->acl_rule_entry_ht, &e->ht_node, + __prestera_acl_rule_entry_ht_params); + + ret = __prestera_acl_rule_entry2hw_del(acl->sw, e); + WARN_ON(ret && ret != -ENODEV); - list_for_each_safe(pos, n, &rule->action_list) { - a_entry = list_entry(pos, typeof(*a_entry), list); - list_del(pos); - kfree(a_entry); + __prestera_acl_rule_entry_act_destruct(acl->sw, e); + kfree(e); +} + +static int +__prestera_acl_rule_entry_act_construct(struct prestera_switch *sw, + struct prestera_acl_rule_entry *e, + struct prestera_acl_rule_entry_arg *arg) +{ + /* accept */ + e->accept.valid = arg->accept.valid; + /* drop */ + e->drop.valid = arg->drop.valid; + /* trap */ + e->trap.valid = arg->trap.valid; + /* counter */ + if (arg->count.valid) { + int err; + + err = prestera_counter_get(sw->counter, arg->count.client, + &e->counter.block, + &e->counter.id); + if (err) + goto err_out; } - kfree(rule); + return 0; + +err_out: + __prestera_acl_rule_entry_act_destruct(sw, e); + return -EINVAL; } -int prestera_acl_rule_add(struct prestera_switch *sw, - struct prestera_acl_rule *rule) +struct prestera_acl_rule_entry * +prestera_acl_rule_entry_create(struct prestera_acl *acl, + struct prestera_acl_rule_entry_key *key, + struct prestera_acl_rule_entry_arg *arg) { - u32 rule_id; + struct prestera_acl_rule_entry *e; int err; - /* try to add rule to hash table first */ - err = rhashtable_insert_fast(&rule->block->ruleset->rule_ht, - &rule->ht_node, - prestera_acl_rule_ht_params); - if (err) - return err; + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + goto err_kzalloc; - /* add rule to hw */ - err = prestera_hw_acl_rule_add(sw, rule, &rule_id); + memcpy(&e->key, key, sizeof(*key)); + e->vtcam_id = 
arg->vtcam_id; + err = __prestera_acl_rule_entry_act_construct(acl->sw, e, arg); if (err) - goto err_rule_add; + goto err_act_construct; - rule->id = rule_id; + err = __prestera_acl_rule_entry2hw_add(acl->sw, e); + if (err) + goto err_hw_add; - list_add_tail(&rule->list, &sw->acl->rules); + err = rhashtable_insert_fast(&acl->acl_rule_entry_ht, &e->ht_node, + __prestera_acl_rule_entry_ht_params); + if (err) + goto err_ht_insert; - return 0; + return e; -err_rule_add: - rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node, - prestera_acl_rule_ht_params); - return err; +err_ht_insert: + WARN_ON(__prestera_acl_rule_entry2hw_del(acl->sw, e)); +err_hw_add: + __prestera_acl_rule_entry_act_destruct(acl->sw, e); +err_act_construct: + kfree(e); +err_kzalloc: + return NULL; } -void prestera_acl_rule_del(struct prestera_switch *sw, - struct prestera_acl_rule *rule) +static int __prestera_acl_vtcam_id_try_fit(struct prestera_acl *acl, u8 lookup, + void *keymask, u32 *vtcam_id) { - rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node, - prestera_acl_rule_ht_params); - list_del(&rule->list); - prestera_hw_acl_rule_del(sw, rule->id); + struct prestera_acl_vtcam *vtcam; + int i; + + list_for_each_entry(vtcam, &acl->vtcam_list, list) { + if (lookup != vtcam->lookup) + continue; + + if (!keymask && !vtcam->is_keymask_set) + goto vtcam_found; + + if (!(keymask && vtcam->is_keymask_set)) + continue; + + /* try to fit with vtcam keymask */ + for (i = 0; i < __PRESTERA_ACL_RULE_MATCH_TYPE_MAX; i++) { + __be32 __keymask = ((__be32 *)keymask)[i]; + + if (!__keymask) + /* vtcam keymask in not interested */ + continue; + + if (__keymask & ~vtcam->keymask[i]) + /* keymask does not fit the vtcam keymask */ + break; + } + + if (i == __PRESTERA_ACL_RULE_MATCH_TYPE_MAX) + /* keymask fits vtcam keymask, return it */ + goto vtcam_found; + } + + /* nothing is found */ + return -ENOENT; + +vtcam_found: + refcount_inc(&vtcam->refcount); + *vtcam_id = vtcam->id; + 
return 0; } -int prestera_acl_rule_get_stats(struct prestera_switch *sw, - struct prestera_acl_rule *rule, - u64 *packets, u64 *bytes, u64 *last_use) +int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, + void *keymask, u32 *vtcam_id) { - u64 current_packets; - u64 current_bytes; + struct prestera_acl_vtcam *vtcam; + u32 new_vtcam_id; int err; - err = prestera_hw_acl_rule_stats_get(sw, rule->id, &current_packets, - &current_bytes); - if (err) - return err; + /* find the vtcam that suits keymask. We do not expect to have + * a big number of vtcams, so, the list type for vtcam list is + * fine for now + */ + list_for_each_entry(vtcam, &acl->vtcam_list, list) { + if (lookup != vtcam->lookup) + continue; + + if (!keymask && !vtcam->is_keymask_set) { + refcount_inc(&vtcam->refcount); + goto vtcam_found; + } + + if (keymask && vtcam->is_keymask_set && + !memcmp(keymask, vtcam->keymask, sizeof(vtcam->keymask))) { + refcount_inc(&vtcam->refcount); + goto vtcam_found; + } + } - *packets = current_packets; - *bytes = current_bytes; - *last_use = jiffies; + /* vtcam not found, try to create new one */ + vtcam = kzalloc(sizeof(*vtcam), GFP_KERNEL); + if (!vtcam) + return -ENOMEM; + + err = prestera_hw_vtcam_create(acl->sw, lookup, keymask, &new_vtcam_id, + PRESTERA_HW_VTCAM_DIR_INGRESS); + if (err) { + kfree(vtcam); + + /* cannot create new, try to fit into existing vtcam */ + if (__prestera_acl_vtcam_id_try_fit(acl, lookup, + keymask, &new_vtcam_id)) + return err; + *vtcam_id = new_vtcam_id; + return 0; + } + + vtcam->id = new_vtcam_id; + vtcam->lookup = lookup; + if (keymask) { + memcpy(vtcam->keymask, keymask, sizeof(vtcam->keymask)); + vtcam->is_keymask_set = true; + } + refcount_set(&vtcam->refcount, 1); + list_add_rcu(&vtcam->list, &acl->vtcam_list); + +vtcam_found: + *vtcam_id = vtcam->id; return 0; } +int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id) +{ + struct prestera_acl_vtcam *vtcam; + int err; + + list_for_each_entry(vtcam, 
&acl->vtcam_list, list) { + if (vtcam_id != vtcam->id) + continue; + + if (!refcount_dec_and_test(&vtcam->refcount)) + return 0; + + err = prestera_hw_vtcam_destroy(acl->sw, vtcam->id); + if (err && err != -ENODEV) { + refcount_set(&vtcam->refcount, 1); + return err; + } + + list_del(&vtcam->list); + kfree(vtcam); + return 0; + } + + return -ENOENT; +} + int prestera_acl_init(struct prestera_switch *sw) { struct prestera_acl *acl; + int err; acl = kzalloc(sizeof(*acl), GFP_KERNEL); if (!acl) return -ENOMEM; + acl->sw = sw; INIT_LIST_HEAD(&acl->rules); + INIT_LIST_HEAD(&acl->vtcam_list); + idr_init(&acl->uid); + + err = rhashtable_init(&acl->acl_rule_entry_ht, + &__prestera_acl_rule_entry_ht_params); + if (err) + goto err_acl_rule_entry_ht_init; + + err = rhashtable_init(&acl->ruleset_ht, + &prestera_acl_ruleset_ht_params); + if (err) + goto err_ruleset_ht_init; + sw->acl = acl; - acl->sw = sw; return 0; + +err_ruleset_ht_init: + rhashtable_destroy(&acl->acl_rule_entry_ht); +err_acl_rule_entry_ht_init: + kfree(acl); + return err; } void prestera_acl_fini(struct prestera_switch *sw) { struct prestera_acl *acl = sw->acl; + WARN_ON(!idr_is_empty(&acl->uid)); + idr_destroy(&acl->uid); + + WARN_ON(!list_empty(&acl->vtcam_list)); WARN_ON(!list_empty(&acl->rules)); + + rhashtable_destroy(&acl->ruleset_ht); + rhashtable_destroy(&acl->acl_rule_entry_ht); + kfree(acl); } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h index 39b7869be659..40f6c1d961fa 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h @@ -1,114 +1,130 @@ /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ -/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */ +/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. 
*/ #ifndef _PRESTERA_ACL_H_ #define _PRESTERA_ACL_H_ -enum prestera_acl_rule_match_entry_type { - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE = 1, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE, - PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE +#include <linux/types.h> +#include "prestera_counter.h" + +#define PRESTERA_ACL_KEYMASK_PCL_ID 0x3FF +#define PRESTERA_ACL_KEYMASK_PCL_ID_USER \ + (PRESTERA_ACL_KEYMASK_PCL_ID & 0x00FF) + +#define rule_match_set_n(match_p, type, val_p, size) \ + memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \ + val_p, size) +#define rule_match_set(match_p, type, val) \ + memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \ + &(val), sizeof(val)) + +enum prestera_acl_match_type { + PRESTERA_ACL_RULE_MATCH_TYPE_PCL_ID, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_TYPE, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_0, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_1, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_0, + PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_1, + PRESTERA_ACL_RULE_MATCH_TYPE_IP_PROTO, + PRESTERA_ACL_RULE_MATCH_TYPE_SYS_PORT, + PRESTERA_ACL_RULE_MATCH_TYPE_SYS_DEV, + PRESTERA_ACL_RULE_MATCH_TYPE_IP_SRC, + PRESTERA_ACL_RULE_MATCH_TYPE_IP_DST, + PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_SRC, + PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_DST, + PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_SRC, + PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_DST, + PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_ID, + 
PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_TPID, + PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_TYPE, + PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_CODE, + + __PRESTERA_ACL_RULE_MATCH_TYPE_MAX }; enum prestera_acl_rule_action { - PRESTERA_ACL_RULE_ACTION_ACCEPT, - PRESTERA_ACL_RULE_ACTION_DROP, - PRESTERA_ACL_RULE_ACTION_TRAP + PRESTERA_ACL_RULE_ACTION_ACCEPT = 0, + PRESTERA_ACL_RULE_ACTION_DROP = 1, + PRESTERA_ACL_RULE_ACTION_TRAP = 2, + PRESTERA_ACL_RULE_ACTION_COUNT = 7, + + PRESTERA_ACL_RULE_ACTION_MAX }; -struct prestera_switch; -struct prestera_port; -struct prestera_acl_rule; -struct prestera_acl_ruleset; +enum { + PRESTERA_ACL_IFACE_TYPE_PORT, + PRESTERA_ACL_IFACE_TYPE_INDEX +}; -struct prestera_flow_block_binding { - struct list_head list; - struct prestera_port *port; - int span_id; +struct prestera_acl_match { + __be32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + __be32 mask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; }; -struct prestera_flow_block { - struct list_head binding_list; - struct prestera_switch *sw; - struct net *net; - struct prestera_acl_ruleset *ruleset; - struct flow_block_cb *block_cb; +struct prestera_acl_action_count { + u32 id; }; -struct prestera_acl_rule_action_entry { - struct list_head list; - enum prestera_acl_rule_action id; +struct prestera_acl_rule_entry_key { + u32 prio; + struct prestera_acl_match match; }; -struct prestera_acl_rule_match_entry { - struct list_head list; - enum prestera_acl_rule_match_entry_type type; +struct prestera_acl_hw_action_info { + enum prestera_acl_rule_action id; union { + struct prestera_acl_action_count count; + }; +}; + +/* This struct (arg) used only to be passed as parameter for + * acl_rule_entry_create. Must be flat. 
Can contain object keys, which will be + * resolved to object links, before saving to acl_rule_entry struct + */ +struct prestera_acl_rule_entry_arg { + u32 vtcam_id; + struct { struct { - u8 key; - u8 mask; - } u8; - struct { - u16 key; - u16 mask; - } u16; - struct { - u32 key; - u32 mask; - } u32; - struct { - u64 key; - u64 mask; - } u64; + u8 valid:1; + } accept, drop, trap; struct { - u8 key[ETH_ALEN]; - u8 mask[ETH_ALEN]; - } mac; - } keymask; + u8 valid:1; + u32 client; + } count; + }; +}; + +struct prestera_acl_rule { + struct rhash_head ht_node; /* Member of acl HT */ + struct list_head list; + struct prestera_acl_ruleset *ruleset; + unsigned long cookie; + u32 priority; + struct prestera_acl_rule_entry_key re_key; + struct prestera_acl_rule_entry_arg re_arg; + struct prestera_acl_rule_entry *re; }; +struct prestera_acl_iface { + union { + struct prestera_port *port; + u32 index; + }; + u8 type; +}; + +struct prestera_acl; +struct prestera_switch; +struct prestera_flow_block; + int prestera_acl_init(struct prestera_switch *sw); void prestera_acl_fini(struct prestera_switch *sw); -struct prestera_flow_block * -prestera_acl_block_create(struct prestera_switch *sw, struct net *net); -void prestera_acl_block_destroy(struct prestera_flow_block *block); -struct net *prestera_acl_block_net(struct prestera_flow_block *block); -struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block); -int prestera_acl_block_bind(struct prestera_flow_block *block, - struct prestera_port *port); -int prestera_acl_block_unbind(struct prestera_flow_block *block, - struct prestera_port *port); -struct prestera_acl_ruleset * -prestera_acl_block_ruleset_get(struct prestera_flow_block *block); + struct prestera_acl_rule * -prestera_acl_rule_create(struct prestera_flow_block *block, +prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset, unsigned long cookie); -u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule); void 
prestera_acl_rule_priority_set(struct prestera_acl_rule *rule, u32 priority); -u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule); -struct list_head * -prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule); -u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule); -u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule); -int prestera_acl_rule_action_add(struct prestera_acl_rule *rule, - struct prestera_acl_rule_action_entry *entry); -struct list_head * -prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule); -int prestera_acl_rule_match_add(struct prestera_acl_rule *rule, - struct prestera_acl_rule_match_entry *entry); void prestera_acl_rule_destroy(struct prestera_acl_rule *rule); struct prestera_acl_rule * prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset, @@ -117,8 +133,39 @@ int prestera_acl_rule_add(struct prestera_switch *sw, struct prestera_acl_rule *rule); void prestera_acl_rule_del(struct prestera_switch *sw, struct prestera_acl_rule *rule); -int prestera_acl_rule_get_stats(struct prestera_switch *sw, +int prestera_acl_rule_get_stats(struct prestera_acl *acl, struct prestera_acl_rule *rule, u64 *packets, u64 *bytes, u64 *last_use); +struct prestera_acl_rule_entry * +prestera_acl_rule_entry_find(struct prestera_acl *acl, + struct prestera_acl_rule_entry_key *key); +void prestera_acl_rule_entry_destroy(struct prestera_acl *acl, + struct prestera_acl_rule_entry *e); +struct prestera_acl_rule_entry * +prestera_acl_rule_entry_create(struct prestera_acl *acl, + struct prestera_acl_rule_entry_key *key, + struct prestera_acl_rule_entry_arg *arg); +struct prestera_acl_ruleset * +prestera_acl_ruleset_get(struct prestera_acl *acl, + struct prestera_flow_block *block); +struct prestera_acl_ruleset * +prestera_acl_ruleset_lookup(struct prestera_acl *acl, + struct prestera_flow_block *block); +void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset, + void *keymask); +bool 
prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset); +int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset); +void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset); +int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset, + struct prestera_port *port); +int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset, + struct prestera_port *port); +void +prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, + u16 pcl_id); + +int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, + void *keymask, u32 *vtcam_id); +int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id); #endif /* _PRESTERA_ACL_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c new file mode 100644 index 000000000000..4cd53a2dae46 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2021 Marvell International Ltd. 
All rights reserved */ + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_acl.h" +#include "prestera_counter.h" + +#define COUNTER_POLL_TIME (msecs_to_jiffies(1000)) +#define COUNTER_RESCHED_TIME (msecs_to_jiffies(50)) +#define COUNTER_BULK_SIZE (256) + +struct prestera_counter { + struct prestera_switch *sw; + struct delayed_work stats_dw; + struct mutex mtx; /* protect block_list */ + struct prestera_counter_block **block_list; + u32 total_read; + u32 block_list_len; + u32 curr_idx; + bool is_fetching; +}; + +struct prestera_counter_block { + struct list_head list; + u32 id; + u32 offset; + u32 num_counters; + u32 client; + struct idr counter_idr; + refcount_t refcnt; + struct mutex mtx; /* protect stats and counter_idr */ + struct prestera_counter_stats *stats; + u8 *counter_flag; + bool is_updating; + bool full; +}; + +enum { + COUNTER_FLAG_READY = 0, + COUNTER_FLAG_INVALID = 1 +}; + +static bool +prestera_counter_is_ready(struct prestera_counter_block *block, u32 id) +{ + return block->counter_flag[id - block->offset] == COUNTER_FLAG_READY; +} + +static void prestera_counter_lock(struct prestera_counter *counter) +{ + mutex_lock(&counter->mtx); +} + +static void prestera_counter_unlock(struct prestera_counter *counter) +{ + mutex_unlock(&counter->mtx); +} + +static void prestera_counter_block_lock(struct prestera_counter_block *block) +{ + mutex_lock(&block->mtx); +} + +static void prestera_counter_block_unlock(struct prestera_counter_block *block) +{ + mutex_unlock(&block->mtx); +} + +static bool prestera_counter_block_incref(struct prestera_counter_block *block) +{ + return refcount_inc_not_zero(&block->refcnt); +} + +static bool prestera_counter_block_decref(struct prestera_counter_block *block) +{ + return refcount_dec_and_test(&block->refcnt); +} + +/* must be called with prestera_counter_block_lock() */ +static void prestera_counter_stats_clear(struct prestera_counter_block *block, + u32 counter_id) +{ + 
memset(&block->stats[counter_id - block->offset], 0, + sizeof(*block->stats)); +} + +static struct prestera_counter_block * +prestera_counter_block_lookup_not_full(struct prestera_counter *counter, + u32 client) +{ + u32 i; + + prestera_counter_lock(counter); + for (i = 0; i < counter->block_list_len; i++) { + if (counter->block_list[i] && + counter->block_list[i]->client == client && + !counter->block_list[i]->full && + prestera_counter_block_incref(counter->block_list[i])) { + prestera_counter_unlock(counter); + return counter->block_list[i]; + } + } + prestera_counter_unlock(counter); + + return NULL; +} + +static int prestera_counter_block_list_add(struct prestera_counter *counter, + struct prestera_counter_block *block) +{ + struct prestera_counter_block **arr; + u32 i; + + prestera_counter_lock(counter); + + for (i = 0; i < counter->block_list_len; i++) { + if (counter->block_list[i]) + continue; + + counter->block_list[i] = block; + prestera_counter_unlock(counter); + return 0; + } + + arr = krealloc(counter->block_list, (counter->block_list_len + 1) * + sizeof(*counter->block_list), GFP_KERNEL); + if (!arr) { + prestera_counter_unlock(counter); + return -ENOMEM; + } + + counter->block_list = arr; + counter->block_list[counter->block_list_len] = block; + counter->block_list_len++; + prestera_counter_unlock(counter); + return 0; +} + +static struct prestera_counter_block * +prestera_counter_block_get(struct prestera_counter *counter, u32 client) +{ + struct prestera_counter_block *block; + int err; + + block = prestera_counter_block_lookup_not_full(counter, client); + if (block) + return block; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return ERR_PTR(-ENOMEM); + + err = prestera_hw_counter_block_get(counter->sw, client, + &block->id, &block->offset, + &block->num_counters); + if (err) + goto err_block; + + block->stats = kcalloc(block->num_counters, + sizeof(*block->stats), GFP_KERNEL); + if (!block->stats) { + err = -ENOMEM; + goto 
err_stats; + } + + block->counter_flag = kcalloc(block->num_counters, + sizeof(*block->counter_flag), + GFP_KERNEL); + if (!block->counter_flag) { + err = -ENOMEM; + goto err_flag; + } + + block->client = client; + mutex_init(&block->mtx); + refcount_set(&block->refcnt, 1); + idr_init_base(&block->counter_idr, block->offset); + + err = prestera_counter_block_list_add(counter, block); + if (err) + goto err_list_add; + + return block; + +err_list_add: + idr_destroy(&block->counter_idr); + mutex_destroy(&block->mtx); + kfree(block->counter_flag); +err_flag: + kfree(block->stats); +err_stats: + prestera_hw_counter_block_release(counter->sw, block->id); +err_block: + kfree(block); + return ERR_PTR(err); +} + +static void prestera_counter_block_put(struct prestera_counter *counter, + struct prestera_counter_block *block) +{ + u32 i; + + if (!prestera_counter_block_decref(block)) + return; + + prestera_counter_lock(counter); + for (i = 0; i < counter->block_list_len; i++) { + if (counter->block_list[i] && + counter->block_list[i]->id == block->id) { + counter->block_list[i] = NULL; + break; + } + } + prestera_counter_unlock(counter); + + WARN_ON(!idr_is_empty(&block->counter_idr)); + + prestera_hw_counter_block_release(counter->sw, block->id); + idr_destroy(&block->counter_idr); + mutex_destroy(&block->mtx); + kfree(block->stats); + kfree(block); +} + +static int prestera_counter_get_vacant(struct prestera_counter_block *block, + u32 *id) +{ + int free_id; + + if (block->full) + return -ENOSPC; + + prestera_counter_block_lock(block); + free_id = idr_alloc_cyclic(&block->counter_idr, NULL, block->offset, + block->offset + block->num_counters, + GFP_KERNEL); + if (free_id < 0) { + if (free_id == -ENOSPC) + block->full = true; + + prestera_counter_block_unlock(block); + return free_id; + } + *id = free_id; + prestera_counter_block_unlock(block); + + return 0; +} + +int prestera_counter_get(struct prestera_counter *counter, u32 client, + struct prestera_counter_block **bl, 
u32 *counter_id) +{ + struct prestera_counter_block *block; + int err; + u32 id; + +get_next_block: + block = prestera_counter_block_get(counter, client); + if (IS_ERR(block)) + return PTR_ERR(block); + + err = prestera_counter_get_vacant(block, &id); + if (err) { + prestera_counter_block_put(counter, block); + + if (err == -ENOSPC) + goto get_next_block; + + return err; + } + + prestera_counter_block_lock(block); + if (block->is_updating) + block->counter_flag[id - block->offset] = COUNTER_FLAG_INVALID; + prestera_counter_block_unlock(block); + + *counter_id = id; + *bl = block; + + return 0; +} + +void prestera_counter_put(struct prestera_counter *counter, + struct prestera_counter_block *block, u32 counter_id) +{ + if (!block) + return; + + prestera_counter_block_lock(block); + idr_remove(&block->counter_idr, counter_id); + block->full = false; + prestera_counter_stats_clear(block, counter_id); + prestera_counter_block_unlock(block); + + prestera_hw_counter_clear(counter->sw, block->id, counter_id); + prestera_counter_block_put(counter, block); +} + +static u32 prestera_counter_block_idx_next(struct prestera_counter *counter, + u32 curr_idx) +{ + u32 idx, i, start = curr_idx + 1; + + prestera_counter_lock(counter); + for (i = 0; i < counter->block_list_len; i++) { + idx = (start + i) % counter->block_list_len; + if (!counter->block_list[idx]) + continue; + + prestera_counter_unlock(counter); + return idx; + } + prestera_counter_unlock(counter); + + return 0; +} + +static struct prestera_counter_block * +prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx) +{ + if (idx >= counter->block_list_len) + return NULL; + + prestera_counter_lock(counter); + + if (!counter->block_list[idx] || + !prestera_counter_block_incref(counter->block_list[idx])) { + prestera_counter_unlock(counter); + return NULL; + } + + prestera_counter_unlock(counter); + return counter->block_list[idx]; +} + +static void prestera_counter_stats_work(struct work_struct *work) 
+{ + struct delayed_work *dl_work = + container_of(work, struct delayed_work, work); + struct prestera_counter *counter = + container_of(dl_work, struct prestera_counter, stats_dw); + struct prestera_counter_block *block; + u32 resched_time = COUNTER_POLL_TIME; + u32 count = COUNTER_BULK_SIZE; + bool done = false; + int err; + u32 i; + + block = prestera_counter_block_get_by_idx(counter, counter->curr_idx); + if (!block) { + if (counter->is_fetching) + goto abort; + + goto next; + } + + if (!counter->is_fetching) { + err = prestera_hw_counter_trigger(counter->sw, block->id); + if (err) + goto abort; + + prestera_counter_block_lock(block); + block->is_updating = true; + prestera_counter_block_unlock(block); + + counter->is_fetching = true; + counter->total_read = 0; + resched_time = COUNTER_RESCHED_TIME; + goto resched; + } + + prestera_counter_block_lock(block); + err = prestera_hw_counters_get(counter->sw, counter->total_read, + &count, &done, + &block->stats[counter->total_read]); + prestera_counter_block_unlock(block); + if (err) + goto abort; + + counter->total_read += count; + if (!done || counter->total_read < block->num_counters) { + resched_time = COUNTER_RESCHED_TIME; + goto resched; + } + + for (i = 0; i < block->num_counters; i++) { + if (block->counter_flag[i] == COUNTER_FLAG_INVALID) { + prestera_counter_block_lock(block); + block->counter_flag[i] = COUNTER_FLAG_READY; + memset(&block->stats[i], 0, sizeof(*block->stats)); + prestera_counter_block_unlock(block); + } + } + + prestera_counter_block_lock(block); + block->is_updating = false; + prestera_counter_block_unlock(block); + + goto next; +abort: + prestera_hw_counter_abort(counter->sw); +next: + counter->is_fetching = false; + counter->curr_idx = + prestera_counter_block_idx_next(counter, counter->curr_idx); +resched: + if (block) + prestera_counter_block_put(counter, block); + + schedule_delayed_work(&counter->stats_dw, resched_time); +} + +/* Can be executed without rtnl_lock(). 
+ * So pay attention when something changing. + */ +int prestera_counter_stats_get(struct prestera_counter *counter, + struct prestera_counter_block *block, + u32 counter_id, u64 *packets, u64 *bytes) +{ + if (!block || !prestera_counter_is_ready(block, counter_id)) { + *packets = 0; + *bytes = 0; + return 0; + } + + prestera_counter_block_lock(block); + *packets = block->stats[counter_id - block->offset].packets; + *bytes = block->stats[counter_id - block->offset].bytes; + + prestera_counter_stats_clear(block, counter_id); + prestera_counter_block_unlock(block); + + return 0; +} + +int prestera_counter_init(struct prestera_switch *sw) +{ + struct prestera_counter *counter; + + counter = kzalloc(sizeof(*counter), GFP_KERNEL); + if (!counter) + return -ENOMEM; + + counter->block_list = kzalloc(sizeof(*counter->block_list), GFP_KERNEL); + if (!counter->block_list) { + kfree(counter); + return -ENOMEM; + } + + mutex_init(&counter->mtx); + counter->block_list_len = 1; + counter->sw = sw; + sw->counter = counter; + + INIT_DELAYED_WORK(&counter->stats_dw, prestera_counter_stats_work); + schedule_delayed_work(&counter->stats_dw, COUNTER_POLL_TIME); + + return 0; +} + +void prestera_counter_fini(struct prestera_switch *sw) +{ + struct prestera_counter *counter = sw->counter; + u32 i; + + cancel_delayed_work_sync(&counter->stats_dw); + + for (i = 0; i < counter->block_list_len; i++) + WARN_ON(counter->block_list[i]); + + mutex_destroy(&counter->mtx); + kfree(counter->block_list); + kfree(counter); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.h b/drivers/net/ethernet/marvell/prestera/prestera_counter.h new file mode 100644 index 000000000000..ad6b73907799 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2021 Marvell International Ltd. All rights reserved. 
*/ + +#ifndef _PRESTERA_COUNTER_H_ +#define _PRESTERA_COUNTER_H_ + +#include <linux/types.h> + +struct prestera_counter_stats { + u64 packets; + u64 bytes; +}; + +struct prestera_switch; +struct prestera_counter; +struct prestera_counter_block; + +int prestera_counter_init(struct prestera_switch *sw); +void prestera_counter_fini(struct prestera_switch *sw); + +int prestera_counter_get(struct prestera_counter *counter, u32 client, + struct prestera_counter_block **block, + u32 *counter_id); +void prestera_counter_put(struct prestera_counter *counter, + struct prestera_counter_block *block, u32 counter_id); +int prestera_counter_stats_get(struct prestera_counter *counter, + struct prestera_counter_block *block, + u32 counter_id, u64 *packets, u64 *bytes); + +#endif /* _PRESTERA_COUNTER_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c index c9891e968259..d849f046ece7 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c @@ -40,6 +40,11 @@ static int prestera_flow_block_flower_cb(struct prestera_flow_block *block, return 0; case FLOW_CLS_STATS: return prestera_flower_stats(block, f); + case FLOW_CLS_TMPLT_CREATE: + return prestera_flower_tmplt_create(block, f); + case FLOW_CLS_TMPLT_DESTROY: + prestera_flower_tmplt_destroy(block, f); + return 0; default: return -EOPNOTSUPP; } @@ -60,11 +65,102 @@ static int prestera_flow_block_cb(enum tc_setup_type type, } } +static void prestera_flow_block_destroy(void *cb_priv) +{ + struct prestera_flow_block *block = cb_priv; + + prestera_flower_template_cleanup(block); + + WARN_ON(!list_empty(&block->binding_list)); + + kfree(block); +} + +static struct prestera_flow_block * +prestera_flow_block_create(struct prestera_switch *sw, struct net *net) +{ + struct prestera_flow_block *block; + + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return NULL; + + 
INIT_LIST_HEAD(&block->binding_list); + block->net = net; + block->sw = sw; + + return block; +} + static void prestera_flow_block_release(void *cb_priv) { struct prestera_flow_block *block = cb_priv; - prestera_acl_block_destroy(block); + prestera_flow_block_destroy(block); +} + +static bool +prestera_flow_block_is_bound(const struct prestera_flow_block *block) +{ + return block->ruleset_zero; +} + +static struct prestera_flow_block_binding * +prestera_flow_block_lookup(struct prestera_flow_block *block, + struct prestera_port *port) +{ + struct prestera_flow_block_binding *binding; + + list_for_each_entry(binding, &block->binding_list, list) + if (binding->port == port) + return binding; + + return NULL; +} + +static int prestera_flow_block_bind(struct prestera_flow_block *block, + struct prestera_port *port) +{ + struct prestera_flow_block_binding *binding; + int err; + + binding = kzalloc(sizeof(*binding), GFP_KERNEL); + if (!binding) + return -ENOMEM; + + binding->span_id = PRESTERA_SPAN_INVALID_ID; + binding->port = port; + + if (prestera_flow_block_is_bound(block)) { + err = prestera_acl_ruleset_bind(block->ruleset_zero, port); + if (err) + goto err_ruleset_bind; + } + + list_add(&binding->list, &block->binding_list); + return 0; + +err_ruleset_bind: + kfree(binding); + return err; +} + +static int prestera_flow_block_unbind(struct prestera_flow_block *block, + struct prestera_port *port) +{ + struct prestera_flow_block_binding *binding; + + binding = prestera_flow_block_lookup(block, port); + if (!binding) + return -ENOENT; + + list_del(&binding->list); + + if (prestera_flow_block_is_bound(block)) + prestera_acl_ruleset_unbind(block->ruleset_zero, port); + + kfree(binding); + return 0; } static struct prestera_flow_block * @@ -78,7 +174,7 @@ prestera_flow_block_get(struct prestera_switch *sw, block_cb = flow_block_cb_lookup(f->block, prestera_flow_block_cb, sw); if (!block_cb) { - block = prestera_acl_block_create(sw, f->net); + block = 
prestera_flow_block_create(sw, f->net); if (!block) return ERR_PTR(-ENOMEM); @@ -86,7 +182,7 @@ prestera_flow_block_get(struct prestera_switch *sw, sw, block, prestera_flow_block_release); if (IS_ERR(block_cb)) { - prestera_acl_block_destroy(block); + prestera_flow_block_destroy(block); return ERR_CAST(block_cb); } @@ -110,7 +206,7 @@ static void prestera_flow_block_put(struct prestera_flow_block *block) return; flow_block_cb_free(block_cb); - prestera_acl_block_destroy(block); + prestera_flow_block_destroy(block); } static int prestera_setup_flow_block_bind(struct prestera_port *port, @@ -128,7 +224,7 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port, block_cb = block->block_cb; - err = prestera_acl_block_bind(block, port); + err = prestera_flow_block_bind(block, port); if (err) goto err_block_bind; @@ -162,7 +258,7 @@ static void prestera_setup_flow_block_unbind(struct prestera_port *port, prestera_span_destroy(block); - err = prestera_acl_block_unbind(block, port); + err = prestera_flow_block_unbind(block, port); if (err) goto error; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h index 467c7038cace..1ea5b745bf72 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h @@ -7,6 +7,24 @@ #include <net/flow_offload.h> struct prestera_port; +struct prestera_switch; +struct prestera_flower_template; + +struct prestera_flow_block_binding { + struct list_head list; + struct prestera_port *port; + int span_id; +}; + +struct prestera_flow_block { + struct list_head binding_list; + struct prestera_switch *sw; + struct net *net; + struct prestera_acl_ruleset *ruleset_zero; + struct flow_block_cb *block_cb; + struct prestera_flower_template *tmplt; + unsigned int rule_count; +}; int prestera_flow_block_setup(struct prestera_port *port, struct flow_block_offload *f); diff --git 
a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c index e571ba09ec08..19c1417fd05f 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -3,42 +3,61 @@ #include "prestera.h" #include "prestera_acl.h" +#include "prestera_flow.h" #include "prestera_flower.h" +struct prestera_flower_template { + struct prestera_acl_ruleset *ruleset; +}; + +void prestera_flower_template_cleanup(struct prestera_flow_block *block) +{ + if (block->tmplt) { + /* put the reference to the ruleset kept in create */ + prestera_acl_ruleset_put(block->tmplt->ruleset); + kfree(block->tmplt); + block->tmplt = NULL; + return; + } +} + static int prestera_flower_parse_actions(struct prestera_flow_block *block, struct prestera_acl_rule *rule, struct flow_action *flow_action, struct netlink_ext_ack *extack) { - struct prestera_acl_rule_action_entry a_entry; const struct flow_action_entry *act; - int err, i; + int i; + /* whole struct (rule->re_arg) must be initialized with 0 */ if (!flow_action_has_entries(flow_action)) return 0; flow_action_for_each(i, act, flow_action) { - memset(&a_entry, 0, sizeof(a_entry)); - switch (act->id) { case FLOW_ACTION_ACCEPT: - a_entry.id = PRESTERA_ACL_RULE_ACTION_ACCEPT; + if (rule->re_arg.accept.valid) + return -EEXIST; + + rule->re_arg.accept.valid = 1; break; case FLOW_ACTION_DROP: - a_entry.id = PRESTERA_ACL_RULE_ACTION_DROP; + if (rule->re_arg.drop.valid) + return -EEXIST; + + rule->re_arg.drop.valid = 1; break; case FLOW_ACTION_TRAP: - a_entry.id = PRESTERA_ACL_RULE_ACTION_TRAP; + if (rule->re_arg.trap.valid) + return -EEXIST; + + rule->re_arg.trap.valid = 1; break; default: NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); pr_err("Unsupported action\n"); return -EOPNOTSUPP; } - - err = prestera_acl_rule_action_add(rule, &a_entry); - if (err) - return err; } return 0; @@ -47,12 +66,12 @@ static int 
prestera_flower_parse_actions(struct prestera_flow_block *block, static int prestera_flower_parse_meta(struct prestera_acl_rule *rule, struct flow_cls_offload *f, struct prestera_flow_block *block) -{ - struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); - struct prestera_acl_rule_match_entry m_entry = {0}; +{ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); + struct prestera_acl_match *r_match = &rule->re_key.match; + struct prestera_port *port; struct net_device *ingress_dev; struct flow_match_meta match; - struct prestera_port *port; + __be16 key, mask; flow_rule_match_meta(f_rule, &match); if (match.mask->ingress_ifindex != 0xFFFFFFFF) { @@ -61,7 +80,7 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule, return -EINVAL; } - ingress_dev = __dev_get_by_index(prestera_acl_block_net(block), + ingress_dev = __dev_get_by_index(block->net, match.key->ingress_ifindex); if (!ingress_dev) { NL_SET_ERR_MSG_MOD(f->common.extack, @@ -76,22 +95,28 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule, } port = netdev_priv(ingress_dev); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT; - m_entry.keymask.u64.key = port->hw_id | ((u64)port->dev_id << 32); - m_entry.keymask.u64.mask = ~(u64)0; + mask = htons(0x1FFF); + key = htons(port->hw_id); + rule_match_set(r_match->key, SYS_PORT, key); + rule_match_set(r_match->mask, SYS_PORT, mask); + + mask = htons(0x1FF); + key = htons(port->dev_id); + rule_match_set(r_match->key, SYS_DEV, key); + rule_match_set(r_match->mask, SYS_DEV, mask); + + return 0; - return prestera_acl_rule_match_add(rule, &m_entry); } static int prestera_flower_parse(struct prestera_flow_block *block, struct prestera_acl_rule *rule, struct flow_cls_offload *f) -{ - struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); +{ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = f_rule->match.dissector; - struct prestera_acl_rule_match_entry m_entry; - u16 
n_proto_mask = 0; - u16 n_proto_key = 0; + struct prestera_acl_match *r_match = &rule->re_key.match; + __be16 n_proto_mask = 0; + __be16 n_proto_key = 0; u16 addr_type = 0; u8 ip_proto = 0; int err; @@ -129,32 +154,19 @@ static int prestera_flower_parse(struct prestera_flow_block *block, struct flow_match_basic match; flow_rule_match_basic(f_rule, &match); - n_proto_key = ntohs(match.key->n_proto); - n_proto_mask = ntohs(match.mask->n_proto); + n_proto_key = match.key->n_proto; + n_proto_mask = match.mask->n_proto; - if (n_proto_key == ETH_P_ALL) { + if (ntohs(match.key->n_proto) == ETH_P_ALL) { n_proto_key = 0; n_proto_mask = 0; } - /* add eth type key,mask */ - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE; - m_entry.keymask.u16.key = n_proto_key; - m_entry.keymask.u16.mask = n_proto_mask; - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; - - /* add ip proto key,mask */ - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO; - m_entry.keymask.u8.key = match.key->ip_proto; - m_entry.keymask.u8.mask = match.mask->ip_proto; - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, ETH_TYPE, n_proto_key); + rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask); + rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto); + rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto); ip_proto = match.key->ip_proto; } @@ -163,27 +175,27 @@ static int prestera_flower_parse(struct prestera_flow_block *block, flow_rule_match_eth_addrs(f_rule, &match); - /* add ethernet dst key,mask */ - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC; - memcpy(&m_entry.keymask.mac.key, - &match.key->dst, sizeof(match.key->dst)); - memcpy(&m_entry.keymask.mac.mask, - &match.mask->dst, sizeof(match.mask->dst)); - err = prestera_acl_rule_match_add(rule, 
&m_entry); - if (err) - return err; - - /* add ethernet src key,mask */ - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC; - memcpy(&m_entry.keymask.mac.key, - &match.key->src, sizeof(match.key->src)); - memcpy(&m_entry.keymask.mac.mask, - &match.mask->src, sizeof(match.mask->src)); - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + /* DA key, mask */ + rule_match_set_n(r_match->key, + ETH_DMAC_0, &match.key->dst[0], 4); + rule_match_set_n(r_match->key, + ETH_DMAC_1, &match.key->dst[4], 2); + + rule_match_set_n(r_match->mask, + ETH_DMAC_0, &match.mask->dst[0], 4); + rule_match_set_n(r_match->mask, + ETH_DMAC_1, &match.mask->dst[4], 2); + + /* SA key, mask */ + rule_match_set_n(r_match->key, + ETH_SMAC_0, &match.key->src[0], 4); + rule_match_set_n(r_match->key, + ETH_SMAC_1, &match.key->src[4], 2); + + rule_match_set_n(r_match->mask, + ETH_SMAC_0, &match.mask->src[0], 4); + rule_match_set_n(r_match->mask, + ETH_SMAC_1, &match.mask->src[4], 2); } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { @@ -191,25 +203,11 @@ static int prestera_flower_parse(struct prestera_flow_block *block, flow_rule_match_ipv4_addrs(f_rule, &match); - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC; - memcpy(&m_entry.keymask.u32.key, - &match.key->src, sizeof(match.key->src)); - memcpy(&m_entry.keymask.u32.mask, - &match.mask->src, sizeof(match.mask->src)); - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, IP_SRC, match.key->src); + rule_match_set(r_match->mask, IP_SRC, match.mask->src); - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST; - memcpy(&m_entry.keymask.u32.key, - &match.key->dst, sizeof(match.key->dst)); - memcpy(&m_entry.keymask.u32.mask, - &match.mask->dst, sizeof(match.mask->dst)); - err = prestera_acl_rule_match_add(rule, &m_entry); - if 
(err) - return err; + rule_match_set(r_match->key, IP_DST, match.key->dst); + rule_match_set(r_match->mask, IP_DST, match.mask->dst); } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) { @@ -224,21 +222,11 @@ static int prestera_flower_parse(struct prestera_flow_block *block, flow_rule_match_ports(f_rule, &match); - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC; - m_entry.keymask.u16.key = ntohs(match.key->src); - m_entry.keymask.u16.mask = ntohs(match.mask->src); - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, L4_PORT_SRC, match.key->src); + rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src); - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST; - m_entry.keymask.u16.key = ntohs(match.key->dst); - m_entry.keymask.u16.mask = ntohs(match.mask->dst); - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, L4_PORT_DST, match.key->dst); + rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst); } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) { @@ -247,22 +235,15 @@ static int prestera_flower_parse(struct prestera_flow_block *block, flow_rule_match_vlan(f_rule, &match); if (match.mask->vlan_id != 0) { - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID; - m_entry.keymask.u16.key = match.key->vlan_id; - m_entry.keymask.u16.mask = match.mask->vlan_id; - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + __be16 key = cpu_to_be16(match.key->vlan_id); + __be16 mask = cpu_to_be16(match.mask->vlan_id); + + rule_match_set(r_match->key, VLAN_ID, key); + rule_match_set(r_match->mask, VLAN_ID, mask); } - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID; - m_entry.keymask.u16.key = 
ntohs(match.key->vlan_tpid); - m_entry.keymask.u16.mask = ntohs(match.mask->vlan_tpid); - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid); + rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid); } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) { @@ -270,90 +251,164 @@ static int prestera_flower_parse(struct prestera_flow_block *block, flow_rule_match_icmp(f_rule, &match); - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE; - m_entry.keymask.u8.key = match.key->type; - m_entry.keymask.u8.mask = match.mask->type; - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, ICMP_TYPE, match.key->type); + rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type); - memset(&m_entry, 0, sizeof(m_entry)); - m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE; - m_entry.keymask.u8.key = match.key->code; - m_entry.keymask.u8.mask = match.mask->code; - err = prestera_acl_rule_match_add(rule, &m_entry); - if (err) - return err; + rule_match_set(r_match->key, ICMP_CODE, match.key->code); + rule_match_set(r_match->mask, ICMP_CODE, match.mask->code); } - return prestera_flower_parse_actions(block, rule, - &f->rule->action, + return prestera_flower_parse_actions(block, rule, &f->rule->action, f->common.extack); } int prestera_flower_replace(struct prestera_flow_block *block, struct flow_cls_offload *f) { - struct prestera_switch *sw = prestera_acl_block_sw(block); + struct prestera_acl_ruleset *ruleset; + struct prestera_acl *acl = block->sw->acl; struct prestera_acl_rule *rule; int err; - rule = prestera_acl_rule_create(block, f->cookie); - if (IS_ERR(rule)) - return PTR_ERR(rule); + ruleset = prestera_acl_ruleset_get(acl, block); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + /* increments the ruleset reference */ + rule = 
prestera_acl_rule_create(ruleset, f->cookie); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto err_rule_create; + } err = prestera_flower_parse(block, rule, f); if (err) - goto err_flower_parse; + goto err_rule_add; + + if (!prestera_acl_ruleset_is_offload(ruleset)) { + err = prestera_acl_ruleset_offload(ruleset); + if (err) + goto err_ruleset_offload; + } - err = prestera_acl_rule_add(sw, rule); + err = prestera_acl_rule_add(block->sw, rule); if (err) goto err_rule_add; + prestera_acl_ruleset_put(ruleset); return 0; +err_ruleset_offload: err_rule_add: -err_flower_parse: prestera_acl_rule_destroy(rule); +err_rule_create: + prestera_acl_ruleset_put(ruleset); return err; } void prestera_flower_destroy(struct prestera_flow_block *block, struct flow_cls_offload *f) { + struct prestera_acl_ruleset *ruleset; struct prestera_acl_rule *rule; - struct prestera_switch *sw; - rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block), - f->cookie); + ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block); + if (IS_ERR(ruleset)) + return; + + rule = prestera_acl_rule_lookup(ruleset, f->cookie); if (rule) { - sw = prestera_acl_block_sw(block); - prestera_acl_rule_del(sw, rule); + prestera_acl_rule_del(block->sw, rule); prestera_acl_rule_destroy(rule); } + prestera_acl_ruleset_put(ruleset); + +} + +int prestera_flower_tmplt_create(struct prestera_flow_block *block, + struct flow_cls_offload *f) +{ + struct prestera_flower_template *template; + struct prestera_acl_ruleset *ruleset; + struct prestera_acl_rule rule; + int err; + + memset(&rule, 0, sizeof(rule)); + err = prestera_flower_parse(block, &rule, f); + if (err) + return err; + + template = kmalloc(sizeof(*template), GFP_KERNEL); + if (!template) { + err = -ENOMEM; + goto err_malloc; + } + + prestera_acl_rule_keymask_pcl_id_set(&rule, 0); + ruleset = prestera_acl_ruleset_get(block->sw->acl, block); + if (IS_ERR_OR_NULL(ruleset)) { + err = -EINVAL; + goto err_ruleset_get; + } + + /* preserve 
keymask/template to this ruleset */ + prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask); + + /* skip error, as it is not possible to reject template operation, + * so, keep the reference to the ruleset for rules to be added + * to that ruleset later. In case of offload fail, the ruleset + * will be offloaded again during adding a new rule. Also, + * unlikly possble that ruleset is already offloaded at this staage. + */ + prestera_acl_ruleset_offload(ruleset); + + /* keep the reference to the ruleset */ + template->ruleset = ruleset; + block->tmplt = template; + return 0; + +err_ruleset_get: + kfree(template); +err_malloc: + NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed"); + return err; +} + +void prestera_flower_tmplt_destroy(struct prestera_flow_block *block, + struct flow_cls_offload *f) +{ + prestera_flower_template_cleanup(block); } int prestera_flower_stats(struct prestera_flow_block *block, struct flow_cls_offload *f) { - struct prestera_switch *sw = prestera_acl_block_sw(block); + struct prestera_acl_ruleset *ruleset; struct prestera_acl_rule *rule; u64 packets; u64 lastuse; u64 bytes; int err; - rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block), - f->cookie); - if (!rule) - return -EINVAL; + ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + rule = prestera_acl_rule_lookup(ruleset, f->cookie); + if (!rule) { + err = -EINVAL; + goto err_rule_get_stats; + } - err = prestera_acl_rule_get_stats(sw, rule, &packets, &bytes, &lastuse); + err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets, + &bytes, &lastuse); if (err) - return err; + goto err_rule_get_stats; flow_stats_update(&f->stats, bytes, packets, 0, lastuse, - FLOW_ACTION_HW_STATS_IMMEDIATE); - return 0; + FLOW_ACTION_HW_STATS_DELAYED); + +err_rule_get_stats: + prestera_acl_ruleset_put(ruleset); + return err; } diff --git 
a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h index 91e045eec58b..dc3aa4280e9f 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h @@ -1,11 +1,12 @@ /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ -/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */ +/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. */ #ifndef _PRESTERA_FLOWER_H_ #define _PRESTERA_FLOWER_H_ #include <net/pkt_cls.h> +struct prestera_switch; struct prestera_flow_block; int prestera_flower_replace(struct prestera_flow_block *block, @@ -14,5 +15,10 @@ void prestera_flower_destroy(struct prestera_flow_block *block, struct flow_cls_offload *f); int prestera_flower_stats(struct prestera_flow_block *block, struct flow_cls_offload *f); +int prestera_flower_tmplt_create(struct prestera_flow_block *block, + struct flow_cls_offload *f); +void prestera_flower_tmplt_destroy(struct prestera_flow_block *block, + struct flow_cls_offload *f); +void prestera_flower_template_cleanup(struct prestera_flow_block *block); #endif /* _PRESTERA_FLOWER_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c index 9b8b1ed474fc..51fc841b1e7a 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c @@ -9,6 +9,7 @@ #include "prestera.h" #include "prestera_hw.h" #include "prestera_acl.h" +#include "prestera_counter.h" #define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000) @@ -38,13 +39,24 @@ enum prestera_cmd_type_t { PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403, - PRESTERA_CMD_TYPE_ACL_RULE_ADD = 0x500, - PRESTERA_CMD_TYPE_ACL_RULE_DELETE = 0x501, - PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET = 0x510, - PRESTERA_CMD_TYPE_ACL_RULESET_CREATE = 0x520, - 
PRESTERA_CMD_TYPE_ACL_RULESET_DELETE = 0x521, - PRESTERA_CMD_TYPE_ACL_PORT_BIND = 0x530, - PRESTERA_CMD_TYPE_ACL_PORT_UNBIND = 0x531, + PRESTERA_CMD_TYPE_COUNTER_GET = 0x510, + PRESTERA_CMD_TYPE_COUNTER_ABORT = 0x511, + PRESTERA_CMD_TYPE_COUNTER_TRIGGER = 0x512, + PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET = 0x513, + PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE = 0x514, + PRESTERA_CMD_TYPE_COUNTER_CLEAR = 0x515, + + PRESTERA_CMD_TYPE_VTCAM_CREATE = 0x540, + PRESTERA_CMD_TYPE_VTCAM_DESTROY = 0x541, + PRESTERA_CMD_TYPE_VTCAM_RULE_ADD = 0x550, + PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE = 0x551, + PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND = 0x560, + PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND = 0x561, + + PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600, + PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601, + PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630, + PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631, PRESTERA_CMD_TYPE_RXTX_INIT = 0x800, @@ -359,76 +371,84 @@ struct prestera_msg_bridge_resp { u8 pad[2]; }; -struct prestera_msg_acl_action { - __le32 id; - __le32 reserved[5]; +struct prestera_msg_vtcam_create_req { + struct prestera_msg_cmd cmd; + __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + u8 direction; + u8 lookup; + u8 pad[2]; }; -struct prestera_msg_acl_match { - __le32 type; - __le32 __reserved; - union { - struct { - u8 key; - u8 mask; - } u8; - struct { - __le16 key; - __le16 mask; - } u16; - struct { - __le32 key; - __le32 mask; - } u32; - struct { - __le64 key; - __le64 mask; - } u64; - struct { - u8 key[ETH_ALEN]; - u8 mask[ETH_ALEN]; - } mac; - } keymask; +struct prestera_msg_vtcam_destroy_req { + struct prestera_msg_cmd cmd; + __le32 vtcam_id; }; -struct prestera_msg_acl_rule_req { +struct prestera_msg_vtcam_rule_add_req { struct prestera_msg_cmd cmd; - __le32 id; - __le32 priority; - __le16 ruleset_id; - u8 n_actions; - u8 n_matches; + __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + __le32 vtcam_id; + __le32 prio; + __le32 n_act; }; 
-struct prestera_msg_acl_rule_resp { - struct prestera_msg_ret ret; +struct prestera_msg_vtcam_rule_del_req { + struct prestera_msg_cmd cmd; + __le32 vtcam_id; __le32 id; }; -struct prestera_msg_acl_rule_stats_resp { +struct prestera_msg_vtcam_bind_req { + struct prestera_msg_cmd cmd; + union { + struct { + __le32 hw_id; + __le32 dev_id; + } port; + __le32 index; + }; + __le32 vtcam_id; + __le16 pcl_id; + __le16 type; +}; + +struct prestera_msg_vtcam_resp { struct prestera_msg_ret ret; - __le64 packets; - __le64 bytes; + __le32 vtcam_id; + __le32 rule_id; }; -struct prestera_msg_acl_ruleset_bind_req { - struct prestera_msg_cmd cmd; - __le32 port; - __le32 dev; - __le16 ruleset_id; - u8 pad[2]; +struct prestera_msg_acl_action { + __le32 id; + __le32 __reserved; + union { + struct { + __le32 id; + } count; + __le32 reserved[6]; + }; }; -struct prestera_msg_acl_ruleset_req { +struct prestera_msg_counter_req { struct prestera_msg_cmd cmd; - __le16 id; - u8 pad[2]; + __le32 client; + __le32 block_id; + __le32 num_counters; +}; + +struct prestera_msg_counter_stats { + __le64 packets; + __le64 bytes; }; -struct prestera_msg_acl_ruleset_resp { +struct prestera_msg_counter_resp { struct prestera_msg_ret ret; - __le16 id; - u8 pad[2]; + __le32 block_id; + __le32 offset; + __le32 num_counters; + __le32 done; + struct prestera_msg_counter_stats stats[]; }; struct prestera_msg_span_req { @@ -465,6 +485,48 @@ struct prestera_msg_rxtx_resp { __le32 map_addr; }; +struct prestera_msg_iface { + union { + struct { + __le32 dev; + __le32 port; + }; + __le16 lag_id; + }; + __le16 vr_id; + __le16 vid; + u8 type; + u8 __pad[3]; +}; + +struct prestera_msg_rif_req { + struct prestera_msg_cmd cmd; + struct prestera_msg_iface iif; + __le32 mtu; + __le16 rif_id; + __le16 __reserved; + u8 mac[ETH_ALEN]; + u8 __pad[2]; +}; + +struct prestera_msg_rif_resp { + struct prestera_msg_ret ret; + __le16 rif_id; + u8 __pad[2]; +}; + +struct prestera_msg_vr_req { + struct prestera_msg_cmd cmd; + __le16 
vr_id; + u8 __pad[2]; +}; + +struct prestera_msg_vr_resp { + struct prestera_msg_ret ret; + __le16 vr_id; + u8 __pad[2]; +}; + struct prestera_msg_lag_req { struct prestera_msg_cmd cmd; __le32 port; @@ -521,14 +583,24 @@ static void prestera_hw_build_tests(void) BUILD_BUG_ON(sizeof(struct prestera_msg_vlan_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_fdb_req) != 28); BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_req) != 16); - BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_req) != 16); - BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_bind_req) != 16); - BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_span_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_stp_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_lag_req) != 16); BUILD_BUG_ON(sizeof(struct prestera_msg_cpu_code_counter_req) != 8); + BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_create_req) != 84); + BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_destroy_req) != 8); + BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_add_req) != 168); + BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_del_req) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_bind_req) != 20); + BUILD_BUG_ON(sizeof(struct prestera_msg_acl_action) != 32); + BUILD_BUG_ON(sizeof(struct prestera_msg_counter_req) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36); + BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8); + + /* structure that are part of req/resp fw messages */ + BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16); /* check responses */ BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8); @@ -537,11 +609,12 @@ static void prestera_hw_build_tests(void) BUILD_BUG_ON(sizeof(struct prestera_msg_port_stats_resp) != 248); BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_resp) != 20); 
BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_resp) != 12); - BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_resp) != 12); - BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_stats_resp) != 24); - BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_span_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_resp) != 16); + BUILD_BUG_ON(sizeof(struct prestera_msg_counter_resp) != 24); + BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12); /* check events */ BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20); @@ -1044,225 +1117,162 @@ static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause) } } -int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id) +int prestera_hw_vtcam_create(struct prestera_switch *sw, + u8 lookup, const u32 *keymask, u32 *vtcam_id, + enum prestera_hw_vtcam_direction_t dir) { - struct prestera_msg_acl_ruleset_resp resp; - struct prestera_msg_acl_ruleset_req req; int err; + struct prestera_msg_vtcam_resp resp; + struct prestera_msg_vtcam_create_req req = { + .lookup = lookup, + .direction = dir, + }; + + if (keymask) + memcpy(req.keymask, keymask, sizeof(req.keymask)); + else + memset(req.keymask, 0, sizeof(req.keymask)); - err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULESET_CREATE, + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_CREATE, &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); if (err) return err; - *ruleset_id = __le16_to_cpu(resp.id); - + *vtcam_id = __le32_to_cpu(resp.vtcam_id); return 0; } -int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, u16 ruleset_id) +int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id) { - struct prestera_msg_acl_ruleset_req req = { - .id = __cpu_to_le16(ruleset_id), + struct prestera_msg_vtcam_destroy_req req = { + .vtcam_id = 
__cpu_to_le32(vtcam_id), }; - return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULESET_DELETE, + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_DESTROY, &req.cmd, sizeof(req)); } -static int prestera_hw_acl_actions_put(struct prestera_msg_acl_action *action, - struct prestera_acl_rule *rule) +static int +prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action, + struct prestera_acl_hw_action_info *info) { - struct list_head *a_list = prestera_acl_rule_action_list_get(rule); - struct prestera_acl_rule_action_entry *a_entry; - int i = 0; - - list_for_each_entry(a_entry, a_list, list) { - action[i].id = __cpu_to_le32(a_entry->id); - - switch (a_entry->id) { - case PRESTERA_ACL_RULE_ACTION_ACCEPT: - case PRESTERA_ACL_RULE_ACTION_DROP: - case PRESTERA_ACL_RULE_ACTION_TRAP: - /* just rule action id, no specific data */ - break; - default: - return -EINVAL; - } + action->id = __cpu_to_le32(info->id); - i++; - } - - return 0; -} - -static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match, - struct prestera_acl_rule *rule) -{ - struct list_head *m_list = prestera_acl_rule_match_list_get(rule); - struct prestera_acl_rule_match_entry *m_entry; - int i = 0; - - list_for_each_entry(m_entry, m_list, list) { - match[i].type = __cpu_to_le32(m_entry->type); - - switch (m_entry->type) { - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID: - match[i].keymask.u16.key = - __cpu_to_le16(m_entry->keymask.u16.key); - match[i].keymask.u16.mask = - __cpu_to_le16(m_entry->keymask.u16.mask); - break; - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO: - match[i].keymask.u8.key = m_entry->keymask.u8.key; - match[i].keymask.u8.mask = 
m_entry->keymask.u8.mask; - break; - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC: - memcpy(match[i].keymask.mac.key, - m_entry->keymask.mac.key, - sizeof(match[i].keymask.mac.key)); - memcpy(match[i].keymask.mac.mask, - m_entry->keymask.mac.mask, - sizeof(match[i].keymask.mac.mask)); - break; - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC: - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST: - match[i].keymask.u32.key = - __cpu_to_le32(m_entry->keymask.u32.key); - match[i].keymask.u32.mask = - __cpu_to_le32(m_entry->keymask.u32.mask); - break; - case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT: - match[i].keymask.u64.key = - __cpu_to_le64(m_entry->keymask.u64.key); - match[i].keymask.u64.mask = - __cpu_to_le64(m_entry->keymask.u64.mask); - break; - default: - return -EINVAL; - } - - i++; + switch (info->id) { + case PRESTERA_ACL_RULE_ACTION_ACCEPT: + case PRESTERA_ACL_RULE_ACTION_DROP: + case PRESTERA_ACL_RULE_ACTION_TRAP: + /* just rule action id, no specific data */ + break; + case PRESTERA_ACL_RULE_ACTION_COUNT: + action->count.id = __cpu_to_le32(info->count.id); + break; + default: + return -EINVAL; } return 0; } -int prestera_hw_acl_rule_add(struct prestera_switch *sw, - struct prestera_acl_rule *rule, - u32 *rule_id) +int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, + u32 vtcam_id, u32 prio, void *key, void *keymask, + struct prestera_acl_hw_action_info *act, + u8 n_act, u32 *rule_id) { - struct prestera_msg_acl_action *actions; - struct prestera_msg_acl_match *matches; - struct prestera_msg_acl_rule_resp resp; - struct prestera_msg_acl_rule_req *req; - u8 n_actions; - u8 n_matches; + struct prestera_msg_acl_action *actions_msg; + struct prestera_msg_vtcam_rule_add_req *req; + struct prestera_msg_vtcam_resp resp; void *buff; u32 size; int err; + u8 i; - n_actions = 
prestera_acl_rule_action_len(rule); - n_matches = prestera_acl_rule_match_len(rule); - - size = sizeof(*req) + sizeof(*actions) * n_actions + - sizeof(*matches) * n_matches; + size = sizeof(*req) + sizeof(*actions_msg) * n_act; buff = kzalloc(size, GFP_KERNEL); if (!buff) return -ENOMEM; req = buff; - actions = buff + sizeof(*req); - matches = buff + sizeof(*req) + sizeof(*actions) * n_actions; - - /* put acl actions into the message */ - err = prestera_hw_acl_actions_put(actions, rule); - if (err) - goto free_buff; + req->n_act = __cpu_to_le32(n_act); + actions_msg = buff + sizeof(*req); /* put acl matches into the message */ - err = prestera_hw_acl_matches_put(matches, rule); - if (err) - goto free_buff; + memcpy(req->key, key, sizeof(req->key)); + memcpy(req->keymask, keymask, sizeof(req->keymask)); - req->ruleset_id = __cpu_to_le16(prestera_acl_rule_ruleset_id_get(rule)); - req->priority = __cpu_to_le32(prestera_acl_rule_priority_get(rule)); - req->n_actions = prestera_acl_rule_action_len(rule); - req->n_matches = prestera_acl_rule_match_len(rule); + /* put acl actions into the message */ + for (i = 0; i < n_act; i++) { + err = prestera_acl_rule_add_put_action(&actions_msg[i], + &act[i]); + if (err) + goto free_buff; + } - err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_ADD, + req->vtcam_id = __cpu_to_le32(vtcam_id); + req->prio = __cpu_to_le32(prio); + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_ADD, &req->cmd, size, &resp.ret, sizeof(resp)); if (err) goto free_buff; - *rule_id = __le32_to_cpu(resp.id); + *rule_id = __le32_to_cpu(resp.rule_id); free_buff: kfree(buff); return err; } -int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id) +int prestera_hw_vtcam_rule_del(struct prestera_switch *sw, + u32 vtcam_id, u32 rule_id) { - struct prestera_msg_acl_rule_req req = { + struct prestera_msg_vtcam_rule_del_req req = { + .vtcam_id = __cpu_to_le32(vtcam_id), .id = __cpu_to_le32(rule_id) }; - return prestera_cmd(sw, 
PRESTERA_CMD_TYPE_ACL_RULE_DELETE, + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE, &req.cmd, sizeof(req)); } -int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id, - u64 *packets, u64 *bytes) +int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw, + struct prestera_acl_iface *iface, + u32 vtcam_id, u16 pcl_id) { - struct prestera_msg_acl_rule_stats_resp resp; - struct prestera_msg_acl_rule_req req = { - .id = __cpu_to_le32(rule_id) + struct prestera_msg_vtcam_bind_req req = { + .vtcam_id = __cpu_to_le32(vtcam_id), + .type = __cpu_to_le16(iface->type), + .pcl_id = __cpu_to_le16(pcl_id) }; - int err; - - err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET, - &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); - if (err) - return err; - - *packets = __le64_to_cpu(resp.packets); - *bytes = __le64_to_cpu(resp.bytes); - - return 0; -} -int prestera_hw_acl_port_bind(const struct prestera_port *port, u16 ruleset_id) -{ - struct prestera_msg_acl_ruleset_bind_req req = { - .port = __cpu_to_le32(port->hw_id), - .dev = __cpu_to_le32(port->dev_id), - .ruleset_id = __cpu_to_le16(ruleset_id), - }; + if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) { + req.port.dev_id = __cpu_to_le32(iface->port->dev_id); + req.port.hw_id = __cpu_to_le32(iface->port->hw_id); + } else { + req.index = __cpu_to_le32(iface->index); + } - return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_BIND, + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND, &req.cmd, sizeof(req)); } -int prestera_hw_acl_port_unbind(const struct prestera_port *port, - u16 ruleset_id) +int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw, + struct prestera_acl_iface *iface, + u32 vtcam_id) { - struct prestera_msg_acl_ruleset_bind_req req = { - .port = __cpu_to_le32(port->hw_id), - .dev = __cpu_to_le32(port->dev_id), - .ruleset_id = __cpu_to_le16(ruleset_id), + struct prestera_msg_vtcam_bind_req req = { + .vtcam_id = __cpu_to_le32(vtcam_id), + .type = 
__cpu_to_le16(iface->type) }; - return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_UNBIND, + if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) { + req.port.dev_id = __cpu_to_le32(iface->port->dev_id); + req.port.hw_id = __cpu_to_le32(iface->port->hw_id); + } else { + req.index = __cpu_to_le32(iface->index); + } + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND, &req.cmd, sizeof(req)); } @@ -1796,6 +1806,91 @@ int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id) &req.cmd, sizeof(req)); } +static int prestera_iface_to_msg(struct prestera_iface *iface, + struct prestera_msg_iface *msg_if) +{ + switch (iface->type) { + case PRESTERA_IF_PORT_E: + case PRESTERA_IF_VID_E: + msg_if->port = __cpu_to_le32(iface->dev_port.port_num); + msg_if->dev = __cpu_to_le32(iface->dev_port.hw_dev_num); + break; + case PRESTERA_IF_LAG_E: + msg_if->lag_id = __cpu_to_le16(iface->lag_id); + break; + default: + return -EOPNOTSUPP; + } + + msg_if->vr_id = __cpu_to_le16(iface->vr_id); + msg_if->vid = __cpu_to_le16(iface->vlan_id); + msg_if->type = iface->type; + return 0; +} + +int prestera_hw_rif_create(struct prestera_switch *sw, + struct prestera_iface *iif, u8 *mac, u16 *rif_id) +{ + struct prestera_msg_rif_req req; + struct prestera_msg_rif_resp resp; + int err; + + memcpy(req.mac, mac, ETH_ALEN); + + err = prestera_iface_to_msg(iif, &req.iif); + if (err) + return err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *rif_id = __le16_to_cpu(resp.rif_id); + return err; +} + +int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id, + struct prestera_iface *iif) +{ + struct prestera_msg_rif_req req = { + .rif_id = __cpu_to_le16(rif_id), + }; + int err; + + err = prestera_iface_to_msg(iif, &req.iif); + if (err) + return err; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE, &req.cmd, + sizeof(req)); +} + +int 
prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id) +{ + int err; + struct prestera_msg_vr_resp resp; + struct prestera_msg_vr_req req; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *vr_id = __le16_to_cpu(resp.vr_id); + return err; +} + +int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id) +{ + struct prestera_msg_vr_req req = { + .vr_id = __cpu_to_le16(vr_id), + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_VR_DELETE, &req.cmd, + sizeof(req)); +} + int prestera_hw_rxtx_init(struct prestera_switch *sw, struct prestera_rxtx_params *params) { @@ -1916,3 +2011,100 @@ void prestera_hw_event_handler_unregister(struct prestera_switch *sw, list_del_rcu(&eh->list); kfree_rcu(eh, rcu); } + +int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id) +{ + struct prestera_msg_counter_req req = { + .block_id = __cpu_to_le32(block_id) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_TRIGGER, + &req.cmd, sizeof(req)); +} + +int prestera_hw_counter_abort(struct prestera_switch *sw) +{ + struct prestera_msg_counter_req req; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_ABORT, + &req.cmd, sizeof(req)); +} + +int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx, + u32 *len, bool *done, + struct prestera_counter_stats *stats) +{ + struct prestera_msg_counter_resp *resp; + struct prestera_msg_counter_req req = { + .block_id = __cpu_to_le32(idx), + .num_counters = __cpu_to_le32(*len), + }; + size_t size = struct_size(resp, stats, *len); + int err, i; + + resp = kmalloc(size, GFP_KERNEL); + if (!resp) + return -ENOMEM; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_GET, + &req.cmd, sizeof(req), &resp->ret, size); + if (err) + goto free_buff; + + for (i = 0; i < __le32_to_cpu(resp->num_counters); i++) { + stats[i].packets += __le64_to_cpu(resp->stats[i].packets); + stats[i].bytes += 
__le64_to_cpu(resp->stats[i].bytes); + } + + *len = __le32_to_cpu(resp->num_counters); + *done = __le32_to_cpu(resp->done); + +free_buff: + kfree(resp); + return err; +} + +int prestera_hw_counter_block_get(struct prestera_switch *sw, + u32 client, u32 *block_id, u32 *offset, + u32 *num_counters) +{ + struct prestera_msg_counter_resp resp; + struct prestera_msg_counter_req req = { + .client = __cpu_to_le32(client) + }; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *block_id = __le32_to_cpu(resp.block_id); + *offset = __le32_to_cpu(resp.offset); + *num_counters = __le32_to_cpu(resp.num_counters); + + return 0; +} + +int prestera_hw_counter_block_release(struct prestera_switch *sw, + u32 block_id) +{ + struct prestera_msg_counter_req req = { + .block_id = __cpu_to_le32(block_id) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id, + u32 counter_id) +{ + struct prestera_msg_counter_req req = { + .block_id = __cpu_to_le32(block_id), + .num_counters = __cpu_to_le32(counter_id) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_CLEAR, + &req.cmd, sizeof(req)); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h index 57a3c2e5b112..3ff12bae5909 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h @@ -5,6 +5,7 @@ #define _PRESTERA_HW_H_ #include <linux/types.h> +#include "prestera_acl.h" enum prestera_accept_frm_type { PRESTERA_ACCEPT_FRAME_TYPE_TAGGED, @@ -111,18 +112,32 @@ enum prestera_hw_cpu_code_cnt_t { PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1, }; +enum prestera_hw_vtcam_direction_t { + PRESTERA_HW_VTCAM_DIR_INGRESS = 0, + PRESTERA_HW_VTCAM_DIR_EGRESS = 1, +}; + +enum { + 
PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0 = 0, + PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1 = 1, + PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2 = 2, +}; + struct prestera_switch; struct prestera_port; struct prestera_port_stats; struct prestera_port_caps; enum prestera_event_type; struct prestera_event; -struct prestera_acl_rule; typedef void (*prestera_event_cb_t) (struct prestera_switch *sw, struct prestera_event *evt, void *arg); struct prestera_rxtx_params; +struct prestera_acl_hw_action_info; +struct prestera_acl_iface; +struct prestera_counter_stats; +struct prestera_iface; /* Switch API */ int prestera_hw_switch_init(struct prestera_switch *sw); @@ -186,21 +201,37 @@ int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id); int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id); int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id); -/* ACL API */ -int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, - u16 *ruleset_id); -int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, - u16 ruleset_id); -int prestera_hw_acl_rule_add(struct prestera_switch *sw, - struct prestera_acl_rule *rule, - u32 *rule_id); -int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id); -int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, - u32 rule_id, u64 *packets, u64 *bytes); -int prestera_hw_acl_port_bind(const struct prestera_port *port, - u16 ruleset_id); -int prestera_hw_acl_port_unbind(const struct prestera_port *port, - u16 ruleset_id); +/* vTCAM API */ +int prestera_hw_vtcam_create(struct prestera_switch *sw, + u8 lookup, const u32 *keymask, u32 *vtcam_id, + enum prestera_hw_vtcam_direction_t direction); +int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, u32 vtcam_id, + u32 prio, void *key, void *keymask, + struct prestera_acl_hw_action_info *act, + u8 n_act, u32 *rule_id); +int prestera_hw_vtcam_rule_del(struct prestera_switch *sw, + u32 vtcam_id, u32 rule_id); +int 
prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id); +int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw, + struct prestera_acl_iface *iface, + u32 vtcam_id, u16 pcl_id); +int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw, + struct prestera_acl_iface *iface, + u32 vtcam_id); + +/* Counter API */ +int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id); +int prestera_hw_counter_abort(struct prestera_switch *sw); +int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx, + u32 *len, bool *done, + struct prestera_counter_stats *stats); +int prestera_hw_counter_block_get(struct prestera_switch *sw, + u32 client, u32 *block_id, u32 *offset, + u32 *num_counters); +int prestera_hw_counter_block_release(struct prestera_switch *sw, + u32 block_id); +int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id, + u32 counter_id); /* SPAN API */ int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id); @@ -208,6 +239,16 @@ int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id); int prestera_hw_span_unbind(const struct prestera_port *port); int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id); +/* Router API */ +int prestera_hw_rif_create(struct prestera_switch *sw, + struct prestera_iface *iif, u8 *mac, u16 *rif_id); +int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id, + struct prestera_iface *iif); + +/* Virtual Router API */ +int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id); +int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id); + /* Event handlers */ int prestera_hw_event_handler_register(struct prestera_switch *sw, enum prestera_event_type type, diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index c687dc9aa973..08fdd1e50388 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ 
b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -18,6 +18,7 @@ #include "prestera_rxtx.h" #include "prestera_devlink.h" #include "prestera_ethtool.h" +#include "prestera_counter.h" #include "prestera_switchdev.h" #define PRESTERA_MTU_DEFAULT 1536 @@ -162,7 +163,7 @@ static netdev_tx_t prestera_port_xmit(struct sk_buff *skb, return prestera_rxtx_xmit(netdev_priv(dev), skb); } -static int prestera_is_valid_mac_addr(struct prestera_port *port, u8 *addr) +int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr) { if (!is_valid_ether_addr(addr)) return -EADDRNOTAVAIL; @@ -901,6 +902,10 @@ static int prestera_switch_init(struct prestera_switch *sw) if (err) return err; + err = prestera_router_init(sw); + if (err) + goto err_router_init; + err = prestera_switchdev_init(sw); if (err) goto err_swdev_register; @@ -913,6 +918,10 @@ static int prestera_switch_init(struct prestera_switch *sw) if (err) goto err_handlers_register; + err = prestera_counter_init(sw); + if (err) + goto err_counter_init; + err = prestera_acl_init(sw); if (err) goto err_acl_init; @@ -945,12 +954,16 @@ err_dl_register: err_span_init: prestera_acl_fini(sw); err_acl_init: + prestera_counter_fini(sw); +err_counter_init: prestera_event_handlers_unregister(sw); err_handlers_register: prestera_rxtx_switch_fini(sw); err_rxtx_register: prestera_switchdev_fini(sw); err_swdev_register: + prestera_router_fini(sw); +err_router_init: prestera_netdev_event_handler_unregister(sw); prestera_hw_switch_fini(sw); @@ -965,6 +978,7 @@ static void prestera_switch_fini(struct prestera_switch *sw) prestera_devlink_traps_unregister(sw); prestera_span_fini(sw); prestera_acl_fini(sw); + prestera_counter_fini(sw); prestera_event_handlers_unregister(sw); prestera_rxtx_switch_fini(sw); prestera_switchdev_fini(sw); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c new file mode 100644 index 000000000000..8a3b7b664358 --- 
/dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/inetdevice.h> +#include <net/switchdev.h> + +#include "prestera.h" +#include "prestera_router_hw.h" + +/* This util to be used, to convert kernel rules for default vr in hw_vr */ +static u32 prestera_fix_tb_id(u32 tb_id) +{ + if (tb_id == RT_TABLE_UNSPEC || + tb_id == RT_TABLE_LOCAL || + tb_id == RT_TABLE_DEFAULT) + tb_id = RT_TABLE_MAIN; + + return tb_id; +} + +static int __prestera_inetaddr_port_event(struct net_device *port_dev, + unsigned long event, + struct netlink_ext_ack *extack) +{ + struct prestera_port *port = netdev_priv(port_dev); + int err; + struct prestera_rif_entry *re; + struct prestera_rif_entry_key re_key = {}; + u32 kern_tb_id; + + err = prestera_is_valid_mac_addr(port, port_dev->dev_addr); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "RIF MAC must have the same prefix"); + return err; + } + + kern_tb_id = l3mdev_fib_table(port_dev); + re_key.iface.type = PRESTERA_IF_PORT_E; + re_key.iface.dev_port.hw_dev_num = port->dev_id; + re_key.iface.dev_port.port_num = port->hw_id; + re = prestera_rif_entry_find(port->sw, &re_key); + + switch (event) { + case NETDEV_UP: + if (re) { + NL_SET_ERR_MSG_MOD(extack, "rif_entry already exist"); + return -EEXIST; + } + re = prestera_rif_entry_create(port->sw, &re_key, + prestera_fix_tb_id(kern_tb_id), + port_dev->dev_addr); + if (!re) { + NL_SET_ERR_MSG_MOD(extack, "Can't create rif_entry"); + return -EINVAL; + } + dev_hold(port_dev); + break; + case NETDEV_DOWN: + if (!re) { + NL_SET_ERR_MSG_MOD(extack, "rif_entry not exist"); + return -EEXIST; + } + prestera_rif_entry_destroy(port->sw, re); + dev_put(port_dev); + break; + } + + return 0; +} + +static int __prestera_inetaddr_event(struct prestera_switch *sw, + struct 
net_device *dev, + unsigned long event, + struct netlink_ext_ack *extack) +{ + if (prestera_netdev_check(dev) && !netif_is_bridge_port(dev) && + !netif_is_lag_port(dev) && !netif_is_ovs_port(dev)) + return __prestera_inetaddr_port_event(dev, event, extack); + + return 0; +} + +static int __prestera_inetaddr_cb(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + struct net_device *dev = ifa->ifa_dev->dev; + struct prestera_router *router = container_of(nb, + struct prestera_router, + inetaddr_nb); + struct in_device *idev; + int err = 0; + + if (event != NETDEV_DOWN) + goto out; + + /* Ignore if this is not latest address */ + idev = __in_dev_get_rtnl(dev); + if (idev && idev->ifa_list) + goto out; + + err = __prestera_inetaddr_event(router->sw, dev, event, NULL); +out: + return notifier_from_errno(err); +} + +static int __prestera_inetaddr_valid_cb(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct in_validator_info *ivi = (struct in_validator_info *)ptr; + struct net_device *dev = ivi->ivi_dev->dev; + struct prestera_router *router = container_of(nb, + struct prestera_router, + inetaddr_valid_nb); + struct in_device *idev; + int err = 0; + + if (event != NETDEV_UP) + goto out; + + /* Ignore if this is not first address */ + idev = __in_dev_get_rtnl(dev); + if (idev && idev->ifa_list) + goto out; + + if (ipv4_is_multicast(ivi->ivi_addr)) { + err = -EINVAL; + goto out; + } + + err = __prestera_inetaddr_event(router->sw, dev, event, ivi->extack); +out: + return notifier_from_errno(err); +} + +int prestera_router_init(struct prestera_switch *sw) +{ + struct prestera_router *router; + int err; + + router = kzalloc(sizeof(*sw->router), GFP_KERNEL); + if (!router) + return -ENOMEM; + + sw->router = router; + router->sw = sw; + + err = prestera_router_hw_init(sw); + if (err) + goto err_router_lib_init; + + router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb; + err = 
register_inetaddr_validator_notifier(&router->inetaddr_valid_nb); + if (err) + goto err_register_inetaddr_validator_notifier; + + router->inetaddr_nb.notifier_call = __prestera_inetaddr_cb; + err = register_inetaddr_notifier(&router->inetaddr_nb); + if (err) + goto err_register_inetaddr_notifier; + + return 0; + +err_register_inetaddr_notifier: + unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb); +err_register_inetaddr_validator_notifier: + /* prestera_router_hw_fini */ +err_router_lib_init: + kfree(sw->router); + return err; +} + +void prestera_router_fini(struct prestera_switch *sw) +{ + unregister_inetaddr_notifier(&sw->router->inetaddr_nb); + unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb); + /* router_hw_fini */ + kfree(sw->router); + sw->router = NULL; +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c new file mode 100644 index 000000000000..5866a4be50f5 --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2019-2021 Marvell International Ltd. 
All rights reserved */ + +#include <linux/rhashtable.h> + +#include "prestera.h" +#include "prestera_hw.h" +#include "prestera_router_hw.h" +#include "prestera_acl.h" + +/* +--+ + * +------->|vr| + * | +--+ + * | + * +-+-------+ + * |rif_entry| + * +---------+ + * Rif is + * used as + * entry point + * for vr in hw + */ + +int prestera_router_hw_init(struct prestera_switch *sw) +{ + INIT_LIST_HEAD(&sw->router->vr_list); + INIT_LIST_HEAD(&sw->router->rif_entry_list); + + return 0; +} + +static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw, + u32 tb_id) +{ + struct prestera_vr *vr; + + list_for_each_entry(vr, &sw->router->vr_list, router_node) { + if (vr->tb_id == tb_id) + return vr; + } + + return NULL; +} + +static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw, + u32 tb_id, + struct netlink_ext_ack *extack) +{ + struct prestera_vr *vr; + u16 hw_vr_id; + int err; + + err = prestera_hw_vr_create(sw, &hw_vr_id); + if (err) + return ERR_PTR(-ENOMEM); + + vr = kzalloc(sizeof(*vr), GFP_KERNEL); + if (!vr) { + err = -ENOMEM; + goto err_alloc_vr; + } + + vr->tb_id = tb_id; + vr->hw_vr_id = hw_vr_id; + + list_add(&vr->router_node, &sw->router->vr_list); + + return vr; + +err_alloc_vr: + prestera_hw_vr_delete(sw, hw_vr_id); + kfree(vr); + return ERR_PTR(err); +} + +static void __prestera_vr_destroy(struct prestera_switch *sw, + struct prestera_vr *vr) +{ + prestera_hw_vr_delete(sw, vr->hw_vr_id); + list_del(&vr->router_node); + kfree(vr); +} + +static struct prestera_vr *prestera_vr_get(struct prestera_switch *sw, u32 tb_id, + struct netlink_ext_ack *extack) +{ + struct prestera_vr *vr; + + vr = __prestera_vr_find(sw, tb_id); + if (!vr) + vr = __prestera_vr_create(sw, tb_id, extack); + if (IS_ERR(vr)) + return ERR_CAST(vr); + + return vr; +} + +static void prestera_vr_put(struct prestera_switch *sw, struct prestera_vr *vr) +{ + if (!vr->ref_cnt) + __prestera_vr_destroy(sw, vr); +} + +/* iface is overhead struct. 
vr_id also can be removed. */ +static int +__prestera_rif_entry_key_copy(const struct prestera_rif_entry_key *in, + struct prestera_rif_entry_key *out) +{ + memset(out, 0, sizeof(*out)); + + switch (in->iface.type) { + case PRESTERA_IF_PORT_E: + out->iface.dev_port.hw_dev_num = in->iface.dev_port.hw_dev_num; + out->iface.dev_port.port_num = in->iface.dev_port.port_num; + break; + case PRESTERA_IF_LAG_E: + out->iface.lag_id = in->iface.lag_id; + break; + case PRESTERA_IF_VID_E: + out->iface.vlan_id = in->iface.vlan_id; + break; + default: + pr_err("Unsupported iface type"); + return -EINVAL; + } + + out->iface.type = in->iface.type; + return 0; +} + +struct prestera_rif_entry * +prestera_rif_entry_find(const struct prestera_switch *sw, + const struct prestera_rif_entry_key *k) +{ + struct prestera_rif_entry *rif_entry; + struct prestera_rif_entry_key lk; /* lookup key */ + + if (__prestera_rif_entry_key_copy(k, &lk)) + return NULL; + + list_for_each_entry(rif_entry, &sw->router->rif_entry_list, + router_node) { + if (!memcmp(k, &rif_entry->key, sizeof(*k))) + return rif_entry; + } + + return NULL; +} + +void prestera_rif_entry_destroy(struct prestera_switch *sw, + struct prestera_rif_entry *e) +{ + struct prestera_iface iface; + + list_del(&e->router_node); + + memcpy(&iface, &e->key.iface, sizeof(iface)); + iface.vr_id = e->vr->hw_vr_id; + prestera_hw_rif_delete(sw, e->hw_id, &iface); + + e->vr->ref_cnt--; + prestera_vr_put(sw, e->vr); + kfree(e); +} + +struct prestera_rif_entry * +prestera_rif_entry_create(struct prestera_switch *sw, + struct prestera_rif_entry_key *k, + u32 tb_id, const unsigned char *addr) +{ + int err; + struct prestera_rif_entry *e; + struct prestera_iface iface; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + goto err_kzalloc; + + if (__prestera_rif_entry_key_copy(k, &e->key)) + goto err_key_copy; + + e->vr = prestera_vr_get(sw, tb_id, NULL); + if (IS_ERR(e->vr)) + goto err_vr_get; + + e->vr->ref_cnt++; + memcpy(&e->addr, addr, 
sizeof(e->addr)); + + /* HW */ + memcpy(&iface, &e->key.iface, sizeof(iface)); + iface.vr_id = e->vr->hw_vr_id; + err = prestera_hw_rif_create(sw, &iface, e->addr, &e->hw_id); + if (err) + goto err_hw_create; + + list_add(&e->router_node, &sw->router->rif_entry_list); + + return e; + +err_hw_create: + e->vr->ref_cnt--; + prestera_vr_put(sw, e->vr); +err_vr_get: +err_key_copy: + kfree(e); +err_kzalloc: + return NULL; +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h new file mode 100644 index 000000000000..fed53595f7bb --- /dev/null +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */ + +#ifndef _PRESTERA_ROUTER_HW_H_ +#define _PRESTERA_ROUTER_HW_H_ + +struct prestera_vr { + struct list_head router_node; + unsigned int ref_cnt; + u32 tb_id; /* key (kernel fib table id) */ + u16 hw_vr_id; /* virtual router ID */ + u8 __pad[2]; +}; + +struct prestera_rif_entry { + struct prestera_rif_entry_key { + struct prestera_iface iface; + } key; + struct prestera_vr *vr; + unsigned char addr[ETH_ALEN]; + u16 hw_id; /* rif_id */ + struct list_head router_node; /* ht */ +}; + +struct prestera_rif_entry * +prestera_rif_entry_find(const struct prestera_switch *sw, + const struct prestera_rif_entry_key *k); +void prestera_rif_entry_destroy(struct prestera_switch *sw, + struct prestera_rif_entry *e); +struct prestera_rif_entry * +prestera_rif_entry_create(struct prestera_switch *sw, + struct prestera_rif_entry_key *k, + u32 tb_id, const unsigned char *addr); +int prestera_router_hw_init(struct prestera_switch *sw); + +#endif /* _PRESTERA_ROUTER_HW_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c index 3cafca827bb7..845e9d8c8cc7 100644 --- 
a/drivers/net/ethernet/marvell/prestera/prestera_span.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c @@ -7,6 +7,7 @@ #include "prestera.h" #include "prestera_hw.h" #include "prestera_acl.h" +#include "prestera_flow.h" #include "prestera_span.h" struct prestera_span_entry { diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 1d607bc6b59e..52bef50f5a0d 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1388,7 +1388,6 @@ static int pxa168_eth_probe(struct platform_device *pdev) { struct pxa168_eth_private *pep = NULL; struct net_device *dev = NULL; - struct resource *res; struct clk *clk; struct device_node *np; int err; @@ -1419,9 +1418,11 @@ static int pxa168_eth_probe(struct platform_device *pdev) goto err_netdev; } - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - BUG_ON(!res); - dev->irq = res->start; + err = platform_get_irq(pdev, 0); + if (err == -EPROBE_DEFER) + goto err_netdev; + BUG_ON(dev->irq < 0); + dev->irq = err; dev->netdev_ops = &pxa168_eth_netdev_ops; dev->watchdog_timeo = 2 * HZ; dev->base_addr = 0; diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 0c864e5bf0a6..cf03c67fbf40 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -492,7 +492,9 @@ static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) } static void skge_get_ring_param(struct net_device *dev, - struct ethtool_ringparam *p) + struct ethtool_ringparam *p, + struct kernel_ethtool_ringparam *kernel_p, + struct netlink_ext_ack *extack) { struct skge_port *skge = netdev_priv(dev); @@ -504,7 +506,9 @@ static void skge_get_ring_param(struct net_device *dev, } static int skge_set_ring_param(struct net_device *dev, - struct ethtool_ringparam *p) + struct ethtool_ringparam *p, + struct kernel_ethtool_ringparam *kernel_p, + struct netlink_ext_ack *extack) { struct 
skge_port *skge = netdev_priv(dev); int err = 0; diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 28b5b9341145..ea16b1dd6a98 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4149,7 +4149,9 @@ static unsigned long roundup_ring_size(unsigned long pending) } static void sky2_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct sky2_port *sky2 = netdev_priv(dev); @@ -4161,7 +4163,9 @@ static void sky2_get_ringparam(struct net_device *dev, } static int sky2_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ering) + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct sky2_port *sky2 = netdev_priv(dev); @@ -4266,96 +4270,36 @@ static int sky2_get_eeprom_len(struct net_device *dev) return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); } -static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy) -{ - unsigned long start = jiffies; - - while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) { - /* Can take up to 10.6 ms for write */ - if (time_after(jiffies, start + HZ/4)) { - dev_err(&hw->pdev->dev, "VPD cycle timed out\n"); - return -ETIMEDOUT; - } - msleep(1); - } - - return 0; -} - -static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data, - u16 offset, size_t length) -{ - int rc = 0; - - while (length > 0) { - u32 val; - - sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); - rc = sky2_vpd_wait(hw, cap, 0); - if (rc) - break; - - val = sky2_pci_read32(hw, cap + PCI_VPD_DATA); - - memcpy(data, &val, min(sizeof(val), length)); - offset += sizeof(u32); - data += sizeof(u32); - length -= sizeof(u32); - } - - return rc; -} - -static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data, - u16 offset, unsigned 
int length) -{ - unsigned int i; - int rc = 0; - - for (i = 0; i < length; i += sizeof(u32)) { - u32 val = *(u32 *)(data + i); - - sky2_pci_write32(hw, cap + PCI_VPD_DATA, val); - sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); - - rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F); - if (rc) - break; - } - return rc; -} - static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct sky2_port *sky2 = netdev_priv(dev); - int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); - - if (!cap) - return -EINVAL; + int rc; eeprom->magic = SKY2_EEPROM_MAGIC; + rc = pci_read_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len, + data); + if (rc < 0) + return rc; + + eeprom->len = rc; - return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len); + return 0; } static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct sky2_port *sky2 = netdev_priv(dev); - int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); - - if (!cap) - return -EINVAL; + int rc; if (eeprom->magic != SKY2_EEPROM_MAGIC) return -EINVAL; - /* Partial writes not supported */ - if ((eeprom->offset & 3) || (eeprom->len & 3)) - return -EINVAL; + rc = pci_write_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len, + data); - return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len); + return rc < 0 ? rc : 0; } static netdev_features_t sky2_fix_features(struct net_device *dev, |