author		David S. Miller <davem@davemloft.net>	2020-02-27 07:30:43 +0300
committer	David S. Miller <davem@davemloft.net>	2020-02-27 07:30:43 +0300
commit		165b94ffcf8ef8165654df70f9f1c534797217cb
tree		a9cfc83d0cdf7e7128a7d466900d98357e5369d9 /drivers/net
parent		06baf4be207a0f1e72474ea4dfc7221259df7cf4
parent		586ee9e8a3b00757836787d91b4c369bc36d7928
Merge tag 'mlx5-updates-2020-02-25' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:
====================
mlx5-updates-2020-02-25
The following series provides miscellaneous updates to the mlx5 driver:
1) From Maxim: refactoring of the mlx5e netdev channel recreation flow
   (a standalone sketch of the resulting preactivate pattern follows this message).
 - Add error handling.
 - Add a context argument to the preactivate hook.
 - Use the preactivate hook with context wherever it can be used,
   and subsequently unify the channel recreation flow everywhere.
 - Fix the XPS cpumask so it is not reset upon channel recreation.
2) From Tariq (an indirect-call sketch also follows this message):
 - Use the indirect call wrappers on RX.
 - Check the LRO capability bit before enabling HW LRO.
3) Multiple small cleanups
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
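
Below is a minimal, standalone C sketch of the preactivate-hook pattern referenced in item 1: switch to a new channel configuration, run an optional callback that receives a context pointer, and roll back if that callback fails. The struct layout and names used here (struct priv, switch_channels, apply_trust_state) are simplified stand-ins for illustration, not the driver code shown in the diff below.

/*
 * Minimal standalone model (not driver code) of the preactivate-hook flow:
 * switch to a new channel configuration, run an optional callback that gets
 * a context pointer, and roll back if the callback fails.
 */
#include <stdio.h>

struct channels { int num; };
struct priv     { struct channels channels; };

/* Same shape as mlx5e_fp_preactivate: (priv, context) -> 0 or -errno. */
typedef int (*preactivate_fn)(struct priv *priv, void *context);

/* Example hook: applies a value carried through the context pointer. */
static int apply_trust_state(struct priv *priv, void *context)
{
	int *trust_state = context;

	printf("applying trust state %d with %d channels\n",
	       *trust_state, priv->channels.num);
	return 0;	/* return non-zero to exercise the rollback path */
}

static int switch_channels(struct priv *priv, struct channels *new_chs,
			   preactivate_fn preactivate, void *context)
{
	struct channels old_chs = priv->channels;
	int err = 0;

	priv->channels = *new_chs;

	/* New channels are in place; let the hook adjust the remaining state. */
	if (preactivate) {
		err = preactivate(priv, context);
		if (err)
			priv->channels = old_chs;	/* roll back on failure */
	}
	return err;
}

int main(void)
{
	struct priv priv = { .channels = { .num = 4 } };
	struct channels new_chs = { .num = 8 };
	int trust_state = 2;

	return switch_channels(&priv, &new_chs, apply_trust_state, &trust_state);
}

The driver additionally defines MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX() so that existing one-argument helpers such as mlx5e_set_dev_port_mtu() can be wrapped into context-taking hooks (see the en.h hunk below).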
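
For item 2, the RX path now goes through the indirect-call wrappers from <linux/indirect_call_wrapper.h>. The standalone sketch below only illustrates the idea with a simplified stand-in macro and invented handler names: compare the function pointer against the expected handlers and call them directly, so the common case avoids a retpoline-protected indirect branch.

/*
 * Standalone illustration of the indirect-call wrapper idea (simplified
 * stand-in for <linux/indirect_call_wrapper.h>; handler names are invented).
 */
#include <stdio.h>

/* Try the likely targets first so they become direct (inlinable) calls. */
#define INDIRECT_CALL_2(f, f2, f1, ...)				\
	((f) == (f2) ? (f2)(__VA_ARGS__) :			\
	 (f) == (f1) ? (f1)(__VA_ARGS__) : (f)(__VA_ARGS__))

struct rq {
	int (*post_wqes)(struct rq *rq);	/* RX descriptor refill hook */
};

static int post_rx_mpwqes(struct rq *rq)
{
	(void)rq;
	puts("refill via the striding-RQ handler");
	return 1;
}

static int post_rx_wqes(struct rq *rq)
{
	(void)rq;
	puts("refill via the legacy handler");
	return 1;
}

int main(void)
{
	struct rq rq = { .post_wqes = post_rx_mpwqes };

	/* Equivalent in spirit to: busy |= rq->post_wqes(rq); */
	int busy = INDIRECT_CALL_2(rq.post_wqes, post_rx_mpwqes,
				   post_rx_wqes, &rq);
	return !busy;
}

The kernel's real INDIRECT_CALL_2() also marks the comparisons as likely() and degenerates to a plain indirect call when retpolines are not enabled; the diff below applies it to rq->post_wqes and rq->handle_rx_cqe.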
Diffstat (limited to 'drivers/net')
 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c |   2
 drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h  |   2
 drivers/net/ethernet/mellanox/mlx5/core/en.h             |  29
 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c       |  55
 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c     |  23
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c        | 195
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c         |   2
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c          |   6
 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c          |  13
 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c        |  11
 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c    |   2
 drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c         |   2
 12 files changed, 215 insertions(+), 127 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 94d7b69a95c7..c9c9b479bda5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -188,7 +188,7 @@ static int mlx5_fw_tracer_create_mkey(struct mlx5_fw_tracer *tracer)
 	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
 		 DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2));
-	mtt = (u64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+	mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
 	for (i = 0 ; i < TRACER_BUFFER_PAGE_NUM ; i++)
 		mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h
index 3b7573461a45..148270073e71 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h
@@ -2,7 +2,7 @@
 /* Copyright (c) 2019 Mellanox Technologies. */
 #ifndef __MLX5_RSC_DUMP_H
-#define __MLX5_RSC_DUMP__H
+#define __MLX5_RSC_DUMP_H
 #include <linux/mlx5/driver.h>
 #include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 220ef9f06f84..3cc439ab3253 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -737,7 +737,6 @@ struct mlx5e_channel {
 	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
 	int			ix;
 	int			cpu;
-	cpumask_var_t		xps_cpumask;
 };
 struct mlx5e_channels {
@@ -813,6 +812,15 @@ struct mlx5e_xsk {
 	bool ever_used;
 };
+/* Temporary storage for variables that are allocated when struct mlx5e_priv is
+ * initialized, and used where we can't allocate them because that functions
+ * must not fail. Use with care and make sure the same variable is not used
+ * simultaneously by multiple users.
+ */
+struct mlx5e_scratchpad {
+	cpumask_var_t cpumask;
+};
+
 struct mlx5e_priv {
 	/* priv data path fields - start */
 	struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
@@ -876,6 +884,7 @@ struct mlx5e_priv {
 #if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
 	struct mlx5e_hv_vhca_stats_agent stats_agent;
 #endif
+	struct mlx5e_scratchpad    scratchpad;
 };
 struct mlx5e_profile {
@@ -1035,14 +1044,22 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
 			struct mlx5e_channels *chs);
 void mlx5e_close_channels(struct mlx5e_channels *chs);
-/* Function pointer to be used to modify WH settings while
+/* Function pointer to be used to modify HW or kernel settings while
  * switching channels
  */
-typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+typedef int (*mlx5e_fp_preactivate)(struct mlx5e_priv *priv, void *context);
+#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
+int fn##_ctx(struct mlx5e_priv *priv, void *context) \
+{ \
+	return fn(priv); \
+}
 int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
 int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
 			       struct mlx5e_channels *new_chs,
-			       mlx5e_fp_hw_modify hw_modify);
+			       mlx5e_fp_preactivate preactivate,
+			       void *context);
+int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
+int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
@@ -1122,10 +1139,10 @@ void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);
 void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
 int mlx5e_bits_invert(unsigned long a, int size);
-typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
 int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
+int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
-		     change_hw_mtu_cb set_mtu_cb);
+		     mlx5e_fp_preactivate preactivate);
 /* ethtool helpers */
 void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 01f2918063af..47874d34156b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1098,49 +1098,59 @@ void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
 	mlx5e_dcbnl_dscp_app(priv, DELETE);
 }
-static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
-						  struct mlx5e_params *params)
+static void mlx5e_params_calc_trust_tx_min_inline_mode(struct mlx5_core_dev *mdev,
+							struct mlx5e_params *params,
+							u8 trust_state)
 {
-	mlx5_query_min_inline(priv->mdev, &params->tx_min_inline_mode);
-	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
+	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
+	if (trust_state == MLX5_QPTS_TRUST_DSCP &&
 	    params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
 		params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
 }
-static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
+static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
+{
+	u8 *trust_state = context;
+	int err;
+
+	err = mlx5_set_trust_state(priv->mdev, *trust_state);
+	if (err)
+		return err;
+	priv->dcbx_dp.trust_state = *trust_state;
+
+	return 0;
+}
+
+static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
 {
 	struct mlx5e_channels new_channels = {};
+	bool reset_channels = true;
+	int err = 0;
 	mutex_lock(&priv->state_lock);
 	new_channels.params = priv->channels.params;
-	mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
+	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
+						   trust_state);
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		priv->channels.params = new_channels.params;
-		goto out;
+		reset_channels = false;
 	}
 	/* Skip if tx_min_inline is the same */
 	if (new_channels.params.tx_min_inline_mode ==
 	    priv->channels.params.tx_min_inline_mode)
-		goto out;
+		reset_channels = false;
-	mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+	if (reset_channels)
+		err = mlx5e_safe_switch_channels(priv, &new_channels,
+						 mlx5e_update_trust_state_hw,
+						 &trust_state);
+	else
+		err = mlx5e_update_trust_state_hw(priv, &trust_state);
-out:
 	mutex_unlock(&priv->state_lock);
-}
-
-static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
-{
-	int err;
-
-	err = mlx5_set_trust_state(priv->mdev, trust_state);
-	if (err)
-		return err;
-	priv->dcbx_dp.trust_state = trust_state;
-	mlx5e_trust_update_sq_inline_mode(priv);
 	return err;
 }
@@ -1171,7 +1181,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
 	if (err)
 		return err;
-	mlx5e_trust_update_tx_min_inline_mode(priv, &priv->channels.params);
+	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
+						   priv->dcbx_dp.trust_state);
 	err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 68b520df07e4..06f6f08ff5eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -357,7 +357,7 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 		goto unlock;
 	}
-	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 unlock:
 	mutex_unlock(&priv->state_lock);
@@ -432,9 +432,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		*cur_params = new_channels.params;
-		if (!netif_is_rxfh_configured(priv->netdev))
-			mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
-						      MLX5E_INDIR_RQT_SIZE, count);
+		mlx5e_num_channels_changed(priv);
 		goto out;
 	}
@@ -442,12 +440,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 	if (arfs_enabled)
 		mlx5e_arfs_disable(priv);
-	if (!netif_is_rxfh_configured(priv->netdev))
-		mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
-					      MLX5E_INDIR_RQT_SIZE, count);
-
 	/* Switch to new channels, set new parameters and close old ones */
-	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+	err = mlx5e_safe_switch_channels(priv, &new_channels,
+					 mlx5e_num_channels_changed_ctx, NULL);
 	if (arfs_enabled) {
 		int err2 = mlx5e_arfs_enable(priv);
@@ -580,7 +575,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
 		goto out;
 	}
-	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 out:
 	mutex_unlock(&priv->state_lock);
@@ -1748,7 +1743,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
 		return 0;
 	}
-	return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+	return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 }
 static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
@@ -1781,7 +1776,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
 		return 0;
 	}
-	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 	if (err)
 		return err;
@@ -1838,7 +1833,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
 		return 0;
 	}
-	return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+	return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 }
 static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
@@ -1882,7 +1877,7 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
 		return 0;
 	}
-	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 966983674663..d29e53c023f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1794,29 +1794,6 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
 	return err;
 }
-static int mlx5e_alloc_xps_cpumask(struct mlx5e_channel *c,
-				   struct mlx5e_params *params)
-{
-	int num_comp_vectors = mlx5_comp_vectors_count(c->mdev);
-	int irq;
-
-	if (!zalloc_cpumask_var(&c->xps_cpumask, GFP_KERNEL))
-		return -ENOMEM;
-
-	for (irq = c->ix; irq < num_comp_vectors; irq += params->num_channels) {
-		int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq));
-
-		cpumask_set_cpu(cpu, c->xps_cpumask);
-	}
-
-	return 0;
-}
-
-static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c)
-{
-	free_cpumask_var(c->xps_cpumask);
-}
-
 static int mlx5e_open_queues(struct mlx5e_channel *c,
 			     struct mlx5e_params *params,
 			     struct mlx5e_channel_param *cparam)
@@ -1967,10 +1944,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->irq_desc = irq_to_desc(irq);
 	c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
-	err = mlx5e_alloc_xps_cpumask(c, params);
-	if (err)
-		goto err_free_channel;
-
 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
 	err = mlx5e_open_queues(c, params, cparam);
@@ -1993,9 +1966,7 @@ err_close_queues:
 err_napi_del:
 	netif_napi_del(&c->napi);
-	mlx5e_free_xps_cpumask(c);
-err_free_channel:
 	kvfree(c);
 	return err;
@@ -2009,7 +1980,6 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
 		mlx5e_activate_txqsq(&c->sq[tc]);
 	mlx5e_activate_icosq(&c->icosq);
 	mlx5e_activate_rq(&c->rq);
-	netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);
 	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
 		mlx5e_activate_xsk(c);
@@ -2034,7 +2004,6 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
 		mlx5e_close_xsk(c);
 	mlx5e_close_queues(c);
 	netif_napi_del(&c->napi);
-	mlx5e_free_xps_cpumask(c);
 	kvfree(c);
 }
@@ -2784,6 +2753,8 @@ free_in:
 	return err;
 }
+static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro);
+
 static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
 			 struct mlx5e_params *params, u16 mtu)
 {
@@ -2833,6 +2804,8 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
 	return 0;
 }
+MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu);
+
 void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
 {
 	struct mlx5e_params *params = &priv->channels.params;
@@ -2869,6 +2842,54 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
 		netdev_set_tc_queue(netdev, tc, nch, 0);
 }
+static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv, u16 count)
+{
+	int num_txqs = count * priv->channels.params.num_tc;
+	int num_rxqs = count * priv->profile->rq_groups;
+	struct net_device *netdev = priv->netdev;
+
+	mlx5e_netdev_set_tcs(netdev);
+	netif_set_real_num_tx_queues(netdev, num_txqs);
+	netif_set_real_num_rx_queues(netdev, num_rxqs);
+}
+
+static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
+					   struct mlx5e_params *params)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int num_comp_vectors, ix, irq;
+
+	num_comp_vectors = mlx5_comp_vectors_count(mdev);
+
+	for (ix = 0; ix < params->num_channels; ix++) {
+		cpumask_clear(priv->scratchpad.cpumask);
+
+		for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
+			int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq));
+
+			cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
+		}
+
+		netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
+	}
+}
+
+int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
+{
+	u16 count = priv->channels.params.num_channels;
+
+	mlx5e_update_netdev_queues(priv, count);
+	mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
+
+	if (!netif_is_rxfh_configured(priv->netdev))
+		mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+					      MLX5E_INDIR_RQT_SIZE, count);
+
+	return 0;
+}
+
+MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed);
+
 static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
 {
 	int i, ch;
@@ -2890,14 +2911,6 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 {
-	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
-	int num_rxqs = priv->channels.num * priv->profile->rq_groups;
-	struct net_device *netdev = priv->netdev;
-
-	mlx5e_netdev_set_tcs(netdev);
-	netif_set_real_num_tx_queues(netdev, num_txqs);
-	netif_set_real_num_rx_queues(netdev, num_rxqs);
-
 	mlx5e_build_txq_maps(priv);
 	mlx5e_activate_channels(&priv->channels);
 	mlx5e_xdp_tx_enable(priv);
@@ -2930,42 +2943,52 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 	mlx5e_deactivate_channels(&priv->channels);
 }
-static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
-				       struct mlx5e_channels *new_chs,
-				       mlx5e_fp_hw_modify hw_modify)
+static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
+				      struct mlx5e_channels *new_chs,
+				      mlx5e_fp_preactivate preactivate,
+				      void *context)
 {
 	struct net_device *netdev = priv->netdev;
-	int new_num_txqs;
+	struct mlx5e_channels old_chs;
 	int carrier_ok;
-
-	new_num_txqs = new_chs->num * new_chs->params.num_tc;
+	int err = 0;
 	carrier_ok = netif_carrier_ok(netdev);
 	netif_carrier_off(netdev);
-	if (new_num_txqs < netdev->real_num_tx_queues)
-		netif_set_real_num_tx_queues(netdev, new_num_txqs);
-
 	mlx5e_deactivate_priv_channels(priv);
-	mlx5e_close_channels(&priv->channels);
+	old_chs = priv->channels;
 	priv->channels = *new_chs;
-	/* New channels are ready to roll, modify HW settings if needed */
-	if (hw_modify)
-		hw_modify(priv);
+	/* New channels are ready to roll, call the preactivate hook if needed
+	 * to modify HW settings or update kernel parameters.
+	 */
+	if (preactivate) {
+		err = preactivate(priv, context);
+		if (err) {
+			priv->channels = old_chs;
+			goto out;
+		}
+	}
+	mlx5e_close_channels(&old_chs);
 	priv->profile->update_rx(priv);
+
+out:
 	mlx5e_activate_priv_channels(priv);
 	/* return carrier back if needed */
 	if (carrier_ok)
 		netif_carrier_on(netdev);
+
+	return err;
 }
 int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
 			       struct mlx5e_channels *new_chs,
-			       mlx5e_fp_hw_modify hw_modify)
+			       mlx5e_fp_preactivate preactivate,
+			       void *context)
 {
 	int err;
@@ -2973,8 +2996,16 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
 	if (err)
 		return err;
-	mlx5e_switch_priv_channels(priv, new_chs, hw_modify);
+	err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
+	if (err)
+		goto err_close;
+
 	return 0;
+
+err_close:
+	mlx5e_close_channels(new_chs);
+
+	return err;
 }
@@ -2982,7 +3013,7 @@ int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
 	struct mlx5e_channels new_channels = {};
 	new_channels.params = priv->channels.params;
-	return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+	return mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 }
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
@@ -3431,7 +3462,8 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
 		goto out;
 	}
-	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+	err = mlx5e_safe_switch_channels(priv, &new_channels,
+					 mlx5e_num_channels_changed_ctx, NULL);
 	if (err)
 		goto out;
@@ -3644,7 +3676,8 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
 		goto out;
 	}
-	err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
+	err = mlx5e_safe_switch_channels(priv, &new_channels,
+					 mlx5e_modify_tirs_lro_ctx, NULL);
 out:
 	mutex_unlock(&priv->state_lock);
 	return err;
@@ -3863,7 +3896,7 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
 }
 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
-		     change_hw_mtu_cb set_mtu_cb)
+		     mlx5e_fp_preactivate preactivate)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_channels new_channels = {};
@@ -3912,13 +3945,13 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 	if (!reset) {
 		params->sw_mtu = new_mtu;
-		if (set_mtu_cb)
-			set_mtu_cb(priv);
+		if (preactivate)
+			preactivate(priv, NULL);
 		netdev->mtu = params->sw_mtu;
 		goto out;
 	}
-	err = mlx5e_safe_switch_channels(priv, &new_channels, set_mtu_cb);
+	err = mlx5e_safe_switch_channels(priv, &new_channels, preactivate, NULL);
 	if (err)
 		goto out;
@@ -3931,7 +3964,7 @@ out:
 static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
 {
-	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
+	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
 }
 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -4392,7 +4425,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 		mlx5e_set_rq_type(priv->mdev, &new_channels.params);
 		old_prog = priv->channels.params.xdp_prog;
-		err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+		err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 		if (err)
 			goto unlock;
 	} else {
@@ -4770,9 +4803,8 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv,
 	mlx5e_build_rq_params(mdev, params);
 	/* HW LRO */
-
-	/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
-	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+	if (MLX5_CAP_ETH(mdev, lro_cap) &&
+	    params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
 		/* No XSK params: checking the availability of striding RQ in general. */
 		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
 			params->lro_en = !slow_pci_heuristic(mdev);
@@ -5214,6 +5246,9 @@ int mlx5e_netdev_init(struct net_device *netdev,
 	priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
 	priv->max_opened_tc = 1;
+	if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
+		return -ENOMEM;
+
 	mutex_init(&priv->state_lock);
 	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
 	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
@@ -5222,7 +5257,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
 	priv->wq = create_singlethread_workqueue("mlx5e");
 	if (!priv->wq)
-		return -ENOMEM;
+		goto err_free_cpumask;
 	/* netdev init */
 	netif_carrier_off(netdev);
@@ -5232,11 +5267,17 @@ int mlx5e_netdev_init(struct net_device *netdev,
 #endif
 	return 0;
+
+err_free_cpumask:
+	free_cpumask_var(priv->scratchpad.cpumask);
+
+	return -ENOMEM;
 }
 void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
 {
 	destroy_workqueue(priv->wq);
+	free_cpumask_var(priv->scratchpad.cpumask);
 }
 struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
@@ -5271,6 +5312,7 @@ err_free_netdev:
 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 {
+	const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
 	const struct mlx5e_profile *profile;
 	int max_nch;
 	int err;
@@ -5282,10 +5324,25 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 	max_nch = mlx5e_get_max_num_channels(priv->mdev);
 	if (priv->channels.params.num_channels > max_nch) {
 		mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
+		/* Reducing the number of channels - RXFH has to be reset, and
+		 * mlx5e_num_channels_changed below will build the RQT.
+		 */
+		priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
 		priv->channels.params.num_channels = max_nch;
-		mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
-					      MLX5E_INDIR_RQT_SIZE, max_nch);
 	}
+	/* 1. Set the real number of queues in the kernel the first time.
+	 * 2. Set our default XPS cpumask.
+	 * 3. Build the RQT.
+	 *
+	 * rtnl_lock is required by netif_set_real_num_*_queues in case the
+	 * netdev has been registered by this point (if this function was called
+	 * in the reload or resume flow).
+	 */
+	if (take_rtnl)
+		rtnl_lock();
+	mlx5e_num_channels_changed(priv);
+	if (take_rtnl)
+		rtnl_unlock();
 	err = profile->init_tx(priv);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 7b48ccacebe2..6be85a6b11d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1396,7 +1396,7 @@ static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
 static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
 {
-	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
+	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
 }
 static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1c3ab69cbd96..065c74a2d0c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -158,7 +158,8 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
 		mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
 		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
-		rq->handle_rx_cqe(rq, &cqd->title);
+		INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+				mlx5e_handle_rx_cqe, rq, &cqd->title);
 	}
 	mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
 	wq->cc = cqcc;
@@ -178,7 +179,8 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 	mlx5e_read_title_slot(rq, wq, cc);
 	mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
 	mlx5e_decompress_cqe(rq, wq, cc);
-	rq->handle_rx_cqe(rq, &cqd->title);
+	INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+			mlx5e_handle_rx_cqe, rq, &cqd->title);
 	cqd->mini_arr_idx++;
 	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index ee60383adc5b..fd6b2a1898c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -72,8 +72,8 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 {
 	int txq_ix = netdev_pick_tx(dev, skb, NULL);
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	u16 num_channels;
 	int up = 0;
+	int ch_ix;
 	if (!netdev_get_num_tc(dev))
 		return txq_ix;
@@ -86,14 +86,13 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 	if (skb_vlan_tag_present(skb))
 		up = skb_vlan_tag_get_prio(skb);
-	/* txq_ix can be larger than num_channels since
-	 * dev->num_real_tx_queues = num_channels * num_tc
+	/* Normalize any picked txq_ix to [0, num_channels),
+	 * So we can return a txq_ix that matches the channel and
+	 * packet UP.
 	 */
-	num_channels = priv->channels.params.num_channels;
-	if (txq_ix >= num_channels)
-		txq_ix = priv->txq2sq[txq_ix]->ch_ix;
+	ch_ix = priv->txq2sq[txq_ix]->ch_ix;
-	return priv->channel_tc2realtxq[txq_ix][up];
+	return priv->channel_tc2realtxq[ch_ix][up];
 }
 static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 257a7c9f7a14..267f4535c36b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -31,6 +31,7 @@
  */
 #include <linux/irq.h>
+#include <linux/indirect_call_wrapper.h>
 #include "en.h"
 #include "en/xdp.h"
 #include "en/xsk/rx.h"
@@ -99,7 +100,10 @@ static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq)
 	busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
 	mlx5e_xsk_update_tx_wakeup(xsksq);
-	xsk_rx_alloc_err = xskrq->post_wqes(xskrq);
+	xsk_rx_alloc_err = INDIRECT_CALL_2(xskrq->post_wqes,
+					   mlx5e_post_rx_mpwqes,
+					   mlx5e_post_rx_wqes,
+					   xskrq);
 	busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err);
 	return busy_xsk;
@@ -142,7 +146,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	mlx5e_poll_ico_cq(&c->icosq.cq);
-	busy |= rq->post_wqes(rq);
+	busy |= INDIRECT_CALL_2(rq->post_wqes,
+				mlx5e_post_rx_mpwqes,
+				mlx5e_post_rx_wqes,
+				rq);
 	if (xsk_open) {
 		mlx5e_poll_ico_cq(&c->xskicosq.cq);
 		busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 56078b23f1a0..673aaa815f57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -483,7 +483,7 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
 	new_channels.params = *params;
 	new_channels.params.sw_mtu = new_mtu;
-	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+	err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 	if (err)
 		goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index e065c2f68f5a..6cbccba56f70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -21,7 +21,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
 	struct mlx5_dm *dm;
 	if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM))
-		return 0;
+		return NULL;
 	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
 	if (!dm)