Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/arc/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 45
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 239
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 52
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600-regmap.c | 10
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600.c | 5
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600_hw.h | 4
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 4
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 107
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 3
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vcap.c | 4
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c | 19
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 8
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 8
45 files changed, 470 insertions, 262 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index d796684ec9ca..412ae3e43ffb 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -100,6 +100,7 @@ config JME
config KORINA
tristate "Korina (IDT RC32434) Ethernet support"
depends on MIKROTIK_RB532 || COMPILE_TEST
+ select CRC32
select MII
help
If you have a Mikrotik RouterBoard 500 or IDT RC32434
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 37a41773dd43..92a79c4ffa2c 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -21,6 +21,7 @@ config ARC_EMAC_CORE
depends on ARC || ARCH_ROCKCHIP || COMPILE_TEST
select MII
select PHYLIB
+ select CRC32
config ARC_EMAC
tristate "ARC EMAC support"
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 1d3188e8e3b3..92dc18a4bcc4 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -780,7 +780,7 @@ struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
gve_num_tx_qpls(priv));
/* we are out of rx qpls */
- if (id == priv->qpl_cfg.qpl_map_size)
+ if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
return NULL;
set_bit(id, priv->qpl_cfg.qpl_id_map);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 099a2bc5ae67..bf8a4a7c43f7 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -41,6 +41,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
{
struct gve_priv *priv = netdev_priv(dev);
unsigned int start;
+ u64 packets, bytes;
int ring;
if (priv->rx) {
@@ -48,10 +49,12 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
do {
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
- s->rx_packets += priv->rx[ring].rpackets;
- s->rx_bytes += priv->rx[ring].rbytes;
+ packets = priv->rx[ring].rpackets;
+ bytes = priv->rx[ring].rbytes;
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
+ s->rx_packets += packets;
+ s->rx_bytes += bytes;
}
}
if (priv->tx) {
@@ -59,10 +62,12 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
do {
start =
u64_stats_fetch_begin(&priv->tx[ring].statss);
- s->tx_packets += priv->tx[ring].pkt_done;
- s->tx_bytes += priv->tx[ring].bytes_done;
+ packets = priv->tx[ring].pkt_done;
+ bytes = priv->tx[ring].bytes_done;
} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
start));
+ s->tx_packets += packets;
+ s->tx_bytes += bytes;
}
}
}
@@ -82,6 +87,9 @@ static int gve_alloc_counter_array(struct gve_priv *priv)
static void gve_free_counter_array(struct gve_priv *priv)
{
+ if (!priv->counter_array)
+ return;
+
dma_free_coherent(&priv->pdev->dev,
priv->num_event_counters *
sizeof(*priv->counter_array),
@@ -142,6 +150,9 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
static void gve_free_stats_report(struct gve_priv *priv)
{
+ if (!priv->stats_report)
+ return;
+
del_timer_sync(&priv->stats_report_timer);
dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
priv->stats_report, priv->stats_report_bus);
@@ -370,18 +381,19 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
{
int i;
- if (priv->msix_vectors) {
- /* Free the irqs */
- for (i = 0; i < priv->num_ntfy_blks; i++) {
- struct gve_notify_block *block = &priv->ntfy_blocks[i];
- int msix_idx = i;
+ if (!priv->msix_vectors)
+ return;
- irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
- NULL);
- free_irq(priv->msix_vectors[msix_idx].vector, block);
- }
- free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
+ /* Free the irqs */
+ for (i = 0; i < priv->num_ntfy_blks; i++) {
+ struct gve_notify_block *block = &priv->ntfy_blocks[i];
+ int msix_idx = i;
+
+ irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+ NULL);
+ free_irq(priv->msix_vectors[msix_idx].vector, block);
}
+ free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
dma_free_coherent(&priv->pdev->dev,
priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
priv->ntfy_blocks, priv->ntfy_block_bus);
@@ -1185,9 +1197,10 @@ static void gve_handle_reset(struct gve_priv *priv)
void gve_handle_report_stats(struct gve_priv *priv)
{
- int idx, stats_idx = 0, tx_bytes;
- unsigned int start = 0;
struct stats *stats = priv->stats_report->stats;
+ int idx, stats_idx = 0;
+ unsigned int start = 0;
+ u64 tx_bytes;
if (!gve_get_report_stats(priv))
return;
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index bb8261368250..94941d4e4744 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -104,8 +104,14 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
if (!rx->data.page_info)
return -ENOMEM;
- if (!rx->data.raw_addressing)
+ if (!rx->data.raw_addressing) {
rx->data.qpl = gve_assign_rx_qpl(priv);
+ if (!rx->data.qpl) {
+ kvfree(rx->data.page_info);
+ rx->data.page_info = NULL;
+ return -ENOMEM;
+ }
+ }
for (i = 0; i < slots; i++) {
if (!rx->data.raw_addressing) {
struct page *page = rx->data.qpl->pages[i];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2f20980dd9a5..e04b540cedc8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4871,7 +4871,8 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
int i;
- i40e_free_misc_vector(pf);
+ if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
+ i40e_free_misc_vector(pf);
i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
I40E_IWARP_IRQ_PILE_ID);
@@ -10113,7 +10114,7 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
/* retry with a larger buffer */
buf_len = data_size;
- } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
dev_info(&pf->pdev->dev,
"capability discovery failed, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, err),
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 23762a7ef740..cada4e0e40b4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1965,7 +1965,6 @@ static void iavf_watchdog_task(struct work_struct *work)
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 05cc5870e4ef..80380aed8882 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1313,22 +1313,21 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
u8 idx;
- spin_lock(&tx->lock);
-
for (idx = 0; idx < tx->len; idx++) {
u8 phy_idx = idx + tx->quad_offset;
- /* Clear any potential residual timestamp in the PHY block */
- if (!pf->hw.reset_ongoing)
- ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
-
+ spin_lock(&tx->lock);
if (tx->tstamps[idx].skb) {
dev_kfree_skb_any(tx->tstamps[idx].skb);
tx->tstamps[idx].skb = NULL;
}
- }
+ clear_bit(idx, tx->in_use);
+ spin_unlock(&tx->lock);
- spin_unlock(&tx->lock);
+ /* Clear any potential residual timestamp in the PHY block */
+ if (!pf->hw.reset_ongoing)
+ ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
+ }
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index cf97985628ab..02e77ffe5c3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -155,6 +155,8 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {};
int err;
+ mlx5_debug_cq_remove(dev, cq);
+
mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
mlx5_eq_del_cq(&cq->eq->core, cq);
@@ -162,16 +164,13 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
MLX5_SET(destroy_cq_in, in, uid, cq->uid);
err = mlx5_cmd_exec_in(dev, destroy_cq, in);
- if (err)
- return err;
synchronize_irq(cq->irqn);
- mlx5_debug_cq_remove(dev, cq);
mlx5_cq_put(cq);
wait_for_completion(&cq->free);
- return 0;
+ return err;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 7b8c8187543a..03a7a4ce5cd5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -252,6 +252,7 @@ struct mlx5e_params {
struct {
u16 mode;
u8 num_tc;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
} mqprio;
bool rx_cqe_compress_def;
bool tunneled_offload_en;
@@ -845,6 +846,7 @@ struct mlx5e_priv {
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
struct mlx5e_channel_stats trap_stats;
struct mlx5e_ptp_stats ptp_stats;
+ u16 stats_nch;
u16 max_nch;
u8 max_opened_tc;
bool tx_ptp_opened;
@@ -1100,12 +1102,6 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
-static inline unsigned int
-mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
-{
- return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
-}
-
static inline bool
mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
{
@@ -1114,11 +1110,13 @@ mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
}
int mlx5e_priv_init(struct mlx5e_priv *priv,
+ const struct mlx5e_profile *profile,
struct net_device *netdev,
struct mlx5_core_dev *mdev);
void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs);
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
+ unsigned int txqs, unsigned int rxqs);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
index ac44bbe95c5c..d290d7276b8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -35,7 +35,7 @@ static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, void *data,
{
int ch, i = 0;
- for (ch = 0; ch < priv->max_nch; ch++) {
+ for (ch = 0; ch < priv->stats_nch; ch++) {
void *buf = data + i;
if (WARN_ON_ONCE(buf +
@@ -51,7 +51,7 @@ static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, void *data,
static int mlx5e_hv_vhca_stats_buf_size(struct mlx5e_priv *priv)
{
return (sizeof(struct mlx5e_hv_vhca_per_ring_stats) *
- priv->max_nch);
+ priv->stats_nch);
}
static void mlx5e_hv_vhca_stats_work(struct work_struct *work)
@@ -100,7 +100,7 @@ static void mlx5e_hv_vhca_stats_control(struct mlx5_hv_vhca_agent *agent,
sagent = &priv->stats_agent;
block->version = MLX5_HV_VHCA_STATS_VERSION;
- block->rings = priv->max_nch;
+ block->rings = priv->stats_nch;
if (!block->command) {
cancel_delayed_work_sync(&priv->stats_agent.work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index ee688dec67a9..3a86f66d1295 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -13,8 +13,6 @@ struct mlx5e_ptp_fs {
bool valid;
};
-#define MLX5E_PTP_CHANNEL_IX 0
-
struct mlx5e_ptp_params {
struct mlx5e_params params;
struct mlx5e_sq_param txq_sq_param;
@@ -509,6 +507,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &c->priv->ptp_stats.rq;
+ rq->ix = MLX5E_PTP_CHANNEL_IX;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
err = mlx5e_rq_set_handlers(rq, params, false);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index c96668bd701c..a71a32e00ebb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -8,6 +8,8 @@
#include "en_stats.h"
#include <linux/ptp_classify.h>
+#define MLX5E_PTP_CHANNEL_IX 0
+
struct mlx5e_ptpsq {
struct mlx5e_txqsq txqsq;
struct mlx5e_cq ts_cq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index b5ddaa82755f..c6d2f8c78db7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -475,9 +475,6 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
goto err_alloc_wq;
}
- INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
- queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
- msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
err = register_switchdev_notifier(&br_offloads->nb);
@@ -500,6 +497,9 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
err);
goto err_register_netdev;
}
+ INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
+ queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+ msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
return;
err_register_netdev:
@@ -523,10 +523,10 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
if (!br_offloads)
return;
+ cancel_delayed_work_sync(&br_offloads->update_work);
unregister_netdevice_notifier(&br_offloads->netdev_nb);
unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
unregister_switchdev_notifier(&br_offloads->nb);
- cancel_delayed_work(&br_offloads->update_work);
destroy_workqueue(br_offloads->wq);
rtnl_lock();
mlx5_esw_bridge_cleanup(esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 306fb5d6a36d..9d451b8ee467 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -2036,6 +2036,17 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
}
new_params = priv->channels.params;
+ /* Don't allow enabling TX-port-TS if MQPRIO mode channel offload is
+ * active, since it defines explicitly which TC accepts the packet.
+ * This conflicts with TX-port-TS hijacking the PTP traffic to a specific
+ * HW TX-queue.
+ */
+ if (enable && new_params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
+ netdev_err(priv->netdev,
+ "%s: MQPRIO mode channel offload is active, cannot set the TX-port-TS\n",
+ __func__);
+ return -EINVAL;
+ }
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_PORT_TS, enable);
/* No need to verify SQ stop room as
* ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 3fd515e7bf30..09c8b71b186c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2264,7 +2264,7 @@ void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
}
static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
- struct tc_mqprio_qopt_offload *mqprio)
+ struct netdev_tc_txq *tc_to_txq)
{
int tc, err;
@@ -2282,11 +2282,8 @@ static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
for (tc = 0; tc < ntc; tc++) {
u16 count, offset;
- /* For DCB mode, map netdev TCs to offset 0
- * We have our own UP to TXQ mapping for QoS
- */
- count = mqprio ? mqprio->qopt.count[tc] : nch;
- offset = mqprio ? mqprio->qopt.offset[tc] : 0;
+ count = tc_to_txq[tc].count;
+ offset = tc_to_txq[tc].offset;
netdev_set_tc_queue(netdev, tc, count, offset);
}
@@ -2315,19 +2312,24 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
{
+ struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
struct net_device *netdev = priv->netdev;
int old_num_txqs, old_ntc;
int num_rxqs, nch, ntc;
int err;
+ int i;
old_num_txqs = netdev->real_num_tx_queues;
old_ntc = netdev->num_tc ? : 1;
+ for (i = 0; i < ARRAY_SIZE(old_tc_to_txq); i++)
+ old_tc_to_txq[i] = netdev->tc_to_txq[i];
nch = priv->channels.params.num_channels;
- ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
+ ntc = priv->channels.params.mqprio.num_tc;
num_rxqs = nch * priv->profile->rq_groups;
+ tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
- err = mlx5e_netdev_set_tcs(netdev, nch, ntc, NULL);
+ err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
if (err)
goto err_out;
err = mlx5e_update_tx_netdev_queues(priv);
@@ -2350,11 +2352,14 @@ err_txqs:
WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
err_tcs:
- mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc, NULL);
+ WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
+ old_tc_to_txq));
err_out:
return err;
}
+static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
+
static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
struct mlx5e_params *params)
{
@@ -2861,6 +2866,58 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
return 0;
}
+static void mlx5e_mqprio_build_default_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
+ int ntc, int nch)
+{
+ int tc;
+
+ memset(tc_to_txq, 0, sizeof(*tc_to_txq) * TC_MAX_QUEUE);
+
+ /* Map netdev TCs to offset 0.
+ * We have our own UP to TXQ mapping for DCB mode of QoS
+ */
+ for (tc = 0; tc < ntc; tc++) {
+ tc_to_txq[tc] = (struct netdev_tc_txq) {
+ .count = nch,
+ .offset = 0,
+ };
+ }
+}
+
+static void mlx5e_mqprio_build_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
+ struct tc_mqprio_qopt *qopt)
+{
+ int tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+ tc_to_txq[tc] = (struct netdev_tc_txq) {
+ .count = qopt->count[tc],
+ .offset = qopt->offset[tc],
+ };
+ }
+}
+
+static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
+{
+ params->mqprio.mode = TC_MQPRIO_MODE_DCB;
+ params->mqprio.num_tc = num_tc;
+ mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
+ params->num_channels);
+}
+
+static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
+ struct tc_mqprio_qopt *qopt)
+{
+ params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
+ params->mqprio.num_tc = qopt->num_tc;
+ mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
+}
+
+static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
+{
+ mlx5e_params_mqprio_dcb_set(params, 1);
+}
+
static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
struct tc_mqprio_qopt *mqprio)
{
@@ -2874,8 +2931,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
return -EINVAL;
new_params = priv->channels.params;
- new_params.mqprio.mode = TC_MQPRIO_MODE_DCB;
- new_params.mqprio.num_tc = tc ? tc : 1;
+ mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_num_channels_changed_ctx, NULL, true);
@@ -2889,9 +2945,17 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
struct net_device *netdev = priv->netdev;
+ struct mlx5e_ptp *ptp_channel;
int agg_count = 0;
int i;
+ ptp_channel = priv->channels.ptp;
+ if (ptp_channel && test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state)) {
+ netdev_err(netdev,
+ "Cannot activate MQPRIO mode channel since it conflicts with TX port TS\n");
+ return -EINVAL;
+ }
+
if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 ||
mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC)
return -EINVAL;
@@ -2917,8 +2981,8 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
agg_count += mqprio->qopt.count[i];
}
- if (priv->channels.params.num_channels < agg_count) {
- netdev_err(netdev, "Num of queues (%d) exceeds available (%d)\n",
+ if (priv->channels.params.num_channels != agg_count) {
+ netdev_err(netdev, "Num of queues (%d) does not match available (%d)\n",
agg_count, priv->channels.params.num_channels);
return -EINVAL;
}
@@ -2926,25 +2990,12 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
return 0;
}
-static int mlx5e_mqprio_channel_set_tcs_ctx(struct mlx5e_priv *priv, void *ctx)
-{
- struct tc_mqprio_qopt_offload *mqprio = (struct tc_mqprio_qopt_offload *)ctx;
- struct net_device *netdev = priv->netdev;
- u8 num_tc;
-
- if (priv->channels.params.mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
- return -EINVAL;
-
- num_tc = priv->channels.params.mqprio.num_tc;
- mlx5e_netdev_set_tcs(netdev, 0, num_tc, mqprio);
-
- return 0;
-}
-
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
+ mlx5e_fp_preactivate preactivate;
struct mlx5e_params new_params;
+ bool nch_changed;
int err;
err = mlx5e_mqprio_channel_validate(priv, mqprio);
@@ -2952,12 +3003,12 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
return err;
new_params = priv->channels.params;
- new_params.mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
- new_params.mqprio.num_tc = mqprio->qopt.num_tc;
- err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_mqprio_channel_set_tcs_ctx, mqprio, true);
+ mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt);
- return err;
+ nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
+ preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
+ mlx5e_update_netdev_queues_ctx;
+ return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
}
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -3065,7 +3116,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
{
int i;
- for (i = 0; i < priv->max_nch; i++) {
+ for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
@@ -3274,20 +3325,67 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable)
return mlx5_set_port_fcs(mdev, !enable);
}
+static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
+{
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
+ bool supported, curr_state;
+ int err;
+
+ if (!MLX5_CAP_GEN(mdev, ports_check))
+ return 0;
+
+ err = mlx5_query_ports_check(mdev, in, sizeof(in));
+ if (err)
+ return err;
+
+ supported = MLX5_GET(pcmr_reg, in, rx_ts_over_crc_cap);
+ curr_state = MLX5_GET(pcmr_reg, in, rx_ts_over_crc);
+
+ if (!supported || enable == curr_state)
+ return 0;
+
+ MLX5_SET(pcmr_reg, in, local_port, 1);
+ MLX5_SET(pcmr_reg, in, rx_ts_over_crc, enable);
+
+ return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_channels *chs = &priv->channels;
+ struct mlx5_core_dev *mdev = priv->mdev;
int err;
mutex_lock(&priv->state_lock);
- priv->channels.params.scatter_fcs_en = enable;
- err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
- if (err)
- priv->channels.params.scatter_fcs_en = !enable;
+ if (enable) {
+ err = mlx5e_set_rx_port_ts(mdev, false);
+ if (err)
+ goto out;
- mutex_unlock(&priv->state_lock);
+ chs->params.scatter_fcs_en = true;
+ err = mlx5e_modify_channels_scatter_fcs(chs, true);
+ if (err) {
+ chs->params.scatter_fcs_en = false;
+ mlx5e_set_rx_port_ts(mdev, true);
+ }
+ } else {
+ chs->params.scatter_fcs_en = false;
+ err = mlx5e_modify_channels_scatter_fcs(chs, false);
+ if (err) {
+ chs->params.scatter_fcs_en = true;
+ goto out;
+ }
+ err = mlx5e_set_rx_port_ts(mdev, true);
+ if (err) {
+ mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
+ err = 0;
+ }
+ }
+out:
+ mutex_unlock(&priv->state_lock);
return err;
}
@@ -4186,13 +4284,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
struct mlx5_core_dev *mdev = priv->mdev;
u8 rx_cq_period_mode;
- priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
-
params->sw_mtu = mtu;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
priv->max_nch);
- params->mqprio.num_tc = 1;
+ mlx5e_params_mqprio_reset(params);
/* Set an initial non-zero value, so that mlx5e_select_queue won't
* divide by zero if called before first activating channels.
@@ -4682,8 +4778,35 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rx_ptp_support = true,
};
+static unsigned int
+mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
+ const struct mlx5e_profile *profile)
+
+{
+ unsigned int max_nch, tmp;
+
+ /* core resources */
+ max_nch = mlx5e_get_max_num_channels(mdev);
+
+ /* netdev rx queues */
+ tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
+ max_nch = min_t(unsigned int, max_nch, tmp);
+
+ /* netdev tx queues */
+ tmp = netdev->num_tx_queues;
+ if (mlx5_qos_is_supported(mdev))
+ tmp -= mlx5e_qos_max_leaf_nodes(mdev);
+ if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
+ tmp -= profile->max_tc;
+ tmp = tmp / profile->max_tc;
+ max_nch = min_t(unsigned int, max_nch, tmp);
+
+ return max_nch;
+}
+
/* mlx5e generic netdev management API (move to en_common.c) */
int mlx5e_priv_init(struct mlx5e_priv *priv,
+ const struct mlx5e_profile *profile,
struct net_device *netdev,
struct mlx5_core_dev *mdev)
{
@@ -4691,6 +4814,8 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
priv->mdev = mdev;
priv->netdev = netdev;
priv->msglevel = MLX5E_MSG_LEVEL;
+ priv->max_nch = mlx5e_calc_max_nch(mdev, netdev, profile);
+ priv->stats_nch = priv->max_nch;
priv->max_opened_tc = 1;
if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
@@ -4734,7 +4859,8 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
}
struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs)
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
+ unsigned int txqs, unsigned int rxqs)
{
struct net_device *netdev;
int err;
@@ -4745,7 +4871,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int
return NULL;
}
- err = mlx5e_priv_init(netdev_priv(netdev), netdev, mdev);
+ err = mlx5e_priv_init(netdev_priv(netdev), profile, netdev, mdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
goto err_free_netdev;
@@ -4787,7 +4913,7 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
/* max number of channels may have changed */
- max_nch = mlx5e_get_max_num_channels(priv->mdev);
+ max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
if (priv->channels.params.num_channels > max_nch) {
mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
/* Reducing the number of channels - RXFH has to be reset, and
@@ -4795,7 +4921,18 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
*/
priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
priv->channels.params.num_channels = max_nch;
+ if (priv->channels.params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
+ mlx5_core_warn(priv->mdev, "MLX5E: Disabling MQPRIO channel mode\n");
+ mlx5e_params_mqprio_reset(&priv->channels.params);
+ }
}
+ if (max_nch != priv->max_nch) {
+ mlx5_core_warn(priv->mdev,
+ "MLX5E: Updating max number of channels from %u to %u\n",
+ priv->max_nch, max_nch);
+ priv->max_nch = max_nch;
+ }
+
/* 1. Set the real number of queues in the kernel the first time.
* 2. Set our default XPS cpumask.
* 3. Build the RQT.
@@ -4860,7 +4997,7 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
- err = mlx5e_priv_init(priv, netdev, mdev);
+ err = mlx5e_priv_init(priv, new_profile, netdev, mdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
return err;
@@ -4886,20 +5023,12 @@ priv_cleanup:
int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
const struct mlx5e_profile *new_profile, void *new_ppriv)
{
- unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile);
const struct mlx5e_profile *orig_profile = priv->profile;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
void *orig_ppriv = priv->ppriv;
int err, rollback_err;
- /* sanity */
- if (new_max_nch != priv->max_nch) {
- netdev_warn(netdev, "%s: Replacing profile with different max channels\n",
- __func__);
- return -EINVAL;
- }
-
/* cleanup old profile */
mlx5e_detach_netdev(priv);
priv->profile->cleanup(priv);
@@ -4995,7 +5124,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
nch = mlx5e_get_max_num_channels(mdev);
txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
rxqs = nch * profile->rq_groups;
- netdev = mlx5e_create_netdev(mdev, txqs, rxqs);
+ netdev = mlx5e_create_netdev(mdev, profile, txqs, rxqs);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index ae71a17fdb27..0684ac6699b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -596,7 +596,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
params = &priv->channels.params;
params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
@@ -619,6 +618,11 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
params->mqprio.num_tc = 1;
params->tunneled_offload_en = false;
+ /* Set an initial non-zero value, so that mlx5e_select_queue won't
+ * divide by zero if called before first activating channels.
+ */
+ priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
+
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
@@ -644,7 +648,6 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->hw_features |= NETIF_F_RXCSUM;
netdev->features |= netdev->hw_features;
- netdev->features |= NETIF_F_VLAN_CHALLENGED;
netdev->features |= NETIF_F_NETNS_LOCAL;
}
@@ -1169,7 +1172,7 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
nch = mlx5e_get_max_num_channels(dev);
txqs = nch * profile->max_tc;
rxqs = nch * profile->rq_groups;
- netdev = mlx5e_create_netdev(dev, txqs, rxqs);
+ netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs);
if (!netdev) {
mlx5_core_warn(dev,
"Failed to create representor netdev for vport %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3c65fd0bcf31..29a6586ef28d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1001,14 +1001,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
goto csum_unnecessary;
if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
- u8 ipproto = get_ip_proto(skb, network_depth, proto);
-
- if (unlikely(ipproto == IPPROTO_SCTP))
+ if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
goto csum_unnecessary;
- if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
- goto csum_none;
-
stats->csum_complete++;
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index e4f5b6395148..e1dd17019030 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -34,6 +34,7 @@
#include "en.h"
#include "en_accel/tls.h"
#include "en_accel/en_accel.h"
+#include "en/ptp.h"
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
@@ -450,7 +451,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
memset(s, 0, sizeof(*s));
- for (i = 0; i < priv->max_nch; i++) {
+ for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
int j;
@@ -2076,7 +2077,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_rq_stats_desc[i].format);
+ ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
}
return idx;
}
@@ -2119,7 +2120,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
- int max_nch = priv->max_nch;
+ int max_nch = priv->stats_nch;
return (NUM_RQ_STATS * max_nch) +
(NUM_CH_STATS * max_nch) +
@@ -2133,7 +2134,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
bool is_xsk = priv->xsk.ever_used;
- int max_nch = priv->max_nch;
+ int max_nch = priv->stats_nch;
int i, j, tc;
for (i = 0; i < max_nch; i++)
@@ -2175,7 +2176,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
bool is_xsk = priv->xsk.ever_used;
- int max_nch = priv->max_nch;
+ int max_nch = priv->stats_nch;
int i, j, tc;
for (i = 0; i < max_nch; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 0399a396d166..60a73990017c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -79,12 +79,16 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
int dest_num = 0;
int err = 0;
- if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+ if (vport->egress.legacy.drop_counter) {
+ drop_counter = vport->egress.legacy.drop_counter;
+ } else if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
drop_counter = mlx5_fc_create(esw->dev, false);
- if (IS_ERR(drop_counter))
+ if (IS_ERR(drop_counter)) {
esw_warn(esw->dev,
"vport[%d] configure egress drop rule counter err(%ld)\n",
vport->vport, PTR_ERR(drop_counter));
+ drop_counter = NULL;
+ }
vport->egress.legacy.drop_counter = drop_counter;
}
@@ -123,7 +127,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
/* Attach egress drop flow counter */
- if (!IS_ERR_OR_NULL(drop_counter)) {
+ if (drop_counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
@@ -162,7 +166,7 @@ void esw_acl_egress_lgcy_cleanup(struct mlx5_eswitch *esw,
esw_acl_egress_table_destroy(vport);
clean_drop_counter:
- if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) {
+ if (vport->egress.legacy.drop_counter) {
mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
vport->egress.legacy.drop_counter = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index f75b86abaf1c..b1a5199260f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -160,7 +160,9 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
esw_acl_ingress_lgcy_rules_destroy(vport);
- if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+ if (vport->ingress.legacy.drop_counter) {
+ counter = vport->ingress.legacy.drop_counter;
+ } else if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(counter)) {
esw_warn(esw->dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 67571e5040d6..269ebb53eda6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -113,7 +113,7 @@ static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
struct mlx5e_sw_stats s = { 0 };
int i, j;
- for (i = 0; i < priv->max_nch; i++) {
+ for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats;
struct mlx5e_rq_stats *rq_stats;
@@ -711,7 +711,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
goto destroy_ht;
}
- err = mlx5e_priv_init(epriv, netdev, mdev);
+ err = mlx5e_priv_init(epriv, prof, netdev, mdev);
if (err)
goto destroy_mdev_resources;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index ffac8a0e7a23..91e806c1aa21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -448,22 +448,20 @@ static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
return cycles_now + cycles_delta;
}
-static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev,
- s64 sec, u32 nsec)
+static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
{
- struct timespec64 ts;
+ struct timespec64 ts = {};
s64 target_ns;
ts.tv_sec = sec;
- ts.tv_nsec = nsec;
target_ns = timespec64_to_ns(&ts);
return find_target_cycles(mdev, target_ns);
}
-static u64 perout_conf_real_time(s64 sec, u32 nsec)
+static u64 perout_conf_real_time(s64 sec)
{
- return (u64)nsec | (u64)sec << 32;
+ return (u64)sec << 32;
}
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
@@ -474,6 +472,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev =
container_of(clock, struct mlx5_core_dev, clock);
+ bool rt_mode = mlx5_real_time_mode(mdev);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
struct timespec64 ts;
u32 field_select = 0;
@@ -501,8 +500,10 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (on) {
bool rt_mode = mlx5_real_time_mode(mdev);
- u32 nsec;
- s64 sec;
+ s64 sec = rq->perout.start.sec;
+
+ if (rq->perout.start.nsec)
+ return -EINVAL;
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
@@ -513,14 +514,11 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if ((ns >> 1) != 500000000LL)
return -EINVAL;
- nsec = rq->perout.start.nsec;
- sec = rq->perout.start.sec;
-
if (rt_mode && sec > U32_MAX)
return -EINVAL;
- time_stamp = rt_mode ? perout_conf_real_time(sec, nsec) :
- perout_conf_internal_timer(mdev, sec, nsec);
+ time_stamp = rt_mode ? perout_conf_real_time(sec) :
+ perout_conf_internal_timer(mdev, sec);
field_select |= MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
@@ -538,6 +536,9 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (err)
return err;
+ if (rt_mode)
+ return 0;
+
return mlx5_set_mtppse(mdev, pin, 0,
MLX5_EVENT_MODE_REPETETIVE & on);
}
@@ -705,20 +706,14 @@ static void ts_next_sec(struct timespec64 *ts)
static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
struct mlx5_clock *clock)
{
- bool rt_mode = mlx5_real_time_mode(mdev);
struct timespec64 ts;
s64 target_ns;
- if (rt_mode)
- ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
- else
- mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
-
+ mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
ts_next_sec(&ts);
target_ns = timespec64_to_ns(&ts);
- return rt_mode ? perout_conf_real_time(ts.tv_sec, ts.tv_nsec) :
- find_target_cycles(mdev, target_ns);
+ return find_target_cycles(mdev, target_ns);
}
static int mlx5_pps_event(struct notifier_block *nb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index c79a10b3454d..763c83a02380 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -13,8 +13,8 @@
#endif
#define MLX5_MAX_IRQ_NAME (32)
-/* max irq_index is 255. three chars */
-#define MLX5_MAX_IRQ_IDX_CHARS (3)
+/* max irq_index is 2047, so four chars */
+#define MLX5_MAX_IRQ_IDX_CHARS (4)
#define MLX5_SFS_PER_CTRL_IRQ 64
#define MLX5_IRQ_CTRL_SF_MAX 8
@@ -633,8 +633,9 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
{
if (table->sf_comp_pool)
- return table->sf_comp_pool->xa_num_irqs.max -
- table->sf_comp_pool->xa_num_irqs.min + 1;
+ return min_t(int, num_online_cpus(),
+ table->sf_comp_pool->xa_num_irqs.max -
+ table->sf_comp_pool->xa_num_irqs.min + 1);
else
return mlx5_irq_table_get_num_comp(table);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 0998dcc9cac0..b29824448aa8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -24,16 +24,8 @@
#define MLXSW_THERMAL_ZONE_MAX_NAME 16
#define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0)
#define MLXSW_THERMAL_MAX_STATE 10
+#define MLXSW_THERMAL_MIN_STATE 2
#define MLXSW_THERMAL_MAX_DUTY 255
-/* Minimum and maximum fan allowed speed in percent: from 20% to 100%. Values
- * MLXSW_THERMAL_MAX_STATE + x, where x is between 2 and 10 are used for
- * setting fan speed dynamic minimum. For example, if value is set to 14 (40%)
- * cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10 to
- * introduce PWM speed in percent: 40, 40, 40, 40, 40, 50, 60. 70, 80, 90, 100.
- */
-#define MLXSW_THERMAL_SPEED_MIN (MLXSW_THERMAL_MAX_STATE + 2)
-#define MLXSW_THERMAL_SPEED_MAX (MLXSW_THERMAL_MAX_STATE * 2)
-#define MLXSW_THERMAL_SPEED_MIN_LEVEL 2 /* 20% */
/* External cooling devices, allowed for binding to mlxsw thermal zones. */
static char * const mlxsw_thermal_external_allowed_cdev[] = {
@@ -646,49 +638,16 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
struct mlxsw_thermal *thermal = cdev->devdata;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- unsigned long cur_state, i;
int idx;
- u8 duty;
int err;
+ if (state > MLXSW_THERMAL_MAX_STATE)
+ return -EINVAL;
+
idx = mlxsw_get_cooling_device_idx(thermal, cdev);
if (idx < 0)
return idx;
- /* Verify if this request is for changing allowed fan dynamical
- * minimum. If it is - update cooling levels accordingly and update
- * state, if current state is below the newly requested minimum state.
- * For example, if current state is 5, and minimal state is to be
- * changed from 4 to 6, thermal->cooling_levels[0 to 5] will be changed
- * all from 4 to 6. And state 5 (thermal->cooling_levels[4]) should be
- * overwritten.
- */
- if (state >= MLXSW_THERMAL_SPEED_MIN &&
- state <= MLXSW_THERMAL_SPEED_MAX) {
- state -= MLXSW_THERMAL_MAX_STATE;
- for (i = 0; i <= MLXSW_THERMAL_MAX_STATE; i++)
- thermal->cooling_levels[i] = max(state, i);
-
- mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
- err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
- if (err)
- return err;
-
- duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl);
- cur_state = mlxsw_duty_to_state(duty);
-
- /* If current fan state is lower than requested dynamical
- * minimum, increase fan speed up to dynamical minimum.
- */
- if (state < cur_state)
- return 0;
-
- state = cur_state;
- }
-
- if (state > MLXSW_THERMAL_MAX_STATE)
- return -EINVAL;
-
/* Normalize the state to the valid speed range. */
state = thermal->cooling_levels[state];
mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
@@ -998,8 +957,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
/* Initialize cooling levels per PWM state. */
for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
- thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL,
- i);
+ thermal->cooling_levels[i] = max(MLXSW_THERMAL_MIN_STATE, i);
thermal->polling_delay = bus_info->low_frequency ?
MLXSW_THERMAL_SLOW_POLL_INT :
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 796e46a53926..81a8ccca7e5e 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -497,13 +497,19 @@ static struct regmap_bus phymap_encx24j600 = {
.reg_read = regmap_encx24j600_phy_reg_read,
};
-void devm_regmap_init_encx24j600(struct device *dev,
- struct encx24j600_context *ctx)
+int devm_regmap_init_encx24j600(struct device *dev,
+ struct encx24j600_context *ctx)
{
mutex_init(&ctx->mutex);
regcfg.lock_arg = ctx;
ctx->regmap = devm_regmap_init(dev, &regmap_encx24j600, ctx, &regcfg);
+ if (IS_ERR(ctx->regmap))
+ return PTR_ERR(ctx->regmap);
ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg);
+ if (IS_ERR(ctx->phymap))
+ return PTR_ERR(ctx->phymap);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index ee921a99e439..0bc6b3176fbf 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -1023,10 +1023,13 @@ static int encx24j600_spi_probe(struct spi_device *spi)
priv->speed = SPEED_100;
priv->ctx.spi = spi;
- devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
ndev->irq = spi->irq;
ndev->netdev_ops = &encx24j600_netdev_ops;
+ ret = devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
+ if (ret)
+ goto out_free;
+
mutex_init(&priv->lock);
/* Reset device and check if it is connected */
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
index fac61a8fbd02..34c5a289898c 100644
--- a/drivers/net/ethernet/microchip/encx24j600_hw.h
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -15,8 +15,8 @@ struct encx24j600_context {
int bank;
};
-void devm_regmap_init_encx24j600(struct device *dev,
- struct encx24j600_context *ctx);
+int devm_regmap_init_encx24j600(struct device *dev,
+ struct encx24j600_context *ctx);
/* Single-byte instructions */
#define BANK_SELECT(bank) (0xC0 | ((bank & (BANK_MASK >> BANK_SHIFT)) << 1))
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 1b21030308e5..030ae89f3a33 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1477,8 +1477,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
if (err)
goto out;
- if (cq->gdma_id >= gc->max_num_cqs)
+ if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
+ err = -EINVAL;
goto out;
+ }
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 559177e6ded4..a08e4f530c1c 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -472,9 +472,9 @@ void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
!(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP))
ocelot_port_rmwl(ocelot_port,
DEV_CLOCK_CFG_MAC_TX_RST |
- DEV_CLOCK_CFG_MAC_TX_RST,
+ DEV_CLOCK_CFG_MAC_RX_RST,
DEV_CLOCK_CFG_MAC_TX_RST |
- DEV_CLOCK_CFG_MAC_TX_RST,
+ DEV_CLOCK_CFG_MAC_RX_RST,
DEV_CLOCK_CFG);
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down);
@@ -569,49 +569,44 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
-static void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
- struct sk_buff *clone)
+static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ocelot->ts_id_lock, flags);
- spin_lock(&ocelot_port->ts_id_lock);
+ if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
+ ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
+ spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+ return -EBUSY;
+ }
skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
/* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
- ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
- skb_queue_tail(&ocelot_port->tx_skbs, clone);
- spin_unlock(&ocelot_port->ts_id_lock);
-}
+ ocelot_port->ts_id++;
+ if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
+ ocelot_port->ts_id = 0;
-u32 ocelot_ptp_rew_op(struct sk_buff *skb)
-{
- struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
- u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
- u32 rew_op = 0;
+ ocelot_port->ptp_skbs_in_flight++;
+ ocelot->ptp_skbs_in_flight++;
- if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
- rew_op = ptp_cmd;
- rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
- } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
- rew_op = ptp_cmd;
- }
+ skb_queue_tail(&ocelot_port->tx_skbs, clone);
- return rew_op;
+ spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+
+ return 0;
}
-EXPORT_SYMBOL(ocelot_ptp_rew_op);
-static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb)
+static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
+ unsigned int ptp_class)
{
struct ptp_header *hdr;
- unsigned int ptp_class;
u8 msgtype, twostep;
- ptp_class = ptp_classify_raw(skb);
- if (ptp_class == PTP_CLASS_NONE)
- return false;
-
hdr = ptp_parse_header(skb, ptp_class);
if (!hdr)
return false;
@@ -631,10 +626,20 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 ptp_cmd = ocelot_port->ptp_cmd;
+ unsigned int ptp_class;
+ int err;
+
+ /* Don't do anything if PTP timestamping not enabled */
+ if (!ptp_cmd)
+ return 0;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
/* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
- if (ocelot_ptp_is_onestep_sync(skb)) {
+ if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
return 0;
}
@@ -648,8 +653,12 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
if (!(*clone))
return -ENOMEM;
- ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+ err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+ if (err)
+ return err;
+
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+ OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
}
return 0;
@@ -683,6 +692,17 @@ static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
}
+static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
+{
+ struct ptp_header *hdr;
+
+ hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
+ if (WARN_ON(!hdr))
+ return false;
+
+ return seqid == ntohs(hdr->sequence_id);
+}
+
void ocelot_get_txtstamp(struct ocelot *ocelot)
{
int budget = OCELOT_PTP_QUEUE_SZ;
@@ -690,10 +710,10 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
while (budget--) {
struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shhwtstamps;
+ u32 val, id, seqid, txport;
struct ocelot_port *port;
struct timespec64 ts;
unsigned long flags;
- u32 val, id, txport;
val = ocelot_read(ocelot, SYS_PTP_STATUS);
@@ -706,10 +726,17 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
/* Retrieve the ts ID and Tx port */
id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+ seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
- /* Retrieve its associated skb */
port = ocelot->ports[txport];
+ spin_lock(&ocelot->ts_id_lock);
+ port->ptp_skbs_in_flight--;
+ ocelot->ptp_skbs_in_flight--;
+ spin_unlock(&ocelot->ts_id_lock);
+
+ /* Retrieve its associated skb */
+try_again:
spin_lock_irqsave(&port->tx_skbs.lock, flags);
skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
@@ -722,12 +749,20 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+ if (WARN_ON(!skb_match))
+ continue;
+
+ if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
+ dev_err_ratelimited(ocelot->dev,
+ "port %d received stale TX timestamp for seqid %d, discarding\n",
+ txport, seqid);
+ dev_kfree_skb_any(skb);
+ goto try_again;
+ }
+
/* Get the h/w timestamp */
ocelot_get_hwtimestamp(ocelot, &ts);
- if (unlikely(!skb_match))
- continue;
-
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
@@ -1948,7 +1983,6 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
struct ocelot_port *ocelot_port = ocelot->ports[port];
skb_queue_head_init(&ocelot_port->tx_skbs);
- spin_lock_init(&ocelot_port->ts_id_lock);
/* Basic L2 initialization */
@@ -2081,6 +2115,7 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
+ spin_lock_init(&ocelot->ts_id_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(ocelot->dev));
ocelot->stats_queue = create_singlethread_workqueue(queue_name);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index e54b9fb2a97a..2545727fd5b2 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -8,6 +8,7 @@
* Copyright 2020-2021 NXP
*/
+#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
@@ -1625,7 +1626,7 @@ static int ocelot_port_phylink_create(struct ocelot *ocelot, int port,
if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
ocelot_port_rmwl(ocelot_port, 0,
DEV_CLOCK_CFG_MAC_TX_RST |
- DEV_CLOCK_CFG_MAC_TX_RST,
+ DEV_CLOCK_CFG_MAC_RX_RST,
DEV_CLOCK_CFG);
ocelot_port->phy_mode = phy_mode;
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index 7945393a0655..99d7376a70a7 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -998,8 +998,8 @@ ocelot_vcap_block_find_filter_by_index(struct ocelot_vcap_block *block,
}
struct ocelot_vcap_filter *
-ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int cookie,
- bool tc_offload)
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
+ unsigned long cookie, bool tc_offload)
{
struct ocelot_vcap_filter *filter;
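/*
 * Illustrative sketch: tc filter cookies are unsigned long values, so
 * funnelling them through an int parameter can drop the upper bits on a
 * 64-bit build and make two different filters look identical to the lookup.
 * The cookie values below are made up purely to show the truncation.
 */
#include <stdio.h>

int main(void)
{
	unsigned long a = 0x100000001UL; /* two made-up cookies that differ */
	unsigned long b = 0x200000001UL; /* only above bit 31               */

	/* narrowed to int (the old prototype), both typically collapse to 1
	 * on an LP64 build and can no longer be told apart */
	printf("as int:           %d vs %d\n", (int)a, (int)b);
	/* at full unsigned long width they stay distinct */
	printf("as unsigned long: %lu vs %lu\n", a, b);
	return 0;
}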
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 09c0e839cca5..3b6b2e61139e 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8566,7 +8566,7 @@ static void s2io_io_resume(struct pci_dev *pdev)
return;
}
- if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
+ if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
s2io_card_down(sp);
pr_err("Can't restore mac addr after reset.\n");
return;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index c029950a81e2..ac1dcfa1d179 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -830,10 +830,6 @@ static int nfp_flower_init(struct nfp_app *app)
if (err)
goto err_cleanup;
- err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
- if (err)
- goto err_cleanup;
-
if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
nfp_flower_qos_init(app);
@@ -942,7 +938,20 @@ static int nfp_flower_start(struct nfp_app *app)
return err;
}
- return nfp_tunnel_config_start(app);
+ err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
+ if (err)
+ return err;
+
+ err = nfp_tunnel_config_start(app);
+ if (err)
+ goto err_tunnel_config;
+
+ return 0;
+
+err_tunnel_config:
+ flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
+ nfp_flower_setup_indr_tc_release);
+ return err;
}
static void nfp_flower_stop(struct nfp_app *app)
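/*
 * Illustrative sketch of the ordering change above: the indirect TC callback
 * is now registered from the start() path, and if the following step
 * (tunnel configuration) fails it is unregistered again, so start() does
 * not leave half-initialized state behind.  register_cb(), unregister_cb()
 * and start_tunnels() are hypothetical stand-ins for the driver calls.
 */
#include <stdio.h>

static int register_cb(void)    { return 0; }
static void unregister_cb(void) { }
static int start_tunnels(void)  { return -1; /* pretend this step fails */ }

static int app_start(void)
{
	int err;

	err = register_cb();
	if (err)
		return err;

	err = start_tunnels();
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	unregister_cb();
	return err;
}

int main(void)
{
	printf("app_start() = %d\n", app_start());
	return 0;
}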
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 381966e8f557..7f3322ce044c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1292,8 +1292,10 @@ int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
if (err && err != -EEXIST) {
/* set the state back to NEW so we can try again later */
f = ionic_rx_filter_by_addr(lif, addr);
- if (f && f->state == IONIC_FILTER_STATE_SYNCED)
+ if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
f->state = IONIC_FILTER_STATE_NEW;
+ set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
+ }
spin_unlock_bh(&lif->rx_filters.lock);
@@ -1377,6 +1379,10 @@ static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
+ /* Don't delete our own address from the uc list */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
}
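/*
 * Illustrative sketch: the second ionic_lif.c hunk above turns the
 * address-delete path into a no-op when the address being removed is the
 * interface's own MAC, so the primary unicast filter survives list
 * synchronization.  MAC_LEN and del_filter() below are simplified stand-ins.
 */
#include <string.h>

#define MAC_LEN 6

static int del_filter(const unsigned char *addr) { (void)addr; return 0; }

static int addr_del(const unsigned char *dev_addr, const unsigned char *addr)
{
	/* never drop the device's own unicast address */
	if (!memcmp(addr, dev_addr, MAC_LEN))
		return 0;

	return del_filter(addr);
}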
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 25ecfcfa1281..69728f9013cb 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -349,9 +349,6 @@ loop_out:
list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
(void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr);
- if (sync_item->f.state != IONIC_FILTER_STATE_SYNCED)
- set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
-
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 15ef59aa34ff..d10e1cd6d2ba 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1299,6 +1299,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
} else {
DP_NOTICE(cdev,
"Failed to acquire PTT for aRFS\n");
+ rc = -EINVAL;
goto err;
}
}
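/*
 * Illustrative sketch: without the added assignment, this error branch
 * jumped to the cleanup label with rc still holding 0 from an earlier
 * successful step, so the caller saw success even though aRFS PTT
 * acquisition had failed.  The function below reproduces the shape of the
 * bug with made-up helpers; -22 stands in for -EINVAL.
 */
static int acquire_ptt(int ok) { return ok ? 0 : -1; }

static int slowpath_start(int ptt_ok)
{
	int rc = 0; /* earlier steps succeeded */

	if (acquire_ptt(ptt_ok)) {
		rc = -22; /* without this line, the err path returns 0 */
		goto err;
	}

	return 0;

err:
	/* undo earlier steps ... */
	return rc;
}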
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index fbfda55b4c52..5e731a72cce8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -71,6 +71,7 @@ err_remove_config_dt:
static const struct of_device_id dwmac_generic_match[] = {
{ .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.40a"},
{ .compatible = "snps,dwmac-3.50a"},
{ .compatible = "snps,dwmac-3.610"},
{ .compatible = "snps,dwmac-3.70a"},
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index ed817011a94a..6924a6aacbd5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
#include "stmmac_platform.h"
@@ -1528,6 +1529,8 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
return ret;
}
+ pm_runtime_get_sync(dev);
+
if (bsp_priv->integrated_phy)
rk_gmac_integrated_phy_powerup(bsp_priv);
@@ -1539,6 +1542,8 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
if (gmac->integrated_phy)
rk_gmac_integrated_phy_powerdown(gmac);
+ pm_runtime_put_sync(&gmac->pdev->dev);
+
phy_power_on(gmac, false);
gmac_clk_enable(gmac, false);
}
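/*
 * Illustrative sketch: powerup now takes a runtime-PM reference and
 * powerdown drops it, so the controller cannot be runtime-suspended while
 * the GMAC (and its integrated PHY) is in use.  The toy usage counter below
 * only shows the pairing; it is not the runtime-PM API.
 */
struct pm_sketch {
	int usage;
};

static void powerup(struct pm_sketch *pm)
{
	pm->usage++;            /* pm_runtime_get_sync() analogue */
	/* ... clocks, PHY power, integrated-PHY setup ... */
}

static void powerdown(struct pm_sketch *pm)
{
	/* ... integrated-PHY teardown ... */
	pm->usage--;            /* pm_runtime_put_sync() analogue */
	/* ... PHY power off, clocks off ... */
}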
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 90383abafa66..f5581db0ba9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -218,11 +218,18 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
readl(ioaddr + DMA_BUS_MODE + i * 4);
}
-static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
- struct dma_features *dma_cap)
+static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
{
u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
+ if (!hw_cap) {
+ /* 0x00000000 is the value read on old hardware that does not
+ * implement this register
+ */
+ return -EOPNOTSUPP;
+ }
+
dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
@@ -252,6 +259,8 @@ static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
/* Alternate (enhanced) DESC mode */
dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
+
+ return 0;
}
static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
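/*
 * Illustrative sketch: get_hw_feature() now returns a status instead of
 * void, so a core whose DMA_HW_FEATURE register reads back as all zeroes
 * (older cores that predate the register) can report "not supported" and
 * let the caller keep its platform-provided defaults instead of overwriting
 * them with zeroed capabilities.  Names below are simplified stand-ins.
 */
#include <stdint.h>

#define EOPNOTSUPP 95

struct dma_features_sketch {
	unsigned int mbps_1000;
	unsigned int half_duplex;
};

static int get_hw_feature_sketch(uint32_t hw_cap, struct dma_features_sketch *cap)
{
	if (!hw_cap)
		return -EOPNOTSUPP; /* register not implemented, keep defaults */

	cap->mbps_1000   = (hw_cap >> 1) & 1;
	cap->half_duplex = (hw_cap >> 2) & 1;
	return 0;
}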
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 5be8e6a631d9..d99fa028c646 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -347,8 +347,8 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
}
-static void dwmac4_get_hw_feature(void __iomem *ioaddr,
- struct dma_features *dma_cap)
+static int dwmac4_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
{
u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
@@ -437,6 +437,8 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5;
+
+ return 0;
}
/* Enable/disable TSO feature and set MSS */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 906e985441a9..5e98355f422b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -371,8 +371,8 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
return ret;
}
-static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
- struct dma_features *dma_cap)
+static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
{
u32 hw_cap;
@@ -445,6 +445,8 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
+
+ return 0;
}
static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 6dc1c98ebec8..fe2660d5694d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -203,8 +203,8 @@ struct stmmac_dma_ops {
int (*dma_interrupt) (void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir);
/* If supported then get the optional core features */
- void (*get_hw_feature)(void __iomem *ioaddr,
- struct dma_features *dma_cap);
+ int (*get_hw_feature)(void __iomem *ioaddr,
+ struct dma_features *dma_cap);
/* Program the HW RX Watchdog */
void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue);
void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
@@ -255,7 +255,7 @@ struct stmmac_dma_ops {
#define stmmac_dma_interrupt_status(__priv, __args...) \
stmmac_do_callback(__priv, dma, dma_interrupt, __args)
#define stmmac_get_hw_feature(__priv, __args...) \
- stmmac_do_void_callback(__priv, dma, get_hw_feature, __args)
+ stmmac_do_callback(__priv, dma, get_hw_feature, __args)
#define stmmac_rx_watchdog(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, rx_watchdog, __args)
#define stmmac_set_tx_ring_len(__priv, __args...) \
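/*
 * Illustrative sketch of the macro swap above: the void-style dispatcher
 * discards the op's result, while the value-returning dispatcher propagates
 * it, which is what lets the new int-returning get_hw_feature() deliver
 * -EOPNOTSUPP to the caller.  The simplified macros and stub below mirror
 * that split; -22 and -95 stand in for -EINVAL and -EOPNOTSUPP.
 */
#include <stdio.h>

#define do_void_callback(ops, op, ...) \
	((ops)->op ? ((ops)->op(__VA_ARGS__), 0) : -22)
#define do_callback(ops, op, ...) \
	((ops)->op ? (ops)->op(__VA_ARGS__) : -22)

struct dma_ops_sketch {
	int (*get_hw_feature)(unsigned int reg);
};

static int get_hw_feature_stub(unsigned int reg)
{
	return reg ? 0 : -95; /* "not supported" when the register reads 0 */
}

int main(void)
{
	struct dma_ops_sketch ops = { .get_hw_feature = get_hw_feature_stub };

	/* old macro: the result is swallowed, caller always sees 0 */
	printf("void-style:  %d\n", do_void_callback(&ops, get_hw_feature, 0));
	/* new macro: the error code reaches the caller */
	printf("value-style: %d\n", do_callback(&ops, get_hw_feature, 0));
	return 0;
}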
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 981ccf47dcea..eb3b7bf771d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -477,6 +477,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
stmmac_lpi_entry_timer_config(priv, 0);
del_timer_sync(&priv->eee_ctrl_timer);
stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
+ if (priv->hw->xpcs)
+ xpcs_config_eee(priv->hw->xpcs,
+ priv->plat->mult_fact_100ns,
+ false);
}
mutex_unlock(&priv->lock);
return false;
@@ -1038,7 +1042,7 @@ static void stmmac_mac_link_down(struct phylink_config *config,
stmmac_mac_set(priv, priv->ioaddr, false);
priv->eee_active = false;
priv->tx_lpi_enabled = false;
- stmmac_eee_init(priv);
+ priv->eee_enabled = stmmac_eee_init(priv);
stmmac_set_eee_pls(priv, priv->hw, false);
if (priv->dma_cap.fpesel)
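/*
 * Illustrative sketch: on link down, the cached eee_enabled flag is now set
 * from stmmac_eee_init()'s return value instead of being left stale, so
 * later paths see the EEE state the hardware was actually programmed with.
 * The structure and helpers below are simplified stand-ins for the driver
 * state.
 */
#include <stdbool.h>

struct eee_state {
	bool eee_active;
	bool eee_enabled;
};

static bool eee_init(struct eee_state *s)
{
	/* with the link down, eee_active is false and EEE gets torn down */
	return s->eee_active;
}

static void link_down(struct eee_state *s)
{
	s->eee_active = false;
	s->eee_enabled = eee_init(s); /* keep the cached flag in sync */
}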
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 62cec9bfcd33..232ac98943cd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -508,6 +508,14 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->pmt = 1;
}
+ if (of_device_is_compatible(np, "snps,dwmac-3.40a")) {
+ plat->has_gmac = 1;
+ plat->enh_desc = 1;
+ plat->tx_coe = 1;
+ plat->bugged_jumbo = 1;
+ plat->pmt = 1;
+ }
+
if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
of_device_is_compatible(np, "snps,dwmac-4.10a") ||
of_device_is_compatible(np, "snps,dwmac-4.20a") ||