Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4')
20 files changed, 692 insertions, 440 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 5098e7f21987..22b1cc012bc9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -7,7 +7,7 @@ config MLX4_EN
 	depends on MAY_USE_DEVLINK
 	depends on PCI
 	select MLX4_CORE
-	select PTP_1588_CLOCK
+	imply PTP_1588_CLOCK
 	---help---
 	  This driver supports Mellanox Technologies ConnectX Ethernet
 	  devices.
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index b1cef7a0f7ca..a49072b4fa52 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2469,6 +2469,7 @@ err_comm_admin:
 	kfree(priv->mfunc.master.slave_state);
 err_comm:
 	iounmap(priv->mfunc.comm);
+	priv->mfunc.comm = NULL;
 err_vhcr:
 	dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
 			  priv->mfunc.vhcr,
@@ -2537,6 +2538,13 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
 	int slave;
 	u32 slave_read;
 
+	/* If the comm channel has not yet been initialized,
+	 * skip reporting the internal error event to all
+	 * the communication channels.
+	 */
+	if (!priv->mfunc.comm)
+		return;
+
 	/* Report an internal error event to all
 	 * communication channels.
 	 */
@@ -2571,6 +2579,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
 	}
 
 	iounmap(priv->mfunc.comm);
+	priv->mfunc.comm = NULL;
 }
 
 void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
@@ -2670,15 +2679,13 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
 	if (!mailbox)
 		return ERR_PTR(-ENOMEM);
 
-	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
-				      &mailbox->dma);
+	mailbox->buf = pci_pool_zalloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
+				       &mailbox->dma);
 	if (!mailbox->buf) {
 		kfree(mailbox);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
-
 	return mailbox;
 }
 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 08fc5fc56d43..a5fc46bbcbe2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -245,8 +245,11 @@ static u32 freq_to_shift(u16 freq)
 {
 	u32 freq_khz = freq * 1000;
 	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+	u64 tmp_rounded =
+		roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
+		roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
 	u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
-		max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+		max_val_cycles : tmp_rounded;
 	/* calculate max possible multiplier in order to fit in 64bit */
 	u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
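Note: the en_clock.c hunk above closes a subtle overflow. For large cycle counts, a 64-bit roundup_pow_of_two() can wrap to 0, so the old "roundup_pow_of_two(x) - 1" silently became 0xffffffffffffffff. A standalone sketch of the guarded computation follows; rup_pow2() is a stand-in helper written here for illustration, not driver code.

#include <stdint.h>
#include <limits.h>

/* Stand-in for the kernel's roundup_pow_of_two(); returns 0 on
 * 64-bit overflow, which is exactly the case the fix guards against.
 */
static uint64_t rup_pow2(uint64_t x)
{
	uint64_t p = 1;

	while (p && p < x)
		p <<= 1;	/* wraps to 0 once it passes 2^63 */
	return p;
}

static uint64_t max_cycles_rounded(uint64_t max_val_cycles)
{
	uint64_t rounded = rup_pow2(max_val_cycles);

	/* Old code: unconditional "rounded - 1", i.e. ~0 after overflow.
	 * Fixed code: fall back to UINT_MAX when the round-up wrapped,
	 * keeping the later div_u64() multiplier calculation meaningful.
	 */
	return rounded > max_val_cycles ? rounded - 1 : UINT_MAX;
}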
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 132cea655920..09dd3776db76 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -65,7 +65,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
 
 	cq->ring = ring;
-	cq->is_tx = mode;
+	cq->type = mode;
 	cq->vector = mdev->dev->caps.num_comp_vectors;
 
 	/* Allocate HW buffers on provided NUMA node.
@@ -104,7 +104,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 	*cq->mcq.arm_db = 0;
 	memset(cq->buf, 0, cq->buf_size);
 
-	if (cq->is_tx == RX) {
+	if (cq->type == RX) {
 		if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
 					     cq->vector)) {
 			cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
@@ -133,11 +133,11 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 		cq->vector = rx_cq->vector;
 	}
 
-	if (!cq->is_tx)
+	if (cq->type == RX)
 		cq->size = priv->rx_ring[cq->ring]->actual_size;
 
-	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
-	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
+	if ((cq->type != RX && priv->hwtstamp_config.tx_type) ||
+	    (cq->type == RX && priv->hwtstamp_config.rx_filter))
 		timestamp_en = 1;
 
 	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
@@ -146,10 +146,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 	if (err)
 		goto free_eq;
 
-	cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
+	cq->mcq.comp = cq->type != RX ? mlx4_en_tx_irq : mlx4_en_rx_irq;
 	cq->mcq.event = mlx4_en_cq_event;
 
-	if (cq->is_tx)
+	if (cq->type != RX)
 		netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
 				  NAPI_POLL_WEIGHT);
 	else
@@ -173,7 +173,7 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
 	if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
-	    cq->is_tx == RX)
+	    cq->type == RX)
 		mlx4_release_eq(priv->mdev->dev, cq->vector);
 	cq->vector = 0;
 	cq->buf_size = 0;
@@ -185,10 +185,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
 	napi_disable(&cq->napi);
-	if (!cq->is_tx) {
-		napi_hash_del(&cq->napi);
-		synchronize_rcu();
-	}
 	netif_napi_del(&cq->napi);
 
 	mlx4_cq_free(priv->mdev->dev, &cq->mcq);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bdda17d2ea0f..d9c9f86a30df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -49,16 +49,19 @@
 
 static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
 {
-	int i;
+	int i, t;
 	int err = 0;
 
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		priv->tx_cq[i]->moder_cnt = priv->tx_frames;
-		priv->tx_cq[i]->moder_time = priv->tx_usecs;
-		if (priv->port_up) {
-			err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
-			if (err)
-				return err;
+	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		for (i = 0; i < priv->tx_ring_num[t]; i++) {
+			priv->tx_cq[t][i]->moder_cnt = priv->tx_frames;
+			priv->tx_cq[t][i]->moder_time = priv->tx_usecs;
+			if (priv->port_up) {
+				err = mlx4_en_set_cq_moder(priv,
+							   priv->tx_cq[t][i]);
+				if (err)
+					return err;
+			}
 		}
 	}
 
@@ -192,6 +195,10 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
 	"tx_prio_7_packets", "tx_prio_7_bytes",
 	"tx_novlan_packets", "tx_novlan_bytes",
 
+	/* xdp statistics */
+	"rx_xdp_drop",
+	"rx_xdp_tx",
+	"rx_xdp_tx_full",
 };
 
 static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
@@ -336,8 +343,8 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
 	switch (sset) {
 	case ETH_SS_STATS:
 		return bitmap_iterator_count(&it) +
-			(priv->tx_ring_num * 2) +
-			(priv->rx_ring_num * 3);
+			(priv->tx_ring_num[TX] * 2) +
+			(priv->rx_ring_num * (3 + NUM_XDP_STATS));
 	case ETH_SS_TEST:
 		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
 					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -360,6 +367,8 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 
 	spin_lock_bh(&priv->stats_lock);
 
+	mlx4_en_fold_software_stats(dev);
+
 	for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
 		if (bitmap_iterator_test(&it))
 			data[index++] = ((unsigned long *)&dev->stats)[i];
@@ -397,14 +406,21 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 		if (bitmap_iterator_test(&it))
 			data[index++] = ((unsigned long *)&priv->pkstats)[i];
 
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		data[index++] = priv->tx_ring[i]->packets;
-		data[index++] = priv->tx_ring[i]->bytes;
+	for (i = 0; i < NUM_XDP_STATS; i++, bitmap_iterator_inc(&it))
+		if (bitmap_iterator_test(&it))
+			data[index++] = ((unsigned long *)&priv->xdp_stats)[i];
+
+	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+		data[index++] = priv->tx_ring[TX][i]->packets;
+		data[index++] = priv->tx_ring[TX][i]->bytes;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		data[index++] = priv->rx_ring[i]->packets;
 		data[index++] = priv->rx_ring[i]->bytes;
 		data[index++] = priv->rx_ring[i]->dropped;
+		data[index++] = priv->rx_ring[i]->xdp_drop;
+		data[index++] = priv->rx_ring[i]->xdp_tx;
+		data[index++] = priv->rx_ring[i]->xdp_tx_full;
 	}
 	spin_unlock_bh(&priv->stats_lock);
 
@@ -467,7 +483,13 @@ static void mlx4_en_get_strings(struct net_device *dev,
 				strcpy(data + (index++) * ETH_GSTRING_LEN,
 				       main_strings[strings]);
 
-		for (i = 0; i < priv->tx_ring_num; i++) {
+		for (i = 0; i < NUM_XDP_STATS; i++, strings++,
+		     bitmap_iterator_inc(&it))
+			if (bitmap_iterator_test(&it))
+				strcpy(data + (index++) * ETH_GSTRING_LEN,
+				       main_strings[strings]);
+
+		for (i = 0; i < priv->tx_ring_num[TX]; i++) {
 			sprintf(data + (index++) * ETH_GSTRING_LEN,
 				"tx%d_packets", i);
 			sprintf(data + (index++) * ETH_GSTRING_LEN,
@@ -480,6 +502,12 @@ static void mlx4_en_get_strings(struct net_device *dev,
 				"rx%d_bytes", i);
 			sprintf(data + (index++) * ETH_GSTRING_LEN,
 				"rx%d_dropped", i);
+			sprintf(data + (index++) * ETH_GSTRING_LEN,
+				"rx%d_xdp_drop", i);
+			sprintf(data + (index++) * ETH_GSTRING_LEN,
+				"rx%d_xdp_tx", i);
+			sprintf(data + (index++) * ETH_GSTRING_LEN,
+				"rx%d_xdp_tx_full", i);
 		}
 		break;
 	case ETH_SS_PRIV_FLAGS:
@@ -1060,7 +1088,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 
 	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
 					priv->rx_ring[0]->size) &&
-	    tx_size == priv->tx_ring[0]->size)
+	    tx_size == priv->tx_ring[TX][0]->size)
 		return 0;
 
 	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
@@ -1105,7 +1133,7 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
 	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
 	param->rx_pending = priv->port_up ?
 		priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
-	param->tx_pending = priv->tx_ring[0]->size;
+	param->tx_pending = priv->tx_ring[TX][0]->size;
 }
 
 static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
@@ -1710,7 +1738,7 @@ static void mlx4_en_get_channels(struct net_device *dev,
 	channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
 
 	channel->rx_count = priv->rx_ring_num;
-	channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
+	channel->tx_count = priv->tx_ring_num[TX] / MLX4_EN_NUM_UP;
 }
 
 static int mlx4_en_set_channels(struct net_device *dev,
@@ -1721,6 +1749,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	struct mlx4_en_port_profile new_prof;
 	struct mlx4_en_priv *tmp;
 	int port_up = 0;
+	int xdp_count;
 	int err = 0;
 
 	if (channel->other_count || channel->combined_count ||
@@ -1729,20 +1758,25 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	    !channel->tx_count || !channel->rx_count)
 		return -EINVAL;
 
-	if (channel->tx_count * MLX4_EN_NUM_UP <= priv->xdp_ring_num) {
-		en_err(priv, "Minimum %d tx channels required with XDP on\n",
-		       priv->xdp_ring_num / MLX4_EN_NUM_UP + 1);
-		return -EINVAL;
-	}
-
 	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
 	if (!tmp)
 		return -ENOMEM;
 
 	mutex_lock(&mdev->state_lock);
+	xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
+	if (channel->tx_count * MLX4_EN_NUM_UP + xdp_count > MAX_TX_RINGS) {
+		err = -EINVAL;
+		en_err(priv,
+		       "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
+		       channel->tx_count * MLX4_EN_NUM_UP + xdp_count,
+		       MAX_TX_RINGS);
+		goto out;
+	}
+
 	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
 	new_prof.num_tx_rings_p_up = channel->tx_count;
-	new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+	new_prof.tx_ring_num[TX] = channel->tx_count * MLX4_EN_NUM_UP;
+	new_prof.tx_ring_num[TX_XDP] = xdp_count;
 	new_prof.rx_ring_num = channel->rx_count;
 
 	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
@@ -1756,14 +1790,13 @@ static int mlx4_en_set_channels(struct net_device *dev,
 
 	mlx4_en_safe_replace_resources(priv, tmp);
 
-	netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
-						priv->xdp_ring_num);
+	netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
 	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
 	if (dev->num_tc)
 		mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
 
-	en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
+	en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
 	en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
 
 	if (port_up) {
@@ -1774,8 +1807,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
 
 	err = mlx4_en_moderation_update(priv);
 out:
-	kfree(tmp);
 	mutex_unlock(&mdev->state_lock);
+	kfree(tmp);
 	return err;
 }
 
@@ -1823,11 +1856,15 @@ static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
 	int ret = 0;
 
 	if (bf_enabled_new != bf_enabled_old) {
+		int t;
+
 		if (bf_enabled_new) {
 			bool bf_supported = true;
 
-			for (i = 0; i < priv->tx_ring_num; i++)
-				bf_supported &= priv->tx_ring[i]->bf_alloced;
+			for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+				for (i = 0; i < priv->tx_ring_num[t]; i++)
+					bf_supported &=
+						priv->tx_ring[t][i]->bf_alloced;
 
 			if (!bf_supported) {
 				en_err(priv, "BlueFlame is not supported\n");
@@ -1839,8 +1876,10 @@ static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
 			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
 		}
 
-		for (i = 0; i < priv->tx_ring_num; i++)
-			priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+		for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+			for (i = 0; i < priv->tx_ring_num[t]; i++)
+				priv->tx_ring[t][i]->bf_enabled =
+					bf_enabled_new;
 
 		en_info(priv, "BlueFlame %s\n",
 			bf_enabled_new ? "Enabled" : "Disabled");
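Note: the two-dimensional tx_ring/tx_cq scheme used throughout these hunks is declared in mlx4_en.h, which belongs to this series but is not shown in this view. A sketch of what the hunks imply (enum and field names taken from the diff; the exact struct layout is assumed, not verbatim):

enum {
	TX,		/* regular egress rings, one netdev queue each */
	TX_XDP,		/* XDP_TX forwarding rings, one per RX ring */
	MLX4_EN_NUM_TX_TYPES
};

struct mlx4_en_priv_sketch {
	struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES];
	struct mlx4_en_cq *tx_cq_ptrs;	/* illustrative placeholder */
	struct mlx4_en_cq **tx_cq[MLX4_EN_NUM_TX_TYPES];
	unsigned int tx_ring_num[MLX4_EN_NUM_TX_TYPES];
};

Every former priv->tx_ring[i] walk accordingly becomes a [t][i] walk, as in mlx4_en_moderation_update() above.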
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index bf7628db098a..36a7a54bbb82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -169,7 +169,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 		params->prof[i].tx_ppp = pfctx;
 		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
 		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
-		params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
+		params->prof[i].tx_ring_num[TX] = params->num_tx_rings_p_up *
 			MLX4_EN_NUM_UP;
 		params->prof[i].rss_rings = 0;
 		params->prof[i].inline_thold = inline_thold;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7e703bed7b82..bcd955339058 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -51,6 +51,9 @@
 #include "mlx4_en.h"
 #include "en_port.h"
 
+#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
+				   XDP_PACKET_HEADROOM))
+
 int mlx4_en_setup_tc(struct net_device *dev, u8 up)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -129,6 +132,9 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
 	}
 };
 
+/* Must not acquire state_lock, as its corresponding work_sync
+ * is done under it.
+ */
 static void mlx4_en_filter_work(struct work_struct *work)
 {
 	struct mlx4_en_filter *filter = container_of(work,
@@ -1214,8 +1220,8 @@ static void mlx4_en_netpoll(struct net_device *dev)
 	struct mlx4_en_cq *cq;
 	int i;
 
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		cq = priv->tx_cq[i];
+	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+		cq = priv->tx_cq[TX][i];
 		napi_schedule(&cq->napi);
 	}
 }
@@ -1299,12 +1305,14 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 	if (netif_msg_timer(priv))
 		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
 
-	for (i = 0; i < priv->tx_ring_num; i++) {
+	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+		struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];
+
 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
 			continue;
 		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
-			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
-			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
+			i, tx_ring->qpn, tx_ring->sp_cqn,
+			tx_ring->cons, tx_ring->prod);
 	}
 
 	priv->port_stats.tx_timeout++;
@@ -1319,6 +1327,7 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 
 	spin_lock_bh(&priv->stats_lock);
+	mlx4_en_fold_software_stats(dev);
 	netdev_stats_to_stats64(stats, &dev->stats);
 	spin_unlock_bh(&priv->stats_lock);
 
@@ -1328,7 +1337,7 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_cq *cq;
-	int i;
+	int i, t;
 
 	/* If we haven't received a specific coalescing setting
 	 * (module param), we set the moderation parameters as follows:
@@ -1353,10 +1362,12 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 		priv->last_moder_bytes[i] = 0;
 	}
 
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		cq = priv->tx_cq[i];
-		cq->moder_cnt = priv->tx_frames;
-		cq->moder_time = priv->tx_usecs;
+	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		for (i = 0; i < priv->tx_ring_num[t]; i++) {
+			cq = priv->tx_cq[t][i];
+			cq->moder_cnt = priv->tx_frames;
+			cq->moder_time = priv->tx_usecs;
+		}
 	}
 
 	/* Reset auto-moderation params */
@@ -1387,10 +1398,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 		return;
 
 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
-		spin_lock_bh(&priv->stats_lock);
-		rx_packets = priv->rx_ring[ring]->packets;
-		rx_bytes = priv->rx_ring[ring]->bytes;
-		spin_unlock_bh(&priv->stats_lock);
+		rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
+		rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);
 
 		rx_pkt_diff = ((unsigned long) (rx_packets -
 				priv->last_moder_packets[ring]));
@@ -1526,19 +1535,13 @@ static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
 static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
 				      int tx_ring_idx)
 {
-	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx];
-	int rr_index;
+	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
+	int rr_index = tx_ring_idx;
 
-	rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx;
-	if (rr_index >= 0) {
-		tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
-		tx_ring->recycle_ring = priv->rx_ring[rr_index];
-		en_dbg(DRV, priv,
-		       "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n",
-		       tx_ring_idx, rr_index);
-	} else {
-		tx_ring->recycle_ring = NULL;
-	}
+	tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
+	tx_ring->recycle_ring = priv->rx_ring[rr_index];
+	en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
+	       TX_XDP, tx_ring_idx, rr_index);
 }
 
 int mlx4_en_start_port(struct net_device *dev)
@@ -1548,9 +1551,8 @@ int mlx4_en_start_port(struct net_device *dev)
 	struct mlx4_en_cq *cq;
 	struct mlx4_en_tx_ring *tx_ring;
 	int rx_index = 0;
-	int tx_index = 0;
 	int err = 0;
-	int i;
+	int i, t;
 	int j;
 	u8 mc_list[16] = {0};
 
@@ -1635,43 +1637,51 @@ int mlx4_en_start_port(struct net_device *dev)
 		goto rss_err;
 
 	/* Configure tx cq's and rings */
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		/* Configure cq */
-		cq = priv->tx_cq[i];
-		err = mlx4_en_activate_cq(priv, cq, i);
-		if (err) {
-			en_err(priv, "Failed allocating Tx CQ\n");
-			goto tx_err;
-		}
-		err = mlx4_en_set_cq_moder(priv, cq);
-		if (err) {
-			en_err(priv, "Failed setting cq moderation parameters\n");
-			mlx4_en_deactivate_cq(priv, cq);
-			goto tx_err;
-		}
-		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
-		cq->buf->wqe_index = cpu_to_be16(0xffff);
-
-		/* Configure ring */
-		tx_ring = priv->tx_ring[i];
-		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
-					       i / priv->num_tx_rings_p_up);
-		if (err) {
-			en_err(priv, "Failed allocating Tx ring\n");
-			mlx4_en_deactivate_cq(priv, cq);
-			goto tx_err;
-		}
-		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		u8 num_tx_rings_p_up = t == TX ? priv->num_tx_rings_p_up : 1;
 
-		mlx4_en_init_recycle_ring(priv, i);
+		for (i = 0; i < priv->tx_ring_num[t]; i++) {
+			/* Configure cq */
+			cq = priv->tx_cq[t][i];
+			err = mlx4_en_activate_cq(priv, cq, i);
+			if (err) {
+				en_err(priv, "Failed allocating Tx CQ\n");
+				goto tx_err;
+			}
+			err = mlx4_en_set_cq_moder(priv, cq);
+			if (err) {
+				en_err(priv, "Failed setting cq moderation parameters\n");
+				mlx4_en_deactivate_cq(priv, cq);
+				goto tx_err;
+			}
+			en_dbg(DRV, priv,
+			       "Resetting index of collapsed CQ:%d to -1\n", i);
+			cq->buf->wqe_index = cpu_to_be16(0xffff);
+
+			/* Configure ring */
+			tx_ring = priv->tx_ring[t][i];
+			err = mlx4_en_activate_tx_ring(priv, tx_ring,
+						       cq->mcq.cqn,
+						       i / num_tx_rings_p_up);
+			if (err) {
+				en_err(priv, "Failed allocating Tx ring\n");
+				mlx4_en_deactivate_cq(priv, cq);
+				goto tx_err;
+			}
+			if (t != TX_XDP) {
+				tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+				tx_ring->recycle_ring = NULL;
+			} else {
+				mlx4_en_init_recycle_ring(priv, i);
+			}
 
-		/* Arm CQ for TX completions */
-		mlx4_en_arm_cq(priv, cq);
+			/* Arm CQ for TX completions */
+			mlx4_en_arm_cq(priv, cq);
 
-		/* Set initial ownership of all Tx TXBBs to SW (1) */
-		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
-			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
-		++tx_index;
+			/* Set initial ownership of all Tx TXBBs to SW (1) */
+			for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
+				*((u32 *)(tx_ring->buf + j)) = 0xffffffff;
+		}
 	}
 
 	/* Configure port */
@@ -1733,15 +1743,31 @@ int mlx4_en_start_port(struct net_device *dev)
 		udp_tunnel_get_rx_info(dev);
 
 	priv->port_up = true;
+
+	/* Process all completions if exist to prevent
+	 * the queues freezing if they are full
+	 */
+	for (i = 0; i < priv->rx_ring_num; i++)
+		napi_schedule(&priv->rx_cq[i]->napi);
+
 	netif_tx_start_all_queues(dev);
 	netif_device_attach(dev);
 
 	return 0;
 
 tx_err:
-	while (tx_index--) {
-		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
-		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
+	if (t == MLX4_EN_NUM_TX_TYPES) {
+		t--;
+		i = priv->tx_ring_num[t];
+	}
+	while (t >= 0) {
+		while (i--) {
+			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
+			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
+		}
+		if (!t--)
+			break;
+		i = priv->tx_ring_num[t];
 	}
 	mlx4_en_destroy_drop_qp(priv);
 rss_err:
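Note: the reworked tx_err unwind above is worth a second look. Pulled out into a standalone sketch (same logic as the hunk; helper name is ours), the subtlety is that failures after the allocation loop, e.g. while configuring the port, reach tx_err with t already one past the last valid type:

static void unwind_tx_rings(struct mlx4_en_priv *priv, int t, int i)
{
	if (t == MLX4_EN_NUM_TX_TYPES) {	/* failed after the loop */
		t--;
		i = priv->tx_ring_num[t];
	}
	while (t >= 0) {
		while (i--) {			/* undo type t, newest first */
			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
		}
		if (!t--)
			break;
		i = priv->tx_ring_num[t];	/* previous type was complete */
	}
}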
@@ -1766,7 +1792,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_mc_list *mclist, *tmp;
 	struct ethtool_flow_id *flow, *tmp_flow;
-	int i;
+	int i, t;
 	u8 mc_list[16] = {0};
 
 	if (!priv->port_up) {
@@ -1786,8 +1812,12 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 	netif_tx_disable(dev);
 
+	spin_lock_bh(&priv->stats_lock);
+	mlx4_en_fold_software_stats(dev);
 	/* Set port as not active */
 	priv->port_up = false;
+	spin_unlock_bh(&priv->stats_lock);
+
 	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
 
 	/* Promsicuous mode */
@@ -1852,14 +1882,17 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 	mlx4_en_destroy_drop_qp(priv);
 
 	/* Free TX Rings */
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
-		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		for (i = 0; i < priv->tx_ring_num[t]; i++) {
+			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
+			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
+		}
 	}
 	msleep(10);
 
-	for (i = 0; i < priv->tx_ring_num; i++)
-		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+		for (i = 0; i < priv->tx_ring_num[t]; i++)
+			mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
 
 	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
 		mlx4_en_delete_rss_steer_rules(priv);
@@ -1908,10 +1941,12 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_tx_ring **tx_ring;
 	int i;
 
-	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
-		en_dbg(HW, priv, "Failed dumping statistics\n");
+	if (!mlx4_is_slave(mdev->dev))
+		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
+			en_dbg(HW, priv, "Failed dumping statistics\n");
 
 	memset(&priv->pstats, 0, sizeof(priv->pstats));
 	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
@@ -1924,15 +1959,16 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 	       sizeof(priv->tx_priority_flowstats));
 	memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
 
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		priv->tx_ring[i]->bytes = 0;
-		priv->tx_ring[i]->packets = 0;
-		priv->tx_ring[i]->tx_csum = 0;
-		priv->tx_ring[i]->tx_dropped = 0;
-		priv->tx_ring[i]->queue_stopped = 0;
-		priv->tx_ring[i]->wake_queue = 0;
-		priv->tx_ring[i]->tso_packets = 0;
-		priv->tx_ring[i]->xmit_more = 0;
+	tx_ring = priv->tx_ring[TX];
+	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+		tx_ring[i]->bytes = 0;
+		tx_ring[i]->packets = 0;
+		tx_ring[i]->tx_csum = 0;
+		tx_ring[i]->tx_dropped = 0;
+		tx_ring[i]->queue_stopped = 0;
+		tx_ring[i]->wake_queue = 0;
+		tx_ring[i]->tso_packets = 0;
+		tx_ring[i]->xmit_more = 0;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		priv->rx_ring[i]->bytes = 0;
@@ -1988,17 +2024,20 @@ static int mlx4_en_close(struct net_device *dev)
 
 static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 {
-	int i;
+	int i, t;
 
 #ifdef CONFIG_RFS_ACCEL
 	priv->dev->rx_cpu_rmap = NULL;
 #endif
 
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		if (priv->tx_ring && priv->tx_ring[i])
-			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-		if (priv->tx_cq && priv->tx_cq[i])
-			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		for (i = 0; i < priv->tx_ring_num[t]; i++) {
+			if (priv->tx_ring[t] && priv->tx_ring[t][i])
+				mlx4_en_destroy_tx_ring(priv,
+							&priv->tx_ring[t][i]);
+			if (priv->tx_cq[t] && priv->tx_cq[t][i])
+				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
+		}
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
@@ -2014,20 +2053,22 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_port_profile *prof = priv->prof;
-	int i;
+	int i, t;
 	int node;
 
 	/* Create tx Rings */
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		node = cpu_to_node(i % num_online_cpus());
-		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
-				      prof->tx_ring_size, i, TX, node))
-			goto err;
-
-		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
-					   prof->tx_ring_size, TXBB_SIZE,
-					   node, i))
-			goto err;
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		for (i = 0; i < priv->tx_ring_num[t]; i++) {
+			node = cpu_to_node(i % num_online_cpus());
+			if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
+					      prof->tx_ring_size, i, t, node))
+				goto err;
+
+			if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
+						   prof->tx_ring_size,
+						   TXBB_SIZE, node, i))
+				goto err;
+		}
 	}
 
 	/* Create rx Rings */
@@ -2059,31 +2100,28 @@ err:
 		if (priv->rx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
 	}
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		if (priv->tx_ring[i])
-			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-		if (priv->tx_cq[i])
-			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		for (i = 0; i < priv->tx_ring_num[t]; i++) {
+			if (priv->tx_ring[t][i])
+				mlx4_en_destroy_tx_ring(priv,
+							&priv->tx_ring[t][i]);
+			if (priv->tx_cq[t][i])
+				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
+		}
 	}
 	return -ENOMEM;
 }
 
-static void mlx4_en_shutdown(struct net_device *dev)
-{
-	rtnl_lock();
-	netif_device_detach(dev);
-	mlx4_en_close(dev);
-	rtnl_unlock();
-}
-
 static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
 			     struct mlx4_en_priv *src,
 			     struct mlx4_en_port_profile *prof)
 {
+	int t;
+
 	memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
 	       sizeof(dst->hwtstamp_config));
 	dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
-	dst->tx_ring_num = prof->tx_ring_num;
 	dst->rx_ring_num = prof->rx_ring_num;
 	dst->flags = prof->flags;
 	dst->mdev = src->mdev;
@@ -2093,33 +2131,50 @@ static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
 	dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
 					 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
 
-	dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
-			       GFP_KERNEL);
-	if (!dst->tx_ring)
-		return -ENOMEM;
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		dst->tx_ring_num[t] = prof->tx_ring_num[t];
+		if (!dst->tx_ring_num[t])
+			continue;
 
-	dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
-			     GFP_KERNEL);
-	if (!dst->tx_cq) {
-		kfree(dst->tx_ring);
-		return -ENOMEM;
+		dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
+					  MAX_TX_RINGS, GFP_KERNEL);
+		if (!dst->tx_ring[t])
+			goto err_free_tx;
+
+		dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
+					MAX_TX_RINGS, GFP_KERNEL);
+		if (!dst->tx_cq[t]) {
+			kfree(dst->tx_ring[t]);
+			goto err_free_tx;
+		}
 	}
+
 	return 0;
+
+err_free_tx:
+	while (t--) {
+		kfree(dst->tx_ring[t]);
+		kfree(dst->tx_cq[t]);
+	}
+	return -ENOMEM;
 }
 
 static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
 				struct mlx4_en_priv *src)
 {
+	int t;
+
 	memcpy(dst->rx_ring, src->rx_ring,
 	       sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
 	memcpy(dst->rx_cq, src->rx_cq,
 	       sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
 	memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
 	       sizeof(dst->hwtstamp_config));
-	dst->tx_ring_num = src->tx_ring_num;
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		dst->tx_ring_num[t] = src->tx_ring_num[t];
+		dst->tx_ring[t] = src->tx_ring[t];
+		dst->tx_cq[t] = src->tx_cq[t];
+	}
 	dst->rx_ring_num = src->rx_ring_num;
-	dst->tx_ring = src->tx_ring;
-	dst->tx_cq = src->tx_cq;
 	memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
 }
 
@@ -2127,14 +2182,18 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
 				struct mlx4_en_priv *tmp,
 				struct mlx4_en_port_profile *prof)
 {
+	int t;
+
 	mlx4_en_copy_priv(tmp, priv, prof);
 
 	if (mlx4_en_alloc_resources(tmp)) {
 		en_warn(priv,
 			"%s: Resource allocation failed, using previous configuration\n",
 			__func__);
-		kfree(tmp->tx_ring);
-		kfree(tmp->tx_cq);
+		for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+			kfree(tmp->tx_ring[t]);
+			kfree(tmp->tx_cq[t]);
+		}
 		return -ENOMEM;
 	}
 	return 0;
@@ -2151,8 +2210,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
-	bool shutdown = mdev->dev->persist->interface_state &
-					    MLX4_INTERFACE_STATE_SHUTDOWN;
+	int t;
 
 	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
@@ -2160,10 +2218,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	if (priv->registered) {
 		devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
 							      priv->port));
-		if (shutdown)
-			mlx4_en_shutdown(dev);
-		else
-			unregister_netdev(dev);
+		unregister_netdev(dev);
 	}
 
 	if (priv->allocated)
@@ -2181,19 +2236,33 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	mutex_lock(&mdev->state_lock);
 	mdev->pndev[priv->port] = NULL;
 	mdev->upper[priv->port] = NULL;
-	mutex_unlock(&mdev->state_lock);
 
 #ifdef CONFIG_RFS_ACCEL
 	mlx4_en_cleanup_filters(priv);
 #endif
 
 	mlx4_en_free_resources(priv);
+	mutex_unlock(&mdev->state_lock);
 
-	kfree(priv->tx_ring);
-	kfree(priv->tx_cq);
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		kfree(priv->tx_ring[t]);
+		kfree(priv->tx_cq[t]);
+	}
 
-	if (!shutdown)
-		free_netdev(dev);
+	free_netdev(dev);
+}
+
+static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	if (mtu > MLX4_EN_MAX_XDP_MTU) {
+		en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n",
+		       mtu, MLX4_EN_MAX_XDP_MTU);
+		return false;
+	}
+
+	return true;
 }
 
 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
@@ -2205,15 +2274,10 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
 		 dev->mtu, new_mtu);
 
-	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
-		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
-		return -EPERM;
-	}
-	if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
-		en_err(priv, "MTU size:%d requires frags but XDP running\n",
-		       new_mtu);
-		return -EOPNOTSUPP;
-	}
+	if (priv->tx_ring_num[TX_XDP] &&
+	    !mlx4_en_check_xdp_mtu(dev, new_mtu))
+		return -ENOTSUPP;
+
 	dev->mtu = new_mtu;
 
 	if (netif_running(dev)) {
@@ -2600,7 +2664,7 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
 static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
+	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
 	struct mlx4_update_qp_params params;
 	int err;
 
@@ -2628,18 +2692,21 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_port_profile new_prof;
 	struct bpf_prog *old_prog;
+	struct mlx4_en_priv *tmp;
+	int tx_changed = 0;
 	int xdp_ring_num;
 	int port_up = 0;
 	int err;
 	int i;
 
-	xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0;
+	xdp_ring_num = prog ? priv->rx_ring_num : 0;
 
 	/* No need to reconfigure buffers when simply swapping the
 	 * program for a new one.
 	 */
-	if (priv->xdp_ring_num == xdp_ring_num) {
+	if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
 		if (prog) {
 			prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
 			if (IS_ERR(prog))
@@ -2658,33 +2725,47 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 		return 0;
 	}
 
-	if (priv->num_frags > 1) {
-		en_err(priv, "Cannot set XDP if MTU requires multiple frags\n");
+	if (!mlx4_en_check_xdp_mtu(dev, dev->mtu))
 		return -EOPNOTSUPP;
-	}
 
-	if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) {
-		en_err(priv,
-		       "Minimum %d tx channels required to run XDP\n",
-		       (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP);
-		return -EINVAL;
-	}
+	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
 
 	if (prog) {
 		prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
-		if (IS_ERR(prog))
-			return PTR_ERR(prog);
+		if (IS_ERR(prog)) {
+			err = PTR_ERR(prog);
+			goto out;
+		}
 	}
 
 	mutex_lock(&mdev->state_lock);
+	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+	new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;
+
+	if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
+		tx_changed = 1;
+		new_prof.tx_ring_num[TX] =
+			MAX_TX_RINGS - ALIGN(xdp_ring_num, MLX4_EN_NUM_UP);
+		en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
+	}
+
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	if (err) {
+		if (prog)
+			bpf_prog_sub(prog, priv->rx_ring_num - 1);
+		goto unlock_out;
+	}
+
 	if (priv->port_up) {
 		port_up = 1;
 		mlx4_en_stop_port(dev, 1);
 	}
 
-	priv->xdp_ring_num = xdp_ring_num;
-	netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
-							priv->xdp_ring_num);
+	mlx4_en_safe_replace_resources(priv, tmp);
+	if (tx_changed)
+		netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		old_prog = rcu_dereference_protected(
@@ -2704,15 +2785,18 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 		}
 	}
 
+unlock_out:
 	mutex_unlock(&mdev->state_lock);
-	return 0;
+out:
+	kfree(tmp);
+	return err;
 }
 
 static bool mlx4_xdp_attached(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 
-	return !!priv->xdp_ring_num;
+	return !!priv->tx_ring_num[TX_XDP];
 }
 
 static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
@@ -3049,6 +3133,10 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
 
 	if (!mlx4_is_slave(dev))
 		bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
+	last_i += NUM_PKT_STATS;
+
+	bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
+	last_i += NUM_XDP_STATS;
 }
 
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -3056,7 +3144,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 {
 	struct net_device *dev;
 	struct mlx4_en_priv *priv;
-	int i;
+	int i, t;
 	int err;
 
 	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
@@ -3064,7 +3152,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	if (dev == NULL)
 		return -ENOMEM;
 
-	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
+	netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
 	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
 
 	SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
@@ -3101,21 +3189,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
 			MLX4_WQE_CTRL_SOLICITED);
 	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
-	priv->tx_ring_num = prof->tx_ring_num;
 	priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
 	netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
 
-	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
-				GFP_KERNEL);
-	if (!priv->tx_ring) {
-		err = -ENOMEM;
-		goto out;
-	}
-	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
-			      GFP_KERNEL);
-	if (!priv->tx_cq) {
-		err = -ENOMEM;
-		goto out;
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		priv->tx_ring_num[t] = prof->tx_ring_num[t];
+		if (!priv->tx_ring_num[t])
+			continue;
+
+		priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
+					   MAX_TX_RINGS, GFP_KERNEL);
+		if (!priv->tx_ring[t]) {
+			err = -ENOMEM;
+			goto err_free_tx;
+		}
+		priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
+					 MAX_TX_RINGS, GFP_KERNEL);
+		if (!priv->tx_cq[t]) {
+			kfree(priv->tx_ring[t]);
+			err = -ENOMEM;
+			goto out;
+		}
 	}
 	priv->rx_ring_num = prof->rx_ring_num;
 	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
@@ -3198,7 +3292,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	else
 		dev->netdev_ops = &mlx4_netdev_ops;
 	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
-	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+	netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
 	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
 	dev->ethtool_ops = &mlx4_en_ethtool_ops;
@@ -3288,13 +3382,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 		dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
 	}
 
+	/* MTU range: 46 - hw-specific max */
+	dev->min_mtu = MLX4_EN_MIN_MTU;
+	dev->max_mtu = priv->max_mtu;
+
 	mdev->pndev[port] = dev;
 	mdev->upper[port] = NULL;
 
 	netif_carrier_off(dev);
 	mlx4_en_set_default_moderation(priv);
 
-	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
+	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
 	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
 	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
@@ -3354,6 +3452,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 
 	return 0;
 
+err_free_tx:
+	while (t--) {
+		kfree(priv->tx_ring[t]);
+		kfree(priv->tx_cq[t]);
+	}
 out:
 	mlx4_en_destroy_netdev(dev);
 	return err;
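Note: for reference, the bound enforced by mlx4_en_check_xdp_mtu() above works out as follows on 4 KB pages, assuming the mainline constants ETH_HLEN = 14, VLAN_HLEN = 4 and XDP_PACKET_HEADROOM = 256 (XDP mode stores exactly one packet per page). A trivially runnable check of the arithmetic:

#include <stdio.h>

int main(void)
{
	int page_size = 4096;
	int eth_hlen = 14;	/* ETH_HLEN */
	int vlan_hlen = 4;	/* VLAN_HLEN, counted twice for QinQ */
	int xdp_headroom = 256;	/* XDP_PACKET_HEADROOM */

	/* mirrors the MLX4_EN_MAX_XDP_MTU definition: 3818 on 4K pages */
	printf("max XDP MTU = %d\n",
	       page_size - eth_hlen - 2 * vlan_hlen - xdp_headroom);
	return 0;
}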
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 5aa8b751f417..9166d90e7328 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -147,6 +147,39 @@ static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
 	return ret;
 }
 
+void mlx4_en_fold_software_stats(struct net_device *dev)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	unsigned long packets, bytes;
+	int i;
+
+	if (!priv->port_up || mlx4_is_master(mdev->dev))
+		return;
+
+	packets = 0;
+	bytes = 0;
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];
+
+		packets += READ_ONCE(ring->packets);
+		bytes += READ_ONCE(ring->bytes);
+	}
+	dev->stats.rx_packets = packets;
+	dev->stats.rx_bytes = bytes;
+
+	packets = 0;
+	bytes = 0;
+	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+		const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];
+
+		packets += READ_ONCE(ring->packets);
+		bytes += READ_ONCE(ring->bytes);
+	}
+	dev->stats.tx_packets = packets;
+	dev->stats.tx_bytes = bytes;
+}
+
 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 {
 	struct mlx4_counter tmp_counter_stats;
@@ -159,6 +192,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	u64 in_mod = reset << 8 | port;
 	int err;
 	int i, counter_index;
+	unsigned long sw_tx_dropped = 0;
 	unsigned long sw_rx_dropped = 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
@@ -166,7 +200,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 		return PTR_ERR(mailbox);
 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
 			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
-			   MLX4_CMD_WRAPPED);
+			   MLX4_CMD_NATIVE);
 	if (err)
 		goto out;
 
@@ -174,40 +208,42 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 
 	spin_lock_bh(&priv->stats_lock);
 
-	stats->rx_packets = 0;
-	stats->rx_bytes = 0;
+	mlx4_en_fold_software_stats(dev);
+
 	priv->port_stats.rx_chksum_good = 0;
 	priv->port_stats.rx_chksum_none = 0;
 	priv->port_stats.rx_chksum_complete = 0;
+	priv->xdp_stats.rx_xdp_drop = 0;
+	priv->xdp_stats.rx_xdp_tx = 0;
+	priv->xdp_stats.rx_xdp_tx_full = 0;
+
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		stats->rx_packets += priv->rx_ring[i]->packets;
-		stats->rx_bytes += priv->rx_ring[i]->bytes;
-		sw_rx_dropped += priv->rx_ring[i]->dropped;
-		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
-		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
-		priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
+		const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];
+
+		sw_rx_dropped += READ_ONCE(ring->dropped);
+		priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);
+		priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);
+		priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
+		priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);
+		priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx);
+		priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full);
 	}
-	stats->tx_packets = 0;
-	stats->tx_bytes = 0;
-	stats->tx_dropped = 0;
+
 	priv->port_stats.tx_chksum_offload = 0;
 	priv->port_stats.queue_stopped = 0;
 	priv->port_stats.wake_queue = 0;
 	priv->port_stats.tso_packets = 0;
 	priv->port_stats.xmit_more = 0;
 
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		const struct mlx4_en_tx_ring *ring = priv->tx_ring[i];
-
-		stats->tx_packets += ring->packets;
-		stats->tx_bytes += ring->bytes;
-		stats->tx_dropped += ring->tx_dropped;
-		priv->port_stats.tx_chksum_offload += ring->tx_csum;
-		priv->port_stats.queue_stopped += ring->queue_stopped;
-		priv->port_stats.wake_queue += ring->wake_queue;
-		priv->port_stats.tso_packets += ring->tso_packets;
-		priv->port_stats.xmit_more += ring->xmit_more;
+	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+		const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];
+
+		sw_tx_dropped += READ_ONCE(ring->tx_dropped);
+		priv->port_stats.tx_chksum_offload += READ_ONCE(ring->tx_csum);
+		priv->port_stats.queue_stopped += READ_ONCE(ring->queue_stopped);
+		priv->port_stats.wake_queue += READ_ONCE(ring->wake_queue);
+		priv->port_stats.tso_packets += READ_ONCE(ring->tso_packets);
+		priv->port_stats.xmit_more += READ_ONCE(ring->xmit_more);
 	}
+
+	if (mlx4_is_master(mdev->dev)) {
 		stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
 						   &mlx4_en_stats->RTOT_prio_1,
@@ -245,7 +281,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
 	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
 	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
-	stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
+	stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP) +
+			    sw_tx_dropped;
 
 	/* RX stats */
 	priv->pkstats.rx_multicast_packets = stats->multicast;
@@ -322,7 +359,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 		err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
 				   in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
 				   0, MLX4_CMD_DUMP_ETH_STATS,
-				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 		if (err)
 			goto out;
 	}
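Note: the pattern behind mlx4_en_fold_software_stats() is that per-ring counters are written only from their ring's napi context, so a reader may sum them without the stats lock as long as each load happens exactly once, via READ_ONCE(). A minimal self-contained sketch of that folding step (simplified structs; the READ_ONCE stand-in is for building outside the kernel):

#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

struct ring_stats {
	unsigned long packets;
	unsigned long bytes;
};

/* Sum per-ring counters locklessly; each field is loaded once. */
static void fold_rings(const struct ring_stats *rings, int n,
		       unsigned long *packets, unsigned long *bytes)
{
	int i;

	*packets = 0;
	*bytes = 0;
	for (i = 0; i < n; i++) {
		*packets += READ_ONCE(rings[i].packets);
		*bytes += READ_ONCE(rings[i].bytes);
	}
}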
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f2e8beddcf44..3c37e216bbf3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -96,7 +96,6 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
 	const struct mlx4_en_frag_info *frag_info;
 	struct page *page;
-	dma_addr_t dma;
 	int i;
 
 	for (i = 0; i < priv->num_frags; i++) {
@@ -115,9 +114,10 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
 
 	for (i = 0; i < priv->num_frags; i++) {
 		frags[i] = ring_alloc[i];
-		dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
+		frags[i].page_offset += priv->frag_info[i].rx_headroom;
+		rx_desc->data[i].addr = cpu_to_be64(frags[i].dma +
+						    frags[i].page_offset);
 		ring_alloc[i] = page_alloc[i];
-		rx_desc->data[i].addr = cpu_to_be64(dma);
 	}
 
 	return 0;
@@ -250,7 +250,8 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
 
 	if (ring->page_cache.index > 0) {
 		frags[0] = ring->page_cache.buf[--ring->page_cache.index];
-		rx_desc->data[0].addr = cpu_to_be64(frags[0].dma);
+		rx_desc->data[0].addr = cpu_to_be64(frags[0].dma +
+						    frags[0].page_offset);
 		return 0;
 	}
 
@@ -688,18 +689,23 @@ out_loopback:
 	dev_kfree_skb_any(skb);
 }
 
-static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
-				      struct mlx4_en_rx_ring *ring)
+static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
+				      struct mlx4_en_rx_ring *ring)
 {
-	int index = ring->prod & ring->size_mask;
+	u32 missing = ring->actual_size - (ring->prod - ring->cons);
 
-	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
-		if (mlx4_en_prepare_rx_desc(priv, ring, index,
+	/* Try to batch allocations, but not too much. */
+	if (missing < 8)
+		return false;
+	do {
+		if (mlx4_en_prepare_rx_desc(priv, ring,
+					    ring->prod & ring->size_mask,
 					    GFP_ATOMIC | __GFP_COLD))
 			break;
 		ring->prod++;
-		index = ring->prod & ring->size_mask;
-	}
+	} while (--missing);
+
+	return true;
 }
 
 /* When hardware doesn't strip the vlan, we need to calculate the checksum
@@ -788,7 +794,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	struct bpf_prog *xdp_prog;
 	int doorbell_pending;
 	struct sk_buff *skb;
-	int tx_index;
 	int index;
 	int nr;
 	unsigned int length;
@@ -808,7 +813,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(ring->xdp_prog);
 	doorbell_pending = 0;
-	tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring;
 
 	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
 	 * descriptor offset can be deduced from the CQE index instead of
@@ -877,8 +881,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		 */
 		length = be32_to_cpu(cqe->byte_cnt);
 		length -= ring->fcs_del;
-		ring->bytes += length;
-		ring->packets++;
 		l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
 			(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
 
@@ -888,6 +890,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		if (xdp_prog) {
 			struct xdp_buff xdp;
 			dma_addr_t dma;
+			void *orig_data;
 			u32 act;
 
 			dma = be64_to_cpu(rx_desc->data[0].addr);
@@ -895,31 +898,43 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 						priv->frag_info[0].frag_size,
 						DMA_FROM_DEVICE);
 
-			xdp.data = page_address(frags[0].page) +
-							frags[0].page_offset;
+			xdp.data_hard_start = page_address(frags[0].page);
+			xdp.data = xdp.data_hard_start + frags[0].page_offset;
 			xdp.data_end = xdp.data + length;
+			orig_data = xdp.data;
 
 			act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+			if (xdp.data != orig_data) {
+				length = xdp.data_end - xdp.data;
+				frags[0].page_offset = xdp.data -
+					xdp.data_hard_start;
+			}
+
 			switch (act) {
 			case XDP_PASS:
 				break;
 			case XDP_TX:
-				if (likely(!mlx4_en_xmit_frame(frags, dev,
-							length, tx_index,
+				if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
+							length, cq->ring,
 							&doorbell_pending)))
 					goto consumed;
-				goto xdp_drop; /* Drop on xmit failure */
+				goto xdp_drop_no_cnt; /* Drop on xmit failure */
 			default:
 				bpf_warn_invalid_xdp_action(act);
 			case XDP_ABORTED:
 			case XDP_DROP:
-xdp_drop:
+				ring->xdp_drop++;
+xdp_drop_no_cnt:
 				if (likely(mlx4_en_rx_recycle(ring, frags)))
 					goto consumed;
 				goto next;
 			}
 		}
 
+		ring->bytes += length;
+		ring->packets++;
+
 		if (likely(dev->features & NETIF_F_RXCSUM)) {
 			if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
 						      MLX4_CQE_STATUS_UDP)) {
@@ -1081,15 +1096,20 @@ consumed:
 out:
 	rcu_read_unlock();
 
-	if (doorbell_pending)
-		mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]);
+	if (polled) {
+		if (doorbell_pending)
+			mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq->ring]);
+
+		mlx4_cq_set_ci(&cq->mcq);
+		wmb(); /* ensure HW sees CQ consumer before we post new buffers */
+		ring->cons = cq->mcq.cons_index;
+	}
 
 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-	mlx4_cq_set_ci(&cq->mcq);
-	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
-	ring->cons = cq->mcq.cons_index;
-	mlx4_en_refill_rx_buffers(priv, ring);
-	mlx4_en_update_rx_prod_db(ring);
+
+	if (mlx4_en_refill_rx_buffers(priv, ring))
+		mlx4_en_update_rx_prod_db(ring);
+
 	return polled;
 }
 
@@ -1131,14 +1151,17 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
 			return budget;
 
 		/* Current cpu is not according to smp_irq_affinity -
-		 * probably affinity changed. need to stop this NAPI
-		 * poll, and restart it on the right CPU
+		 * probably affinity changed. Need to stop this NAPI
+		 * poll, and restart it on the right CPU.
+		 * Try to avoid returning a too small value (like 0),
+		 * to not fool net_rx_action() and its netdev_budget
 		 */
-		done = 0;
+		if (done)
+			done--;
 	}
 	/* Done for now */
-	napi_complete_done(napi, done);
-	mlx4_en_arm_cq(priv, cq);
+	if (napi_complete_done(napi, done))
+		mlx4_en_arm_cq(priv, cq);
 	return done;
 }
 
@@ -1151,37 +1174,41 @@ static const int frag_sizes[] = {
 
 void mlx4_en_calc_rx_buf(struct net_device *dev)
 {
-	enum dma_data_direction dma_dir = PCI_DMA_FROMDEVICE;
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
-	int order = MLX4_EN_ALLOC_PREFER_ORDER;
-	u32 align = SMP_CACHE_BYTES;
-	int buf_size = 0;
 	int i = 0;
 
 	/* bpf requires buffers to be set up as 1 packet per page.
 	 * This only works when num_frags == 1.
 	 */
-	if (priv->xdp_ring_num) {
-		dma_dir = PCI_DMA_BIDIRECTIONAL;
-		/* This will gain efficient xdp frame recycling at the expense
-		 * of more costly truesize accounting
+	if (priv->tx_ring_num[TX_XDP]) {
+		priv->frag_info[0].order = 0;
+		priv->frag_info[0].frag_size = eff_mtu;
+		priv->frag_info[0].frag_prefix_size = 0;
+		/* This will gain efficient xdp frame recycling at the
+		 * expense of more costly truesize accounting
 		 */
-		align = PAGE_SIZE;
-		order = 0;
-	}
-
-	while (buf_size < eff_mtu) {
-		priv->frag_info[i].order = order;
-		priv->frag_info[i].frag_size =
-			(eff_mtu > buf_size + frag_sizes[i]) ?
-			frag_sizes[i] : eff_mtu - buf_size;
-		priv->frag_info[i].frag_prefix_size = buf_size;
-		priv->frag_info[i].frag_stride =
-			ALIGN(priv->frag_info[i].frag_size, align);
-		priv->frag_info[i].dma_dir = dma_dir;
-		buf_size += priv->frag_info[i].frag_size;
-		i++;
+		priv->frag_info[0].frag_stride = PAGE_SIZE;
+		priv->frag_info[0].dma_dir = PCI_DMA_BIDIRECTIONAL;
+		priv->frag_info[0].rx_headroom = XDP_PACKET_HEADROOM;
+		i = 1;
+	} else {
+		int buf_size = 0;
+
+		while (buf_size < eff_mtu) {
+			priv->frag_info[i].order = MLX4_EN_ALLOC_PREFER_ORDER;
+			priv->frag_info[i].frag_size =
+				(eff_mtu > buf_size + frag_sizes[i]) ?
+				frag_sizes[i] : eff_mtu - buf_size;
+			priv->frag_info[i].frag_prefix_size = buf_size;
+			priv->frag_info[i].frag_stride =
+				ALIGN(priv->frag_info[i].frag_size,
+				      SMP_CACHE_BYTES);
+			priv->frag_info[i].dma_dir = PCI_DMA_FROMDEVICE;
+			priv->frag_info[i].rx_headroom = 0;
+			buf_size += priv->frag_info[i].frag_size;
+			i++;
+		}
 	}
 
 	priv->num_frags = i;
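Note: the buffer geometry the new XDP path in en_rx.c relies on, with offsets illustrative for XDP_PACKET_HEADROOM = 256:

/*   page: | headroom (256 B) | packet bytes .............. |
 *         ^ data_hard_start  ^ data                        ^ data_end
 *
 * The frame is DMA-mapped with headroom in front, so a BPF program may
 * grow or shrink headers (e.g. via bpf_xdp_adjust_head()); when
 * xdp.data moved, the driver re-derives both values before forwarding:
 *
 *	length = xdp.data_end - xdp.data;
 *	frags[0].page_offset = xdp.data - xdp.data_hard_start;
 */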
en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, ring->buf_size, - (unsigned long long) ring->wqres.buf.direct.map); + (unsigned long long) ring->sp_wqres.buf.direct.map); err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn, MLX4_RESERVE_ETH_BF_QP); @@ -114,12 +114,12 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, goto err_hwq_res; } - err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); + err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp, GFP_KERNEL); if (err) { en_err(priv, "Failed allocating qp %d\n", ring->qpn); goto err_reserve; } - ring->qp.event = mlx4_en_sqp_event; + ring->sp_qp.event = mlx4_en_sqp_event; err = mlx4_bf_alloc(mdev->dev, &ring->bf, node); if (err) { @@ -141,7 +141,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, if (queue_index < priv->num_tx_rings_p_up) cpumask_set_cpu(cpumask_local_spread(queue_index, priv->mdev->dev->numa_node), - &ring->affinity_mask); + &ring->sp_affinity_mask); *pring = ring; return 0; @@ -149,7 +149,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, err_reserve: mlx4_qp_release_range(mdev->dev, ring->qpn, 1); err_hwq_res: - mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); + mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size); err_bounce: kfree(ring->bounce_buf); ring->bounce_buf = NULL; @@ -171,10 +171,10 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, if (ring->bf_alloced) mlx4_bf_free(mdev->dev, &ring->bf); - mlx4_qp_remove(mdev->dev, &ring->qp); - mlx4_qp_free(mdev->dev, &ring->qp); + mlx4_qp_remove(mdev->dev, &ring->sp_qp); + mlx4_qp_free(mdev->dev, &ring->sp_qp); mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); - mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); + mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size); kfree(ring->bounce_buf); ring->bounce_buf = NULL; kvfree(ring->tx_info); @@ -190,7 +190,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_dev *mdev = priv->mdev; int err; - ring->cqn = cq; + ring->sp_cqn = cq; ring->prod = 0; ring->cons = 0xffffffff; ring->last_nr_txbb = 1; @@ -198,21 +198,21 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, memset(ring->buf, 0, ring->buf_size); ring->free_tx_desc = mlx4_en_free_tx_desc; - ring->qp_state = MLX4_QP_STATE_RST; - ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8); + ring->sp_qp_state = MLX4_QP_STATE_RST; + ring->doorbell_qpn = cpu_to_be32(ring->sp_qp.qpn << 8); ring->mr_key = cpu_to_be32(mdev->mr.key); - mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, - ring->cqn, user_prio, &ring->context); + mlx4_en_fill_qp_context(priv, ring->size, ring->sp_stride, 1, 0, ring->qpn, + ring->sp_cqn, user_prio, &ring->sp_context); if (ring->bf_alloced) - ring->context.usr_page = + ring->sp_context.usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev, ring->bf.uar->index)); - err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, - &ring->qp, &ring->qp_state); - if (!cpumask_empty(&ring->affinity_mask)) - netif_set_xps_queue(priv->dev, &ring->affinity_mask, + err = mlx4_qp_to_ready(mdev->dev, &ring->sp_wqres.mtt, &ring->sp_context, + &ring->sp_qp, &ring->sp_qp_state); + if (!cpumask_empty(&ring->sp_affinity_mask)) + netif_set_xps_queue(priv->dev, &ring->sp_affinity_mask, ring->queue_index); return err; @@ -223,8 +223,8 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, { struct mlx4_en_dev *mdev = priv->mdev; - mlx4_qp_modify(mdev->dev, 
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cf8f8a72a801..cd3638e6fe25 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1361,53 +1361,49 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 	kfree(priv->eq_table.uar_map);
 }

-/* A test that verifies that we can accept interrupts on all
- * the irq vectors of the device.
+/* A test that verifies that we can accept interrupts
+ * on the vector allocated for asynchronous events
+ */
+int mlx4_test_async(struct mlx4_dev *dev)
+{
+	return mlx4_NOP(dev);
+}
+EXPORT_SYMBOL(mlx4_test_async);
+
+/* A test that verifies that we can accept interrupts
+ * on the given irq vector of the tested port.
  * Interrupts are checked using the NOP command.
  */
-int mlx4_test_interrupts(struct mlx4_dev *dev)
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	int i;
 	int err;

-	err = mlx4_NOP(dev);
-	/* When not in MSI_X, there is only one irq to check */
-	if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
-		return err;
-
-	/* A loop over all completion vectors, for each vector we will check
-	 * whether it works by mapping command completions to that vector
-	 * and performing a NOP command
-	 */
-	for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
-		/* Make sure request_irq was called */
-		if (!priv->eq_table.eq[i].have_irq)
-			continue;
-
-		/* Temporary use polling for command completions */
-		mlx4_cmd_use_polling(dev);
-
-		/* Map the new eq to handle all asynchronous events */
-		err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
-				  priv->eq_table.eq[i].eqn);
-		if (err) {
-			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
-			mlx4_cmd_use_events(dev);
-			break;
-		}
+	/* Temporary use polling for command completions */
+	mlx4_cmd_use_polling(dev);

-		/* Go back to using events */
-		mlx4_cmd_use_events(dev);
-		err = mlx4_NOP(dev);
+	/* Map the new eq to handle all asynchronous events */
+	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
+			  priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
+	if (err) {
+		mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
+		goto out;
 	}

+	/* Go back to using events */
+	mlx4_cmd_use_events(dev);
+	err = mlx4_NOP(dev);
+	/* Return to default */
+	mlx4_cmd_use_polling(dev);
+out:
 	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
 		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+	mlx4_cmd_use_events(dev);
+
 	return err;
 }
-EXPORT_SYMBOL(mlx4_test_interrupts);
+EXPORT_SYMBOL(mlx4_test_interrupt);

 bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
 {
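Note the invariant the rewritten mlx4_test_interrupt() preserves: the function enters in event mode, temporarily switches commands to polling, and every exit path funnels through "out:" so the async EQ mapping and event mode are restored. An illustrative (non-driver) wrapper showing the same pairing, using only functions that appear in the hunk above:

    /* Sketch only: run fn() with polled command completions, restoring
     * event-driven completions on every path. */
    static int with_polled_commands(struct mlx4_dev *dev,
                                    int (*fn)(struct mlx4_dev *dev))
    {
            int err;

            mlx4_cmd_use_polling(dev);   /* commands complete by polling */
            err = fn(dev);
            mlx4_cmd_use_events(dev);    /* always restore event mode */
            return err;
    }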
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index c41ab31a39f8..84bab9f0732e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -49,9 +49,9 @@ enum {
 extern void __buggy_use_of_MLX4_GET(void);
 extern void __buggy_use_of_MLX4_PUT(void);

-static bool enable_qos = true;
+static bool enable_qos;
 module_param(enable_qos, bool, 0444);
-MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
+MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");

 #define MLX4_GET(dest, source, offset)				      \
 	do {							      \
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 62788b7c728d..b2ca8a635b2e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1102,6 +1102,14 @@ static int __set_port_type(struct mlx4_port_info *info,
 	int i;
 	int err = 0;

+	if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
+		mlx4_err(mdev,
+			 "Requested port type for port %d is not supported on this HCA\n",
+			 info->port);
+		err = -EINVAL;
+		goto err_sup;
+	}
+
 	mlx4_stop_sense(mdev);
 	mutex_lock(&priv->port_mutex);
 	info->tmp_type = port_type;
@@ -1147,7 +1155,7 @@ static int __set_port_type(struct mlx4_port_info *info,
 out:
 	mlx4_start_sense(mdev);
 	mutex_unlock(&priv->port_mutex);
-
+err_sup:
 	return err;
 }

@@ -4141,11 +4149,8 @@ static void mlx4_shutdown(struct pci_dev *pdev)
 	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
 	mutex_lock(&persist->interface_state_mutex);
-	if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
-		/* Notify mlx4 clients that the kernel is being shut down */
-		persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
+	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
 		mlx4_unload_one(pdev);
-	}
 	mutex_unlock(&persist->interface_state_mutex);
 }

diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 94b891c118c1..1a670b681555 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1457,7 +1457,12 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
 				u32 qpn, enum mlx4_net_trans_promisc_mode mode)
 {
-	struct mlx4_net_trans_rule rule;
+	struct mlx4_net_trans_rule rule = {
+		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
+		.exclusive = 0,
+		.allow_loopback = 1,
+	};
+
 	u64 *regid_p;

 	switch (mode) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e4878f31e45d..88ee7d8a5923 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -145,9 +145,10 @@ enum mlx4_resource {
 	RES_MTT,
 	RES_MAC,
 	RES_VLAN,
-	RES_EQ,
+	RES_NPORT_ID,
 	RES_COUNTER,
 	RES_FS_RULE,
+	RES_EQ,
 	MLX4_NUM_OF_RESOURCE_TYPE
 };

@@ -1329,8 +1330,6 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
 			       struct mlx4_cmd_info *cmd);
 int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
 			      int port, void *buf);
-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
-			       struct mlx4_cmd_mailbox *outbox);
 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
 				struct mlx4_vhcr *vhcr,
 				struct mlx4_cmd_mailbox *inbox,
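The mcg.c hunk above replaces a bare stack declaration with a designated initializer so that the fields of the rule struct that the function never assigns start at zero instead of stack garbage. A standalone illustration of that C guarantee (userspace, not patch code):

    /* Members not named in a designated initializer are zero-initialized
     * (C99 6.7.8); a plain local declaration leaves them indeterminate. */
    #include <stdio.h>

    struct rule { int queue_mode; int exclusive; int allow_loopback; int port; };

    int main(void)
    {
            struct rule r = { .queue_mode = 1, .allow_loopback = 1 };

            /* .exclusive and .port are guaranteed to be 0 here */
            printf("%d %d %d %d\n", r.queue_mode, r.exclusive,
                   r.allow_loopback, r.port);
            return 0;
    }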
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index a3528dd1e72e..ba1c6cd0cc79 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -207,8 +207,11 @@
  */

 enum cq_type {
-	RX = 0,
-	TX = 1,
+	/* keep tx types first */
+	TX,
+	TX_XDP,
+#define MLX4_EN_NUM_TX_TYPES (TX_XDP + 1)
+	RX,
 };

@@ -278,46 +281,50 @@ struct mlx4_en_tx_ring {
 	u32			last_nr_txbb;
 	u32			cons;
 	unsigned long		wake_queue;
+	struct netdev_queue	*tx_queue;
+	u32			(*free_tx_desc)(struct mlx4_en_priv *priv,
+						struct mlx4_en_tx_ring *ring,
+						int index, u8 owner,
+						u64 timestamp, int napi_mode);
+	struct mlx4_en_rx_ring	*recycle_ring;

 	/* cache line used and dirtied in mlx4_en_xmit() */
 	u32			prod ____cacheline_aligned_in_smp;
+	unsigned int		tx_dropped;
 	unsigned long		bytes;
 	unsigned long		packets;
 	unsigned long		tx_csum;
 	unsigned long		tso_packets;
 	unsigned long		xmit_more;
-	unsigned int		tx_dropped;
 	struct mlx4_bf		bf;
-	unsigned long		queue_stopped;

 	/* Following part should be mostly read */
-	cpumask_t		affinity_mask;
-	struct mlx4_qp		qp;
-	struct mlx4_hwq_resources wqres;
+	__be32			doorbell_qpn;
+	__be32			mr_key;
 	u32			size; /* number of TXBBs */
 	u32			size_mask;
-	u16			stride;
 	u32			full_size;
-	u16			cqn;	/* index of port CQ associated with this ring */
 	u32			buf_size;
-	__be32			doorbell_qpn;
-	__be32			mr_key;
 	void			*buf;
 	struct mlx4_en_tx_info	*tx_info;
-	struct mlx4_en_rx_ring	*recycle_ring;
-	u32			(*free_tx_desc)(struct mlx4_en_priv *priv,
-						struct mlx4_en_tx_ring *ring,
-						int index, u8 owner,
-						u64 timestamp, int napi_mode);
-	u8			*bounce_buf;
-	struct mlx4_qp_context	context;
 	int			qpn;
-	enum mlx4_qp_state	qp_state;
 	u8			queue_index;
 	bool			bf_enabled;
 	bool			bf_alloced;
-	struct netdev_queue	*tx_queue;
-	int			hwtstamp_tx_type;
+	u8			hwtstamp_tx_type;
+	u8			*bounce_buf;
+
+	/* Not used in fast path
+	 * Only queue_stopped might be used if BQL is not properly working.
+	 */
+	unsigned long		queue_stopped;
+	struct mlx4_hwq_resources sp_wqres;
+	struct mlx4_qp		sp_qp;
+	struct mlx4_qp_context	sp_context;
+	cpumask_t		sp_affinity_mask;
+	enum mlx4_qp_state	sp_qp_state;
+	u16			sp_stride;
+	u16			sp_cqn;	/* index of port CQ associated with this ring */
 } ____cacheline_aligned_in_smp;

 struct mlx4_en_rx_desc {
@@ -347,6 +354,9 @@ struct mlx4_en_rx_ring {
 	unsigned long csum_ok;
 	unsigned long csum_none;
 	unsigned long csum_complete;
+	unsigned long xdp_drop;
+	unsigned long xdp_tx;
+	unsigned long xdp_tx_full;
 	unsigned long dropped;
 	int hwtstamp_rx_filter;
 	cpumask_var_t affinity_mask;
@@ -361,7 +371,7 @@ struct mlx4_en_cq {
 	int size;
 	int buf_size;
 	int vector;
-	enum cq_type is_tx;
+	enum cq_type type;
 	u16 moder_time;
 	u16 moder_cnt;
 	struct mlx4_cqe *buf;
@@ -372,7 +382,7 @@ struct mlx4_en_cq {

 struct mlx4_en_port_profile {
 	u32 flags;
-	u32 tx_ring_num;
+	u32 tx_ring_num[MLX4_EN_NUM_TX_TYPES];
 	u32 rx_ring_num;
 	u32 tx_ring_size;
 	u32 rx_ring_size;
@@ -465,7 +475,8 @@ struct mlx4_en_frag_info {
 	u16 frag_prefix_size;
 	u32 frag_stride;
 	enum dma_data_direction dma_dir;
-	int order;
+	u16 order;
+	u16 rx_headroom;
 };

 #ifdef CONFIG_MLX4_EN_DCB
@@ -569,17 +580,16 @@ struct mlx4_en_priv {
 	u32 flags;
 	u8 num_tx_rings_p_up;
 	u32 tx_work_limit;
-	u32 tx_ring_num;
+	u32 tx_ring_num[MLX4_EN_NUM_TX_TYPES];
 	u32 rx_ring_num;
 	u32 rx_skb_size;
 	struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
 	u16 num_frags;
 	u16 log_rx_info;
-	int xdp_ring_num;

-	struct mlx4_en_tx_ring **tx_ring;
+	struct mlx4_en_tx_ring **tx_ring[MLX4_EN_NUM_TX_TYPES];
 	struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
-	struct mlx4_en_cq **tx_cq;
+	struct mlx4_en_cq **tx_cq[MLX4_EN_NUM_TX_TYPES];
 	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
 	struct mlx4_qp drop_qp;
 	struct work_struct rx_mode_task;
@@ -597,6 +607,7 @@ struct mlx4_en_priv {
 	struct mlx4_en_flow_stats_rx rx_flowstats;
 	struct mlx4_en_flow_stats_tx tx_flowstats;
 	struct mlx4_en_port_stats port_stats;
+	struct mlx4_en_xdp_stats xdp_stats;
 	struct mlx4_en_stats_bitmap stats_bitmap;
 	struct list_head mc_list;
 	struct list_head curr_list;
@@ -685,7 +696,8 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
 			 void *accel_priv, select_queue_fallback_t fallback);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
-netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
+netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
+			       struct mlx4_en_rx_alloc *frame,
 			       struct net_device *dev, unsigned int length,
 			       int tx_ind, int *doorbell_pending);
 void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
@@ -744,6 +756,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq);
 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac,
 			u64 clear, u8 mode);
 int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
+void mlx4_en_fold_software_stats(struct net_device *dev);
 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
 int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
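With the TX/TX_XDP split above, TX rings and CQs become two-level arrays indexed first by type, then by ring. A sketch of the resulting iteration idiom (the helper name is hypothetical; the types and bounds come from the header changes above):

    /* Apply fn to every TX ring of every type (regular and XDP). */
    static void mlx4_en_for_each_tx_ring(struct mlx4_en_priv *priv,
                                         void (*fn)(struct mlx4_en_tx_ring *ring))
    {
            int t, i;

            for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
                    for (i = 0; i < priv->tx_ring_num[t]; i++)
                            fn(priv->tx_ring[t][i]);
    }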
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 7fd466c0b929..48641cb0367f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -55,6 +55,13 @@ struct mlx4_en_perf_stats {
 #define NUM_PERF_COUNTERS		6
 };

+struct mlx4_en_xdp_stats {
+	unsigned long rx_xdp_drop;
+	unsigned long rx_xdp_tx;
+	unsigned long rx_xdp_tx_full;
+#define NUM_XDP_STATS		3
+};
+
 #define NUM_MAIN_STATS	21

 #define MLX4_NUM_PRIORITIES	8
@@ -107,7 +114,8 @@ enum {
 };

 #define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
-			 NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS)
+			 NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS + \
+			 NUM_XDP_STATS)

 #define MLX4_FIND_NETDEV_STAT(n)	(offsetof(struct net_device_stats, n) / \
 					 sizeof(((struct net_device_stats *)0)->n))
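The new aggregate struct pairs with the per-ring xdp_drop/xdp_tx/xdp_tx_full counters added to mlx4_en_rx_ring earlier. A possible shape of the folding step, using only fields introduced in this patch (the function name is a sketch; the driver's actual folding lives in its stats code and may differ):

    /* Sum per-RX-ring XDP counters into the device-wide snapshot. */
    static void mlx4_en_fold_xdp_stats(struct mlx4_en_priv *priv)
    {
            struct mlx4_en_xdp_stats *xdp = &priv->xdp_stats;
            int i;

            xdp->rx_xdp_drop = 0;
            xdp->rx_xdp_tx = 0;
            xdp->rx_xdp_tx_full = 0;

            for (i = 0; i < priv->rx_ring_num; i++) {
                    xdp->rx_xdp_drop    += priv->rx_ring[i]->xdp_drop;
                    xdp->rx_xdp_tx      += priv->rx_ring[i]->xdp_tx;
                    xdp->rx_xdp_tx_full += priv->rx_ring[i]->xdp_tx_full;
            }
    }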
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index c5b2064297a1..b656dd5772e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1728,24 +1728,13 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
 	return err;
 }

-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
-			       u32 in_mod, struct mlx4_cmd_mailbox *outbox)
-{
-	return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
-			    MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
-			    MLX4_CMD_NATIVE);
-}
-
 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
 				struct mlx4_vhcr *vhcr,
 				struct mlx4_cmd_mailbox *inbox,
 				struct mlx4_cmd_mailbox *outbox,
 				struct mlx4_cmd_info *cmd)
 {
-	if (slave != dev->caps.function)
-		return 0;
-	return mlx4_common_dump_eth_stats(dev, slave,
-					  vhcr->in_modifier, outbox);
+	return 0;
 }

 int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 84d7857ccc27..c548beaaf910 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1605,13 +1605,14 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
 			r->com.from_state = r->com.state;
 			r->com.to_state = state;
 			r->com.state = RES_EQ_BUSY;
-			if (eq)
-				*eq = r;
 		}
 	}

 	spin_unlock_irq(mlx4_tlock(dev));

+	if (!err && eq)
+		*eq = r;
+
 	return err;
 }
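The resource_tracker.c hunk above moves the out-pointer assignment to a single spot after the lock is dropped, guarded by the error code. A generic illustration of that shape (all types, helpers, and the lookup are hypothetical, not the driver's):

    /* Publish the looked-up object through the caller's out-pointer only
     * once, after the critical section, and only on success. */
    static int start_move_to(struct tracker *t, unsigned long index,
                             struct res **out)
    {
            struct res *r;
            int err;

            spin_lock_irq(&t->lock);
            r = idr_find(&t->ids, index);   /* hypothetical lookup table */
            err = r ? 0 : -ENOENT;
            if (!err)
                    r->state = RES_BUSY;    /* hypothetical busy marker */
            spin_unlock_irq(&t->lock);

            if (!err && out)
                    *out = r;
            return err;
    }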