author     Linus Torvalds <torvalds@linux-foundation.org>   2018-02-01 01:31:10 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-02-01 01:31:10 +0300
commit     b2fe5fa68642860e7de76167c3111623aa0d5de1
tree       b7f9b89b7039ecefbc35fe3c8e73a6ff972641dd  /drivers/net/ethernet/mellanox/mlx4
parent     a103950e0dd2058df5e8a8d4a915707bdcf205f0
parent     a54667f6728c2714a400f3c884727da74b6d1717
download   linux-b2fe5fa68642860e7de76167c3111623aa0d5de1.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) Significantly shrink the core networking routing structures. Result
of http://vger.kernel.org/~davem/seoul2017_netdev_keynote.pdf
2) Add netdevsim driver for testing various offloads, from Jakub
Kicinski.
3) Support cross-chip FDB operations in DSA, from Vivien Didelot.
4) Add a 2nd listener hash table for TCP, similar to what was done for
UDP. From Martin KaFai Lau.
5) Add eBPF based queue selection to tun, from Jason Wang.
6) Lockless qdisc support, from John Fastabend.
7) SCTP stream interleave support, from Xin Long.
8) Smoother TCP receive autotuning, from Eric Dumazet.
9) Lots of erspan tunneling enhancements, from William Tu.
10) Add true function call support to BPF, from Alexei Starovoitov.
11) Add explicit support for GRO HW offloading, from Michael Chan.
12) Support extack generation in more netlink subsystems. From Alexander
Aring, Quentin Monnet, and Jakub Kicinski.
13) Add 1000BaseX, flow control, and EEE support to the mvneta driver.
    From Russell King.
14) Add flow table abstraction to netfilter, from Pablo Neira Ayuso.
15) Many improvements and simplifications to the NFP driver bpf JIT,
from Jakub Kicinski.
16) Support for ipv6 non-equal cost multipath routing, from Ido
Schimmel.
17) Add resource abstraction to devlink, from Arkadi Sharshevsky.
18) Packet scheduler classifier shared filter block support, from Jiri
Pirko.
19) Avoid locking in act_csum, from Davide Caratti.
20) devinet_ioctl() simplifications from Al Viro.
21) More TCP bpf improvements from Lawrence Brakmo.
22) Add support for onlink ipv6 route flag, similar to ipv4, from David
Ahern.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1925 commits)
tls: Add support for encryption using async offload accelerator
ip6mr: fix stale iterator
net/sched: kconfig: Remove blank help texts
openvswitch: meter: Use 64-bit arithmetic instead of 32-bit
tcp_nv: fix potential integer overflow in tcpnv_acked
r8169: fix RTL8168EP take too long to complete driver initialization.
qmi_wwan: Add support for Quectel EP06
rtnetlink: enable IFLA_IF_NETNSID for RTM_NEWLINK
ipmr: Fix ptrdiff_t print formatting
ibmvnic: Wait for device response when changing MAC
qlcnic: fix deadlock bug
tcp: release sk_frag.page in tcp_disconnect
ipv4: Get the address of interface correctly.
net_sched: gen_estimator: fix lockdep splat
net: macb: Handle HRESP error
net/mlx5e: IPoIB, Fix copy-paste bug in flow steering refactoring
ipv6: addrconf: break critical section in addrconf_verify_rtnl()
ipv6: change route cache aging logic
i40e/i40evf: Update DESC_NEEDED value to reflect larger value
bnxt_en: cleanup DIM work on device shutdown
...
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c  |  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c  | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c      | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h    |  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c         | 40
6 files changed, 91 insertions(+), 55 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 5f41dc92aa68..1a0c3bf86ead 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -310,6 +310,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
 		}
 
 		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_VENDOR:
 		case IEEE_8021QAZ_TSA_STRICT:
 			break;
 		case IEEE_8021QAZ_TSA_ETS:
@@ -347,6 +348,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
 	/* higher TC means higher priority => lower pg */
 	for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
 		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_VENDOR:
+			pg[i] = MLX4_EN_TC_VENDOR;
+			tc_tx_bw[i] = MLX4_EN_BW_MAX;
+			break;
 		case IEEE_8021QAZ_TSA_STRICT:
 			pg[i] = num_strict++;
 			tc_tx_bw[i] = MLX4_EN_BW_MAX;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bf1f04164885..ebc1f566a4d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1094,12 +1094,21 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 	if (param->rx_jumbo_pending || param->rx_mini_pending)
 		return -EINVAL;
 
+	if (param->rx_pending < MLX4_EN_MIN_RX_SIZE) {
+		en_warn(priv, "%s: rx_pending (%d) < min (%d)\n",
+			__func__, param->rx_pending,
+			MLX4_EN_MIN_RX_SIZE);
+		return -EINVAL;
+	}
+	if (param->tx_pending < MLX4_EN_MIN_TX_SIZE) {
+		en_warn(priv, "%s: tx_pending (%d) < min (%lu)\n",
+			__func__, param->tx_pending,
+			MLX4_EN_MIN_TX_SIZE);
+		return -EINVAL;
+	}
+
 	rx_size = roundup_pow_of_two(param->rx_pending);
-	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
-	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
 	tx_size = roundup_pow_of_two(param->tx_pending);
-	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
-	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
 
 	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
 					priv->rx_ring[0]->size) &&
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 99051a294fa6..8fc51bc29003 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2172,8 +2172,9 @@ static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
 					   prof->rx_ring_size, priv->stride,
-					   node))
+					   node, i))
 			goto err;
+
 	}
 
 #ifdef CONFIG_RFS_ACCEL
@@ -3336,6 +3337,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
 #ifdef CONFIG_MLX4_EN_DCB
 	if (!mlx4_is_slave(priv->mdev->dev)) {
+		u8 prio;
+
+		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
+			priv->ets.prio_tc[prio] = prio;
+			priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
+		}
+
 		priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
 			DCB_CAP_DCBX_VER_IEEE;
 		priv->flags |= MLX4_EN_DCB_ENABLED;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 85e28efcda33..b4d144e67514 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -262,7 +262,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
-			   u32 size, u16 stride, int node)
+			   u32 size, u16 stride, int node, int queue_index)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_rx_ring *ring;
@@ -286,6 +286,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 	ring->log_stride = ffs(ring->stride) - 1;
 	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
 
+	if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
+		goto err_ring;
+
 	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
 	ring->rx_info = vzalloc_node(tmp, node);
@@ -293,7 +296,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 		ring->rx_info = vzalloc(tmp);
 		if (!ring->rx_info) {
 			err = -ENOMEM;
-			goto err_ring;
+			goto err_xdp_info;
 		}
 	}
 
@@ -317,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 err_info:
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
+err_xdp_info:
+	xdp_rxq_info_unreg(&ring->xdp_rxq);
 err_ring:
 	kfree(ring);
 	*pring = NULL;
@@ -440,6 +445,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
					lockdep_is_held(&mdev->state_lock));
 	if (old_prog)
 		bpf_prog_put(old_prog);
+	xdp_rxq_info_unreg(&ring->xdp_rxq);
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
@@ -617,6 +623,10 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 	return 0;
 }
 #endif
+
+/* We reach this function only after checking that any of
+ * the (IPv4 | IPv6) bits are set in cqe->status.
+ */
 static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
		      netdev_features_t dev_features)
 {
@@ -632,13 +642,11 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
 		hdr += sizeof(struct vlan_hdr);
 	}
 
-	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
-		return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 #if IS_ENABLED(CONFIG_IPV6)
 	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
 		return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
 #endif
-	return 0;
+	return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 }
 
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
@@ -650,6 +658,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	int cq_ring = cq->ring;
 	bool doorbell_pending;
 	struct mlx4_cqe *cqe;
+	struct xdp_buff xdp;
 	int polled = 0;
 	int index;
 
@@ -664,6 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(ring->xdp_prog);
+	xdp.rxq = &ring->xdp_rxq;
 	doorbell_pending = 0;
 
 	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -748,7 +758,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		 * read bytes but not past the end of the frag.
 		 */
 		if (xdp_prog) {
-			struct xdp_buff xdp;
 			dma_addr_t dma;
 			void *orig_data;
 			u32 act;
@@ -814,33 +823,33 @@ xdp_drop_no_cnt:
 		if (likely(dev->features & NETIF_F_RXCSUM)) {
 			if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
						      MLX4_CQE_STATUS_UDP)) {
-				if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
-				    cqe->checksum == cpu_to_be16(0xffff)) {
-					bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
-						(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
-
-					ip_summed = CHECKSUM_UNNECESSARY;
-					hash_type = PKT_HASH_TYPE_L4;
-					if (l2_tunnel)
-						skb->csum_level = 1;
-					ring->csum_ok++;
-				} else {
+				bool l2_tunnel;
+
+				if (!((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+				      cqe->checksum == cpu_to_be16(0xffff)))
					goto csum_none;
-				}
+
+				l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+					(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+				ip_summed = CHECKSUM_UNNECESSARY;
+				hash_type = PKT_HASH_TYPE_L4;
+				if (l2_tunnel)
+					skb->csum_level = 1;
+				ring->csum_ok++;
 			} else {
-				if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
-				    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
-							       MLX4_CQE_STATUS_IPV6))) {
-					if (check_csum(cqe, skb, va, dev->features)) {
-						goto csum_none;
-					} else {
-						ip_summed = CHECKSUM_COMPLETE;
-						hash_type = PKT_HASH_TYPE_L3;
-						ring->csum_complete++;
-					}
-				} else {
+				if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
+				      (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+#if IS_ENABLED(CONFIG_IPV6)
+								 MLX4_CQE_STATUS_IPV6))))
+#else
+								 0))))
+#endif
					goto csum_none;
-				}
+
+				if (check_csum(cqe, skb, va, dev->features))
+					goto csum_none;
+				ip_summed = CHECKSUM_COMPLETE;
+				hash_type = PKT_HASH_TYPE_L3;
+				ring->csum_complete++;
 			}
 		} else {
 csum_none:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2b72677eccd4..f470ae37d937 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -46,6 +46,7 @@
 #endif
 #include <linux/cpu_rmap.h>
 #include <linux/ptp_clock_kernel.h>
+#include <net/xdp.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/qp.h>
@@ -356,6 +357,7 @@ struct mlx4_en_rx_ring {
 	unsigned long dropped;
 	int hwtstamp_rx_filter;
 	cpumask_var_t affinity_mask;
+	struct xdp_rxq_info xdp_rxq;
 };
 
 struct mlx4_en_cq {
@@ -479,6 +481,7 @@ struct mlx4_en_frag_info {
 #define MLX4_EN_BW_MIN	1
 #define MLX4_EN_BW_MAX	100 /* Utilize 100% of the line */
 
+#define MLX4_EN_TC_VENDOR	0
 #define MLX4_EN_TC_ETS	7
 
 enum dcb_pfc_type {
@@ -719,7 +722,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
 void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
-			   u32 size, u16 stride, int node);
+			   u32 size, u16 stride, int node, int queue_index);
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index c7c0764991c9..2e84f10f59ba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -1103,30 +1103,16 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
 void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
 {
-	struct mlx4_cmd_mailbox *mailbox;
-	int err;
-
 	if (!fmr->maps)
 		return;
 
-	fmr->maps = 0;
+	/* To unmap: it is sufficient to take back ownership from HW */
+	*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
 
-	mailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(mailbox)) {
-		err = PTR_ERR(mailbox);
-		pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
-		return;
-	}
+	/* Make sure MPT status is visible */
+	wmb();
 
-	err = mlx4_HW2SW_MPT(dev, NULL,
-			     key_to_hw_index(fmr->mr.key) &
-			     (dev->caps.num_mpts - 1));
-	mlx4_free_cmd_mailbox(dev, mailbox);
-	if (err) {
-		pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
-		return;
-	}
-	fmr->mr.enabled = MLX4_MPT_EN_SW;
+	fmr->maps = 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
 
@@ -1136,6 +1122,22 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
 	if (fmr->maps)
 		return -EBUSY;
 
+	if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
+		/* In case of FMR was enabled and unmapped
+		 * make sure to give ownership of MPT back to HW
+		 * so HW2SW_MPT command will success.
+		 */
+		*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
+		/* Make sure MPT status is visible before changing MPT fields */
+		wmb();
+		fmr->mpt->length = 0;
+		fmr->mpt->start = 0;
+		/* Make sure MPT data is visible after changing MPT status */
+		wmb();
+		*(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
+		/* make sure MPT status is visible */
+		wmb();
+	}
 	ret = mlx4_mr_free(dev, &fmr->mr);
 	if (ret)
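
The en_rx.c hunks above follow the xdp_rxq_info pattern from <net/xdp.h>: register one xdp_rxq_info per RX ring at ring-creation time, point xdp_buff.rxq at it before running the XDP program, and unregister it on the error path and at ring destruction. Below is a minimal sketch of that pattern for a hypothetical driver; my_rx_ring, my_rx_ring_create() and my_rx_ring_destroy() are illustrative names and not part of mlx4, while the xdp_rxq_info_reg()/xdp_rxq_info_unreg() call signatures are the three-argument ones used in this diff.

#include <linux/netdevice.h>
#include <net/xdp.h>

struct my_rx_ring {
	struct xdp_rxq_info xdp_rxq;	/* one registration per RX queue */
	/* ... descriptor ring, buffers, counters ... */
};

static int my_rx_ring_create(struct net_device *dev, struct my_rx_ring *ring,
			     int queue_index)
{
	/* Register with the XDP core before the ring can see packets;
	 * the RX poll loop later sets xdp_buff.rxq = &ring->xdp_rxq.
	 */
	if (xdp_rxq_info_reg(&ring->xdp_rxq, dev, queue_index) < 0)
		return -EINVAL;

	/* ... allocate descriptors/buffers; on failure, undo the
	 * registration with xdp_rxq_info_unreg(&ring->xdp_rxq) ...
	 */
	return 0;
}

static void my_rx_ring_destroy(struct my_rx_ring *ring)
{
	/* Unregister only after no packet can still reference this ring */
	xdp_rxq_info_unreg(&ring->xdp_rxq);
	/* ... free descriptors/buffers ... */
}

As in the mlx4_en_process_rx_cq() hunks above, the xdp_buff is declared once per poll call and its rxq pointer is set once after the XDP program is dereferenced, rather than being reinitialized for every received packet.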