Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c  |  52
-rw-r--r--  drivers/net/bonding/bond_options.c  |  9
-rw-r--r--  drivers/net/can/bxcan.c  |  2
-rw-r--r--  drivers/net/can/dev/netlink.c  |  6
-rw-r--r--  drivers/net/can/esd/esdacc.c  |  2
-rw-r--r--  drivers/net/can/m_can/m_can.c  |  66
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c  |  6
-rw-r--r--  drivers/net/can/rockchip/rockchip_canfd-tx.c  |  2
-rw-r--r--  drivers/net/can/usb/gs_usb.c  |  23
-rw-r--r--  drivers/net/dsa/b53/b53_common.c  |  36
-rw-r--r--  drivers/net/dsa/b53/b53_regs.h  |  3
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c  |  98
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_reg.h  |  3
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c  |  4
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h  |  2
-rw-r--r--  drivers/net/ethernet/airoha/airoha_eth.c  |  16
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c  |  1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c  |  1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  |  6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h  |  3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c  |  5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c  |  2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c  |  4
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c  |  5
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c  |  7
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c  |  25
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c  |  3
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c  |  25
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h  |  2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c  |  2
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h  |  2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_desc_dqo.h  |  3
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ptp.c  |  15
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c  |  18
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig  |  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h  |  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c  |  10
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c  |  3
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c  |  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c  |  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c  |  3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c  |  9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h  |  2
-rw-r--r--  drivers/net/ethernet/intel/Kconfig  |  4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c  |  35
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c  |  2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sbq_cmd.h  |  1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ptp.c  |  3
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c  |  1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c  |  2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c  |  5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h  |  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  |  5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h  |  15
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c  |  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c  |  79
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h  |  1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ipsec.c  |  10
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h  |  7
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  |  34
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.h  |  8
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c  |  182
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h  |  1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c  |  1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c  |  6
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.c  |  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c  |  23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c  |  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h  |  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c  |  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c  |  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h  |  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c  |  25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c  |  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h  |  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c  |  33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c  |  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  |  33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  |  127
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c  |  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c  |  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c  |  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  |  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c  |  15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c  |  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c  |  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c  |  53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c  |  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  |  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c  |  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c  |  28
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c  |  18
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.c  |  2
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.h  |  4
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c  |  8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c  |  6
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c  |  34
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c  |  5
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c  |  24
-rw-r--r--  drivers/net/ethernet/sfc/mae.c  |  4
-rw-r--r--  drivers/net/ethernet/spacemit/k1_emac.c  |  3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c  |  9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  |  32
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c  |  4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c  |  2
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-qos.c  |  51
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c  |  63
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_config.c  |  7
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c  |  10
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.c  |  3
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_type.h  |  4
-rw-r--r--  drivers/net/mctp/mctp-usb.c  |  8
-rw-r--r--  drivers/net/mdio/mdio-airoha.c  |  2
-rw-r--r--  drivers/net/netconsole.c  |  31
-rw-r--r--  drivers/net/netdevsim/netdev.c  |  7
-rw-r--r--  drivers/net/ovpn/tcp.c  |  26
-rw-r--r--  drivers/net/phy/broadcom.c  |  20
-rw-r--r--  drivers/net/phy/dp83867.c  |  6
-rw-r--r--  drivers/net/phy/dp83869.c  |  4
-rw-r--r--  drivers/net/phy/mdio_bus.c  |  5
-rw-r--r--  drivers/net/phy/micrel.c  |  179
-rw-r--r--  drivers/net/phy/realtek/realtek_main.c  |  39
-rw-r--r--  drivers/net/usb/asix_devices.c  |  12
-rw-r--r--  drivers/net/usb/lan78xx.c  |  19
-rw-r--r--  drivers/net/usb/qmi_wwan.c  |  6
-rw-r--r--  drivers/net/usb/r8152.c  |  7
-rw-r--r--  drivers/net/usb/rtl8150.c  |  11
-rw-r--r--  drivers/net/usb/usbnet.c  |  4
-rw-r--r--  drivers/net/virtio_net.c  |  67
-rw-r--r--  drivers/net/wan/framer/pef2256/pef2256.c  |  7
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c  |  40
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.c  |  54
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c  |  10
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.c  |  3
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.c  |  156
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c  |  3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c  |  28
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h  |  3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/link.c  |  12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c  |  13
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.c  |  14
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/utils.c  |  12
-rw-r--r--  drivers/net/wireless/marvell/mwl8k.c  |  71
-rw-r--r--  drivers/net/wireless/virtual/mac80211_hwsim.c  |  21
-rw-r--r--  drivers/net/wireless/zydas/zd1211rw/zd_usb.c  |  1
145 files changed, 1813 insertions(+), 728 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4da619210c1f..5abef8a3b775 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2120,7 +2120,7 @@ skip_mac_set:
/* check for initial state */
new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
- if (netif_carrier_ok(slave_dev)) {
+ if (netif_running(slave_dev) && netif_carrier_ok(slave_dev)) {
if (bond->params.updelay) {
bond_set_slave_link_state(new_slave,
BOND_LINK_BACK,
@@ -2287,7 +2287,9 @@ skip_mac_set:
unblock_netpoll_tx();
}
- if (bond_mode_can_use_xmit_hash(bond))
+ /* broadcast mode uses the all_slaves to loop through slaves. */
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, NULL);
if (!slave_dev->netdev_ops->ndo_bpf ||
@@ -2463,7 +2465,8 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_upper_dev_unlink(bond, slave);
- if (bond_mode_can_use_xmit_hash(bond))
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, slave);
slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
@@ -2662,7 +2665,8 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_for_each_slave_rcu(bond, slave, iter) {
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
- link_state = netif_carrier_ok(slave->dev);
+ link_state = netif_running(slave->dev) &&
+ netif_carrier_ok(slave->dev);
switch (slave->link) {
case BOND_LINK_UP:
@@ -2871,7 +2875,7 @@ static void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
- bool should_notify_peers = false;
+ bool should_notify_peers;
bool commit;
unsigned long delay;
struct slave *slave;
@@ -2883,30 +2887,33 @@ static void bond_mii_monitor(struct work_struct *work)
goto re_arm;
rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
commit = !!bond_miimon_inspect(bond);
- if (bond->send_peer_notif) {
- rcu_read_unlock();
- if (rtnl_trylock()) {
- bond->send_peer_notif--;
- rtnl_unlock();
- }
- } else {
- rcu_read_unlock();
- }
- if (commit) {
+ rcu_read_unlock();
+
+ if (commit || bond->send_peer_notif) {
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
delay = 1;
- should_notify_peers = false;
goto re_arm;
}
- bond_for_each_slave(bond, slave, iter) {
- bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
+ if (commit) {
+ bond_for_each_slave(bond, slave, iter) {
+ bond_commit_link_state(slave,
+ BOND_SLAVE_NOTIFY_LATER);
+ }
+ bond_miimon_commit(bond);
+ }
+
+ if (bond->send_peer_notif) {
+ bond->send_peer_notif--;
+ if (should_notify_peers)
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
}
- bond_miimon_commit(bond);
rtnl_unlock(); /* might sleep, hold no other locks */
}
@@ -2914,13 +2921,6 @@ static void bond_mii_monitor(struct work_struct *work)
re_arm:
if (bond->params.miimon)
queue_delayed_work(bond->wq, &bond->mii_work, delay);
-
- if (should_notify_peers) {
- if (!rtnl_trylock())
- return;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
- rtnl_unlock();
- }
}
static int bond_upper_dev_walk(struct net_device *upper,
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 495a87f2ea7c..384499c869b8 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -225,13 +225,6 @@ static const struct bond_opt_value bond_ad_actor_sys_prio_tbl[] = {
{ NULL, -1, 0},
};
-static const struct bond_opt_value bond_actor_port_prio_tbl[] = {
- { "minval", 0, BOND_VALFLAG_MIN},
- { "maxval", 65535, BOND_VALFLAG_MAX},
- { "default", 255, BOND_VALFLAG_DEFAULT},
- { NULL, -1, 0},
-};
-
static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
{ "minval", 0, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
{ "maxval", 1023, BOND_VALFLAG_MAX},
@@ -497,7 +490,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.id = BOND_OPT_ACTOR_PORT_PRIO,
.name = "actor_port_prio",
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
- .values = bond_actor_port_prio_tbl,
+ .flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_actor_port_prio_set,
},
[BOND_OPT_AD_ACTOR_SYSTEM] = {
diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c
index bfc60eb33dc3..333ad42ea73b 100644
--- a/drivers/net/can/bxcan.c
+++ b/drivers/net/can/bxcan.c
@@ -842,7 +842,7 @@ static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb,
u32 id;
int i, j;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (bxcan_tx_busy(priv))
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 0591406b6f32..6f83b87d54fc 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -452,7 +452,9 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_CAN_RESTART_MS]) {
- if (!priv->do_set_mode) {
+ unsigned int restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+
+ if (restart_ms != 0 && !priv->do_set_mode) {
NL_SET_ERR_MSG(extack,
"Device doesn't support restart from Bus Off");
return -EOPNOTSUPP;
@@ -461,7 +463,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
return -EBUSY;
- priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+ priv->restart_ms = restart_ms;
}
if (data[IFLA_CAN_RESTART]) {
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
index c80032bc1a52..73e66f9a3781 100644
--- a/drivers/net/can/esd/esdacc.c
+++ b/drivers/net/can/esd/esdacc.c
@@ -254,7 +254,7 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
u32 acc_id;
u32 acc_dlc;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* Access core->tx_fifo_tail only once because it may be changed
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index e1d725979685..ad4f577c1ef7 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
-// Dong Aisheng <b29396@freescale.com>
+// Dong Aisheng <aisheng.dong@nxp.com>
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
/* Bosch M_CAN user manual can be obtained from:
@@ -812,6 +812,9 @@ static int m_can_handle_state_change(struct net_device *dev,
u32 timestamp = 0;
switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ break;
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cdev->can.can_stats.error_warning++;
@@ -841,6 +844,12 @@ static int m_can_handle_state_change(struct net_device *dev,
__m_can_get_berr_counter(dev, &bec);
switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
case CAN_STATE_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
@@ -877,30 +886,33 @@ static int m_can_handle_state_change(struct net_device *dev,
return 1;
}
-static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
+static enum can_state
+m_can_state_get_by_psr(struct m_can_classdev *cdev)
{
- struct m_can_classdev *cdev = netdev_priv(dev);
- int work_done = 0;
+ u32 reg_psr;
- if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
- netdev_dbg(dev, "entered error warning state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_ERROR_WARNING);
- }
+ reg_psr = m_can_read(cdev, M_CAN_PSR);
- if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
- netdev_dbg(dev, "entered error passive state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_ERROR_PASSIVE);
- }
+ if (reg_psr & PSR_BO)
+ return CAN_STATE_BUS_OFF;
+ if (reg_psr & PSR_EP)
+ return CAN_STATE_ERROR_PASSIVE;
+ if (reg_psr & PSR_EW)
+ return CAN_STATE_ERROR_WARNING;
- if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
- netdev_dbg(dev, "entered error bus off state\n");
- work_done += m_can_handle_state_change(dev,
- CAN_STATE_BUS_OFF);
- }
+ return CAN_STATE_ERROR_ACTIVE;
+}
- return work_done;
+static int m_can_handle_state_errors(struct net_device *dev)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ enum can_state new_state;
+
+ new_state = m_can_state_get_by_psr(cdev);
+ if (new_state == cdev->can.state)
+ return 0;
+
+ return m_can_handle_state_change(dev, new_state);
}
static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
@@ -1031,8 +1043,7 @@ static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus)
}
if (irqstatus & IR_ERR_STATE)
- work_done += m_can_handle_state_errors(dev,
- m_can_read(cdev, M_CAN_PSR));
+ work_done += m_can_handle_state_errors(dev);
if (irqstatus & IR_ERR_BUS_30X)
work_done += m_can_handle_bus_errors(dev, irqstatus,
@@ -1606,7 +1617,7 @@ static int m_can_start(struct net_device *dev)
netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0),
cdev->tx_max_coalesced_frames);
- cdev->can.state = CAN_STATE_ERROR_ACTIVE;
+ cdev->can.state = m_can_state_get_by_psr(cdev);
m_can_enable_all_interrupts(cdev);
@@ -2492,12 +2503,11 @@ int m_can_class_suspend(struct device *dev)
}
m_can_clk_stop(cdev);
+ cdev->can.state = CAN_STATE_SLEEPING;
}
pinctrl_pm_select_sleep_state(dev);
- cdev->can.state = CAN_STATE_SLEEPING;
-
return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);
@@ -2510,8 +2520,6 @@ int m_can_class_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- cdev->can.state = CAN_STATE_ERROR_ACTIVE;
-
if (netif_running(ndev)) {
ret = m_can_clk_start(cdev);
if (ret)
@@ -2529,6 +2537,8 @@ int m_can_class_resume(struct device *dev)
if (cdev->ops->init)
ret = cdev->ops->init(cdev);
+ cdev->can.state = m_can_state_get_by_psr(cdev);
+
m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
} else {
ret = m_can_start(ndev);
@@ -2546,7 +2556,7 @@ int m_can_class_resume(struct device *dev)
}
EXPORT_SYMBOL_GPL(m_can_class_resume);
-MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index b832566efda0..4a412add2b8d 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// IOMapped CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
-// Dong Aisheng <b29396@freescale.com>
+// Dong Aisheng <aisheng.dong@nxp.com>
//
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
@@ -180,7 +180,7 @@ static void m_can_plat_remove(struct platform_device *pdev)
struct m_can_classdev *mcan_class = &priv->cdev;
m_can_class_unregister(mcan_class);
-
+ pm_runtime_disable(mcan_class->dev);
m_can_class_free_dev(mcan_class->net);
}
@@ -236,7 +236,7 @@ static struct platform_driver m_can_plat_driver = {
module_platform_driver(m_can_plat_driver);
-MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("M_CAN driver for IO Mapped Bosch controllers");
diff --git a/drivers/net/can/rockchip/rockchip_canfd-tx.c b/drivers/net/can/rockchip/rockchip_canfd-tx.c
index 865a15e033a9..12200dcfd338 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-tx.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-tx.c
@@ -72,7 +72,7 @@ netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev)
int err;
u8 i;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (!netif_subqueue_maybe_stop(priv->ndev, 0,
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index c9482d6e947b..69b8d6da651b 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -289,11 +289,6 @@ struct gs_host_frame {
#define GS_MAX_RX_URBS 30
#define GS_NAPI_WEIGHT 32
-/* Maximum number of interfaces the driver supports per device.
- * Current hardware only supports 3 interfaces. The future may vary.
- */
-#define GS_MAX_INTF 3
-
struct gs_tx_context {
struct gs_can *dev;
unsigned int echo_id;
@@ -324,7 +319,6 @@ struct gs_can {
/* usb interface struct */
struct gs_usb {
- struct gs_can *canch[GS_MAX_INTF];
struct usb_anchor rx_submitted;
struct usb_device *udev;
@@ -336,9 +330,11 @@ struct gs_usb {
unsigned int hf_size_rx;
u8 active_channels;
+ u8 channel_cnt;
unsigned int pipe_in;
unsigned int pipe_out;
+ struct gs_can *canch[] __counted_by(channel_cnt);
};
/* 'allocate' a tx context.
@@ -599,7 +595,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
}
/* device reports out of range channel id */
- if (hf->channel >= GS_MAX_INTF)
+ if (hf->channel >= parent->channel_cnt)
goto device_detach;
dev = parent->canch[hf->channel];
@@ -699,7 +695,7 @@ resubmit_urb:
/* USB failure take down all interfaces */
if (rc == -ENODEV) {
device_detach:
- for (rc = 0; rc < GS_MAX_INTF; rc++) {
+ for (rc = 0; rc < parent->channel_cnt; rc++) {
if (parent->canch[rc])
netif_device_detach(parent->canch[rc]->netdev);
}
@@ -1249,6 +1245,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
netdev->dev_id = channel;
+ netdev->dev_port = channel;
/* dev setup */
strcpy(dev->bt_const.name, KBUILD_MODNAME);
@@ -1460,17 +1457,19 @@ static int gs_usb_probe(struct usb_interface *intf,
icount = dconf.icount + 1;
dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
- if (icount > GS_MAX_INTF) {
+ if (icount > type_max(parent->channel_cnt)) {
dev_err(&intf->dev,
"Driver cannot handle more that %u CAN interfaces\n",
- GS_MAX_INTF);
+ type_max(parent->channel_cnt));
return -EINVAL;
}
- parent = kzalloc(sizeof(*parent), GFP_KERNEL);
+ parent = kzalloc(struct_size(parent, canch, icount), GFP_KERNEL);
if (!parent)
return -ENOMEM;
+ parent->channel_cnt = icount;
+
init_usb_anchor(&parent->rx_submitted);
usb_set_intfdata(intf, parent);
@@ -1531,7 +1530,7 @@ static void gs_usb_disconnect(struct usb_interface *intf)
return;
}
- for (i = 0; i < GS_MAX_INTF; i++)
+ for (i = 0; i < parent->channel_cnt; i++)
if (parent->canch[i])
gs_destroy_candev(parent->canch[i]);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 2f846381d5a7..eb767edc4c13 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -371,11 +371,11 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
* frames should be flooded or not.
*/
b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
- mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
+ mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IP_MC;
b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
} else {
b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
- mgmt |= B53_IP_MCAST_25;
+ mgmt |= B53_IP_MC;
b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
}
}
@@ -1372,6 +1372,10 @@ static void b53_force_port_config(struct b53_device *dev, int port,
else
reg &= ~PORT_OVERRIDE_FULL_DUPLEX;
+ reg &= ~(0x3 << GMII_PO_SPEED_S);
+ if (is5301x(dev) || is58xx(dev))
+ reg &= ~PORT_OVERRIDE_SPEED_2000M;
+
switch (speed) {
case 2000:
reg |= PORT_OVERRIDE_SPEED_2000M;
@@ -1390,6 +1394,11 @@ static void b53_force_port_config(struct b53_device *dev, int port,
return;
}
+ if (is5325(dev))
+ reg &= ~PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg &= ~(PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW);
+
if (rx_pause) {
if (is5325(dev))
reg |= PORT_OVERRIDE_LP_FLOW_25;
@@ -1593,8 +1602,11 @@ static void b53_phylink_mac_link_down(struct phylink_config *config,
struct b53_device *dev = dp->ds->priv;
int port = dp->index;
- if (mode == MLO_AN_PHY)
+ if (mode == MLO_AN_PHY) {
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4))
+ b53_force_link(dev, port, false);
return;
+ }
if (mode == MLO_AN_FIXED) {
b53_force_link(dev, port, false);
@@ -1622,6 +1634,13 @@ static void b53_phylink_mac_link_up(struct phylink_config *config,
if (mode == MLO_AN_PHY) {
/* Re-negotiate EEE if it was enabled already */
p->eee_enabled = b53_eee_init(ds, port, phydev);
+
+ if (is63xx(dev) && in_range(port, B53_63XX_RGMII0, 4)) {
+ b53_force_port_config(dev, port, speed, duplex,
+ tx_pause, rx_pause);
+ b53_force_link(dev, port, true);
+ }
+
return;
}
@@ -2018,7 +2037,7 @@ static int b53_arl_search_wait(struct b53_device *dev)
do {
b53_read8(dev, B53_ARLIO_PAGE, offset, &reg);
if (!(reg & ARL_SRCH_STDN))
- return 0;
+ return -ENOENT;
if (reg & ARL_SRCH_VLID)
return 0;
@@ -2068,13 +2087,16 @@ static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
+ unsigned int count = 0, results_per_hit = 1;
struct b53_device *priv = ds->priv;
struct b53_arl_entry results[2];
- unsigned int count = 0;
u8 offset;
int ret;
u8 reg;
+ if (priv->num_arl_bins > 2)
+ results_per_hit = 2;
+
mutex_lock(&priv->arl_mutex);
if (is5325(priv) || is5365(priv))
@@ -2096,7 +2118,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
if (ret)
break;
- if (priv->num_arl_bins > 2) {
+ if (results_per_hit == 2) {
b53_arl_search_rd(priv, 1, &results[1]);
ret = b53_fdb_copy(port, &results[1], cb, data);
if (ret)
@@ -2106,7 +2128,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
break;
}
- } while (count++ < b53_max_arl_entries(priv) / 2);
+ } while (count++ < b53_max_arl_entries(priv) / results_per_hit);
mutex_unlock(&priv->arl_mutex);
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index 309fe0e46dad..8ce1ce72e938 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -111,8 +111,7 @@
/* IP Multicast control (8 bit) */
#define B53_IP_MULTICAST_CTRL 0x21
-#define B53_IP_MCAST_25 BIT(0)
-#define B53_IPMC_FWD_EN BIT(1)
+#define B53_IP_MC BIT(0)
#define B53_UC_FWD_EN BIT(6)
#define B53_MC_FWD_EN BIT(7)
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index d747ea1c41a7..5df8f153d511 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1355,9 +1355,15 @@ void ksz9477_config_cpu_port(struct dsa_switch *ds)
}
}
+#define RESV_MCAST_CNT 8
+
+static u8 reserved_mcast_map[RESV_MCAST_CNT] = { 0, 1, 3, 16, 32, 33, 2, 17 };
+
int ksz9477_enable_stp_addr(struct ksz_device *dev)
{
+ u8 i, ports, update;
const u32 *masks;
+ bool override;
u32 data;
int ret;
@@ -1366,23 +1372,87 @@ int ksz9477_enable_stp_addr(struct ksz_device *dev)
/* Enable Reserved multicast table */
ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
- /* Set the Override bit for forwarding BPDU packet to CPU */
- ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
- ALU_V_OVERRIDE | BIT(dev->cpu_port));
- if (ret < 0)
- return ret;
+ /* The reserved multicast address table has 8 entries. Each entry has
+ * a default value of which port to forward. It is assumed the host
+ * port is the last port in most of the switches, but that is not the
+ * case for KSZ9477 or maybe KSZ9897. For LAN937X family the default
+ * port is port 5, the first RGMII port. It is okay for LAN9370, a
+ * 5-port switch, but may not be correct for the other 8-port
+ * versions. It is necessary to update the whole table to forward to
+ * the right ports.
+ * Furthermore PTP messages can use a reserved multicast address and
+ * the host will not receive them if this table is not correct.
+ */
+ for (i = 0; i < RESV_MCAST_CNT; i++) {
+ data = reserved_mcast_map[i] <<
+ dev->info->shifts[ALU_STAT_INDEX];
+ data |= ALU_STAT_START |
+ masks[ALU_STAT_DIRECT] |
+ masks[ALU_RESV_MCAST_ADDR] |
+ masks[ALU_STAT_READ];
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
- data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0)
+ return ret;
- ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
- if (ret < 0)
- return ret;
+ ret = ksz_read32(dev, REG_SW_ALU_VAL_B, &data);
+ if (ret < 0)
+ return ret;
- /* wait to be finished */
- ret = ksz9477_wait_alu_sta_ready(dev);
- if (ret < 0) {
- dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
- return ret;
+ override = false;
+ ports = data & dev->port_mask;
+ switch (i) {
+ case 0:
+ case 6:
+ /* Change the host port. */
+ update = BIT(dev->cpu_port);
+ override = true;
+ break;
+ case 2:
+ /* Change the host port. */
+ update = BIT(dev->cpu_port);
+ break;
+ case 4:
+ case 5:
+ case 7:
+ /* Skip the host port. */
+ update = dev->port_mask & ~BIT(dev->cpu_port);
+ break;
+ default:
+ update = ports;
+ break;
+ }
+ if (update != ports || override) {
+ data &= ~dev->port_mask;
+ data |= update;
+ /* Set Override bit to receive frame even when port is
+ * closed.
+ */
+ if (override)
+ data |= ALU_V_OVERRIDE;
+ ret = ksz_write32(dev, REG_SW_ALU_VAL_B, data);
+ if (ret < 0)
+ return ret;
+
+ data = reserved_mcast_map[i] <<
+ dev->info->shifts[ALU_STAT_INDEX];
+ data |= ALU_STAT_START |
+ masks[ALU_STAT_DIRECT] |
+ masks[ALU_RESV_MCAST_ADDR] |
+ masks[ALU_STAT_WRITE];
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0)
+ return ret;
+ }
}
return 0;
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index ff579920078e..61ea11e3338e 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 register definitions
*
- * Copyright (C) 2017-2024 Microchip Technology Inc.
+ * Copyright (C) 2017-2025 Microchip Technology Inc.
*/
#ifndef __KSZ9477_REGS_H
@@ -397,7 +397,6 @@
#define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1)
#define ALU_STAT_START BIT(7)
-#define ALU_RESV_MCAST_ADDR BIT(1)
#define REG_SW_ALU_VAL_A 0x0420
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index a962055bfdbd..933ae8dc6337 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -808,6 +808,8 @@ static const u16 ksz9477_regs[] = {
static const u32 ksz9477_masks[] = {
[ALU_STAT_WRITE] = 0,
[ALU_STAT_READ] = 1,
+ [ALU_STAT_DIRECT] = 0,
+ [ALU_RESV_MCAST_ADDR] = BIT(1),
[P_MII_TX_FLOW_CTRL] = BIT(5),
[P_MII_RX_FLOW_CTRL] = BIT(3),
};
@@ -835,6 +837,8 @@ static const u8 ksz9477_xmii_ctrl1[] = {
static const u32 lan937x_masks[] = {
[ALU_STAT_WRITE] = 1,
[ALU_STAT_READ] = 2,
+ [ALU_STAT_DIRECT] = BIT(3),
+ [ALU_RESV_MCAST_ADDR] = BIT(2),
[P_MII_TX_FLOW_CTRL] = BIT(5),
[P_MII_RX_FLOW_CTRL] = BIT(3),
};
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index a1eb39771bb9..c65188cd3c0a 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -294,6 +294,8 @@ enum ksz_masks {
DYNAMIC_MAC_TABLE_TIMESTAMP,
ALU_STAT_WRITE,
ALU_STAT_READ,
+ ALU_STAT_DIRECT,
+ ALU_RESV_MCAST_ADDR,
P_MII_TX_FLOW_CTRL,
P_MII_RX_FLOW_CTRL,
};
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 833dd911980b..433a646e9831 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -1873,6 +1873,20 @@ static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
#endif
}
+static bool airoha_dev_tx_queue_busy(struct airoha_queue *q, u32 nr_frags)
+{
+ u32 tail = q->tail <= q->head ? q->tail + q->ndesc : q->tail;
+ u32 index = q->head + nr_frags;
+
+ /* completion napi can free out-of-order tx descriptors if hw QoS is
+ * enabled and packets with different priorities are queued to the same
+ * DMA ring. Take into account possible out-of-order reports checking
+ * if the tx queue is full using circular buffer head/tail pointers
+ * instead of the number of queued packets.
+ */
+ return index >= tail;
+}
+
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1926,7 +1940,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
txq = netdev_get_tx_queue(dev, qid);
nr_frags = 1 + skb_shinfo(skb)->nr_frags;
- if (q->queued + nr_frags > q->ndesc) {
+ if (airoha_dev_tx_queue_busy(q, nr_frags)) {
/* not enough space in the queue */
netif_tx_stop_queue(txq);
spin_unlock_bh(&q->lock);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index f0989aa01855..4dc631af7933 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1080,7 +1080,6 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
- pdata->phy_link = -1;
pdata->phy_speed = SPEED_UNKNOWN;
return pdata->phy_if.phy_reset(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 1a37ec45e650..7675bb98f029 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1555,6 +1555,7 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
pdata->phy.duplex = DUPLEX_FULL;
}
+ pdata->phy_link = 0;
pdata->phy.link = 0;
pdata->phy.pause_autoneg = pdata->pause_autoneg;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 3fc33b1b4dfb..a625e7c311dd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -12439,7 +12439,7 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
return -ENODEV;
}
-static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
+void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -16892,6 +16892,10 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (netif_running(dev))
netif_close(dev);
+ if (bnxt_hwrm_func_drv_unrgtr(bp)) {
+ pcie_flr(pdev);
+ goto shutdown_exit;
+ }
bnxt_ptp_clear(bp);
bnxt_clear_int_mode(bp);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 741b2d854789..3613a172483a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2149,7 +2149,7 @@ struct bnxt_bs_trace_info {
static inline void bnxt_bs_trace_check_wrap(struct bnxt_bs_trace_info *bs_trace,
u32 offset)
{
- if (!bs_trace->wrapped &&
+ if (!bs_trace->wrapped && bs_trace->magic_byte &&
*bs_trace->magic_byte != BNXT_TRACE_BUF_MAGIC_BYTE)
bs_trace->wrapped = 1;
bs_trace->last_offset = offset;
@@ -2941,6 +2941,7 @@ void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 0181ab1f2dfd..ccb8b509662d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -333,13 +333,14 @@ static void bnxt_fill_drv_seg_record(struct bnxt *bp,
u32 offset = 0;
int rc = 0;
+ record->max_entries = cpu_to_le32(ctxm->max_entries);
+ record->entry_size = cpu_to_le32(ctxm->entry_size);
+
rc = bnxt_dbg_hwrm_log_buffer_flush(bp, type, 0, &offset);
if (rc)
return;
bnxt_bs_trace_check_wrap(bs_trace, offset);
- record->max_entries = cpu_to_le32(ctxm->max_entries);
- record->entry_size = cpu_to_le32(ctxm->entry_size);
record->offset = cpu_to_le32(bs_trace->last_offset);
record->wrapped = bs_trace->wrapped;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 02961d93ed35..67ca02d84c97 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -461,7 +461,7 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
rtnl_unlock();
break;
}
- bnxt_cancel_reservations(bp, false);
+ bnxt_clear_reservations(bp, false);
bnxt_free_ctx_mem(bp, false);
break;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index db81cf6d5289..0abaa2bbe357 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -1051,9 +1051,9 @@ static void bnxt_ptp_free(struct bnxt *bp)
if (ptp->ptp_clock) {
ptp_clock_unregister(ptp->ptp_clock);
ptp->ptp_clock = NULL;
- kfree(ptp->ptp_info.pin_config);
- ptp->ptp_info.pin_config = NULL;
}
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
}
int bnxt_ptp_init(struct bnxt *bp)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 7f00ec7fd7b9..d78cafdb2094 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -5803,7 +5803,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
u32 current_speed = SPEED_UNKNOWN;
u8 current_duplex = DUPLEX_UNKNOWN;
bool current_link_up = false;
- u32 local_adv, remote_adv, sgsr;
+ u32 local_adv = 0, remote_adv = 0, sgsr;
if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720) &&
@@ -5944,9 +5944,6 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
else
current_duplex = DUPLEX_HALF;
- local_adv = 0;
- remote_adv = 0;
-
if (bmcr & BMCR_ANENABLE) {
u32 common;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index ecd9a0bd5e18..49b57bb5fac1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -290,9 +290,15 @@ static int ch_ipsec_xfrm_add_state(struct net_device *dev,
return -EINVAL;
}
+ if (unlikely(!try_module_get(THIS_MODULE))) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire module reference");
+ return -ENODEV;
+ }
+
sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
if (!sa_entry) {
res = -ENOMEM;
+ module_put(THIS_MODULE);
goto out;
}
@@ -301,7 +307,6 @@ static int ch_ipsec_xfrm_add_state(struct net_device *dev,
sa_entry->esn = 1;
ch_ipsec_setkey(x, sa_entry);
x->xso.offload_handle = (unsigned long)sa_entry;
- try_module_get(THIS_MODULE);
out:
return res;
}
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 1996d2e4e3e2..6e4f17142519 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -508,25 +508,34 @@ static int alloc_list(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
/* Allocated fixed size of skbuff */
struct sk_buff *skb;
+ dma_addr_t addr;
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
- if (!skb) {
- free_list(dev);
- return -ENOMEM;
- }
+ if (!skb)
+ goto err_free_list;
+
+ addr = dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pdev->dev, addr))
+ goto err_kfree_skb;
np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
((i + 1) % RX_RING_SIZE) *
sizeof(struct netdev_desc));
/* Rubicon now supports 40 bits of addressing space. */
- np->rx_ring[i].fraginfo =
- cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
- np->rx_buf_sz, DMA_FROM_DEVICE));
+ np->rx_ring[i].fraginfo = cpu_to_le64(addr);
np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
}
return 0;
+
+err_kfree_skb:
+ dev_kfree_skb(np->rx_skbuff[i]);
+ np->rx_skbuff[i] = NULL;
+err_free_list:
+ free_list(dev);
+ return -ENOMEM;
}
static void rio_hw_init(struct net_device *dev)
@@ -724,7 +733,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
u64 tfc_vlan_tag = 0;
if (np->link_status == 0) { /* Link Down */
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
entry = np->cur_tx % TX_RING_SIZE;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index c96d1d6ba8fe..18d86badd6ea 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1077,8 +1077,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
dma_addr_t addr;
buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
- aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
- DPAA2_ETH_TX_BUF_ALIGN);
+ aligned_start = PTR_ALIGN(buffer_start, DPAA2_ETH_TX_BUF_ALIGN);
if (aligned_start >= skb->head)
buffer_start = aligned_start;
else
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index aae462a0cf5a..0535e92404e3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1595,6 +1595,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd;
struct sk_buff *skb;
@@ -1630,7 +1632,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
rx_byte_cnt += skb->len + ETH_HLEN;
rx_frm_cnt++;
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
}
rx_ring->next_to_clean = i;
@@ -1638,6 +1642,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
@@ -1947,6 +1953,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd, *orig_rxbd;
struct xdp_buff xdp_buff;
@@ -2010,7 +2018,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
*/
enetc_bulk_flip_buff(rx_ring, orig_i, i);
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
break;
case XDP_TX:
tx_ring = priv->xdp_tx_ring[rx_ring->index];
@@ -2045,7 +2055,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
}
break;
case XDP_REDIRECT:
+ enetc_unlock_mdio();
err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+ enetc_lock_mdio();
if (unlikely(err)) {
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_redirect_failures++;
@@ -2065,8 +2077,11 @@ out:
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
- if (xdp_redirect_frm_cnt)
+ if (xdp_redirect_frm_cnt) {
+ enetc_unlock_mdio();
xdp_do_flush();
+ enetc_lock_mdio();
+ }
if (xdp_tx_frm_cnt)
enetc_update_tx_ring_tail(tx_ring);
@@ -2075,6 +2090,8 @@ out:
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
rx_ring->xdp.xdp_tx_in_flight);
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
@@ -2093,6 +2110,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
for (i = 0; i < v->count_tx_rings; i++)
if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
complete = false;
+ enetc_unlock_mdio();
prog = rx_ring->xdp.prog;
if (prog)
@@ -2104,10 +2122,8 @@ static int enetc_poll(struct napi_struct *napi, int budget)
if (work_done)
v->rx_napi_work = true;
- if (!complete) {
- enetc_unlock_mdio();
+ if (!complete)
return budget;
- }
napi_complete_done(napi, work_done);
@@ -2116,6 +2132,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
v->rx_napi_work = false;
+ enetc_lock_mdio();
/* enable interrupts */
enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 0ec010a7d640..f279fa597991 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -76,7 +76,7 @@ struct enetc_lso_t {
#define ENETC_LSO_MAX_DATA_LEN SZ_256K
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
-#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
+#define ENETC_RXB_TRUESIZE (PAGE_SIZE >> 1)
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE \
(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1edcfaee6819..3222359ac15b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1835,6 +1835,8 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
ndev->stats.rx_packets++;
pkt_len = fec16_to_cpu(bdp->cbd_datlen);
ndev->stats.rx_bytes += pkt_len;
+ if (fep->quirks & FEC_QUIRK_HAS_RACC)
+ ndev->stats.rx_bytes -= 2;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
page = rxq->rx_skb_info[index].page;
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index bceaf9b05cb4..4cc6dcbfd367 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -100,6 +100,8 @@
*/
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
+#define GVE_DQO_RX_HWTSTAMP_VALID 0x1
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
index d17da841b5a0..f7786b03c744 100644
--- a/drivers/net/ethernet/google/gve/gve_desc_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
@@ -236,7 +236,8 @@ struct gve_rx_compl_desc_dqo {
u8 status_error1;
- __le16 reserved5;
+ u8 reserved5;
+ u8 ts_sub_nsecs_low;
__le16 buf_id; /* Buffer ID which was sent on the buffer queue. */
union {
diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c
index e96247c9d68d..a384a9ed4914 100644
--- a/drivers/net/ethernet/google/gve/gve_ptp.c
+++ b/drivers/net/ethernet/google/gve/gve_ptp.c
@@ -26,6 +26,19 @@ int gve_clock_nic_ts_read(struct gve_priv *priv)
return 0;
}
+static int gve_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int gve_ptp_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
static long gve_ptp_do_aux_work(struct ptp_clock_info *info)
{
const struct gve_ptp *ptp = container_of(info, struct gve_ptp, info);
@@ -47,6 +60,8 @@ out:
static const struct ptp_clock_info gve_ptp_caps = {
.owner = THIS_MODULE,
.name = "gve clock",
+ .gettimex64 = gve_ptp_gettimex64,
+ .settime64 = gve_ptp_settime64,
.do_aux_work = gve_ptp_do_aux_work,
};
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 55393b784317..1aff3bbb8cfc 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -456,14 +456,20 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
* Note that this means if the time delta between packet reception and the last
* clock read is greater than ~2 seconds, this will provide invalid results.
*/
-static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx, u32 hwts)
+static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx,
+ const struct gve_rx_compl_desc_dqo *desc)
{
u64 last_read = READ_ONCE(rx->gve->last_sync_nic_counter);
struct sk_buff *skb = rx->ctx.skb_head;
- u32 low = (u32)last_read;
- s32 diff = hwts - low;
-
- skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff);
+ u32 ts, low;
+ s32 diff;
+
+ if (desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID) {
+ ts = le32_to_cpu(desc->ts);
+ low = (u32)last_read;
+ diff = ts - low;
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff);
+ }
}
static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
@@ -944,7 +950,7 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);
if (rx->gve->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)
- gve_rx_skb_hwtstamp(rx, le32_to_cpu(desc->ts));
+ gve_rx_skb_hwtstamp(rx, desc);
/* RSC packets must set gso_size otherwise the TCP stack will complain
* that packets are larger than MTU.
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 65302c41bfb1..38875c196cb6 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -148,6 +148,7 @@ config HIBMCGE
tristate "Hisilicon BMC Gigabit Ethernet Device Support"
depends on PCI && PCI_MSI
select PHYLIB
+ select FIXED_PHY
select MOTORCOMM_PHY
select REALTEK_PHY
help
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index ea09a09c451b..2097e4c2b3d7 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -17,6 +17,7 @@
#define HBG_PCU_CACHE_LINE_SIZE 32
#define HBG_TX_TIMEOUT_BUF_LEN 1024
#define HBG_RX_DESCR 0x01
+#define HBG_NO_PHY 0xFF
#define HBG_PACKET_HEAD_SIZE ((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \
HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
index 83cf75bf7a17..e11495b7ee98 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -136,12 +136,11 @@ static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ netif_device_detach(netdev);
return PCI_ERS_RESULT_DISCONNECT;
+ }
- pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -150,6 +149,9 @@ static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct hbg_priv *priv = netdev_priv(netdev);
+ netif_device_detach(netdev);
+ pci_disable_device(pdev);
+
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"failed to re-enable PCI device after reset\n");
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index d0aa0661ecd4..d6e8ce8e351a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -244,6 +244,9 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
+ if (priv->mac.phy_addr == HBG_NO_PHY)
+ return;
+
/* wait MAC link up */
ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,
link_status,
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
index 8af0bc4cca21..ae4cb35186d8 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -32,6 +32,7 @@ static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,
const struct hbg_irq_info *irq_info)
{
priv->stats.rx_fifo_less_empty_thrsld_cnt++;
+ hbg_hw_irq_enable(priv, irq_info->mask, true);
}
#define HBG_IRQ_I(name, handle) \
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index 37791de47f6f..b6f0a2780ea8 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -20,7 +20,6 @@
#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)
#define HBG_NP_LINK_FAIL_RETRY_TIMES 5
-#define HBG_NO_PHY 0xFF
static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 9d34d28ff168..782bb48c9f3d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -9429,8 +9429,7 @@ static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
/* this command reads phy id and register at the same time */
fallthrough;
case SIOCGMIIREG:
- data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
- return 0;
+ return hclge_read_phy_reg(hdev, data->reg_num, &data->val_out);
case SIOCSMIIREG:
return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 96553109f44c..cf881108fa57 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -274,7 +274,7 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev)
phy_stop(phydev);
}
-u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
+int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val)
{
struct hclge_phy_reg_cmd *req;
struct hclge_desc desc;
@@ -286,11 +286,14 @@ u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
req->reg_addr = cpu_to_le16(reg_addr);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"failed to read phy reg, ret = %d.\n", ret);
+ return ret;
+ }
- return le16_to_cpu(req->reg_val);
+ *val = le16_to_cpu(req->reg_val);
+ return 0;
}
int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index 4200d0b6d931..21d434c82475 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -13,7 +13,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);
-u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr);
+int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val);
int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val);
#endif
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index a563a94e2780..122ee23497e6 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -146,7 +146,7 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
- select LIBIE_FWLOG
+ select LIBIE_FWLOG if DEBUG_FS
select MDIO
select NET_DEVLINK
select PLDMFW
@@ -298,7 +298,7 @@ config ICE
select DIMLIB
select LIBIE
select LIBIE_ADMINQ
- select LIBIE_FWLOG
+ select LIBIE_FWLOG if DEBUG_FS
select NET_DEVLINK
select PACKING
select PLDMFW
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2250426ec91b..2532b6f82e97 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4382,6 +4382,15 @@ int ice_get_phy_lane_number(struct ice_hw *hw)
unsigned int lane;
int err;
+ /* E82X does not have sequential IDs, lane number is PF ID.
+ * For E825 device, the exception is the variant with external
+ * PHY (0x579F), in which there is also 1:1 pf_id -> lane_number
+ * mapping.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->device_id == ICE_DEV_ID_E825C_SGMII)
+ return hw->pf_id;
+
options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
if (!options)
return -ENOMEM;
@@ -6497,6 +6506,28 @@ u32 ice_get_link_speed(u16 index)
}
/**
+ * ice_get_dest_cgu - get destination CGU dev for given HW
+ * @hw: pointer to the HW struct
+ *
+ * Get CGU client id for CGU register read/write operations.
+ *
+ * Return: CGU device id to use in SBQ transactions.
+ */
+static enum ice_sbq_dev_id ice_get_dest_cgu(struct ice_hw *hw)
+{
+	/* On a dual complex E825, only complex 0 has a functional CGU powering
+	 * all the PHYs.
+	 * The SBQ destination device cgu points to the CGU on the current
+	 * complex; to access the primary CGU from the secondary complex, the
+	 * driver should use cgu_peer as the destination device.
+	 */
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) &&
+ !ice_is_primary(hw))
+ return ice_sbq_dev_cgu_peer;
+ return ice_sbq_dev_cgu;
+}
+
+/**
* ice_read_cgu_reg - Read a CGU register
* @hw: Pointer to the HW struct
* @addr: Register address to read
@@ -6510,8 +6541,8 @@ u32 ice_get_link_speed(u16 index)
int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
.opcode = ice_sbq_msg_rd,
- .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr
};
int err;
@@ -6542,8 +6573,8 @@ int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
.opcode = ice_sbq_msg_wr,
- .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr,
.data = val
};
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 363ae79a3620..013c93b6605e 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1479,7 +1479,7 @@ static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
hw->blk[blk].masks.count = per_pf;
- hw->blk[blk].masks.first = hw->pf_id * per_pf;
+ hw->blk[blk].masks.first = hw->logical_pf_id * per_pf;
memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index 183dd5457d6a..21bb861febbf 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -50,6 +50,7 @@ enum ice_sbq_dev_id {
ice_sbq_dev_phy_0 = 0x02,
ice_sbq_dev_cgu = 0x06,
ice_sbq_dev_phy_0_peer = 0x0D,
+ ice_sbq_dev_cgu_peer = 0x0F,
};
enum ice_sbq_msg_opcode {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
index 142823af1f9e..3e1052d070cf 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -863,6 +863,9 @@ static void idpf_ptp_release_vport_tstamp(struct idpf_vport *vport)
u64_stats_inc(&vport->tstamp_stats.flushed);
list_del(&ptp_tx_tstamp->list_member);
+ if (ptp_tx_tstamp->skb)
+ consume_skb(ptp_tx_tstamp->skb);
+
kfree(ptp_tx_tstamp);
}
u64_stats_update_end(&vport->tstamp_stats.stats_sync);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
index 8a2e0f8c5e36..61cedb6f2854 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
@@ -517,6 +517,7 @@ idpf_ptp_get_tstamp_value(struct idpf_vport *vport,
shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps);
consume_skb(ptp_tx_tstamp->skb);
+ ptp_tx_tstamp->skb = NULL;
list_add(&ptp_tx_tstamp->list_member,
&tx_tstamp_caps->latches_free);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index f8a208c84f15..10e2445e0ded 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2281,7 +2281,7 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_PRIV_FLAGS:
return IGB_PRIV_FLAGS_STR_LEN;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index f3e7218ba6f3..bb783042d1af 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -810,7 +810,7 @@ static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_PRIV_FLAGS:
return IGC_PRIV_FLAGS_STR_LEN;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
@@ -2094,6 +2094,9 @@ static void igc_ethtool_diag_test(struct net_device *netdev,
netdev_info(adapter->netdev, "Offline testing starting");
set_bit(__IGC_TESTING, &adapter->state);
+ /* power up PHY for link test */
+ igc_power_up_phy_copper(&adapter->hw);
+
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 14d275270123..dce4936708eb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -821,9 +821,7 @@ struct ixgbe_adapter {
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
-#ifdef CONFIG_DEBUG_FS
struct dentry *ixgbe_dbg_adapter;
-#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
/* Bitmask indicating in use pools */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 90d4e57b1c93..3190ce7e44c7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -11507,10 +11507,10 @@ static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
shutdown_aci:
mutex_destroy(&adapter->hw.aci.lock);
ixgbe_release_hw_control(adapter);
- devlink_free(adapter->devlink);
clean_up_probe:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
+ devlink_free(adapter->devlink);
pci_release_mem_regions(pdev);
if (disable_dev)
pci_disable_device(pdev);
@@ -12101,7 +12101,6 @@ static void ixgbe_remove(struct pci_dev *pdev)
devl_port_unregister(&adapter->devlink_port);
devl_unlock(adapter->devlink);
- devlink_free(adapter->devlink);
ixgbe_stop_ipsec_offload(adapter);
ixgbe_clear_interrupt_scheme(adapter);
@@ -12137,6 +12136,8 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (disable_dev)
pci_disable_device(pdev);
+
+ devlink_free(adapter->devlink);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 4af149b63a39..0334ed4b8fa3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -50,6 +50,9 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
+ ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
+ ixgbe_mbox_api_16, /* API version 1.6, linux/freebsd VF driver */
+ ixgbe_mbox_api_17, /* API version 1.7, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -86,6 +89,12 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+/* mailbox API, version 1.6 VF requests */
+#define IXGBE_VF_GET_PF_LINK_STATE 0x11 /* request PF to send link info */
+
+/* mailbox API, version 1.7 VF requests */
+#define IXGBE_VF_FEATURES_NEGOTIATE 0x12 /* get features supported by PF */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
@@ -96,6 +105,12 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+/* features negotiated between PF/VF */
+#define IXGBEVF_PF_SUP_IPSEC BIT(0)
+#define IXGBEVF_PF_SUP_ESX_MBX BIT(1)
+
+#define IXGBE_SUPPORTED_FEATURES IXGBEVF_PF_SUP_IPSEC
+
struct ixgbe_hw;
int ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
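
For illustration, a minimal sketch (not part of the patch) of the word layout used by the new IXGBE_VF_FEATURES_NEGOTIATE exchange: the VF advertises its feature bits in word 1 and the PF answers with the intersection of that mask and IXGBE_SUPPORTED_FEATURES, so both ends settle on a common set. All macro names come from this header; the snippet only compresses the request/reply into one buffer.

	u32 msg[2];

	msg[0] = IXGBE_VF_FEATURES_NEGOTIATE;	/* opcode                     */
	msg[1] = IXGBEVF_PF_SUP_IPSEC |		/* VF-supported feature bits  */
		 IXGBEVF_PF_SUP_ESX_MBX;

	/* PF reply: only the commonly supported bits survive (see
	 * ixgbe_negotiate_vf_features() later in this diff).
	 */
	msg[1] &= IXGBE_SUPPORTED_FEATURES;	/* currently IPsec only       */
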
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 114dd88fc71c..6885d2343c48 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -641,7 +641,7 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
* disabled
*/
if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (on)
adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 32ac1e020d91..ee133d6749b3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -510,6 +510,8 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
/* Version 1.1 supports jumbo frames on VFs if PF has
* jumbo frames enabled which means legacy VFs are
* disabled
@@ -1046,6 +1048,8 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
adapter->vfinfo[vf].vf_api = api;
return 0;
default:
@@ -1072,6 +1076,8 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return -1;
@@ -1112,6 +1118,8 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* verify the PF is supporting the correct API */
switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -1145,6 +1153,8 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
/* verify the PF is supporting the correct API */
switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -1174,6 +1184,8 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
fallthrough;
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return -EOPNOTSUPP;
@@ -1244,6 +1256,8 @@ static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return -EOPNOTSUPP;
@@ -1254,6 +1268,65 @@ static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
return 0;
}
+/**
+ * ixgbe_send_vf_link_status - send link status data to VF
+ * @adapter: pointer to adapter struct
+ * @msgbuf: pointer to message buffers
+ * @vf: VF identifier
+ *
+ * Reply for IXGBE_VF_GET_PF_LINK_STATE mbox command sending link status data.
+ *
+ * Return: 0 on success or -EOPNOTSUPP when operation is not supported.
+ */
+static int ixgbe_send_vf_link_status(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+	/* Simply provide the stored values, as the watchdog and link status
+	 * events take care of their freshness.
+	 */
+ msgbuf[1] = adapter->link_speed;
+ msgbuf[2] = adapter->link_up;
+
+ return 0;
+}
+
+/**
+ * ixgbe_negotiate_vf_features - negotiate supported features with VF driver
+ * @adapter: pointer to adapter struct
+ * @msgbuf: pointer to message buffers
+ * @vf: VF identifier
+ *
+ * Return: 0 on success or -EOPNOTSUPP when operation is not supported.
+ */
+static int ixgbe_negotiate_vf_features(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u32 features = msgbuf[1];
+
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ features &= IXGBE_SUPPORTED_FEATURES;
+ msgbuf[1] = features;
+
+ return 0;
+}
+
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1328,6 +1401,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_IPSEC_DEL:
retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf);
break;
+ case IXGBE_VF_GET_PF_LINK_STATE:
+ retval = ixgbe_send_vf_link_status(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_FEATURES_NEGOTIATE:
+ retval = ixgbe_negotiate_vf_features(adapter, msgbuf, vf);
+ break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
retval = -EIO;
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index a9bc96f6399d..e177d1d58696 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -28,6 +28,7 @@
/* Link speed */
typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_UNKNOWN 0
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_100_FULL 0x0008
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index 65580b9cb06f..fce35924ff8b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -273,6 +273,9 @@ static int ixgbevf_ipsec_add_sa(struct net_device *dev,
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
+ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+ return -EOPNOTSUPP;
+
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload");
return -EINVAL;
@@ -405,6 +408,9 @@ static void ixgbevf_ipsec_del_sa(struct net_device *dev,
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
+ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+ return;
+
if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
@@ -612,6 +618,10 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
size_t size;
switch (adapter->hw.api_version) {
+ case ixgbe_mbox_api_17:
+ if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
+ return;
+ break;
case ixgbe_mbox_api_14:
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 3a379e6a3a2a..039187607e98 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -363,6 +363,13 @@ struct ixgbevf_adapter {
struct ixgbe_hw hw;
u16 msg_enable;
+ u32 pf_features;
+#define IXGBEVF_PF_SUP_IPSEC BIT(0)
+#define IXGBEVF_PF_SUP_ESX_MBX BIT(1)
+
+#define IXGBEVF_SUPPORTED_FEATURES (IXGBEVF_PF_SUP_IPSEC | \
+ IXGBEVF_PF_SUP_ESX_MBX)
+
struct ixgbevf_hw_stats stats;
unsigned long state;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 28e25641b167..d5ce20f47def 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2271,10 +2271,36 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
+/**
+ * ixgbevf_set_features - Set features supported by PF
+ * @adapter: pointer to the adapter struct
+ *
+ * Negotiate supported features with the PF and set pf_features accordingly.
+ */
+static void ixgbevf_set_features(struct ixgbevf_adapter *adapter)
+{
+ u32 *pf_features = &adapter->pf_features;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ err = hw->mac.ops.negotiate_features(hw, pf_features);
+ if (err && err != -EOPNOTSUPP)
+ netdev_dbg(adapter->netdev,
+ "PF feature negotiation failed.\n");
+
+	/* Also cover the pre-1.7 mailbox API cases */
+ if (hw->api_version == ixgbe_mbox_api_14)
+ *pf_features |= IXGBEVF_PF_SUP_IPSEC;
+ else if (hw->api_version == ixgbe_mbox_api_15)
+ *pf_features |= IXGBEVF_PF_SUP_ESX_MBX;
+}
+
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
static const int api[] = {
+ ixgbe_mbox_api_17,
+ ixgbe_mbox_api_16,
ixgbe_mbox_api_15,
ixgbe_mbox_api_14,
ixgbe_mbox_api_13,
@@ -2294,7 +2320,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
idx++;
}
- if (hw->api_version >= ixgbe_mbox_api_15) {
+ ixgbevf_set_features(adapter);
+
+ if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX) {
hw->mbx.ops.init_params(hw);
memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
sizeof(struct ixgbe_mbx_operations));
@@ -2651,6 +2679,8 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
if (adapter->xdp_prog &&
hw->mac.max_tx_queues == rss)
rss = rss > 3 ? 2 : 1;
@@ -4645,6 +4675,8 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 835bbcc5cc8e..a8ed23ee66aa 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -66,6 +66,8 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
+ ixgbe_mbox_api_16, /* API version 1.6, linux/freebsd VF driver */
+ ixgbe_mbox_api_17, /* API version 1.7, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -102,6 +104,12 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+/* mailbox API, version 1.6 VF requests */
+#define IXGBE_VF_GET_PF_LINK_STATE 0x11 /* request PF to send link info */
+
+/* mailbox API, version 1.7 VF requests */
+#define IXGBE_VF_FEATURES_NEGOTIATE	0x12 /* get features supported by PF */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index dcaef34b88b6..74d320879513 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -313,6 +313,8 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
* is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
@@ -382,6 +384,8 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
* or if the operation is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_17:
+ case ixgbe_mbox_api_16:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
@@ -552,6 +556,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return -EOPNOTSUPP;
@@ -625,6 +631,85 @@ static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
}
/**
+ * ixgbevf_get_pf_link_state - Get PF's link status
+ * @hw: pointer to the HW structure
+ * @speed: link speed
+ * @link_up: indicate if link is up/down
+ *
+ * Ask PF to provide link_up state and speed of the link.
+ *
+ * Return: IXGBE_ERR_MBX in the case of mailbox error,
+ * -EOPNOTSUPP if the op is not supported or 0 on success.
+ */
+static int ixgbevf_get_pf_link_state(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ u32 msgbuf[3] = {};
+ int err;
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msgbuf[0] = IXGBE_VF_GET_PF_LINK_STATE;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+ err = IXGBE_ERR_MBX;
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* No need to set @link_up to false as it will be done by
+ * ixgbe_check_mac_link_vf().
+ */
+ } else {
+ *speed = msgbuf[1];
+ *link_up = msgbuf[2];
+ }
+
+ return err;
+}
+
+/**
+ * ixgbevf_negotiate_features_vf - negotiate supported features with PF driver
+ * @hw: pointer to the HW structure
+ * @pf_features: bitmask of features supported by PF
+ *
+ * Return: IXGBE_ERR_MBX in the case of mailbox error,
+ * -EOPNOTSUPP if the op is not supported or 0 on success.
+ */
+static int ixgbevf_negotiate_features_vf(struct ixgbe_hw *hw, u32 *pf_features)
+{
+ u32 msgbuf[2] = {};
+ int err;
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_17:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE;
+ msgbuf[1] = IXGBEVF_SUPPORTED_FEATURES;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+ ARRAY_SIZE(msgbuf));
+
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+ err = IXGBE_ERR_MBX;
+ *pf_features = 0x0;
+ } else {
+ *pf_features = msgbuf[1];
+ }
+
+ return err;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -659,6 +744,58 @@ mbx_err:
}
/**
+ * ixgbe_read_vflinks - Read VFLINKS register
+ * @hw: pointer to the HW structure
+ * @speed: link speed
+ * @link_up: indicate if link is up/down
+ *
+ * Get linkup status and link speed from the VFLINKS register.
+ */
+static void ixgbe_read_vflinks(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up)
+{
+ u32 vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ /* if link status is down no point in checking to see if PF is up */
+ if (!(vflinks & IXGBE_LINKS_UP)) {
+ *link_up = false;
+ return;
+ }
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (hw->mac.type == ixgbe_mac_82599_vf) {
+ for (int i = 0; i < 5; i++) {
+ udelay(100);
+ vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(vflinks & IXGBE_LINKS_UP)) {
+ *link_up = false;
+ return;
+ }
+ }
+ }
+
+ /* We reached this point so there's link */
+ *link_up = true;
+
+ switch (vflinks & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+}
+
+/**
* ixgbevf_hv_set_vfta_vf - * Hyper-V variant - just a stub.
* @hw: unused
* @vlan: unused
@@ -702,10 +839,10 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
bool *link_up,
bool autoneg_wait_to_complete)
{
+ struct ixgbevf_adapter *adapter = hw->back;
struct ixgbe_mbx_info *mbx = &hw->mbx;
struct ixgbe_mac_info *mac = &hw->mac;
s32 ret_val = 0;
- u32 links_reg;
u32 in_msg = 0;
/* If we were hit with a reset drop the link */
@@ -715,43 +852,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
if (!mac->get_link_status)
goto out;
- /* if link status is down no point in checking to see if pf is up */
- links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
- if (!(links_reg & IXGBE_LINKS_UP))
- goto out;
-
- /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
- * before the link status is correct
- */
- if (mac->type == ixgbe_mac_82599_vf) {
- int i;
-
- for (i = 0; i < 5; i++) {
- udelay(100);
- links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
-
- if (!(links_reg & IXGBE_LINKS_UP))
- goto out;
- }
- }
-
- switch (links_reg & IXGBE_LINKS_SPEED_82599) {
- case IXGBE_LINKS_SPEED_10G_82599:
- *speed = IXGBE_LINK_SPEED_10GB_FULL;
- break;
- case IXGBE_LINKS_SPEED_1G_82599:
- *speed = IXGBE_LINK_SPEED_1GB_FULL;
- break;
- case IXGBE_LINKS_SPEED_100_82599:
- *speed = IXGBE_LINK_SPEED_100_FULL;
- break;
+ if (hw->mac.type == ixgbe_mac_e610_vf) {
+ ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up);
+ if (ret_val)
+ goto out;
+ } else {
+ ixgbe_read_vflinks(hw, speed, link_up);
+ if (*link_up == false)
+ goto out;
}
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
if (mbx->ops.read(hw, &in_msg, 1)) {
- if (hw->api_version >= ixgbe_mbox_api_15)
+ if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX)
mac->get_link_status = false;
goto out;
}
@@ -951,6 +1066,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
+ case ixgbe_mbox_api_16:
+ case ixgbe_mbox_api_17:
break;
default:
return 0;
@@ -1005,6 +1122,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.setup_link = ixgbevf_setup_mac_link_vf,
.check_link = ixgbevf_check_mac_link_vf,
.negotiate_api_version = ixgbevf_negotiate_api_version_vf,
+ .negotiate_features = ixgbevf_negotiate_features_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 2d791bc26ae4..4f19b8900c29 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -26,6 +26,7 @@ struct ixgbe_mac_operations {
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
+ int (*negotiate_features)(struct ixgbe_hw *hw, u32 *pf_features);
/* Link */
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index d374a4454836..ec0e11c77cbf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1981,6 +1981,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
!is_cgx_mapped_to_nix(pdev->subsystem_device, cgx->cgx_id)) {
dev_notice(dev, "CGX %d not mapped to NIX, skipping probe\n",
cgx->cgx_id);
+ err = -ENODEV;
goto err_release_regions;
}
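
For illustration, a minimal sketch (not part of the patch) of why this early-exit path needs an explicit error code: after the last successful step err still holds 0, so jumping to the unwind label without setting it would make the probe return success while skipping initialisation. The function and parameter names are assumptions.

static int example_probe(bool mapped_to_nix)
{
	int err = 0;			/* last successful step left err == 0 */

	if (!mapped_to_nix) {
		err = -ENODEV;		/* the added assignment: fail the probe */
		goto out;
	}
	/* normal initialisation would continue here */
out:
	return err;
}
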
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index aff17c37ddde..902d6abaa3ec 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1516,10 +1516,8 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
pool->xdp_cnt = numptrs;
pool->xdp = devm_kcalloc(pfvf->dev,
numptrs, sizeof(struct xdp_buff *), GFP_KERNEL);
- if (IS_ERR(pool->xdp)) {
- netdev_err(pfvf->netdev, "Creation of xsk pool failed\n");
- return PTR_ERR(pool->xdp);
- }
+ if (!pool->xdp)
+ return -ENOMEM;
}
return 0;
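
For illustration, a minimal sketch (not part of the patch): devm_kcalloc() returns NULL on failure rather than an ERR_PTR, so IS_ERR()/PTR_ERR() never see the error; the idiomatic check is a NULL test that returns -ENOMEM. The wrapper function name is an assumption.

static int example_alloc_xdp_array(struct device *dev, unsigned int numptrs,
				   struct xdp_buff ***xdp)
{
	*xdp = devm_kcalloc(dev, numptrs, sizeof(**xdp), GFP_KERNEL);
	if (!*xdp)			/* NULL, never an ERR_PTR value */
		return -ENOMEM;

	return 0;
}
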
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 3dbb113b792c..1ed1f88dd7f8 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -677,7 +677,7 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
void *buf;
int s;
- page = __dev_alloc_page(GFP_KERNEL);
+ page = __dev_alloc_page(GFP_KERNEL | GFP_DMA32);
if (!page)
return -ENOMEM;
@@ -800,7 +800,7 @@ mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
struct page *page;
int s;
- page = __dev_alloc_page(GFP_KERNEL);
+ page = __dev_alloc_page(GFP_KERNEL | GFP_DMA32);
if (!page)
return -ENOMEM;
@@ -2426,6 +2426,10 @@ mtk_wed_attach(struct mtk_wed_device *dev)
dev->version = hw->version;
dev->hw->pcie_base = mtk_wed_get_pcie_base(dev);
+ ret = dma_set_mask_and_coherent(hw->dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto out;
+
if (hw->eth->dma_dev == hw->eth->dev &&
of_dma_is_coherent(hw->eth->dev->of_node))
mtk_eth_set_dma_device(hw->eth, hw->dev);
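
For illustration, a minimal sketch (not part of the patch) of the pairing the WED change relies on: the device declares a 32-bit DMA mask and its buffer pages are allocated with GFP_DMA32 so they stay below 4 GiB; either half alone can still leave pages the hardware cannot reach. The helper name is an assumption.

static int example_alloc_dma32_page(struct device *dev, struct page **page)
{
	int ret;

	/* Tell the DMA API the device can only address 32 bits. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* ...and make sure the backing page itself sits below 4 GiB. */
	*page = __dev_alloc_page(GFP_KERNEL | GFP_DMA32);
	if (!*page)
		return -ENOMEM;

	return 0;
}
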
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index e9f319a9bdd6..60f7ab1d72e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -66,8 +66,8 @@ void mlx5_cq_tasklet_cb(struct tasklet_struct *t)
tasklet_schedule(&ctx->task);
}
-static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
- struct mlx5_eqe *eqe)
+void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
+ struct mlx5_eqe *eqe)
{
unsigned long flags;
struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
@@ -95,7 +95,15 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
if (schedule_tasklet)
tasklet_schedule(&tasklet_ctx->task);
}
+EXPORT_SYMBOL(mlx5_add_cq_to_tasklet);
+static void mlx5_core_cq_dummy_cb(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
+{
+ mlx5_core_err(cq->eq->core.dev,
+ "CQ default completion callback, CQ #%u\n", cq->cqn);
+}
+
+#define MLX5_CQ_INIT_CMD_SN cpu_to_be32(2 << 28)
/* Callers must verify outbox status in case of err */
int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen)
@@ -121,10 +129,19 @@ int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->arm_sn = 0;
cq->eq = eq;
cq->uid = MLX5_GET(create_cq_in, in, uid);
+
+ /* Kernel CQs must set the arm_db address prior to calling
+ * this function, allowing for the proper value to be
+ * initialized. User CQs are responsible for their own
+ * initialization since they do not use the arm_db field.
+ */
+ if (cq->arm_db)
+ *cq->arm_db = MLX5_CQ_INIT_CMD_SN;
+
refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
if (!cq->comp)
- cq->comp = mlx5_add_cq_to_tasklet;
+ cq->comp = mlx5_core_cq_dummy_cb;
/* assuming CQ will be deleted before the EQ */
cq->tasklet_ctx.priv = &eq->tasklet_ctx;
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
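
For illustration, a minimal sketch (not part of the patch) of the setup order kernel CQ users are expected to follow after this change: doorbell pointers and a real completion callback are filled in before mlx5_create_cq(), which now initialises *arm_db with the command sequence number itself. The surrounding variables are assumed to be prepared as in the mlx5 drivers touched later in this diff.

	mcq->cqe_sz	= 64;
	mcq->set_ci_db	= wq_ctrl->db.db;
	mcq->arm_db	= wq_ctrl->db.db + 1;
	*mcq->set_ci_db = 0;			/* *arm_db is no longer zeroed here */
	mcq->comp	= my_completion_handler;	/* otherwise the dummy cb only logs */

	err = mlx5_create_cq(mdev, mcq, in, inlen, out, outlen);
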
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index fceea83abbd7..887adf4807d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -541,7 +541,7 @@ static int mlx5_devlink_num_doorbells_validate(struct devlink *devlink, u32 id,
max_num_channels = mlx5e_get_max_num_channels(mdev);
if (val32 > max_num_channels) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Requested num_doorbells (%u) exceeds maximum number of channels (%u)",
+ "Requested num_doorbells (%u) exceeds max number of channels (%u)",
val32, max_num_channels);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 14e3207b14e7..a163f81f07c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -634,7 +634,10 @@ struct mlx5e_dma_info {
struct mlx5e_shampo_hd {
struct mlx5e_frag_page *pages;
u32 hd_per_wq;
+ u32 hd_per_page;
u16 hd_per_wqe;
+ u8 log_hd_per_page;
+ u8 log_hd_entry_size;
unsigned long *bitmap;
u16 pi;
u16 ci;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3692298e10f2..c9bdee9a8b30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -100,7 +100,7 @@ u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
return sizeof(struct mlx5_ksm) * 4;
}
WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
- return 0;
+ return 1;
}
u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 0a4fb8c92268..35d9530037a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -804,7 +804,8 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
goto err_xfrm;
}
- if (mlx5_eswitch_block_mode(priv->mdev))
+ err = mlx5_eswitch_block_mode(priv->mdev);
+ if (err)
goto unblock_ipsec;
if (x->props.mode == XFRM_MODE_TUNNEL &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 5d7c15abfcaf..f8eaaf37963b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -342,6 +342,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
struct mlx5e_priv *master_priv);
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event);
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv);
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -387,6 +388,10 @@ static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *sl
static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
}
+
+static inline void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+}
#endif
#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index bf1d2769d4f1..feef86fff4bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -2893,9 +2893,30 @@ void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
- if (!priv->ipsec)
- return; /* IPsec not supported */
+ if (!priv->ipsec || mlx5_devcom_comp_get_size(priv->devcom) < 2)
+ return; /* IPsec not supported or no peers */
mlx5_devcom_send_event(priv->devcom, event, event, priv);
wait_for_completion(&priv->ipsec->comp);
}
+
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+ struct mlx5_devcom_comp_dev *tmp = NULL;
+ struct mlx5e_priv *peer_priv;
+
+ if (!priv->devcom)
+ return;
+
+ if (!mlx5_devcom_for_each_peer_begin(priv->devcom))
+ goto out;
+
+ peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &tmp);
+ if (peer_priv)
+ complete_all(&peer_priv->ipsec->comp);
+
+ mlx5_devcom_for_each_peer_end(priv->devcom);
+out:
+ mlx5_devcom_unregister_component(priv->devcom);
+ priv->devcom = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index d7a11ff9bbdb..da2d1eb52c13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -320,7 +320,6 @@ err_dma_unmap:
err_free:
kfree(buf);
err_out:
- priv_rx->rq_stats->tls_resync_req_skip++;
return err;
}
@@ -339,14 +338,19 @@ static void resync_handle_work(struct work_struct *work)
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
mlx5e_ktls_priv_rx_put(priv_rx);
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&resync->core);
return;
}
c = resync->priv->channels.c[priv_rx->rxq];
sq = &c->async_icosq;
- if (resync_post_get_progress_params(sq, priv_rx))
+ if (resync_post_get_progress_params(sq, priv_rx)) {
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&resync->core);
mlx5e_ktls_priv_rx_put(priv_rx);
+ }
}
static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
@@ -425,14 +429,21 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
{
struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
struct mlx5e_ktls_offload_context_rx *priv_rx;
+ struct tls_offload_resync_async *async_resync;
+ struct tls_offload_context_rx *rx_ctx;
u8 tracker_state, auth_state, *ctx;
struct device *dev;
u32 hw_seq;
priv_rx = buf->priv_rx;
dev = mlx5_core_dma_dev(sq->channel->mdev);
- if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
+ rx_ctx = tls_offload_ctx_rx(tls_get_ctx(priv_rx->sk));
+ async_resync = rx_ctx->resync_async;
+ if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(async_resync);
goto out;
+ }
dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
DMA_FROM_DEVICE);
@@ -443,11 +454,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(async_resync);
goto out;
}
hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
- tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
+ tls_offload_rx_resync_async_request_end(async_resync,
+ cpu_to_be32(hw_seq));
priv_rx->rq_stats->tls_resync_req_end++;
out:
mlx5e_ktls_priv_rx_put(priv_rx);
@@ -472,8 +485,10 @@ static bool resync_queue_get_psv(struct sock *sk)
resync = &priv_rx->resync;
mlx5e_ktls_priv_rx_get(priv_rx);
- if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
+ if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) {
mlx5e_ktls_priv_rx_put(priv_rx);
+ return false;
+ }
return true;
}
@@ -482,6 +497,7 @@ static bool resync_queue_get_psv(struct sock *sk)
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct tls_offload_resync_async *resync_async;
struct net_device *netdev = rq->netdev;
struct net *net = dev_net(netdev);
struct sock *sk = NULL;
@@ -527,7 +543,8 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
seq = th->seq;
datalen = skb->len - depth;
- tls_offload_rx_resync_async_request_start(sk, seq, datalen);
+ resync_async = tls_offload_ctx_rx(tls_get_ctx(sk))->resync_async;
+ tls_offload_rx_resync_async_request_start(resync_async, seq, datalen);
rq->stats->tls_resync_req_start++;
unref:
@@ -556,6 +573,18 @@ void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
resync_handle_seq_match(priv_rx, c);
}
+void
+mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi)
+{
+ struct mlx5e_ktls_offload_context_rx *priv_rx;
+ struct mlx5e_ktls_rx_resync_buf *buf;
+
+ buf = wi->tls_get_params.buf;
+ priv_rx = buf->priv_rx;
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&priv_rx->resync.core);
+}
+
/* End of resync section */
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index f87b65c560ea..cb08799769ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -29,6 +29,10 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
+
+void
+mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi);
+
static inline bool
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index d166c0d5189e..9b93da4d52f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -595,32 +595,55 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
- __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
+ __u64 upper_limit_mbps;
+ __u64 upper_limit_gbps;
int i;
+ struct {
+ int scale;
+ const char *units_str;
+ } units[] = {
+ [MLX5_100_MBPS_UNIT] = {
+ .scale = 100,
+ .units_str = "Mbps",
+ },
+ [MLX5_GBPS_UNIT] = {
+ .scale = 1,
+ .units_str = "Gbps",
+ },
+ };
memset(max_bw_value, 0, sizeof(max_bw_value));
memset(max_bw_unit, 0, sizeof(max_bw_unit));
+ upper_limit_mbps = 255 * MLX5E_100MB;
+ upper_limit_gbps = 255 * MLX5E_1GB;
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
if (!maxrate->tc_maxrate[i]) {
max_bw_unit[i] = MLX5_BW_NO_LIMIT;
continue;
}
- if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
+ if (maxrate->tc_maxrate[i] <= upper_limit_mbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
MLX5E_100MB);
max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
max_bw_unit[i] = MLX5_100_MBPS_UNIT;
- } else {
+		} else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
MLX5E_1GB);
max_bw_unit[i] = MLX5_GBPS_UNIT;
+ } else {
+ netdev_err(netdev,
+ "tc_%d maxrate %llu Kbps exceeds limit %llu\n",
+ i, maxrate->tc_maxrate[i],
+ upper_limit_gbps);
+ return -EINVAL;
}
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- netdev_dbg(netdev, "%s: tc_%d <=> max_bw %d Gbps\n",
- __func__, i, max_bw_value[i]);
+ netdev_dbg(netdev, "%s: tc_%d <=> max_bw %u %s\n", __func__, i,
+ max_bw_value[i] * units[max_bw_unit[i]].scale,
+ units[max_bw_unit[i]].units_str);
}
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
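
For illustration, a worked sketch (not part of the patch) of the unit selection, assuming MLX5E_100MB and MLX5E_1GB are 100 Mbit/s and 1 Gbit/s expressed in the same Kbps units as tc_maxrate: the 8-bit max_bw_value field limits the 100 Mbps encoding to 25.5 Gbit/s and the Gbps encoding to 255 Gbit/s, and anything above that is now rejected instead of silently wrapping.

static int example_pick_unit(u64 rate_kbps, u8 *value, u8 *unit)
{
	const u64 hundred_mb = 100 * 1000;	/* assumed MLX5E_100MB, in Kbps */
	const u64 one_gb = 1000 * 1000;		/* assumed MLX5E_1GB, in Kbps   */

	if (rate_kbps <= 255 * hundred_mb) {		/* up to 25.5 Gbit/s */
		*value = max_t(u64, 1, div_u64(rate_kbps, hundred_mb));
		*unit = MLX5_100_MBPS_UNIT;
	} else if (rate_kbps <= 255 * one_gb) {		/* up to 255 Gbit/s  */
		*value = div_u64(rate_kbps, one_gb);
		*unit = MLX5_GBPS_UNIT;
	} else {
		return -EINVAL;				/* not representable */
	}

	return 0;
}
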
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 53e5ae252eac..893e1380a7c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -2125,14 +2125,12 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
if (!size_read)
return i;
- if (size_read == -EINVAL)
- return -EINVAL;
if (size_read < 0) {
NL_SET_ERR_MSG_FMT_MOD(
extack,
"Query module eeprom by page failed, read %u bytes, err %d",
i, size_read);
- return i;
+ return size_read;
}
i += size_read;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a56825921c23..5e17eae81f4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -242,8 +242,8 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
&attr,
mlx5e_devcom_event_mpv,
priv);
- if (IS_ERR(priv->devcom))
- return PTR_ERR(priv->devcom);
+ if (!priv->devcom)
+ return -EINVAL;
if (mlx5_core_is_mp_master(priv->mdev)) {
mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
@@ -256,7 +256,7 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
{
- if (IS_ERR_OR_NULL(priv->devcom))
+ if (!priv->devcom)
return;
if (mlx5_core_is_mp_master(priv->mdev)) {
@@ -266,6 +266,7 @@ static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
}
mlx5_devcom_unregister_component(priv->devcom);
+ priv->devcom = NULL;
}
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
@@ -790,8 +791,9 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
int node)
{
void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+ u8 log_hd_per_page, log_hd_entry_size;
+ u16 hd_per_wq, hd_per_wqe;
u32 hd_pool_size;
- u16 hd_per_wq;
int wq_size;
int err;
@@ -814,11 +816,24 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
if (err)
goto err_umr_mkey;
- rq->mpwqe.shampo->hd_per_wqe =
- mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
+ hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
- hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
- MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+
+ BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);
+ if (hd_per_wqe >= MLX5E_SHAMPO_WQ_HEADER_PER_PAGE) {
+ log_hd_per_page = MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE;
+ log_hd_entry_size = MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
+ } else {
+ log_hd_per_page = order_base_2(hd_per_wqe);
+ log_hd_entry_size = order_base_2(PAGE_SIZE / hd_per_wqe);
+ }
+
+ rq->mpwqe.shampo->hd_per_wqe = hd_per_wqe;
+ rq->mpwqe.shampo->hd_per_page = BIT(log_hd_per_page);
+ rq->mpwqe.shampo->log_hd_per_page = log_hd_per_page;
+ rq->mpwqe.shampo->log_hd_entry_size = log_hd_entry_size;
+
+ hd_pool_size = (hd_per_wqe * wq_size) >> log_hd_per_page;
if (netif_rxq_has_unreadable_mp(rq->netdev, rq->ix)) {
/* Separate page pool for shampo headers */
@@ -2204,7 +2219,6 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
*mcq->set_ci_db = 0;
- *mcq->arm_db = 0;
mcq->vector = param->eq_ix;
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
@@ -6120,6 +6134,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
+ mlx5e_ipsec_disable_events(priv);
mlx5e_disable_blocking_events(priv);
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 263d5628ee44..687cf123211d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -648,17 +648,20 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
umr_wqe->hdr.uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
-static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index)
+static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq,
+ int header_index)
{
- BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- return &rq->mpwqe.shampo->pages[header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE];
+ return &shampo->pages[header_index >> shampo->log_hd_per_page];
}
-static u64 mlx5e_shampo_hd_offset(int header_index)
+static u64 mlx5e_shampo_hd_offset(struct mlx5e_rq *rq, int header_index)
{
- return (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
- MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u32 hd_per_page = shampo->hd_per_page;
+
+ return (header_index & (hd_per_page - 1)) << shampo->log_hd_entry_size;
}
static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index);
@@ -671,7 +674,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
u16 pi, header_offset, err, wqe_bbs;
u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
struct mlx5e_umr_wqe *umr_wqe;
- int headroom, i = 0;
+ int headroom, i;
headroom = rq->buff.headroom;
wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries);
@@ -679,25 +682,24 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
build_ksm_umr(sq, umr_wqe, shampo->mkey_be, index, ksm_entries);
- WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1));
- while (i < ksm_entries) {
- struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
+ for (i = 0; i < ksm_entries; i++, index++) {
+ struct mlx5e_frag_page *frag_page;
u64 addr;
- err = mlx5e_page_alloc_fragmented(rq->hd_page_pool, frag_page);
- if (unlikely(err))
- goto err_unmap;
+ frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
+ header_offset = mlx5e_shampo_hd_offset(rq, index);
+ if (!header_offset) {
+ err = mlx5e_page_alloc_fragmented(rq->hd_page_pool,
+ frag_page);
+ if (err)
+ goto err_unmap;
+ }
addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
-
- for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) {
- header_offset = mlx5e_shampo_hd_offset(index++);
-
- umr_wqe->inline_ksms[i++] = (struct mlx5_ksm) {
- .key = cpu_to_be32(lkey),
- .va = cpu_to_be64(addr + header_offset + headroom),
- };
- }
+ umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(lkey),
+ .va = cpu_to_be64(addr + header_offset + headroom),
+ };
}
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
@@ -713,9 +715,9 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
return 0;
err_unmap:
- while (--i) {
+ while (--i >= 0) {
--index;
- header_offset = mlx5e_shampo_hd_offset(index);
+ header_offset = mlx5e_shampo_hd_offset(rq, index);
if (!header_offset) {
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
@@ -735,12 +737,11 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_icosq *sq = rq->icosq;
int i, err, max_ksm_entries, len;
- max_ksm_entries = ALIGN_DOWN(MLX5E_MAX_KSM_PER_WQE(rq->mdev),
- MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);
+ max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev);
ksm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
- ksm_entries = ALIGN_DOWN(ksm_entries, MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);
+ ksm_entries = ALIGN_DOWN(ksm_entries, shampo->hd_per_page);
if (!ksm_entries)
return 0;
@@ -858,7 +859,7 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
+ if (((header_index + 1) & (shampo->hd_per_page - 1)) == 0) {
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page);
@@ -1036,6 +1037,10 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
netdev_WARN_ONCE(cq->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe));
+#ifdef CONFIG_MLX5_EN_TLS
+ if (wi->wqe_type == MLX5E_ICOSQ_WQE_GET_PSV_TLS)
+ mlx5e_ktls_rx_resync_async_request_cancel(wi);
+#endif
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
@@ -1221,9 +1226,10 @@ static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
{
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom;
+ u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
+ void *addr = netmem_address(frag_page->netmem);
- return netmem_address(frag_page->netmem) + head_offset;
+ return addr + head_offset + rq->buff.headroom;
}
static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
@@ -1794,14 +1800,27 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
}
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) {
- if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
- struct mlx5e_wqe_frag_info *pwi;
+ if (prog) {
+ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
- for (pwi = head_wi; pwi < wi; pwi++)
- pwi->frag_page->frags++;
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT,
+ rq->flags)) {
+ struct mlx5e_wqe_frag_info *pwi;
+
+ wi -= old_nr_frags - sinfo->nr_frags;
+
+ for (pwi = head_wi; pwi < wi; pwi++)
+ pwi->frag_page->frags++;
+ }
+ return NULL; /* page/packet was consumed by XDP */
+ }
+
+ nr_frags_free = old_nr_frags - sinfo->nr_frags;
+ if (unlikely(nr_frags_free)) {
+ wi -= nr_frags_free;
+ truesize -= nr_frags_free * frag_info->frag_stride;
}
- return NULL; /* page/packet was consumed by XDP */
}
skb = mlx5e_build_linear_skb(
@@ -2027,6 +2046,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
u32 byte_cnt = cqe_bcnt;
struct skb_shared_info *sinfo;
unsigned int truesize = 0;
+ u32 pg_consumed_bytes;
struct bpf_prog *prog;
struct sk_buff *skb;
u32 linear_frame_sz;
@@ -2080,7 +2100,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
while (byte_cnt) {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
- u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+ pg_consumed_bytes =
+ min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
truesize += pg_consumed_bytes;
@@ -2096,10 +2117,15 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
if (prog) {
+ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+ u32 len;
+
if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_frag_page *pfp;
+ frag_page -= old_nr_frags - sinfo->nr_frags;
+
for (pfp = head_page; pfp < frag_page; pfp++)
pfp->frags++;
@@ -2110,9 +2136,19 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return NULL; /* page/packet was consumed by XDP */
}
+ nr_frags_free = old_nr_frags - sinfo->nr_frags;
+ if (unlikely(nr_frags_free)) {
+ frag_page -= nr_frags_free;
+ truesize -= (nr_frags_free - 1) * PAGE_SIZE +
+ ALIGN(pg_consumed_bytes,
+ BIT(rq->mpwqe.log_stride_sz));
+ }
+
+ len = mxbuf->xdp.data_end - mxbuf->xdp.data;
+
skb = mlx5e_build_linear_skb(
rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
- mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len,
mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
mlx5e_page_release_fragmented(rq->page_pool,
@@ -2137,8 +2173,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
do
pagep->frags++;
while (++pagep < frag_page);
+
+ headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len,
+ skb->data_len);
+ __pskb_pull_tail(skb, headlen);
}
- __pskb_pull_tail(skb, headlen);
} else {
dma_addr_t addr;
@@ -2230,7 +2269,8 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- u16 head_offset = mlx5e_shampo_hd_offset(header_index);
+ u16 head_offset = mlx5e_shampo_hd_offset(rq, header_index);
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 head_size = cqe->shampo.header_size;
u16 rx_headroom = rq->buff.headroom;
struct sk_buff *skb = NULL;
@@ -2246,7 +2286,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
data = hdr + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
- if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
+ if (likely(frag_size <= BIT(shampo->log_hd_entry_size))) {
/* build SKB around header */
dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir);
net_prefetchw(hdr);
@@ -2319,7 +2359,10 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
{
int nr_frags = skb_shinfo(skb)->nr_frags;
- return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
+ if (PAGE_SIZE >= GRO_LEGACY_MAX_SIZE)
+ return skb->len + data_bcnt <= GRO_LEGACY_MAX_SIZE;
+ else
+ return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
}
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
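
For illustration, a minimal sketch (not part of the patch) of the new per-queue header addressing: the page index comes from shifting by log_hd_per_page and the in-page offset from masking with hd_per_page - 1 and shifting by log_hd_entry_size. With example values assumed purely for illustration (16 headers per 4 KiB page, 256-byte entries), header_index 19 maps to page 1 at offset 768.

static unsigned int example_hd_page(unsigned int header_index,
				    u8 log_hd_per_page)
{
	return header_index >> log_hd_per_page;		/* which header page */
}

static unsigned int example_hd_offset(unsigned int header_index,
				      u32 hd_per_page, u8 log_hd_entry_size)
{
	return (header_index & (hd_per_page - 1)) << log_hd_entry_size;
}
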
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 7c029a7d0fd7..a2802cfc9b98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1614,7 +1614,9 @@ void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
fec_set_corrected_bits_total(priv, fec_stats);
fec_set_block_stats(priv, mode, fec_stats);
- fec_set_histograms_stats(priv, mode, hist);
+
+ if (MLX5_CAP_PCAM_REG(priv->mdev, pphcr))
+ fec_set_histograms_stats(priv, mode, hist);
}
#define PPORT_ETH_EXT_OFF(c) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index b7227afcb51d..2702b3885f06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -256,7 +256,7 @@ mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 mode;
#ifdef CONFIG_MLX5_EN_TLS
- if (accel && accel->tls.tls_tisn)
+ if (accel->tls.tls_tisn)
return MLX5_INLINE_MODE_TCP_UDP;
#endif
@@ -982,6 +982,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_attr attr;
struct mlx5i_tx_wqe *wqe;
+ struct mlx5e_accel_tx_state accel = {};
struct mlx5_wqe_datagram_seg *datagram;
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_eth_seg *eseg;
@@ -992,7 +993,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
int num_dma;
u16 pi;
- mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
+ mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);
mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
@@ -1009,7 +1010,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
- mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);
+ mlx5e_txwqe_build_eseg_csum(sq, skb, &accel, eseg);
eseg->mss = attr.mss;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 76382626ad41..929adeb50a98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -66,7 +66,6 @@ static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
esw->fdb_table.legacy.addr_grp = NULL;
esw->fdb_table.legacy.allmulti_grp = NULL;
esw->fdb_table.legacy.promisc_grp = NULL;
- atomic64_set(&esw->user_count, 0);
}
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 4cf995be127d..44a142a041b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1978,7 +1978,6 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
- atomic64_set(&esw->user_count, 0);
}
static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
@@ -3129,7 +3128,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
attr,
mlx5_esw_offloads_devcom_event,
esw);
- if (IS_ERR(esw->devcom))
+ if (!esw->devcom)
return;
mlx5_devcom_send_event(esw->devcom,
@@ -3140,7 +3139,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
- if (IS_ERR_OR_NULL(esw->devcom))
+ if (!esw->devcom)
return;
mlx5_devcom_send_event(esw->devcom,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index cb1319974f83..ccef64fb40b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -421,6 +421,13 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
__be64 *pas;
u32 i;
+ conn->cq.mcq.cqe_sz = 64;
+ conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db;
+ conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1;
+ *conn->cq.mcq.set_ci_db = 0;
+ conn->cq.mcq.vector = 0;
+ conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
+
cq_size = roundup_pow_of_two(cq_size);
MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));
@@ -468,15 +475,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
if (err)
goto err_cqwq;
- conn->cq.mcq.cqe_sz = 64;
- conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db;
- conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1;
- *conn->cq.mcq.set_ci_db = 0;
- *conn->cq.mcq.arm_db = 0;
- conn->cq.mcq.vector = 0;
- conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
-
mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 59c00c911275..3db0387bf6dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1430,11 +1430,10 @@ static int mlx5_lag_register_hca_devcom_comp(struct mlx5_core_dev *dev)
mlx5_devcom_register_component(dev->priv.devc,
MLX5_DEVCOM_HCA_PORTS,
&attr, NULL, dev);
- if (IS_ERR(dev->priv.hca_devcom_comp)) {
+ if (!dev->priv.hca_devcom_comp) {
mlx5_core_err(dev,
- "Failed to register devcom HCA component, err: %ld\n",
- PTR_ERR(dev->priv.hca_devcom_comp));
- return PTR_ERR(dev->priv.hca_devcom_comp);
+ "Failed to register devcom HCA component.");
+ return -EINVAL;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index d0ba83d77cd1..29e7fa09c32c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -1444,7 +1444,7 @@ static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key)
compd = mlx5_devcom_register_component(mdev->priv.devc,
MLX5_DEVCOM_SHARED_CLOCK,
&attr, NULL, mdev);
- if (IS_ERR(compd))
+ if (!compd)
return;
mdev->clock_state->compdev = compd;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index faa2833602c8..e749618229bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -76,20 +76,18 @@ mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev)
struct mlx5_devcom_dev *
mlx5_devcom_register_device(struct mlx5_core_dev *dev)
{
- struct mlx5_devcom_dev *devc;
+ struct mlx5_devcom_dev *devc = NULL;
mutex_lock(&dev_list_lock);
if (devcom_dev_exists(dev)) {
- devc = ERR_PTR(-EEXIST);
+ mlx5_core_err(dev, "devcom device already exists");
goto out;
}
devc = mlx5_devcom_dev_alloc(dev);
- if (!devc) {
- devc = ERR_PTR(-ENOMEM);
+ if (!devc)
goto out;
- }
list_add_tail(&devc->list, &devcom_dev_list);
out:
@@ -110,8 +108,10 @@ mlx5_devcom_dev_release(struct kref *ref)
void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
{
- if (!IS_ERR_OR_NULL(devc))
- kref_put(&devc->ref, mlx5_devcom_dev_release);
+ if (!devc)
+ return;
+
+ kref_put(&devc->ref, mlx5_devcom_dev_release);
}
static struct mlx5_devcom_comp *
@@ -122,7 +122,7 @@ mlx5_devcom_comp_alloc(u64 id, const struct mlx5_devcom_match_attr *attr,
comp = kzalloc(sizeof(*comp), GFP_KERNEL);
if (!comp)
- return ERR_PTR(-ENOMEM);
+ return NULL;
comp->id = id;
comp->key.key = attr->key;
@@ -160,7 +160,7 @@ devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc,
devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
if (!devcom)
- return ERR_PTR(-ENOMEM);
+ return NULL;
kref_get(&devc->ref);
devcom->devc = devc;
@@ -240,31 +240,28 @@ mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
mlx5_devcom_event_handler_t handler,
void *data)
{
- struct mlx5_devcom_comp_dev *devcom;
+ struct mlx5_devcom_comp_dev *devcom = NULL;
struct mlx5_devcom_comp *comp;
- if (IS_ERR_OR_NULL(devc))
- return ERR_PTR(-EINVAL);
+ if (!devc)
+ return NULL;
mutex_lock(&comp_list_lock);
comp = devcom_component_get(devc, id, attr, handler);
- if (IS_ERR(comp)) {
- devcom = ERR_PTR(-EINVAL);
+ if (IS_ERR(comp))
goto out_unlock;
- }
if (!comp) {
comp = mlx5_devcom_comp_alloc(id, attr, handler);
- if (IS_ERR(comp)) {
- devcom = ERR_CAST(comp);
+ if (!comp)
goto out_unlock;
- }
+
list_add_tail(&comp->comp_list, &devcom_comp_list);
}
mutex_unlock(&comp_list_lock);
devcom = devcom_alloc_comp_dev(devc, comp, data);
- if (IS_ERR(devcom))
+ if (!devcom)
kref_put(&comp->ref, mlx5_devcom_comp_release);
return devcom;
@@ -276,8 +273,10 @@ out_unlock:
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
{
- if (!IS_ERR_OR_NULL(devcom))
- devcom_free_comp_dev(devcom);
+ if (!devcom)
+ return;
+
+ devcom_free_comp_dev(devcom);
}
int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom)
@@ -296,7 +295,7 @@ int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int err = 0;
void *data;
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return -ENODEV;
comp = devcom->comp;
@@ -338,7 +337,7 @@ void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready)
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return false;
return READ_ONCE(devcom->comp->ready);
@@ -348,7 +347,7 @@ bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom)
{
struct mlx5_devcom_comp *comp;
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return false;
comp = devcom->comp;
@@ -421,21 +420,21 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return;
down_write(&devcom->comp->sem);
}
void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return;
up_write(&devcom->comp->sem);
}
int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return 0;
return down_write_trylock(&devcom->comp->sem);
}
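The devcom rework above switches the registration helpers from ERR_PTR-encoded failures to plain NULL returns, and the callers from IS_ERR()/IS_ERR_OR_NULL() to simple NULL checks. A hedged user-space sketch of the two conventions; the simplified ERR_PTR()/IS_ERR() macros and the comp_alloc_* helpers are stand-ins, not the mlx5 API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(intptr_t)(err))
#define IS_ERR(ptr)     ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct comp { int id; };

/* Old style: encode -EEXIST/-ENOMEM in the returned pointer. */
static struct comp *comp_alloc_errptr(int id, int fail)
{
        struct comp *c;

        if (fail)
                return ERR_PTR(-EEXIST);
        c = malloc(sizeof(*c));
        if (!c)
                return ERR_PTR(-ENOMEM);
        c->id = id;
        return c;
}

/* New style: NULL is the only failure value; the reason is logged at
 * the point of failure instead of being carried in the pointer.
 */
static struct comp *comp_alloc_null(int id, int fail)
{
        struct comp *c;

        if (fail) {
                fprintf(stderr, "component %d already exists\n", id);
                return NULL;
        }
        c = malloc(sizeof(*c));
        if (!c)
                return NULL;
        c->id = id;
        return c;
}

int main(void)
{
        struct comp *a = comp_alloc_errptr(1, 1);
        struct comp *b = comp_alloc_null(2, 0);

        if (IS_ERR(a))
                printf("errptr style failed: %d\n", (int)(intptr_t)a);
        if (b)
                printf("null style ok: %d\n", b->id);
        free(b);
        return 0;
}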
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index f5c2701f6e87..8e17daae48af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -221,8 +221,8 @@ static int sd_register(struct mlx5_core_dev *dev)
attr.net = mlx5_core_net(dev);
devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
&attr, NULL, dev);
- if (IS_ERR(devcom))
- return PTR_ERR(devcom);
+ if (!devcom)
+ return -EINVAL;
sd->devcom = devcom;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index df93625c9dfa..70c156591b0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -978,9 +978,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
int err;
dev->priv.devc = mlx5_devcom_register_device(dev);
- if (IS_ERR(dev->priv.devc))
- mlx5_core_warn(dev, "failed to register devcom device %pe\n",
- dev->priv.devc);
+ if (!dev->priv.devc)
+ mlx5_core_warn(dev, "failed to register devcom device\n");
err = mlx5_query_board_id(dev);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index 24ef7d66fa8a..7510c46e58a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -873,12 +873,6 @@ err_free_sqc:
return err;
}
-static void hws_cq_complete(struct mlx5_core_cq *mcq,
- struct mlx5_eqe *eqe)
-{
- pr_err("CQ completion CQ: #%u\n", mcq->cqn);
-}
-
static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
int numa_node,
struct mlx5hws_send_engine *queue,
@@ -901,7 +895,6 @@ static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
mcq->cqe_sz = 64;
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
- mcq->comp = hws_cq_complete;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
index 077a77fde670..d034372fa047 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
@@ -1049,12 +1049,6 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
return 0;
}
-static void dr_cq_complete(struct mlx5_core_cq *mcq,
- struct mlx5_eqe *eqe)
-{
- pr_err("CQ completion CQ: #%u\n", mcq->cqn);
-}
-
static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
struct mlx5_uars_page *uar,
size_t ncqe)
@@ -1089,6 +1083,13 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
}
+ cq->mcq.cqe_sz = 64;
+ cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
+ cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
+ *cq->mcq.set_ci_db = 0;
+ cq->mcq.vector = 0;
+ cq->mdev = mdev;
+
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
@@ -1112,27 +1113,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
- cq->mcq.comp = dr_cq_complete;
-
err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
kvfree(in);
if (err)
goto err_cqwq;
- cq->mcq.cqe_sz = 64;
- cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
- cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
- *cq->mcq.set_ci_db = 0;
-
- /* set no-zero value, in order to avoid the HW to run db-recovery on
- * CQ that used in polling mode.
- */
- *cq->mcq.arm_db = cpu_to_be32(2 << 28);
-
- cq->mcq.vector = 0;
- cq->mdev = mdev;
-
return cq;
err_cqwq:
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
index 2474dfd330f4..fe4e61405284 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -294,7 +294,7 @@ static void lan966x_stats_update(struct lan966x *lan966x)
{
int i, j;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
for (i = 0; i < lan966x->num_phys_ports; i++) {
uint idx = i * lan966x->num_stats;
@@ -310,7 +310,7 @@ static void lan966x_stats_update(struct lan966x *lan966x)
}
}
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
static int lan966x_get_sset_count(struct net_device *dev, int sset)
@@ -365,7 +365,7 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
mac_stats->FramesTransmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_UC] +
@@ -416,7 +416,7 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
lan966x->stats[idx + SYS_COUNT_RX_LONG] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = {
@@ -442,7 +442,7 @@ static void lan966x_get_eth_rmon_stats(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
rmon_stats->undersize_pkts =
lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
@@ -500,7 +500,7 @@ static void lan966x_get_eth_rmon_stats(struct net_device *dev,
lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
*ranges = lan966x_rmon_ranges;
}
@@ -603,7 +603,7 @@ void lan966x_stats_get(struct net_device *dev,
idx = port->chip_port * lan966x->num_stats;
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] +
lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT];
@@ -685,7 +685,7 @@ void lan966x_stats_get(struct net_device *dev,
stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL];
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
int lan966x_stats_init(struct lan966x *lan966x)
@@ -701,7 +701,7 @@ int lan966x_stats_init(struct lan966x *lan966x)
return -ENOMEM;
/* Init stats worker */
- mutex_init(&lan966x->stats_lock);
+ spin_lock_init(&lan966x->stats_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(lan966x->dev));
lan966x->stats_queue = create_singlethread_workqueue(queue_name);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 7001584f1b7a..47752d3fde0b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -1261,7 +1261,6 @@ cleanup_ports:
cancel_delayed_work_sync(&lan966x->stats_work);
destroy_workqueue(lan966x->stats_queue);
- mutex_destroy(&lan966x->stats_lock);
debugfs_remove_recursive(lan966x->debugfs_root);
@@ -1279,7 +1278,6 @@ static void lan966x_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&lan966x->stats_work);
destroy_workqueue(lan966x->stats_queue);
- mutex_destroy(&lan966x->stats_lock);
lan966x_mac_purge_entries(lan966x);
lan966x_mdb_deinit(lan966x);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 4f75f0688369..eea286c29474 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -295,8 +295,8 @@ struct lan966x {
const struct lan966x_stat_layout *stats_layout;
u32 num_stats;
- /* workqueue for reading stats */
- struct mutex stats_lock;
+ /* lock for reading stats */
+ spinlock_t stats_lock;
u64 *stats;
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
index a1471e38d118..2a37fc1ba4bc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -403,11 +403,11 @@ static void lan966x_es0_read_esdx_counter(struct lan966x *lan966x,
u32 counter;
id = id & 0xff; /* counter limit */
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
counter = lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS)) +
lan_rd(lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
if (counter)
admin->cache.counter = counter;
}
@@ -417,14 +417,14 @@ static void lan966x_es0_write_esdx_counter(struct lan966x *lan966x,
{
id = id & 0xff; /* counter limit */
- mutex_lock(&lan966x->stats_lock);
+ spin_lock(&lan966x->stats_lock);
lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(id), lan966x, SYS_STAT_CFG);
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_GRN_BYTES));
lan_wr(admin->cache.counter, lan966x,
SYS_CNT(LAN966X_STAT_ESDX_GRN_PKTS));
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_BYTES));
lan_wr(0, lan966x, SYS_CNT(LAN966X_STAT_ESDX_YEL_PKTS));
- mutex_unlock(&lan966x->stats_lock);
+ spin_unlock(&lan966x->stats_lock);
}
static void lan966x_vcap_cache_write(struct net_device *dev,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 132626a3f9f7..9ef72f294117 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2557,14 +2557,16 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
&nn->tlv_caps);
if (err)
- goto err_free_nn;
+ goto err_free_xsk_pools;
err = nfp_ccm_mbox_alloc(nn);
if (err)
- goto err_free_nn;
+ goto err_free_xsk_pools;
return nn;
+err_free_xsk_pools:
+ kfree(nn->dp.xsk_pools);
err_free_nn:
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index d10b58ebf603..301ebee2fdc5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -29,6 +29,10 @@ static void ionic_tx_clean(struct ionic_queue *q,
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
{
+ /* Ensure TX descriptor writes reach memory before NIC reads them.
+ * Prevents device from fetching stale descriptors.
+ */
+ dma_wmb();
ionic_q_post(q, ring_dbell);
}
@@ -1444,19 +1448,6 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
bool encap;
int err;
- desc_info = &q->tx_info[q->head_idx];
-
- if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
- return -EIO;
-
- len = skb->len;
- mss = skb_shinfo(skb)->gso_size;
- outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
- SKB_GSO_GRE_CSUM |
- SKB_GSO_IPXIP4 |
- SKB_GSO_IPXIP6 |
- SKB_GSO_UDP_TUNNEL |
- SKB_GSO_UDP_TUNNEL_CSUM));
has_vlan = !!skb_vlan_tag_present(skb);
vlan_tci = skb_vlan_tag_get(skb);
encap = skb->encapsulation;
@@ -1470,12 +1461,21 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
err = ionic_tx_tcp_inner_pseudo_csum(skb);
else
err = ionic_tx_tcp_pseudo_csum(skb);
- if (unlikely(err)) {
- /* clean up mapping from ionic_tx_map_skb */
- ionic_tx_desc_unmap_bufs(q, desc_info);
+ if (unlikely(err))
return err;
- }
+ desc_info = &q->tx_info[q->head_idx];
+ if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
+ return -EIO;
+
+ len = skb->len;
+ mss = skb_shinfo(skb)->gso_size;
+ outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM));
if (encap)
hdrlen = skb_inner_tcp_all_headers(skb);
else
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 8903ae90afcb..d18734fe12e4 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4994,8 +4994,9 @@ static int rtl8169_resume(struct device *device)
if (!device_may_wakeup(tp_to_dev(tp)))
clk_prepare_enable(tp->clk);
- /* Reportedly at least Asus X453MA truncates packets otherwise */
- if (tp->mac_version == RTL_GIGA_MAC_VER_37)
+ /* Some chip versions may truncate packets without this initialization */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_37 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_46)
rtl_init_rxcfg(tp);
return rtl8169_runtime_resume(device);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 9d3bd65b85ff..e2d7ce1a85e8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2211,15 +2211,35 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
}
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
+
if (num_tx_desc > 1) {
desc->die_dt = DT_FEND;
desc--;
+ /* When using multi-descriptors, DT_FEND needs to get written
+ * before DT_FSTART, but the compiler may reorder the memory
+ * writes in an attempt to optimize the code.
+ * Use a dma_wmb() barrier to make sure DT_FEND and DT_FSTART
+ * are written exactly in the order shown in the code.
+ * This is particularly important for cases where the DMA engine
+ * is already running when we are running this code. If the DMA
+ * sees DT_FSTART without the corresponding DT_FEND it will enter
+ * an error condition.
+ */
+ dma_wmb();
desc->die_dt = DT_FSTART;
} else {
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
desc->die_dt = DT_FSINGLE;
}
+
+ /* Before ringing the doorbell we need to make sure that the latest
+ * writes have been committed to memory, otherwise it could delay
+ * things until the doorbell is rung again.
+ * This replaces the read operation mentioned in the HW manuals.
+ */
+ dma_wmb();
ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
priv->cur_tx[q] += num_tx_desc;
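The ravb hunk above is entirely about write ordering: DT_FEND must be visible before DT_FSTART, and both before the doorbell write. The sketch below models that ordering with C11 release fences as a user-space analogue of dma_wmb(); the descriptor layout and doorbell are invented for illustration only:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum { DT_FEMPTY = 0, DT_FSTART = 1, DT_FEND = 2, DT_FSINGLE = 3 };

struct desc { _Atomic uint8_t die_dt; };

static void publish_multi(struct desc *first, struct desc *last,
                          _Atomic uint32_t *doorbell)
{
        /* Write the last descriptor first ... */
        atomic_store_explicit(&last->die_dt, DT_FEND, memory_order_relaxed);
        /* ... barrier so a consumer never sees FSTART without FEND ... */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&first->die_dt, DT_FSTART, memory_order_relaxed);
        /* ... and one more barrier before ringing the doorbell. */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(doorbell, 1, memory_order_relaxed);
}

int main(void)
{
        struct desc d[2] = { { DT_FEMPTY }, { DT_FEMPTY } };
        _Atomic uint32_t db = 0;

        publish_multi(&d[0], &d[1], &db);
        printf("first=%u last=%u doorbell=%u\n",
               (unsigned int)d[0].die_dt, (unsigned int)d[1].die_dt,
               (unsigned int)db);
        return 0;
}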
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 6fd0c1e9a7d5..7cfd9000f79d 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -1090,6 +1090,9 @@ void efx_mae_remove_mport(void *desc, void *arg)
kfree(mport);
}
+/*
+ * Takes ownership of @desc, even if it returns an error
+ */
static int efx_mae_process_mport(struct efx_nic *efx,
struct mae_mport_desc *desc)
{
@@ -1100,6 +1103,7 @@ static int efx_mae_process_mport(struct efx_nic *efx,
if (!IS_ERR_OR_NULL(mport)) {
netif_err(efx, drv, efx->net_dev,
"mport with id %u does exist!!!\n", desc->mport_id);
+ kfree(desc);
return -EEXIST;
}
diff --git a/drivers/net/ethernet/spacemit/k1_emac.c b/drivers/net/ethernet/spacemit/k1_emac.c
index e1c5faff3b71..220eb5ce7583 100644
--- a/drivers/net/ethernet/spacemit/k1_emac.c
+++ b/drivers/net/ethernet/spacemit/k1_emac.c
@@ -1441,6 +1441,9 @@ static int emac_set_pauseparam(struct net_device *dev,
struct emac_priv *priv = netdev_priv(dev);
u8 fc = 0;
+ if (!netif_running(dev))
+ return -ENETDOWN;
+
priv->flow_control_autoneg = pause->autoneg;
if (pause->autoneg) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 51ea0caf16c1..0786816e05f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1446,14 +1446,15 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
}
} else {
if (bsp_priv->clk_enabled) {
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) {
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, false);
+ }
+
clk_bulk_disable_unprepare(bsp_priv->num_clks,
bsp_priv->clks);
clk_disable_unprepare(bsp_priv->clk_phy);
- if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
- bsp_priv->ops->set_clock_selection(bsp_priv,
- bsp_priv->clock_input, false);
-
bsp_priv->clk_enabled = false;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 650d75b73e0b..7b90ecd3a55e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4089,18 +4089,11 @@ static int stmmac_release(struct net_device *dev)
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
struct stmmac_tx_queue *tx_q)
{
- u16 tag = 0x0, inner_tag = 0x0;
- u32 inner_type = 0x0;
struct dma_desc *p;
+ u16 tag = 0x0;
- if (!priv->dma_cap.vlins)
+ if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
return false;
- if (!skb_vlan_tag_present(skb))
- return false;
- if (skb->vlan_proto == htons(ETH_P_8021AD)) {
- inner_tag = skb_vlan_tag_get(skb);
- inner_type = STMMAC_VLAN_INSERT;
- }
tag = skb_vlan_tag_get(skb);
@@ -4109,7 +4102,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
else
p = &tx_q->dma_tx[tx_q->cur_tx];
- if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
return false;
stmmac_set_tx_owner(priv, p);
@@ -4507,6 +4500,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
bool has_vlan, set_ic;
int entry, first_tx;
dma_addr_t des;
+ u32 sdu_len;
tx_q = &priv->dma_conf.tx_queue[queue];
txq_stats = &priv->xstats.txq_stats[queue];
@@ -4524,10 +4518,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (priv->est && priv->est->enable &&
- priv->est->max_sdu[queue] &&
- skb->len > priv->est->max_sdu[queue]){
- priv->xstats.max_sdu_txq_drop[queue]++;
- goto max_sdu_err;
+ priv->est->max_sdu[queue]) {
+ sdu_len = skb->len;
+ /* Add VLAN tag length if VLAN tag insertion offload is requested */
+ if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
+ sdu_len += VLAN_HLEN;
+ if (sdu_len > priv->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ goto max_sdu_err;
+ }
}
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -7573,11 +7572,8 @@ int stmmac_dvr_probe(struct device *device,
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
}
- if (priv->dma_cap.vlins) {
+ if (priv->dma_cap.vlins)
ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
- if (priv->dma_cap.dvlan)
- ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
- }
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
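The stmmac change above counts the VLAN tag against the taprio max-SDU limit whenever the tag will be inserted by hardware, since in that case the 4 bytes are not yet part of skb->len. A standalone sketch of that check; only VLAN_HLEN matches the kernel definition, the rest is illustrative:

#include <stdbool.h>
#include <stdio.h>

#define VLAN_HLEN 4

static bool drop_for_max_sdu(unsigned int skb_len, bool vlan_offload,
                             unsigned int max_sdu)
{
        unsigned int sdu_len = skb_len;

        if (!max_sdu)           /* 0 means "no limit configured" */
                return false;

        if (vlan_offload)       /* tag is added by hardware on transmit */
                sdu_len += VLAN_HLEN;

        return sdu_len > max_sdu;
}

int main(void)
{
        /* 1500-byte frame, 1502-byte limit: fits without offload ... */
        printf("no offload: drop=%d\n", drop_for_max_sdu(1500, false, 1502));
        /* ... but exceeds the limit once the 4-byte tag is inserted. */
        printf("offload:    drop=%d\n", drop_for_max_sdu(1500, true, 1502));
        return 0;
}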
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 97e89a604abd..3b4d4696afe9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -981,7 +981,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
if (qopt->cmd == TAPRIO_CMD_DESTROY)
goto disable;
- if (qopt->num_entries >= dep)
+ if (qopt->num_entries > dep)
return -EINVAL;
if (!qopt->cycle_time)
return -ERANGE;
@@ -1012,7 +1012,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
s64 delta_ns = qopt->entries[i].interval;
u32 gates = qopt->entries[i].gate_mask;
- if (delta_ns > GENMASK(wid, 0))
+ if (delta_ns > GENMASK(wid - 1, 0))
return -ERANGE;
if (gates > GENMASK(31 - wid, 0))
return -ERANGE;
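The stmmac_tc fix above tightens two bounds: the entry count may equal the hardware depth, and each interval must fit in a wid-bit field, whose maximum value is GENMASK(wid - 1, 0). A small sketch of the width bound, using a simplified user-space copy of GENMASK():

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
        unsigned int wid = 24;                  /* example field width */
        uint64_t max_ok  = GENMASK(wid - 1, 0); /* 0x00ffffff: 24 bits */
        uint64_t too_big = GENMASK(wid, 0);     /* 0x01ffffff: 25 bits */

        printf("max interval for %u-bit field: 0x%llx\n",
               wid, (unsigned long long)max_ok);
        printf("old (off-by-one) bound:        0x%llx\n",
               (unsigned long long)too_big);
        return 0;
}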
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
index 0b6f6228ae35..ff02a79c00d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
@@ -212,7 +212,7 @@ static void vlan_enable(struct mac_device_info *hw, u32 type)
value = readl(ioaddr + VLAN_INCL);
value |= VLAN_VLTI;
- value |= VLAN_CSVL; /* Only use SVLAN */
+ value &= ~VLAN_CSVL; /* Only use CVLAN */
value &= ~VLAN_VLC;
value |= (type << VLAN_VLC_SHIFT) & VLAN_VLC;
writel(value, ioaddr + VLAN_INCL);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index fa96db7c1a13..66e8b224827b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -276,9 +276,31 @@ static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port)
/* The number of wireside clocks contained in the verify
* timeout counter. The default is 0x1312d0
* (10ms at 125Mhz in 1G mode).
+ * The frequency of the clock depends on the link speed
+ * and the PHY interface.
*/
- val = 125 * HZ_PER_MHZ; /* assuming 125MHz wireside clock */
+ switch (port->slave.phy_if) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (port->qos.link_speed == SPEED_1000)
+ val = 125 * HZ_PER_MHZ; /* 125 MHz at 1000 Mbps */
+ else if (port->qos.link_speed == SPEED_100)
+ val = 25 * HZ_PER_MHZ; /* 25 MHz at 100 Mbps */
+ else
+ val = (25 * HZ_PER_MHZ) / 10; /* 2.5 MHz at 10 Mbps */
+ break;
+
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ val = 125 * HZ_PER_MHZ; /* 125 MHz */
+ break;
+ default:
+ netdev_err(port->ndev, "selected mode does not support IET\n");
+ return -EOPNOTSUPP;
+ }
val /= MILLIHZ_PER_HZ; /* count per ms timeout */
val *= verify_time_ms; /* count for timeout ms */
@@ -295,20 +317,21 @@ static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
u32 ctrl, status;
int try;
- try = 20;
- do {
- /* Reset the verify state machine by writing 1
- * to LINKFAIL
- */
- ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
- ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
- writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ try = 3;
- /* Clear MAC_LINKFAIL bit to start Verify. */
- ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
- ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
- writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ /* Reset the verify state machine by writing 1
+ * to LINKFAIL
+ */
+ ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ /* Clear MAC_LINKFAIL bit to start Verify. */
+ ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+ ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
+ writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
+
+ do {
msleep(port->qos.iet.verify_time_ms);
status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
@@ -330,7 +353,7 @@ static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
netdev_dbg(port->ndev, "MAC Merge verify error\n");
return -ENODEV;
}
- } while (try-- > 0);
+ } while (--try > 0);
netdev_dbg(port->ndev, "MAC Merge verify timeout\n");
return -ETIMEDOUT;
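The am65-cpsw change above derives the verify timeout count from the actual wireside clock instead of assuming 125 MHz. A standalone sketch of the same arithmetic; the enum and helper are illustrative, but 10 ms at 1 Gbit reproduces the documented default of 0x1312d0:

#include <stdio.h>

#define HZ_PER_MHZ      1000000UL
#define MILLIHZ_PER_HZ  1000UL

enum phy_if { IF_RGMII, IF_SGMII };

static unsigned long verify_timeout_count(enum phy_if phy_if, int speed_mbps,
                                          unsigned long verify_time_ms)
{
        unsigned long val;

        switch (phy_if) {
        case IF_RGMII:
                if (speed_mbps == 1000)
                        val = 125 * HZ_PER_MHZ;         /* 125 MHz */
                else if (speed_mbps == 100)
                        val = 25 * HZ_PER_MHZ;          /* 25 MHz */
                else
                        val = (25 * HZ_PER_MHZ) / 10;   /* 2.5 MHz */
                break;
        case IF_SGMII:
                val = 125 * HZ_PER_MHZ;                 /* 125 MHz */
                break;
        default:
                return 0;                               /* unsupported mode */
        }

        val /= MILLIHZ_PER_HZ;          /* counts per millisecond */
        return val * verify_time_ms;    /* counts for the whole timeout */
}

int main(void)
{
        /* 10 ms at 1 Gbit over RGMII: 0x1312d0, the documented default. */
        printf("count = 0x%lx\n", verify_timeout_count(IF_RGMII, 1000, 10));
        return 0;
}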
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 59d6ab989c55..8ffbfaa3ab18 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -163,7 +163,9 @@ struct am65_cpts {
struct device_node *clk_mux_np;
struct clk *refclk;
u32 refclk_freq;
- struct list_head events;
+ /* separate lists to handle TX and RX timestamps independently */
+ struct list_head events_tx;
+ struct list_head events_rx;
struct list_head pool;
struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
spinlock_t lock; /* protects events lists*/
@@ -227,6 +229,24 @@ static void am65_cpts_disable(struct am65_cpts *cpts)
am65_cpts_write32(cpts, 0, int_enable);
}
+static int am65_cpts_purge_event_list(struct am65_cpts *cpts,
+ struct list_head *events)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+ return removed;
+}
+
static int am65_cpts_event_get_port(struct am65_cpts_event *event)
{
return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
@@ -239,20 +259,12 @@ static int am65_cpts_event_get_type(struct am65_cpts_event *event)
AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
}
-static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
+static int am65_cpts_purge_events(struct am65_cpts *cpts)
{
- struct list_head *this, *next;
- struct am65_cpts_event *event;
int removed = 0;
- list_for_each_safe(this, next, &cpts->events) {
- event = list_entry(this, struct am65_cpts_event, list);
- if (time_after(jiffies, event->tmo)) {
- list_del_init(&event->list);
- list_add(&event->list, &cpts->pool);
- ++removed;
- }
- }
+ removed += am65_cpts_purge_event_list(cpts, &cpts->events_tx);
+ removed += am65_cpts_purge_event_list(cpts, &cpts->events_rx);
if (removed)
dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
@@ -287,7 +299,7 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
struct am65_cpts_event, list);
if (!event) {
- if (am65_cpts_cpts_purge_events(cpts)) {
+ if (am65_cpts_purge_events(cpts)) {
dev_err(cpts->dev, "cpts: event pool empty\n");
ret = -1;
goto out;
@@ -306,11 +318,21 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
cpts->timestamp);
break;
case AM65_CPTS_EV_RX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
+
+ list_move_tail(&event->list, &cpts->events_rx);
+
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EV_RX e1:%08x e2:%08x t:%lld\n",
+ event->event1, event->event2,
+ event->timestamp);
+ break;
case AM65_CPTS_EV_TX:
event->tmo = jiffies +
msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
- list_move_tail(&event->list, &cpts->events);
+ list_move_tail(&event->list, &cpts->events_tx);
dev_dbg(cpts->dev,
"AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
@@ -828,7 +850,7 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
return found;
}
-static void am65_cpts_find_ts(struct am65_cpts *cpts)
+static void am65_cpts_find_tx_ts(struct am65_cpts *cpts)
{
struct am65_cpts_event *event;
struct list_head *this, *next;
@@ -837,7 +859,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
LIST_HEAD(events);
spin_lock_irqsave(&cpts->lock, flags);
- list_splice_init(&cpts->events, &events);
+ list_splice_init(&cpts->events_tx, &events);
spin_unlock_irqrestore(&cpts->lock, flags);
list_for_each_safe(this, next, &events) {
@@ -850,7 +872,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
}
spin_lock_irqsave(&cpts->lock, flags);
- list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events, &cpts->events_tx);
list_splice_tail(&events_free, &cpts->pool);
spin_unlock_irqrestore(&cpts->lock, flags);
}
@@ -861,7 +883,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
unsigned long flags;
long delay = -1;
- am65_cpts_find_ts(cpts);
+ am65_cpts_find_tx_ts(cpts);
spin_lock_irqsave(&cpts->txq.lock, flags);
if (!skb_queue_empty(&cpts->txq))
@@ -905,7 +927,7 @@ static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid)
spin_lock_irqsave(&cpts->lock, flags);
__am65_cpts_fifo_read(cpts);
- list_for_each_safe(this, next, &cpts->events) {
+ list_for_each_safe(this, next, &cpts->events_rx) {
event = list_entry(this, struct am65_cpts_event, list);
if (time_after(jiffies, event->tmo)) {
list_move(&event->list, &cpts->pool);
@@ -1155,7 +1177,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
return ERR_PTR(ret);
mutex_init(&cpts->ptp_clk_lock);
- INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->events_tx);
+ INIT_LIST_HEAD(&cpts->events_rx);
INIT_LIST_HEAD(&cpts->pool);
spin_lock_init(&cpts->lock);
skb_queue_head_init(&cpts->txq);
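The am65-cpts change above splits the single event list into TX and RX lists and factors the timeout purge into a shared helper. A user-space sketch of that purge on a plain singly linked list (the kernel code uses list_head entries and moves expired events back to a pool list):

#include <stdio.h>

struct event {
        unsigned long tmo;      /* absolute expiry time */
        struct event *next;
};

/* Detach expired events from *list and push them onto *pool. */
static int purge_event_list(struct event **list, struct event **pool,
                            unsigned long now)
{
        struct event **pp = list;
        int removed = 0;

        while (*pp) {
                struct event *ev = *pp;

                if (now > ev->tmo) {
                        *pp = ev->next;         /* unlink ... */
                        ev->next = *pool;       /* ... and recycle */
                        *pool = ev;
                        removed++;
                } else {
                        pp = &ev->next;
                }
        }
        return removed;
}

int main(void)
{
        struct event e1 = { .tmo = 5 }, e2 = { .tmo = 20 };
        struct event *tx_events = &e1, *pool = NULL;

        e1.next = &e2;
        e2.next = NULL;

        /* At time 10 only e1 has expired; e2 stays queued. */
        printf("removed %d\n", purge_event_list(&tx_events, &pool, 10));
        printf("head tmo now %lu\n", tx_events->tmo);
        return 0;
}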
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index da53eb04b0a4..3f8237c17d09 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -66,6 +66,9 @@
#define FDB_GEN_CFG1 0x60
#define SMEM_VLAN_OFFSET 8
#define SMEM_VLAN_OFFSET_MASK GENMASK(25, 8)
+#define FDB_HASH_SIZE_MASK GENMASK(6, 3)
+#define FDB_HASH_SIZE_SHIFT 3
+#define FDB_HASH_SIZE 3
#define FDB_GEN_CFG2 0x64
#define FDB_VLAN_EN BIT(6)
@@ -463,6 +466,8 @@ void icssg_init_emac_mode(struct prueth *prueth)
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK,
+ FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT);
/* Set enable VLAN aware mode, and FDBs for all PRUs */
regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN));
prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
@@ -484,6 +489,8 @@ void icssg_init_fw_offload_mode(struct prueth *prueth)
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK,
+ FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT);
/* Set enable VLAN aware mode, and FDBs for all PRUs */
regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL);
prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
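The icssg change above programs the FDB hash size via regmap_update_bits(), i.e. a read-modify-write that replaces only bits 6:3 of FDB_GEN_CFG1. A user-space sketch of that field update; the register value and the update_bits() helper are stand-ins for regmap:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)           (((~0U) << (l)) & (~0U >> (31 - (h))))
#define FDB_HASH_SIZE_MASK      GENMASK(6, 3)
#define FDB_HASH_SIZE_SHIFT     3
#define FDB_HASH_SIZE           3

static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
{
        *reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
        uint32_t fdb_gen_cfg1 = 0xdeadbe07;     /* pretend current value */

        update_bits(&fdb_gen_cfg1, FDB_HASH_SIZE_MASK,
                    FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT);
        /* Bits 6:3 now hold 3; every other bit is untouched. */
        printf("FDB_GEN_CFG1 = 0x%08x\n", fdb_gen_cfg1);
        return 0;
}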
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 857820657bac..5ee13db568f0 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1338,10 +1338,10 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
tx_pipe->dma_channel = knav_dma_open_channel(dev,
tx_pipe->dma_chan_name, &config);
- if (IS_ERR(tx_pipe->dma_channel)) {
+ if (!tx_pipe->dma_channel) {
dev_err(dev, "failed opening tx chan(%s)\n",
tx_pipe->dma_chan_name);
- ret = PTR_ERR(tx_pipe->dma_channel);
+ ret = -EINVAL;
goto err;
}
@@ -1359,7 +1359,7 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
return 0;
err:
- if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
+ if (tx_pipe->dma_channel)
knav_dma_close_channel(tx_pipe->dma_channel);
tx_pipe->dma_channel = NULL;
return ret;
@@ -1678,10 +1678,10 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
netcp->dma_chan_name, &config);
- if (IS_ERR(netcp->rx_channel)) {
+ if (!netcp->rx_channel) {
dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n",
netcp->dma_chan_name);
- ret = PTR_ERR(netcp->rx_channel);
+ ret = -EINVAL;
goto fail;
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 1e2713f0c921..b37d6cfbfbe9 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -2427,7 +2427,8 @@ int wx_sw_init(struct wx *wx)
wx->oem_svid = pdev->subsystem_vendor;
wx->oem_ssid = pdev->subsystem_device;
wx->bus.device = PCI_SLOT(pdev->devfn);
- wx->bus.func = PCI_FUNC(pdev->devfn);
+ wx->bus.func = FIELD_GET(WX_CFG_PORT_ST_LANID,
+ rd32(wx, WX_CFG_PORT_ST));
if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN ||
pdev->is_virtfn) {
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index d89b9b8a0a2c..2f8319e03182 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -97,6 +97,8 @@
#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
#define WX_CFG_PORT_CTL_QINQ BIT(2)
#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/
+#define WX_CFG_PORT_ST 0x14404
+#define WX_CFG_PORT_ST_LANID GENMASK(9, 8)
#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
#define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */
@@ -557,8 +559,6 @@ enum WX_MSCA_CMD_value {
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
-#define WX_CFG_PORT_ST 0x14404
-
/******************* Receive Descriptor bit definitions **********************/
#define WX_RXD_STAT_DD BIT(0) /* Done */
#define WX_RXD_STAT_EOP BIT(1) /* End of Packet */
diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c
index 36ccc53b1797..ef860cfc629f 100644
--- a/drivers/net/mctp/mctp-usb.c
+++ b/drivers/net/mctp/mctp-usb.c
@@ -96,11 +96,13 @@ static netdev_tx_t mctp_usb_start_xmit(struct sk_buff *skb,
skb->data, skb->len,
mctp_usb_out_complete, skb);
+ /* Stop the TX queue first to prevent a race with URB completion */
+ netif_stop_queue(dev);
rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (rc)
+ if (rc) {
+ netif_wake_queue(dev);
goto err_drop;
- else
- netif_stop_queue(dev);
+ }
return NETDEV_TX_OK;
diff --git a/drivers/net/mdio/mdio-airoha.c b/drivers/net/mdio/mdio-airoha.c
index 1dc9939c8d7d..52e7475121ea 100644
--- a/drivers/net/mdio/mdio-airoha.c
+++ b/drivers/net/mdio/mdio-airoha.c
@@ -219,6 +219,8 @@ static int airoha_mdio_probe(struct platform_device *pdev)
priv = bus->priv;
priv->base_addr = addr;
priv->regmap = device_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
priv->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(priv->clk))
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 194570443493..bb6e03a92956 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -886,8 +886,11 @@ static ssize_t userdatum_value_show(struct config_item *item, char *buf)
static void update_userdata(struct netconsole_target *nt)
{
- int complete_idx = 0, child_count = 0;
struct list_head *entry;
+ int child_count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&target_list_lock, flags);
/* Clear the current string in case the last userdatum was deleted */
nt->userdata_length = 0;
@@ -897,8 +900,11 @@ static void update_userdata(struct netconsole_target *nt)
struct userdatum *udm_item;
struct config_item *item;
- if (WARN_ON_ONCE(child_count >= MAX_EXTRADATA_ITEMS))
- break;
+ if (child_count >= MAX_EXTRADATA_ITEMS) {
+ spin_unlock_irqrestore(&target_list_lock, flags);
+ WARN_ON_ONCE(1);
+ return;
+ }
child_count++;
item = container_of(entry, struct config_item, ci_entry);
@@ -912,12 +918,11 @@ static void update_userdata(struct netconsole_target *nt)
* one entry length (1/MAX_EXTRADATA_ITEMS long), entry count is
* checked to not exceed MAX items with child_count above
*/
- complete_idx += scnprintf(&nt->extradata_complete[complete_idx],
- MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n",
- item->ci_name, udm_item->value);
+ nt->userdata_length += scnprintf(&nt->extradata_complete[nt->userdata_length],
+ MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n",
+ item->ci_name, udm_item->value);
}
- nt->userdata_length = strnlen(nt->extradata_complete,
- sizeof(nt->extradata_complete));
+ spin_unlock_irqrestore(&target_list_lock, flags);
}
static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
@@ -931,6 +936,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
if (count > MAX_EXTRADATA_VALUE_LEN)
return -EMSGSIZE;
+ mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
ret = strscpy(udm->value, buf, sizeof(udm->value));
@@ -944,6 +950,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
ret = count;
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@@ -969,6 +976,7 @@ static ssize_t sysdata_msgid_enabled_store(struct config_item *item,
if (ret)
return ret;
+ mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
curr = !!(nt->sysdata_fields & SYSDATA_MSGID);
if (msgid_enabled == curr)
@@ -989,6 +997,7 @@ unlock_ok:
ret = strnlen(buf, count);
unlock:
mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@@ -1003,6 +1012,7 @@ static ssize_t sysdata_release_enabled_store(struct config_item *item,
if (ret)
return ret;
+ mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
curr = !!(nt->sysdata_fields & SYSDATA_RELEASE);
if (release_enabled == curr)
@@ -1023,6 +1033,7 @@ unlock_ok:
ret = strnlen(buf, count);
unlock:
mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@@ -1037,6 +1048,7 @@ static ssize_t sysdata_taskname_enabled_store(struct config_item *item,
if (ret)
return ret;
+ mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
curr = !!(nt->sysdata_fields & SYSDATA_TASKNAME);
if (taskname_enabled == curr)
@@ -1057,6 +1069,7 @@ unlock_ok:
ret = strnlen(buf, count);
unlock:
mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
@@ -1072,6 +1085,7 @@ static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item,
if (ret)
return ret;
+ mutex_lock(&netconsole_subsys.su_mutex);
mutex_lock(&dynamic_netconsole_mutex);
curr = !!(nt->sysdata_fields & SYSDATA_CPU_NR);
if (cpu_nr_enabled == curr)
@@ -1100,6 +1114,7 @@ unlock_ok:
ret = strnlen(buf, count);
unlock:
mutex_unlock(&dynamic_netconsole_mutex);
+ mutex_unlock(&netconsole_subsys.su_mutex);
return ret;
}
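The netconsole change above keeps a running offset (nt->userdata_length) and appends each " key=value\n" entry with scnprintf() at that offset, instead of measuring the buffer afterwards with strnlen(). A user-space sketch of that pattern using snprintf(); the buffer sizes and keys are illustrative:

#include <stdio.h>
#include <string.h>

#define ENTRY_LEN       64
#define MAX_ITEMS       4

int main(void)
{
        char buf[ENTRY_LEN * MAX_ITEMS];
        const char *keys[] = { "rack", "role" };
        const char *vals[] = { "12", "router" };
        size_t len = 0;

        for (int i = 0; i < 2; i++) {
                int n = snprintf(&buf[len], ENTRY_LEN, " %s=%s\n",
                                 keys[i], vals[i]);
                if (n < 0 || n >= ENTRY_LEN)
                        break;          /* entry truncated, stop appending */
                len += n;               /* next entry starts right here */
        }

        printf("length=%zu\n%s", len, buf);
        return 0;
}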
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index ebc3833e95b4..fa1d97885caa 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -545,6 +545,7 @@ static void nsim_enable_napi(struct netdevsim *ns)
static int nsim_open(struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct netdevsim *peer;
int err;
netdev_assert_locked(dev);
@@ -555,6 +556,12 @@ static int nsim_open(struct net_device *dev)
nsim_enable_napi(ns);
+ peer = rtnl_dereference(ns->peer);
+ if (peer && netif_running(peer->netdev)) {
+ netif_carrier_on(dev);
+ netif_carrier_on(peer->netdev);
+ }
+
return 0;
}
diff --git a/drivers/net/ovpn/tcp.c b/drivers/net/ovpn/tcp.c
index 289f62c5d2c7..0d7f30360d87 100644
--- a/drivers/net/ovpn/tcp.c
+++ b/drivers/net/ovpn/tcp.c
@@ -560,16 +560,34 @@ static void ovpn_tcp_close(struct sock *sk, long timeout)
static __poll_t ovpn_tcp_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
- __poll_t mask = datagram_poll(file, sock, wait);
+ struct sk_buff_head *queue = &sock->sk->sk_receive_queue;
struct ovpn_socket *ovpn_sock;
+ struct ovpn_peer *peer = NULL;
+ __poll_t mask;
rcu_read_lock();
ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
- if (ovpn_sock && ovpn_sock->peer &&
- !skb_queue_empty(&ovpn_sock->peer->tcp.user_queue))
- mask |= EPOLLIN | EPOLLRDNORM;
+ /* if we landed in this callback, we expect to have a
+ * meaningful state. The ovpn_socket lifecycle would
+ * prevent it otherwise.
+ */
+ if (WARN(!ovpn_sock || !ovpn_sock->peer,
+ "ovpn: null state in ovpn_tcp_poll!")) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ if (ovpn_peer_hold(ovpn_sock->peer)) {
+ peer = ovpn_sock->peer;
+ queue = &peer->tcp.user_queue;
+ }
rcu_read_unlock();
+ mask = datagram_poll_queue(file, sock, wait, queue);
+
+ if (peer)
+ ovpn_peer_put(peer);
+
return mask;
}
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 3459a0e9d8b9..cb306f9e80cc 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -405,7 +405,7 @@ static int bcm5481x_set_brrmode(struct phy_device *phydev, bool on)
static int bcm54811_config_init(struct phy_device *phydev)
{
struct bcm54xx_phy_priv *priv = phydev->priv;
- int err, reg, exp_sync_ethernet;
+ int err, reg, exp_sync_ethernet, aux_rgmii_en;
/* Enable CLK125 MUX on LED4 if ref clock is enabled. */
if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) {
@@ -434,6 +434,24 @@ static int bcm54811_config_init(struct phy_device *phydev)
if (err < 0)
return err;
+ /* Enable RGMII if configured */
+ if (phy_interface_is_rgmii(phydev))
+ aux_rgmii_en = MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN |
+ MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+ else
+ aux_rgmii_en = 0;
+
+ /* Also write reserved bits 6:5, because the documentation requires
+ * them to be written as 0b11.
+ */
+ err = bcm54xx_auxctl_write(phydev,
+ MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
+ MII_BCM54XX_AUXCTL_MISC_WREN |
+ aux_rgmii_en |
+ MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RSVD);
+ if (err < 0)
+ return err;
+
return bcm5481x_set_brrmode(phydev, priv->brr_mode);
}
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index deeefb962566..36a0c1b7f59c 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -738,6 +738,12 @@ static int dp83867_config_init(struct phy_device *phydev)
return ret;
}
+ /* Although the DP83867 reports EEE capability through the
+ * MDIO_PCS_EEE_ABLE and MDIO_AN_EEE_ADV registers, the feature
+ * is not actually implemented in hardware.
+ */
+ phy_disable_eee(phydev);
+
if (phy_interface_is_rgmii(phydev) ||
phydev->interface == PHY_INTERFACE_MODE_SGMII) {
val = phy_read(phydev, MII_DP83867_PHYCTRL);
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index a2cd1cc35cde..1f381d7b13ff 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -84,7 +84,7 @@
#define DP83869_CLK_DELAY_DEF 7
/* STRAP_STS1 bits */
-#define DP83869_STRAP_OP_MODE_MASK GENMASK(2, 0)
+#define DP83869_STRAP_OP_MODE_MASK GENMASK(11, 9)
#define DP83869_STRAP_STS1_RESERVED BIT(11)
#define DP83869_STRAP_MIRROR_ENABLED BIT(12)
@@ -528,7 +528,7 @@ static int dp83869_set_strapped_mode(struct phy_device *phydev)
if (val < 0)
return val;
- dp83869->mode = val & DP83869_STRAP_OP_MODE_MASK;
+ dp83869->mode = FIELD_GET(DP83869_STRAP_OP_MODE_MASK, val);
return 0;
}
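The dp83869 fix above corrects both the mask (the strap operating mode lives in bits 11:9 of STRAP_STS1, not 2:0) and the extraction (FIELD_GET() shifts the field down to bit 0). A small sketch of the extraction with user-space stand-ins for GENMASK() and FIELD_GET():

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)   (((~0U) << (l)) & (~0U >> (31 - (h))))
#define FIELD_GET(mask, reg) \
        (((reg) & (mask)) >> __builtin_ctz(mask))

#define STRAP_OP_MODE_MASK      GENMASK(11, 9)

int main(void)
{
        uint32_t strap_sts1 = 0x0a00;   /* example: bits 11:9 = 0b101 */

        /* Old code masked with GENMASK(2, 0) and read unrelated bits 2:0. */
        printf("mode = %u\n", FIELD_GET(STRAP_OP_MODE_MASK, strap_sts1));
        return 0;
}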
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index cad6ed3aa10b..4354241137d5 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -73,8 +73,11 @@ int mdiobus_register_device(struct mdio_device *mdiodev)
return err;
err = mdiobus_register_reset(mdiodev);
- if (err)
+ if (err) {
+ gpiod_put(mdiodev->reset_gpio);
+ mdiodev->reset_gpio = NULL;
return err;
+ }
/* Assert the reset signal */
mdio_device_reset(mdiodev, 1);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 79ce3eb6752b..01c87c9b7702 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -466,6 +466,12 @@ struct lan8842_priv {
u16 rev;
};
+struct lanphy_reg_data {
+ int page;
+ u16 addr;
+ u16 val;
+};
+
static const struct kszphy_type lan8814_type = {
.led_mode_reg = ~LAN8814_LED_CTRL_1,
.cable_diag_reg = LAN8814_CABLE_DIAG,
@@ -2836,6 +2842,13 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
#define LAN8814_PAGE_PCS_DIGITAL 2
/**
+ * LAN8814_PAGE_EEE - Selects Extended Page 3.
+ *
+ * This page contains EEE registers.
+ */
+#define LAN8814_PAGE_EEE 3
+
+/**
* LAN8814_PAGE_COMMON_REGS - Selects Extended Page 4.
*
* This page contains device-common registers that affect the entire chip.
@@ -2854,6 +2867,13 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
#define LAN8814_PAGE_PORT_REGS 5
/**
+ * LAN8814_PAGE_POWER_REGS - Selects Extended Page 28.
+ *
+ * This page contains analog control registers and power mode registers.
+ */
+#define LAN8814_PAGE_POWER_REGS 28
+
+/**
* LAN8814_PAGE_SYSTEM_CTRL - Selects Extended Page 31.
*
* This page appears to hold fundamental system or global controls. In the
@@ -4262,6 +4282,8 @@ static int __lan8814_ptp_probe_once(struct phy_device *phydev, char *pin_name,
{
struct lan8814_shared_priv *shared = phy_package_get_priv(phydev);
+ shared->phydev = phydev;
+
/* Initialise shared lock for clock*/
mutex_init(&shared->shared_lock);
@@ -4317,8 +4339,6 @@ static int __lan8814_ptp_probe_once(struct phy_device *phydev, char *pin_name,
phydev_dbg(phydev, "successfully registered ptp clock\n");
- shared->phydev = phydev;
-
/* The EP.4 is shared between all the PHYs in the package and also it
* can be accessed by any of the PHYs
*/
@@ -4360,12 +4380,6 @@ static int lan8814_config_init(struct phy_device *phydev)
{
struct kszphy_priv *lan8814 = phydev->priv;
- /* Reset the PHY */
- lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
- LAN8814_QSGMII_SOFT_RESET,
- LAN8814_QSGMII_SOFT_RESET_BIT,
- LAN8814_QSGMII_SOFT_RESET_BIT);
-
/* Disable ANEG with QSGMII PCS Host side */
lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
LAN8814_QSGMII_PCS1G_ANEG_CONFIG,
@@ -4451,6 +4465,12 @@ static int lan8814_probe(struct phy_device *phydev)
addr, sizeof(struct lan8814_shared_priv));
if (phy_package_init_once(phydev)) {
+ /* Reset the PHY */
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_QSGMII_SOFT_RESET,
+ LAN8814_QSGMII_SOFT_RESET_BIT,
+ LAN8814_QSGMII_SOFT_RESET_BIT);
+
err = lan8814_release_coma_mode(phydev);
if (err)
return err;
@@ -5884,6 +5904,144 @@ static int lan8842_probe(struct phy_device *phydev)
return 0;
}
+#define LAN8814_POWER_MGMT_MODE_3_ANEG_MDI 0x13
+#define LAN8814_POWER_MGMT_MODE_4_ANEG_MDIX 0x14
+#define LAN8814_POWER_MGMT_MODE_5_10BT_MDI 0x15
+#define LAN8814_POWER_MGMT_MODE_6_10BT_MDIX 0x16
+#define LAN8814_POWER_MGMT_MODE_7_100BT_TRAIN 0x17
+#define LAN8814_POWER_MGMT_MODE_8_100BT_MDI 0x18
+#define LAN8814_POWER_MGMT_MODE_9_100BT_EEE_MDI_TX 0x19
+#define LAN8814_POWER_MGMT_MODE_10_100BT_EEE_MDI_RX 0x1a
+#define LAN8814_POWER_MGMT_MODE_11_100BT_MDIX 0x1b
+#define LAN8814_POWER_MGMT_MODE_12_100BT_EEE_MDIX_TX 0x1c
+#define LAN8814_POWER_MGMT_MODE_13_100BT_EEE_MDIX_RX 0x1d
+#define LAN8814_POWER_MGMT_MODE_14_100BTX_EEE_TX_RX 0x1e
+
+#define LAN8814_POWER_MGMT_DLLPD_D BIT(0)
+#define LAN8814_POWER_MGMT_ADCPD_D BIT(1)
+#define LAN8814_POWER_MGMT_PGAPD_D BIT(2)
+#define LAN8814_POWER_MGMT_TXPD_D BIT(3)
+#define LAN8814_POWER_MGMT_DLLPD_C BIT(4)
+#define LAN8814_POWER_MGMT_ADCPD_C BIT(5)
+#define LAN8814_POWER_MGMT_PGAPD_C BIT(6)
+#define LAN8814_POWER_MGMT_TXPD_C BIT(7)
+#define LAN8814_POWER_MGMT_DLLPD_B BIT(8)
+#define LAN8814_POWER_MGMT_ADCPD_B BIT(9)
+#define LAN8814_POWER_MGMT_PGAPD_B BIT(10)
+#define LAN8814_POWER_MGMT_TXPD_B BIT(11)
+#define LAN8814_POWER_MGMT_DLLPD_A BIT(12)
+#define LAN8814_POWER_MGMT_ADCPD_A BIT(13)
+#define LAN8814_POWER_MGMT_PGAPD_A BIT(14)
+#define LAN8814_POWER_MGMT_TXPD_A BIT(15)
+
+#define LAN8814_POWER_MGMT_C_D (LAN8814_POWER_MGMT_DLLPD_D | \
+ LAN8814_POWER_MGMT_ADCPD_D | \
+ LAN8814_POWER_MGMT_PGAPD_D | \
+ LAN8814_POWER_MGMT_DLLPD_C | \
+ LAN8814_POWER_MGMT_ADCPD_C | \
+ LAN8814_POWER_MGMT_PGAPD_C)
+
+#define LAN8814_POWER_MGMT_B_C_D (LAN8814_POWER_MGMT_C_D | \
+ LAN8814_POWER_MGMT_DLLPD_B | \
+ LAN8814_POWER_MGMT_ADCPD_B | \
+ LAN8814_POWER_MGMT_PGAPD_B)
+
+#define LAN8814_POWER_MGMT_VAL1 (LAN8814_POWER_MGMT_C_D | \
+ LAN8814_POWER_MGMT_ADCPD_B | \
+ LAN8814_POWER_MGMT_PGAPD_B | \
+ LAN8814_POWER_MGMT_ADCPD_A | \
+ LAN8814_POWER_MGMT_PGAPD_A)
+
+#define LAN8814_POWER_MGMT_VAL2 LAN8814_POWER_MGMT_C_D
+
+#define LAN8814_POWER_MGMT_VAL3 (LAN8814_POWER_MGMT_C_D | \
+ LAN8814_POWER_MGMT_DLLPD_B | \
+ LAN8814_POWER_MGMT_ADCPD_B | \
+ LAN8814_POWER_MGMT_PGAPD_A)
+
+#define LAN8814_POWER_MGMT_VAL4 (LAN8814_POWER_MGMT_B_C_D | \
+ LAN8814_POWER_MGMT_ADCPD_A | \
+ LAN8814_POWER_MGMT_PGAPD_A)
+
+#define LAN8814_POWER_MGMT_VAL5 LAN8814_POWER_MGMT_B_C_D
+
+#define LAN8814_EEE_WAKE_TX_TIMER 0x0e
+#define LAN8814_EEE_WAKE_TX_TIMER_MAX_VAL 0x1f
+
+static const struct lanphy_reg_data short_center_tap_errata[] = {
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_3_ANEG_MDI,
+ LAN8814_POWER_MGMT_VAL1 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_4_ANEG_MDIX,
+ LAN8814_POWER_MGMT_VAL1 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_5_10BT_MDI,
+ LAN8814_POWER_MGMT_VAL1 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_6_10BT_MDIX,
+ LAN8814_POWER_MGMT_VAL1 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_7_100BT_TRAIN,
+ LAN8814_POWER_MGMT_VAL2 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_8_100BT_MDI,
+ LAN8814_POWER_MGMT_VAL3 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_9_100BT_EEE_MDI_TX,
+ LAN8814_POWER_MGMT_VAL3 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_10_100BT_EEE_MDI_RX,
+ LAN8814_POWER_MGMT_VAL4 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_11_100BT_MDIX,
+ LAN8814_POWER_MGMT_VAL5 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_12_100BT_EEE_MDIX_TX,
+ LAN8814_POWER_MGMT_VAL5 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_13_100BT_EEE_MDIX_RX,
+ LAN8814_POWER_MGMT_VAL4 },
+ { LAN8814_PAGE_POWER_REGS,
+ LAN8814_POWER_MGMT_MODE_14_100BTX_EEE_TX_RX,
+ LAN8814_POWER_MGMT_VAL4 },
+};
+
+static const struct lanphy_reg_data waketx_timer_errata[] = {
+ { LAN8814_PAGE_EEE,
+ LAN8814_EEE_WAKE_TX_TIMER,
+ LAN8814_EEE_WAKE_TX_TIMER_MAX_VAL },
+};
+
+static int lanphy_write_reg_data(struct phy_device *phydev,
+ const struct lanphy_reg_data *data,
+ size_t num)
+{
+ int ret = 0;
+
+ while (num--) {
+ ret = lanphy_write_page_reg(phydev, data->page, data->addr,
+ data->val);
+ if (ret)
+ break;
+ data++;
+ }
+
+ return ret;
+}
+
+static int lan8842_erratas(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = lanphy_write_reg_data(phydev, short_center_tap_errata,
+ ARRAY_SIZE(short_center_tap_errata));
+ if (ret)
+ return ret;
+
+ return lanphy_write_reg_data(phydev, waketx_timer_errata,
+ ARRAY_SIZE(waketx_timer_errata));
+}
+
static int lan8842_config_init(struct phy_device *phydev)
{
int ret;
@@ -5896,6 +6054,11 @@ static int lan8842_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ /* Apply the errata for this device */
+ ret = lan8842_erratas(phydev);
+ if (ret < 0)
+ return ret;
+
/* Even if the GPIOs are set to control the LEDs the behaviour of the
* LEDs is wrong, they are not blinking when there is traffic.
* To fix this it is required to set extended LED mode
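As a rough aid to reading the LAN8814_POWER_MGMT_* composites added above, a standalone sketch (illustration only, not driver code) that recomputes them from the per-channel bit layout; the PD() helper and the printed expected values are editorial assumptions derived solely from the BIT() definitions in this hunk:

#include <stdio.h>

#define BIT(n)	(1u << (n))

/* Per-channel power-down bits share one 16-bit register: channel D in
 * bits 0-3, C in 4-7, B in 8-11, A in 12-15 (DLLPD, ADCPD, PGAPD, TXPD).
 */
#define PD(ch, bit)	BIT(4 * (ch) + (bit))	/* ch: 0=D, 1=C, 2=B, 3=A */

int main(void)
{
	unsigned int c_d = PD(0, 0) | PD(0, 1) | PD(0, 2) |
			   PD(1, 0) | PD(1, 1) | PD(1, 2);
	unsigned int b_c_d = c_d | PD(2, 0) | PD(2, 1) | PD(2, 2);
	unsigned int val1 = c_d | PD(2, 1) | PD(2, 2) | PD(3, 1) | PD(3, 2);

	/* Expected: C_D=0x0077, B_C_D=0x0777, VAL1=0x6677 */
	printf("C_D=0x%04x B_C_D=0x%04x VAL1=0x%04x\n", c_d, b_c_d, val1);
	return 0;
}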
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index 82d8e1335215..16a347084293 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -154,7 +154,7 @@
#define RTL_8211FVD_PHYID 0x001cc878
#define RTL_8221B 0x001cc840
#define RTL_8221B_VB_CG 0x001cc849
-#define RTL_8221B_VN_CG 0x001cc84a
+#define RTL_8221B_VM_CG 0x001cc84a
#define RTL_8251B 0x001cc862
#define RTL_8261C 0x001cc890
@@ -633,26 +633,25 @@ static int rtl8211f_config_init(struct phy_device *phydev)
str_enabled_disabled(val_rxdly));
}
+ if (!priv->has_phycr2)
+ return 0;
+
/* Disable PHY-mode EEE so LPI is passed to the MAC */
ret = phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE, RTL8211F_PHYCR2,
RTL8211F_PHYCR2_PHY_EEE_ENABLE, 0);
if (ret)
return ret;
- if (priv->has_phycr2) {
- ret = phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE,
- RTL8211F_PHYCR2, RTL8211F_CLKOUT_EN,
- priv->phycr2);
- if (ret < 0) {
- dev_err(dev, "clkout configuration failed: %pe\n",
- ERR_PTR(ret));
- return ret;
- }
-
- return genphy_soft_reset(phydev);
+ ret = phy_modify_paged(phydev, RTL8211F_PHYCR_PAGE,
+ RTL8211F_PHYCR2, RTL8211F_CLKOUT_EN,
+ priv->phycr2);
+ if (ret < 0) {
+ dev_err(dev, "clkout configuration failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
}
- return 0;
+ return genphy_soft_reset(phydev);
}
static int rtl821x_suspend(struct phy_device *phydev)
@@ -1524,16 +1523,16 @@ static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev,
return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true);
}
-static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev,
+static int rtl8221b_vm_cg_c22_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, false);
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, false);
}
-static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev,
+static int rtl8221b_vm_cg_c45_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true);
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, true);
}
static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev,
@@ -1880,7 +1879,7 @@ static struct phy_driver realtek_drvs[] = {
.suspend = genphy_c45_pma_suspend,
.resume = rtlgen_c45_resume,
}, {
- .match_phy_device = rtl8221b_vn_cg_c22_match_phy_device,
+ .match_phy_device = rtl8221b_vm_cg_c22_match_phy_device,
.name = "RTL8221B-VM-CG 2.5Gbps PHY (C22)",
.probe = rtl822x_probe,
.get_features = rtl822x_get_features,
@@ -1893,8 +1892,8 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- .match_phy_device = rtl8221b_vn_cg_c45_match_phy_device,
- .name = "RTL8221B-VN-CG 2.5Gbps PHY (C45)",
+ .match_phy_device = rtl8221b_vm_cg_c45_match_phy_device,
+ .name = "RTL8221B-VM-CG 2.5Gbps PHY (C45)",
.probe = rtl822x_probe,
.config_init = rtl822xb_config_init,
.get_rate_matching = rtl822xb_get_rate_matching,
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 85bd5d845409..232bbd79a4de 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -230,7 +230,9 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
int i;
unsigned long gpio_bits = dev->driver_info->data;
- usbnet_get_endpoints(dev,intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ goto out;
/* Toggle the GPIOs in a manufacturer/model specific way */
for (i = 2; i >= 0; i--) {
@@ -848,7 +850,9 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->driver_priv = priv;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;
/* Maybe the boot loader passed the MAC address via device tree */
if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) {
@@ -1281,7 +1285,9 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
int ret;
u8 buf[ETH_ALEN] = {0};
- usbnet_get_endpoints(dev,intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;
/* Get the MAC address */
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 42d35cc6b421..00397a807393 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1175,10 +1175,13 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
}
write_raw_eeprom_done:
- if (dev->chipid == ID_REV_CHIP_ID_7800_)
- return lan78xx_write_reg(dev, HW_CFG, saved);
-
- return 0;
+ if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+ int rc = lan78xx_write_reg(dev, HW_CFG, saved);
+ /* If USB fails, there is nothing to do */
+ if (rc < 0)
+ return rc;
+ }
+ return ret;
}
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
@@ -3247,10 +3250,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
}
} while (buf & HW_CFG_LRST_);
- ret = lan78xx_init_mac_address(dev);
- if (ret < 0)
- return ret;
-
/* save DEVID for later usage */
ret = lan78xx_read_reg(dev, ID_REV, &buf);
if (ret < 0)
@@ -3259,6 +3258,10 @@ static int lan78xx_reset(struct lan78xx_net *dev)
dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
+ ret = lan78xx_init_mac_address(dev);
+ if (ret < 0)
+ return ret;
+
/* Respond to the IN token with a NAK */
ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
if (ret < 0)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 11352d85475a..3a4985b582cb 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -192,6 +192,12 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (!skbn)
return 0;
+ /* Raw IP packets don't have a MAC header, but other subsystems
+ * (like xfrm) may still access MAC header offsets, so they must
+ * be initialized.
+ */
+ skb_reset_mac_header(skbn);
+
switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
case 0x40:
skbn->protocol = htons(ETH_P_IP);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 44cba7acfe7d..a22d4bb2cf3b 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -10122,7 +10122,12 @@ static int __init rtl8152_driver_init(void)
ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE);
if (ret)
return ret;
- return usb_register(&rtl8152_driver);
+
+ ret = usb_register(&rtl8152_driver);
+ if (ret)
+ usb_deregister_device_driver(&rtl8152_cfgselector_driver);
+
+ return ret;
}
static void __exit rtl8152_driver_exit(void)
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 92add3daadbb..278e6cb6f4d9 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -685,9 +685,16 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
rtl8150_t *dev = netdev_priv(netdev);
int count, res;
+ /* pad the frame and ensure terminating USB packet, datasheet 9.2.3 */
+ count = max(skb->len, ETH_ZLEN);
+ if (count % 64 == 0)
+ count++;
+ if (skb_padto(skb, count)) {
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
netif_stop_queue(netdev);
- count = (skb->len < 60) ? 60 : skb->len;
- count = (count & 0x3f) ? count : count + 1;
dev->tx_skb = skb;
usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2),
skb->data, count, write_bulk_callback, dev);
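A minimal standalone sketch of the transmit-length rule the hunk above applies, assuming ETH_ZLEN is 60 and a 64-byte bulk max packet size (as the old 0x3f mask suggests); illustration only, not driver code:

#include <stdio.h>

static unsigned int rtl8150_tx_len(unsigned int skb_len)
{
	unsigned int count = skb_len < 60 ? 60 : skb_len;	/* pad runts */

	if (count % 64 == 0)	/* never end on a full USB packet */
		count++;
	return count;
}

int main(void)
{
	unsigned int lens[] = { 42, 60, 64, 128, 1514 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("%u -> %u\n", lens[i], rtl8150_tx_len(lens[i]));
	return 0;
}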
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 511c4154cf74..697cd9d866d3 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -702,6 +702,7 @@ void usbnet_resume_rx(struct usbnet *dev)
struct sk_buff *skb;
int num = 0;
+ local_bh_disable();
clear_bit(EVENT_RX_PAUSED, &dev->flags);
while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
@@ -710,6 +711,7 @@ void usbnet_resume_rx(struct usbnet *dev)
}
queue_work(system_bh_wq, &dev->bh_work);
+ local_bh_enable();
netif_dbg(dev, rx_status, dev->net,
"paused rx queue disabled, %d skbs requeued\n", num);
@@ -1657,6 +1659,8 @@ void usbnet_disconnect (struct usb_interface *intf)
net = dev->net;
unregister_netdev (net);
+ cancel_work_sync(&dev->kevent);
+
while ((urb = usb_get_from_anchor(&dev->deferred))) {
dev_kfree_skb(urb->context);
kfree(urb->sg);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a757cbcab87f..0369dda5ed60 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -910,17 +910,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
goto ok;
}
- /*
- * Verify that we can indeed put this data into a skb.
- * This is here to handle cases when the device erroneously
- * tries to receive more than is possible. This is usually
- * the case of a broken device.
- */
- if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
- net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
- dev_kfree_skb(skb);
- return NULL;
- }
BUG_ON(offset >= PAGE_SIZE);
while (len) {
unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
@@ -1379,9 +1368,14 @@ static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct
ret = XDP_PASS;
rcu_read_lock();
prog = rcu_dereference(rq->xdp_prog);
- /* TODO: support multi buffer. */
- if (prog && num_buf == 1)
- ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+ if (prog) {
+ /* TODO: support multi buffer. */
+ if (num_buf == 1)
+ ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit,
+ stats);
+ else
+ ret = XDP_ABORTED;
+ }
rcu_read_unlock();
switch (ret) {
@@ -2107,9 +2101,19 @@ static struct sk_buff *receive_big(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct page *page = buf;
- struct sk_buff *skb =
- page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
+ struct sk_buff *skb;
+
+ /* Make sure that len does not exceed the size allocated in
+ * add_recvbuf_big.
+ */
+ if (unlikely(len > (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE)) {
+ pr_debug("%s: rx error: len %u exceeds allocated size %lu\n",
+ dev->name, len,
+ (vi->big_packets_num_skbfrags + 1) * PAGE_SIZE);
+ goto err;
+ }
+ skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
u64_stats_add(&stats->bytes, len - vi->hdr_len);
if (unlikely(!skb))
goto err;
@@ -2534,6 +2538,13 @@ err_buf:
return NULL;
}
+static inline u32
+virtio_net_hash_value(const struct virtio_net_hdr_v1_hash *hdr_hash)
+{
+ return __le16_to_cpu(hdr_hash->hash_value_lo) |
+ (__le16_to_cpu(hdr_hash->hash_value_hi) << 16);
+}
+
static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
struct sk_buff *skb)
{
@@ -2560,7 +2571,7 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
default:
rss_hash_type = PKT_HASH_TYPE_NONE;
}
- skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
+ skb_set_hash(skb, virtio_net_hash_value(hdr_hash), rss_hash_type);
}
static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
@@ -2620,22 +2631,28 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
return;
}
- /* 1. Save the flags early, as the XDP program might overwrite them.
+ /* About the flags below:
+ * 1. Save the flags early, as the XDP program might overwrite them.
* These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
* stay valid after XDP processing.
* 2. XDP doesn't work with partially checksummed packets (refer to
* virtnet_xdp_set()), so packets marked as
* VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
*/
- flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
- if (vi->mergeable_rx_bufs)
+ if (vi->mergeable_rx_bufs) {
+ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
stats);
- else if (vi->big_packets)
+ } else if (vi->big_packets) {
+ void *p = page_address((struct page *)buf);
+
+ flags = ((struct virtio_net_common_hdr *)p)->hdr.flags;
skb = receive_big(dev, vi, rq, buf, len, stats);
- else
+ } else {
+ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
+ }
if (unlikely(!skb))
return;
@@ -3306,6 +3323,10 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
+ /* Make sure it's safe to cast between formats */
+ BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr));
+ BUILD_BUG_ON(__alignof__(*hdr) != __alignof__(hdr->hash_hdr.hdr));
+
can_push = vi->any_header_sg &&
!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
@@ -6745,7 +6766,7 @@ static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
hash_report = VIRTIO_NET_HASH_REPORT_NONE;
*rss_type = virtnet_xdp_rss_type[hash_report];
- *hash = __le32_to_cpu(hdr_hash->hash_value);
+ *hash = virtio_net_hash_value(hdr_hash);
return 0;
}
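A toy model of the virtio_net_hash_value() helper introduced above, assuming only that the device reports the 32-bit hash as two little-endian 16-bit halves; illustration only:

#include <stdint.h>
#include <stdio.h>

/* Recombine the low and high 16-bit halves into the 32-bit hash value. */
static uint32_t hash_value(uint16_t lo, uint16_t hi)
{
	return (uint32_t)lo | ((uint32_t)hi << 16);
}

int main(void)
{
	/* 0xdeadbeef split as lo=0xbeef, hi=0xdead recombines to 0xdeadbeef */
	printf("0x%08x\n", hash_value(0xbeef, 0xdead));
	return 0;
}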
diff --git a/drivers/net/wan/framer/pef2256/pef2256.c b/drivers/net/wan/framer/pef2256/pef2256.c
index c5501826db1e..c058cc79137d 100644
--- a/drivers/net/wan/framer/pef2256/pef2256.c
+++ b/drivers/net/wan/framer/pef2256/pef2256.c
@@ -648,7 +648,8 @@ static int pef2256_add_audio_devices(struct pef2256 *pef2256)
audio_devs[i].id = i;
}
- ret = mfd_add_devices(pef2256->dev, 0, audio_devs, count, NULL, 0, NULL);
+ ret = devm_mfd_add_devices(pef2256->dev, 0, audio_devs, count,
+ NULL, 0, NULL);
kfree(audio_devs);
return ret;
}
@@ -822,8 +823,8 @@ static int pef2256_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pef2256);
- ret = mfd_add_devices(pef2256->dev, 0, pef2256_devs,
- ARRAY_SIZE(pef2256_devs), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(pef2256->dev, 0, pef2256_devs,
+ ARRAY_SIZE(pef2256_devs), NULL, 0, NULL);
if (ret) {
dev_err(pef2256->dev, "add devices failed (%d)\n", ret);
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index e595b0979a56..b4aad6604d6d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1764,32 +1764,33 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
- unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ;
unsigned long time_left, i;
- /* Sometimes the PCI HIF doesn't receive interrupt
- * for the service ready message even if the buffer
- * was completed. PCIe sniffer shows that it's
- * because the corresponding CE ring doesn't fires
- * it. Workaround here by polling CE rings. Since
- * the message could arrive at any time, continue
- * polling until timeout.
- */
- do {
+ time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left) {
+ /* Sometimes the PCI HIF doesn't receive an interrupt
+ * for the service ready message even if the buffer
+ * was completed. A PCIe sniffer shows that it's
+ * because the corresponding CE ring doesn't fire
+ * it. Work around this here by polling CE rings once.
+ */
+ ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
+
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(ar, i, 1);
- /* The 100 ms granularity is a tradeoff considering scheduler
- * overhead and response latency
- */
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
- msecs_to_jiffies(100));
- if (time_left)
- return 0;
- } while (time_before(jiffies, timeout));
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left) {
+ ath10k_warn(ar, "polling timed out\n");
+ return -ETIMEDOUT;
+ }
- ath10k_warn(ar, "failed to receive service ready completion\n");
- return -ETIMEDOUT;
+ ath10k_warn(ar, "service ready completion received, continuing normally\n");
+ }
+
+ return 0;
}
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
@@ -1937,6 +1938,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
if (cmd_id == WMI_CMD_UNSUPPORTED) {
ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
cmd_id);
+ dev_kfree_skb_any(skb);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 2810752260f2..812686173ac8 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -912,42 +912,84 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
static const struct dmi_system_id ath11k_pm_quirk_table[] = {
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* X13 G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J3"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* X13 G4 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21J4"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* T14 G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K3"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14 G4 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21K4"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* P14s G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K5"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P14s G4 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21K6"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* T16 G2 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K7"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T16 G2 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21K8"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* P16s G2 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K9"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P16s G2 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21KA"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* T14s G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21F8"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14s G4 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21F9"),
},
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 106e2530b64e..0e41b5a91d66 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <net/mac80211.h>
@@ -4417,9 +4417,9 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
- flags |= WMI_KEY_PAIRWISE;
+ flags = WMI_KEY_PAIRWISE;
else
- flags |= WMI_KEY_GROUP;
+ flags = WMI_KEY_GROUP;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n",
@@ -4456,7 +4456,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP &&
!arvif->num_stations);
- if ((flags & WMI_KEY_PAIRWISE) || cmd == SET_KEY || is_ap_with_no_sta) {
+ if (flags == WMI_KEY_PAIRWISE || cmd == SET_KEY || is_ap_with_no_sta) {
ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
@@ -4470,7 +4470,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto exit;
}
- if ((flags & WMI_KEY_GROUP) && cmd == SET_KEY && is_ap_with_no_sta)
+ if (flags == WMI_KEY_GROUP && cmd == SET_KEY && is_ap_with_no_sta)
arvif->reinstall_group_keys = true;
}
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 0491e3fd6b5e..e3b444333dee 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -5961,6 +5961,9 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar,
dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+ info->status.rates[0].idx = -1;
+
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) &&
!tx_compl_param->status) {
info->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 1d7b60aa5cb0..db351c922018 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -4064,68 +4064,12 @@ static int ath12k_mac_fils_discovery(struct ath12k_link_vif *arvif,
return ret;
}
-static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
-{
- struct ath12k *ar = arvif->ar;
- struct ieee80211_vif *vif = arvif->ahvif->vif;
- struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
- enum wmi_sta_powersave_param param;
- struct ieee80211_bss_conf *info;
- enum wmi_sta_ps_mode psmode;
- int ret;
- int timeout;
- bool enable_ps;
-
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
-
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
-
- enable_ps = arvif->ahvif->ps;
- if (enable_ps) {
- psmode = WMI_STA_PS_MODE_ENABLED;
- param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
-
- timeout = conf->dynamic_ps_timeout;
- if (timeout == 0) {
- info = ath12k_mac_get_link_bss_conf(arvif);
- if (!info) {
- ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
- vif->addr, arvif->link_id);
- return;
- }
-
- /* firmware doesn't like 0 */
- timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
- }
-
- ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
- timeout);
- if (ret) {
- ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
- arvif->vdev_id, ret);
- return;
- }
- } else {
- psmode = WMI_STA_PS_MODE_DISABLED;
- }
-
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
- arvif->vdev_id, psmode ? "enable" : "disable");
-
- ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
- if (ret)
- ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
- psmode, arvif->vdev_id, ret);
-}
-
static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u64 changed)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
unsigned long links = ahvif->links_map;
- struct ieee80211_vif_cfg *vif_cfg;
struct ieee80211_bss_conf *info;
struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
@@ -4189,24 +4133,61 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
}
}
}
+}
- if (changed & BSS_CHANGED_PS) {
- links = ahvif->links_map;
- vif_cfg = &vif->cfg;
+static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->ahvif->vif;
+ struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
+ enum wmi_sta_powersave_param param;
+ struct ieee80211_bss_conf *info;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int timeout;
+ bool enable_ps;
- for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
- arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
- if (!arvif || !arvif->ar)
- continue;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- ar = arvif->ar;
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
- if (ar->ab->hw_params->supports_sta_ps) {
- ahvif->ps = vif_cfg->ps;
- ath12k_mac_vif_setup_ps(arvif);
+ enable_ps = arvif->ahvif->ps;
+ if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ timeout = conf->dynamic_ps_timeout;
+ if (timeout == 0) {
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
}
+
+ /* firmware doesn't like 0 */
+ timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
+ }
+
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ timeout);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
}
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
}
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
}
static bool ath12k_mac_supports_tpc(struct ath12k *ar, struct ath12k_vif *ahvif,
@@ -4228,6 +4209,7 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ieee80211_vif_cfg *vif_cfg = &vif->cfg;
struct cfg80211_chan_def def;
u32 param_id, param_value;
enum nl80211_band band;
@@ -4514,6 +4496,12 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
}
ath12k_mac_fils_discovery(arvif, info);
+
+ if (changed & BSS_CHANGED_PS &&
+ ar->ab->hw_params->supports_sta_ps) {
+ ahvif->ps = vif_cfg->ps;
+ ath12k_mac_vif_setup_ps(arvif);
+ }
}
static struct ath12k_vif_cache *ath12k_ahvif_get_link_cache(struct ath12k_vif *ahvif,
@@ -8290,23 +8278,32 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
wake_up(&ar->txmgmt_empty_waitq);
}
-int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+static void ath12k_mac_tx_mgmt_free(struct ath12k *ar, int buf_id)
{
- struct sk_buff *msdu = skb;
+ struct sk_buff *msdu;
struct ieee80211_tx_info *info;
- struct ath12k *ar = ctx;
- struct ath12k_base *ab = ar->ab;
spin_lock_bh(&ar->txmgmt_idr_lock);
- idr_remove(&ar->txmgmt_idr, buf_id);
+ msdu = idr_remove(&ar->txmgmt_idr, buf_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
- dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
+
+ if (!msdu)
+ return;
+
+ dma_unmap_single(ar->ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
- ath12k_mgmt_over_wmi_tx_drop(ar, skb);
+ ath12k_mgmt_over_wmi_tx_drop(ar, msdu);
+}
+
+int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+{
+ struct ath12k *ar = ctx;
+
+ ath12k_mac_tx_mgmt_free(ar, buf_id);
return 0;
}
@@ -8315,17 +8312,10 @@ static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
{
struct ieee80211_vif *vif = ctx;
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
- struct sk_buff *msdu = skb;
struct ath12k *ar = skb_cb->ar;
- struct ath12k_base *ab = ar->ab;
- if (skb_cb->vif == vif) {
- spin_lock_bh(&ar->txmgmt_idr_lock);
- idr_remove(&ar->txmgmt_idr, buf_id);
- spin_unlock_bh(&ar->txmgmt_idr_lock);
- dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
- DMA_TO_DEVICE);
- }
+ if (skb_cb->vif == vif)
+ ath12k_mac_tx_mgmt_free(ar, buf_id);
return 0;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 8afaffe31031..bb96b87b2a6e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5627,8 +5627,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
*cookie, le16_to_cpu(action_frame->len),
le32_to_cpu(af_params->channel));
- ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg),
- af_params);
+ ack = brcmf_p2p_send_action_frame(vif->ifp, af_params);
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
GFP_KERNEL);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 0dc9d28cd77b..e1752a513c73 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1529,6 +1529,7 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
/**
* brcmf_p2p_tx_action_frame() - send action frame over fil.
*
+ * @ifp: interface to transmit on.
* @p2p: p2p info struct for vif.
* @af_params: action frame data/info.
*
@@ -1538,12 +1539,11 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
* The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
* frame is transmitted.
*/
-static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
+static s32 brcmf_p2p_tx_action_frame(struct brcmf_if *ifp,
+ struct brcmf_p2p_info *p2p,
struct brcmf_fil_af_params_le *af_params)
{
struct brcmf_pub *drvr = p2p->cfg->pub;
- struct brcmf_cfg80211_vif *vif;
- struct brcmf_p2p_action_frame *p2p_af;
s32 err = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -1552,14 +1552,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
- /* check if it is a p2p_presence response */
- p2p_af = (struct brcmf_p2p_action_frame *)af_params->action_frame.data;
- if (p2p_af->subtype == P2P_AF_PRESENCE_RSP)
- vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
- else
- vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
-
- err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
+ err = brcmf_fil_bsscfg_data_set(ifp, "actframe", af_params,
sizeof(*af_params));
if (err) {
bphy_err(drvr, " sending action frame has failed\n");
@@ -1711,16 +1704,14 @@ static bool brcmf_p2p_check_dwell_overflow(u32 requested_dwell,
/**
* brcmf_p2p_send_action_frame() - send action frame .
*
- * @cfg: driver private data for cfg80211 interface.
- * @ndev: net device to transmit on.
+ * @ifp: interface to transmit on.
* @af_params: configuration data for action frame.
*/
-bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
+bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp,
struct brcmf_fil_af_params_le *af_params)
{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
struct brcmf_p2p_info *p2p = &cfg->p2p;
- struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_fil_action_frame_le *action_frame;
struct brcmf_config_af_params config_af_params;
struct afx_hdl *afx_hdl = &p2p->afx_hdl;
@@ -1857,7 +1848,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
if (af_params->channel)
msleep(P2P_AF_RETRY_DELAY_TIME);
- ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
+ ack = !brcmf_p2p_tx_action_frame(ifp, p2p, af_params);
tx_retry++;
dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,
dwell_jiffies);
@@ -2217,7 +2208,6 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
WARN_ON(p2p_ifp->bsscfgidx != bsscfgidx);
- init_completion(&p2p->send_af_done);
INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
init_completion(&p2p->afx_hdl.act_frm_scan);
init_completion(&p2p->wait_next_af);
@@ -2513,6 +2503,8 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced)
pri_ifp = brcmf_get_ifp(cfg->pub, 0);
p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
+ init_completion(&p2p->send_af_done);
+
if (p2pdev_forced) {
err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);
if (IS_ERR(err_ptr)) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index d2ecee565bf2..d3137ebd7158 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -168,8 +168,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
const struct brcmf_event_msg *e,
void *data);
-bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
+bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp,
struct brcmf_fil_af_params_le *af_params);
bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
struct brcmf_bss_info_le *bi);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c
index 738f80fe0c50..f6f52d297a72 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c
@@ -501,6 +501,7 @@ void iwl_mld_remove_link(struct iwl_mld *mld,
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);
struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf);
bool is_deflink = link == &mld_vif->deflink;
+ u8 fw_id = link->fw_id;
if (WARN_ON(!link || link->active))
return;
@@ -513,10 +514,10 @@ void iwl_mld_remove_link(struct iwl_mld *mld,
RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL);
- if (WARN_ON(link->fw_id >= mld->fw->ucode_capa.num_links))
+ if (WARN_ON(fw_id >= mld->fw->ucode_capa.num_links))
return;
- RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL);
+ RCU_INIT_POINTER(mld->fw_id_to_bss_conf[fw_id], NULL);
}
void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
@@ -707,18 +708,13 @@ static int
iwl_mld_get_chan_load_from_element(struct iwl_mld *mld,
struct ieee80211_bss_conf *link_conf)
{
- struct ieee80211_vif *vif = link_conf->vif;
const struct cfg80211_bss_ies *ies;
const struct element *bss_load_elem = NULL;
const struct ieee80211_bss_load_elem *bss_load;
guard(rcu)();
- if (ieee80211_vif_link_active(vif, link_conf->link_id))
- ies = rcu_dereference(link_conf->bss->beacon_ies);
- else
- ies = rcu_dereference(link_conf->bss->ies);
-
+ ies = rcu_dereference(link_conf->bss->beacon_ies);
if (ies)
bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD,
ies->data, ies->len);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 9c9e0e1c6e1d..867807abde66 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -938,19 +938,12 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
{
+ u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;
- u16 flags, cck_flag;
-
- if (is_new_rate) {
- flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
- cck_flag = IWL_MAC_BEACON_CCK;
- } else {
- cck_flag = IWL_MAC_BEACON_CCK_V1;
- flags = iwl_fw_rate_idx_to_plcp(rate_idx);
- }
if (rate_idx <= IWL_LAST_CCK_RATE)
- flags |= cck_flag;
+ flags |= is_new_rate ? IWL_MAC_BEACON_CCK
+ : IWL_MAC_BEACON_CCK_V1;
return flags;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 0c9c2492d8a7..0b12ee8ad618 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -463,7 +463,7 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
if (!aux_roc_te) /* Not a Aux ROC time event */
return -EINVAL;
- iwl_mvm_te_check_trigger(mvm, notif, te_data);
+ iwl_mvm_te_check_trigger(mvm, notif, aux_roc_te);
IWL_DEBUG_TE(mvm,
"Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
@@ -475,14 +475,14 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
/* End TE, notify mac80211 */
ieee80211_remain_on_channel_expired(mvm->hw);
iwl_mvm_roc_finished(mvm); /* flush aux queue */
- list_del(&te_data->list); /* remove from list */
- te_data->running = false;
- te_data->vif = NULL;
- te_data->uid = 0;
- te_data->id = TE_MAX;
+ list_del(&aux_roc_te->list); /* remove from list */
+ aux_roc_te->running = false;
+ aux_roc_te->vif = NULL;
+ aux_roc_te->uid = 0;
+ aux_roc_te->id = TE_MAX;
} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
- te_data->running = true;
+ aux_roc_te->running = true;
ieee80211_ready_on_channel(mvm->hw); /* Start TE */
} else {
IWL_DEBUG_TE(mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 22602c32faa5..fa995e235d9b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -159,9 +159,15 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
{
- return (rate_idx >= IWL_FIRST_OFDM_RATE ?
- rate_idx - IWL_FIRST_OFDM_RATE :
- rate_idx);
+ if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
+ /* In the new rate format, legacy rates are indexed:
+ * 0 - 3 for CCK and 0 - 7 for OFDM.
+ */
+ return (rate_idx >= IWL_FIRST_OFDM_RATE ?
+ rate_idx - IWL_FIRST_OFDM_RATE :
+ rate_idx);
+
+ return iwl_fw_rate_idx_to_plcp(rate_idx);
}
u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
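For the new-format branch above, a rough sketch of the legacy index remapping, assuming the usual iwlwifi rate table where CCK occupies indices 0-3 and OFDM starts at IWL_FIRST_OFDM_RATE (4); illustration only, not driver code:

#include <stdio.h>

#define FIRST_OFDM_RATE	4	/* assumed: CCK rates occupy indices 0-3 */

/* mac80211 legacy index -> firmware index in the new rate format:
 * CCK stays 0-3, OFDM 4-11 is rebased to 0-7.
 */
static int idx_to_hwrate_new(int rate_idx)
{
	return rate_idx >= FIRST_OFDM_RATE ? rate_idx - FIRST_OFDM_RATE
					   : rate_idx;
}

int main(void)
{
	for (int i = 0; i < 12; i++)
		printf("%d -> %d\n", i, idx_to_hwrate_new(i));
	return 0;
}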
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 891e125ad30b..54d6d00ecdf1 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -2966,6 +2966,51 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
/*
* CMD_SET_BEACON.
*/
+
+static bool mwl8k_beacon_has_ds_params(const u8 *buf, int len)
+{
+ const struct ieee80211_mgmt *mgmt = (const void *)buf;
+ int ies_len;
+
+ if (len <= offsetof(struct ieee80211_mgmt, u.beacon.variable))
+ return false;
+
+ ies_len = len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
+
+ return cfg80211_find_ie(WLAN_EID_DS_PARAMS, mgmt->u.beacon.variable,
+ ies_len) != NULL;
+}
+
+static void mwl8k_beacon_copy_inject_ds_params(struct ieee80211_hw *hw,
+ u8 *buf_dst, const u8 *buf_src,
+ int src_len)
+{
+ const struct ieee80211_mgmt *mgmt = (const void *)buf_src;
+ static const u8 before_ds_params[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ };
+ const u8 *ies;
+ int hdr_len, left, offs, pos;
+
+ ies = mgmt->u.beacon.variable;
+ hdr_len = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+
+ offs = ieee80211_ie_split(ies, src_len - hdr_len, before_ds_params,
+ ARRAY_SIZE(before_ds_params), 0);
+
+ pos = hdr_len + offs;
+ left = src_len - pos;
+
+ memcpy(buf_dst, buf_src, pos);
+
+ /* Inject a DSSS Parameter Set after SSID + Supp Rates */
+ buf_dst[pos + 0] = WLAN_EID_DS_PARAMS;
+ buf_dst[pos + 1] = 1;
+ buf_dst[pos + 2] = hw->conf.chandef.chan->hw_value;
+
+ memcpy(buf_dst + pos + 3, buf_src + pos, left);
+}
struct mwl8k_cmd_set_beacon {
struct mwl8k_cmd_pkt_hdr header;
__le16 beacon_len;
@@ -2975,17 +3020,33 @@ struct mwl8k_cmd_set_beacon {
static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u8 *beacon, int len)
{
+ bool ds_params_present = mwl8k_beacon_has_ds_params(beacon, len);
struct mwl8k_cmd_set_beacon *cmd;
- int rc;
+ int rc, final_len = len;
- cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ if (!ds_params_present) {
+ /*
+ * mwl8k firmware requires a DS Params IE with the current
+ * channel in AP beacons. If mac80211/hostapd does not
+ * include it, inject one here. IE ID + length + channel
+ * number = 3 bytes.
+ */
+ final_len += 3;
+ }
+
+ cmd = kzalloc(sizeof(*cmd) + final_len, GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_BEACON);
- cmd->header.length = cpu_to_le16(sizeof(*cmd) + len);
- cmd->beacon_len = cpu_to_le16(len);
- memcpy(cmd->beacon, beacon, len);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd) + final_len);
+ cmd->beacon_len = cpu_to_le16(final_len);
+
+ if (ds_params_present)
+ memcpy(cmd->beacon, beacon, len);
+ else
+ mwl8k_beacon_copy_inject_ds_params(hw, cmd->beacon, beacon,
+ len);
rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
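A standalone sketch of the 3-byte DSSS Parameter Set element that mwl8k_beacon_copy_inject_ds_params() splices into the beacon above; the element ID value 3 follows IEEE 802.11, everything else here is illustrative:

#include <stdint.h>
#include <stdio.h>

#define WLAN_EID_DS_PARAMS	3	/* IEEE 802.11 DSSS Parameter Set */

/* Build the 3-byte element: element ID, length (1), current channel. */
static void build_ds_params(uint8_t *out, uint8_t channel)
{
	out[0] = WLAN_EID_DS_PARAMS;
	out[1] = 1;
	out[2] = channel;
}

int main(void)
{
	uint8_t ie[3];

	build_ds_params(ie, 6);
	printf("%02x %02x %02x\n", ie[0], ie[1], ie[2]);
	return 0;
}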
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 9f856042a67a..5903d82e1ab1 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -2003,8 +2003,14 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
struct ieee80211_sta *sta = control->sta;
struct ieee80211_bss_conf *bss_conf;
+ /* This can happen in case of monitor injection */
+ if (!vif) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
if (link != IEEE80211_LINK_UNSPECIFIED) {
- bss_conf = rcu_dereference(txi->control.vif->link_conf[link]);
+ bss_conf = rcu_dereference(vif->link_conf[link]);
if (sta)
link_sta = rcu_dereference(sta->link[link]);
} else {
@@ -2065,13 +2071,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return;
}
- if (txi->control.vif)
- hwsim_check_magic(txi->control.vif);
+ if (vif)
+ hwsim_check_magic(vif);
if (control->sta)
hwsim_check_sta_magic(control->sta);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE))
- ieee80211_get_tx_rates(txi->control.vif, control->sta, skb,
+ ieee80211_get_tx_rates(vif, control->sta, skb,
txi->control.rates,
ARRAY_SIZE(txi->control.rates));
@@ -6698,14 +6704,15 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
-static void remove_user_radios(u32 portid)
+static void remove_user_radios(u32 portid, int netgroup)
{
struct mac80211_hwsim_data *entry, *tmp;
LIST_HEAD(list);
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
- if (entry->destroy_on_close && entry->portid == portid) {
+ if (entry->destroy_on_close && entry->portid == portid &&
+ entry->netgroup == netgroup) {
list_move(&entry->list, &list);
rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
hwsim_rht_params);
@@ -6730,7 +6737,7 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
if (state != NETLINK_URELEASE)
return NOTIFY_DONE;
- remove_user_radios(notify->portid);
+ remove_user_radios(notify->portid, hwsim_net_get_netgroup(notify->net));
if (notify->portid == hwsim_net_get_wmediumd(notify->net)) {
printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 2faa0de2a36e..8ee15a15f4ca 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -791,6 +791,7 @@ error:
if (urbs) {
for (i = 0; i < RX_URBS_COUNT; i++)
free_rx_urb(urbs[i]);
+ kfree(urbs);
}
return r;
}