Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dpll/dpll_netlink.c | 130
-rw-r--r--  drivers/dpll/dpll_nl.c | 5
-rw-r--r--  drivers/net/bonding/bond_main.c | 11
-rw-r--r--  drivers/net/can/esd/esd_402_pci-core.c | 5
-rw-r--r--  drivers/net/can/esd/esdacc.c | 55
-rw-r--r--  drivers/net/can/esd/esdacc.h | 38
-rw-r--r--  drivers/net/can/flexcan/flexcan-core.c | 50
-rw-r--r--  drivers/net/can/flexcan/flexcan.h | 2
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb.h | 26
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 21
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 41
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 114
-rw-r--r--  drivers/net/dsa/b53/b53_mdio.c | 7
-rw-r--r--  drivers/net/dsa/microchip/ksz8.h | 3
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 110
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 287
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.h | 5
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_reg.h | 12
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 401
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 56
-rw-r--r--  drivers/net/dsa/microchip/ksz_dcb.c | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz_spi.c | 15
-rw-r--r--  drivers/net/dsa/mt7530-mmio.c | 1
-rw-r--r--  drivers/net/dsa/mt7530.c | 49
-rw-r--r--  drivers/net/dsa/mt7530.h | 20
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2_scratch.c | 2
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 5
-rw-r--r--  drivers/net/dsa/realtek/rtl83xx.c | 8
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 10
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-core.c | 127
-rw-r--r--  drivers/net/ethernet/alacritech/slicoss.c | 34
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 26
-rw-r--r--  drivers/net/ethernet/alteon/acenic.h | 8
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 30
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-i2c.c | 16
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 16
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 10
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 25
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 92
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 50
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 389
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 19
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.h | 2
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 3
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 14
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.h | 1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.h | 7
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 3
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 8
-rw-r--r--  drivers/net/ethernet/davicom/dm9051.c | 1
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 2
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 28
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 14
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 58
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 18
-rw-r--r--  drivers/net/ethernet/fungible/funcore/fun_dev.c | 17
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h | 6
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c | 182
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h | 59
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c | 44
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 79
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_ethtool.c | 33
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 10
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 172
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 183
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 30
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 59
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_fdir.c | 89
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_fdir.h | 13
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 160
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 25
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 178
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.c | 223
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 99
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 109
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_osdep.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_parser.c | 2430
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_parser.h | 540
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_parser_rt.c | 861
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 403
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c | 23
-rw-r--r--  drivers/net/ethernet/intel/igbvf/igbvf.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igbvf/mbx.h | 1
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 18
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 114
-rw-r--r--  drivers/net/ethernet/mediatek/airoha_eth.c | 533
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 14
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c | 10
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 99
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 120
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 91
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 43
-rw-r--r--  drivers/net/ethernet/meta/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_netdev.c | 136
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_netdev.h | 3
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 51
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_txrx.h | 10
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 27
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 74
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef100_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 597
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 164
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 35
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 27
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 30
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 69
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 19
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 34
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_ethtool.c | 19
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth.c | 15
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth.h | 9
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c | 9
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_stats.c | 31
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_stats.h | 158
-rw-r--r--  drivers/net/ethernet/vertexcom/mse102x.c | 20
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.c | 5
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 3
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 3
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet.h | 115
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 379
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 2
-rw-r--r--  drivers/net/hyperv/netvsc_bpf.c | 2
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 3
-rw-r--r--  drivers/net/ipa/ipa_power.c | 7
-rw-r--r--  drivers/net/mdio/of_mdio.c | 5
-rw-r--r--  drivers/net/netconsole.c | 192
-rw-r--r--  drivers/net/phy/Kconfig | 4
-rw-r--r--  drivers/net/phy/Makefile | 3
-rw-r--r--  drivers/net/phy/dp83td510.c | 119
-rw-r--r--  drivers/net/phy/dp83tg720.c | 154
-rw-r--r--  drivers/net/phy/marvell-88x2222.c | 2
-rw-r--r--  drivers/net/phy/marvell.c | 2
-rw-r--r--  drivers/net/phy/marvell10g.c | 2
-rw-r--r--  drivers/net/phy/microchip_t1.c | 577
-rw-r--r--  drivers/net/phy/open_alliance_helpers.c | 77
-rw-r--r--  drivers/net/phy/open_alliance_helpers.h | 47
-rw-r--r--  drivers/net/phy/phy.c | 5
-rw-r--r--  drivers/net/phy/phy_device.c | 101
-rw-r--r--  drivers/net/phy/phy_link_topology.c | 105
-rw-r--r--  drivers/net/phy/phylink.c | 3
-rw-r--r--  drivers/net/phy/qcom/at803x.c | 2
-rw-r--r--  drivers/net/phy/qcom/qca807x.c | 12
-rw-r--r--  drivers/net/phy/sfp-bus.c | 26
-rw-r--r--  drivers/net/phy/vitesse.c | 183
-rw-r--r--  drivers/net/pse-pd/tps23881.c | 21
-rw-r--r--  drivers/net/sungem_phy.c | 35
-rw-r--r--  drivers/net/usb/cdc_ether.c | 3
-rw-r--r--  drivers/net/veth.c | 1
-rw-r--r--  drivers/net/virtio_net.c | 78
-rw-r--r--  drivers/net/vxlan/vxlan_core.c | 5
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_modem_ops.c | 47
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_modem_ops.h | 9
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_pci.c | 53
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_pci.h | 3
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_port_proxy.c | 1
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_port_trace.c | 1
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_state_monitor.c | 34
-rw-r--r--  drivers/net/xen-netback/hash.c | 5
-rw-r--r--  drivers/nfc/pn533/usb.c | 1
-rw-r--r--  drivers/vhost/vsock.c | 4
-rw-r--r--  drivers/virtio/virtio.c | 59
228 files changed, 11620 insertions, 2378 deletions
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 98e6ad8528d3..fc0280dcddd1 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -342,6 +342,51 @@ dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin,
return 0;
}
+static int
+dpll_msg_add_pin_esync(struct sk_buff *msg, struct dpll_pin *pin,
+ struct dpll_pin_ref *ref, struct netlink_ext_ack *extack)
+{
+ const struct dpll_pin_ops *ops = dpll_pin_ops(ref);
+ struct dpll_device *dpll = ref->dpll;
+ struct dpll_pin_esync esync;
+ struct nlattr *nest;
+ int ret, i;
+
+ if (!ops->esync_get)
+ return 0;
+ ret = ops->esync_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
+ dpll_priv(dpll), &esync, extack);
+ if (ret == -EOPNOTSUPP)
+ return 0;
+ else if (ret)
+ return ret;
+ if (nla_put_64bit(msg, DPLL_A_PIN_ESYNC_FREQUENCY, sizeof(esync.freq),
+ &esync.freq, DPLL_A_PIN_PAD))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, DPLL_A_PIN_ESYNC_PULSE, esync.pulse))
+ return -EMSGSIZE;
+ for (i = 0; i < esync.range_num; i++) {
+ nest = nla_nest_start(msg,
+ DPLL_A_PIN_ESYNC_FREQUENCY_SUPPORTED);
+ if (!nest)
+ return -EMSGSIZE;
+ if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN,
+ sizeof(esync.range[i].min),
+ &esync.range[i].min, DPLL_A_PIN_PAD))
+ goto nest_cancel;
+ if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX,
+ sizeof(esync.range[i].max),
+ &esync.range[i].max, DPLL_A_PIN_PAD))
+ goto nest_cancel;
+ nla_nest_end(msg, nest);
+ }
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(msg, nest);
+ return -EMSGSIZE;
+}
+
static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
{
int fs;
@@ -483,6 +528,9 @@ dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
ret = dpll_msg_add_ffo(msg, pin, ref, extack);
if (ret)
return ret;
+ ret = dpll_msg_add_pin_esync(msg, pin, ref, extack);
+ if (ret)
+ return ret;
if (xa_empty(&pin->parent_refs))
ret = dpll_msg_add_pin_dplls(msg, pin, extack);
else
@@ -739,6 +787,83 @@ rollback:
}
static int
+dpll_pin_esync_set(struct dpll_pin *pin, struct nlattr *a,
+ struct netlink_ext_ack *extack)
+{
+ struct dpll_pin_ref *ref, *failed;
+ const struct dpll_pin_ops *ops;
+ struct dpll_pin_esync esync;
+ u64 freq = nla_get_u64(a);
+ struct dpll_device *dpll;
+ bool supported = false;
+ unsigned long i;
+ int ret;
+
+ xa_for_each(&pin->dpll_refs, i, ref) {
+ ops = dpll_pin_ops(ref);
+ if (!ops->esync_set || !ops->esync_get) {
+ NL_SET_ERR_MSG(extack,
+ "embedded sync feature is not supported by this device");
+ return -EOPNOTSUPP;
+ }
+ }
+ ref = dpll_xa_ref_dpll_first(&pin->dpll_refs);
+ ops = dpll_pin_ops(ref);
+ dpll = ref->dpll;
+ ret = ops->esync_get(pin, dpll_pin_on_dpll_priv(dpll, pin), dpll,
+ dpll_priv(dpll), &esync, extack);
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "unable to get current embedded sync frequency value");
+ return ret;
+ }
+ if (freq == esync.freq)
+ return 0;
+ for (i = 0; i < esync.range_num; i++)
+ if (freq <= esync.range[i].max && freq >= esync.range[i].min)
+ supported = true;
+ if (!supported) {
+ NL_SET_ERR_MSG_ATTR(extack, a,
+ "requested embedded sync frequency value is not supported by this device");
+ return -EINVAL;
+ }
+
+ xa_for_each(&pin->dpll_refs, i, ref) {
+ void *pin_dpll_priv;
+
+ ops = dpll_pin_ops(ref);
+ dpll = ref->dpll;
+ pin_dpll_priv = dpll_pin_on_dpll_priv(dpll, pin);
+ ret = ops->esync_set(pin, pin_dpll_priv, dpll, dpll_priv(dpll),
+ freq, extack);
+ if (ret) {
+ failed = ref;
+ NL_SET_ERR_MSG_FMT(extack,
+ "embedded sync frequency set failed for dpll_id: %u",
+ dpll->id);
+ goto rollback;
+ }
+ }
+ __dpll_pin_change_ntf(pin);
+
+ return 0;
+
+rollback:
+ xa_for_each(&pin->dpll_refs, i, ref) {
+ void *pin_dpll_priv;
+
+ if (ref == failed)
+ break;
+ ops = dpll_pin_ops(ref);
+ dpll = ref->dpll;
+ pin_dpll_priv = dpll_pin_on_dpll_priv(dpll, pin);
+ if (ops->esync_set(pin, pin_dpll_priv, dpll, dpll_priv(dpll),
+ esync.freq, extack))
+ NL_SET_ERR_MSG(extack, "set embedded sync frequency rollback failed");
+ }
+ return ret;
+}
+
+static int
dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
enum dpll_pin_state state,
struct netlink_ext_ack *extack)
@@ -1039,6 +1164,11 @@ dpll_pin_set_from_nlattr(struct dpll_pin *pin, struct genl_info *info)
if (ret)
return ret;
break;
+ case DPLL_A_PIN_ESYNC_FREQUENCY:
+ ret = dpll_pin_esync_set(pin, a, info->extack);
+ if (ret)
+ return ret;
+ break;
}
}
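
The new dpll_pin_esync_set() above follows an all-or-nothing convention: the requested frequency is validated against the ranges reported by esync_get(), applied to every registered dpll reference, and rolled back on the already-written references if any setter fails. A minimal standalone sketch of that validate/apply/rollback loop, where apply() is a hypothetical stand-in for the driver's esync_set() callback:

    struct ref { int id; };

    /* Hypothetical stand-in for the driver's esync_set() callback. */
    static int apply(struct ref *r, unsigned long long freq)
    {
            /* A real driver would program the hardware here. */
            return 0;
    }

    static int set_all_or_rollback(struct ref *refs, int n,
                                   unsigned long long new_freq,
                                   unsigned long long old_freq)
    {
            int i, failed, ret = 0;

            for (failed = 0; failed < n; failed++) {
                    ret = apply(&refs[failed], new_freq);
                    if (ret)
                            break;
            }
            if (failed == n)
                    return 0;               /* all references updated */

            /* Best-effort rollback of references written before the failure */
            for (i = 0; i < failed; i++)
                    apply(&refs[i], old_freq);
            return ret;
    }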
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index 1e95f5397cfc..fe9b6893d261 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -62,7 +62,7 @@ static const struct nla_policy dpll_pin_get_dump_nl_policy[DPLL_A_PIN_ID + 1] =
};
/* DPLL_CMD_PIN_SET - do */
-static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_PHASE_ADJUST + 1] = {
+static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_ESYNC_FREQUENCY + 1] = {
[DPLL_A_PIN_ID] = { .type = NLA_U32, },
[DPLL_A_PIN_FREQUENCY] = { .type = NLA_U64, },
[DPLL_A_PIN_DIRECTION] = NLA_POLICY_RANGE(NLA_U32, 1, 2),
@@ -71,6 +71,7 @@ static const struct nla_policy dpll_pin_set_nl_policy[DPLL_A_PIN_PHASE_ADJUST +
[DPLL_A_PIN_PARENT_DEVICE] = NLA_POLICY_NESTED(dpll_pin_parent_device_nl_policy),
[DPLL_A_PIN_PARENT_PIN] = NLA_POLICY_NESTED(dpll_pin_parent_pin_nl_policy),
[DPLL_A_PIN_PHASE_ADJUST] = { .type = NLA_S32, },
+ [DPLL_A_PIN_ESYNC_FREQUENCY] = { .type = NLA_U64, },
};
/* Ops table for dpll */
@@ -138,7 +139,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
.doit = dpll_nl_pin_set_doit,
.post_doit = dpll_pin_post_doit,
.policy = dpll_pin_set_nl_policy,
- .maxattr = DPLL_A_PIN_PHASE_ADJUST,
+ .maxattr = DPLL_A_PIN_ESYNC_FREQUENCY,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
};
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bb9c3d6ef435..cb6a694313d5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2300,7 +2300,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_sysfs_del;
}
- res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ res = dev_xdp_propagate(slave_dev, &xdp);
if (res < 0) {
/* ndo_bpf() sets extack error message */
slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
@@ -2436,7 +2436,7 @@ static int __bond_release_one(struct net_device *bond_dev,
.prog = NULL,
.extack = NULL,
};
- if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
+ if (dev_xdp_propagate(slave_dev, &xdp))
slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
}
@@ -5626,7 +5626,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
goto err;
}
- err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ err = dev_xdp_propagate(slave_dev, &xdp);
if (err < 0) {
/* ndo_bpf() sets extack error message */
slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
@@ -5658,7 +5658,7 @@ err:
if (slave == rollback_slave)
break;
- err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+ err_unwind = dev_xdp_propagate(slave_dev, &xdp);
if (err_unwind < 0)
slave_err(dev, slave_dev,
"Error %d when unwinding XDP program change\n", err_unwind);
@@ -6384,7 +6384,8 @@ static int bond_init(struct net_device *bond_dev)
netdev_dbg(bond_dev, "Begin bond_init\n");
- bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
+ bond->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ bond_dev->name);
if (!bond->wq)
return -ENOMEM;
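
The alloc_ordered_workqueue() change above is a format-string hardening fix: the first argument is a printf-style format, and bond_dev->name is user-controlled, so a '%' in an interface name could be parsed as a conversion specifier. The fix passes a constant "%s" format and supplies the name as an argument. An illustrative userspace analogue of the same rule:

    #include <stdio.h>

    static void log_name_bad(const char *name)
    {
            printf(name);           /* a '%' in name is parsed as a conversion */
    }

    static void log_name_good(const char *name)
    {
            printf("%s\n", name);   /* constant format; name passed as data */
    }

    int main(void)
    {
            log_name_bad("bond0");  /* safe only because "bond0" has no '%' */
            log_name_good("bond0");
            return 0;
    }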
diff --git a/drivers/net/can/esd/esd_402_pci-core.c b/drivers/net/can/esd/esd_402_pci-core.c
index b7cdcffd0e45..5d6d2828cd04 100644
--- a/drivers/net/can/esd/esd_402_pci-core.c
+++ b/drivers/net/can/esd/esd_402_pci-core.c
@@ -369,12 +369,13 @@ static int pci402_init_cores(struct pci_dev *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
priv = netdev_priv(netdev);
+ priv->can.clock.freq = card->ov.core_frequency;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_CC_LEN8_DLC;
-
- priv->can.clock.freq = card->ov.core_frequency;
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_DAR)
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD)
priv->can.bittiming_const = &pci402_bittiming_const_canfd;
else
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
index 121cbbf81458..c80032bc1a52 100644
--- a/drivers/net/can/esd/esdacc.c
+++ b/drivers/net/can/esd/esdacc.c
@@ -17,6 +17,9 @@
/* esdACC DLC register layout */
#define ACC_DLC_DLC_MASK GENMASK(3, 0)
#define ACC_DLC_RTR_FLAG BIT(4)
+#define ACC_DLC_SSTX_FLAG BIT(24) /* Single Shot TX */
+
+/* esdACC DLC in struct acc_bmmsg_rxtxdone::acc_dlc.len only! */
#define ACC_DLC_TXD_FLAG BIT(5)
/* ecc value of esdACC equals SJA1000's ECC register */
@@ -43,8 +46,8 @@
static void acc_resetmode_enter(struct acc_core *core)
{
- acc_set_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+ acc_set_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_RESETMODE);
/* Read back reset mode bit to flush PCI write posting */
acc_resetmode_entered(core);
@@ -52,14 +55,14 @@ static void acc_resetmode_enter(struct acc_core *core)
static void acc_resetmode_leave(struct acc_core *core)
{
- acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_RESETMODE);
/* Read back reset mode bit to flush PCI write posting */
acc_resetmode_entered(core);
}
-static void acc_txq_put(struct acc_core *core, u32 acc_id, u8 acc_dlc,
+static void acc_txq_put(struct acc_core *core, u32 acc_id, u32 acc_dlc,
const void *data)
{
acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1,
@@ -172,7 +175,7 @@ int acc_open(struct net_device *netdev)
struct acc_net_priv *priv = netdev_priv(netdev);
struct acc_core *core = priv->core;
u32 tx_fifo_status;
- u32 ctrl_mode;
+ u32 ctrl;
int err;
/* Retry to enter RESET mode if out of sync. */
@@ -187,19 +190,19 @@ int acc_open(struct net_device *netdev)
if (err)
return err;
- ctrl_mode = ACC_REG_CONTROL_MASK_IE_RXTX |
- ACC_REG_CONTROL_MASK_IE_TXERROR |
- ACC_REG_CONTROL_MASK_IE_ERRWARN |
- ACC_REG_CONTROL_MASK_IE_OVERRUN |
- ACC_REG_CONTROL_MASK_IE_ERRPASS;
+ ctrl = ACC_REG_CTRL_MASK_IE_RXTX |
+ ACC_REG_CTRL_MASK_IE_TXERROR |
+ ACC_REG_CTRL_MASK_IE_ERRWARN |
+ ACC_REG_CTRL_MASK_IE_OVERRUN |
+ ACC_REG_CTRL_MASK_IE_ERRPASS;
if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
- ctrl_mode |= ACC_REG_CONTROL_MASK_IE_BUSERR;
+ ctrl |= ACC_REG_CTRL_MASK_IE_BUSERR;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- ctrl_mode |= ACC_REG_CONTROL_MASK_MODE_LOM;
+ ctrl |= ACC_REG_CTRL_MASK_LOM;
- acc_set_bits(core, ACC_CORE_OF_CTRL_MODE, ctrl_mode);
+ acc_set_bits(core, ACC_CORE_OF_CTRL, ctrl);
acc_resetmode_leave(core);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -218,13 +221,13 @@ int acc_close(struct net_device *netdev)
struct acc_net_priv *priv = netdev_priv(netdev);
struct acc_core *core = priv->core;
- acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_IE_RXTX |
- ACC_REG_CONTROL_MASK_IE_TXERROR |
- ACC_REG_CONTROL_MASK_IE_ERRWARN |
- ACC_REG_CONTROL_MASK_IE_OVERRUN |
- ACC_REG_CONTROL_MASK_IE_ERRPASS |
- ACC_REG_CONTROL_MASK_IE_BUSERR);
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_IE_RXTX |
+ ACC_REG_CTRL_MASK_IE_TXERROR |
+ ACC_REG_CTRL_MASK_IE_ERRWARN |
+ ACC_REG_CTRL_MASK_IE_OVERRUN |
+ ACC_REG_CTRL_MASK_IE_ERRPASS |
+ ACC_REG_CTRL_MASK_IE_BUSERR);
netif_stop_queue(netdev);
acc_resetmode_enter(core);
@@ -233,9 +236,9 @@ int acc_close(struct net_device *netdev)
/* Mark pending TX requests to be aborted after controller restart. */
acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
- /* ACC_REG_CONTROL_MASK_MODE_LOM is only accessible in RESET mode */
- acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
- ACC_REG_CONTROL_MASK_MODE_LOM);
+ /* ACC_REG_CTRL_MASK_LOM is only accessible in RESET mode */
+ acc_clear_bits(core, ACC_CORE_OF_CTRL,
+ ACC_REG_CTRL_MASK_LOM);
close_candev(netdev);
return 0;
@@ -249,7 +252,7 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
u8 tx_fifo_head = core->tx_fifo_head;
int fifo_usage;
u32 acc_id;
- u8 acc_dlc;
+ u32 acc_dlc;
if (can_dropped_invalid_skb(netdev, skb))
return NETDEV_TX_OK;
@@ -274,6 +277,8 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
if (cf->can_id & CAN_RTR_FLAG)
acc_dlc |= ACC_DLC_RTR_FLAG;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ acc_dlc |= ACC_DLC_SSTX_FLAG;
if (cf->can_id & CAN_EFF_FLAG) {
acc_id = cf->can_id & CAN_EFF_MASK;
diff --git a/drivers/net/can/esd/esdacc.h b/drivers/net/can/esd/esdacc.h
index a70488b25d39..6b7ebd8c91b2 100644
--- a/drivers/net/can/esd/esdacc.h
+++ b/drivers/net/can/esd/esdacc.h
@@ -35,6 +35,7 @@
*/
#define ACC_OV_REG_FEAT_MASK_CANFD BIT(27 - 16)
#define ACC_OV_REG_FEAT_MASK_NEW_PSC BIT(28 - 16)
+#define ACC_OV_REG_FEAT_MASK_DAR BIT(30 - 16)
#define ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE BIT(0)
#define ACC_OV_REG_MODE_MASK_BM_ENABLE BIT(1)
@@ -50,7 +51,7 @@
#define ACC_OV_REG_MODE_MASK_FPGA_RESET BIT(31)
/* esdACC CAN Core Module */
-#define ACC_CORE_OF_CTRL_MODE 0x0000
+#define ACC_CORE_OF_CTRL 0x0000
#define ACC_CORE_OF_STATUS_IRQ 0x0008
#define ACC_CORE_OF_BRP 0x000c
#define ACC_CORE_OF_BTR 0x0010
@@ -66,21 +67,22 @@
#define ACC_CORE_OF_TXFIFO_DATA_0 0x00c8
#define ACC_CORE_OF_TXFIFO_DATA_1 0x00cc
-#define ACC_REG_CONTROL_MASK_MODE_RESETMODE BIT(0)
-#define ACC_REG_CONTROL_MASK_MODE_LOM BIT(1)
-#define ACC_REG_CONTROL_MASK_MODE_STM BIT(2)
-#define ACC_REG_CONTROL_MASK_MODE_TRANSEN BIT(5)
-#define ACC_REG_CONTROL_MASK_MODE_TS BIT(6)
-#define ACC_REG_CONTROL_MASK_MODE_SCHEDULE BIT(7)
-
-#define ACC_REG_CONTROL_MASK_IE_RXTX BIT(8)
-#define ACC_REG_CONTROL_MASK_IE_TXERROR BIT(9)
-#define ACC_REG_CONTROL_MASK_IE_ERRWARN BIT(10)
-#define ACC_REG_CONTROL_MASK_IE_OVERRUN BIT(11)
-#define ACC_REG_CONTROL_MASK_IE_TSI BIT(12)
-#define ACC_REG_CONTROL_MASK_IE_ERRPASS BIT(13)
-#define ACC_REG_CONTROL_MASK_IE_ALI BIT(14)
-#define ACC_REG_CONTROL_MASK_IE_BUSERR BIT(15)
+/* CTRL register layout */
+#define ACC_REG_CTRL_MASK_RESETMODE BIT(0)
+#define ACC_REG_CTRL_MASK_LOM BIT(1)
+#define ACC_REG_CTRL_MASK_STM BIT(2)
+#define ACC_REG_CTRL_MASK_TRANSEN BIT(5)
+#define ACC_REG_CTRL_MASK_TS BIT(6)
+#define ACC_REG_CTRL_MASK_SCHEDULE BIT(7)
+
+#define ACC_REG_CTRL_MASK_IE_RXTX BIT(8)
+#define ACC_REG_CTRL_MASK_IE_TXERROR BIT(9)
+#define ACC_REG_CTRL_MASK_IE_ERRWARN BIT(10)
+#define ACC_REG_CTRL_MASK_IE_OVERRUN BIT(11)
+#define ACC_REG_CTRL_MASK_IE_TSI BIT(12)
+#define ACC_REG_CTRL_MASK_IE_ERRPASS BIT(13)
+#define ACC_REG_CTRL_MASK_IE_ALI BIT(14)
+#define ACC_REG_CTRL_MASK_IE_BUSERR BIT(15)
/* BRP and BTR register layout for CAN-Classic version */
#define ACC_REG_BRP_CL_MASK_BRP GENMASK(8, 0)
@@ -300,9 +302,9 @@ static inline void acc_clear_bits(struct acc_core *core,
static inline int acc_resetmode_entered(struct acc_core *core)
{
- u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL_MODE);
+ u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL);
- return (ctrl & ACC_REG_CONTROL_MASK_MODE_RESETMODE) != 0;
+ return (ctrl & ACC_REG_CTRL_MASK_RESETMODE) != 0;
}
static inline u32 acc_ov_read32(struct acc_ov *ov, unsigned short offs)
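
With the new ACC_DLC_SSTX_FLAG at BIT(24), the TX DLC FIFO word no longer fits in eight bits, which is why acc_dlc was widened from u8 to u32 in acc_txq_put() and acc_start_xmit() above. A standalone sketch of how the word is assembled, with the mask values copied from the hunks above and an illustrative helper name:

    #include <stdint.h>

    #define ACC_DLC_DLC_MASK        0x0fu           /* GENMASK(3, 0) */
    #define ACC_DLC_RTR_FLAG        (1u << 4)       /* BIT(4) */
    #define ACC_DLC_SSTX_FLAG       (1u << 24)      /* BIT(24), single-shot TX */

    /* Illustrative helper: assemble the 32-bit TX DLC word. */
    static uint32_t acc_build_tx_dlc(uint8_t dlc, int rtr, int one_shot)
    {
            uint32_t word = dlc & ACC_DLC_DLC_MASK;

            if (rtr)
                    word |= ACC_DLC_RTR_FLAG;
            if (one_shot)
                    word |= ACC_DLC_SSTX_FLAG;      /* needs more than 8 bits */
            return word;
    }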
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index 8ea7f2795551..d11411029330 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -354,6 +354,14 @@ static struct flexcan_devtype_data fsl_imx93_devtype_data = {
FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
+static const struct flexcan_devtype_data fsl_imx95_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI,
+};
+
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
@@ -544,6 +552,13 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
} else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+ } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI) {
+ /* In SCMI mode the driver does nothing here. After Linux suspends,
+ * the ATF sends a request to the SM (System Manager, M33 core)
+ * through the SCMI protocol. Once the SM receives it, it asserts
+ * the IPG_STOP signal to FlexCAN, putting the controller into
+ * STOP mode.
+ */
+ return 0;
}
return flexcan_low_power_enter_ack(priv);
@@ -555,7 +570,11 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
u32 reg_mcr;
int ret;
- /* remove stop request */
+ /* Remove the stop request. For FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI
+ * there is nothing to do here: the ATF already sent the request to
+ * the SM before Linux resumed, and on receiving it the SM deasserts
+ * the IPG_STOP signal to FlexCAN.
+ */
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
ret = flexcan_stop_mode_enable_scfw(priv, false);
if (ret < 0)
@@ -1983,6 +2002,9 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
ret = flexcan_setup_stop_mode_scfw(pdev);
else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
ret = flexcan_setup_stop_mode_gpr(pdev);
+ else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)
+ /* The ATF handles all IPG_STOP-related work */
+ ret = 0;
else
/* return 0 directly if doesn't support stop mode feature */
return 0;
@@ -2009,6 +2031,7 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, },
{ .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, },
{ .compatible = "fsl,imx93-flexcan", .data = &fsl_imx93_devtype_data, },
+ { .compatible = "fsl,imx95-flexcan", .data = &fsl_imx95_devtype_data, },
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
{ .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
@@ -2309,9 +2332,19 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
if (device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, true);
- err = pm_runtime_force_suspend(device);
- if (err)
- return err;
+ /* For FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI, the ATF must send a
+ * request to the SM through the SCMI protocol, and the SM then
+ * asserts the IPG_STOP signal. All of this requires the CAN
+ * clocks to stay on. Once the CAN module sees IPG_STOP and
+ * switches to STOP mode, whether the CAN clocks stay on or are
+ * gated off depends on the hardware design.
+ */
+ if (!(device_may_wakeup(device) &&
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) {
+ err = pm_runtime_force_suspend(device);
+ if (err)
+ return err;
+ }
}
return 0;
@@ -2325,9 +2358,12 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
if (netif_running(dev)) {
int err;
- err = pm_runtime_force_resume(device);
- if (err)
- return err;
+ if (!(device_may_wakeup(device) &&
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI)) {
+ err = pm_runtime_force_resume(device);
+ if (err)
+ return err;
+ }
if (device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, false);
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index 025c3417031f..4933d8c7439e 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -68,6 +68,8 @@
#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
/* Device supports RX via FIFO */
#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
+/* Setup stop mode with ATF SCMI protocol to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17)
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index ff10b3790d84..078496d9b7ba 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -22,6 +22,8 @@
*/
#include <linux/completion.h>
+#include <linux/ktime.h>
+#include <linux/math64.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/usb.h>
@@ -39,7 +41,6 @@
#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0)
#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1)
#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2)
-#define KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP BIT(3)
/* Device capabilities */
#define KVASER_USB_CAP_BERR_CAP 0x01
@@ -68,6 +69,7 @@ struct kvaser_usb_dev_card_data {
u32 ctrlmode_supported;
u32 capabilities;
struct kvaser_usb_dev_card_data_hydra hydra;
+ u32 usbcan_timestamp_msb;
};
/* Context for an outstanding, not yet ACKed, transmission */
@@ -216,4 +218,26 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev);
extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const;
+static inline ktime_t kvaser_usb_ticks_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ u64 ticks)
+{
+ return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+}
+
+static inline ktime_t kvaser_usb_timestamp48_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ const __le16 *timestamp)
+{
+ u64 ticks = le16_to_cpu(timestamp[0]) |
+ (u64)(le16_to_cpu(timestamp[1])) << 16 |
+ (u64)(le16_to_cpu(timestamp[2])) << 32;
+
+ return kvaser_usb_ticks_to_ktime(cfg, ticks);
+}
+
+static inline ktime_t kvaser_usb_timestamp64_to_ktime(const struct kvaser_usb_dev_cfg *cfg,
+ __le64 timestamp)
+{
+ return kvaser_usb_ticks_to_ktime(cfg, le64_to_cpu(timestamp));
+}
+
#endif /* KVASER_USB_H */
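
The helpers above make the timestamp unit explicit: timestamp_freq is in MHz (ticks per microsecond), so ticks * 1000 / timestamp_freq yields nanoseconds; at 16 MHz, one tick is 62.5 ns. A userspace sketch of the 48-bit variant (host-order words assumed here; the driver uses le16_to_cpu):

    #include <stdint.h>

    /* Analogue of kvaser_usb_timestamp48_to_ktime(): three 16-bit
     * words form a 48-bit tick count, converted to nanoseconds.
     */
    static uint64_t ts48_to_ns(const uint16_t w[3], uint32_t freq_mhz)
    {
            uint64_t ticks = (uint64_t)w[0] |
                             (uint64_t)w[1] << 16 |
                             (uint64_t)w[2] << 32;

            return ticks * 1000 / freq_mhz; /* MHz ticks -> nanoseconds */
    }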
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index daa34b532aa8..35b4132b0639 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -94,7 +94,7 @@
#define USB_MINI_PCIE_1XCAN_PRODUCT_ID 0x011B
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
- .quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
+ .quirks = 0,
.ops = &kvaser_usb_hydra_dev_ops,
};
@@ -756,23 +756,12 @@ freeurb:
static const struct net_device_ops kvaser_usb_netdev_ops = {
.ndo_open = kvaser_usb_open,
.ndo_stop = kvaser_usb_close,
- .ndo_start_xmit = kvaser_usb_start_xmit,
- .ndo_change_mtu = can_change_mtu,
-};
-
-static const struct net_device_ops kvaser_usb_netdev_ops_hwts = {
- .ndo_open = kvaser_usb_open,
- .ndo_stop = kvaser_usb_close,
.ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_start_xmit = kvaser_usb_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops kvaser_usb_ethtool_ops = {
- .get_ts_info = ethtool_op_get_ts_info,
-};
-
-static const struct ethtool_ops kvaser_usb_ethtool_ops_hwts = {
.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
@@ -859,13 +848,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
netdev->flags |= IFF_ECHO;
netdev->netdev_ops = &kvaser_usb_netdev_ops;
- if (driver_info->quirks & KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP) {
- netdev->netdev_ops = &kvaser_usb_netdev_ops_hwts;
- netdev->ethtool_ops = &kvaser_usb_ethtool_ops_hwts;
- } else {
- netdev->netdev_ops = &kvaser_usb_netdev_ops;
- netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
- }
+ netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->intf->dev);
netdev->dev_id = channel;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index c7ba768dfe17..3764b263add3 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -10,7 +10,6 @@
* - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only
* reported after a call to do_get_berr_counter(), since firmware does not
* distinguish between ERROR_WARNING and ERROR_ACTIVE.
- * - Hardware timestamps are not set for CAN Tx frames.
*/
#include <linux/completion.h>
@@ -261,6 +260,15 @@ struct kvaser_cmd_tx_can {
u8 reserved[11];
} __packed;
+struct kvaser_cmd_tx_ack {
+ __le32 id;
+ u8 data[8];
+ u8 dlc;
+ u8 flags;
+ __le16 timestamp[3];
+ u8 reserved0[8];
+} __packed;
+
struct kvaser_cmd_header {
u8 cmd_no;
/* The destination HE address is stored in 0..5 of he_addr.
@@ -297,6 +305,7 @@ struct kvaser_cmd {
struct kvaser_cmd_rx_can rx_can;
struct kvaser_cmd_tx_can tx_can;
+ struct kvaser_cmd_tx_ack tx_ack;
} __packed;
} __packed;
@@ -522,23 +531,25 @@ kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev,
return priv;
}
-static ktime_t
-kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg,
- const struct kvaser_cmd *cmd)
+static ktime_t kvaser_usb_hydra_ktime_from_cmd(const struct kvaser_usb_dev_cfg *cfg,
+ const struct kvaser_cmd *cmd)
{
- u64 ticks;
+ ktime_t hwtstamp = 0;
if (cmd->header.cmd_no == CMD_EXTENDED) {
struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd;
- ticks = le64_to_cpu(cmd_ext->rx_can.timestamp);
- } else {
- ticks = le16_to_cpu(cmd->rx_can.timestamp[0]);
- ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16;
- ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32;
+ if (cmd_ext->cmd_no_ext == CMD_RX_MESSAGE_FD)
+ hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->rx_can.timestamp);
+ else if (cmd_ext->cmd_no_ext == CMD_TX_ACKNOWLEDGE_FD)
+ hwtstamp = kvaser_usb_timestamp64_to_ktime(cfg, cmd_ext->tx_ack.timestamp);
+ } else if (cmd->header.cmd_no == CMD_RX_MESSAGE) {
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->rx_can.timestamp);
+ } else if (cmd->header.cmd_no == CMD_TX_ACKNOWLEDGE) {
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(cfg, cmd->tx_ack.timestamp);
}
- return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq));
+ return hwtstamp;
}
static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
@@ -1175,6 +1186,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
bool one_shot_fail = false;
bool is_err_frame = false;
u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
+ struct sk_buff *skb;
priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
if (!priv)
@@ -1201,6 +1213,9 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
+ skb = priv->can.echo_skb[context->echo_index];
+ if (skb)
+ skb_hwtstamps(skb)->hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd);
len = can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
@@ -1234,7 +1249,7 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
stats = &priv->netdev->stats;
flags = cmd->rx_can.flags;
- hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd);
+ hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, cmd);
if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
@@ -1302,7 +1317,7 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
KVASER_USB_KCAN_DATA_DLC_SHIFT;
flags = le32_to_cpu(cmd->rx_can.flags);
- hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd);
+ hwtstamp = kvaser_usb_hydra_ktime_from_cmd(dev->cfg, std_cmd);
if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) {
kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 23bd7574b1c7..6b9122ab1464 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -119,6 +119,10 @@
/* Extended CAN identifier flag */
#define KVASER_EXTENDED_FRAME BIT(31)
+/* USBCanII timestamp */
+#define KVASER_USB_USBCAN_CLK_OVERFLOW_MASK GENMASK(31, 16)
+#define KVASER_USB_USBCAN_TIMESTAMP_FACTOR 10
+
struct kvaser_cmd_simple {
u8 tid;
u8 channel;
@@ -235,6 +239,20 @@ struct kvaser_cmd_tx_acknowledge_header {
u8 tid;
} __packed;
+struct leaf_cmd_tx_acknowledge {
+ u8 channel;
+ u8 tid;
+ __le16 time[3];
+ u8 padding[2];
+} __packed;
+
+struct usbcan_cmd_tx_acknowledge {
+ u8 channel;
+ u8 tid;
+ __le16 time;
+ u8 padding[2];
+} __packed;
+
struct leaf_cmd_can_error_event {
u8 tid;
u8 flags;
@@ -281,6 +299,12 @@ struct usbcan_cmd_error_event {
__le16 padding;
} __packed;
+struct usbcan_cmd_clk_overflow_event {
+ u8 tid;
+ u8 padding;
+ __le32 time;
+} __packed;
+
struct kvaser_cmd_ctrl_mode {
u8 tid;
u8 channel;
@@ -347,6 +371,7 @@ struct kvaser_cmd {
struct leaf_cmd_error_event error_event;
struct kvaser_cmd_cap_req cap_req;
struct kvaser_cmd_cap_res cap_res;
+ struct leaf_cmd_tx_acknowledge tx_ack;
} __packed leaf;
union {
@@ -355,6 +380,8 @@ struct kvaser_cmd {
struct usbcan_cmd_chip_state_event chip_state_event;
struct usbcan_cmd_can_error_event can_error_event;
struct usbcan_cmd_error_event error_event;
+ struct usbcan_cmd_tx_acknowledge tx_ack;
+ struct usbcan_cmd_clk_overflow_event clk_overflow_event;
} __packed usbcan;
struct kvaser_cmd_tx_can tx_can;
@@ -370,7 +397,7 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
[CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
- [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.leaf.tx_ack),
[CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo),
[CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
[CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
@@ -388,15 +415,14 @@ static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
[CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
[CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
- [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.usbcan.tx_ack),
[CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo),
[CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
[CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
[CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
[CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event),
[CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
- /* ignored events: */
- [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = kvaser_fsize(u.usbcan.clk_overflow_event),
};
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
@@ -463,11 +489,27 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = {
.bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_16mhz = {
.clock = {
.freq = 16 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 16,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_24mhz = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 24,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg_32mhz = {
+ .clock = {
+ .freq = 16 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 32,
.bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
};
@@ -475,7 +517,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = {
.clock = {
.freq = 16 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 16,
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
@@ -483,7 +525,7 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = {
.clock = {
.freq = 24 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 24,
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
@@ -491,10 +533,19 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
.clock = {
.freq = 32 * MEGA /* Hz */,
},
- .timestamp_freq = 1,
+ .timestamp_freq = 32,
.bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
+static inline ktime_t kvaser_usb_usbcan_timestamp_to_ktime(const struct kvaser_usb *dev,
+ __le16 timestamp)
+{
+ u64 ticks = le16_to_cpu(timestamp) |
+ dev->card_data.usbcan_timestamp_msb;
+
+ return kvaser_usb_ticks_to_ktime(dev->cfg, ticks * KVASER_USB_USBCAN_TIMESTAMP_FACTOR);
+}
+
static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
@@ -678,8 +729,19 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
/* Firmware expects bittiming parameters calculated for 16MHz
* clock, regardless of the actual clock
+ * The reported frequency is, however, used for timestamps.
*/
- dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg;
+ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_16mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_24mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg_32mhz;
+ break;
+ }
} else {
switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
@@ -880,6 +942,8 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
struct kvaser_usb_net_priv *priv;
unsigned long flags;
u8 channel, tid;
+ struct sk_buff *skb;
+ ktime_t hwtstamp = 0;
channel = cmd->u.tx_acknowledge_header.channel;
tid = cmd->u.tx_acknowledge_header.tid;
@@ -901,14 +965,14 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
/* Sometimes the state change doesn't come after a bus-off event */
if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) {
- struct sk_buff *skb;
+ struct sk_buff *err_skb;
struct can_frame *cf;
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (skb) {
+ err_skb = alloc_can_err_skb(priv->netdev, &cf);
+ if (err_skb) {
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx(skb);
+ netif_rx(err_skb);
} else {
netdev_err(priv->netdev,
"No memory left for err_skb\n");
@@ -919,9 +983,20 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.tx_ack.time);
+ break;
+ case KVASER_USBCAN:
+ hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.tx_ack.time);
+ break;
+ }
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
+ skb = priv->can.echo_skb[context->echo_index];
+ if (skb)
+ skb_hwtstamps(skb)->hwtstamp = hwtstamp;
stats->tx_packets++;
stats->tx_bytes += can_get_echo_skb(priv->netdev,
context->echo_index, NULL);
@@ -1299,6 +1374,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
struct net_device_stats *stats;
u8 channel = cmd->u.rx_can_header.channel;
const u8 *rx_data = NULL; /* GCC */
+ ktime_t hwtstamp = 0;
if (channel >= dev->nchannels) {
dev_err(&dev->intf->dev,
@@ -1329,9 +1405,11 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
switch (dev->driver_info->family) {
case KVASER_LEAF:
rx_data = cmd->u.leaf.rx_can.data;
+ hwtstamp = kvaser_usb_timestamp48_to_ktime(dev->cfg, cmd->u.leaf.rx_can.time);
break;
case KVASER_USBCAN:
rx_data = cmd->u.usbcan.rx_can.data;
+ hwtstamp = kvaser_usb_usbcan_timestamp_to_ktime(dev, cmd->u.usbcan.rx_can.time);
break;
}
@@ -1375,6 +1453,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
memcpy(cf->data, &rx_data[6], cf->len);
}
+ skb_hwtstamps(skb)->hwtstamp = hwtstamp;
stats->rx_packets++;
if (!(cf->can_id & CAN_RTR_FLAG))
stats->rx_bytes += cf->len;
@@ -1508,7 +1587,7 @@ static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev,
complete(&priv->get_busparams_comp);
}
-static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
+static void kvaser_usb_leaf_handle_command(struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
if (kvaser_usb_leaf_verify_size(dev, cmd) < 0)
@@ -1554,12 +1633,15 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
kvaser_usb_leaf_get_busparams_reply(dev, cmd);
break;
- /* Ignored commands */
case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
if (dev->driver_info->family != KVASER_USBCAN)
goto warn;
+ dev->card_data.usbcan_timestamp_msb =
+ le32_to_cpu(cmd->u.usbcan.clk_overflow_event.time) &
+ KVASER_USB_USBCAN_CLK_OVERFLOW_MASK;
break;
+ /* Ignored commands */
case CMD_FLUSH_QUEUE_REPLY:
if (dev->driver_info->family != KVASER_LEAF)
goto warn;
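
USBCanII hardware reports only 16 timestamp bits per frame; the firmware periodically sends CMD_USBCAN_CLOCK_OVERFLOW_EVENT carrying the upper bits, which the driver now caches in usbcan_timestamp_msb and ORs back in, then scales by KVASER_USB_USBCAN_TIMESTAMP_FACTOR before the ticks-to-ktime conversion. A minimal sketch of the reconstruction (names illustrative):

    #include <stdint.h>

    #define CLK_OVERFLOW_MASK       0xffff0000u     /* GENMASK(31, 16) */
    #define TIMESTAMP_FACTOR        10

    /* The cached overflow-event MSBs widen the 16-bit per-frame
     * timestamp; each unit is 10 timestamp-clock ticks, matching
     * KVASER_USB_USBCAN_TIMESTAMP_FACTOR in the hunk above.
     */
    static uint64_t usbcan_ticks(uint32_t cached_msb, uint16_t frame_lsb)
    {
            uint64_t ticks = (uint64_t)frame_lsb |
                             (cached_msb & CLK_OVERFLOW_MASK);

            return ticks * TIMESTAMP_FACTOR;
    }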
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index 897e5e8b3d69..31d070bf161a 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -343,10 +343,9 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
dev_set_drvdata(&mdiodev->dev, dev);
ret = b53_switch_register(dev);
- if (ret) {
- dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&mdiodev->dev, ret,
+ "failed to register switch\n");
return ret;
}
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index ae43077e76c3..e1c79ff97123 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -54,6 +54,9 @@ int ksz8_reset_switch(struct ksz_device *dev);
int ksz8_switch_init(struct ksz_device *dev);
void ksz8_switch_exit(struct ksz_device *dev);
int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu);
+int ksz8_pme_write8(struct ksz_device *dev, u32 reg, u8 value);
+int ksz8_pme_pread8(struct ksz_device *dev, int port, int offset, u8 *data);
+int ksz8_pme_pwrite8(struct ksz_device *dev, int port, int offset, u8 data);
void ksz8_phylink_mac_link_up(struct phylink_config *config,
struct phy_device *phydev, unsigned int mode,
phy_interface_t interface, int speed, int duplex,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index d27b9c36d73f..aa09d89debf0 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -38,6 +38,20 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bits, set ? bits : 0);
}
+/**
+ * ksz8_ind_write8 - EEE/ACL/PME indirect register write
+ * @dev: The device structure.
+ * @table: Function & table select, register 110.
+ * @addr: Indirect access control, register 111.
+ * @data: The data to be written.
+ *
+ * This function performs an indirect register write for EEE, ACL or
+ * PME switch functionalities. Both 8-bit registers 110 and 111 are
+ * written at once with ksz_write16, using the serial multiple write
+ * functionality.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
{
const u16 *regs;
@@ -58,6 +72,59 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
return ret;
}
+/**
+ * ksz8_ind_read8 - EEE/ACL/PME indirect register read
+ * @dev: The device structure.
+ * @table: Function & table select, register 110.
+ * @addr: Indirect access control, register 111.
+ * @val: The value read.
+ *
+ * This function performs an indirect register read for EEE, ACL or
+ * PME switch functionalities. Both 8-bit registers 110 and 111 are
+ * written at once with ksz_write16, using the serial multiple write
+ * functionality.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+static int ksz8_ind_read8(struct ksz_device *dev, u8 table, u16 addr, u8 *val)
+{
+ const u16 *regs;
+ u16 ctrl_addr;
+ int ret = 0;
+
+ regs = dev->info->regs;
+
+ mutex_lock(&dev->alu_mutex);
+
+ ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (!ret)
+ ret = ksz_read8(dev, regs[REG_IND_BYTE], val);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+int ksz8_pme_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+ return ksz8_ind_write8(dev, (u8)(reg >> 8), (u8)(reg), value);
+}
+
+int ksz8_pme_pread8(struct ksz_device *dev, int port, int offset, u8 *data)
+{
+ u8 table = (u8)(offset >> 8 | (port + 1));
+
+ return ksz8_ind_read8(dev, table, (u8)(offset), data);
+}
+
+int ksz8_pme_pwrite8(struct ksz_device *dev, int port, int offset, u8 data)
+{
+ u8 table = (u8)(offset >> 8 | (port + 1));
+
+ return ksz8_ind_write8(dev, table, (u8)(offset), data);
+}
+
int ksz8_reset_switch(struct ksz_device *dev)
{
if (ksz_is_ksz88x3(dev)) {
@@ -121,6 +188,8 @@ int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu)
case KSZ8765_CHIP_ID:
return ksz8795_change_mtu(dev, frame_size);
case KSZ8830_CHIP_ID:
+ case KSZ8864_CHIP_ID:
+ case KSZ8895_CHIP_ID:
return ksz8863_change_mtu(dev, frame_size);
}
@@ -317,7 +386,7 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
- if (ksz_is_ksz88x3(dev))
+ if (is_ksz88xx(dev))
ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
else
ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
@@ -325,7 +394,7 @@ void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
- if (ksz_is_ksz88x3(dev))
+ if (is_ksz88xx(dev))
return;
/* enable the port for flush/freeze function */
@@ -343,7 +412,8 @@ void ksz8_port_init_cnt(struct ksz_device *dev, int port)
struct ksz_port_mib *mib = &dev->ports[port].mib;
u64 *dropped;
- if (!ksz_is_ksz88x3(dev)) {
+ /* For the KSZ8795 family. */
+ if (ksz_is_ksz87xx(dev)) {
/* flush all enabled port MIB counters */
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true);
@@ -542,11 +612,11 @@ static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
shifts[STATIC_MAC_FWD_PORTS];
alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
- /* KSZ8795 family switches have STATIC_MAC_TABLE_USE_FID and
+ /* KSZ8795/KSZ8895 family switches have STATIC_MAC_TABLE_USE_FID and
* STATIC_MAC_TABLE_FID definitions off by 1 when doing read on the
* static MAC table compared to doing write.
*/
- if (ksz_is_ksz87xx(dev))
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev))
data_hi >>= 1;
alu->is_static = true;
alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
@@ -1545,6 +1615,7 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
+ const u16 *regs = dev->info->regs;
struct dsa_switch *ds = dev->ds;
const u32 *masks;
int queues;
@@ -1575,6 +1646,13 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
member = BIT(dsa_upstream_port(ds, port));
ksz8_cfg_port_member(dev, port, member);
+
+ /* Disable all WoL options by default. Otherwise
+ * ksz_switch_macaddr_get/put logic will not work properly.
+ * CPU port 4 has no WoL functionality.
+ */
+ if (ksz_is_ksz87xx(dev) && !cpu_port)
+ ksz8_pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0);
}
static void ksz88x3_config_rmii_clk(struct ksz_device *dev)
@@ -1617,7 +1695,8 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
- if (!ksz_is_ksz88x3(dev)) {
+ /* For the KSZ8795 family. */
+ if (ksz_is_ksz87xx(dev)) {
ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
if (remote & KSZ8_PORT_FIBER_MODE)
p->fiber = 1;
@@ -1790,7 +1869,8 @@ int ksz8_enable_stp_addr(struct ksz_device *dev)
int ksz8_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
- int i;
+ const u16 *regs = dev->info->regs;
+ int i, ret = 0;
ds->mtu_enforcement_ingress = true;
@@ -1829,7 +1909,21 @@ int ksz8_setup(struct dsa_switch *ds)
for (i = 0; i < (dev->info->num_vlans / 4); i++)
ksz8_r_vlan_entries(dev, i);
- return ksz8_handle_global_errata(ds);
+ /* Make sure PME (WoL) is not enabled. If requested, it will
+ * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs
+ * do not like PME event changes before shutdown. PME is only
+ * available on the KSZ87xx family.
+ */
+ if (ksz_is_ksz87xx(dev)) {
+ ret = ksz8_pme_write8(dev, regs[REG_SW_PME_CTRL], 0);
+ if (!ret)
+ ret = ksz_rmw8(dev, REG_INT_ENABLE, INT_PME, 0);
+ }
+
+ if (ret)
+ return ret;
+
+ return ksz8_handle_global_errata(ds);
}
void ksz8_get_caps(struct ksz_device *dev, int port,
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 425e20daf1e9..0ba658a72d8f 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -56,187 +56,6 @@ int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
REG_SW_MTU_MASK, frame_size);
}
-/**
- * ksz9477_handle_wake_reason - Handle wake reason on a specified port.
- * @dev: The device structure.
- * @port: The port number.
- *
- * This function reads the PME (Power Management Event) status register of a
- * specified port to determine the wake reason. If there is no wake event, it
- * returns early. Otherwise, it logs the wake reason which could be due to a
- * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register
- * is then cleared to acknowledge the handling of the wake event.
- *
- * Return: 0 on success, or an error code on failure.
- */
-static int ksz9477_handle_wake_reason(struct ksz_device *dev, int port)
-{
- u8 pme_status;
- int ret;
-
- ret = ksz_pread8(dev, port, REG_PORT_PME_STATUS, &pme_status);
- if (ret)
- return ret;
-
- if (!pme_status)
- return 0;
-
- dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port,
- pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "",
- pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
- pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");
-
- return ksz_pwrite8(dev, port, REG_PORT_PME_STATUS, pme_status);
-}
-
-/**
- * ksz9477_get_wol - Get Wake-on-LAN settings for a specified port.
- * @dev: The device structure.
- * @port: The port number.
- * @wol: Pointer to ethtool Wake-on-LAN settings structure.
- *
- * This function checks the PME Pin Control Register to see if PME Pin Output
- * Enable is set, indicating PME is enabled. If enabled, it sets the supported
- * and active WoL flags.
- */
-void ksz9477_get_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol)
-{
- u8 pme_ctrl;
- int ret;
-
- if (!dev->wakeup_source)
- return;
-
- wol->supported = WAKE_PHY;
-
- /* Check if the current MAC address on this port can be set
- * as global for WAKE_MAGIC support. The result may vary
- * dynamically based on other ports configurations.
- */
- if (ksz_is_port_mac_global_usable(dev->ds, port))
- wol->supported |= WAKE_MAGIC;
-
- ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl);
- if (ret)
- return;
-
- if (pme_ctrl & PME_WOL_MAGICPKT)
- wol->wolopts |= WAKE_MAGIC;
- if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
- wol->wolopts |= WAKE_PHY;
-}
-
-/**
- * ksz9477_set_wol - Set Wake-on-LAN settings for a specified port.
- * @dev: The device structure.
- * @port: The port number.
- * @wol: Pointer to ethtool Wake-on-LAN settings structure.
- *
- * This function configures Wake-on-LAN (WoL) settings for a specified port.
- * It validates the provided WoL options, checks if PME is enabled via the
- * switch's PME Pin Control Register, clears any previous wake reasons,
- * and sets the Magic Packet flag in the port's PME control register if
- * specified.
- *
- * Return: 0 on success, or other error codes on failure.
- */
-int ksz9477_set_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol)
-{
- u8 pme_ctrl = 0, pme_ctrl_old = 0;
- bool magic_switched_off;
- bool magic_switched_on;
- int ret;
-
- if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
- return -EINVAL;
-
- if (!dev->wakeup_source)
- return -EOPNOTSUPP;
-
- ret = ksz9477_handle_wake_reason(dev, port);
- if (ret)
- return ret;
-
- if (wol->wolopts & WAKE_MAGIC)
- pme_ctrl |= PME_WOL_MAGICPKT;
- if (wol->wolopts & WAKE_PHY)
- pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;
-
- ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl_old);
- if (ret)
- return ret;
-
- if (pme_ctrl_old == pme_ctrl)
- return 0;
-
- magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) &&
- !(pme_ctrl & PME_WOL_MAGICPKT);
- magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) &&
- (pme_ctrl & PME_WOL_MAGICPKT);
-
- /* To keep reference count of MAC address, we should do this
- * operation only on change of WOL settings.
- */
- if (magic_switched_on) {
- ret = ksz_switch_macaddr_get(dev->ds, port, NULL);
- if (ret)
- return ret;
- } else if (magic_switched_off) {
- ksz_switch_macaddr_put(dev->ds);
- }
-
- ret = ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, pme_ctrl);
- if (ret) {
- if (magic_switched_on)
- ksz_switch_macaddr_put(dev->ds);
- return ret;
- }
-
- return 0;
-}
-
-/**
- * ksz9477_wol_pre_shutdown - Prepares the switch device for shutdown while
- * considering Wake-on-LAN (WoL) settings.
- * @dev: The switch device structure.
- * @wol_enabled: Pointer to a boolean which will be set to true if WoL is
- * enabled on any port.
- *
- * This function prepares the switch device for a safe shutdown while taking
- * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
- * the wol_enabled flag accordingly to reflect whether WoL is active on any
- * port.
- */
-void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled)
-{
- struct dsa_port *dp;
- int ret;
-
- *wol_enabled = false;
-
- if (!dev->wakeup_source)
- return;
-
- dsa_switch_for_each_user_port(dp, dev->ds) {
- u8 pme_ctrl = 0;
-
- ret = ksz_pread8(dev, dp->index, REG_PORT_PME_CTRL, &pme_ctrl);
- if (!ret && pme_ctrl)
- *wol_enabled = true;
-
- /* make sure there are no pending wake events which would
- * prevent the device from going to sleep/shutdown.
- */
- ksz9477_handle_wake_reason(dev, dp->index);
- }
-
- /* Now we are save to enable PME pin. */
- if (*wol_enabled)
- ksz_write8(dev, REG_SW_PME_CTRL, PME_ENABLE);
-}
-
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
unsigned int val;
@@ -427,54 +246,70 @@ void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
mutex_unlock(&p->mib.cnt_mutex);
}
-int ksz9477_errata_monitor(struct ksz_device *dev, int port,
- u64 tx_late_col)
+static int ksz9477_half_duplex_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
{
+ u8 lue_ctrl;
u32 pmavbc;
- u8 status;
u16 pqm;
int ret;
- ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
+ /* Errata DS80000754 recommends monitoring potential faults in
+ * half-duplex mode. The switch might not be able to communicate anymore
+ * in these states. If you see this message, please read the
+ * errata sheet for more information:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
+ * To work around this issue, half-duplex mode should be avoided.
+ * A software reset could be implemented to recover from this state.
+ */
+ dev_warn_once(dev->dev,
+ "Half-duplex detected on port %d, transmission halt may occur\n",
+ port);
+ if (tx_late_col != 0) {
+ /* Transmission halt with late collisions */
+ dev_crit_once(dev->dev,
+ "TX late collisions detected, transmission may be halted on port %d\n",
+ port);
+ }
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &lue_ctrl);
if (ret)
return ret;
- if (!(FIELD_GET(PORT_INTF_SPEED_MASK, status) == PORT_INTF_SPEED_NONE) &&
- !(status & PORT_INTF_FULL_DUPLEX)) {
- /* Errata DS80000754 recommends monitoring potential faults in
- * half-duplex mode. The switch might not be able to communicate anymore
- * in these states.
- * If you see this message, please read the errata-sheet for more information:
- * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
- * To workaround this issue, half-duplex mode should be avoided.
- * A software reset could be implemented to recover from this state.
- */
- dev_warn_once(dev->dev,
- "Half-duplex detected on port %d, transmission halt may occur\n",
- port);
- if (tx_late_col != 0) {
- /* Transmission halt with late collisions */
- dev_crit_once(dev->dev,
- "TX late collisions detected, transmission may be halted on port %d\n",
- port);
- }
- ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &status);
+ if (lue_ctrl & SW_VLAN_ENABLE) {
+ ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
if (ret)
return ret;
- if (status & SW_VLAN_ENABLE) {
- ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
- if (ret)
- return ret;
- ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
- if (ret)
- return ret;
- if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
- (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
- /* Transmission halt with Half-Duplex and VLAN */
- dev_crit_once(dev->dev,
- "resources out of limits, transmission may be halted\n");
- }
+
+ ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
+ if (ret)
+ return ret;
+
+ if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
+ (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
+ /* Transmission halt with Half-Duplex and VLAN */
+ dev_crit_once(dev->dev,
+ "resources out of limits, transmission may be halted\n");
}
}
+
+ return ret;
+}
+
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
+{
+ u8 status;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(PORT_INTF_SPEED_MASK, status) != PORT_INTF_SPEED_NONE &&
+ !(status & PORT_INTF_FULL_DUPLEX))
+ ret = ksz9477_half_duplex_monitor(dev, port, tx_late_col);
+
return ret;
}
@@ -1188,6 +1023,7 @@ void ksz9477_port_queue_split(struct ksz_device *dev, int port)
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
+ const u16 *regs = dev->info->regs;
struct dsa_switch *ds = dev->ds;
u16 data16;
u8 member;
@@ -1232,12 +1068,12 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz9477_port_acl_init(dev, port);
/* clear pending wake flags */
- ksz9477_handle_wake_reason(dev, port);
+ ksz_handle_wake_reason(dev, port);
/* Disable all WoL options by default. Otherwise
* ksz_switch_macaddr_get/put logic will not work properly.
*/
- ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, 0);
+ ksz_pwrite8(dev, port, regs[REG_PORT_PME_CTRL], 0);
}
void ksz9477_config_cpu_port(struct dsa_switch *ds)
@@ -1334,6 +1170,7 @@ int ksz9477_enable_stp_addr(struct ksz_device *dev)
int ksz9477_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
int ret = 0;
ds->mtu_enforcement_ingress = true;
@@ -1364,13 +1201,11 @@ int ksz9477_setup(struct dsa_switch *ds)
/* enable global MIB counter freeze function */
ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
- /* Make sure PME (WoL) is not enabled. If requested, it will be
- * enabled by ksz9477_wol_pre_shutdown(). Otherwise, some PMICs do not
- * like PME events changes before shutdown.
+ /* Make sure PME (WoL) is not enabled. If requested, it will
+ * be enabled by ksz_wol_pre_shutdown(). Otherwise, some PMICs
+ * do not like PME events changes before shutdown.
*/
- ksz_write8(dev, REG_SW_PME_CTRL, 0);
-
- return 0;
+ return ksz_write8(dev, regs[REG_SW_PME_CTRL], 0);
}
u32 ksz9477_get_port_addr(int port, int offset)
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index 239a281da10b..d2166b0d881e 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -60,11 +60,6 @@ void ksz9477_switch_exit(struct ksz_device *dev);
void ksz9477_port_queue_split(struct ksz_device *dev, int port);
void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr);
void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr);
-void ksz9477_get_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
-int ksz9477_set_wol(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
-void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled);
int ksz9477_port_acl_init(struct ksz_device *dev, int port);
void ksz9477_port_acl_free(struct ksz_device *dev, int port);
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index d5354c600ea1..04235c22bf40 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -38,11 +38,6 @@
#define SWITCH_REVISION_S 4
#define SWITCH_RESET 0x01
-#define REG_SW_PME_CTRL 0x0006
-
-#define PME_ENABLE BIT(1)
-#define PME_POLARITY BIT(0)
-
#define REG_GLOBAL_OPTIONS 0x000F
#define SW_GIGABIT_ABLE BIT(6)
@@ -807,13 +802,6 @@
#define REG_PORT_AVB_SR_1_TYPE 0x0008
#define REG_PORT_AVB_SR_2_TYPE 0x000A
-#define REG_PORT_PME_STATUS 0x0013
-#define REG_PORT_PME_CTRL 0x0017
-
-#define PME_WOL_MAGICPKT BIT(2)
-#define PME_WOL_LINKUP BIT(1)
-#define PME_WOL_ENERGY BIT(0)
-
#define REG_PORT_INT_STATUS 0x001B
#define REG_PORT_INT_MASK 0x001F
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 1491099528be..6609bf271ad0 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2,7 +2,7 @@
/*
* Microchip switch driver main logic
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#include <linux/delay.h>
@@ -277,7 +277,7 @@ static const struct phylink_mac_ops ksz8_phylink_mac_ops = {
.mac_link_up = ksz8_phylink_mac_link_up,
};
-static const struct ksz_dev_ops ksz8_dev_ops = {
+static const struct ksz_dev_ops ksz88xx_dev_ops = {
.setup = ksz8_setup,
.get_port_addr = ksz8_get_port_addr,
.cfg_port_member = ksz8_cfg_port_member,
@@ -307,6 +307,44 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.init = ksz8_switch_init,
.exit = ksz8_switch_exit,
.change_mtu = ksz8_change_mtu,
+ .pme_write8 = ksz8_pme_write8,
+ .pme_pread8 = ksz8_pme_pread8,
+ .pme_pwrite8 = ksz8_pme_pwrite8,
+};
+
+static const struct ksz_dev_ops ksz87xx_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8_r_phy,
+ .w_phy = ksz8_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .fdb_add = ksz8_fdb_add,
+ .fdb_del = ksz8_fdb_del,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+ .change_mtu = ksz8_change_mtu,
+ .pme_write8 = ksz8_pme_write8,
+ .pme_pread8 = ksz8_pme_pread8,
+ .pme_pwrite8 = ksz8_pme_pwrite8,
};
static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
@@ -348,9 +386,9 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.mdb_add = ksz9477_mdb_add,
.mdb_del = ksz9477_mdb_del,
.change_mtu = ksz9477_change_mtu,
- .get_wol = ksz9477_get_wol,
- .set_wol = ksz9477_set_wol,
- .wol_pre_shutdown = ksz9477_wol_pre_shutdown,
+ .pme_write8 = ksz_write8,
+ .pme_pread8 = ksz_pread8,
+ .pme_pwrite8 = ksz_pwrite8,
.config_cpu_port = ksz9477_config_cpu_port,
.tc_cbs_set_cinc = ksz9477_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
@@ -423,6 +461,9 @@ static const u16 ksz8795_regs[] = {
[S_MULTICAST_CTRL] = 0x04,
[P_XMII_CTRL_0] = 0x06,
[P_XMII_CTRL_1] = 0x06,
+ [REG_SW_PME_CTRL] = 0x8003,
+ [REG_PORT_PME_STATUS] = 0x8003,
+ [REG_PORT_PME_CTRL] = 0x8007,
};
static const u32 ksz8795_masks[] = {
@@ -531,6 +572,61 @@ static u8 ksz8863_shifts[] = {
[DYNAMIC_MAC_SRC_PORT] = 20,
};
+static const u16 ksz8895_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x68,
+ [REG_IND_CTRL_0] = 0x6E,
+ [REG_IND_DATA_8] = 0x70,
+ [REG_IND_DATA_CHECK] = 0x72,
+ [REG_IND_DATA_HI] = 0x71,
+ [REG_IND_DATA_LO] = 0x75,
+ [REG_IND_MIB_CHECK] = 0x75,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x09,
+ [S_TAIL_TAG_CTRL] = 0x0C,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8895_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(7),
+ [SW_TAIL_TAG_ENABLE] = BIT(1),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(6, 0),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
+ [VLAN_TABLE_VALID] = BIT(12),
+ [STATIC_MAC_TABLE_VALID] = BIT(21),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(23),
+ [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(22),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(20, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(22, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
+};
+
+static const u8 ksz8895_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 7,
+ [VLAN_TABLE] = 13,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 24,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 29,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 27,
+ [DYNAMIC_MAC_SRC_PORT] = 24,
+};
+
static const u16 ksz9477_regs[] = {
[REG_SW_MAC_ADDR] = 0x0302,
[P_STP_CTRL] = 0x0B04,
@@ -539,6 +635,9 @@ static const u16 ksz9477_regs[] = {
[S_MULTICAST_CTRL] = 0x0331,
[P_XMII_CTRL_0] = 0x0300,
[P_XMII_CTRL_1] = 0x0301,
+ [REG_SW_PME_CTRL] = 0x0006,
+ [REG_PORT_PME_STATUS] = 0x0013,
+ [REG_PORT_PME_CTRL] = 0x0017,
};
static const u32 ksz9477_masks[] = {
@@ -1253,12 +1352,12 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.dev_name = "KSZ8795",
.num_vlans = 4096,
.num_alus = 0,
- .num_statics = 8,
+ .num_statics = 32,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
.num_ipms = 4,
- .ops = &ksz8_dev_ops,
+ .ops = &ksz87xx_dev_ops,
.phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1294,12 +1393,12 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.dev_name = "KSZ8794",
.num_vlans = 4096,
.num_alus = 0,
- .num_statics = 8,
+ .num_statics = 32,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
.num_ipms = 4,
- .ops = &ksz8_dev_ops,
+ .ops = &ksz87xx_dev_ops,
.phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1321,12 +1420,12 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.dev_name = "KSZ8765",
.num_vlans = 4096,
.num_alus = 0,
- .num_statics = 8,
+ .num_statics = 32,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
.num_ipms = 4,
- .ops = &ksz8_dev_ops,
+ .ops = &ksz87xx_dev_ops,
.phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1353,7 +1452,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 3,
.num_tx_queues = 4,
.num_ipms = 4,
- .ops = &ksz8_dev_ops,
+ .ops = &ksz88xx_dev_ops,
.phylink_mac_ops = &ksz8830_phylink_mac_ops,
.mib_names = ksz88xx_mib_names,
.mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
@@ -1368,6 +1467,61 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.rd_table = &ksz8873_register_set,
},
+ [KSZ8864] = {
+ /* WARNING
+ * =======
+ * KSZ8864 is similar to KSZ8895, except the first port
+ * does not exist.
+ *               external   cpu
+ *   KSZ8864     1,2,3      4
+ *   KSZ8895     0,1,2,3    4
+ * port_cnt is configured as 5, even though it is 4
+ */
+ .chip_id = KSZ8864_CHIP_ID,
+ .dev_name = "KSZ8864",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 32,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz8830_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8895_regs,
+ .masks = ksz8895_masks,
+ .shifts = ksz8895_shifts,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .internal_phy = {false, true, true, true, false},
+ },
+
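
The WARNING above is the one real quirk of the KSZ8864 entry: index 0 is a hole. A hedged sketch of how the per-port capability arrays already encode that hole (ksz8864_port_exists() is a hypothetical helper, not driver code):

        static bool ksz8864_port_exists(const struct ksz_chip_data *info, int port)
        {
                /* Index 0 advertises neither an internal PHY nor an (R)MII
                 * pinout in the KSZ8864 chip data, so it can never become a
                 * valid user or CPU port despite port_cnt being 5.
                 */
                return info->internal_phy[port] ||
                       info->supports_mii[port] ||
                       info->supports_rmii[port];
        }
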
+ [KSZ8895] = {
+ .chip_id = KSZ8895_CHIP_ID,
+ .dev_name = "KSZ8895",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 32,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .num_ipms = 4,
+ .ops = &ksz88xx_dev_ops,
+ .phylink_mac_ops = &ksz8830_phylink_mac_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8895_regs,
+ .masks = ksz8895_masks,
+ .shifts = ksz8895_shifts,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
[KSZ9477] = {
.chip_id = KSZ9477_CHIP_ID,
.dev_name = "KSZ9477",
@@ -2893,9 +3047,7 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
struct ksz_device *dev = ds->priv;
enum dsa_tag_protocol proto = DSA_TAG_PROTO_NONE;
- if (dev->chip_id == KSZ8795_CHIP_ID ||
- dev->chip_id == KSZ8794_CHIP_ID ||
- dev->chip_id == KSZ8765_CHIP_ID)
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev))
proto = DSA_TAG_PROTO_KSZ8795;
if (dev->chip_id == KSZ8830_CHIP_ID ||
@@ -3011,6 +3163,8 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
case KSZ8765_CHIP_ID:
return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
case KSZ8830_CHIP_ID:
+ case KSZ8864_CHIP_ID:
+ case KSZ8895_CHIP_ID:
return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
case KSZ8563_CHIP_ID:
case KSZ8567_CHIP_ID:
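
The max-MTU helpers are the inverse of change_mtu: fixed L2 overhead is subtracted from the chip's huge-packet limit. A worked sketch, where the 1916-byte limit is a placeholder rather than a datasheet value; VLAN_ETH_HLEN and ETH_FCS_LEN are the standard 18- and 4-byte kernel constants:

        #include <linux/if_vlan.h>      /* VLAN_ETH_HLEN == 18 */
        #include <linux/if_ether.h>     /* ETH_FCS_LEN   == 4  */

        #define EXAMPLE_HUGE_PACKET_SIZE 1916   /* hypothetical chip limit */

        static int example_max_mtu(void)
        {
                /* 1916 - 18 - 4 = 1894 bytes of usable MTU */
                return EXAMPLE_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
        }
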
@@ -3368,6 +3522,18 @@ static int ksz_switch_detect(struct ksz_device *dev)
else
return -ENODEV;
break;
+ case KSZ8895_FAMILY_ID:
+ if (id2 == KSZ8895_CHIP_ID_95 ||
+ id2 == KSZ8895_CHIP_ID_95R)
+ dev->chip_id = KSZ8895_CHIP_ID;
+ else
+ return -ENODEV;
+ ret = ksz_read8(dev, REG_KSZ8864_CHIP_ID, &id4);
+ if (ret)
+ return ret;
+ if (id4 & SW_KSZ8864)
+ dev->chip_id = KSZ8864_CHIP_ID;
+ break;
default:
ret = ksz_read32(dev, REG_CHIP_ID0, &id32);
if (ret)
@@ -3742,24 +3908,214 @@ static int ksz_setup_tc(struct dsa_switch *ds, int port,
}
}
+/**
+ * ksz_handle_wake_reason - Handle wake reason on a specified port.
+ * @dev: The device structure.
+ * @port: The port number.
+ *
+ * This function reads the PME (Power Management Event) status register of a
+ * specified port to determine the wake reason. If there is no wake event, it
+ * returns early. Otherwise, it logs the wake reason which could be due to a
+ * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register
+ * is then cleared to acknowledge the handling of the wake event.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int ksz_handle_wake_reason(struct ksz_device *dev, int port)
+{
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ const u16 *regs = dev->info->regs;
+ u8 pme_status;
+ int ret;
+
+ ret = ops->pme_pread8(dev, port, regs[REG_PORT_PME_STATUS],
+ &pme_status);
+ if (ret)
+ return ret;
+
+ if (!pme_status)
+ return 0;
+
+ dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port,
+ pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "",
+ pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
+ pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");
+
+ return ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_STATUS],
+ pme_status);
+}
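
Two details of this helper are worth calling out: the early return keeps the register untouched when nothing woke the port, and the final write echoes back exactly the bits that were read, which, judging by this usage, behave as write-one-to-clear. A condensed sketch of that acknowledge pattern, with the logging dropped:

        static int ksz_ack_wake_events(struct ksz_device *dev, int port)
        {
                const u16 *regs = dev->info->regs;
                u8 pme_status;
                int ret;

                ret = dev->dev_ops->pme_pread8(dev, port,
                                               regs[REG_PORT_PME_STATUS],
                                               &pme_status);
                if (ret || !pme_status)
                        return ret;

                /* Writing the read value back clears only the reported bits,
                 * so a wake event racing in between is not lost.
                 */
                return dev->dev_ops->pme_pwrite8(dev, port,
                                                 regs[REG_PORT_PME_STATUS],
                                                 pme_status);
        }

With dynamic debug enabled, a magic-packet wake then surfaces as `Wake event on port 1 due to: "Magic Packet"`, per the format string above.
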
+
+/**
+ * ksz_get_wol - Get Wake-on-LAN settings for a specified port.
+ * @ds: The dsa_switch structure.
+ * @port: The port number.
+ * @wol: Pointer to ethtool Wake-on-LAN settings structure.
+ *
+ * This function checks the device's PME wakeup_source flag and chip ID.
+ * If WoL is enabled and supported, it sets the supported and active WoL
+ * flags.
+ */
static void ksz_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ u8 pme_ctrl;
+ int ret;
+
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return;
- if (dev->dev_ops->get_wol)
- dev->dev_ops->get_wol(dev, port, wol);
+ if (!dev->wakeup_source)
+ return;
+
+ wol->supported = WAKE_PHY;
+
+ /* Check if the current MAC address on this port can be set
+ * as global for WAKE_MAGIC support. The result may vary
+ * dynamically based on other ports configurations.
+ */
+ if (ksz_is_port_mac_global_usable(dev->ds, port))
+ wol->supported |= WAKE_MAGIC;
+
+ ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL],
+ &pme_ctrl);
+ if (ret)
+ return;
+
+ if (pme_ctrl & PME_WOL_MAGICPKT)
+ wol->wolopts |= WAKE_MAGIC;
+ if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
+ wol->wolopts |= WAKE_PHY;
}
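
From userspace these flags surface through the standard ethtool WoL ioctl; `ethtool lan1` reports them as "Supports Wake-on" / "Wake-on". A minimal hedged query in C (the interface name "lan1" is an assumption):

        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <net/if.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <linux/ethtool.h>
        #include <linux/sockios.h>

        /* Query WoL state for "lan1". WAKE_MAGIC in wolopts corresponds to
         * the port's PME_WOL_MAGICPKT bit being set.
         */
        int main(void)
        {
                struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
                struct ifreq ifr = { 0 };
                int fd = socket(AF_INET, SOCK_DGRAM, 0);

                if (fd < 0)
                        return 1;
                strncpy(ifr.ifr_name, "lan1", IFNAMSIZ - 1);
                ifr.ifr_data = (void *)&wol;
                if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                        printf("supported %#x active %#x\n",
                               wol.supported, wol.wolopts);
                close(fd);
                return 0;
        }
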
+/**
+ * ksz_set_wol - Set Wake-on-LAN settings for a specified port.
+ * @ds: The dsa_switch structure.
+ * @port: The port number.
+ * @wol: Pointer to ethtool Wake-on-LAN settings structure.
+ *
+ * This function configures Wake-on-LAN (WoL) settings for a specified
+ * port. It validates the provided WoL options, checks if PME is
+ * enabled and supported, clears any previous wake reasons, and sets
+ * the Magic Packet flag in the port's PME control register if
+ * specified.
+ *
+ * Return: 0 on success, or other error codes on failure.
+ */
static int ksz_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
+ u8 pme_ctrl = 0, pme_ctrl_old = 0;
struct ksz_device *dev = ds->priv;
+ const u16 *regs = dev->info->regs;
+ bool magic_switched_off;
+ bool magic_switched_on;
+ int ret;
- if (dev->dev_ops->set_wol)
- return dev->dev_ops->set_wol(dev, port, wol);
+ if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
- return -EOPNOTSUPP;
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return -EOPNOTSUPP;
+
+ if (!dev->wakeup_source)
+ return -EOPNOTSUPP;
+
+ ret = ksz_handle_wake_reason(dev, port);
+ if (ret)
+ return ret;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ pme_ctrl |= PME_WOL_MAGICPKT;
+ if (wol->wolopts & WAKE_PHY)
+ pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;
+
+ ret = dev->dev_ops->pme_pread8(dev, port, regs[REG_PORT_PME_CTRL],
+ &pme_ctrl_old);
+ if (ret)
+ return ret;
+
+ if (pme_ctrl_old == pme_ctrl)
+ return 0;
+
+ magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) &&
+ !(pme_ctrl & PME_WOL_MAGICPKT);
+ magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) &&
+ (pme_ctrl & PME_WOL_MAGICPKT);
+
+ /* To keep the MAC address reference count balanced, perform this
+ * operation only on a change of the WoL settings.
+ */
+ if (magic_switched_on) {
+ ret = ksz_switch_macaddr_get(dev->ds, port, NULL);
+ if (ret)
+ return ret;
+ } else if (magic_switched_off) {
+ ksz_switch_macaddr_put(dev->ds);
+ }
+
+ ret = dev->dev_ops->pme_pwrite8(dev, port, regs[REG_PORT_PME_CTRL],
+ pme_ctrl);
+ if (ret) {
+ if (magic_switched_on)
+ ksz_switch_macaddr_put(dev->ds);
+ return ret;
+ }
+
+ return 0;
+}
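
The reference-count handling reduces to edge detection on one bit; the two booleans above are the rising and falling edges of the same XOR. A behavior-equivalent condensation (a sketch, not the patch's code):

        static int ksz_magic_edge(struct ksz_device *dev, int port,
                                  u8 pme_ctrl_old, u8 pme_ctrl)
        {
                bool changed = (pme_ctrl_old ^ pme_ctrl) & PME_WOL_MAGICPKT;

                /* Rising edge takes the global MAC address reference,
                 * falling edge drops it; other WoL bits never touch it.
                 */
                if (changed && (pme_ctrl & PME_WOL_MAGICPKT))
                        return ksz_switch_macaddr_get(dev->ds, port, NULL);
                if (changed)
                        ksz_switch_macaddr_put(dev->ds);
                return 0;
        }
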
+
+/**
+ * ksz_wol_pre_shutdown - Prepares the switch device for shutdown while
+ * considering Wake-on-LAN (WoL) settings.
+ * @dev: The switch device structure.
+ * @wol_enabled: Pointer to a boolean which will be set to true if WoL is
+ * enabled on any port.
+ *
+ * This function prepares the switch device for a safe shutdown while taking
+ * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
+ * the wol_enabled flag accordingly to reflect whether WoL is active on any
+ * port.
+ */
+static void ksz_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled)
+{
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ const u16 *regs = dev->info->regs;
+ u8 pme_pin_en = PME_ENABLE;
+ struct dsa_port *dp;
+ int ret;
+
+ *wol_enabled = false;
+
+ if (!is_ksz9477(dev) && !ksz_is_ksz87xx(dev))
+ return;
+
+ if (!dev->wakeup_source)
+ return;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ u8 pme_ctrl = 0;
+
+ ret = ops->pme_pread8(dev, dp->index,
+ regs[REG_PORT_PME_CTRL], &pme_ctrl);
+ if (!ret && pme_ctrl)
+ *wol_enabled = true;
+
+ /* make sure there are no pending wake events which would
+ * prevent the device from going to sleep/shutdown.
+ */
+ ksz_handle_wake_reason(dev, dp->index);
+ }
+
+ /* Now it is safe to enable the PME pin. */
+ if (*wol_enabled) {
+ if (dev->pme_active_high)
+ pme_pin_en |= PME_POLARITY;
+ ops->pme_write8(dev, regs[REG_SW_PME_CTRL], pme_pin_en);
+ if (ksz_is_ksz87xx(dev))
+ ksz_write8(dev, KSZ87XX_REG_INT_EN, KSZ87XX_INT_PME_MASK);
+ }
}
static int ksz_port_set_mac_address(struct dsa_switch *ds, int port,
@@ -4072,8 +4428,7 @@ void ksz_switch_shutdown(struct ksz_device *dev)
{
bool wol_enabled = false;
- if (dev->dev_ops->wol_pre_shutdown)
- dev->dev_ops->wol_pre_shutdown(dev, &wol_enabled);
+ ksz_wol_pre_shutdown(dev, &wol_enabled);
if (dev->dev_ops->reset && !wol_enabled)
dev->dev_ops->reset(dev);
@@ -4475,6 +4830,8 @@ int ksz_switch_register(struct ksz_device *dev)
dev->wakeup_source = of_property_read_bool(dev->dev->of_node,
"wakeup-source");
+ dev->pme_active_high = of_property_read_bool(dev->dev->of_node,
+ "microchip,pme-active-high");
}
ret = dsa_register_switch(dev->ds);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 5f0a628b9849..e08d5a1339f4 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip switch driver common header
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#ifndef __KSZ_COMMON_H
@@ -174,6 +174,7 @@ struct ksz_device {
bool synclko_125;
bool synclko_disable;
bool wakeup_source;
+ bool pme_active_high;
struct vlan_table *vlan_cache;
@@ -200,6 +201,8 @@ enum ksz_model {
KSZ8794,
KSZ8765,
KSZ8830,
+ KSZ8864,
+ KSZ8895,
KSZ9477,
KSZ9896,
KSZ9897,
@@ -235,6 +238,9 @@ enum ksz_regs {
S_MULTICAST_CTRL,
P_XMII_CTRL_0,
P_XMII_CTRL_1,
+ REG_SW_PME_CTRL,
+ REG_PORT_PME_STATUS,
+ REG_PORT_PME_CTRL,
};
enum ksz_masks {
@@ -354,6 +360,11 @@ struct ksz_dev_ops {
void (*get_caps)(struct ksz_device *dev, int port,
struct phylink_config *config);
int (*change_mtu)(struct ksz_device *dev, int port, int mtu);
+ int (*pme_write8)(struct ksz_device *dev, u32 reg, u8 value);
+ int (*pme_pread8)(struct ksz_device *dev, int port, int offset,
+ u8 *data);
+ int (*pme_pwrite8)(struct ksz_device *dev, int port, int offset,
+ u8 data);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
void (*phylink_mac_link_up)(struct ksz_device *dev, int port,
@@ -363,11 +374,6 @@ struct ksz_dev_ops {
int duplex, bool tx_pause, bool rx_pause);
void (*setup_rgmii_delay)(struct ksz_device *dev, int port);
int (*tc_cbs_set_cinc)(struct ksz_device *dev, int port, u32 val);
- void (*get_wol)(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
- int (*set_wol)(struct ksz_device *dev, int port,
- struct ethtool_wolinfo *wol);
- void (*wol_pre_shutdown)(struct ksz_device *dev, bool *wol_enabled);
void (*config_cpu_port)(struct dsa_switch *ds);
int (*enable_stp_addr)(struct ksz_device *dev);
int (*reset)(struct ksz_device *dev);
@@ -391,6 +397,7 @@ int ksz_switch_macaddr_get(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
void ksz_switch_macaddr_put(struct dsa_switch *ds);
void ksz_switch_shutdown(struct ksz_device *dev);
+int ksz_handle_wake_reason(struct ksz_device *dev, int port);
/* Common register access functions */
static inline struct regmap *ksz_regmap_8(struct ksz_device *dev)
@@ -624,9 +631,26 @@ static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
return dev->chip_id == KSZ8830_CHIP_ID;
}
+static inline bool ksz_is_8895_family(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8895_CHIP_ID ||
+ dev->chip_id == KSZ8864_CHIP_ID;
+}
+
static inline bool is_ksz8(struct ksz_device *dev)
{
- return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev);
+ return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev) ||
+ ksz_is_8895_family(dev);
+}
+
+static inline bool is_ksz88xx(struct ksz_device *dev)
+{
+ return ksz_is_ksz88x3(dev) || ksz_is_8895_family(dev);
+}
+
+static inline bool is_ksz9477(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ9477_CHIP_ID;
}
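
With these predicates in place, the `is_ksz9477() || ksz_is_ksz87xx()` gate that now opens ksz_get_wol(), ksz_set_wol() and ksz_wol_pre_shutdown() could plausibly be factored once. A hypothetical helper, not part of this series:

        /* PME/WoL registers exist only on KSZ9477 and the KSZ87xx family
         * in this driver, so the three WoL entry points could share one gate.
         */
        static inline bool ksz_has_pme(struct ksz_device *dev)
        {
                return is_ksz9477(dev) || ksz_is_ksz87xx(dev);
        }
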
static inline int is_lan937x(struct ksz_device *dev)
@@ -655,6 +679,7 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
#define SW_FAMILY_ID_M GENMASK(15, 8)
#define KSZ87_FAMILY_ID 0x87
#define KSZ88_FAMILY_ID 0x88
+#define KSZ8895_FAMILY_ID 0x95
#define KSZ8_PORT_STATUS_0 0x08
#define KSZ8_PORT_FIBER_MODE BIT(7)
@@ -663,6 +688,12 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
#define KSZ87_CHIP_ID_94 0x6
#define KSZ87_CHIP_ID_95 0x9
#define KSZ88_CHIP_ID_63 0x3
+#define KSZ8895_CHIP_ID_95 0x4
+#define KSZ8895_CHIP_ID_95R 0x6
+
+/* KSZ8895 specific register */
+#define REG_KSZ8864_CHIP_ID 0xFE
+#define SW_KSZ8864 BIT(7)
#define SW_REV_ID_M GENMASK(7, 4)
@@ -695,6 +726,17 @@ static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
#define P_MII_MAC_MODE BIT(2)
#define P_MII_SEL_M 0x3
+/* KSZ9477, KSZ87xx Wake-on-LAN (WoL) masks */
+#define PME_WOL_MAGICPKT BIT(2)
+#define PME_WOL_LINKUP BIT(1)
+#define PME_WOL_ENERGY BIT(0)
+
+#define PME_ENABLE BIT(1)
+#define PME_POLARITY BIT(0)
+
+#define KSZ87XX_REG_INT_EN 0x7D
+#define KSZ87XX_INT_PME_MASK BIT(4)
+
/* Interrupt */
#define REG_SW_PORT_INT_STATUS__1 0x001B
#define REG_SW_PORT_INT_MASK__1 0x001F
diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c
index 086bc9b3cf53..30b4a6186e38 100644
--- a/drivers/net/dsa/microchip/ksz_dcb.c
+++ b/drivers/net/dsa/microchip/ksz_dcb.c
@@ -113,7 +113,7 @@ static void ksz_get_default_port_prio_reg(struct ksz_device *dev, int *reg,
static void ksz_get_dscp_prio_reg(struct ksz_device *dev, int *reg,
int *per_reg, u8 *mask)
{
- if (ksz_is_ksz87xx(dev)) {
+ if (ksz_is_ksz87xx(dev) || ksz_is_8895_family(dev)) {
*reg = KSZ8765_REG_TOS_DSCP_CTRL;
*per_reg = 4;
*mask = GENMASK(1, 0);
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 8e8d83213b04..f4287310e89f 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -2,7 +2,7 @@
/*
* Microchip ksz series register access through SPI
*
- * Copyright (C) 2017 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
* Tristram Ha <Tristram.Ha@microchip.com>
*/
@@ -60,6 +60,9 @@ static int ksz_spi_probe(struct spi_device *spi)
chip->chip_id == KSZ8794_CHIP_ID ||
chip->chip_id == KSZ8765_CHIP_ID)
regmap_config = ksz8795_regmap_config;
+ else if (chip->chip_id == KSZ8895_CHIP_ID ||
+ chip->chip_id == KSZ8864_CHIP_ID)
+ regmap_config = ksz8863_regmap_config;
else
regmap_config = ksz9477_regmap_config;
@@ -137,10 +140,18 @@ static const struct of_device_id ksz_dt_ids[] = {
.data = &ksz_switch_chips[KSZ8830]
},
{
+ .compatible = "microchip,ksz8864",
+ .data = &ksz_switch_chips[KSZ8864]
+ },
+ {
.compatible = "microchip,ksz8873",
.data = &ksz_switch_chips[KSZ8830]
},
{
+ .compatible = "microchip,ksz8895",
+ .data = &ksz_switch_chips[KSZ8895]
+ },
+ {
.compatible = "microchip,ksz9477",
.data = &ksz_switch_chips[KSZ9477]
},
@@ -201,7 +212,9 @@ static const struct spi_device_id ksz_spi_ids[] = {
{ "ksz8794" },
{ "ksz8795" },
{ "ksz8863" },
+ { "ksz8864" },
{ "ksz8873" },
+ { "ksz8895" },
{ "ksz9477" },
{ "ksz9896" },
{ "ksz9897" },
diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c
index b74a230a3f13..10dc49961f15 100644
--- a/drivers/net/dsa/mt7530-mmio.c
+++ b/drivers/net/dsa/mt7530-mmio.c
@@ -11,6 +11,7 @@
#include "mt7530.h"
static const struct of_device_id mt7988_of_match[] = {
+ { .compatible = "airoha,en7581-switch", .data = &mt753x_table[ID_EN7581], },
{ .compatible = "mediatek,mt7988-switch", .data = &mt753x_table[ID_MT7988], },
{ /* sentinel */ },
};
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index ec18e68bf3a8..d84ee1b419a6 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1152,7 +1152,8 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
* the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
* is affine to the inbound user port.
*/
- if (priv->id == ID_MT7531 || priv->id == ID_MT7988)
+ if (priv->id == ID_MT7531 || priv->id == ID_MT7988 ||
+ priv->id == ID_EN7581)
mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
/* CPU port gets connected to all user ports of
@@ -2207,7 +2208,7 @@ mt7530_setup_irq(struct mt7530_priv *priv)
return priv->irq ? : -EINVAL;
}
- if (priv->id == ID_MT7988)
+ if (priv->id == ID_MT7988 || priv->id == ID_EN7581)
priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
&mt7988_irq_domain_ops,
priv);
@@ -2438,8 +2439,10 @@ mt7530_setup(struct dsa_switch *ds)
/* Clear link settings and enable force mode to force link down
* on all ports until they're enabled later.
*/
- mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
- MT7530_FORCE_MODE, MT7530_FORCE_MODE);
+ mt7530_rmw(priv, MT753X_PMCR_P(i),
+ PMCR_LINK_SETTINGS_MASK |
+ MT753X_FORCE_MODE(priv->id),
+ MT753X_FORCE_MODE(priv->id));
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -2550,8 +2553,10 @@ mt7531_setup_common(struct dsa_switch *ds)
/* Clear link settings and enable force mode to force link down
* on all ports until they're enabled later.
*/
- mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
- MT7531_FORCE_MODE_MASK, MT7531_FORCE_MODE_MASK);
+ mt7530_rmw(priv, MT753X_PMCR_P(i),
+ PMCR_LINK_SETTINGS_MASK |
+ MT753X_FORCE_MODE(priv->id),
+ MT753X_FORCE_MODE(priv->id));
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -2783,6 +2788,28 @@ static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
}
}
+static void en7581_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ switch (port) {
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
+ break;
+
+ /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_10000FD;
+ break;
+ }
+}
+
static void
mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
@@ -3220,6 +3247,16 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c45 = mt7531_ind_c45_phy_write,
.mac_port_get_caps = mt7988_mac_port_get_caps,
},
+ [ID_EN7581] = {
+ .id = ID_EN7581,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7988_setup,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
+ .mac_port_get_caps = en7581_mac_port_get_caps,
+ },
};
EXPORT_SYMBOL_GPL(mt753x_table);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 28592123070b..6ad33a9f6b1d 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -19,6 +19,7 @@ enum mt753x_id {
ID_MT7621 = 1,
ID_MT7531 = 2,
ID_MT7988 = 3,
+ ID_EN7581 = 4,
};
#define NUM_TRGMII_CTRL 5
@@ -64,25 +65,30 @@ enum mt753x_id {
#define MT7531_CPU_PMAP(x) FIELD_PREP(MT7531_CPU_PMAP_MASK, x)
#define MT753X_MIRROR_REG(id) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_CFC : MT753X_MFC)
#define MT753X_MIRROR_EN(id) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_MIRROR_EN : MT7530_MIRROR_EN)
#define MT753X_MIRROR_PORT_MASK(id) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_MIRROR_PORT_MASK : \
MT7530_MIRROR_PORT_MASK)
#define MT753X_MIRROR_PORT_GET(id, val) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_MIRROR_PORT_GET(val) : \
MT7530_MIRROR_PORT_GET(val))
#define MT753X_MIRROR_PORT_SET(id, val) ((id == ID_MT7531 || \
- id == ID_MT7988) ? \
+ id == ID_MT7988 || \
+ id == ID_EN7581) ? \
MT7531_MIRROR_PORT_SET(val) : \
MT7530_MIRROR_PORT_SET(val))
@@ -355,6 +361,10 @@ enum mt7530_vlan_port_acc_frm {
MT7531_FORCE_MODE_TX_FC | \
MT7531_FORCE_MODE_EEE100 | \
MT7531_FORCE_MODE_EEE1G)
+#define MT753X_FORCE_MODE(id) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_FORCE_MODE_MASK : \
+ MT7530_FORCE_MODE)
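
Note the selector's fall-through: only MT7531 and MT7988 resolve to the wide per-speed mask, so the new EN7581 (like MT7530/MT7621) gets the single-bit MT7530_FORCE_MODE. Illustratively, at the rmw call sites shown in mt7530.c:

        /* What the macro evaluates to per chip ID, as written above:
         *   MT753X_FORCE_MODE(ID_MT7531) == MT7531_FORCE_MODE_MASK
         *   MT753X_FORCE_MODE(ID_MT7988) == MT7531_FORCE_MODE_MASK
         *   MT753X_FORCE_MODE(ID_EN7581) == MT7530_FORCE_MODE
         */
        u32 force_mode = MT753X_FORCE_MODE(priv->id);

        mt7530_rmw(priv, MT753X_PMCR_P(i),
                   PMCR_LINK_SETTINGS_MASK | force_mode, force_mode);
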
#define PMCR_LINK_SETTINGS_MASK (PMCR_MAC_TX_EN | PMCR_MAC_RX_EN | \
PMCR_FORCE_EEE1G | \
PMCR_FORCE_EEE100 | \
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index 61ab6cc4fbfc..53a6d3ed63b3 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -146,7 +146,7 @@ static int mv88e6352_g2_scratch_gpio_set_data(struct mv88e6xxx_chip *chip,
* @chip: chip private data
* @pin: gpio index
*
- * Return: 0 for output, 1 for input (same as GPIOF_DIR_XXX).
+ * Return: 0 for output, 1 for input.
*/
static int mv88e6352_g2_scratch_gpio_get_dir(struct mv88e6xxx_chip *chip,
unsigned int pin)
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 4a705f7333f4..3aa9c997018a 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1370,9 +1370,8 @@ static int felix_parse_ports_node(struct felix *felix,
phy_interface_t *port_phy_modes)
{
struct device *dev = felix->ocelot.dev;
- struct device_node *child;
- for_each_available_child_of_node(ports_node, child) {
+ for_each_available_child_of_node_scoped(ports_node, child) {
phy_interface_t phy_mode;
u32 port;
int err;
@@ -1381,7 +1380,6 @@ static int felix_parse_ports_node(struct felix *felix,
if (of_property_read_u32(child, "reg", &port) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
- of_node_put(child);
return -ENODEV;
}
@@ -1391,7 +1389,6 @@ static int felix_parse_ports_node(struct felix *felix,
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
port);
- of_node_put(child);
return -ENODEV;
}
diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
index 35709a1756ae..3c5018d5e1f9 100644
--- a/drivers/net/dsa/realtek/rtl83xx.c
+++ b/drivers/net/dsa/realtek/rtl83xx.c
@@ -185,11 +185,9 @@ rtl83xx_probe(struct device *dev,
/* TODO: if power is software controlled, set up any regulators here */
priv->reset_ctl = devm_reset_control_get_optional(dev, NULL);
- if (IS_ERR(priv->reset_ctl)) {
- ret = PTR_ERR(priv->reset_ctl);
- dev_err_probe(dev, ret, "failed to get reset control\n");
- return ERR_CAST(priv->reset_ctl);
- }
+ if (IS_ERR(priv->reset_ctl))
+ return dev_err_cast_probe(dev, priv->reset_ctl,
+ "failed to get reset control\n");
priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(priv->reset)) {
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index c7282ce3d11c..bc7e50dcb57c 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1188,9 +1188,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
struct device_node *ports_node)
{
struct device *dev = &priv->spidev->dev;
- struct device_node *child;
- for_each_available_child_of_node(ports_node, child) {
+ for_each_available_child_of_node_scoped(ports_node, child) {
struct device_node *phy_node;
phy_interface_t phy_mode;
u32 index;
@@ -1200,7 +1199,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (of_property_read_u32(child, "reg", &index) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
- of_node_put(child);
return -ENODEV;
}
@@ -1210,7 +1208,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
index);
- of_node_put(child);
return -ENODEV;
}
@@ -1219,7 +1216,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (!of_phy_is_fixed_link(child)) {
dev_err(dev, "phy-handle or fixed-link "
"properties missing!\n");
- of_node_put(child);
return -ENODEV;
}
/* phy-handle is missing, but fixed-link isn't.
@@ -1233,10 +1229,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
priv->phy_mode[index] = phy_mode;
err = sja1105_parse_rgmii_delays(priv, index, child);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index e3f95d2cc2c1..4d693c029c1f 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
@@ -225,9 +226,27 @@
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE 3
/* MII block 3 registers */
-#define VSC73XX_MII_STAT 0x0
-#define VSC73XX_MII_CMD 0x1
-#define VSC73XX_MII_DATA 0x2
+#define VSC73XX_MII_STAT 0x0
+#define VSC73XX_MII_CMD 0x1
+#define VSC73XX_MII_DATA 0x2
+#define VSC73XX_MII_MPRES 0x3
+
+#define VSC73XX_MII_STAT_BUSY BIT(3)
+#define VSC73XX_MII_STAT_READ BIT(2)
+#define VSC73XX_MII_STAT_WRITE BIT(1)
+
+#define VSC73XX_MII_CMD_SCAN BIT(27)
+#define VSC73XX_MII_CMD_OPERATION BIT(26)
+#define VSC73XX_MII_CMD_PHY_ADDR GENMASK(25, 21)
+#define VSC73XX_MII_CMD_PHY_REG GENMASK(20, 16)
+#define VSC73XX_MII_CMD_WRITE_DATA GENMASK(15, 0)
+
+#define VSC73XX_MII_DATA_FAILURE BIT(16)
+#define VSC73XX_MII_DATA_READ_DATA GENMASK(15, 0)
+
+#define VSC73XX_MII_MPRES_NOPREAMBLE BIT(6)
+#define VSC73XX_MII_MPRES_PRESCALEVAL GENMASK(5, 0)
+#define VSC73XX_MII_PRESCALEVAL_MIN 3 /* min allowed mdio clock prescaler */
-#define VSC73XX_MII_STAT_BUSY BIT(3)
@@ -562,8 +581,11 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
return ret;
/* Setting bit 26 means "read" */
- cmd = BIT(26) | (phy << 21) | (regnum << 16);
- ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ cmd = VSC73XX_MII_CMD_OPERATION |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_ADDR, phy) |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_CMD, cmd);
if (ret)
return ret;
@@ -571,15 +593,16 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
if (ret)
return ret;
- ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
+ ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_DATA, &val);
if (ret)
return ret;
- if (val & BIT(16)) {
+ if (val & VSC73XX_MII_DATA_FAILURE) {
dev_err(vsc->dev, "reading reg %02x from phy%d failed\n",
regnum, phy);
return -EIO;
}
- val &= 0xFFFFU;
+ val &= VSC73XX_MII_DATA_READ_DATA;
dev_dbg(vsc->dev, "read reg %02x from phy%d = %04x\n",
regnum, phy, val);
@@ -598,8 +621,11 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
if (ret)
return ret;
- cmd = (phy << 21) | (regnum << 16) | val;
- ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ cmd = FIELD_PREP(VSC73XX_MII_CMD_PHY_ADDR, phy) |
+ FIELD_PREP(VSC73XX_MII_CMD_PHY_REG, regnum) |
+ FIELD_PREP(VSC73XX_MII_CMD_WRITE_DATA, val);
+ ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_CMD, cmd);
if (ret)
return ret;
@@ -708,10 +734,71 @@ vsc73xx_update_vlan_table(struct vsc73xx *vsc, int port, u16 vid, bool set)
return vsc73xx_write_vlan_table_entry(vsc, vid, portmap);
}
+static int vsc73xx_configure_rgmii_port_delay(struct dsa_switch *ds)
+{
+ /* Keep 2.0 ns delay for backward compatibility */
+ u32 tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS;
+ u32 rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS;
+ struct dsa_port *dp = dsa_to_port(ds, CPU_PORT);
+ struct device_node *port_dn = dp->dn;
+ struct vsc73xx *vsc = ds->priv;
+ u32 delay;
+
+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay)) {
+ switch (delay) {
+ case 0:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE;
+ break;
+ case 1400:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS;
+ break;
+ case 1700:
+ tx_delay = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS;
+ break;
+ case 2000:
+ break;
+ default:
+ dev_err(vsc->dev,
+ "Unsupported RGMII Transmit Clock Delay\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(vsc->dev,
+ "RGMII Transmit Clock Delay isn't configured, set to 2.0 ns\n");
+ }
+
+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay)) {
+ switch (delay) {
+ case 0:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_NONE;
+ break;
+ case 1400:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_4_NS;
+ break;
+ case 1700:
+ rx_delay = VSC73XX_GMIIDELAY_GMII0_RXDELAY_1_7_NS;
+ break;
+ case 2000:
+ break;
+ default:
+ dev_err(vsc->dev,
+ "Unsupported RGMII Receive Clock Delay value\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(vsc->dev,
+ "RGMII Receive Clock Delay isn't configured, set to 2.0 ns\n");
+ }
+
+ /* MII delay, set both GTX and RX delay */
+ return vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
+ tx_delay | rx_delay);
+}
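
The accepted property values map 1:1 onto the four hardware delay steps, with 2000 ps doubling as the default when a property is absent. A hypothetical helper expressing the TX mapping (the RX side is identical with the RXDELAY constants):

        static int vsc73xx_tx_delay_ps_to_reg(u32 ps, u32 *val)
        {
                switch (ps) {
                case 0:
                        *val = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_NONE;
                        return 0;
                case 1400:
                        *val = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_4_NS;
                        return 0;
                case 1700:
                        *val = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_1_7_NS;
                        return 0;
                case 2000:
                        *val = VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS;
                        return 0;
                default:
                        return -EINVAL; /* reject unsupported steps */
                }
        }
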
+
static int vsc73xx_setup(struct dsa_switch *ds)
{
struct vsc73xx *vsc = ds->priv;
- int i, ret;
+ int i, ret, val;
dev_info(vsc->dev, "set up the switch\n");
@@ -770,10 +857,11 @@ static int vsc73xx_setup(struct dsa_switch *ds)
VSC73XX_MAC_CFG, VSC73XX_MAC_CFG_RESET);
}
- /* MII delay, set both GTX and RX delay to 2 ns */
- vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
- VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS |
- VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS);
+ /* Configure RGMII delay */
+ ret = vsc73xx_configure_rgmii_port_delay(ds);
+ if (ret)
+ return ret;
+
/* Ingress VLAN reception mask (table 145) */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANMASK,
0xff);
@@ -783,6 +871,15 @@ static int vsc73xx_setup(struct dsa_switch *ds)
mdelay(50);
+ /* Disable preamble and use maximum allowed clock for the internal
+ * mdio bus, used for communication with internal PHYs only.
+ */
+ val = VSC73XX_MII_MPRES_NOPREAMBLE |
+ FIELD_PREP(VSC73XX_MII_MPRES_PRESCALEVAL,
+ VSC73XX_MII_PRESCALEVAL_MIN);
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_MPRES, val);
+
/* Release reset from the internal PHYs */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
VSC73XX_GLORESET_PHY_RESET);
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 78231c85234d..7f0c07c20be3 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1051,11 +1051,9 @@ static int slic_load_rcvseq_firmware(struct slic_device *sdev)
file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_RCV_FIRMWARE_OASIS :
SLIC_RCV_FIRMWARE_MOJAVE;
err = request_firmware(&fw, file, &sdev->pdev->dev);
- if (err) {
- dev_err(&sdev->pdev->dev,
+ if (err)
+ return dev_err_probe(&sdev->pdev->dev, err,
"failed to load receive sequencer firmware %s\n", file);
- return err;
- }
/* Do an initial sanity check concerning firmware size now. A further
* check follows below.
*/
@@ -1126,10 +1124,9 @@ static int slic_load_firmware(struct slic_device *sdev)
file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_FIRMWARE_OASIS :
SLIC_FIRMWARE_MOJAVE;
err = request_firmware(&fw, file, &sdev->pdev->dev);
- if (err) {
- dev_err(&sdev->pdev->dev, "failed to load firmware %s\n", file);
- return err;
- }
+ if (err)
+ return dev_err_probe(&sdev->pdev->dev, err,
+ "failed to load firmware %s\n", file);
/* Do an initial sanity check concerning firmware size now. A further
* check follows below.
*/
@@ -1678,17 +1675,15 @@ static int slic_init(struct slic_device *sdev)
slic_card_reset(sdev);
err = slic_load_firmware(sdev);
- if (err) {
- dev_err(&sdev->pdev->dev, "failed to load firmware\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&sdev->pdev->dev, err,
+ "failed to load firmware\n");
/* we need the shared memory to read EEPROM so set it up temporarily */
err = slic_init_shmem(sdev);
- if (err) {
- dev_err(&sdev->pdev->dev, "failed to init shared memory\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&sdev->pdev->dev, err,
+ "failed to init shared memory\n");
err = slic_read_eeprom(sdev);
if (err) {
@@ -1741,10 +1736,9 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "failed to enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to enable PCI device\n");
pci_set_master(pdev);
pci_try_set_mwi(pdev);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 3d8ac63132fb..9e6f91df2ba0 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -1560,9 +1560,9 @@ static void ace_watchdog(struct net_device *data, unsigned int txqueue)
}
-static void ace_tasklet(struct tasklet_struct *t)
+static void ace_bh_work(struct work_struct *work)
{
- struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
+ struct ace_private *ap = from_work(ap, work, ace_bh_work);
struct net_device *dev = ap->ndev;
int cur_size;
@@ -1595,7 +1595,7 @@ static void ace_tasklet(struct tasklet_struct *t)
#endif
ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
}
- ap->tasklet_pending = 0;
+ ap->bh_work_pending = 0;
}
@@ -1617,7 +1617,7 @@ static void ace_dump_trace(struct ace_private *ap)
*
* Loading rings is safe without holding the spin lock since this is
* done only before the device is enabled, thus no interrupts are
- * generated and by the interrupt handler/tasklet handler.
+ * generated, and there is no concurrency with the interrupt/BH work handler.
*/
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
{
@@ -2160,7 +2160,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
*/
if (netif_running(dev)) {
int cur_size;
- int run_tasklet = 0;
+ int run_bh_work = 0;
cur_size = atomic_read(&ap->cur_rx_bufs);
if (cur_size < RX_LOW_STD_THRES) {
@@ -2172,7 +2172,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_std_rx_ring(dev,
RX_RING_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
if (!ACE_IS_TIGON_I(ap)) {
@@ -2188,7 +2188,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_mini_rx_ring(dev,
RX_MINI_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
}
@@ -2205,12 +2205,12 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
ace_load_jumbo_rx_ring(dev,
RX_JUMBO_SIZE - cur_size);
} else
- run_tasklet = 1;
+ run_bh_work = 1;
}
}
- if (run_tasklet && !ap->tasklet_pending) {
- ap->tasklet_pending = 1;
- tasklet_schedule(&ap->ace_tasklet);
+ if (run_bh_work && !ap->bh_work_pending) {
+ ap->bh_work_pending = 1;
+ queue_work(system_bh_wq, &ap->ace_bh_work);
}
}
@@ -2267,7 +2267,7 @@ static int ace_open(struct net_device *dev)
/*
* Setup the bottom half rx ring refill handler
*/
- tasklet_setup(&ap->ace_tasklet, ace_tasklet);
+ INIT_WORK(&ap->ace_bh_work, ace_bh_work);
return 0;
}
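
The whole acenic conversion follows one recipe, worth stating once since the xgbe hunks below repeat it: the handler's argument becomes a work_struct, the container is recovered with from_work(), and setup/schedule/teardown move to INIT_WORK(), queue_work() on system_bh_wq, and cancel_work_sync(). A minimal sketch of the pattern (struct foo is a stand-in, not driver code):

        #include <linux/workqueue.h>

        struct foo {
                struct work_struct bh_work;     /* was: struct tasklet_struct */
        };

        static void foo_bh_work(struct work_struct *work)
        {
                struct foo *f = from_work(f, work, bh_work);

                /* ... former tasklet body operating on f ... */
        }

        /* setup:    INIT_WORK(&f->bh_work, foo_bh_work);
         * from IRQ: queue_work(system_bh_wq, &f->bh_work);
         * teardown: cancel_work_sync(&f->bh_work);
         */

system_bh_wq runs work items in softirq-like context, so the tasklet's execution semantics are preserved while gaining the workqueue API's cancellation and debugging support.
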
@@ -2301,7 +2301,7 @@ static int ace_close(struct net_device *dev)
cmd.idx = 0;
ace_issue_cmd(regs, &cmd);
- tasklet_kill(&ap->ace_tasklet);
+ cancel_work_sync(&ap->ace_bh_work);
/*
* Make sure one CPU is not processing packets while
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index ca5ce0cbbad1..0e45a97b9c9b 100644
--- a/drivers/net/ethernet/alteon/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -2,7 +2,7 @@
#ifndef _ACENIC_H_
#define _ACENIC_H_
#include <linux/interrupt.h>
-
+#include <linux/workqueue.h>
/*
* Generate TX index update each time, when TX ring is closed.
@@ -667,8 +667,8 @@ struct ace_private
struct rx_desc *rx_mini_ring;
struct rx_desc *rx_return_ring;
- int tasklet_pending, jumbo;
- struct tasklet_struct ace_tasklet;
+ int bh_work_pending, jumbo;
+ struct work_struct ace_bh_work;
struct event *evt_ring;
@@ -776,7 +776,7 @@ static int ace_open(struct net_device *dev);
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int ace_close(struct net_device *dev);
-static void ace_tasklet(struct tasklet_struct *t);
+static void ace_bh_work(struct work_struct *work);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
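The acenic hunks above (and the xgbe, cnic and macb conversions later in this diff) all apply the same mechanical tasklet-to-BH-workqueue pattern. A minimal sketch of that pattern follows; "my_dev" and the my_* names are illustrative stand-ins for the per-driver types, not part of the patch:

/*
 * Sketch only: tasklet -> BH workqueue conversion, assuming a
 * hypothetical driver with one deferred bottom half.
 */
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	int irq;
	struct work_struct bh_work;	/* was: struct tasklet_struct */
};

static void my_bh_work(struct work_struct *work)
{
	struct my_dev *md = from_work(md, work, bh_work);

	/* deferred bottom-half processing for md goes here */
}

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_dev *md = data;

	queue_work(system_bh_wq, &md->bh_work);	/* was: tasklet_schedule() */
	return IRQ_HANDLED;
}

static int my_open(struct my_dev *md)
{
	INIT_WORK(&md->bh_work, my_bh_work);	/* was: tasklet_setup() */
	return request_irq(md->irq, my_isr, 0, "my_dev", md);
}

static void my_close(struct my_dev *md)
{
	free_irq(md->irq, md);
	cancel_work_sync(&md->bh_work);		/* was: tasklet_kill() */
}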
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index c4a4e316683f..5475867708f4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -403,9 +403,9 @@ static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
return false;
}
-static void xgbe_ecc_isr_task(struct tasklet_struct *t)
+static void xgbe_ecc_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, ecc_bh_work);
unsigned int ecc_isr;
bool stop = false;
@@ -465,17 +465,17 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_ecc);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->ecc_bh_work);
else
- xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+ xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
return IRQ_HANDLED;
}
-static void xgbe_isr_task(struct tasklet_struct *t)
+static void xgbe_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, dev_bh_work);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
unsigned int dma_isr, dma_ch_isr;
@@ -582,7 +582,7 @@ isr_done:
/* If there is not a separate ECC irq, handle it here */
if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
- xgbe_ecc_isr_task(&pdata->tasklet_ecc);
+ xgbe_ecc_isr_bh_work(&pdata->ecc_bh_work);
/* If there is not a separate I2C irq, handle it here */
if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
@@ -604,10 +604,10 @@ static irqreturn_t xgbe_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_dev);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->dev_bh_work);
else
- xgbe_isr_task(&pdata->tasklet_dev);
+ xgbe_isr_bh_work(&pdata->dev_bh_work);
return IRQ_HANDLED;
}
@@ -1007,8 +1007,8 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
unsigned int i;
int ret;
- tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
- tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);
+ INIT_WORK(&pdata->dev_bh_work, xgbe_isr_bh_work);
+ INIT_WORK(&pdata->ecc_bh_work, xgbe_ecc_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
netdev_name(netdev), pdata);
@@ -1078,8 +1078,8 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
- tasklet_kill(&pdata->tasklet_dev);
- tasklet_kill(&pdata->tasklet_ecc);
+ cancel_work_sync(&pdata->dev_bh_work);
+ cancel_work_sync(&pdata->ecc_bh_work);
if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index a9ccc4258ee5..7a833894f52a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -274,9 +274,9 @@ static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
}
-static void xgbe_i2c_isr_task(struct tasklet_struct *t)
+static void xgbe_i2c_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_i2c);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, i2c_bh_work);
struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
unsigned int isr;
@@ -321,10 +321,10 @@ static irqreturn_t xgbe_i2c_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_i2c);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->i2c_bh_work);
else
- xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+ xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work);
return IRQ_HANDLED;
}
@@ -369,7 +369,7 @@ static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_i2c_isr_task(&pdata->tasklet_i2c);
+ xgbe_i2c_isr_bh_work(&pdata->i2c_bh_work);
return IRQ_HANDLED;
}
@@ -449,7 +449,7 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
if (pdata->dev_irq != pdata->i2c_irq) {
devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
- tasklet_kill(&pdata->tasklet_i2c);
+ cancel_work_sync(&pdata->i2c_bh_work);
}
}
@@ -464,7 +464,7 @@ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
/* If we have a separate I2C irq, enable it */
if (pdata->dev_irq != pdata->i2c_irq) {
- tasklet_setup(&pdata->tasklet_i2c, xgbe_i2c_isr_task);
+ INIT_WORK(&pdata->i2c_bh_work, xgbe_i2c_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
xgbe_i2c_isr, 0, pdata->i2c_name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4a2dc705b528..07f4f3418d01 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -703,9 +703,9 @@ static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_an_isr_task(struct tasklet_struct *t)
+static void xgbe_an_isr_bh_work(struct work_struct *work)
{
- struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_an);
+ struct xgbe_prv_data *pdata = from_work(pdata, work, an_bh_work);
netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
@@ -727,17 +727,17 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- if (pdata->isr_as_tasklet)
- tasklet_schedule(&pdata->tasklet_an);
+ if (pdata->isr_as_bh_work)
+ queue_work(system_bh_wq, &pdata->an_bh_work);
else
- xgbe_an_isr_task(&pdata->tasklet_an);
+ xgbe_an_isr_bh_work(&pdata->an_bh_work);
return IRQ_HANDLED;
}
static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_an_isr_task(&pdata->tasklet_an);
+ xgbe_an_isr_bh_work(&pdata->an_bh_work);
return IRQ_HANDLED;
}
@@ -1454,7 +1454,7 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
if (pdata->dev_irq != pdata->an_irq) {
devm_free_irq(pdata->dev, pdata->an_irq, pdata);
- tasklet_kill(&pdata->tasklet_an);
+ cancel_work_sync(&pdata->an_bh_work);
}
pdata->phy_if.phy_impl.stop(pdata);
@@ -1477,7 +1477,7 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
/* If we have a separate AN irq, enable it */
if (pdata->dev_irq != pdata->an_irq) {
- tasklet_setup(&pdata->tasklet_an, xgbe_an_isr_task);
+ INIT_WORK(&pdata->an_bh_work, xgbe_an_isr_bh_work);
ret = devm_request_irq(pdata->dev, pdata->an_irq,
xgbe_an_isr, 0, pdata->an_name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index c5e5fac49779..c636999a6a84 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -139,7 +139,7 @@ static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
return ret;
}
- pdata->isr_as_tasklet = 1;
+ pdata->isr_as_bh_work = 1;
pdata->irq_count = ret;
pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
@@ -176,7 +176,7 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
return ret;
}
- pdata->isr_as_tasklet = pdata->pcidev->msi_enabled ? 1 : 0;
+ pdata->isr_as_bh_work = pdata->pcidev->msi_enabled ? 1 : 0;
pdata->irq_count = 1;
pdata->channel_irq_count = 1;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f01a1e566da6..d85386cac8d1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -1298,11 +1298,11 @@ struct xgbe_prv_data {
unsigned int lpm_ctrl; /* CTRL1 for resume */
- unsigned int isr_as_tasklet;
- struct tasklet_struct tasklet_dev;
- struct tasklet_struct tasklet_ecc;
- struct tasklet_struct tasklet_i2c;
- struct tasklet_struct tasklet_an;
+ unsigned int isr_as_bh_work;
+ struct work_struct dev_bh_work;
+ struct work_struct ecc_bh_work;
+ struct work_struct i2c_bh_work;
+ struct work_struct an_bh_work;
struct dentry *xgbe_debugfs;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index d0aecd1d7357..440ff4616fec 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -266,7 +266,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names);
const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names);
char tc_string[8];
- int tc;
+ unsigned int tc;
memset(tc_string, 0, sizeof(tc_string));
memcpy(p, aq_ethtool_stat_names,
@@ -275,22 +275,20 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (tc = 0; tc < cfg->tcs; tc++) {
if (cfg->is_qos)
- snprintf(tc_string, 8, "TC%d ", tc);
+ snprintf(tc_string, 8, "TC%u ", tc);
for (i = 0; i < cfg->vecs; i++) {
for (si = 0; si < rx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_rx_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
- p += ETH_GSTRING_LEN;
}
for (si = 0; si < tx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_tx_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
- p += ETH_GSTRING_LEN;
}
}
}
@@ -305,20 +303,18 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (i = 0; i < max(rx_ring_cnt, tx_ring_cnt); i++) {
for (si = 0; si < rx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_rx_stat_names[si],
tc_string,
i ? PTP_HWST_RING_IDX : ptp_ring_idx);
- p += ETH_GSTRING_LEN;
}
if (i >= tx_ring_cnt)
continue;
for (si = 0; si < tx_stat_cnt; si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_ethtool_queue_tx_stat_names[si],
tc_string,
i ? PTP_HWST_RING_IDX : ptp_ring_idx);
- p += ETH_GSTRING_LEN;
}
}
}
@@ -338,9 +334,8 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_txsc_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_txsc_stat_names[si], i);
- p += ETH_GSTRING_LEN;
}
aq_txsc = &nic->macsec_cfg->aq_txsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
@@ -349,10 +344,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_txsa_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_txsa_stat_names[si],
i, sa);
- p += ETH_GSTRING_LEN;
}
}
}
@@ -369,10 +363,9 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (si = 0;
si < ARRAY_SIZE(aq_macsec_rxsa_stat_names);
si++) {
- snprintf(p, ETH_GSTRING_LEN,
+ ethtool_sprintf(&p,
aq_macsec_rxsa_stat_names[si],
i, sa);
- p += ETH_GSTRING_LEN;
}
}
}
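ethtool_sprintf() formats into the current ETH_GSTRING_LEN slot and advances the cursor itself, which is what makes the manual "p += ETH_GSTRING_LEN" bookkeeping above removable. A hypothetical fragment showing the idiom:

#include <linux/ethtool.h>

static void my_get_strings(u8 *data, unsigned int nqueues)
{
	u8 *p = data;
	unsigned int i;

	/* each call fills one string slot and moves p forward */
	for (i = 0; i < nqueues; i++)
		ethtool_sprintf(&p, "queue%u_rx_packets", i);
}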
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index a38be924cdaa..db2a8ade6205 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -380,9 +380,7 @@ struct ag71xx {
int mac_idx;
struct reset_control *mdio_reset;
- struct mii_bus *mii_bus;
struct clk *clk_mdio;
- struct clk *clk_eth;
};
static int ag71xx_desc_empty(struct ag71xx_desc *desc)
@@ -447,6 +445,13 @@ static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}
+static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ return phylink_mii_ioctl(ag->phylink, ifr, cmd);
+}
+
static void ag71xx_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
@@ -690,31 +695,21 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
int err;
np = dev->of_node;
- ag->mii_bus = NULL;
- ag->clk_mdio = devm_clk_get(dev, "mdio");
+ ag->clk_mdio = devm_clk_get_enabled(dev, "mdio");
if (IS_ERR(ag->clk_mdio)) {
netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
return PTR_ERR(ag->clk_mdio);
}
- err = clk_prepare_enable(ag->clk_mdio);
- if (err) {
- netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
- return err;
- }
-
mii_bus = devm_mdiobus_alloc(dev);
- if (!mii_bus) {
- err = -ENOMEM;
- goto mdio_err_put_clk;
- }
+ if (!mii_bus)
+ return -ENOMEM;
ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
if (IS_ERR(ag->mdio_reset)) {
netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
- err = PTR_ERR(ag->mdio_reset);
- goto mdio_err_put_clk;
+ return PTR_ERR(ag->mdio_reset);
}
mii_bus->name = "ag71xx_mdio";
@@ -733,25 +728,12 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
}
mnp = of_get_child_by_name(np, "mdio");
- err = of_mdiobus_register(mii_bus, mnp);
+ err = devm_of_mdiobus_register(dev, mii_bus, mnp);
of_node_put(mnp);
if (err)
- goto mdio_err_put_clk;
-
- ag->mii_bus = mii_bus;
+ return err;
return 0;
-
-mdio_err_put_clk:
- clk_disable_unprepare(ag->clk_mdio);
- return err;
-}
-
-static void ag71xx_mdio_remove(struct ag71xx *ag)
-{
- if (ag->mii_bus)
- mdiobus_unregister(ag->mii_bus);
- clk_disable_unprepare(ag->clk_mdio);
}
static void ag71xx_hw_stop(struct ag71xx *ag)
@@ -1799,7 +1781,7 @@ static const struct net_device_ops ag71xx_netdev_ops = {
.ndo_open = ag71xx_open,
.ndo_stop = ag71xx_stop,
.ndo_start_xmit = ag71xx_hard_start_xmit,
- .ndo_eth_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = ag71xx_do_ioctl,
.ndo_tx_timeout = ag71xx_tx_timeout,
.ndo_change_mtu = ag71xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
@@ -1816,6 +1798,7 @@ static int ag71xx_probe(struct platform_device *pdev)
const struct ag71xx_dcfg *dcfg;
struct net_device *ndev;
struct resource *res;
+ struct clk *clk_eth;
int tx_size, err, i;
struct ag71xx *ag;
@@ -1846,10 +1829,10 @@ static int ag71xx_probe(struct platform_device *pdev)
return -EINVAL;
}
- ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
- if (IS_ERR(ag->clk_eth)) {
+ clk_eth = devm_clk_get_enabled(&pdev->dev, "eth");
+ if (IS_ERR(clk_eth)) {
netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
- return PTR_ERR(ag->clk_eth);
+ return PTR_ERR(clk_eth);
}
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1912,6 +1895,8 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc->next = (u32)ag->stop_desc_dma;
err = of_get_ethdev_address(np, ndev);
+ if (err == -EPROBE_DEFER)
+ return err;
if (err) {
netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
eth_hw_addr_random(ndev);
@@ -1926,33 +1911,27 @@ static int ag71xx_probe(struct platform_device *pdev)
netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
AG71XX_NAPI_WEIGHT);
- err = clk_prepare_enable(ag->clk_eth);
- if (err) {
- netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
- return err;
- }
-
ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
ag71xx_hw_init(ag);
err = ag71xx_mdio_probe(ag);
if (err)
- goto err_put_clk;
+ return err;
platform_set_drvdata(pdev, ndev);
err = ag71xx_phylink_setup(ag);
if (err) {
netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
- goto err_mdio_remove;
+ return err;
}
- err = register_netdev(ndev);
+ err = devm_register_netdev(&pdev->dev, ndev);
if (err) {
netif_err(ag, probe, ndev, "unable to register net device\n");
platform_set_drvdata(pdev, NULL);
- goto err_mdio_remove;
+ return err;
}
netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
@@ -1960,27 +1939,6 @@ static int ag71xx_probe(struct platform_device *pdev)
phy_modes(ag->phy_if_mode));
return 0;
-
-err_mdio_remove:
- ag71xx_mdio_remove(ag);
-err_put_clk:
- clk_disable_unprepare(ag->clk_eth);
- return err;
-}
-
-static void ag71xx_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct ag71xx *ag;
-
- if (!ndev)
- return;
-
- ag = netdev_priv(ndev);
- unregister_netdev(ndev);
- ag71xx_mdio_remove(ag);
- clk_disable_unprepare(ag->clk_eth);
- platform_set_drvdata(pdev, NULL);
}
static const u32 ar71xx_fifo_ar7100[] = {
@@ -2064,10 +2022,10 @@ static const struct of_device_id ag71xx_match[] = {
{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
{}
};
+MODULE_DEVICE_TABLE(of, ag71xx_match);
static struct platform_driver ag71xx_driver = {
.probe = ag71xx_probe,
- .remove_new = ag71xx_remove,
.driver = {
.name = "ag71xx",
.of_match_table = ag71xx_match,
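With every resource device-managed, the ag71xx error paths collapse to plain returns and the .remove_new callback can be dropped entirely, as done above. A condensed sketch of the resulting probe shape; my_probe and its parameters are hypothetical, standing in for the driver's own setup state:

#include <linux/clk.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev, struct mii_bus *bus,
		    struct device_node *mdio_np, struct net_device *ndev)
{
	struct clk *clk;
	int err;

	clk = devm_clk_get_enabled(&pdev->dev, "eth");	/* get + enable; undone on detach */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	err = devm_of_mdiobus_register(&pdev->dev, bus, mdio_np);
	if (err)
		return err;			/* bus unregistered automatically */

	return devm_register_netdev(&pdev->dev, ndev);	/* no .remove needed */
}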
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 4e9215bce4ad..a018f251d198 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -868,6 +868,8 @@
#define DORQ_REG_VF_TYPE_VALUE_0 0x170258
#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340
+extern const u32 dmae_reg_go_c[];
+
/* [RW 4] Initial activity counter value on the load request; when the
shortcut is done. */
#define DORQ_REG_SHRT_ACT_CNT 0x170070
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 77d4cb4ad782..12198fc3ab22 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2652,10 +2652,10 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
/* vlan */
if (bulletin->valid_bitmap & (1 << VLAN_VALID))
/* vlan configured by ndo so its in bulletin board */
- memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+ ivi->vlan = bulletin->vlan;
else
/* function has not been loaded yet. Show vlans as 0s */
- memset(&ivi->vlan, 0, VLAN_HLEN);
+ ivi->vlan = 0;
mutex_unlock(&bp->vfdb->bulletin_mutex);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 2bb133ae61c3..ba6729f2f9c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -23,8 +23,6 @@
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"
-extern const u32 dmae_reg_go_c[];
-
/* Statistics */
/*
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 04a623b3eee2..b26a7e2aa132 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6579,7 +6579,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req->lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
+ vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ req->mru = cpu_to_le16(vnic->mru);
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
@@ -6715,6 +6716,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
+ bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
}
hwrm_req_drop(bp, req);
return rc;
@@ -10089,6 +10092,26 @@ vnic_setup_err:
return rc;
}
+int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u8 valid)
+{
+ struct hwrm_vnic_update_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
+ req->mru = cpu_to_le16(vnic->mru);
+
+ req->enables = cpu_to_le32(valid);
+
+ return hwrm_req_send(bp, req);
+}
+
int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc;
@@ -15154,7 +15177,8 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr, *clone;
struct bnxt_cp_ring_info *cpr;
- int rc;
+ struct bnxt_vnic_info *vnic;
+ int i, rc;
rxr = &bp->rx_ring[idx];
clone = qmem;
@@ -15179,11 +15203,16 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
- napi_enable(&rxr->bnapi->napi);
-
cpr = &rxr->bnapi->cp_ring;
cpr->sw_stats->rx.rx_resets++;
+ for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ vnic = &bp->vnic_info[i];
+ vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+ }
+
return 0;
err_free_hwrm_rx_ring:
@@ -15195,9 +15224,17 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr;
+ struct bnxt_vnic_info *vnic;
+ int i;
+
+ for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ vnic = &bp->vnic_info[i];
+ vnic->mru = 0;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+ }
rxr = &bp->rx_ring[idx];
- napi_disable(&rxr->bnapi->napi);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
rxr->rx_next_cons = 0;
@@ -15681,7 +15718,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->stat_ops = &bnxt_stat_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
- dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp);
@@ -15862,6 +15898,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+ if (BNXT_SUPPORTS_QUEUE_API(bp))
+ dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
rc = register_netdev(dev);
if (rc)
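The queue_stop()/queue_start() hunks above rely on a quiesce/resume convention: writing an MRU of 0 via HWRM_VNIC_UPDATE tells firmware to stop placing packets on the VNIC while the rx ring is torn down, and restoring the real MRU re-enables it. A condensed sketch of that pair of calls, using the helpers introduced by this patch (the my_* wrapper names are illustrative):

#include "bnxt.h"

static void my_quiesce_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	vnic->mru = 0;		/* firmware drops rx for this VNIC */
	bnxt_hwrm_vnic_update(bp, vnic, VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
}

static void my_resume_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
	bnxt_hwrm_vnic_update(bp, vnic, VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
}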
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 059a6f81c1a8..610c0fe468ad 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1250,6 +1250,7 @@ struct bnxt_vnic_info {
#define BNXT_MAX_CTX_PER_VNIC 8
u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
+ u16 mru;
#define BNXT_MAX_UC_ADDRS 4
struct bnxt_l2_filter *l2_filters[BNXT_MAX_UC_ADDRS];
/* index 0 always dev_addr */
@@ -2437,6 +2438,7 @@ struct bnxt {
#define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37)
#define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
+ #define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
u32 fw_dbg_cap;
@@ -2449,6 +2451,9 @@ struct bnxt {
#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \
(BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX))
+#define BNXT_SUPPORTS_QUEUE_API(bp) \
+ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
+ ((bp)->fw_cap & BNXT_FW_CAP_VNIC_RE_FLUSH))
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
@@ -2837,6 +2842,8 @@ int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
+int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u8 valid);
int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 156c2404854f..127b7015f676 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -64,9 +64,9 @@ static const struct file_operations debugfs_dim_fops = {
static void debugfs_dim_ring_init(struct dim *dim, int ring_idx,
struct dentry *dd)
{
- static char qname[16];
+ static char qname[12];
- snprintf(qname, 10, "%d", ring_idx);
+ snprintf(qname, sizeof(qname), "%d", ring_idx);
debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops);
}
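The debugfs fix ties the snprintf() bound to the buffer declaration so the two cannot drift apart, and sizes the buffer for any 32-bit ring index ("-2147483648" plus the NUL terminator is 12 bytes). Hypothetical fragment:

#include <linux/kernel.h>

struct my_ring_dbg {
	char qname[12];		/* fits the widest int, "-2147483648" */
};

static void my_set_qname(struct my_ring_dbg *dbg, int ring_idx)
{
	/* sizeof-based bound: resizing qname updates the limit too */
	snprintf(dbg->qname, sizeof(dbg->qname), "%d", ring_idx);
}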
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 4cf9bf8b01b0..194f47316c77 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -4157,7 +4157,7 @@ static void bnxt_get_pkgver(struct net_device *dev)
if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
len = strlen(bp->fw_ver_str);
- snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
+ snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
"/pkg %s", buf);
}
}
@@ -5285,7 +5285,7 @@ void bnxt_ethtool_free(struct bnxt *bp)
const struct ethtool_ops bnxt_ethtool_ops = {
.cap_link_lanes_supported = 1,
- .cap_rss_ctx_supported = 1,
+ .rxfh_per_ctx_key = 1,
.rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1,
.rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5,
.rxfh_priv_size = sizeof(struct bnxt_rss_ctx),
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index f219709f9563..f8ef6f1a1964 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -403,6 +403,9 @@ struct cmd_nums {
#define HWRM_FUNC_LAG_UPDATE 0x1b1UL
#define HWRM_FUNC_LAG_FREE 0x1b2UL
#define HWRM_FUNC_LAG_QCFG 0x1b3UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_ADD 0x1c2UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_DELETE 0x1c3UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_QUERY 0x1c4UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -430,6 +433,9 @@ struct cmd_nums {
#define HWRM_STAT_GENERIC_QSTATS 0x218UL
#define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
#define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
+ #define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_PORT_POE_CFG 0x230UL
+ #define HWRM_PORT_POE_QCFG 0x231UL
#define HWRM_UDCC_QCAPS 0x258UL
#define HWRM_UDCC_CFG 0x259UL
#define HWRM_UDCC_QCFG 0x25aUL
@@ -439,6 +445,9 @@ struct cmd_nums {
#define HWRM_UDCC_COMP_CFG 0x25eUL
#define HWRM_UDCC_COMP_QCFG 0x25fUL
#define HWRM_UDCC_COMP_QUERY 0x260UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -500,10 +509,8 @@ struct cmd_nums {
#define HWRM_TFC_IF_TBL_GET 0x399UL
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
#define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x39cUL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x39dUL
- #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x39eUL
#define HWRM_SV 0x400UL
+ #define HWRM_DBG_SERDES_TEST 0xff0eUL
#define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
@@ -533,6 +540,9 @@ struct cmd_nums {
#define HWRM_DBG_USEQ_RUN 0xff29UL
#define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
#define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL
+ #define HWRM_DBG_PTRACE 0xff2dUL
+ #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL
#define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
#define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
#define HWRM_NVM_DEFRAG 0xffecUL
@@ -582,6 +592,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
#define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
#define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
+ #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -613,8 +624,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 44
-#define HWRM_VERSION_STR "1.10.3.44"
+#define HWRM_VERSION_RSVD 68
+#define HWRM_VERSION_STR "1.10.3.68"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -850,7 +861,10 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
#define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
#define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x51UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1691,7 +1705,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:1088b/136B) */
+/* hwrm_func_qcaps_output (size:1152b/144B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1824,6 +1838,9 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1845,7 +1862,9 @@ struct hwrm_func_qcaps_output {
__le32 roce_vf_max_qp;
__le32 roce_vf_max_srq;
__le32 roce_vf_max_gid;
- u8 unused_3[3];
+ __le32 flags_ext3;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ u8 unused_3[7];
u8 valid;
};
@@ -2021,7 +2040,8 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__le16 host_mtu;
- u8 unused_3[2];
+ __le16 flags2;
+ #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
u8 unused_4[2];
u8 port_kdnet_mode;
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
@@ -3671,33 +3691,38 @@ struct hwrm_func_backing_store_cfg_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
__le16 instance;
__le32 flags;
#define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
@@ -3772,6 +3797,11 @@ struct hwrm_func_backing_store_qcfg_v2_input {
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3785,29 +3815,34 @@ struct hwrm_func_backing_store_qcfg_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
__le16 instance;
__le32 flags;
__le64 page_dir;
@@ -3883,6 +3918,13 @@ struct ts_split_entries {
__le32 rsvd2[2];
};
+/* ck_split_entries (size:128b/16B) */
+struct ck_split_entries {
+ __le32 num_quic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
struct hwrm_func_backing_store_qcaps_v2_input {
__le16 req_type;
@@ -3891,33 +3933,38 @@ struct hwrm_func_backing_store_qcaps_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
u8 rsvd[6];
};
@@ -3928,39 +3975,45 @@ struct hwrm_func_backing_store_qcaps_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
__le16 entry_size;
__le32 flags;
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL
__le32 instance_bit_map;
u8 ctx_init_value;
u8 ctx_init_offset;
@@ -4410,6 +4463,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
__le32 preemphasis;
@@ -4941,7 +4995,9 @@ struct hwrm_port_qstats_output {
__le16 resp_len;
__le16 tx_stat_size;
__le16 rx_stat_size;
- u8 unused_0[3];
+ u8 flags;
+ #define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL
+ u8 unused_0[2];
u8 valid;
};
@@ -5074,6 +5130,7 @@ struct hwrm_port_qstats_ext_output {
__le16 total_active_cos_queues;
u8 flags;
#define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL
u8 valid;
};
@@ -6510,6 +6567,43 @@ struct hwrm_vnic_alloc_output {
u8 valid;
};
+/* hwrm_vnic_update_input (size:256b/32B) */
+struct hwrm_vnic_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 enables;
+ #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL
+ #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL
+ #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL
+ u8 vnic_state;
+ #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP
+ u8 metadata_format_type;
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4
+ __le16 mru;
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_update_output (size:128b/16B) */
+struct hwrm_vnic_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_vnic_free_input (size:192b/24B) */
struct hwrm_vnic_free_input {
__le16 req_type;
@@ -6640,6 +6734,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
#define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
#define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -7484,23 +7579,24 @@ struct hwrm_cfa_l2_filter_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP
__le32 enables;
#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
#define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
@@ -8766,7 +8862,7 @@ struct ctx_hw_stats_ext {
__le64 rx_tpa_events;
};
-/* hwrm_stat_ctx_alloc_input (size:320b/40B) */
+/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
struct hwrm_stat_ctx_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -8776,13 +8872,16 @@ struct hwrm_stat_ctx_alloc_input {
__le64 stats_dma_addr;
__le32 update_period_ms;
u8 stat_ctx_flags;
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL
u8 unused_0;
__le16 stats_dma_length;
__le16 flags;
#define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
__le16 steering_tag;
- __le32 unused_1;
+ __le32 stat_ctx_id;
+ __le16 alloc_seq_id;
+ u8 unused_1[6];
};
/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
@@ -9650,10 +9749,13 @@ struct hwrm_dbg_qcaps_output {
__le32 coredump_component_disable_caps;
#define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
__le32 flags;
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
- #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
+ #define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
u8 unused_1[3];
u8 valid;
};
@@ -10092,16 +10194,19 @@ struct hwrm_nvm_erase_dir_entry_output {
u8 valid;
};
-/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
+/* hwrm_nvm_get_dev_info_input (size:192b/24B) */
struct hwrm_nvm_get_dev_info_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
};
-/* hwrm_nvm_get_dev_info_output (size:704b/88B) */
+/* hwrm_nvm_get_dev_info_output (size:768b/96B) */
struct hwrm_nvm_get_dev_info_output {
__le16 error_code;
__le16 req_type;
@@ -10135,6 +10240,10 @@ struct hwrm_nvm_get_dev_info_output {
__le16 netctrl_fw_minor;
__le16 netctrl_fw_build;
__le16 netctrl_fw_patch;
+ __le16 srt2_fw_major;
+ __le16 srt2_fw_minor;
+ __le16 srt2_fw_build;
+ __le16 srt2_fw_patch;
u8 unused_0[7];
u8 valid;
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index c2b4188a1ef1..a9040c42d2ff 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -31,6 +31,7 @@
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
+#include <linux/workqueue.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
@@ -3015,9 +3016,9 @@ static int cnic_service_bnx2(void *data, void *status_blk)
return cnic_service_bnx2_queues(dev);
}
-static void cnic_service_bnx2_msix(struct tasklet_struct *t)
+static void cnic_service_bnx2_msix(struct work_struct *work)
{
- struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
cp->last_status_idx = cnic_service_bnx2_queues(dev);
@@ -3036,7 +3037,7 @@ static void cnic_doirq(struct cnic_dev *dev)
prefetch(cp->status_blk.gen);
prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
- tasklet_schedule(&cp->cnic_irq_task);
+ queue_work(system_bh_wq, &cp->cnic_irq_bh_work);
}
}
@@ -3140,9 +3141,9 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
return last_status;
}
-static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
+static void cnic_service_bnx2x_bh_work(struct work_struct *work)
{
- struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
struct bnx2x *bp = netdev_priv(dev->netdev);
u32 status_idx, new_status_idx;
@@ -4428,7 +4429,7 @@ static void cnic_free_irq(struct cnic_dev *dev)
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
cp->disable_int_sync(dev);
- tasklet_kill(&cp->cnic_irq_task);
+ cancel_work_sync(&cp->cnic_irq_bh_work);
free_irq(ethdev->irq_arr[0].vector, dev);
}
}
@@ -4441,7 +4442,7 @@ static int cnic_request_irq(struct cnic_dev *dev)
err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
if (err)
- tasklet_disable(&cp->cnic_irq_task);
+ disable_work_sync(&cp->cnic_irq_bh_work);
return err;
}
@@ -4464,7 +4465,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
cp->last_status_idx = cp->status_blk.bnx2->status_idx;
- tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
+ INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2_msix);
err = cnic_request_irq(dev);
if (err)
return err;
@@ -4873,7 +4874,7 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
struct cnic_eth_dev *ethdev = cp->ethdev;
int err = 0;
- tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
+ INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2x_bh_work);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
err = cnic_request_irq(dev);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index fedc84ada937..1a314a75d2d2 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -268,7 +268,7 @@ struct cnic_local {
u32 bnx2x_igu_sb_id;
u32 int_num;
u32 last_status_idx;
- struct tasklet_struct cnic_irq_task;
+ struct work_struct cnic_irq_bh_work;
struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
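
The cnic conversion above (and the macb one below) follows the generic tasklet-to-BH-workqueue recipe. A minimal sketch of that recipe, assuming a hypothetical "struct foo" driver context; the four APIs shown (INIT_WORK, from_work, queue_work on system_bh_wq, cancel_work_sync) are exactly the ones these hunks substitute for their tasklet counterparts:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct foo {
	struct work_struct irq_bh_work;		/* was: struct tasklet_struct */
};

static void foo_bh_work(struct work_struct *work)
{
	struct foo *p = from_work(p, work, irq_bh_work);	/* was: from_tasklet() */

	/* deferred interrupt handling runs here, in BH context */
}

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo *p = data;

	queue_work(system_bh_wq, &p->irq_bh_work);	/* was: tasklet_schedule() */
	return IRQ_HANDLED;
}

static void foo_setup(struct foo *p)
{
	INIT_WORK(&p->irq_bh_work, foo_bh_work);	/* was: tasklet_setup() */
}

static void foo_teardown(struct foo *p)
{
	cancel_work_sync(&p->irq_bh_work);		/* was: tasklet_kill() */
}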
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index ea71612f6b36..5740c98d8c9f 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -13,6 +13,7 @@
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/phy/phy.h>
+#include <linux/workqueue.h>
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
#define MACB_EXT_DESC
@@ -1330,7 +1331,7 @@ struct macb {
spinlock_t rx_fs_lock;
unsigned int max_tuples;
- struct tasklet_struct hresp_err_tasklet;
+ struct work_struct hresp_err_bh_work;
int rx_bd_rd_prefetch;
int tx_bd_rd_prefetch;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index dcd3f54ed0cf..127bb3208034 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1792,9 +1792,9 @@ static int macb_tx_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void macb_hresp_error_task(struct tasklet_struct *t)
+static void macb_hresp_error_task(struct work_struct *work)
{
- struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
+ struct macb *bp = from_work(bp, work, hresp_err_bh_work);
struct net_device *dev = bp->dev;
struct macb_queue *queue;
unsigned int q;
@@ -1994,7 +1994,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_BIT(HRESP)) {
- tasklet_schedule(&bp->hresp_err_tasklet);
+ queue_work(system_bh_wq, &bp->hresp_err_bh_work);
netdev_err(dev, "DMA bus error: HRESP not OK\n");
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -5119,12 +5119,12 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_free_netdev;
}
- /* MTU range: 68 - 1500 or 10240 */
+ /* MTU range: 68 - 1518 or 10240 */
dev->min_mtu = GEM_MTU_MIN_SIZE;
if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
else
- dev->max_mtu = ETH_DATA_LEN;
+ dev->max_mtu = 1536 - ETH_HLEN - ETH_FCS_LEN;
if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
@@ -5172,7 +5172,7 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_unregister_mdio;
}
- tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
+ INIT_WORK(&bp->hresp_err_bh_work, macb_hresp_error_task);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
@@ -5216,7 +5216,7 @@ static void macb_remove(struct platform_device *pdev)
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
- tasklet_kill(&bp->hresp_err_tasklet);
+ cancel_work_sync(&bp->hresp_err_bh_work);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (!pm_runtime_suspended(&pdev->dev)) {
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
index 2d06097d3f61..40f529d0bc4c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
@@ -43,6 +43,4 @@ int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct);
int cn23xx_setup_octeon_vf_device(struct octeon_device *oct);
u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
-
-void cn23xx_dump_vf_initialized_regs(struct octeon_device *oct);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index 8ed57134ee0c..129c8b84f549 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -86,7 +86,6 @@ u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
void lio_cn6xxx_enable_interrupt(struct octeon_device *oct, u8 unused);
void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, u8 unused);
-void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
struct octeon_reg_list *reg_list);
u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index fb380b4f3e02..d26364c2ac81 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -804,13 +804,6 @@ int octeon_init_consoles(struct octeon_device *oct);
int octeon_add_console(struct octeon_device *oct, u32 console_num,
char *dbg_enb);
-/** write or read from a console */
-int octeon_console_write(struct octeon_device *oct, u32 console_num,
- char *buffer, u32 write_request_size, u32 flags);
-int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
-
-int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
-
/** Removes all attached consoles. */
void octeon_remove_consoles(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index c9b19e624dce..232ae72c0e37 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -395,8 +395,6 @@ int octeon_register_dispatch_fn(struct octeon_device *oct,
void *octeon_get_dispatch_arg(struct octeon_device *oct,
u16 opcode, u16 subcode);
-void octeon_droq_print_stats(void);
-
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
int octeon_create_droq(struct octeon_device *oct, u32 q_no,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index bebf3bd349c6..a04f36a0e1a0 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -378,9 +378,6 @@ int octeon_send_command(struct octeon_device *oct, u32 iq_no,
u32 force_db, void *cmd, void *buf,
u32 datasize, u32 reqtype);
-void octeon_dump_soft_command(struct octeon_device *oct,
- struct octeon_soft_command *sc);
-
void octeon_prepare_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc,
u8 opcode, u8 subcode,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8453defc296c..b7531041c56d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -359,8 +359,6 @@ int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
-void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
-u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index cdea49392185..84f16ababaee 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -219,9 +219,7 @@
void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid, u64 mac, u8 vf);
void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf);
void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode);
-void octeon_mdiobus_force_mod_depencency(void);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
-void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
unsigned bgx_get_map(int node);
int bgx_get_lmac_count(int node, int bgx);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
index 9050568a034c..64663112cad8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -242,7 +242,7 @@ struct cxgb4_next_header {
* field's value to jump to next header such as IHL field
* in IPv4 header.
*/
- struct tc_u32_sel sel;
+ struct tc_u32_sel_hdr sel;
struct tc_u32_key key;
/* location of jump to make */
const struct cxgb4_match_field *jump;
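
The tc_u32_sel to tc_u32_sel_hdr switch above is an instance of the flexible-array header split: a struct whose last member is a flexible array must not be embedded ahead of other members, so the fixed leading fields are broken out into a *_hdr type for embedding. A generic sketch with hypothetical names:

#include <linux/types.h>

struct sel_hdr {		/* fixed fields only, safe to embed anywhere */
	u8 flags;
	u8 nkeys;
};

struct sel {
	struct sel_hdr hdr;
	u32 keys[];		/* flexible array member, must stay last */
};

struct rule {
	struct sel_hdr sel;	/* OK: the header carries no flexible array */
	u32 key;		/* would alias sel.keys[] if struct sel were embedded */
	const void *jump;
};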
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 854d87e1125c..2e3973a32d9d 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -342,10 +342,10 @@ int cxgbi_ppm_release(struct cxgbi_ppm *ppm)
}
EXPORT_SYMBOL(cxgbi_ppm_release);
-static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
- unsigned int *pcpu_ppmax)
+static struct cxgbi_ppm_pool __percpu *
+ppm_alloc_cpu_pool(unsigned int *total, unsigned int *pcpu_ppmax)
{
- struct cxgbi_ppm_pool *pools;
+ struct cxgbi_ppm_pool __percpu *pools;
unsigned int ppmax = (*total) / num_possible_cpus();
unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
unsigned int bmap;
@@ -392,7 +392,7 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
unsigned int iscsi_edram_size)
{
struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
- struct cxgbi_ppm_pool *pool = NULL;
+ struct cxgbi_ppm_pool __percpu *pool = NULL;
unsigned int pool_index_max = 0;
unsigned int ppmax_pool = 0;
unsigned int ppod_bmap_size;
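
The __percpu markings added above are sparse address-space annotations: a per-cpu pointer must keep the attribute through every declaration and be dereferenced only via the per-cpu accessors. A self-contained sketch, assuming a hypothetical stats structure:

#include <linux/percpu.h>

struct foo_stats {
	u64 packets;
};

static struct foo_stats __percpu *foo_stats_alloc(void)
{
	return alloc_percpu(struct foo_stats);	/* returns a __percpu pointer */
}

static void foo_stats_inc(struct foo_stats __percpu *s)
{
	this_cpu_inc(s->packets);		/* accessor, not a raw dereference */
}

static u64 foo_stats_sum(struct foo_stats __percpu *s)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(s, cpu)->packets;
	return sum;
}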
diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c
index bcfe52c11804..59ea48d4c9de 100644
--- a/drivers/net/ethernet/davicom/dm9051.c
+++ b/drivers/net/ethernet/davicom/dm9051.c
@@ -1235,6 +1235,7 @@ static const struct of_device_id dm9051_match_table[] = {
{ .compatible = "davicom,dm9051" },
{}
};
+MODULE_DEVICE_TABLE(of, dm9051_match_table);
static const struct spi_device_id dm9051_id_table[] = {
{ "dm9051", 0 },
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 7bfeae04b52b..d0ea92607870 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1842,7 +1842,7 @@ static int rio_resume(struct device *device)
return 0;
}
-static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
#define RIO_PM_OPS (&rio_pm_ops)
#else
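
DEFINE_SIMPLE_DEV_PM_OPS() differs from the SIMPLE_DEV_PM_OPS() it replaces in that the callbacks stay referenced even when CONFIG_PM_SLEEP is off, so they no longer need __maybe_unused or #ifdef guards; pairing with pm_sleep_ptr()/pm_ptr() lets the compiler discard everything in !PM builds. A minimal sketch for a hypothetical driver:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	return 0;	/* quiesce hardware */
}

static int foo_resume(struct device *dev)
{
	return 0;	/* reinitialize hardware */
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= pm_sleep_ptr(&foo_pm_ops),	/* NULL when CONFIG_PM_SLEEP=n */
	},
};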
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 4c546c3aef0f..f3cc14cc757d 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -24,6 +24,7 @@
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/of_net.h>
+#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ncsi.h>
@@ -50,6 +51,15 @@
#define FTGMAC_100MHZ 100000000
#define FTGMAC_25MHZ 25000000
+/* For NC-SI to register a fixed-link phy device */
+static struct fixed_phy_status ncsi_phy_status = {
+ .link = 1,
+ .speed = SPEED_100,
+ .duplex = DUPLEX_FULL,
+ .pause = 0,
+ .asym_pause = 0
+};
+
struct ftgmac100 {
/* Registers */
struct resource *res;
@@ -1541,7 +1551,8 @@ static int ftgmac100_open(struct net_device *netdev)
if (netdev->phydev) {
/* If we have a PHY, start polling */
phy_start(netdev->phydev);
- } else if (priv->use_ncsi) {
+ }
+ if (priv->use_ncsi) {
/* If using NC-SI, set our carrier on and start the stack */
netif_carrier_on(netdev);
@@ -1554,6 +1565,7 @@ static int ftgmac100_open(struct net_device *netdev)
return 0;
err_ncsi:
+ phy_stop(netdev->phydev);
napi_disable(&priv->napi);
netif_stop_queue(netdev);
err_alloc:
@@ -1587,7 +1599,7 @@ static int ftgmac100_stop(struct net_device *netdev)
netif_napi_del(&priv->napi);
if (netdev->phydev)
phy_stop(netdev->phydev);
- else if (priv->use_ncsi)
+ if (priv->use_ncsi)
ncsi_stop_dev(priv->ndev);
ftgmac100_stop_hw(priv);
@@ -1725,6 +1737,9 @@ static void ftgmac100_phy_disconnect(struct net_device *netdev)
phy_disconnect(netdev->phydev);
if (of_phy_is_fixed_link(priv->dev->of_node))
of_phy_deregister_fixed_link(priv->dev->of_node);
+
+ if (priv->use_ncsi)
+ fixed_phy_unregister(netdev->phydev);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
@@ -1802,6 +1817,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
struct resource *res;
int irq;
struct net_device *netdev;
+ struct phy_device *phydev;
struct ftgmac100 *priv;
struct device_node *np;
int err = 0;
@@ -1889,6 +1905,14 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_phy_connect;
}
+
+ phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, NULL);
+ err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (err) {
+ dev_err(&pdev->dev, "Connecting PHY failed\n");
+ goto err_phy_connect;
+ }
} else if (np && of_phy_is_fixed_link(np)) {
struct phy_device *phy;
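
One hedged caveat on the NC-SI hunk above, not part of this patch: fixed_phy_register() can fail and return an ERR_PTR, so a defensive version of that path would validate phydev before handing it to phy_connect_direct(), along these lines:

	phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, NULL);
	if (IS_ERR(phydev)) {
		dev_err(&pdev->dev, "failed to register fixed PHY device\n");
		err = PTR_ERR(phydev);
		goto err_phy_connect;
	}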
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index cfe6b57b1da0..5d99cfb4e994 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -3156,8 +3156,9 @@ static void dpaa_napi_del(struct net_device *net_dev)
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- netif_napi_del(&percpu_priv->np.napi);
+ __netif_napi_del(&percpu_priv->np.napi);
}
+ synchronize_net();
}
static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
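
The dpaa change above relies on the split deletion API: netif_napi_del() is __netif_napi_del() plus one RCU grace period per call, so batching the deletions and issuing a single synchronize_net() amortizes the wait across all per-CPU instances. The idiom in isolation, for a hypothetical array of NAPI contexts:

#include <linux/netdevice.h>

static void foo_napi_del_all(struct napi_struct *napis, int n)
{
	int i;

	for (i = 0; i < n; i++)
		__netif_napi_del(&napis[i]);	/* unlink, no grace period yet */

	synchronize_net();	/* one grace period covers all n instances */
}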
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a923cb95cdc6..8c3bf0faba63 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -4606,7 +4606,7 @@ fec_drv_remove(struct platform_device *pdev)
free_netdev(ndev);
}
-static int __maybe_unused fec_suspend(struct device *dev)
+static int fec_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4659,7 +4659,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused fec_resume(struct device *dev)
+static int fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4714,7 +4714,7 @@ failed_clk:
return ret;
}
-static int __maybe_unused fec_runtime_suspend(struct device *dev)
+static int fec_runtime_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4725,7 +4725,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused fec_runtime_resume(struct device *dev)
+static int fec_runtime_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -4746,14 +4746,14 @@ failed_clk_ipg:
}
static const struct dev_pm_ops fec_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
- SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+ RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};
static struct platform_driver fec_driver = {
.driver = {
.name = DRIVER_NAME,
- .pm = &fec_pm_ops,
+ .pm = pm_ptr(&fec_pm_ops),
.of_match_table = fec_dt_ids,
.suppress_bind_attrs = true,
},
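
The fec conversion is the runtime-PM flavor of the same cleanup as in dl2k above: the unprefixed SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() macros always reference their callbacks (making __maybe_unused redundant), while pm_ptr() compiles the ops pointer to NULL when CONFIG_PM=n so the dead callbacks get discarded. Reduced to a hypothetical skeleton:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_runtime_suspend(struct device *dev)
{
	return 0;	/* gate clocks */
}

static int foo_runtime_resume(struct device *dev)
{
	return 0;	/* ungate clocks */
}

static const struct dev_pm_ops foo_pm_ops = {
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= pm_ptr(&foo_pm_ops),	/* NULL when CONFIG_PM=n */
	},
};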
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 2e4f3e1782a2..4cffda363a14 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -91,6 +91,30 @@
#define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL
/**
+ * fec_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc: the cyclecounter structure
+ *
+ * this function reads the cyclecounter registers and is called by the
+ * cyclecounter structure used to construct a ns counter from the
+ * arbitrary fixed point registers
+ */
+static u64 fec_ptp_read(const struct cyclecounter *cc)
+{
+ struct fec_enet_private *fep =
+ container_of(cc, struct fec_enet_private, cc);
+ u32 tempval;
+
+ tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ tempval |= FEC_T_CTRL_CAPTURE;
+ writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+ udelay(1);
+
+ return readl(fep->hwp + FEC_ATIME);
+}
+
+/**
* fec_ptp_enable_pps
* @fep: the fec_enet_private structure handle
* @enable: enable the channel pps output
@@ -136,7 +160,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
* to current timer would be next second.
*/
- tempval = fep->cc.read(&fep->cc);
+ tempval = fec_ptp_read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
@@ -211,13 +235,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep)
timecounter_read(&fep->tc);
/* Get the current ptp hardware time counter */
- temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
- temp_val |= FEC_T_CTRL_CAPTURE;
- writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
- if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
- udelay(1);
-
- ptp_hc = readl(fep->hwp + FEC_ATIME);
+ ptp_hc = fec_ptp_read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);
@@ -272,30 +290,6 @@ static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
}
/**
- * fec_ptp_read - read raw cycle counter (to be used by time counter)
- * @cc: the cyclecounter structure
- *
- * this function reads the cyclecounter registers and is called by the
- * cyclecounter structure used to construct a ns counter from the
- * arbitrary fixed point registers
- */
-static u64 fec_ptp_read(const struct cyclecounter *cc)
-{
- struct fec_enet_private *fep =
- container_of(cc, struct fec_enet_private, cc);
- u32 tempval;
-
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
- udelay(1);
-
- return readl(fep->hwp + FEC_ATIME);
-}
-
-/**
* fec_ptp_start_cyclecounter - create the cycle counter from hw
* @ndev: network device
*
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 406e75e9e5ea..f17a4e511510 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1748,7 +1748,7 @@ static int fman_port_probe(struct platform_device *of_dev)
struct resource res;
struct resource *dev_res;
u32 val;
- int err = 0, lenp;
+ int err = 0;
enum fman_port_type port_type;
u16 port_speed;
u8 port_id;
@@ -1795,7 +1795,7 @@ static int fman_port_probe(struct platform_device *of_dev)
if (of_device_is_compatible(port_node, "fsl,fman-v3-port-tx")) {
port_type = FMAN_PORT_TYPE_TX;
port_speed = 1000;
- if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
+ if (of_property_read_bool(port_node, "fsl,fman-10g-port"))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-tx")) {
@@ -1808,7 +1808,7 @@ static int fman_port_probe(struct platform_device *of_dev)
} else if (of_device_is_compatible(port_node, "fsl,fman-v3-port-rx")) {
port_type = FMAN_PORT_TYPE_RX;
port_speed = 1000;
- if (of_find_property(port_node, "fsl,fman-10g-port", &lenp))
+ if (of_property_read_bool(port_node, "fsl,fman-10g-port"))
port_speed = 10000;
} else if (of_device_is_compatible(port_node, "fsl,fman-v2-port-rx")) {
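
of_find_property() was only being used as a presence test in the hunks above, forcing a dummy length out-parameter; of_property_read_bool() expresses the same check directly. The equivalent helper, kept hypothetical:

#include <linux/of.h>

static bool fman_port_is_10g(const struct device_node *port_node)
{
	/* was: int lenp; of_find_property(port_node, "...", &lenp) != NULL */
	return of_property_read_bool(port_node, "fsl,fman-10g-port");
}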
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index a64cb6270515..9e89ac2b6ce3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -131,15 +131,14 @@ static int setup_data(struct net_device *dev)
static int allocate_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
+ struct fs_platform_info *fpi = fep->fpi;
- fep->ring_mem_addr = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) *
- sizeof(cbd_t), 8);
- if (IS_ERR_VALUE(fep->ring_mem_addr))
+ fpi->dpram_offset = cpm_muram_alloc((fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), 8);
+ if (IS_ERR_VALUE(fpi->dpram_offset))
return -ENOMEM;
- fep->ring_base = (void __iomem __force*)
- cpm_muram_addr(fep->ring_mem_addr);
+ fep->ring_base = cpm_muram_addr(fpi->dpram_offset);
return 0;
}
@@ -147,9 +146,10 @@ static int allocate_bd(struct net_device *dev)
static void free_bd(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
if (fep->ring_base)
- cpm_muram_free(fep->ring_mem_addr);
+ cpm_muram_free(fpi->dpram_offset);
}
static void cleanup_data(struct net_device *dev)
@@ -247,9 +247,9 @@ static void restart(struct net_device *dev)
__fs_out8((u8 __iomem *)ep + i, 0);
/* point to bds */
- W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
+ W16(ep, sen_genscc.scc_rbase, fpi->dpram_offset);
W16(ep, sen_genscc.scc_tbase,
- fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);
+ fpi->dpram_offset + sizeof(cbd_t) * fpi->rx_ring);
/* Initialize function code registers for big-endian.
*/
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c
index a7fbd4cd560a..ce97b76f9ae0 100644
--- a/drivers/net/ethernet/fungible/funcore/fun_dev.c
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c
@@ -546,17 +546,14 @@ int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
unsigned int id0, enum fun_admin_bind_type type1,
unsigned int id1)
{
- struct {
- struct fun_admin_bind_req req;
- struct fun_admin_bind_entry entry[2];
- } cmd = {
- .req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
- sizeof(cmd)),
- .entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0),
- .entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1),
- };
+ DEFINE_RAW_FLEX(struct fun_admin_bind_req, cmd, entry, 2);
+
+ cmd->common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
+ __struct_size(cmd));
+ cmd->entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0);
+ cmd->entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1);
- return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0);
+ return fun_submit_admin_sync_cmd(fdev, &cmd->common, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(fun_bind);
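
DEFINE_RAW_FLEX(), used above, declares zeroed on-stack storage sized for a flexible-array struct plus a given number of trailing elements and binds a typed pointer to it; __struct_size() then reports the full allocation, not just sizeof the header. A standalone sketch with hypothetical types:

#include <linux/overflow.h>
#include <linux/types.h>

struct foo_req {
	__le32 opcode;
	__le32 len;
	__le64 entry[];			/* flexible array member */
};

static void foo_send(void)
{
	/* storage for struct foo_req + 2 trailing entries; cmd is a foo_req * */
	DEFINE_RAW_FLEX(struct foo_req, cmd, entry, 2);

	cmd->opcode = cpu_to_le32(1);
	cmd->len = cpu_to_le32(__struct_size(cmd));	/* header + 2 entries */
	cmd->entry[0] = cpu_to_le64(0xaa);
	cmd->entry[1] = cpu_to_le64(0xbb);
	/* ...hand cmd off to the hardware queue... */
}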
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 84ac004d3953..301fa1ea4f51 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -784,6 +784,8 @@ struct gve_priv {
u32 adminq_verify_driver_compatibility_cnt;
u32 adminq_query_flow_rules_cnt;
u32 adminq_cfg_flow_rule_cnt;
+ u32 adminq_cfg_rss_cnt;
+ u32 adminq_query_rss_cnt;
/* Global stats */
u32 interface_up_cnt; /* count of times interface turned up since last reset */
@@ -831,6 +833,9 @@ struct gve_priv {
u32 num_flow_rules;
struct gve_flow_rules_cache flow_rules_cache;
+
+ u16 rss_key_size;
+ u16 rss_lut_size;
};
enum gve_service_task_flags_bit {
@@ -1148,7 +1153,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
int idx);
void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_alloc_rings_cfg *cfg);
-int gve_rx_alloc_rings(struct gve_priv *priv);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_gqi(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index c5bbc1b7524e..e44e8b139633 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -45,6 +45,7 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
+ struct gve_device_option_rss_config **dev_op_rss_config,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
@@ -207,6 +208,23 @@ void gve_parse_device_option(struct gve_priv *priv,
"Flow Steering");
*dev_op_flow_steering = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_RSS_CONFIG:
+ if (option_length < sizeof(**dev_op_rss_config) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "RSS config",
+ (int)sizeof(**dev_op_rss_config),
+ GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_rss_config))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "RSS config");
+ *dev_op_rss_config = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -227,6 +245,7 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
+ struct gve_device_option_rss_config **dev_op_rss_config,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
@@ -249,7 +268,8 @@ gve_process_device_options(struct gve_priv *priv,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
dev_op_dqo_qpl, dev_op_buffer_sizes,
- dev_op_flow_steering, dev_op_modify_ring);
+ dev_op_flow_steering, dev_op_rss_config,
+ dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -289,6 +309,8 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_get_ptype_map_cnt = 0;
priv->adminq_query_flow_rules_cnt = 0;
priv->adminq_cfg_flow_rule_cnt = 0;
+ priv->adminq_cfg_rss_cnt = 0;
+ priv->adminq_query_rss_cnt = 0;
/* Setup Admin queue with the device */
if (priv->pdev->revision < 0x1) {
@@ -534,6 +556,12 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
case GVE_ADMINQ_CONFIGURE_FLOW_RULE:
priv->adminq_cfg_flow_rule_cnt++;
break;
+ case GVE_ADMINQ_CONFIGURE_RSS:
+ priv->adminq_cfg_rss_cnt++;
+ break;
+ case GVE_ADMINQ_QUERY_RSS:
+ priv->adminq_query_rss_cnt++;
+ break;
default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
}
@@ -867,6 +895,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
*dev_op_buffer_sizes,
const struct gve_device_option_flow_steering
*dev_op_flow_steering,
+ const struct gve_device_option_rss_config
+ *dev_op_rss_config,
const struct gve_device_option_modify_ring
*dev_op_modify_ring)
{
@@ -931,6 +961,14 @@ static void gve_enable_supported_features(struct gve_priv *priv,
priv->max_flow_rules);
}
}
+
+ if (dev_op_rss_config &&
+ (supported_features_mask & GVE_SUP_RSS_CONFIG_MASK)) {
+ priv->rss_key_size =
+ be16_to_cpu(dev_op_rss_config->hash_key_size);
+ priv->rss_lut_size =
+ be16_to_cpu(dev_op_rss_config->hash_lut_size);
+ }
}
int gve_adminq_describe_device(struct gve_priv *priv)
@@ -939,6 +977,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
+ struct gve_device_option_rss_config *dev_op_rss_config = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
@@ -973,6 +1012,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
&dev_op_jumbo_frames, &dev_op_dqo_qpl,
&dev_op_buffer_sizes,
&dev_op_flow_steering,
+ &dev_op_rss_config,
&dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -1035,7 +1075,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
dev_op_buffer_sizes, dev_op_flow_steering,
- dev_op_modify_ring);
+ dev_op_rss_config, dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
@@ -1248,6 +1288,81 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv)
return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
}
+int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
+{
+ dma_addr_t lut_bus = 0, key_bus = 0;
+ u16 key_size = 0, lut_size = 0;
+ union gve_adminq_command cmd;
+ __be32 *lut = NULL;
+ u8 hash_alg = 0;
+ u8 *key = NULL;
+ int err = 0;
+ u16 i;
+
+ switch (rxfh->hfunc) {
+ case ETH_RSS_HASH_NO_CHANGE:
+ break;
+ case ETH_RSS_HASH_TOP:
+ hash_alg = ETH_RSS_HASH_TOP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (rxfh->indir) {
+ lut_size = priv->rss_lut_size;
+ lut = dma_alloc_coherent(&priv->pdev->dev,
+ lut_size * sizeof(*lut),
+ &lut_bus, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->rss_lut_size; i++)
+ lut[i] = cpu_to_be32(rxfh->indir[i]);
+ }
+
+ if (rxfh->key) {
+ key_size = priv->rss_key_size;
+ key = dma_alloc_coherent(&priv->pdev->dev,
+ key_size, &key_bus, GFP_KERNEL);
+ if (!key) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(key, rxfh->key, key_size);
+ }
+
+ /* Zero-valued fields in the cmd.configure_rss instruct the device to
+ * not update those fields.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_RSS);
+ cmd.configure_rss = (struct gve_adminq_configure_rss) {
+ .hash_types = cpu_to_be16(BIT(GVE_RSS_HASH_TCPV4) |
+ BIT(GVE_RSS_HASH_UDPV4) |
+ BIT(GVE_RSS_HASH_TCPV6) |
+ BIT(GVE_RSS_HASH_UDPV6)),
+ .hash_alg = hash_alg,
+ .hash_key_size = cpu_to_be16(key_size),
+ .hash_lut_size = cpu_to_be16(lut_size),
+ .hash_key_addr = cpu_to_be64(key_bus),
+ .hash_lut_addr = cpu_to_be64(lut_bus),
+ };
+
+ err = gve_adminq_execute_cmd(priv, &cmd);
+
+out:
+ if (lut)
+ dma_free_coherent(&priv->pdev->dev,
+ lut_size * sizeof(*lut),
+ lut, lut_bus);
+ if (key)
+ dma_free_coherent(&priv->pdev->dev,
+ key_size, key, key_bus);
+ return err;
+}
+
/* In the dma memory that the driver allocated for the device to query the flow rules, the device
* will first write it with a struct of gve_query_flow_rules_descriptor. Next to it, the device
* will write an array of rules or rule ids with the count that specified in the descriptor.
@@ -1325,3 +1440,66 @@ out:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
return err;
}
+
+static int gve_adminq_process_rss_query(struct gve_priv *priv,
+ struct gve_query_rss_descriptor *descriptor,
+ struct ethtool_rxfh_param *rxfh)
+{
+ u32 total_memory_length;
+ u16 hash_lut_length;
+ void *rss_info_addr;
+ __be32 *lut;
+ u16 i;
+
+ total_memory_length = be32_to_cpu(descriptor->total_length);
+ hash_lut_length = priv->rss_lut_size * sizeof(*rxfh->indir);
+
+ if (sizeof(*descriptor) + priv->rss_key_size + hash_lut_length != total_memory_length) {
+ dev_err(&priv->dev->dev,
+ "rss query desc from device has invalid length parameter.\n");
+ return -EINVAL;
+ }
+
+ rxfh->hfunc = descriptor->hash_alg;
+
+ rss_info_addr = (void *)(descriptor + 1);
+ if (rxfh->key)
+ memcpy(rxfh->key, rss_info_addr, priv->rss_key_size);
+
+ rss_info_addr += priv->rss_key_size;
+ lut = (__be32 *)rss_info_addr;
+ if (rxfh->indir) {
+ for (i = 0; i < priv->rss_lut_size; i++)
+ rxfh->indir[i] = be32_to_cpu(lut[i]);
+ }
+
+ return 0;
+}
+
+int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_query_rss_descriptor *descriptor;
+ union gve_adminq_command cmd;
+ dma_addr_t descriptor_bus;
+ int err = 0;
+
+ descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL, &descriptor_bus);
+ if (!descriptor)
+ return -ENOMEM;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_RSS);
+ cmd.query_rss = (struct gve_adminq_query_rss) {
+ .available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE),
+ .rss_descriptor_addr = cpu_to_be64(descriptor_bus),
+ };
+ err = gve_adminq_execute_cmd(priv, &cmd);
+ if (err)
+ goto out;
+
+ err = gve_adminq_process_rss_query(priv, descriptor, rxfh);
+
+out:
+ dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
+ return err;
+}
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index ed1370c9b197..863683de9694 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -20,12 +20,14 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_DESTROY_TX_QUEUE = 0x7,
GVE_ADMINQ_DESTROY_RX_QUEUE = 0x8,
GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9,
+ GVE_ADMINQ_CONFIGURE_RSS = 0xA,
GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB,
GVE_ADMINQ_REPORT_STATS = 0xC,
GVE_ADMINQ_REPORT_LINK_SPEED = 0xD,
GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
GVE_ADMINQ_QUERY_FLOW_RULES = 0x10,
+ GVE_ADMINQ_QUERY_RSS = 0x12,
/* For commands that are larger than 56 bytes */
GVE_ADMINQ_EXTENDED_COMMAND = 0xFF,
@@ -164,6 +166,14 @@ struct gve_device_option_flow_steering {
static_assert(sizeof(struct gve_device_option_flow_steering) == 12);
+struct gve_device_option_rss_config {
+ __be32 supported_features_mask;
+ __be16 hash_key_size;
+ __be16 hash_lut_size;
+};
+
+static_assert(sizeof(struct gve_device_option_rss_config) == 8);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -182,6 +192,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
GVE_DEV_OPT_ID_FLOW_STEERING = 0xb,
+ GVE_DEV_OPT_ID_RSS_CONFIG = 0xe,
};
enum gve_dev_opt_req_feat_mask {
@@ -194,6 +205,7 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG = 0x0,
};
enum gve_sup_feature_mask {
@@ -201,6 +213,7 @@ enum gve_sup_feature_mask {
GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
GVE_SUP_FLOW_STEERING_MASK = 1 << 5,
+ GVE_SUP_RSS_CONFIG_MASK = 1 << 7,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -214,6 +227,7 @@ enum gve_driver_capbility {
gve_driver_capability_dqo_rda = 3,
gve_driver_capability_alt_miss_compl = 4,
gve_driver_capability_flexible_buffer_size = 5,
+ gve_driver_capability_flexible_rss_size = 6,
};
#define GVE_CAP1(a) BIT((int)a)
@@ -226,7 +240,8 @@ enum gve_driver_capbility {
GVE_CAP1(gve_driver_capability_gqi_rda) | \
GVE_CAP1(gve_driver_capability_dqo_rda) | \
GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
- GVE_CAP1(gve_driver_capability_flexible_buffer_size))
+ GVE_CAP1(gve_driver_capability_flexible_buffer_size) | \
+ GVE_CAP1(gve_driver_capability_flexible_rss_size))
#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
@@ -509,6 +524,44 @@ struct gve_adminq_query_flow_rules {
static_assert(sizeof(struct gve_adminq_query_flow_rules) == 24);
+enum gve_rss_hash_type {
+ GVE_RSS_HASH_IPV4,
+ GVE_RSS_HASH_TCPV4,
+ GVE_RSS_HASH_IPV6,
+ GVE_RSS_HASH_IPV6_EX,
+ GVE_RSS_HASH_TCPV6,
+ GVE_RSS_HASH_TCPV6_EX,
+ GVE_RSS_HASH_UDPV4,
+ GVE_RSS_HASH_UDPV6,
+ GVE_RSS_HASH_UDPV6_EX,
+};
+
+struct gve_adminq_configure_rss {
+ __be16 hash_types;
+ u8 hash_alg;
+ u8 reserved;
+ __be16 hash_key_size;
+ __be16 hash_lut_size;
+ __be64 hash_key_addr;
+ __be64 hash_lut_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_configure_rss) == 24);
+
+struct gve_query_rss_descriptor {
+ __be32 total_length;
+ __be16 hash_types;
+ u8 hash_alg;
+ u8 reserved;
+};
+
+struct gve_adminq_query_rss {
+ __be64 available_length;
+ __be64 rss_descriptor_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_query_rss) == 16);
+
union gve_adminq_command {
struct {
__be32 opcode;
@@ -530,6 +583,8 @@ union gve_adminq_command {
struct gve_adminq_verify_driver_compatibility
verify_driver_compatibility;
struct gve_adminq_query_flow_rules query_flow_rules;
+ struct gve_adminq_configure_rss configure_rss;
+ struct gve_adminq_query_rss query_rss;
struct gve_adminq_extended_command extended_command;
};
};
@@ -568,6 +623,8 @@ int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule
int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc);
int gve_adminq_reset_flow_rules(struct gve_priv *priv);
int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);
+int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
+int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 5a8b490ab3ad..bdfc6e77b2af 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -75,7 +75,8 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
- "adminq_query_flow_rules", "adminq_cfg_flow_rule",
+ "adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt",
+ "adminq_query_rss_cnt",
};
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
@@ -453,6 +454,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->adminq_get_ptype_map_cnt;
data[i++] = priv->adminq_query_flow_rules_cnt;
data[i++] = priv->adminq_cfg_flow_rule_cnt;
+ data[i++] = priv->adminq_cfg_rss_cnt;
+ data[i++] = priv->adminq_query_rss_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -838,6 +841,41 @@ static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u
return err;
}
+static u32 gve_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ return priv->rss_key_size;
+}
+
+static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ return priv->rss_lut_size;
+}
+
+static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (!priv->rss_key_size || !priv->rss_lut_size)
+ return -EOPNOTSUPP;
+
+ return gve_adminq_query_rss_config(priv, rxfh);
+}
+
+static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (!priv->rss_key_size || !priv->rss_lut_size)
+ return -EOPNOTSUPP;
+
+ return gve_adminq_configure_rss(priv, rxfh);
+}
+
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
@@ -851,6 +889,10 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_channels = gve_get_channels,
.set_rxnfc = gve_set_rxnfc,
.get_rxnfc = gve_get_rxnfc,
+ .get_rxfh_indir_size = gve_get_rxfh_indir_size,
+ .get_rxfh_key_size = gve_get_rxfh_key_size,
+ .get_rxfh = gve_get_rxfh,
+ .set_rxfh = gve_set_rxfh,
.get_link = ethtool_op_get_link,
.get_coalesce = gve_get_coalesce,
.set_coalesce = gve_set_coalesce,
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index b91e7a06b97f..beb815e5289b 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -947,6 +947,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->tx_coalesce_timer.function = tx_done;
priv->map = syscon_node_to_regmap(arg.np);
+ of_node_put(arg.np);
if (IS_ERR(priv->map)) {
dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
ret = PTR_ERR(priv->map);
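
The of_node_put() additions in the hisilicon drivers here all fix the same leak: of_parse_phandle_with_fixed_args() and friends return args.np with an elevated refcount, and syscon_node_to_regmap() does not consume it, so the caller must drop it whether or not the lookup succeeded. The corrected pattern, with a hypothetical phandle name:

#include <linux/mfd/syscon.h>
#include <linux/of.h>

static struct regmap *foo_get_syscon(struct device_node *np)
{
	struct of_phandle_args args;
	struct regmap *map;

	if (of_parse_phandle_with_fixed_args(np, "vendor,syscon", 1, 0, &args))
		return ERR_PTR(-ENODEV);

	map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);	/* drop the lookup reference either way */
	return map;		/* ERR_PTR-encoded on failure */
}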
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index f75668c47935..58baac7103b3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -734,7 +734,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
return -ENODATA;
phy = get_phy_device(mdio, addr, is_c45);
- if (!phy || IS_ERR(phy))
+ if (IS_ERR_OR_NULL(phy))
return -EIO;
phy->irq = mdio->irq[addr];
@@ -933,6 +933,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
mac_cb->cpld_ctrl = NULL;
} else {
syscon = syscon_node_to_regmap(cpld_args.np);
+ of_node_put(cpld_args.np);
if (IS_ERR_OR_NULL(syscon)) {
dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
mac_cb->cpld_ctrl = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 6c33195a1168..bd86efd92a5a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -13,8 +13,9 @@
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
-#include <net/ipv6.h>
+
#include <net/rtnetlink.h>
+
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
@@ -6290,15 +6291,15 @@ static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs,
static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule, u8 ip_proto)
{
- be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ fs->h_u.tcp_ip6_spec.ip6src);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ fs->m_u.tcp_ip6_spec.ip6src);
- be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ fs->h_u.tcp_ip6_spec.ip6dst);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ fs->m_u.tcp_ip6_spec.ip6dst);
rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
@@ -6319,15 +6320,15 @@ static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs,
struct hclge_fd_rule *rule)
{
- be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ fs->h_u.usr_ip6_spec.ip6src);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ fs->m_u.usr_ip6_spec.ip6src);
- be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
- IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ fs->h_u.usr_ip6_spec.ip6dst);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ fs->m_u.usr_ip6_spec.ip6dst);
rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
@@ -6756,21 +6757,19 @@ static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
struct ethtool_tcpip6_spec *spec,
struct ethtool_tcpip6_spec *spec_mask)
{
- cpu_to_be32_array(spec->ip6src,
- rule->tuples.src_ip, IPV6_SIZE);
- cpu_to_be32_array(spec->ip6dst,
- rule->tuples.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
+ ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
else
- cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
- IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6src,
+ rule->tuples_mask.src_ip);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
else
- cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
- IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip);
spec->tclass = rule->tuples.ip_tos;
spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
@@ -6789,19 +6788,19 @@ static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
struct ethtool_usrip6_spec *spec,
struct ethtool_usrip6_spec *spec_mask)
{
- cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
- cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec->ip6src, rule->tuples.src_ip);
+ ipv6_addr_cpu_to_be32(spec->ip6dst, rule->tuples.dst_ip);
if (rule->unused_tuple & BIT(INNER_SRC_IP))
memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
else
- cpu_to_be32_array(spec_mask->ip6src,
- rule->tuples_mask.src_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6src,
+ rule->tuples_mask.src_ip);
if (rule->unused_tuple & BIT(INNER_DST_IP))
memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
else
- cpu_to_be32_array(spec_mask->ip6dst,
- rule->tuples_mask.dst_ip, IPV6_SIZE);
+ ipv6_addr_cpu_to_be32(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip);
spec->tclass = rule->tuples.ip_tos;
spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
@@ -7019,7 +7018,7 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
} else {
int i;
- for (i = 0; i < IPV6_SIZE; i++) {
+ for (i = 0; i < IPV6_ADDR_WORDS; i++) {
tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
}
@@ -7274,14 +7273,14 @@ static int hclge_get_cls_key_ip(const struct flow_rule *flow,
struct flow_match_ipv6_addrs match;
flow_rule_match_ipv6_addrs(flow, &match);
- be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.src_ip,
- match.mask->src.s6_addr32, IPV6_SIZE);
- be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
- IPV6_SIZE);
- be32_to_cpu_array(rule->tuples_mask.dst_ip,
- match.mask->dst.s6_addr32, IPV6_SIZE);
+ ipv6_addr_be32_to_cpu(rule->tuples.src_ip,
+ match.key->src.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.src_ip,
+ match.mask->src.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples.dst_ip,
+ match.key->dst.s6_addr32);
+ ipv6_addr_be32_to_cpu(rule->tuples_mask.dst_ip,
+ match.mask->dst.s6_addr32);
} else {
rule->unused_tuple |= BIT(INNER_SRC_IP);
rule->unused_tuple |= BIT(INNER_DST_IP);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index b5178b0f88b3..b9fc719880bb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -8,7 +8,9 @@
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/kfifo.h>
+
#include <net/devlink.h>
+#include <net/ipv6.h>
#include "hclge_cmd.h"
#include "hclge_ptp.h"
@@ -718,15 +720,15 @@ struct hclge_fd_cfg {
};
#define IPV4_INDEX 3
-#define IPV6_SIZE 4
+
struct hclge_fd_rule_tuples {
u8 src_mac[ETH_ALEN];
u8 dst_mac[ETH_ALEN];
/* Be compatible for ip address of both ipv4 and ipv6.
* For ipv4 address, we store it in src/dst_ip[3].
*/
- u32 src_ip[IPV6_SIZE];
- u32 dst_ip[IPV6_SIZE];
+ u32 src_ip[IPV6_ADDR_WORDS];
+ u32 dst_ip[IPV6_ADDR_WORDS];
u16 src_port;
u16 dst_port;
u16 vlan_tag1;
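
ipv6_addr_be32_to_cpu()/ipv6_addr_cpu_to_be32() replace the open-coded be32_to_cpu_array()/cpu_to_be32_array() calls with the word count baked in. As used in these hunks they behave like the following wrappers (a sketch of the assumed shape, with IPV6_ADDR_WORDS == 4 from <net/ipv6.h>):

static inline void ipv6_addr_cpu_to_be32(__be32 *dst, const u32 *src)
{
	cpu_to_be32_array(dst, src, IPV6_ADDR_WORDS);
}

static inline void ipv6_addr_be32_to_cpu(u32 *dst, const __be32 *src)
{
	be32_to_cpu_array(dst, src, IPV6_ADDR_WORDS);
}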
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 65b9dcd38137..6db415d8b917 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -134,17 +134,17 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
reg += hclgevf_reg_get_header(reg);
/* fetching per-VF registers values from VF PCIe register space */
- reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+ reg_um = ARRAY_SIZE(cmdq_reg_addr_list);
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg);
for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
+ reg_um = ARRAY_SIZE(common_reg_addr_list);
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg);
for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
- reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+ reg_um = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
for (i = 0; i < reg_um; i++)
@@ -153,7 +153,7 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
HCLGEVF_RING_REG_OFFSET * j);
}
- reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+ reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg);
for (i = 0; i < reg_um; i++)
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index ed73707176c1..8a047145f0c5 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -575,6 +575,7 @@ static int hns_mdio_probe(struct platform_device *pdev)
MDIO_SC_RESET_ST;
}
}
+ of_node_put(reg_args.np);
} else {
dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret);
mdio_dev->subctrl_vbase = NULL;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 0304f03d4093..c559dd4291d3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -1471,7 +1471,6 @@ static void hinic_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
- char *p = (char *)data;
u16 i, j;
switch (stringset) {
@@ -1479,31 +1478,19 @@ static void hinic_get_strings(struct net_device *netdev,
memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
return;
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) {
- memcpy(p, hinic_function_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++)
+ ethtool_puts(&data, hinic_function_stats[i].name);
- for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) {
- memcpy(p, hinic_port_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++)
+ ethtool_puts(&data, hinic_port_stats[i].name);
- for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) {
- sprintf(p, hinic_tx_queue_stats[j].name, i);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < nic_dev->num_qps; i++)
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++)
+ ethtool_sprintf(&data, hinic_tx_queue_stats[j].name, i);
- for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) {
- sprintf(p, hinic_rx_queue_stats[j].name, i);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < nic_dev->num_qps; i++)
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++)
+ ethtool_sprintf(&data, hinic_rx_queue_stats[j].name, i);
return;
default:
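
ethtool_puts()/ethtool_sprintf() encapsulate the stat-string cursor: each call copies one name and advances the u8 *data pointer by ETH_GSTRING_LEN, which is what the removed memcpy/sprintf arithmetic did by hand. A hypothetical get_strings using both:

#include <linux/ethtool.h>

static void foo_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	ethtool_puts(&data, "link_flaps");		/* verbatim name */
	for (i = 0; i < 4; i++)
		ethtool_sprintf(&data, "rxq%d_packets", i);	/* formatted name */
}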
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 1e29e5c9a2df..c41c3f1cc506 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3063,14 +3063,13 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
struct device_node *lhea_dn;
- struct device_node *eth_dn = NULL;
+ struct device_node *eth_dn;
const u32 *dn_log_port_id;
int i = 0;
lhea_dn = adapter->ofdev->dev.of_node;
- while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
-
+ for_each_child_of_node(lhea_dn, eth_dn) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
NULL);
if (!dn_log_port_id) {
@@ -3102,12 +3101,11 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
u32 logical_port_id)
{
struct device_node *lhea_dn;
- struct device_node *eth_dn = NULL;
+ struct device_node *eth_dn;
const u32 *dn_log_port_id;
lhea_dn = adapter->ofdev->dev.of_node;
- while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
-
+ for_each_child_of_node(lhea_dn, eth_dn) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
NULL);
if (dn_log_port_id)
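
for_each_child_of_node() drops the previous child's reference as it advances, matching the manual of_get_next_child() loop it replaces; breaking out (or returning) mid-walk leaves exactly one reference held, which is the contract a find-and-return helper such as ehea_get_eth_dn() wants. Sketched on its own:

#include <linux/of.h>

static struct device_node *find_port(struct device_node *parent, u32 want)
{
	struct device_node *child;
	u32 id;

	for_each_child_of_node(parent, child) {
		if (!of_property_read_u32(child, "ibm,hea-port-no", &id) &&
		    id == want)
			return child;	/* ref held; caller must of_node_put() */
	}

	return NULL;	/* loop completed: no references outstanding */
}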
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 4c9d9badd698..b619a3ec245b 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -39,7 +39,8 @@
#include "ibmveth.h"
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+ bool reuse);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
static struct kobj_type ktype_veth_pool;
@@ -226,6 +227,16 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
for (i = 0; i < count; ++i) {
union ibmveth_buf_desc desc;
+ free_index = pool->consumer_index;
+ index = pool->free_map[free_index];
+ skb = NULL;
+
+ BUG_ON(index == IBM_VETH_INVALID_MAP);
+
+ /* are we allocating a new buffer or recycling an old one */
+ if (pool->skbuff[index])
+ goto reuse;
+
skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
if (!skb) {
@@ -235,46 +246,46 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
break;
}
- free_index = pool->consumer_index;
- pool->consumer_index++;
- if (pool->consumer_index >= pool->size)
- pool->consumer_index = 0;
- index = pool->free_map[free_index];
-
- BUG_ON(index == IBM_VETH_INVALID_MAP);
- BUG_ON(pool->skbuff[index] != NULL);
-
dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
pool->buff_size, DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
goto failure;
- pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
pool->dma_addr[index] = dma_addr;
pool->skbuff[index] = skb;
- correlator = ((u64)pool->index << 32) | index;
- *(u64 *)skb->data = correlator;
-
- desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
- desc.fields.address = dma_addr;
-
if (rx_flush) {
unsigned int len = min(pool->buff_size,
- adapter->netdev->mtu +
- IBMVETH_BUFF_OH);
+ adapter->netdev->mtu +
+ IBMVETH_BUFF_OH);
ibmveth_flush_buffer(skb->data, len);
}
+reuse:
+ dma_addr = pool->dma_addr[index];
+ desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
+ desc.fields.address = dma_addr;
+
+ correlator = ((u64)pool->index << 32) | index;
+ *(u64 *)pool->skbuff[index]->data = correlator;
+
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
desc.desc);
if (lpar_rc != H_SUCCESS) {
+ netdev_warn(adapter->netdev,
+ "%sadd_logical_lan failed %lu\n",
+ skb ? "" : "When recycling: ", lpar_rc);
goto failure;
- } else {
- buffers_added++;
- adapter->replenish_add_buff_success++;
}
+
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
+ pool->consumer_index++;
+ if (pool->consumer_index >= pool->size)
+ pool->consumer_index = 0;
+
+ buffers_added++;
+ adapter->replenish_add_buff_success++;
}
mb();
@@ -282,17 +293,13 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
return;
failure:
- pool->free_map[free_index] = index;
- pool->skbuff[index] = NULL;
- if (pool->consumer_index == 0)
- pool->consumer_index = pool->size - 1;
- else
- pool->consumer_index--;
- if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
+
+ if (dma_addr && !dma_mapping_error(&adapter->vdev->dev, dma_addr))
dma_unmap_single(&adapter->vdev->dev,
pool->dma_addr[index], pool->buff_size,
DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(pool->skbuff[index]);
+ pool->skbuff[index] = NULL;
adapter->replenish_add_buff_failure++;
mb();
@@ -365,7 +372,7 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
- u64 correlator)
+ u64 correlator, bool reuse)
{
unsigned int pool = correlator >> 32;
unsigned int index = correlator & 0xffffffffUL;
@@ -376,15 +383,23 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
BUG_ON(index >= adapter->rx_buff_pool[pool].size);
skb = adapter->rx_buff_pool[pool].skbuff[index];
-
BUG_ON(skb == NULL);
- adapter->rx_buff_pool[pool].skbuff[index] = NULL;
+ /* if we are going to reuse the buffer then keep the pointers around
+ * but mark index as available. replenish will see the skb pointer and
+ * assume it is to be recycled.
+ */
+ if (!reuse) {
+ /* remove the skb pointer to mark free. actual freeing is done
+	 * by upper level networking after gro_receive
+ */
+ adapter->rx_buff_pool[pool].skbuff[index] = NULL;
- dma_unmap_single(&adapter->vdev->dev,
- adapter->rx_buff_pool[pool].dma_addr[index],
- adapter->rx_buff_pool[pool].buff_size,
- DMA_FROM_DEVICE);
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->rx_buff_pool[pool].dma_addr[index],
+ adapter->rx_buff_pool[pool].buff_size,
+ DMA_FROM_DEVICE);
+ }
free_index = adapter->rx_buff_pool[pool].producer_index;
adapter->rx_buff_pool[pool].producer_index++;
@@ -411,51 +426,13 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
return adapter->rx_buff_pool[pool].skbuff[index];
}
-/* recycle the current buffer on the rx queue */
-static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
+static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter,
+ bool reuse)
{
- u32 q_index = adapter->rx_queue.index;
- u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
- unsigned int pool = correlator >> 32;
- unsigned int index = correlator & 0xffffffffUL;
- union ibmveth_buf_desc desc;
- unsigned long lpar_rc;
- int ret = 1;
-
- BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
- BUG_ON(index >= adapter->rx_buff_pool[pool].size);
-
- if (!adapter->rx_buff_pool[pool].active) {
- ibmveth_rxq_harvest_buffer(adapter);
- ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
- goto out;
- }
-
- desc.fields.flags_len = IBMVETH_BUF_VALID |
- adapter->rx_buff_pool[pool].buff_size;
- desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
-
- lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
-
- if (lpar_rc != H_SUCCESS) {
- netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
- "during recycle rc=%ld", lpar_rc);
- ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
- ret = 0;
- }
-
- if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
- adapter->rx_queue.index = 0;
- adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
- }
-
-out:
- return ret;
-}
+ u64 cor;
-static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
-{
- ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
+ cor = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
+ ibmveth_remove_buffer_from_pool(adapter, cor, reuse);
if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
adapter->rx_queue.index = 0;
@@ -1337,6 +1314,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
unsigned long lpar_rc;
u16 mss = 0;
+restart_poll:
while (frames_processed < budget) {
if (!ibmveth_rxq_pending_buffer(adapter))
break;
@@ -1346,7 +1324,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
wmb(); /* suggested by larson1 */
adapter->rx_invalid_buffer++;
netdev_dbg(netdev, "recycling invalid buffer\n");
- ibmveth_rxq_recycle_buffer(adapter);
+ ibmveth_rxq_harvest_buffer(adapter, true);
} else {
struct sk_buff *skb, *new_skb;
int length = ibmveth_rxq_frame_length(adapter);
@@ -1379,11 +1357,10 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
if (rx_flush)
ibmveth_flush_buffer(skb->data,
length + offset);
- if (!ibmveth_rxq_recycle_buffer(adapter))
- kfree_skb(skb);
+ ibmveth_rxq_harvest_buffer(adapter, true);
skb = new_skb;
} else {
- ibmveth_rxq_harvest_buffer(adapter);
+ ibmveth_rxq_harvest_buffer(adapter, false);
skb_reserve(skb, offset);
}
@@ -1420,24 +1397,25 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
ibmveth_replenish_task(adapter);
- if (frames_processed < budget) {
- napi_complete_done(napi, frames_processed);
+ if (frames_processed == budget)
+ goto out;
- /* We think we are done - reenable interrupts,
- * then check once more to make sure we are done.
- */
- lpar_rc = h_vio_signal(adapter->vdev->unit_address,
- VIO_IRQ_ENABLE);
+ if (!napi_complete_done(napi, frames_processed))
+ goto out;
- BUG_ON(lpar_rc != H_SUCCESS);
+ /* We think we are done - reenable interrupts,
+ * then check once more to make sure we are done.
+ */
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
+ BUG_ON(lpar_rc != H_SUCCESS);
- if (ibmveth_rxq_pending_buffer(adapter) &&
- napi_schedule(napi)) {
- lpar_rc = h_vio_signal(adapter->vdev->unit_address,
- VIO_IRQ_DISABLE);
- }
+ if (ibmveth_rxq_pending_buffer(adapter) && napi_schedule(napi)) {
+ lpar_rc = h_vio_signal(adapter->vdev->unit_address,
+ VIO_IRQ_DISABLE);
+ goto restart_poll;
}
+out:
return frames_processed;
}
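The restructured poll routine above follows the canonical NAPI completion sequence: finish the budgeted work, call napi_complete_done(), re-enable the device interrupt, then re-check for work that raced in and restart the loop if rescheduling succeeds. A minimal, driver-agnostic sketch of that pattern; the have_rx_work/process_one_frame/enable_irq_on_device/disable_irq_on_device helpers are hypothetical placeholders:

	#include <linux/netdevice.h>

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int done = 0;

	restart:
		while (done < budget && have_rx_work())
			done += process_one_frame();

		if (done == budget)
			return done;	/* stay scheduled; core polls again */

		if (!napi_complete_done(napi, done))
			return done;	/* someone already rescheduled us */

		enable_irq_on_device();
		if (have_rx_work() && napi_schedule(napi)) {
			disable_irq_on_device();
			goto restart;	/* close the race window inline */
		}

		return done;
	}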
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 23ebeb143987..87e693a81433 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -117,6 +117,7 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
static void flush_reset_queue(struct ibmvnic_adapter *adapter);
+static void print_subcrq_error(struct device *dev, int rc, const char *func);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -2140,63 +2141,49 @@ static int ibmvnic_close(struct net_device *netdev)
}
/**
- * build_hdr_data - creates L2/L3/L4 header data buffer
+ * get_hdr_lens - fills list of L2/L3/L4 hdr lens
* @hdr_field: bitfield determining needed headers
* @skb: socket buffer
- * @hdr_len: array of header lengths
- * @hdr_data: buffer to write the header to
+ * @hdr_len: array of header lengths to be filled
*
* Reads hdr_field to determine which headers are needed by firmware.
* Saves the individual header lengths in @hdr_len, to be used later to
* build descriptors directly from the skb headers.
+ *
+ * Return: total len of all headers
*/
-static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
- int *hdr_len, u8 *hdr_data)
+static int get_hdr_lens(u8 hdr_field, struct sk_buff *skb,
+ int *hdr_len)
{
int len = 0;
- u8 *hdr;
- if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
- hdr_len[0] = sizeof(struct vlan_ethhdr);
- else
- hdr_len[0] = sizeof(struct ethhdr);
+
+ if ((hdr_field >> 6) & 1) {
+ hdr_len[0] = skb_mac_header_len(skb);
+ len += hdr_len[0];
+ }
+
+ if ((hdr_field >> 5) & 1) {
+ hdr_len[1] = skb_network_header_len(skb);
+ len += hdr_len[1];
+ }
+
+ if (!((hdr_field >> 4) & 1))
+ return len;
if (skb->protocol == htons(ETH_P_IP)) {
- hdr_len[1] = ip_hdr(skb)->ihl * 4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
hdr_len[2] = tcp_hdrlen(skb);
else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
hdr_len[2] = sizeof(struct udphdr);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- hdr_len[1] = sizeof(struct ipv6hdr);
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
hdr_len[2] = tcp_hdrlen(skb);
else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
hdr_len[2] = sizeof(struct udphdr);
- } else if (skb->protocol == htons(ETH_P_ARP)) {
- hdr_len[1] = arp_hdr_len(skb->dev);
- hdr_len[2] = 0;
}
- memset(hdr_data, 0, 120);
- if ((hdr_field >> 6) & 1) {
- hdr = skb_mac_header(skb);
- memcpy(hdr_data, hdr, hdr_len[0]);
- len += hdr_len[0];
- }
-
- if ((hdr_field >> 5) & 1) {
- hdr = skb_network_header(skb);
- memcpy(hdr_data + len, hdr, hdr_len[1]);
- len += hdr_len[1];
- }
-
- if ((hdr_field >> 4) & 1) {
- hdr = skb_transport_header(skb);
- memcpy(hdr_data + len, hdr, hdr_len[2]);
- len += hdr_len[2];
- }
- return len;
+ return len + hdr_len[2];
}
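For reference, the three hdr_field bits tested above select which header layers are measured. A hedged sketch of the bit layout; the macro names are hypothetical, the driver itself uses bare shifts:

	#include <linux/bits.h>

	#define EXAMPLE_HDR_L2	BIT(6)	/* include the MAC header */
	#define EXAMPLE_HDR_L3	BIT(5)	/* include the network header */
	#define EXAMPLE_HDR_L4	BIT(4)	/* include the transport header */

	static u8 example_hdr_field(bool l2, bool l3, bool l4)
	{
		return (l2 ? EXAMPLE_HDR_L2 : 0) |
		       (l3 ? EXAMPLE_HDR_L3 : 0) |
		       (l4 ? EXAMPLE_HDR_L4 : 0);
	}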
/**
@@ -2209,12 +2196,14 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
*
* Creates header and, if needed, header extension descriptors and
* places them in a descriptor array, scrq_arr
+ *
+ * Return: Number of header descs
*/
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
union sub_crq *scrq_arr)
{
- union sub_crq hdr_desc;
+ union sub_crq *hdr_desc;
int tmp_len = len;
int num_descs = 0;
u8 *data, *cur;
@@ -2223,28 +2212,26 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
while (tmp_len > 0) {
cur = hdr_data + len - tmp_len;
- memset(&hdr_desc, 0, sizeof(hdr_desc));
- if (cur != hdr_data) {
- data = hdr_desc.hdr_ext.data;
+ hdr_desc = &scrq_arr[num_descs];
+ if (num_descs) {
+ data = hdr_desc->hdr_ext.data;
tmp = tmp_len > 29 ? 29 : tmp_len;
- hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
- hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
- hdr_desc.hdr_ext.len = tmp;
+ hdr_desc->hdr_ext.first = IBMVNIC_CRQ_CMD;
+ hdr_desc->hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+ hdr_desc->hdr_ext.len = tmp;
} else {
- data = hdr_desc.hdr.data;
+ data = hdr_desc->hdr.data;
tmp = tmp_len > 24 ? 24 : tmp_len;
- hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
- hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
- hdr_desc.hdr.len = tmp;
- hdr_desc.hdr.l2_len = (u8)hdr_len[0];
- hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
- hdr_desc.hdr.l4_len = (u8)hdr_len[2];
- hdr_desc.hdr.flag = hdr_field << 1;
+ hdr_desc->hdr.first = IBMVNIC_CRQ_CMD;
+ hdr_desc->hdr.type = IBMVNIC_HDR_DESC;
+ hdr_desc->hdr.len = tmp;
+ hdr_desc->hdr.l2_len = (u8)hdr_len[0];
+ hdr_desc->hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+ hdr_desc->hdr.l4_len = (u8)hdr_len[2];
+ hdr_desc->hdr.flag = hdr_field << 1;
}
memcpy(data, cur, tmp);
tmp_len -= tmp;
- *scrq_arr = hdr_desc;
- scrq_arr++;
num_descs++;
}
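Given the 24-byte capacity of the first header descriptor and the 29-byte capacity of each extension descriptor in the loop above, the descriptor count for a total header length follows directly. A small sketch, with the constants taken from the code above:

	#include <linux/math.h>

	/* 24 bytes fit in the first desc, 29 in each hdr_ext desc */
	static int example_num_hdr_descs(int len)
	{
		if (len <= 24)
			return 1;
		return 1 + DIV_ROUND_UP(len - 24, 29);
	}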
@@ -2267,13 +2254,11 @@ static void build_hdr_descs_arr(struct sk_buff *skb,
int *num_entries, u8 hdr_field)
{
int hdr_len[3] = {0, 0, 0};
- u8 hdr_data[140] = {0};
int tot_len;
- tot_len = build_hdr_data(hdr_field, skb, hdr_len,
- hdr_data);
- *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
- indir_arr + 1);
+ tot_len = get_hdr_lens(hdr_field, skb, hdr_len);
+ *num_entries += create_hdr_descs(hdr_field, skb_mac_header(skb),
+ tot_len, hdr_len, indir_arr + 1);
}
static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
@@ -2350,8 +2335,29 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
}
}
+static int send_subcrq_direct(struct ibmvnic_adapter *adapter,
+ u64 remote_handle, u64 *entry)
+{
+ unsigned int ua = adapter->vdev->unit_address;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
+
+ /* Make sure the hypervisor sees the complete request */
+ dma_wmb();
+ rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
+ cpu_to_be64(remote_handle),
+ cpu_to_be64(entry[0]), cpu_to_be64(entry[1]),
+ cpu_to_be64(entry[2]), cpu_to_be64(entry[3]));
+
+ if (rc)
+ print_subcrq_error(dev, rc, __func__);
+
+ return rc;
+}
+
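The dma_wmb() in send_subcrq_direct() takes over from the barrier this patch removes after the long-term-buffer copy in ibmvnic_xmit(): the frame payload stores must be visible to the hypervisor before the hcall publishes the descriptor. A hedged sketch of the ordering pattern; notify_hypervisor() is a hypothetical stand-in for the hcall:

	#include <asm/barrier.h>
	#include <linux/string.h>

	static void example_publish(void *ltb_dst, const void *frame,
				    size_t len, u64 *desc)
	{
		memcpy(ltb_dst, frame, len);	/* fill hypervisor-visible buffer */
		dma_wmb();			/* payload before the doorbell/hcall */
		notify_hypervisor(desc);	/* hypothetical notify */
	}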
static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
- struct ibmvnic_sub_crq_queue *tx_scrq)
+ struct ibmvnic_sub_crq_queue *tx_scrq,
+ bool indirect)
{
struct ibmvnic_ind_xmit_queue *ind_bufp;
u64 dma_addr;
@@ -2366,7 +2372,13 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
if (!entries)
return 0;
- rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
+
+ if (indirect)
+ rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
+ else
+ rc = send_subcrq_direct(adapter, handle,
+ (u64 *)ind_bufp->indir_arr);
+
if (rc)
ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
else
@@ -2397,6 +2409,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned long lpar_rc;
union sub_crq tx_crq;
unsigned int offset;
+ bool use_scrq_send_direct = false;
int num_entries = 1;
unsigned char *dst;
int bufidx = 0;
@@ -2424,7 +2437,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_dropped++;
tx_send_failed++;
ret = NETDEV_TX_OK;
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
goto out;
@@ -2442,7 +2455,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_send_failed++;
tx_dropped++;
ret = NETDEV_TX_OK;
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
goto out;
@@ -2456,6 +2469,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
memset(dst, 0, tx_pool->buf_size);
data_dma_addr = ltb->addr + offset;
+	/* If we are going to send this via send_subcrq_direct then we need
+	 * to update the checksum before copying the data into the ltb.
+	 * Essentially these packets force-disable CSO so that we can
+	 * guarantee that the FW does not need header info and we can send
+	 * direct.
+	 */
+ if (!skb_is_gso(skb) && !ind_bufp->index && !netdev_xmit_more()) {
+ use_scrq_send_direct = true;
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_help(skb))
+ use_scrq_send_direct = false;
+ }
+
if (skb_shinfo(skb)->nr_frags) {
int cur, i;
@@ -2475,9 +2500,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
skb_copy_from_linear_data(skb, dst, skb->len);
}
- /* post changes to long_term_buff *dst before VIOS accessing it */
- dma_wmb();
-
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
@@ -2540,6 +2562,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
hdrs += 2;
+ } else if (use_scrq_send_direct) {
+ /* See above comment, CSO disabled with direct xmit */
+ tx_crq.v1.flags1 &= ~(IBMVNIC_TX_CHKSUM_OFFLOAD);
+ ind_bufp->index = 1;
+ tx_buff->num_entries = 1;
+ netdev_tx_sent_queue(txq, skb->len);
+ ind_bufp->indir_arr[0] = tx_crq;
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, false);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
+
+ goto early_exit;
}
if ((*hdrs >> 7) & 1)
@@ -2549,7 +2583,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_buff->num_entries = num_entries;
/* flush buffer if current entry can not fit */
if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_flush_err;
}
@@ -2557,15 +2591,17 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
indir_arr[0] = tx_crq;
memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
num_entries * sizeof(struct ibmvnic_generic_scrq));
+
ind_bufp->index += num_entries;
if (__netdev_tx_sent_queue(txq, skb->len,
netdev_xmit_more() &&
ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
- lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
}
+early_exit:
if (atomic_add_return(num_entries, &tx_scrq->used)
>= adapter->req_tx_entries_per_subcrq) {
netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
@@ -3527,9 +3563,8 @@ restart_poll:
}
if (adapter->state != VNIC_CLOSING &&
- ((atomic_read(&adapter->rx_pool[scrq_num].available) <
- adapter->req_rx_add_entries_per_subcrq / 2) ||
- frames_processed < budget))
+ (atomic_read(&adapter->rx_pool[scrq_num].available) <
+ adapter->req_rx_add_entries_per_subcrq / 2))
replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
if (frames_processed < budget) {
if (napi_complete_done(napi, frames_processed)) {
@@ -4169,20 +4204,17 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
struct device *dev = &adapter->vdev->dev;
+ int num_packets = 0, total_bytes = 0;
struct ibmvnic_tx_pool *tx_pool;
struct ibmvnic_tx_buff *txbuff;
struct netdev_queue *txq;
union sub_crq *next;
- int index;
- int i;
+ int index, i;
restart_loop:
while (pending_scrq(adapter, scrq)) {
unsigned int pool = scrq->pool_index;
int num_entries = 0;
- int total_bytes = 0;
- int num_packets = 0;
-
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
index = be32_to_cpu(next->tx_comp.correlators[i]);
@@ -4218,8 +4250,6 @@ restart_loop:
/* remove tx_comp scrq*/
next->tx_comp.first = 0;
- txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
- netdev_tx_completed_queue(txq, num_packets, total_bytes);
if (atomic_sub_return(num_entries, &scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
@@ -4244,6 +4274,9 @@ restart_loop:
goto restart_loop;
}
+ txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
+ netdev_tx_completed_queue(txq, num_packets, total_bytes);
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 23a6557fc3db..48cd1d06761c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -33,6 +33,7 @@
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_skbedit.h>
#include "iavf_type.h"
#include <linux/avf/virtchnl.h>
@@ -393,6 +394,8 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_VLAN_V2)
#define CRC_OFFLOAD_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_CRC)
+#define TC_U32_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_TC_U32)
#define VLAN_V2_FILTERING_ALLOWED(_a) \
(VLAN_V2_ALLOWED((_a)) && \
((_a)->vlan_v2_caps.filtering.filtering_support.outer || \
@@ -437,6 +440,7 @@ struct iavf_adapter {
#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
u16 fdir_active_fltr;
+ u16 raw_fdir_active_fltr;
struct list_head fdir_list_head;
spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */
@@ -444,6 +448,32 @@ struct iavf_adapter {
spinlock_t adv_rss_lock; /* protect the RSS management list */
};
+/* Must be called with fdir_fltr_lock lock held */
+static inline bool iavf_fdir_max_reached(struct iavf_adapter *adapter)
+{
+ return adapter->fdir_active_fltr + adapter->raw_fdir_active_fltr >=
+ IAVF_MAX_FDIR_FILTERS;
+}
+
+static inline void
+iavf_inc_fdir_active_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr)
+{
+ if (iavf_is_raw_fdir(fltr))
+ adapter->raw_fdir_active_fltr++;
+ else
+ adapter->fdir_active_fltr++;
+}
+
+static inline void
+iavf_dec_fdir_active_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr)
+{
+ if (iavf_is_raw_fdir(fltr))
+ adapter->raw_fdir_active_fltr--;
+ else
+ adapter->fdir_active_fltr--;
+}
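The two counters keep ethtool-located and raw (tc u32) filters accounted separately, while the limit check sums both. A hedged usage sketch mirroring the add path later in this patch; both helpers assume fdir_fltr_lock is held:

	spin_lock_bh(&adapter->fdir_fltr_lock);
	if (iavf_fdir_max_reached(adapter)) {
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		return -ENOSPC;		/* combined limit reached */
	}
	iavf_inc_fdir_active_fltr(adapter, fltr);
	spin_unlock_bh(&adapter->fdir_fltr_lock);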
/* Ethtool Private Flags */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 52273f7eab2c..74a1e9fe1821 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -927,7 +927,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
spin_lock_bh(&adapter->fdir_fltr_lock);
- rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
+ rule = iavf_find_fdir_fltr(adapter, false, fsp->location);
if (!rule) {
ret = -EINVAL;
goto release_lock;
@@ -1072,6 +1072,9 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
+ if (iavf_is_raw_fdir(fltr))
+ continue;
+
if (cnt == cmd->rule_cnt) {
val = -EMSGSIZE;
goto release_lock;
@@ -1263,15 +1266,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
return -EINVAL;
spin_lock_bh(&adapter->fdir_fltr_lock);
- if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
- spin_unlock_bh(&adapter->fdir_fltr_lock);
- dev_err(&adapter->pdev->dev,
- "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
- IAVF_MAX_FDIR_FILTERS);
- return -ENOSPC;
- }
-
- if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
+ if (iavf_find_fdir_fltr(adapter, false, fsp->location)) {
dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
spin_unlock_bh(&adapter->fdir_fltr_lock);
return -EEXIST;
@@ -1291,23 +1286,10 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
}
err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
- if (err)
- goto ret;
-
- spin_lock_bh(&adapter->fdir_fltr_lock);
- iavf_fdir_list_add_fltr(adapter, fltr);
- adapter->fdir_active_fltr++;
-
- if (adapter->link_up)
- fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
- else
- fltr->state = IAVF_FDIR_FLTR_INACTIVE;
- spin_unlock_bh(&adapter->fdir_fltr_lock);
+ if (!err)
+ err = iavf_fdir_add_fltr(adapter, fltr);
- if (adapter->link_up)
- iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
-ret:
- if (err && fltr)
+ if (err)
kfree(fltr);
mutex_unlock(&adapter->crit_lock);
@@ -1324,34 +1306,11 @@ ret:
static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
- struct iavf_fdir_fltr *fltr = NULL;
- int err = 0;
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
return -EOPNOTSUPP;
- spin_lock_bh(&adapter->fdir_fltr_lock);
- fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
- if (fltr) {
- if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
- fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
- } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
- list_del(&fltr->list);
- kfree(fltr);
- adapter->fdir_active_fltr--;
- fltr = NULL;
- } else {
- err = -EBUSY;
- }
- } else if (adapter->fdir_active_fltr) {
- err = -EINVAL;
- }
- spin_unlock_bh(&adapter->fdir_fltr_lock);
-
- if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
- iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
-
- return err;
+ return iavf_fdir_del_fltr(adapter, false, fsp->location);
}
/**
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
index 2d47b0b4640e..a1b3b44cc14a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
@@ -796,6 +796,9 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
+ if (iavf_is_raw_fdir(fltr))
+ continue;
+
if (tmp->flow_type != fltr->flow_type)
continue;
@@ -815,33 +818,52 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *
}
/**
- * iavf_find_fdir_fltr_by_loc - find filter with location
+ * iavf_find_fdir_fltr - find FDIR filter
* @adapter: pointer to the VF adapter structure
- * @loc: location to find.
+ * @is_raw: filter type, is raw (tc u32) or not (ethtool)
+ * @data: data to ID the filter, type dependent
*
- * Returns pointer to Flow Director filter if found or null
+ * Returns: pointer to Flow Director filter if found or NULL. Lock must be held.
*/
-struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc)
+struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter,
+ bool is_raw, u32 data)
{
struct iavf_fdir_fltr *rule;
- list_for_each_entry(rule, &adapter->fdir_list_head, list)
- if (rule->loc == loc)
+ list_for_each_entry(rule, &adapter->fdir_list_head, list) {
+ if ((is_raw && rule->cls_u32_handle == data) ||
+ (!is_raw && rule->loc == data))
return rule;
+ }
return NULL;
}
/**
- * iavf_fdir_list_add_fltr - add a new node to the flow director filter list
+ * iavf_fdir_add_fltr - add a new node to the flow director filter list
* @adapter: pointer to the VF adapter structure
* @fltr: filter node to add to structure
+ *
+ * Return: 0 on success or negative errno on failure.
*/
-void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+int iavf_fdir_add_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr)
{
struct iavf_fdir_fltr *rule, *parent = NULL;
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ if (iavf_fdir_max_reached(adapter)) {
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ dev_err(&adapter->pdev->dev,
+ "Unable to add Flow Director filter (limit (%u) reached)\n",
+ IAVF_MAX_FDIR_FILTERS);
+ return -ENOSPC;
+ }
+
list_for_each_entry(rule, &adapter->fdir_list_head, list) {
+ if (iavf_is_raw_fdir(fltr))
+ break;
+
if (rule->loc >= fltr->loc)
break;
parent = rule;
@@ -851,4 +873,55 @@ void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr
list_add(&fltr->list, &parent->list);
else
list_add(&fltr->list, &adapter->fdir_list_head);
+
+ iavf_inc_fdir_active_fltr(adapter, fltr);
+
+ if (adapter->link_up)
+ fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ else
+ fltr->state = IAVF_FDIR_FLTR_INACTIVE;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (adapter->link_up)
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
+
+ return 0;
+}
+
+/**
+ * iavf_fdir_del_fltr - delete a flow director filter from the list
+ * @adapter: pointer to the VF adapter structure
+ * @is_raw: filter type, is raw (tc u32) or not (ethtool)
+ * @data: data to ID the filter, type dependent
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data)
+{
+ struct iavf_fdir_fltr *fltr = NULL;
+ int err = 0;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ fltr = iavf_find_fdir_fltr(adapter, is_raw, data);
+
+ if (fltr) {
+ if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
+ fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
+ list_del(&fltr->list);
+ iavf_dec_fdir_active_fltr(adapter, fltr);
+ kfree(fltr);
+ fltr = NULL;
+ } else {
+ err = -EBUSY;
+ }
+ } else if (adapter->fdir_active_fltr) {
+ err = -EINVAL;
+ }
+
+ if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
+
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ return err;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
index d31bd923ba8c..e84a5351162f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
@@ -117,17 +117,26 @@ struct iavf_fdir_fltr {
u32 flow_id;
+ u32 cls_u32_handle; /* for FDIR added via tc u32 */
u32 loc; /* Rule location inside the flow table */
u32 q_index;
struct virtchnl_fdir_add vc_add_msg;
};
+static inline bool iavf_is_raw_fdir(struct iavf_fdir_fltr *fltr)
+{
+ return !fltr->vc_add_msg.rule_cfg.proto_hdrs.count;
+}
+
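Raw filters carry a pre-built packet spec in the virtchnl message rather than parsed protocol headers, so a zero proto_hdrs.count identifies them; their lookup key also differs. A hedged sketch of the keying convention, with a hypothetical helper name:

	static u32 example_fltr_key(struct iavf_fdir_fltr *fltr)
	{
		/* raw (tc u32) filters are keyed by the u32 knode handle,
		 * ethtool filters by their rule location
		 */
		return iavf_is_raw_fdir(fltr) ? fltr->cls_u32_handle :
						fltr->loc;
	}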
int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
struct iavf_fdir_fltr *fltr);
int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
-void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
-struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc);
+int iavf_fdir_add_fltr(struct iavf_adapter *adapter,
+ struct iavf_fdir_fltr *fltr);
+int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data);
+struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter,
+ bool is_raw, u32 data);
#endif /* _IAVF_FDIR_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index ff11bafb3b4f..f782402cd789 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -4013,7 +4013,7 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
/**
* iavf_setup_tc_cls_flower - flower classifier offloads
- * @adapter: board private structure
+ * @adapter: pointer to iavf adapter structure
* @cls_flower: pointer to flow_cls_offload struct with flow info
*/
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
@@ -4032,6 +4032,154 @@ static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
}
/**
+ * iavf_add_cls_u32 - Add U32 classifier offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_add_cls_u32(struct iavf_adapter *adapter,
+ struct tc_cls_u32_offload *cls_u32)
+{
+ struct netlink_ext_ack *extack = cls_u32->common.extack;
+ struct virtchnl_fdir_rule *rule_cfg;
+ struct virtchnl_filter_action *vact;
+ struct virtchnl_proto_hdrs *hdrs;
+ struct ethhdr *spec_h, *mask_h;
+ const struct tc_action *act;
+ struct iavf_fdir_fltr *fltr;
+ struct tcf_exts *exts;
+ unsigned int q_index;
+ int i, status = 0;
+ int off_base = 0;
+
+ if (cls_u32->knode.link_handle) {
+ NL_SET_ERR_MSG_MOD(extack, "Linking not supported");
+ return -EOPNOTSUPP;
+ }
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr)
+ return -ENOMEM;
+
+ rule_cfg = &fltr->vc_add_msg.rule_cfg;
+ hdrs = &rule_cfg->proto_hdrs;
+ hdrs->count = 0;
+
+ /* The parser lib at the PF expects the packet starting with MAC hdr */
+ switch (ntohs(cls_u32->common.protocol)) {
+ case ETH_P_802_3:
+ break;
+ case ETH_P_IP:
+ spec_h = (struct ethhdr *)hdrs->raw.spec;
+ mask_h = (struct ethhdr *)hdrs->raw.mask;
+ spec_h->h_proto = htons(ETH_P_IP);
+ mask_h->h_proto = htons(0xFFFF);
+ off_base += ETH_HLEN;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Only 802_3 and ip filter protocols are supported");
+ status = -EOPNOTSUPP;
+ goto free_alloc;
+ }
+
+ for (i = 0; i < cls_u32->knode.sel->nkeys; i++) {
+ __be32 val, mask;
+ int off;
+
+ off = off_base + cls_u32->knode.sel->keys[i].off;
+ val = cls_u32->knode.sel->keys[i].val;
+ mask = cls_u32->knode.sel->keys[i].mask;
+
+ if (off >= sizeof(hdrs->raw.spec)) {
+ NL_SET_ERR_MSG_MOD(extack, "Input exceeds maximum allowed.");
+ status = -EINVAL;
+ goto free_alloc;
+ }
+
+ memcpy(&hdrs->raw.spec[off], &val, sizeof(val));
+ memcpy(&hdrs->raw.mask[off], &mask, sizeof(mask));
+ hdrs->raw.pkt_len = off + sizeof(val);
+ }
+
+ /* Only one action is allowed */
+ rule_cfg->action_set.count = 1;
+ vact = &rule_cfg->action_set.actions[0];
+ exts = cls_u32->knode.exts;
+
+ tcf_exts_for_each_action(i, act, exts) {
+ /* FDIR queue */
+ if (is_tcf_skbedit_rx_queue_mapping(act)) {
+ q_index = tcf_skbedit_rx_queue_mapping(act);
+ if (q_index >= adapter->num_active_queues) {
+ status = -EINVAL;
+ goto free_alloc;
+ }
+
+ vact->type = VIRTCHNL_ACTION_QUEUE;
+ vact->act_conf.queue.index = q_index;
+ break;
+ }
+
+ /* Drop */
+ if (is_tcf_gact_shot(act)) {
+ vact->type = VIRTCHNL_ACTION_DROP;
+ break;
+ }
+
+ /* Unsupported action */
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action.");
+ status = -EOPNOTSUPP;
+ goto free_alloc;
+ }
+
+ fltr->vc_add_msg.vsi_id = adapter->vsi.id;
+ fltr->cls_u32_handle = cls_u32->knode.handle;
+ return iavf_fdir_add_fltr(adapter, fltr);
+
+free_alloc:
+ kfree(fltr);
+ return status;
+}
+
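Each u32 key is a 32-bit value/mask pair at a byte offset; iavf_add_cls_u32() copies it into the raw spec/mask buffers after the optional synthetic MAC header. A hedged sketch of that placement step, with the bounds check as in the code above and a hypothetical helper name:

	#include <linux/pkt_cls.h>
	#include <linux/string.h>

	static int example_place_key(u8 *spec, u8 *mask, size_t buf_size,
				     int base, const struct tc_u32_key *key)
	{
		int off = base + key->off;

		if (off >= buf_size)
			return -EINVAL;	/* key lands past the raw buffer */
		memcpy(&spec[off], &key->val, sizeof(key->val));
		memcpy(&mask[off], &key->mask, sizeof(key->mask));
		return 0;
	}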
+/**
+ * iavf_del_cls_u32 - Delete U32 classifier offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_del_cls_u32(struct iavf_adapter *adapter,
+ struct tc_cls_u32_offload *cls_u32)
+{
+ return iavf_fdir_del_fltr(adapter, true, cls_u32->knode.handle);
+}
+
+/**
+ * iavf_setup_tc_cls_u32 - U32 filter offloads
+ * @adapter: pointer to iavf adapter structure
+ * @cls_u32: pointer to tc_cls_u32_offload struct with flow info
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int iavf_setup_tc_cls_u32(struct iavf_adapter *adapter,
+ struct tc_cls_u32_offload *cls_u32)
+{
+ if (!TC_U32_SUPPORT(adapter) || !FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ switch (cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return iavf_add_cls_u32(adapter, cls_u32);
+ case TC_CLSU32_DELETE_KNODE:
+ return iavf_del_cls_u32(adapter, cls_u32);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
* iavf_setup_tc_block_cb - block callback for tc
* @type: type of offload
* @type_data: offload data
@@ -4050,6 +4198,8 @@ static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
return iavf_setup_tc_cls_flower(cb_priv, type_data);
+ case TC_SETUP_CLSU32:
+ return iavf_setup_tc_cls_u32(cb_priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -4332,8 +4482,8 @@ static void iavf_disable_fdir(struct iavf_adapter *adapter)
fdir->state == IAVF_FDIR_FLTR_INACTIVE) {
/* Delete filters not registered in PF */
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
} else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING ||
fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
fdir->state == IAVF_FDIR_FLTR_ACTIVE) {
@@ -4843,9 +4993,11 @@ int iavf_process_config(struct iavf_adapter *adapter)
/* get HW VLAN features that can be toggled */
hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
- /* Enable cloud filter if ADQ is supported */
- if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
+ /* Enable HW TC offload if ADQ or tc U32 is supported */
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ ||
+ TC_U32_SUPPORT(adapter))
hw_features |= NETIF_F_HW_TC;
+
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
hw_features |= NETIF_F_GSO_UDP_L4;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 1e543f6a7c30..7e810b65380c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -142,6 +142,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP |
+ VIRTCHNL_VF_OFFLOAD_TC_U32 |
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
@@ -1961,8 +1962,8 @@ static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
* list on PF is already cleared after a reset
*/
list_del(&f->list);
+ iavf_dec_fdir_active_fltr(adapter, f);
kfree(f);
- adapter->fdir_active_fltr--;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -2135,8 +2136,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
dev_err(&adapter->pdev->dev,
"%s\n", msg);
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -2451,8 +2452,12 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
- dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
- fdir->loc);
+ if (!iavf_is_raw_fdir(fdir))
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
+ fdir->loc);
+ else
+ dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is added\n",
+ TC_U32_USERHTID(fdir->cls_u32_handle));
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
fdir->flow_id = add_fltr->flow_id;
} else {
@@ -2460,8 +2465,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
add_fltr->status);
iavf_print_fdir_fltr(adapter, fdir);
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
}
}
}
@@ -2479,11 +2484,15 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
del_fltr->status ==
VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
- dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
- fdir->loc);
+ if (!iavf_is_raw_fdir(fdir))
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
+ fdir->loc);
+ else
+ dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is deleted\n",
+ TC_U32_USERHTID(fdir->cls_u32_handle));
list_del(&fdir->list);
+ iavf_dec_fdir_active_fltr(adapter, fdir);
kfree(fdir);
- adapter->fdir_active_fltr--;
} else {
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 03500e28ac99..b4f6fa4ba13d 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -28,6 +28,8 @@ ice-y := ice_main.o \
ice_vlan_mode.o \
ice_flex_pipe.o \
ice_flow.o \
+ ice_parser.o \
+ ice_parser_rt.o \
ice_idc.o \
devlink/devlink.o \
devlink/devlink_port.o \
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 66f02988d549..0be1a98d7cc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -2632,12 +2632,16 @@ struct ice_aq_desc {
/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
#define ICE_AQ_LG_BUF 512
+#define ICE_AQ_FLAG_DD_S 0
+#define ICE_AQ_FLAG_CMP_S 1
#define ICE_AQ_FLAG_ERR_S 2
#define ICE_AQ_FLAG_LB_S 9
#define ICE_AQ_FLAG_RD_S 10
#define ICE_AQ_FLAG_BUF_S 12
#define ICE_AQ_FLAG_SI_S 13
+#define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */
+#define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */
#define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */
#define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */
#define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 66f29bac783a..27208a60cece 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -10,6 +10,7 @@
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
+#include "ice_parser.h"
#include <linux/avf/virtchnl.h>
#include "ice_switch.h"
#include "ice_fdir.h"
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index ffaa6511c455..e3959ad442a2 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -99,17 +99,6 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
return -ENOMEM;
cq->sq.desc_buf.size = size;
- cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
- sizeof(struct ice_sq_cd), GFP_KERNEL);
- if (!cq->sq.cmd_buf) {
- dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
- cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
- cq->sq.desc_buf.va = NULL;
- cq->sq.desc_buf.pa = 0;
- cq->sq.desc_buf.size = 0;
- return -ENOMEM;
- }
-
return 0;
}
@@ -188,7 +177,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
if (cq->rq_buf_size > ICE_AQ_LG_BUF)
desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
desc->opcode = 0;
- /* This is in accordance with Admin queue design, there is no
+ /* This is in accordance with control queue design, there is no
* register for buffer size configuration
*/
desc->datalen = cpu_to_le16(bi->size);
@@ -338,8 +327,6 @@ do { \
(qi)->ring.r.ring##_bi[i].size = 0;\
} \
} \
- /* free the buffer info list */ \
- devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
/* free DMA head */ \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
} while (0)
@@ -405,11 +392,11 @@ init_ctrlq_exit:
}
/**
- * ice_init_rq - initialize ARQ
+ * ice_init_rq - initialize receive side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
- * The main initialization routine for the Admin Receive (Event) Queue.
+ * The main initialization routine for the receive side of a control queue.
* Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_rq_entries
@@ -465,7 +452,7 @@ init_ctrlq_exit:
}
/**
- * ice_shutdown_sq - shutdown the Control ATQ
+ * ice_shutdown_sq - shutdown the transmit side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
@@ -482,7 +469,7 @@ static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto shutdown_sq_out;
}
- /* Stop firmware AdminQ processing */
+ /* Stop processing of the control queue */
wr32(hw, cq->sq.head, 0);
wr32(hw, cq->sq.tail, 0);
wr32(hw, cq->sq.len, 0);
@@ -501,7 +488,7 @@ shutdown_sq_out:
}
/**
- * ice_aq_ver_check - Check the reported AQ API version.
+ * ice_aq_ver_check - Check the reported AQ API version
* @hw: pointer to the hardware structure
*
* Checks if the driver should load on a given AQ API version.
@@ -521,14 +508,20 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
} else if (hw->api_maj_ver == exp_fw_api_ver_major) {
if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
dev_info(ice_hw_to_dev(hw),
- "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
dev_info(ice_hw_to_dev(hw),
- "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
} else {
/* Major API version is older than expected, log a warning */
dev_info(ice_hw_to_dev(hw),
- "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
}
return true;
}
@@ -855,7 +848,7 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw)
}
/**
- * ice_clean_sq - cleans Admin send queue (ATQ)
+ * ice_clean_sq - cleans send side of a control queue
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
@@ -865,21 +858,17 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
struct ice_ctl_q_ring *sq = &cq->sq;
u16 ntc = sq->next_to_clean;
- struct ice_sq_cd *details;
struct ice_aq_desc *desc;
desc = ICE_CTL_Q_DESC(*sq, ntc);
- details = ICE_CTL_Q_DETAILS(*sq, ntc);
while (rd32(hw, cq->sq.head) != ntc) {
ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
memset(desc, 0, sizeof(*desc));
- memset(details, 0, sizeof(*details));
ntc++;
if (ntc == sq->count)
ntc = 0;
desc = ICE_CTL_Q_DESC(*sq, ntc);
- details = ICE_CTL_Q_DETAILS(*sq, ntc);
}
sq->next_to_clean = ntc;
@@ -888,18 +877,43 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}
/**
+ * ice_ctl_q_str - Convert control queue type to string
+ * @qtype: the control queue type
+ *
+ * Return: A string name for the given control queue type.
+ */
+static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
+{
+ switch (qtype) {
+ case ICE_CTL_Q_UNKNOWN:
+ return "Unknown CQ";
+ case ICE_CTL_Q_ADMIN:
+ return "AQ";
+ case ICE_CTL_Q_MAILBOX:
+ return "MBXQ";
+ case ICE_CTL_Q_SB:
+ return "SBQ";
+ default:
+ return "Unrecognized CQ";
+ }
+}
+
+/**
* ice_debug_cq
* @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
* @desc: pointer to control queue descriptor
* @buf: pointer to command buffer
* @buf_len: max length of buf
+ * @response: true if this is the writeback response
*
* Dumps debug log about control command with descriptor contents.
*/
-static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
+static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ void *desc, void *buf, u16 buf_len, bool response)
{
struct ice_aq_desc *cq_desc = desc;
- u16 len;
+ u16 datalen, flags;
if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
@@ -908,48 +922,63 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
if (!desc)
return;
- len = le16_to_cpu(cq_desc->datalen);
+ datalen = le16_to_cpu(cq_desc->datalen);
+ flags = le16_to_cpu(cq_desc->flags);
- ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
- le16_to_cpu(cq_desc->opcode),
- le16_to_cpu(cq_desc->flags),
- le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
- ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n\tcookie (h,l) 0x%08X 0x%08X\n\tparam (0,1) 0x%08X 0x%08X\n\taddr (h,l) 0x%08X 0x%08X\n",
+ ice_ctl_q_str(cq->qtype), response ? "Response" : "Command",
+ le16_to_cpu(cq_desc->opcode), flags, datalen,
+ le16_to_cpu(cq_desc->retval),
le32_to_cpu(cq_desc->cookie_high),
- le32_to_cpu(cq_desc->cookie_low));
- ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->cookie_low),
le32_to_cpu(cq_desc->params.generic.param0),
- le32_to_cpu(cq_desc->params.generic.param1));
- ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->params.generic.param1),
le32_to_cpu(cq_desc->params.generic.addr_high),
le32_to_cpu(cq_desc->params.generic.addr_low));
- if (buf && cq_desc->datalen != 0) {
- ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
- if (buf_len < len)
- len = buf_len;
-
- ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
+ /* Dump buffer iff 1) one exists and 2) is either a response indicated
+ * by the DD and/or CMP flag set or a command with the RD flag set.
+ */
+ if (buf && cq_desc->datalen &&
+ (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP | ICE_AQ_FLAG_RD))) {
+ char prefix[] = KBUILD_MODNAME " 0x12341234 0x12341234 ";
+
+ sprintf(prefix, KBUILD_MODNAME " 0x%08X 0x%08X ",
+ le32_to_cpu(cq_desc->params.generic.addr_high),
+ le32_to_cpu(cq_desc->params.generic.addr_low));
+ ice_debug_array_w_prefix(hw, ICE_DBG_AQ_DESC_BUF, prefix,
+ buf,
+ min_t(u16, buf_len, datalen));
}
}
/**
- * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
+ * ice_sq_done - poll until the last send on a control queue has completed
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
*
- * Returns true if the firmware has processed all descriptors on the
- * admin send queue. Returns false if there are still requests pending.
+ * Use read_poll_timeout to poll the control queue head, checking until it
+ * matches next_to_use. According to the control queue designers, this has
+ * better timing reliability than the DD bit.
+ *
+ * Return: true if all the descriptors on the send side of a control queue
+ * are finished processing, false otherwise.
*/
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- /* AQ designers suggest use of head for better
- * timing reliability than DD bit
+ u32 head;
+
+ /* Wait a short time before the initial check, to allow hardware time
+ * for completion.
*/
- return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
+ udelay(5);
+
+ return !rd32_poll_timeout(hw, cq->sq.head,
+ head, head == cq->sq.next_to_use,
+ 20, ICE_CTL_Q_SQ_CMD_TIMEOUT);
}
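rd32_poll_timeout() is a read_poll_timeout() wrapper, which also explains why this patch converts ICE_CTL_Q_SQ_CMD_TIMEOUT from jiffies (HZ) to microseconds (USEC_PER_SEC) further down: the helper takes its sleep interval and timeout in microseconds and returns 0 on success or -ETIMEDOUT. A hedged sketch of the equivalent open-coded loop:

	#include <linux/delay.h>
	#include <linux/jiffies.h>

	static bool example_sq_done(struct ice_hw *hw,
				    struct ice_ctl_q_info *cq)
	{
		unsigned long end = jiffies +
				    usecs_to_jiffies(ICE_CTL_Q_SQ_CMD_TIMEOUT);

		do {
			if (rd32(hw, cq->sq.head) == cq->sq.next_to_use)
				return true;
			usleep_range(20, 40);	/* ~20us poll interval */
		} while (time_before(jiffies, end));

		/* one final read after the deadline, like read_poll_timeout() */
		return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
	}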
/**
- * ice_sq_send_cmd - send command to Control Queue (ATQ)
+ * ice_sq_send_cmd - send command to a control queue
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @desc: prefilled descriptor describing the command
@@ -957,8 +986,9 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure
*
- * This is the main send command routine for the ATQ. It runs the queue,
- * cleans the queue, etc.
+ * Main command for the transmit side of a control queue. It puts the command
+ * on the queue, bumps the tail, waits for processing of the command, captures
+ * command status and results, etc.
*/
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@@ -968,8 +998,6 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_dma_mem *dma_buf = NULL;
struct ice_aq_desc *desc_on_ring;
bool cmd_completed = false;
- struct ice_sq_cd *details;
- unsigned long timeout;
int status = 0;
u16 retval = 0;
u32 val = 0;
@@ -1013,12 +1041,6 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
goto sq_send_command_error;
}
- details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
- if (cd)
- *details = *cd;
- else
- memset(details, 0, sizeof(*details));
-
/* Call clean and check queue available function to reclaim the
* descriptors that were processed by FW/MBX; the function returns the
* number of desc available. The clean function called here could be
@@ -1055,7 +1077,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
/* Debug desc and buffer */
ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
- ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
+ ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false);
(cq->sq.next_to_use)++;
if (cq->sq.next_to_use == cq->sq.count)
@@ -1063,20 +1085,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
wr32(hw, cq->sq.tail, cq->sq.next_to_use);
ice_flush(hw);
- /* Wait a short time before initial ice_sq_done() check, to allow
- * hardware time for completion.
+ /* Wait for the command to complete. If it finishes within the
+ * timeout, copy the descriptor back to temp.
*/
- udelay(5);
-
- timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
- do {
- if (ice_sq_done(hw, cq))
- break;
-
- usleep_range(100, 150);
- } while (time_before(jiffies, timeout));
-
- /* if ready, copy the desc back to temp */
if (ice_sq_done(hw, cq)) {
memcpy(desc, desc_on_ring, sizeof(*desc));
if (buf) {
@@ -1108,12 +1119,11 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
- ice_debug_cq(hw, (void *)desc, buf, buf_size);
+ ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true);
/* save writeback AQ if requested */
- if (details->wb_desc)
- memcpy(details->wb_desc, desc_on_ring,
- sizeof(*details->wb_desc));
+ if (cd && cd->wb_desc)
+ memcpy(cd->wb_desc, desc_on_ring, sizeof(*cd->wb_desc));
/* update the error if time out occurred */
if (!cmd_completed) {
@@ -1154,9 +1164,9 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
*
- * This function cleans one Admin Receive Queue element and returns
- * the contents through e. It can also return how many events are
- * left to process through 'pending'.
+ * Clean one element from the receive side of a control queue. On return 'e'
+ * contains contents of the message, and 'pending' contains the number of
+ * events left to process.
*/
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@@ -1212,7 +1222,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
- ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
+ ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message size
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 1d54b1cdb1c5..ca97b7365a1b 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -43,14 +43,13 @@ enum ice_ctl_q {
};
/* Control Queue timeout settings - max delay 1s */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT HZ /* Wait max 1s */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT USEC_PER_SEC
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
struct ice_dma_mem desc_buf; /* descriptor ring memory */
- void *cmd_buf; /* command buffer memory */
union {
struct ice_dma_mem *sq_bi;
@@ -80,8 +79,6 @@ struct ice_sq_cd {
struct ice_aq_desc *wb_desc;
};
-#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i]))
-
/* rq event information */
struct ice_rq_event_info {
struct ice_aq_desc desc;
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index f182179529b7..953262b88a58 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -289,11 +289,11 @@ void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
* indicates a base offset of 10, and the index for the entry is 2, then
* section handler function should set the offset to 10 + 2 = 12.
*/
-static void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
- struct ice_pkg_enum *state, u32 sect_type,
- u32 *offset,
- void *(*handler)(u32 sect_type, void *section,
- u32 index, u32 *offset))
+void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state, u32 sect_type,
+ u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
{
void *entry;
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index 622543f08b43..97f272317475 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -261,10 +261,17 @@ struct ice_meta_sect {
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
+#define ICE_SID_RXPARSER_CAM 50
+#define ICE_SID_RXPARSER_NOMATCH_CAM 51
+#define ICE_SID_RXPARSER_IMEM 52
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_PROTO_GRP 57
#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_TXPARSER_BOOST_TCAM 66
+#define ICE_SID_RXPARSER_MARKER_GRP 72
+#define ICE_SID_RXPARSER_PG_SPILL 76
+#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
@@ -276,6 +283,7 @@ struct ice_meta_sect {
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
+#define ICE_SID_RXPARSER_FLAG_REDIR 97
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
@@ -451,6 +459,11 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
+void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset));
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index e92be6f130a3..cd95705d1e7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -9,6 +9,7 @@
#define ICE_CGU_STATE_ACQ_ERR_THRESHOLD 50
#define ICE_DPLL_PIN_IDX_INVALID 0xff
#define ICE_DPLL_RCLK_NUM_PER_PF 1
+#define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25
/**
* enum ice_dpll_pin_type - enumerate ice pin types:
@@ -30,6 +31,10 @@ static const char * const pin_type_name[] = {
[ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input",
};
+static const struct dpll_pin_frequency ice_esync_range[] = {
+ DPLL_PIN_FREQUENCY_RANGE(0, DPLL_PIN_FREQUENCY_1_HZ),
+};
+
/**
* ice_dpll_is_reset - check if reset is in progress
* @pf: private board structure
@@ -394,8 +399,8 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
switch (pin_type) {
case ICE_DPLL_PIN_TYPE_INPUT:
- ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
- NULL, &pin->flags[0],
+ ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, &pin->status,
+ NULL, NULL, &pin->flags[0],
&pin->freq, &pin->phase_adjust);
if (ret)
goto err;
@@ -430,7 +435,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
goto err;
parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL;
- if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
+ if (ICE_AQC_GET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
pin->state[pf->dplls.eec.dpll_idx] =
parent == pf->dplls.eec.dpll_idx ?
DPLL_PIN_STATE_CONNECTED :
@@ -1099,6 +1104,214 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_output_esync_set - callback for setting embedded sync
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @freq: requested embedded sync frequency
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting embedded sync frequency value
+ * on output pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_output_esync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ u8 flags = 0;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_OUT_EN)
+ flags = ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
+ if (freq == DPLL_PIN_FREQUENCY_1_HZ) {
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) {
+ ret = 0;
+ } else {
+ flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flags,
+ 0, 0, 0);
+ }
+ } else {
+ if (!(p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)) {
+ ret = 0;
+ } else {
+ flags &= ~ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flags,
+ 0, 0, 0);
+ }
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_output_esync_get - callback for getting embedded sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @esync: on success holds embedded sync pin properties
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting embedded sync frequency value
+ * and capabilities on output pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_output_esync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (!(p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_ABILITY) ||
+ p->freq != DPLL_PIN_FREQUENCY_10_MHZ) {
+ mutex_unlock(&pf->dplls.lock);
+ return -EOPNOTSUPP;
+ }
+ esync->range = ice_esync_range;
+ esync->range_num = ARRAY_SIZE(ice_esync_range);
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) {
+ esync->freq = DPLL_PIN_FREQUENCY_1_HZ;
+ esync->pulse = ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT;
+ } else {
+ esync->freq = 0;
+ esync->pulse = 0;
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_input_esync_set - callback for setting embedded sync
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @freq: requested embedded sync frequency
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting embedded sync frequency value
+ * on input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_esync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ u8 flags_en = 0;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN)
+ flags_en = ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN;
+ if (freq == DPLL_PIN_FREQUENCY_1_HZ) {
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) {
+ ret = 0;
+ } else {
+ flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0,
+ flags_en, 0, 0);
+ }
+ } else {
+ if (!(p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)) {
+ ret = 0;
+ } else {
+ flags_en &= ~ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0,
+ flags_en, 0, 0);
+ }
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_esync_get - callback for getting embedded sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @esync: on success holds embedded sync pin properties
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting embedded sync frequency value
+ * and capabilities on input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_esync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (!(p->status & ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_CAP) ||
+ p->freq != DPLL_PIN_FREQUENCY_10_MHZ) {
+ mutex_unlock(&pf->dplls.lock);
+ return -EOPNOTSUPP;
+ }
+ esync->range = ice_esync_range;
+ esync->range_num = ARRAY_SIZE(ice_esync_range);
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) {
+ esync->freq = DPLL_PIN_FREQUENCY_1_HZ;
+ esync->pulse = ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT;
+ } else {
+ esync->freq = 0;
+ esync->pulse = 0;
+ }
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
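Taken together, the input callbacks implement a single contract: embedded sync is reported only for pins locked at 10 MHz with the ESYNC capability, enabling it means freq == DPLL_PIN_FREQUENCY_1_HZ (reported with a 25% pulse-high duty), and any other frequency disables it while preserving the input-enable flag. A minimal sketch of a caller exercising that contract (illustrative only, not part of the patch):

static int ice_example_toggle_esync(const struct dpll_pin *pin, void *pin_priv,
				    const struct dpll_device *dpll,
				    void *dpll_priv)
{
	struct dpll_pin_esync es;
	int ret;

	ret = ice_dpll_input_esync_get(pin, pin_priv, dpll, dpll_priv,
				       &es, NULL);
	if (ret)
		return ret;	/* -EOPNOTSUPP unless 10 MHz and ESYNC capable */

	/* 0 -> 1 Hz enables embedded sync; any other value disables it */
	return ice_dpll_input_esync_set(pin, pin_priv, dpll, dpll_priv,
					es.freq ? 0 : DPLL_PIN_FREQUENCY_1_HZ,
					NULL);
}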
+/**
* ice_dpll_rclk_state_on_pin_set - set a state on rclk pin
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -1222,6 +1435,8 @@ static const struct dpll_pin_ops ice_dpll_input_ops = {
.phase_adjust_get = ice_dpll_pin_phase_adjust_get,
.phase_adjust_set = ice_dpll_input_phase_adjust_set,
.phase_offset_get = ice_dpll_phase_offset_get,
+ .esync_set = ice_dpll_input_esync_set,
+ .esync_get = ice_dpll_input_esync_get,
};
static const struct dpll_pin_ops ice_dpll_output_ops = {
@@ -1232,6 +1447,8 @@ static const struct dpll_pin_ops ice_dpll_output_ops = {
.direction_get = ice_dpll_output_direction,
.phase_adjust_get = ice_dpll_pin_phase_adjust_get,
.phase_adjust_set = ice_dpll_output_phase_adjust_set,
+ .esync_set = ice_dpll_output_esync_set,
+ .esync_get = ice_dpll_output_esync_get,
};
static const struct dpll_device_ops ice_dpll_ops = {
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
index 93172e93995b..c320f1bf7d6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.h
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -31,6 +31,7 @@ struct ice_dpll_pin {
struct dpll_pin_properties prop;
u32 freq;
s32 phase_adjust;
+ u8 status;
};
/** ice_dpll - store info required for DPLL control
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index bc79ba974e49..c7641d326ba5 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4725,6 +4725,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
.cap_rss_sym_xor_supported = true,
+ .rxfh_per_ctx_key = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_fec_stats = ice_get_fec_stats,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 20d5db88c99f..ed95072ca6e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -2981,6 +2981,50 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
}
/**
+ * ice_disable_fd_swap - set registers appropriately to disable FD SWAP
+ * @hw: pointer to the HW struct
+ * @prof_id: profile ID
+ */
+static void
+ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id)
+{
+ u16 swap_val, fvw_num;
+ unsigned int i;
+
+ swap_val = ICE_SWAP_VALID;
+ fvw_num = hw->blk[ICE_BLK_FD].es.fvw / ICE_FDIR_REG_SET_SIZE;
+
+ /* The SWAP flag in the programming descriptor doesn't work, so
+ * disable the swap option instead by programming the relevant
+ * SWAP and INSET register sets directly.
+ */
+ for (i = 0; i < fvw_num; i++) {
+ u32 raw_swap, raw_in;
+ unsigned int j;
+
+ raw_swap = 0;
+ raw_in = 0;
+
+ for (j = 0; j < ICE_FDIR_REG_SET_SIZE; j++) {
+ raw_swap |= (swap_val++) << (j * BITS_PER_BYTE);
+ raw_in |= ICE_INSET_DFLT << (j * BITS_PER_BYTE);
+ }
+
+ /* write the FDIR swap register set */
+ wr32(hw, GLQF_FDSWAP(prof_id, i), raw_swap);
+
+ ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): 0x%x = 0x%08x\n",
+ prof_id, i, GLQF_FDSWAP(prof_id, i), raw_swap);
+
+ /* write the FDIR inset register set */
+ wr32(hw, GLQF_FDINSET(prof_id, i), raw_in);
+
+ ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): 0x%x = 0x%08x\n",
+ prof_id, i, GLQF_FDINSET(prof_id, i), raw_in);
+ }
+}
+
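For reference, the packing the loop above produces for one register set, written out (a sketch; assumes ICE_INSET_DFLT is a single-byte value, with s standing for ICE_SWAP_VALID):

/* With ICE_FDIR_REG_SET_SIZE == 4, register set 0 receives:
 *   raw_swap = s | (s + 1) << 8 | (s + 2) << 16 | (s + 3) << 24;
 *   raw_in   = ICE_INSET_DFLT * 0x01010101;   (same byte in all four lanes)
 * register set 1 continues with s + 4 .. s + 7, and so on.
 */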
+/**
* ice_add_prof - add profile
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -2991,6 +3035,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* @es: extraction sequence (length of array is determined by the block)
* @masks: mask for extraction sequence
* @symm: symmetric setting for RSS profiles
+ * @fd_swap: enable/disable FDIR paired src/dst fields swap option
*
* This function registers a profile, which matches a set of PTYPES with a
* particular extraction sequence. While the hardware profile is allocated
@@ -3000,7 +3045,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm)
+ struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap)
{
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
@@ -3020,7 +3065,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
status = ice_alloc_prof_id(hw, blk, &prof_id);
if (status)
goto err_ice_add_prof;
- if (blk == ICE_BLK_FD) {
+ if (blk == ICE_BLK_FD && fd_swap) {
/* For Flow Director block, the extraction sequence may
* need to be altered in the case where there are paired
* fields that have no match. This is necessary because
@@ -3031,6 +3076,8 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
status = ice_update_fd_swap(hw, prof_id, es);
if (status)
goto err_ice_add_prof;
+ } else if (blk == ICE_BLK_FD) {
+ ice_disable_fd_swap(hw, prof_id);
}
status = ice_update_prof_masking(hw, blk, prof_id, masks);
if (status)
@@ -4099,6 +4146,54 @@ err_ice_add_prof_id_flow:
}
/**
+ * ice_flow_assoc_fdir_prof - add an FDIR profile for main/ctrl VSI
+ * @hw: pointer to the HW struct
+ * @blk: HW block
+ * @dest_vsi: dest VSI
+ * @fdir_vsi: fdir programming VSI
+ * @hdl: profile handle
+ *
+ * Update the hardware tables to enable the FDIR profile indicated by @hdl for
+ * the VSI specified by @dest_vsi. On success, the flow will be enabled.
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+int
+ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
+ u16 dest_vsi, u16 fdir_vsi, u64 hdl)
+{
+ u16 vsi_num;
+ int status;
+
+ if (blk != ICE_BLK_FD)
+ return -EINVAL;
+
+ vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
+ status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for main VSI flow entry: %d\n",
+ status);
+ return status;
+ }
+
+ vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi);
+ status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for ctrl VSI flow entry: %d\n",
+ status);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, hdl);
+
+ return status;
+}
+
+/**
* ice_rem_prof_from_list - remove a profile from list
* @hw: pointer to the HW struct
* @lst: list to remove the profile from
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index b39d7cdc381f..90b9b0993122 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -6,6 +6,8 @@
#include "ice_type.h"
+#define ICE_FDIR_REG_SET_SIZE 4
+
int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
@@ -42,13 +44,16 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm);
+ struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
+int
+ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
+ u16 dest_vsi, u16 fdir_vsi, u64 hdl);
enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_ddp_state
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index fc2b58f56279..d97b751052f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -409,6 +409,29 @@ static const u32 ice_ptypes_gtpc_tid[] = {
};
/* Packet types for GTPU */
+static const struct ice_ptype_attributes ice_attr_gtpu_session[] = {
+ { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION },
+ { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION },
+};
+
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
@@ -1400,7 +1423,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
params->attr, params->attr_cnt, params->es,
- params->mask, symm);
+ params->mask, symm, true);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@@ -1523,6 +1546,90 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
return status;
}
+#define FLAG_GTP_EH_PDU_LINK BIT_ULL(13)
+#define FLAG_GTP_EH_PDU BIT_ULL(14)
+
+#define HI_BYTE_IN_WORD GENMASK(15, 8)
+#define LO_BYTE_IN_WORD GENMASK(7, 0)
+
+#define FLAG_GTPU_MSK \
+ (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
+#define FLAG_GTPU_UP \
+ (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK)
+#define FLAG_GTPU_DW FLAG_GTP_EH_PDU
+
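These flag bits drive the GTPU attribute selection in ice_flow_set_parser_prof() below; as a quick reference:

/* prof->flags == FLAG_GTPU_DW (PDU, no link bit)     -> downlink attributes
 * prof->flags == FLAG_GTPU_UP (PDU + link bit)       -> uplink attributes
 * any other flags, if flags_msk covers FLAG_GTPU_MSK -> session attributes
 */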
+/**
+ * ice_flow_set_parser_prof - Set flow profile based on the parsed profile info
+ * @hw: pointer to the HW struct
+ * @dest_vsi: dest VSI
+ * @fdir_vsi: fdir programming VSI
+ * @prof: stores parsed profile info from raw flow
+ * @blk: classification blk
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+int
+ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
+ struct ice_parser_profile *prof, enum ice_block blk)
+{
+ u64 id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
+ struct ice_flow_prof_params *params __free(kfree);
+ u8 fv_words = hw->blk[blk].es.fvw;
+ int status;
+ int i, idx;
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
+ params->es[i].prot_id = ICE_PROT_INVALID;
+ params->es[i].off = ICE_FV_OFFSET_INVAL;
+ }
+
+ for (i = 0; i < prof->fv_num; i++) {
+ if (hw->blk[blk].es.reverse)
+ idx = fv_words - i - 1;
+ else
+ idx = i;
+ params->es[idx].prot_id = prof->fv[i].proto_id;
+ params->es[idx].off = prof->fv[i].offset;
+ params->mask[idx] = (((prof->fv[i].msk) << BITS_PER_BYTE) &
+ HI_BYTE_IN_WORD) |
+ (((prof->fv[i].msk) >> BITS_PER_BYTE) &
+ LO_BYTE_IN_WORD);
+ }
+
+ switch (prof->flags) {
+ case FLAG_GTPU_DW:
+ params->attr = ice_attr_gtpu_down;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
+ break;
+ case FLAG_GTPU_UP:
+ params->attr = ice_attr_gtpu_up;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
+ break;
+ default:
+ if (prof->flags_msk & FLAG_GTPU_MSK) {
+ params->attr = ice_attr_gtpu_session;
+ params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session);
+ }
+ break;
+ }
+
+ status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
+ params->attr, params->attr_cnt,
+ params->es, params->mask, false, false);
+ if (status)
+ return status;
+
+ status = ice_flow_assoc_fdir_prof(hw, blk, dest_vsi, fdir_vsi, id);
+ if (status)
+ ice_rem_prof(hw, blk, id);
+
+ return status;
+}
+
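One detail worth calling out: the HI/LO_BYTE_IN_WORD packing above is just a 16-bit byte swap of the extracted mask, for example:

/* ((0x12ab << 8) & HI_BYTE_IN_WORD) | ((0x12ab >> 8) & LO_BYTE_IN_WORD)
 * == 0xab12, the same result swab16(0x12ab) would give.
 */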
/**
* ice_flow_add_prof - Add a flow profile for packet segments and matched fields
* @hw: pointer to the HW struct
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 2fd2e0cb483d..6cb7bb879c98 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -5,6 +5,7 @@
#define _ICE_FLOW_H_
#include "ice_flex_type.h"
+#include "ice_parser.h"
#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
@@ -326,6 +327,7 @@ enum ice_rss_cfg_hdr_type {
ICE_RSS_ANY_HEADERS
};
+struct ice_vsi;
struct ice_rss_hash_cfg {
u32 addl_hdrs; /* protocol header fields */
u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
@@ -445,6 +447,9 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
bool symm, struct ice_flow_prof **prof);
int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
int
+ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
+ struct ice_parser_profile *prof, enum ice_block blk);
+int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi, enum ice_flow_priority prio,
void *data, u64 *entry_h);
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index a2562f04267f..b9f383494b3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -12,6 +12,7 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
+#include <linux/iopoll.h>
#include <linux/pci_ids.h>
#ifndef CONFIG_64BIT
#include <linux/io-64-nonatomic-lo-hi.h>
@@ -23,6 +24,9 @@
#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define rd32_poll_timeout(a, addr, val, cond, delay_us, timeout_us) \
+ read_poll_timeout(rd32, val, cond, delay_us, timeout_us, false, a, addr)
+
#define ice_flush(a) rd32((a), GLGEN_STAT)
#define ICE_M(m, s) ((m ## U) << (s))
@@ -39,11 +43,10 @@ struct device *ice_hw_to_dev(struct ice_hw *hw);
#define ice_debug(hw, type, fmt, args...) \
dev_dbg(ice_hw_to_dev(hw), fmt, ##args)
-#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
- print_hex_dump_debug(KBUILD_MODNAME " ", \
- DUMP_PREFIX_OFFSET, rowsize, \
- groupsize, buf, len, false)
-#else
+#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \
+ print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, \
+ rowsize, groupsize, buf, len, false)
+#else /* CONFIG_DYNAMIC_DEBUG */
#define ice_debug(hw, type, fmt, args...) \
do { \
if ((type) & (hw)->debug_mask) \
@@ -51,16 +54,15 @@ do { \
} while (0)
#ifdef DEBUG
-#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \
do { \
if ((type) & (hw)->debug_mask) \
- print_hex_dump_debug(KBUILD_MODNAME, \
- DUMP_PREFIX_OFFSET, \
+ print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET,\
rowsize, groupsize, buf, \
len, false); \
} while (0)
-#else
-#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+#else /* DEBUG */
+#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \
do { \
struct ice_hw *hw_l = hw; \
if ((type) & (hw_l)->debug_mask) { \
@@ -78,4 +80,10 @@ do { \
#endif /* DEBUG */
#endif /* CONFIG_DYNAMIC_DEBUG */
+#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+ _ice_debug_array(hw, type, KBUILD_MODNAME, rowsize, groupsize, buf, len)
+
+#define ice_debug_array_w_prefix(hw, type, prefix, buf, len) \
+ _ice_debug_array(hw, type, prefix, 16, 1, buf, len)
+
#endif /* _ICE_OSDEP_H_ */
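A short usage sketch for the two helpers added here (the register, mask, buffer, and length names are hypothetical; assumes a struct ice_hw *hw in scope):

u32 val;
int err;

/* poll every 10 us, give up after 1 ms; err is 0 or -ETIMEDOUT */
err = rd32_poll_timeout(hw, EXAMPLE_STATUS_REG, val,
			val & EXAMPLE_DONE_M, 10, 1000);
if (!err)
	ice_debug_array_w_prefix(hw, ICE_DBG_PARSER, "example: ", buf, len);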
diff --git a/drivers/net/ethernet/intel/ice/ice_parser.c b/drivers/net/ethernet/intel/ice/ice_parser.c
new file mode 100644
index 000000000000..664beb64f557
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_parser.c
@@ -0,0 +1,2430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "ice_common.h"
+
+struct ice_pkg_sect_hdr {
+ __le16 count;
+ __le16 offset;
+};
+
+/**
+ * ice_parser_sect_item_get - parse an item from a section
+ * @sect_type: section type
+ * @section: section object
+ * @index: index of the item to get
+ * @offset: dummy parameter, present only to match the handler prototype of
+ *	    ice_pkg_enum_entry
+ *
+ * Return: a pointer to the item or NULL.
+ */
+static void *ice_parser_sect_item_get(u32 sect_type, void *section,
+ u32 index, u32 __maybe_unused *offset)
+{
+ size_t data_off = ICE_SEC_DATA_OFFSET;
+ struct ice_pkg_sect_hdr *hdr;
+ size_t size;
+
+ if (!section)
+ return NULL;
+
+ switch (sect_type) {
+ case ICE_SID_RXPARSER_IMEM:
+ size = ICE_SID_RXPARSER_IMEM_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_METADATA_INIT:
+ size = ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_CAM:
+ size = ICE_SID_RXPARSER_CAM_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_PG_SPILL:
+ size = ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_NOMATCH_CAM:
+ size = ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_NOMATCH_SPILL:
+ size = ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_BOOST_TCAM:
+ size = ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE;
+ break;
+ case ICE_SID_LBL_RXPARSER_TMEM:
+ data_off = ICE_SEC_LBL_DATA_OFFSET;
+ size = ICE_SID_LBL_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_MARKER_PTYPE:
+ size = ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_MARKER_GRP:
+ size = ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_PROTO_GRP:
+ size = ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE;
+ break;
+ case ICE_SID_RXPARSER_FLAG_REDIR:
+ size = ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE;
+ break;
+ default:
+ return NULL;
+ }
+
+ hdr = section;
+ if (index >= le16_to_cpu(hdr->count))
+ return NULL;
+
+ return section + data_off + index * size;
+}
+
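The lookup above assumes the following section layout (a sketch; offsets in bytes):

/*   0                 data_off
 *   +-----------------+----------+----------+-- ...
 *   | hdr: count, off | item 0   | item 1   |
 *   +-----------------+----------+----------+-- ...
 *
 * item @index lives at data_off + index * size, where data_off is
 * ICE_SEC_DATA_OFFSET for most sections and ICE_SEC_LBL_DATA_OFFSET for
 * the label section.
 */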
+/**
+ * ice_parser_create_table - create an item table from a section
+ * @hw: pointer to the hardware structure
+ * @sect_type: section type
+ * @item_size: item size in bytes
+ * @length: number of items in the table to create
+ * @parse_item: the function to parse the item
+ * @no_offset: ignore header offset, calculate index from 0
+ *
+ * Return: a pointer to the allocated table or ERR_PTR.
+ */
+static void *
+ice_parser_create_table(struct ice_hw *hw, u32 sect_type,
+ u32 item_size, u32 length,
+ void (*parse_item)(struct ice_hw *hw, u16 idx,
+ void *item, void *data,
+ int size), bool no_offset)
+{
+ struct ice_pkg_enum state = {};
+ struct ice_seg *seg = hw->seg;
+ void *table, *data, *item;
+ u16 idx = 0;
+
+ if (!seg)
+ return ERR_PTR(-EINVAL);
+
+ table = kzalloc(item_size * length, GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ do {
+ data = ice_pkg_enum_entry(seg, &state, sect_type, NULL,
+ ice_parser_sect_item_get);
+ seg = NULL;
+ if (data) {
+ struct ice_pkg_sect_hdr *hdr = state.sect;
+
+ if (!no_offset)
+ idx = le16_to_cpu(hdr->offset) +
+ state.entry_idx;
+
+ item = (void *)((uintptr_t)table + idx * item_size);
+ parse_item(hw, idx, item, data, item_size);
+
+ if (no_offset)
+ idx++;
+ }
+ } while (data);
+
+ return table;
+}
+
+/*** ICE_SID_RXPARSER_IMEM section ***/
+static void ice_imem_bst_bm_dump(struct ice_hw *hw, struct ice_bst_main *bm)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "boost main:\n");
+ dev_info(dev, "\talu0 = %d\n", bm->alu0);
+ dev_info(dev, "\talu1 = %d\n", bm->alu1);
+ dev_info(dev, "\talu2 = %d\n", bm->alu2);
+ dev_info(dev, "\tpg = %d\n", bm->pg);
+}
+
+static void ice_imem_bst_kb_dump(struct ice_hw *hw,
+ struct ice_bst_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "boost key builder:\n");
+ dev_info(dev, "\tpriority = %d\n", kb->prio);
+ dev_info(dev, "\ttsr_ctrl = %d\n", kb->tsr_ctrl);
+}
+
+static void ice_imem_np_kb_dump(struct ice_hw *hw,
+ struct ice_np_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "next proto key builder:\n");
+ dev_info(dev, "\topc = %d\n", kb->opc);
+ dev_info(dev, "\tstart_or_reg0 = %d\n", kb->start_reg0);
+ dev_info(dev, "\tlen_or_reg1 = %d\n", kb->len_reg1);
+}
+
+static void ice_imem_pg_kb_dump(struct ice_hw *hw,
+ struct ice_pg_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "parse graph key builder:\n");
+ dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena);
+ dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena);
+ dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena);
+ dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena);
+ dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx);
+ dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx);
+ dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx);
+ dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx);
+ dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx);
+}
+
+static void ice_imem_alu_dump(struct ice_hw *hw,
+ struct ice_alu *alu, int index)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "alu%d:\n", index);
+ dev_info(dev, "\topc = %d\n", alu->opc);
+ dev_info(dev, "\tsrc_start = %d\n", alu->src_start);
+ dev_info(dev, "\tsrc_len = %d\n", alu->src_len);
+ dev_info(dev, "\tshift_xlate_sel = %d\n", alu->shift_xlate_sel);
+ dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key);
+ dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id);
+ dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id);
+ dev_info(dev, "\tinc0 = %d\n", alu->inc0);
+ dev_info(dev, "\tinc1 = %d\n", alu->inc1);
+ dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc);
+ dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset);
+ dev_info(dev, "\tbranch_addr = %d\n", alu->branch_addr);
+ dev_info(dev, "\timm = %d\n", alu->imm);
+ dev_info(dev, "\tdst_start = %d\n", alu->dst_start);
+ dev_info(dev, "\tdst_len = %d\n", alu->dst_len);
+ dev_info(dev, "\tflags_extr_imm = %d\n", alu->flags_extr_imm);
+ dev_info(dev, "\tflags_start_imm = %d\n", alu->flags_start_imm);
+}
+
+/**
+ * ice_imem_dump - dump an imem item info
+ * @hw: pointer to the hardware structure
+ * @item: imem item to dump
+ */
+static void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %d\n", item->idx);
+ ice_imem_bst_bm_dump(hw, &item->b_m);
+ ice_imem_bst_kb_dump(hw, &item->b_kb);
+ dev_info(dev, "pg priority = %d\n", item->pg_prio);
+ ice_imem_np_kb_dump(hw, &item->np_kb);
+ ice_imem_pg_kb_dump(hw, &item->pg_kb);
+ ice_imem_alu_dump(hw, &item->alu0, 0);
+ ice_imem_alu_dump(hw, &item->alu1, 1);
+ ice_imem_alu_dump(hw, &item->alu2, 2);
+}
+
+#define ICE_IM_BM_ALU0 BIT(0)
+#define ICE_IM_BM_ALU1 BIT(1)
+#define ICE_IM_BM_ALU2 BIT(2)
+#define ICE_IM_BM_PG BIT(3)
+
+/**
+ * ice_imem_bm_init - parse 4 bits of Boost Main
+ * @bm: pointer to the Boost Main structure
+ * @data: Boost Main data to be parsed
+ */
+static void ice_imem_bm_init(struct ice_bst_main *bm, u8 data)
+{
+ bm->alu0 = FIELD_GET(ICE_IM_BM_ALU0, data);
+ bm->alu1 = FIELD_GET(ICE_IM_BM_ALU1, data);
+ bm->alu2 = FIELD_GET(ICE_IM_BM_ALU2, data);
+ bm->pg = FIELD_GET(ICE_IM_BM_PG, data);
+}
+
+#define ICE_IM_BKB_PRIO GENMASK(7, 0)
+#define ICE_IM_BKB_TSR_CTRL BIT(8)
+
+/**
+ * ice_imem_bkb_init - parse 10 bits of Boost Key Builder
+ * @bkb: pointer to the Boost Key Builder structure
+ * @data: Boost Key Builder data to be parsed
+ */
+static void ice_imem_bkb_init(struct ice_bst_keybuilder *bkb, u16 data)
+{
+ bkb->prio = FIELD_GET(ICE_IM_BKB_PRIO, data);
+ bkb->tsr_ctrl = FIELD_GET(ICE_IM_BKB_TSR_CTRL, data);
+}
+
+#define ICE_IM_NPKB_OPC GENMASK(1, 0)
+#define ICE_IM_NPKB_S_R0 GENMASK(9, 2)
+#define ICE_IM_NPKB_L_R1 GENMASK(17, 10)
+
+/**
+ * ice_imem_npkb_init - parse 18 bits of Next Protocol Key Build
+ * @kb: pointer to the Next Protocol Key Build structure
+ * @data: Next Protocol Key Build data to be parsed
+ */
+static void ice_imem_npkb_init(struct ice_np_keybuilder *kb, u32 data)
+{
+ kb->opc = FIELD_GET(ICE_IM_NPKB_OPC, data);
+ kb->start_reg0 = FIELD_GET(ICE_IM_NPKB_S_R0, data);
+ kb->len_reg1 = FIELD_GET(ICE_IM_NPKB_L_R1, data);
+}
+
+#define ICE_IM_PGKB_F0_ENA BIT_ULL(0)
+#define ICE_IM_PGKB_F0_IDX GENMASK_ULL(6, 1)
+#define ICE_IM_PGKB_F1_ENA BIT_ULL(7)
+#define ICE_IM_PGKB_F1_IDX GENMASK_ULL(13, 8)
+#define ICE_IM_PGKB_F2_ENA BIT_ULL(14)
+#define ICE_IM_PGKB_F2_IDX GENMASK_ULL(20, 15)
+#define ICE_IM_PGKB_F3_ENA BIT_ULL(21)
+#define ICE_IM_PGKB_F3_IDX GENMASK_ULL(27, 22)
+#define ICE_IM_PGKB_AR_IDX GENMASK_ULL(34, 28)
+
+/**
+ * ice_imem_pgkb_init - parse 35 bits of Parse Graph Key Build
+ * @kb: pointer to the Parse Graph Key Build structure
+ * @data: Parse Graph Key Build data to be parsed
+ */
+static void ice_imem_pgkb_init(struct ice_pg_keybuilder *kb, u64 data)
+{
+ kb->flag0_ena = FIELD_GET(ICE_IM_PGKB_F0_ENA, data);
+ kb->flag0_idx = FIELD_GET(ICE_IM_PGKB_F0_IDX, data);
+ kb->flag1_ena = FIELD_GET(ICE_IM_PGKB_F1_ENA, data);
+ kb->flag1_idx = FIELD_GET(ICE_IM_PGKB_F1_IDX, data);
+ kb->flag2_ena = FIELD_GET(ICE_IM_PGKB_F2_ENA, data);
+ kb->flag2_idx = FIELD_GET(ICE_IM_PGKB_F2_IDX, data);
+ kb->flag3_ena = FIELD_GET(ICE_IM_PGKB_F3_ENA, data);
+ kb->flag3_idx = FIELD_GET(ICE_IM_PGKB_F3_IDX, data);
+ kb->alu_reg_idx = FIELD_GET(ICE_IM_PGKB_AR_IDX, data);
+}
+
+#define ICE_IM_ALU_OPC GENMASK_ULL(5, 0)
+#define ICE_IM_ALU_SS GENMASK_ULL(13, 6)
+#define ICE_IM_ALU_SL GENMASK_ULL(18, 14)
+#define ICE_IM_ALU_SXS BIT_ULL(19)
+#define ICE_IM_ALU_SXK GENMASK_ULL(23, 20)
+#define ICE_IM_ALU_SRID GENMASK_ULL(30, 24)
+#define ICE_IM_ALU_DRID GENMASK_ULL(37, 31)
+#define ICE_IM_ALU_INC0 BIT_ULL(38)
+#define ICE_IM_ALU_INC1 BIT_ULL(39)
+#define ICE_IM_ALU_POO GENMASK_ULL(41, 40)
+#define ICE_IM_ALU_PO GENMASK_ULL(49, 42)
+#define ICE_IM_ALU_BA_S 50 /* offset for the 2nd 64-bit field */
+#define ICE_IM_ALU_BA GENMASK_ULL(57 - ICE_IM_ALU_BA_S, \
+ 50 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_IMM GENMASK_ULL(73 - ICE_IM_ALU_BA_S, \
+ 58 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_DFE BIT_ULL(74 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_DS GENMASK_ULL(80 - ICE_IM_ALU_BA_S, \
+ 75 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_DL GENMASK_ULL(86 - ICE_IM_ALU_BA_S, \
+ 81 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_FEI BIT_ULL(87 - ICE_IM_ALU_BA_S)
+#define ICE_IM_ALU_FSI GENMASK_ULL(95 - ICE_IM_ALU_BA_S, \
+ 88 - ICE_IM_ALU_BA_S)
+
+/**
+ * ice_imem_alu_init - parse 96 bits of ALU entry
+ * @alu: pointer to the ALU entry structure
+ * @data: ALU entry data to be parsed
+ * @off: offset of the ALU entry data
+ */
+static void ice_imem_alu_init(struct ice_alu *alu, u8 *data, u8 off)
+{
+ u64 d64;
+ u8 idd;
+
+ d64 = *((u64 *)data) >> off;
+
+ alu->opc = FIELD_GET(ICE_IM_ALU_OPC, d64);
+ alu->src_start = FIELD_GET(ICE_IM_ALU_SS, d64);
+ alu->src_len = FIELD_GET(ICE_IM_ALU_SL, d64);
+ alu->shift_xlate_sel = FIELD_GET(ICE_IM_ALU_SXS, d64);
+ alu->shift_xlate_key = FIELD_GET(ICE_IM_ALU_SXK, d64);
+ alu->src_reg_id = FIELD_GET(ICE_IM_ALU_SRID, d64);
+ alu->dst_reg_id = FIELD_GET(ICE_IM_ALU_DRID, d64);
+ alu->inc0 = FIELD_GET(ICE_IM_ALU_INC0, d64);
+ alu->inc1 = FIELD_GET(ICE_IM_ALU_INC1, d64);
+ alu->proto_offset_opc = FIELD_GET(ICE_IM_ALU_POO, d64);
+ alu->proto_offset = FIELD_GET(ICE_IM_ALU_PO, d64);
+
+ idd = (ICE_IM_ALU_BA_S + off) / BITS_PER_BYTE;
+ off = (ICE_IM_ALU_BA_S + off) % BITS_PER_BYTE;
+ d64 = *((u64 *)(&data[idd])) >> off;
+
+ alu->branch_addr = FIELD_GET(ICE_IM_ALU_BA, d64);
+ alu->imm = FIELD_GET(ICE_IM_ALU_IMM, d64);
+ alu->dedicate_flags_ena = FIELD_GET(ICE_IM_ALU_DFE, d64);
+ alu->dst_start = FIELD_GET(ICE_IM_ALU_DS, d64);
+ alu->dst_len = FIELD_GET(ICE_IM_ALU_DL, d64);
+ alu->flags_extr_imm = FIELD_GET(ICE_IM_ALU_FEI, d64);
+ alu->flags_start_imm = FIELD_GET(ICE_IM_ALU_FSI, d64);
+}
+
+#define ICE_IMEM_BM_S 0
+#define ICE_IMEM_BKB_S 4
+#define ICE_IMEM_BKB_IDD (ICE_IMEM_BKB_S / BITS_PER_BYTE)
+#define ICE_IMEM_BKB_OFF (ICE_IMEM_BKB_S % BITS_PER_BYTE)
+#define ICE_IMEM_PGP GENMASK(15, 14)
+#define ICE_IMEM_NPKB_S 16
+#define ICE_IMEM_NPKB_IDD (ICE_IMEM_NPKB_S / BITS_PER_BYTE)
+#define ICE_IMEM_NPKB_OFF (ICE_IMEM_NPKB_S % BITS_PER_BYTE)
+#define ICE_IMEM_PGKB_S 34
+#define ICE_IMEM_PGKB_IDD (ICE_IMEM_PGKB_S / BITS_PER_BYTE)
+#define ICE_IMEM_PGKB_OFF (ICE_IMEM_PGKB_S % BITS_PER_BYTE)
+#define ICE_IMEM_ALU0_S 69
+#define ICE_IMEM_ALU0_IDD (ICE_IMEM_ALU0_S / BITS_PER_BYTE)
+#define ICE_IMEM_ALU0_OFF (ICE_IMEM_ALU0_S % BITS_PER_BYTE)
+#define ICE_IMEM_ALU1_S 165
+#define ICE_IMEM_ALU1_IDD (ICE_IMEM_ALU1_S / BITS_PER_BYTE)
+#define ICE_IMEM_ALU1_OFF (ICE_IMEM_ALU1_S % BITS_PER_BYTE)
+#define ICE_IMEM_ALU2_S 357
+#define ICE_IMEM_ALU2_IDD (ICE_IMEM_ALU2_S / BITS_PER_BYTE)
+#define ICE_IMEM_ALU2_OFF (ICE_IMEM_ALU2_S % BITS_PER_BYTE)
+
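A worked instance of the IDD/OFF idiom used throughout this file:

/* ALU0 starts at bit 69 of the 384-bit IMEM entry, so the parser reads a
 * u64 starting at byte ICE_IMEM_ALU0_IDD = 69 / 8 = 8 and shifts it right
 * by ICE_IMEM_ALU0_OFF = 69 % 8 = 5 bits; the relative GENMASKs are then
 * applied to that shifted view.
 */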
+/**
+ * ice_imem_parse_item - parse 384 bits of IMEM entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of IMEM entry
+ * @item: item of IMEM entry
+ * @data: IMEM entry data to be parsed
+ * @size: size of IMEM entry
+ */
+static void ice_imem_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_imem_item *ii = item;
+ u8 *buf = data;
+
+ ii->idx = idx;
+
+ ice_imem_bm_init(&ii->b_m, *(u8 *)buf);
+ ice_imem_bkb_init(&ii->b_kb,
+ *((u16 *)(&buf[ICE_IMEM_BKB_IDD])) >>
+ ICE_IMEM_BKB_OFF);
+
+ ii->pg_prio = FIELD_GET(ICE_IMEM_PGP, *(u16 *)buf);
+
+ ice_imem_npkb_init(&ii->np_kb,
+ *((u32 *)(&buf[ICE_IMEM_NPKB_IDD])) >>
+ ICE_IMEM_NPKB_OFF);
+ ice_imem_pgkb_init(&ii->pg_kb,
+ *((u64 *)(&buf[ICE_IMEM_PGKB_IDD])) >>
+ ICE_IMEM_PGKB_OFF);
+
+ ice_imem_alu_init(&ii->alu0,
+ &buf[ICE_IMEM_ALU0_IDD],
+ ICE_IMEM_ALU0_OFF);
+ ice_imem_alu_init(&ii->alu1,
+ &buf[ICE_IMEM_ALU1_IDD],
+ ICE_IMEM_ALU1_OFF);
+ ice_imem_alu_init(&ii->alu2,
+ &buf[ICE_IMEM_ALU2_IDD],
+ ICE_IMEM_ALU2_OFF);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_imem_dump(hw, ii);
+}
+
+/**
+ * ice_imem_table_get - create an imem table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated IMEM table.
+ */
+static struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_IMEM,
+ sizeof(struct ice_imem_item),
+ ICE_IMEM_TABLE_SIZE,
+ ice_imem_parse_item, false);
+}
+
+/*** ICE_SID_RXPARSER_METADATA_INIT section ***/
+/**
+ * ice_metainit_dump - dump a metainit item info
+ * @hw: pointer to the hardware structure
+ * @item: metainit item to dump
+ */
+static void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %d\n", item->idx);
+
+ dev_info(dev, "tsr = %d\n", item->tsr);
+ dev_info(dev, "ho = %d\n", item->ho);
+ dev_info(dev, "pc = %d\n", item->pc);
+ dev_info(dev, "pg_rn = %d\n", item->pg_rn);
+ dev_info(dev, "cd = %d\n", item->cd);
+
+ dev_info(dev, "gpr_a_ctrl = %d\n", item->gpr_a_ctrl);
+ dev_info(dev, "gpr_a_data_mdid = %d\n", item->gpr_a_data_mdid);
+ dev_info(dev, "gpr_a_data_start = %d\n", item->gpr_a_data_start);
+ dev_info(dev, "gpr_a_data_len = %d\n", item->gpr_a_data_len);
+ dev_info(dev, "gpr_a_id = %d\n", item->gpr_a_id);
+
+ dev_info(dev, "gpr_b_ctrl = %d\n", item->gpr_b_ctrl);
+ dev_info(dev, "gpr_b_data_mdid = %d\n", item->gpr_b_data_mdid);
+ dev_info(dev, "gpr_b_data_start = %d\n", item->gpr_b_data_start);
+ dev_info(dev, "gpr_b_data_len = %d\n", item->gpr_b_data_len);
+ dev_info(dev, "gpr_b_id = %d\n", item->gpr_b_id);
+
+ dev_info(dev, "gpr_c_ctrl = %d\n", item->gpr_c_ctrl);
+ dev_info(dev, "gpr_c_data_mdid = %d\n", item->gpr_c_data_mdid);
+ dev_info(dev, "gpr_c_data_start = %d\n", item->gpr_c_data_start);
+ dev_info(dev, "gpr_c_data_len = %d\n", item->gpr_c_data_len);
+ dev_info(dev, "gpr_c_id = %d\n", item->gpr_c_id);
+
+ dev_info(dev, "gpr_d_ctrl = %d\n", item->gpr_d_ctrl);
+ dev_info(dev, "gpr_d_data_mdid = %d\n", item->gpr_d_data_mdid);
+ dev_info(dev, "gpr_d_data_start = %d\n", item->gpr_d_data_start);
+ dev_info(dev, "gpr_d_data_len = %d\n", item->gpr_d_data_len);
+ dev_info(dev, "gpr_d_id = %d\n", item->gpr_d_id);
+
+ dev_info(dev, "flags = 0x%llx\n", (unsigned long long)(item->flags));
+}
+
+#define ICE_MI_TSR GENMASK_ULL(7, 0)
+#define ICE_MI_HO GENMASK_ULL(16, 8)
+#define ICE_MI_PC GENMASK_ULL(24, 17)
+#define ICE_MI_PGRN GENMASK_ULL(35, 25)
+#define ICE_MI_CD GENMASK_ULL(38, 36)
+#define ICE_MI_GAC BIT_ULL(39)
+#define ICE_MI_GADM GENMASK_ULL(44, 40)
+#define ICE_MI_GADS GENMASK_ULL(48, 45)
+#define ICE_MI_GADL GENMASK_ULL(53, 49)
+#define ICE_MI_GAI GENMASK_ULL(59, 56)
+#define ICE_MI_GBC BIT_ULL(60)
+#define ICE_MI_GBDM_S 61 /* offset for the 2nd 64-bit field */
+#define ICE_MI_GBDM_IDD (ICE_MI_GBDM_S / BITS_PER_BYTE)
+#define ICE_MI_GBDM_OFF (ICE_MI_GBDM_S % BITS_PER_BYTE)
+
+#define ICE_MI_GBDM_GENMASK_ULL(high, low) \
+ GENMASK_ULL((high) - ICE_MI_GBDM_S, (low) - ICE_MI_GBDM_S)
+#define ICE_MI_GBDM ICE_MI_GBDM_GENMASK_ULL(65, 61)
+#define ICE_MI_GBDS ICE_MI_GBDM_GENMASK_ULL(69, 66)
+#define ICE_MI_GBDL ICE_MI_GBDM_GENMASK_ULL(74, 70)
+#define ICE_MI_GBI ICE_MI_GBDM_GENMASK_ULL(80, 77)
+#define ICE_MI_GCC BIT_ULL(81 - ICE_MI_GBDM_S)
+#define ICE_MI_GCDM ICE_MI_GBDM_GENMASK_ULL(86, 82)
+#define ICE_MI_GCDS ICE_MI_GBDM_GENMASK_ULL(90, 87)
+#define ICE_MI_GCDL ICE_MI_GBDM_GENMASK_ULL(95, 91)
+#define ICE_MI_GCI ICE_MI_GBDM_GENMASK_ULL(101, 98)
+#define ICE_MI_GDC BIT_ULL(102 - ICE_MI_GBDM_S)
+#define ICE_MI_GDDM ICE_MI_GBDM_GENMASK_ULL(107, 103)
+#define ICE_MI_GDDS ICE_MI_GBDM_GENMASK_ULL(111, 108)
+#define ICE_MI_GDDL ICE_MI_GBDM_GENMASK_ULL(116, 112)
+#define ICE_MI_GDI ICE_MI_GBDM_GENMASK_ULL(122, 119)
+#define ICE_MI_FLAG_S 123 /* offset for the 3rd 64-bit field */
+#define ICE_MI_FLAG_IDD (ICE_MI_FLAG_S / BITS_PER_BYTE)
+#define ICE_MI_FLAG_OFF (ICE_MI_FLAG_S % BITS_PER_BYTE)
+#define ICE_MI_FLAG GENMASK_ULL(186 - ICE_MI_FLAG_S, \
+ 123 - ICE_MI_FLAG_S)
+
+/**
+ * ice_metainit_parse_item - parse 192 bits of Metadata Init entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Metadata Init entry
+ * @item: item of Metadata Init entry
+ * @data: Metadata Init entry data to be parsed
+ * @size: size of Metadata Init entry
+ */
+static void ice_metainit_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_metainit_item *mi = item;
+ u8 *buf = data;
+ u64 d64;
+
+ mi->idx = idx;
+
+ d64 = *(u64 *)buf;
+
+ mi->tsr = FIELD_GET(ICE_MI_TSR, d64);
+ mi->ho = FIELD_GET(ICE_MI_HO, d64);
+ mi->pc = FIELD_GET(ICE_MI_PC, d64);
+ mi->pg_rn = FIELD_GET(ICE_MI_PGRN, d64);
+ mi->cd = FIELD_GET(ICE_MI_CD, d64);
+
+ mi->gpr_a_ctrl = FIELD_GET(ICE_MI_GAC, d64);
+ mi->gpr_a_data_mdid = FIELD_GET(ICE_MI_GADM, d64);
+ mi->gpr_a_data_start = FIELD_GET(ICE_MI_GADS, d64);
+ mi->gpr_a_data_len = FIELD_GET(ICE_MI_GADL, d64);
+ mi->gpr_a_id = FIELD_GET(ICE_MI_GAI, d64);
+
+ mi->gpr_b_ctrl = FIELD_GET(ICE_MI_GBC, d64);
+
+ d64 = *((u64 *)&buf[ICE_MI_GBDM_IDD]) >> ICE_MI_GBDM_OFF;
+
+ mi->gpr_b_data_mdid = FIELD_GET(ICE_MI_GBDM, d64);
+ mi->gpr_b_data_start = FIELD_GET(ICE_MI_GBDS, d64);
+ mi->gpr_b_data_len = FIELD_GET(ICE_MI_GBDL, d64);
+ mi->gpr_b_id = FIELD_GET(ICE_MI_GBI, d64);
+
+ mi->gpr_c_ctrl = FIELD_GET(ICE_MI_GCC, d64);
+ mi->gpr_c_data_mdid = FIELD_GET(ICE_MI_GCDM, d64);
+ mi->gpr_c_data_start = FIELD_GET(ICE_MI_GCDS, d64);
+ mi->gpr_c_data_len = FIELD_GET(ICE_MI_GCDL, d64);
+ mi->gpr_c_id = FIELD_GET(ICE_MI_GCI, d64);
+
+ mi->gpr_d_ctrl = FIELD_GET(ICE_MI_GDC, d64);
+ mi->gpr_d_data_mdid = FIELD_GET(ICE_MI_GDDM, d64);
+ mi->gpr_d_data_start = FIELD_GET(ICE_MI_GDDS, d64);
+ mi->gpr_d_data_len = FIELD_GET(ICE_MI_GDDL, d64);
+ mi->gpr_d_id = FIELD_GET(ICE_MI_GDI, d64);
+
+ d64 = *((u64 *)&buf[ICE_MI_FLAG_IDD]) >> ICE_MI_FLAG_OFF;
+
+ mi->flags = FIELD_GET(ICE_MI_FLAG, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_metainit_dump(hw, mi);
+}
+
+/**
+ * ice_metainit_table_get - create a metainit table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Metadata initialization table.
+ */
+static struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_METADATA_INIT,
+ sizeof(struct ice_metainit_item),
+ ICE_METAINIT_TABLE_SIZE,
+ ice_metainit_parse_item, false);
+}
+
+/**
+ * ice_bst_tcam_search - find a TCAM item with a specific type
+ * @tcam_table: the TCAM table
+ * @lbl_table: the lbl table to search
+ * @type: the type we need to match against
+ * @start: start searching from this index
+ *
+ * Return: a pointer to the matching BOOST TCAM item or NULL.
+ */
+struct ice_bst_tcam_item *
+ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table,
+ struct ice_lbl_item *lbl_table,
+ enum ice_lbl_type type, u16 *start)
+{
+ u16 i = *start;
+
+ for (; i < ICE_BST_TCAM_TABLE_SIZE; i++) {
+ if (lbl_table[i].type == type) {
+ *start = i;
+ return &tcam_table[lbl_table[i].idx];
+ }
+ }
+
+ return NULL;
+}
+
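The *start parameter lets callers resume the scan, so enumerating every label of one type looks like this (a sketch; the concrete ice_lbl_type value is left symbolic):

u16 i = 0;
struct ice_bst_tcam_item *item;

while ((item = ice_bst_tcam_search(tcam_table, lbl_table, type, &i))) {
	/* consume item ... */
	i++;	/* step past this hit before searching again */
}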
+/*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL,
+ * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_SPILL
+ * sections ***/
+static void ice_pg_cam_key_dump(struct ice_hw *hw, struct ice_pg_cam_key *key)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "key:\n");
+ dev_info(dev, "\tvalid = %d\n", key->valid);
+ dev_info(dev, "\tnode_id = %d\n", key->node_id);
+ dev_info(dev, "\tflag0 = %d\n", key->flag0);
+ dev_info(dev, "\tflag1 = %d\n", key->flag1);
+ dev_info(dev, "\tflag2 = %d\n", key->flag2);
+ dev_info(dev, "\tflag3 = %d\n", key->flag3);
+ dev_info(dev, "\tboost_idx = %d\n", key->boost_idx);
+ dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg);
+ dev_info(dev, "\tnext_proto = 0x%08x\n", key->next_proto);
+}
+
+static void ice_pg_nm_cam_key_dump(struct ice_hw *hw,
+ struct ice_pg_nm_cam_key *key)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "key:\n");
+ dev_info(dev, "\tvalid = %d\n", key->valid);
+ dev_info(dev, "\tnode_id = %d\n", key->node_id);
+ dev_info(dev, "\tflag0 = %d\n", key->flag0);
+ dev_info(dev, "\tflag1 = %d\n", key->flag1);
+ dev_info(dev, "\tflag2 = %d\n", key->flag2);
+ dev_info(dev, "\tflag3 = %d\n", key->flag3);
+ dev_info(dev, "\tboost_idx = %d\n", key->boost_idx);
+ dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg);
+}
+
+static void ice_pg_cam_action_dump(struct ice_hw *hw,
+ struct ice_pg_cam_action *action)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "action:\n");
+ dev_info(dev, "\tnext_node = %d\n", action->next_node);
+ dev_info(dev, "\tnext_pc = %d\n", action->next_pc);
+ dev_info(dev, "\tis_pg = %d\n", action->is_pg);
+ dev_info(dev, "\tproto_id = %d\n", action->proto_id);
+ dev_info(dev, "\tis_mg = %d\n", action->is_mg);
+ dev_info(dev, "\tmarker_id = %d\n", action->marker_id);
+ dev_info(dev, "\tis_last_round = %d\n", action->is_last_round);
+ dev_info(dev, "\tho_polarity = %d\n", action->ho_polarity);
+ dev_info(dev, "\tho_inc = %d\n", action->ho_inc);
+}
+
+/**
+ * ice_pg_cam_dump - dump a parse graph cam item info
+ * @hw: pointer to the hardware structure
+ * @item: parse graph cam to dump
+ */
+static void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item)
+{
+ dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
+ ice_pg_cam_key_dump(hw, &item->key);
+ ice_pg_cam_action_dump(hw, &item->action);
+}
+
+/**
+ * ice_pg_nm_cam_dump - dump a parse graph no match cam item info
+ * @hw: pointer to the hardware structure
+ * @item: parse graph no match cam to dump
+ */
+static void ice_pg_nm_cam_dump(struct ice_hw *hw,
+ struct ice_pg_nm_cam_item *item)
+{
+ dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
+ ice_pg_nm_cam_key_dump(hw, &item->key);
+ ice_pg_cam_action_dump(hw, &item->action);
+}
+
+#define ICE_PGCA_NN GENMASK_ULL(10, 0)
+#define ICE_PGCA_NPC GENMASK_ULL(18, 11)
+#define ICE_PGCA_IPG BIT_ULL(19)
+#define ICE_PGCA_PID GENMASK_ULL(30, 23)
+#define ICE_PGCA_IMG BIT_ULL(31)
+#define ICE_PGCA_MID GENMASK_ULL(39, 32)
+#define ICE_PGCA_ILR BIT_ULL(40)
+#define ICE_PGCA_HOP BIT_ULL(41)
+#define ICE_PGCA_HOI GENMASK_ULL(50, 42)
+
+/**
+ * ice_pg_cam_action_init - parse 55 bits of Parse Graph CAM Action
+ * @action: pointer to the Parse Graph CAM Action structure
+ * @data: Parse Graph CAM Action data to be parsed
+ */
+static void ice_pg_cam_action_init(struct ice_pg_cam_action *action, u64 data)
+{
+ action->next_node = FIELD_GET(ICE_PGCA_NN, data);
+ action->next_pc = FIELD_GET(ICE_PGCA_NPC, data);
+ action->is_pg = FIELD_GET(ICE_PGCA_IPG, data);
+ action->proto_id = FIELD_GET(ICE_PGCA_PID, data);
+ action->is_mg = FIELD_GET(ICE_PGCA_IMG, data);
+ action->marker_id = FIELD_GET(ICE_PGCA_MID, data);
+ action->is_last_round = FIELD_GET(ICE_PGCA_ILR, data);
+ action->ho_polarity = FIELD_GET(ICE_PGCA_HOP, data);
+ action->ho_inc = FIELD_GET(ICE_PGCA_HOI, data);
+}
+
+#define ICE_PGNCK_VLD BIT_ULL(0)
+#define ICE_PGNCK_NID GENMASK_ULL(11, 1)
+#define ICE_PGNCK_F0 BIT_ULL(12)
+#define ICE_PGNCK_F1 BIT_ULL(13)
+#define ICE_PGNCK_F2 BIT_ULL(14)
+#define ICE_PGNCK_F3 BIT_ULL(15)
+#define ICE_PGNCK_BH BIT_ULL(16)
+#define ICE_PGNCK_BI GENMASK_ULL(24, 17)
+#define ICE_PGNCK_AR GENMASK_ULL(40, 25)
+
+/**
+ * ice_pg_nm_cam_key_init - parse 41 bits of Parse Graph NoMatch CAM Key
+ * @key: pointer to the Parse Graph NoMatch CAM Key structure
+ * @data: Parse Graph NoMatch CAM Key data to be parsed
+ */
+static void ice_pg_nm_cam_key_init(struct ice_pg_nm_cam_key *key, u64 data)
+{
+ key->valid = FIELD_GET(ICE_PGNCK_VLD, data);
+ key->node_id = FIELD_GET(ICE_PGNCK_NID, data);
+ key->flag0 = FIELD_GET(ICE_PGNCK_F0, data);
+ key->flag1 = FIELD_GET(ICE_PGNCK_F1, data);
+ key->flag2 = FIELD_GET(ICE_PGNCK_F2, data);
+ key->flag3 = FIELD_GET(ICE_PGNCK_F3, data);
+
+ if (FIELD_GET(ICE_PGNCK_BH, data))
+ key->boost_idx = FIELD_GET(ICE_PGNCK_BI, data);
+ else
+ key->boost_idx = 0;
+
+ key->alu_reg = FIELD_GET(ICE_PGNCK_AR, data);
+}
+
+#define ICE_PGCK_VLD BIT_ULL(0)
+#define ICE_PGCK_NID GENMASK_ULL(11, 1)
+#define ICE_PGCK_F0 BIT_ULL(12)
+#define ICE_PGCK_F1 BIT_ULL(13)
+#define ICE_PGCK_F2 BIT_ULL(14)
+#define ICE_PGCK_F3 BIT_ULL(15)
+#define ICE_PGCK_BH BIT_ULL(16)
+#define ICE_PGCK_BI GENMASK_ULL(24, 17)
+#define ICE_PGCK_AR GENMASK_ULL(40, 25)
+#define ICE_PGCK_NPK_S 41 /* offset for the 2nd 64-bit field */
+#define ICE_PGCK_NPK_IDD (ICE_PGCK_NPK_S / BITS_PER_BYTE)
+#define ICE_PGCK_NPK_OFF (ICE_PGCK_NPK_S % BITS_PER_BYTE)
+#define ICE_PGCK_NPK GENMASK_ULL(72 - ICE_PGCK_NPK_S, \
+ 41 - ICE_PGCK_NPK_S)
+
+/**
+ * ice_pg_cam_key_init - parse 73 bits of Parse Graph CAM Key
+ * @key: pointer to the Parse Graph CAM Key structure
+ * @data: Parse Graph CAM Key data to be parsed
+ */
+static void ice_pg_cam_key_init(struct ice_pg_cam_key *key, u8 *data)
+{
+ u64 d64 = *(u64 *)data;
+
+ key->valid = FIELD_GET(ICE_PGCK_VLD, d64);
+ key->node_id = FIELD_GET(ICE_PGCK_NID, d64);
+ key->flag0 = FIELD_GET(ICE_PGCK_F0, d64);
+ key->flag1 = FIELD_GET(ICE_PGCK_F1, d64);
+ key->flag2 = FIELD_GET(ICE_PGCK_F2, d64);
+ key->flag3 = FIELD_GET(ICE_PGCK_F3, d64);
+
+ if (FIELD_GET(ICE_PGCK_BH, d64))
+ key->boost_idx = FIELD_GET(ICE_PGCK_BI, d64);
+ else
+ key->boost_idx = 0;
+
+ key->alu_reg = FIELD_GET(ICE_PGCK_AR, d64);
+
+ d64 = *((u64 *)&data[ICE_PGCK_NPK_IDD]) >> ICE_PGCK_NPK_OFF;
+
+ key->next_proto = FIELD_GET(ICE_PGCK_NPK, d64);
+}
+
+#define ICE_PG_CAM_ACT_S 73
+#define ICE_PG_CAM_ACT_IDD (ICE_PG_CAM_ACT_S / BITS_PER_BYTE)
+#define ICE_PG_CAM_ACT_OFF (ICE_PG_CAM_ACT_S % BITS_PER_BYTE)
+
+/**
+ * ice_pg_cam_parse_item - parse 128 bits of Parse Graph CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph CAM Entry
+ * @item: item of Parse Graph CAM Entry
+ * @data: Parse Graph CAM Entry data to be parsed
+ * @size: size of Parse Graph CAM Entry
+ */
+static void ice_pg_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_pg_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ ice_pg_cam_key_init(&ci->key, buf);
+
+ d64 = *((u64 *)&buf[ICE_PG_CAM_ACT_IDD]) >> ICE_PG_CAM_ACT_OFF;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_cam_dump(hw, ci);
+}
+
+#define ICE_PG_SP_CAM_KEY_S 56
+#define ICE_PG_SP_CAM_KEY_IDD (ICE_PG_SP_CAM_KEY_S / BITS_PER_BYTE)
+
+/**
+ * ice_pg_sp_cam_parse_item - parse 136 bits of Parse Graph Spill CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph Spill CAM Entry
+ * @item: item of Parse Graph Spill CAM Entry
+ * @data: Parse Graph Spill CAM Entry data to be parsed
+ * @size: size of Parse Graph Spill CAM Entry
+ */
+static void ice_pg_sp_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_pg_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ d64 = *(u64 *)buf;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ ice_pg_cam_key_init(&ci->key, &buf[ICE_PG_SP_CAM_KEY_IDD]);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_cam_dump(hw, ci);
+}
+
+#define ICE_PG_NM_CAM_ACT_S 41
+#define ICE_PG_NM_CAM_ACT_IDD (ICE_PG_NM_CAM_ACT_S / BITS_PER_BYTE)
+#define ICE_PG_NM_CAM_ACT_OFF (ICE_PG_NM_CAM_ACT_S % BITS_PER_BYTE)
+
+/**
+ * ice_pg_nm_cam_parse_item - parse 96 bits of Parse Graph NoMatch CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph NoMatch CAM Entry
+ * @item: item of Parse Graph NoMatch CAM Entry
+ * @data: Parse Graph NoMatch CAM Entry data to be parsed
+ * @size: size of Parse Graph NoMatch CAM Entry
+ */
+static void ice_pg_nm_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_pg_nm_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ d64 = *(u64 *)buf;
+ ice_pg_nm_cam_key_init(&ci->key, d64);
+
+ d64 = *((u64 *)&buf[ICE_PG_NM_CAM_ACT_IDD]) >> ICE_PG_NM_CAM_ACT_OFF;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_nm_cam_dump(hw, ci);
+}
+
+#define ICE_PG_NM_SP_CAM_ACT_S 56
+#define ICE_PG_NM_SP_CAM_ACT_IDD (ICE_PG_NM_SP_CAM_ACT_S / BITS_PER_BYTE)
+#define ICE_PG_NM_SP_CAM_ACT_OFF (ICE_PG_NM_SP_CAM_ACT_S % BITS_PER_BYTE)
+
+/**
+ * ice_pg_nm_sp_cam_parse_item - parse 104 bits of Parse Graph NoMatch Spill
+ * CAM Entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Parse Graph NoMatch Spill CAM Entry
+ * @item: item of Parse Graph NoMatch Spill CAM Entry
+ * @data: Parse Graph NoMatch Spill CAM Entry data to be parsed
+ * @size: size of Parse Graph NoMatch Spill CAM Entry
+ */
+static void ice_pg_nm_sp_cam_parse_item(struct ice_hw *hw, u16 idx,
+ void *item, void *data,
+ int __maybe_unused size)
+{
+ struct ice_pg_nm_cam_item *ci = item;
+ u8 *buf = data;
+ u64 d64;
+
+ ci->idx = idx;
+
+ d64 = *(u64 *)buf;
+ ice_pg_cam_action_init(&ci->action, d64);
+
+ d64 = *((u64 *)&buf[ICE_PG_NM_SP_CAM_ACT_IDD]) >>
+ ICE_PG_NM_SP_CAM_ACT_OFF;
+ ice_pg_nm_cam_key_init(&ci->key, d64);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_pg_nm_cam_dump(hw, ci);
+}
+
+/**
+ * ice_pg_cam_table_get - create a parse graph cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph CAM table.
+ */
+static struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_CAM,
+ sizeof(struct ice_pg_cam_item),
+ ICE_PG_CAM_TABLE_SIZE,
+ ice_pg_cam_parse_item, false);
+}
+
+/**
+ * ice_pg_sp_cam_table_get - create a parse graph spill cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph Spill CAM table.
+ */
+static struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_PG_SPILL,
+ sizeof(struct ice_pg_cam_item),
+ ICE_PG_SP_CAM_TABLE_SIZE,
+ ice_pg_sp_cam_parse_item, false);
+}
+
+/**
+ * ice_pg_nm_cam_table_get - create a parse graph no match cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph No Match CAM table.
+ */
+static struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_CAM,
+ sizeof(struct ice_pg_nm_cam_item),
+ ICE_PG_NM_CAM_TABLE_SIZE,
+ ice_pg_nm_cam_parse_item, false);
+}
+
+/**
+ * ice_pg_nm_sp_cam_table_get - create a parse graph no match spill cam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Parse Graph No Match Spill CAM table.
+ */
+static struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_SPILL,
+ sizeof(struct ice_pg_nm_cam_item),
+ ICE_PG_NM_SP_CAM_TABLE_SIZE,
+ ice_pg_nm_sp_cam_parse_item, false);
+}
+
+static bool __ice_pg_cam_match(struct ice_pg_cam_item *item,
+ struct ice_pg_cam_key *key)
+{
+ return (item->key.valid &&
+ !memcmp(&item->key.val, &key->val, sizeof(key->val)));
+}
+
+static bool __ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *item,
+ struct ice_pg_cam_key *key)
+{
+ return (item->key.valid &&
+ !memcmp(&item->key.val, &key->val, sizeof(item->key.val)));
+}
+
+/**
+ * ice_pg_cam_match - search parse graph cam table by key
+ * @table: parse graph cam table to search
+ * @size: cam table size
+ * @key: search key
+ *
+ * Return: a pointer to the matching PG CAM item or NULL.
+ */
+struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
+ int size, struct ice_pg_cam_key *key)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ struct ice_pg_cam_item *item = &table[i];
+
+ if (__ice_pg_cam_match(item, key))
+ return item;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_pg_nm_cam_match - search parse graph no match cam table by key
+ * @table: parse graph no match cam table to search
+ * @size: cam table size
+ * @key: search key
+ *
+ * Return: a pointer to the matching PG No Match CAM item or NULL.
+ */
+struct ice_pg_nm_cam_item *
+ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
+ struct ice_pg_cam_key *key)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ struct ice_pg_nm_cam_item *item = &table[i];
+
+ if (__ice_pg_nm_cam_match(item, key))
+ return item;
+ }
+
+ return NULL;
+}
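+
+/* Illustrative lookup cascade (a sketch, not code from this patch): a
+ * caller holding both tables would typically try the main CAM first and
+ * fall back to the spill table on a miss, e.g.:
+ *
+ * item = ice_pg_cam_match(psr->pg_cam_table,
+ * ICE_PG_CAM_TABLE_SIZE, &key);
+ * if (!item)
+ * item = ice_pg_cam_match(psr->pg_sp_cam_table,
+ * ICE_PG_SP_CAM_TABLE_SIZE, &key);
+ */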
+
+/*** Ternary match ***/
+/* Perform a ternary match on a 1-byte pattern (@pat) given @key and @key_inv
+ * Rules (per bit):
+ * Key == 0 and Key_inv == 0 : Never match (invalid bit)
+ * Key == 0 and Key_inv == 1 : Match on bit == 1
+ * Key == 1 and Key_inv == 0 : Match on bit == 0
+ * Key == 1 and Key_inv == 1 : Always match (Don't care)
+ *
+ * Return: true if all bits match, false otherwise.
+ */
+static bool ice_ternary_match_byte(u8 key, u8 key_inv, u8 pat)
+{
+ u8 bit_key, bit_key_inv, bit_pat;
+ int i;
+
+ for (i = 0; i < BITS_PER_BYTE; i++) {
+ bit_key = key & BIT(i);
+ bit_key_inv = key_inv & BIT(i);
+ bit_pat = pat & BIT(i);
+
+ if (bit_key != 0 && bit_key_inv != 0)
+ continue;
+
+ if ((bit_key == 0 && bit_key_inv == 0) || bit_key == bit_pat)
+ return false;
+ }
+
+ return true;
+}
+
+static bool ice_ternary_match(const u8 *key, const u8 *key_inv,
+ const u8 *pat, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (!ice_ternary_match_byte(key[i], key_inv[i], pat[i]))
+ return false;
+
+ return true;
+}
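+
+/* Worked example of the ternary rules above (illustrative values only):
+ * with key = 0x0F and key_inv = 0xF0, bits 0-3 have Key == 1/Key_inv == 0
+ * (match on 0) and bits 4-7 have Key == 0/Key_inv == 1 (match on 1), so
+ * ice_ternary_match_byte() returns true only for pat == 0xF0; key ==
+ * key_inv == 0xFF makes every bit "Always match".
+ */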
+
+/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
+static void ice_bst_np_kb_dump(struct ice_hw *hw, struct ice_np_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "next proto key builder:\n");
+ dev_info(dev, "\topc = %d\n", kb->opc);
+ dev_info(dev, "\tstart_reg0 = %d\n", kb->start_reg0);
+ dev_info(dev, "\tlen_reg1 = %d\n", kb->len_reg1);
+}
+
+static void ice_bst_pg_kb_dump(struct ice_hw *hw, struct ice_pg_keybuilder *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "parse graph key builder:\n");
+ dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena);
+ dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena);
+ dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena);
+ dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena);
+ dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx);
+ dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx);
+ dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx);
+ dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx);
+ dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx);
+}
+
+static void ice_bst_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int idx)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "alu%d:\n", idx);
+ dev_info(dev, "\topc = %d\n", alu->opc);
+ dev_info(dev, "\tsrc_start = %d\n", alu->src_start);
+ dev_info(dev, "\tsrc_len = %d\n", alu->src_len);
+ dev_info(dev, "\tshift_xlate_sel = %d\n", alu->shift_xlate_sel);
+ dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key);
+ dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id);
+ dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id);
+ dev_info(dev, "\tinc0 = %d\n", alu->inc0);
+ dev_info(dev, "\tinc1 = %d\n", alu->inc1);
+ dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc);
+ dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset);
+ dev_info(dev, "\tbranch_addr = %d\n", alu->branch_addr);
+ dev_info(dev, "\timm = %d\n", alu->imm);
+ dev_info(dev, "\tdst_start = %d\n", alu->dst_start);
+ dev_info(dev, "\tdst_len = %d\n", alu->dst_len);
+ dev_info(dev, "\tflags_extr_imm = %d\n", alu->flags_extr_imm);
+ dev_info(dev, "\tflags_start_imm= %d\n", alu->flags_start_imm);
+}
+
+/**
+ * ice_bst_tcam_dump - dump a boost tcam info
+ * @hw: pointer to the hardware structure
+ * @item: boost tcam to dump
+ */
+static void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "addr = %d\n", item->addr);
+
+ dev_info(dev, "key : ");
+ for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key[i]);
+
+ dev_info(dev, "\n");
+
+ dev_info(dev, "key_inv: ");
+ for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key_inv[i]);
+
+ dev_info(dev, "\n");
+
+ dev_info(dev, "hit_idx_grp = %d\n", item->hit_idx_grp);
+ dev_info(dev, "pg_prio = %d\n", item->pg_prio);
+
+ ice_bst_np_kb_dump(hw, &item->np_kb);
+ ice_bst_pg_kb_dump(hw, &item->pg_kb);
+
+ ice_bst_alu_dump(hw, &item->alu0, ICE_ALU0_IDX);
+ ice_bst_alu_dump(hw, &item->alu1, ICE_ALU1_IDX);
+ ice_bst_alu_dump(hw, &item->alu2, ICE_ALU2_IDX);
+}
+
+static void ice_lbl_dump(struct ice_hw *hw, struct ice_lbl_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %u\n", item->idx);
+ dev_info(dev, "type = %u\n", item->type);
+ dev_info(dev, "label = %s\n", item->label);
+}
+
+#define ICE_BST_ALU_OPC GENMASK_ULL(5, 0)
+#define ICE_BST_ALU_SS GENMASK_ULL(13, 6)
+#define ICE_BST_ALU_SL GENMASK_ULL(18, 14)
+#define ICE_BST_ALU_SXS BIT_ULL(19)
+#define ICE_BST_ALU_SXK GENMASK_ULL(23, 20)
+#define ICE_BST_ALU_SRID GENMASK_ULL(30, 24)
+#define ICE_BST_ALU_DRID GENMASK_ULL(37, 31)
+#define ICE_BST_ALU_INC0 BIT_ULL(38)
+#define ICE_BST_ALU_INC1 BIT_ULL(39)
+#define ICE_BST_ALU_POO GENMASK_ULL(41, 40)
+#define ICE_BST_ALU_PO GENMASK_ULL(49, 42)
+#define ICE_BST_ALU_BA_S	50	/* offset for the 2nd 64-bit field */
+#define ICE_BST_ALU_BA GENMASK_ULL(57 - ICE_BST_ALU_BA_S, \
+ 50 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_IMM GENMASK_ULL(73 - ICE_BST_ALU_BA_S, \
+ 58 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_DFE BIT_ULL(74 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_DS GENMASK_ULL(80 - ICE_BST_ALU_BA_S, \
+ 75 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_DL GENMASK_ULL(86 - ICE_BST_ALU_BA_S, \
+ 81 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_FEI BIT_ULL(87 - ICE_BST_ALU_BA_S)
+#define ICE_BST_ALU_FSI GENMASK_ULL(95 - ICE_BST_ALU_BA_S, \
+ 88 - ICE_BST_ALU_BA_S)
+
+/**
+ * ice_bst_alu_init - parse 96 bits of ALU entry
+ * @alu: pointer to the ALU entry structure
+ * @data: ALU entry data to be parsed
+ * @off: offset of the ALU entry data
+ */
+static void ice_bst_alu_init(struct ice_alu *alu, u8 *data, u8 off)
+{
+ u64 d64;
+ u8 idd;
+
+ d64 = *((u64 *)data) >> off;
+
+ alu->opc = FIELD_GET(ICE_BST_ALU_OPC, d64);
+ alu->src_start = FIELD_GET(ICE_BST_ALU_SS, d64);
+ alu->src_len = FIELD_GET(ICE_BST_ALU_SL, d64);
+ alu->shift_xlate_sel = FIELD_GET(ICE_BST_ALU_SXS, d64);
+ alu->shift_xlate_key = FIELD_GET(ICE_BST_ALU_SXK, d64);
+ alu->src_reg_id = FIELD_GET(ICE_BST_ALU_SRID, d64);
+ alu->dst_reg_id = FIELD_GET(ICE_BST_ALU_DRID, d64);
+ alu->inc0 = FIELD_GET(ICE_BST_ALU_INC0, d64);
+ alu->inc1 = FIELD_GET(ICE_BST_ALU_INC1, d64);
+ alu->proto_offset_opc = FIELD_GET(ICE_BST_ALU_POO, d64);
+ alu->proto_offset = FIELD_GET(ICE_BST_ALU_PO, d64);
+
+ idd = (ICE_BST_ALU_BA_S + off) / BITS_PER_BYTE;
+ off = (ICE_BST_ALU_BA_S + off) % BITS_PER_BYTE;
+ d64 = *((u64 *)(&data[idd])) >> off;
+
+ alu->branch_addr = FIELD_GET(ICE_BST_ALU_BA, d64);
+ alu->imm = FIELD_GET(ICE_BST_ALU_IMM, d64);
+ alu->dedicate_flags_ena = FIELD_GET(ICE_BST_ALU_DFE, d64);
+ alu->dst_start = FIELD_GET(ICE_BST_ALU_DS, d64);
+ alu->dst_len = FIELD_GET(ICE_BST_ALU_DL, d64);
+ alu->flags_extr_imm = FIELD_GET(ICE_BST_ALU_FEI, d64);
+ alu->flags_start_imm = FIELD_GET(ICE_BST_ALU_FSI, d64);
+}
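+
+/* Example of the two-part read above (values derived from the ICE_BT_*
+ * defines later in this file): ALU0 starts at TCAM entry bit 415, so
+ * @off = 415 % 8 = 7; the second 64-bit read then starts at bit
+ * 50 + 7 = 57 of @data, i.e. idd = 57 / 8 = 7 bytes in, with a residual
+ * shift of 57 % 8 = 1 bit.
+ */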
+
+#define ICE_BST_PGKB_F0_ENA BIT_ULL(0)
+#define ICE_BST_PGKB_F0_IDX GENMASK_ULL(6, 1)
+#define ICE_BST_PGKB_F1_ENA BIT_ULL(7)
+#define ICE_BST_PGKB_F1_IDX GENMASK_ULL(13, 8)
+#define ICE_BST_PGKB_F2_ENA BIT_ULL(14)
+#define ICE_BST_PGKB_F2_IDX GENMASK_ULL(20, 15)
+#define ICE_BST_PGKB_F3_ENA BIT_ULL(21)
+#define ICE_BST_PGKB_F3_IDX GENMASK_ULL(27, 22)
+#define ICE_BST_PGKB_AR_IDX GENMASK_ULL(34, 28)
+
+/**
+ * ice_bst_pgkb_init - parse 35 bits of Parse Graph Key Build
+ * @kb: pointer to the Parse Graph Key Build structure
+ * @data: Parse Graph Key Build data to be parsed
+ */
+static void ice_bst_pgkb_init(struct ice_pg_keybuilder *kb, u64 data)
+{
+ kb->flag0_ena = FIELD_GET(ICE_BST_PGKB_F0_ENA, data);
+ kb->flag0_idx = FIELD_GET(ICE_BST_PGKB_F0_IDX, data);
+ kb->flag1_ena = FIELD_GET(ICE_BST_PGKB_F1_ENA, data);
+ kb->flag1_idx = FIELD_GET(ICE_BST_PGKB_F1_IDX, data);
+ kb->flag2_ena = FIELD_GET(ICE_BST_PGKB_F2_ENA, data);
+ kb->flag2_idx = FIELD_GET(ICE_BST_PGKB_F2_IDX, data);
+ kb->flag3_ena = FIELD_GET(ICE_BST_PGKB_F3_ENA, data);
+ kb->flag3_idx = FIELD_GET(ICE_BST_PGKB_F3_IDX, data);
+ kb->alu_reg_idx = FIELD_GET(ICE_BST_PGKB_AR_IDX, data);
+}
+
+#define ICE_BST_NPKB_OPC GENMASK(1, 0)
+#define ICE_BST_NPKB_S_R0 GENMASK(9, 2)
+#define ICE_BST_NPKB_L_R1 GENMASK(17, 10)
+
+/**
+ * ice_bst_npkb_init - parse 18 bits of Next Protocol Key Build
+ * @kb: pointer to the Next Protocol Key Build structure
+ * @data: Next Protocol Key Build data to be parsed
+ */
+static void ice_bst_npkb_init(struct ice_np_keybuilder *kb, u32 data)
+{
+ kb->opc = FIELD_GET(ICE_BST_NPKB_OPC, data);
+ kb->start_reg0 = FIELD_GET(ICE_BST_NPKB_S_R0, data);
+ kb->len_reg1 = FIELD_GET(ICE_BST_NPKB_L_R1, data);
+}
+
+#define ICE_BT_KEY_S 32
+#define ICE_BT_KEY_IDD (ICE_BT_KEY_S / BITS_PER_BYTE)
+#define ICE_BT_KIV_S 192
+#define ICE_BT_KIV_IDD (ICE_BT_KIV_S / BITS_PER_BYTE)
+#define ICE_BT_HIG_S 352
+#define ICE_BT_HIG_IDD (ICE_BT_HIG_S / BITS_PER_BYTE)
+#define ICE_BT_PGP_S 360
+#define ICE_BT_PGP_IDD (ICE_BT_PGP_S / BITS_PER_BYTE)
+#define ICE_BT_PGP_M GENMASK(361 - ICE_BT_PGP_S, 360 - ICE_BT_PGP_S)
+#define ICE_BT_NPKB_S 362
+#define ICE_BT_NPKB_IDD (ICE_BT_NPKB_S / BITS_PER_BYTE)
+#define ICE_BT_NPKB_OFF (ICE_BT_NPKB_S % BITS_PER_BYTE)
+#define ICE_BT_PGKB_S 380
+#define ICE_BT_PGKB_IDD (ICE_BT_PGKB_S / BITS_PER_BYTE)
+#define ICE_BT_PGKB_OFF (ICE_BT_PGKB_S % BITS_PER_BYTE)
+#define ICE_BT_ALU0_S 415
+#define ICE_BT_ALU0_IDD (ICE_BT_ALU0_S / BITS_PER_BYTE)
+#define ICE_BT_ALU0_OFF (ICE_BT_ALU0_S % BITS_PER_BYTE)
+#define ICE_BT_ALU1_S 511
+#define ICE_BT_ALU1_IDD (ICE_BT_ALU1_S / BITS_PER_BYTE)
+#define ICE_BT_ALU1_OFF (ICE_BT_ALU1_S % BITS_PER_BYTE)
+#define ICE_BT_ALU2_S 607
+#define ICE_BT_ALU2_IDD (ICE_BT_ALU2_S / BITS_PER_BYTE)
+#define ICE_BT_ALU2_OFF (ICE_BT_ALU2_S % BITS_PER_BYTE)
+
+/**
+ * ice_bst_parse_item - parse 704 bits of Boost TCAM entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Boost TCAM entry
+ * @item: item of Boost TCAM entry
+ * @data: Boost TCAM entry data to be parsed
+ * @size: size of Boost TCAM entry
+ */
+static void ice_bst_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_bst_tcam_item *ti = item;
+ u8 *buf = (u8 *)data;
+ int i;
+
+ ti->addr = *(u16 *)buf;
+
+ for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++) {
+ ti->key[i] = buf[ICE_BT_KEY_IDD + i];
+ ti->key_inv[i] = buf[ICE_BT_KIV_IDD + i];
+ }
+ ti->hit_idx_grp = buf[ICE_BT_HIG_IDD];
+ ti->pg_prio = buf[ICE_BT_PGP_IDD] & ICE_BT_PGP_M;
+
+ ice_bst_npkb_init(&ti->np_kb,
+ *((u32 *)(&buf[ICE_BT_NPKB_IDD])) >>
+ ICE_BT_NPKB_OFF);
+ ice_bst_pgkb_init(&ti->pg_kb,
+ *((u64 *)(&buf[ICE_BT_PGKB_IDD])) >>
+ ICE_BT_PGKB_OFF);
+
+ ice_bst_alu_init(&ti->alu0, &buf[ICE_BT_ALU0_IDD], ICE_BT_ALU0_OFF);
+ ice_bst_alu_init(&ti->alu1, &buf[ICE_BT_ALU1_IDD], ICE_BT_ALU1_OFF);
+ ice_bst_alu_init(&ti->alu2, &buf[ICE_BT_ALU2_IDD], ICE_BT_ALU2_OFF);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_bst_tcam_dump(hw, ti);
+}
+
+/**
+ * ice_bst_tcam_table_get - create a boost tcam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Boost TCAM table.
+ */
+static struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_BOOST_TCAM,
+ sizeof(struct ice_bst_tcam_item),
+ ICE_BST_TCAM_TABLE_SIZE,
+ ice_bst_parse_item, true);
+}
+
+static void ice_parse_lbl_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_lbl_item *lbl_item = item;
+ struct ice_lbl_item *lbl_data = data;
+
+ lbl_item->idx = lbl_data->idx;
+ memcpy(lbl_item->label, lbl_data->label, sizeof(lbl_item->label));
+
+ if (strstarts(lbl_item->label, ICE_LBL_BST_DVM))
+ lbl_item->type = ICE_LBL_BST_TYPE_DVM;
+ else if (strstarts(lbl_item->label, ICE_LBL_BST_SVM))
+ lbl_item->type = ICE_LBL_BST_TYPE_SVM;
+ else if (strstarts(lbl_item->label, ICE_LBL_TNL_VXLAN))
+ lbl_item->type = ICE_LBL_BST_TYPE_VXLAN;
+ else if (strstarts(lbl_item->label, ICE_LBL_TNL_GENEVE))
+ lbl_item->type = ICE_LBL_BST_TYPE_GENEVE;
+ else if (strstarts(lbl_item->label, ICE_LBL_TNL_UDP_ECPRI))
+ lbl_item->type = ICE_LBL_BST_TYPE_UDP_ECPRI;
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_lbl_dump(hw, lbl_item);
+}
+
+/**
+ * ice_bst_lbl_table_get - create a boost label table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Boost label table.
+ */
+static struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_LBL_RXPARSER_TMEM,
+ sizeof(struct ice_lbl_item),
+ ICE_BST_TCAM_TABLE_SIZE,
+ ice_parse_lbl_item, true);
+}
+
+/**
+ * ice_bst_tcam_match - match a pattern on the boost tcam table
+ * @tcam_table: boost tcam table to search
+ * @pat: pattern to match
+ *
+ * Return: a pointer to the matching Boost TCAM item or NULL.
+ */
+struct ice_bst_tcam_item *
+ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat)
+{
+ int i;
+
+ for (i = 0; i < ICE_BST_TCAM_TABLE_SIZE; i++) {
+ struct ice_bst_tcam_item *item = &tcam_table[i];
+
+ if (item->hit_idx_grp == 0)
+ continue;
+ if (ice_ternary_match(item->key, item->key_inv, pat,
+ ICE_BST_TCAM_KEY_SIZE))
+ return item;
+ }
+
+ return NULL;
+}
+
+/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/
+/**
+ * ice_ptype_mk_tcam_dump - dump a ptype marker tcam info
+ * @hw: pointer to the hardware structure
+ * @item: ptype marker tcam to dump
+ */
+static void ice_ptype_mk_tcam_dump(struct ice_hw *hw,
+ struct ice_ptype_mk_tcam_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "address = %d\n", item->address);
+ dev_info(dev, "ptype = %d\n", item->ptype);
+
+ dev_info(dev, "key :");
+ for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key[i]);
+
+ dev_info(dev, "\n");
+
+ dev_info(dev, "key_inv:");
+ for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++)
+ dev_info(dev, "%02x ", item->key_inv[i]);
+
+ dev_info(dev, "\n");
+}
+
+static void ice_parse_ptype_mk_tcam_item(struct ice_hw *hw, u16 idx,
+ void *item, void *data, int size)
+{
+ memcpy(item, data, size);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_ptype_mk_tcam_dump(hw,
+ (struct ice_ptype_mk_tcam_item *)item);
+}
+
+/**
+ * ice_ptype_mk_tcam_table_get - create a ptype marker tcam table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Marker PType TCAM table.
+ */
+static
+struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_PTYPE,
+ sizeof(struct ice_ptype_mk_tcam_item),
+ ICE_PTYPE_MK_TCAM_TABLE_SIZE,
+ ice_parse_ptype_mk_tcam_item, true);
+}
+
+/**
+ * ice_ptype_mk_tcam_match - match a pattern on a ptype marker tcam table
+ * @table: ptype marker tcam table to search
+ * @pat: pattern to match
+ * @len: length of the pattern
+ *
+ * Return: a pointer to the matching Marker PType item or NULL.
+ */
+struct ice_ptype_mk_tcam_item *
+ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table,
+ u8 *pat, int len)
+{
+ int i;
+
+ for (i = 0; i < ICE_PTYPE_MK_TCAM_TABLE_SIZE; i++) {
+ struct ice_ptype_mk_tcam_item *item = &table[i];
+
+ if (ice_ternary_match(item->key, item->key_inv, pat, len))
+ return item;
+ }
+
+ return NULL;
+}
+
+/*** ICE_SID_RXPARSER_MARKER_GRP section ***/
+/**
+ * ice_mk_grp_dump - dump a marker group item info
+ * @hw: pointer to the hardware structure
+ * @item: marker group item to dump
+ */
+static void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "index = %d\n", item->idx);
+
+ dev_info(dev, "markers: ");
+ for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++)
+ dev_info(dev, "%d ", item->markers[i]);
+
+ dev_info(dev, "\n");
+}
+
+static void ice_mk_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_mk_grp_item *grp = item;
+ u8 *buf = data;
+ int i;
+
+ grp->idx = idx;
+
+ for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++)
+ grp->markers[i] = buf[i];
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_mk_grp_dump(hw, grp);
+}
+
+/**
+ * ice_mk_grp_table_get - create a marker group table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Marker Group ID table.
+ */
+static struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_GRP,
+ sizeof(struct ice_mk_grp_item),
+ ICE_MK_GRP_TABLE_SIZE,
+ ice_mk_grp_parse_item, false);
+}
+
+/*** ICE_SID_RXPARSER_PROTO_GRP section ***/
+static void ice_proto_off_dump(struct ice_hw *hw,
+ struct ice_proto_off *po, int idx)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "proto %d\n", idx);
+ dev_info(dev, "\tpolarity = %d\n", po->polarity);
+ dev_info(dev, "\tproto_id = %d\n", po->proto_id);
+ dev_info(dev, "\toffset = %d\n", po->offset);
+}
+
+/**
+ * ice_proto_grp_dump - dump a proto group item info
+ * @hw: pointer to the hardware structure
+ * @item: proto group item to dump
+ */
+static void ice_proto_grp_dump(struct ice_hw *hw,
+ struct ice_proto_grp_item *item)
+{
+ int i;
+
+ dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx);
+
+ for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++)
+ ice_proto_off_dump(hw, &item->po[i], i);
+}
+
+#define ICE_PO_POL BIT(0)
+#define ICE_PO_PID GENMASK(8, 1)
+#define ICE_PO_OFF GENMASK(21, 12)
+
+/**
+ * ice_proto_off_parse - parse 22 bits of Protocol entry
+ * @po: pointer to the Protocol entry structure
+ * @data: Protocol entry data to be parsed
+ */
+static void ice_proto_off_parse(struct ice_proto_off *po, u32 data)
+{
+ po->polarity = FIELD_GET(ICE_PO_POL, data);
+ po->proto_id = FIELD_GET(ICE_PO_PID, data);
+ po->offset = FIELD_GET(ICE_PO_OFF, data);
+}
+
+/**
+ * ice_proto_grp_parse_item - parse 192 bits of Protocol Group Table entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Protocol Group Table entry
+ * @item: item of Protocol Group Table entry
+ * @data: Protocol Group Table entry data to be parsed
+ * @size: size of Protocol Group Table entry
+ */
+static void ice_proto_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_proto_grp_item *grp = item;
+ u8 *buf = (u8 *)data;
+ u8 idd, off;
+ u32 d32;
+ int i;
+
+ grp->idx = idx;
+
+ for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) {
+ idd = (ICE_PROTO_GRP_ITEM_SIZE * i) / BITS_PER_BYTE;
+ off = (ICE_PROTO_GRP_ITEM_SIZE * i) % BITS_PER_BYTE;
+ d32 = *((u32 *)&buf[idd]) >> off;
+ ice_proto_off_parse(&grp->po[i], d32);
+ }
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_proto_grp_dump(hw, grp);
+}
+
+/**
+ * ice_proto_grp_table_get - create a proto group table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Protocol Group table.
+ */
+static struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_PROTO_GRP,
+ sizeof(struct ice_proto_grp_item),
+ ICE_PROTO_GRP_TABLE_SIZE,
+ ice_proto_grp_parse_item, false);
+}
+
+/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/
+/**
+ * ice_flg_rd_dump - dump a flag redirect item info
+ * @hw: pointer to the hardware structure
+ * @item: flag redirect item to dump
+ */
+static void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+
+ dev_info(dev, "index = %d\n", item->idx);
+ dev_info(dev, "expose = %d\n", item->expose);
+ dev_info(dev, "intr_flg_id = %d\n", item->intr_flg_id);
+}
+
+#define ICE_FRT_EXPO BIT(0)
+#define ICE_FRT_IFID GENMASK(6, 1)
+
+/**
+ * ice_flg_rd_parse_item - parse 8 bits of Flag Redirect Table entry
+ * @hw: pointer to the hardware structure
+ * @idx: index of Flag Redirect Table entry
+ * @item: item of Flag Redirect Table entry
+ * @data: Flag Redirect Table entry data to be parsed
+ * @size: size of Flag Redirect Table entry
+ */
+static void ice_flg_rd_parse_item(struct ice_hw *hw, u16 idx, void *item,
+ void *data, int __maybe_unused size)
+{
+ struct ice_flg_rd_item *rdi = item;
+ u8 d8 = *(u8 *)data;
+
+ rdi->idx = idx;
+ rdi->expose = FIELD_GET(ICE_FRT_EXPO, d8);
+ rdi->intr_flg_id = FIELD_GET(ICE_FRT_IFID, d8);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_flg_rd_dump(hw, rdi);
+}
+
+/**
+ * ice_flg_rd_table_get - create a flag redirect table
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Flags Redirection table.
+ */
+static struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw)
+{
+ return ice_parser_create_table(hw, ICE_SID_RXPARSER_FLAG_REDIR,
+ sizeof(struct ice_flg_rd_item),
+ ICE_FLG_RD_TABLE_SIZE,
+ ice_flg_rd_parse_item, false);
+}
+
+/**
+ * ice_flg_redirect - redirect parser flags to packet flags
+ * @table: flag redirect table
+ * @psr_flg: parser flags to redirect
+ *
+ * Return: the redirected packet flags, or 0 if @psr_flg is 0.
+ */
+u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg)
+{
+ u64 flg = 0;
+ int i;
+
+ for (i = 0; i < ICE_FLG_RDT_SIZE; i++) {
+ struct ice_flg_rd_item *item = &table[i];
+
+ if (!item->expose)
+ continue;
+
+ if (psr_flg & BIT(item->intr_flg_id))
+ flg |= BIT(i);
+ }
+
+ return flg;
+}
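+
+/* For illustration (hypothetical table contents): if table[3].expose is
+ * true and table[3].intr_flg_id is 10, then bit 10 of @psr_flg is
+ * redirected to bit 3 of the returned packet flags.
+ */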
+
+/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL,
+ * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS
+ * sections ***/
+static void ice_xlt_kb_entry_dump(struct ice_hw *hw,
+ struct ice_xlt_kb_entry *entry, int idx)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "key builder entry %d\n", idx);
+ dev_info(dev, "\txlt1_ad_sel = %d\n", entry->xlt1_ad_sel);
+ dev_info(dev, "\txlt2_ad_sel = %d\n", entry->xlt2_ad_sel);
+
+ for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++)
+ dev_info(dev, "\tflg%d_sel = %d\n", i, entry->flg0_14_sel[i]);
+
+ dev_info(dev, "\txlt1_md_sel = %d\n", entry->xlt1_md_sel);
+ dev_info(dev, "\txlt2_md_sel = %d\n", entry->xlt2_md_sel);
+}
+
+/**
+ * ice_xlt_kb_dump - dump an xlt key build info
+ * @hw: pointer to the hardware structure
+ * @kb: key build to dump
+ */
+static void ice_xlt_kb_dump(struct ice_hw *hw, struct ice_xlt_kb *kb)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "xlt1_pm = %d\n", kb->xlt1_pm);
+ dev_info(dev, "xlt2_pm = %d\n", kb->xlt2_pm);
+ dev_info(dev, "prof_id_pm = %d\n", kb->prof_id_pm);
+ dev_info(dev, "flag15 lo = 0x%08x\n", (u32)kb->flag15);
+ dev_info(dev, "flag15 hi = 0x%08x\n",
+ (u32)(kb->flag15 >> (sizeof(u32) * BITS_PER_BYTE)));
+
+ for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++)
+ ice_xlt_kb_entry_dump(hw, &kb->entries[i], i);
+}
+
+#define ICE_XLT_KB_X1AS_S	32	/* offset for the 1st 64-bit field */
+#define ICE_XLT_KB_X1AS_IDD (ICE_XLT_KB_X1AS_S / BITS_PER_BYTE)
+#define ICE_XLT_KB_X1AS_OFF (ICE_XLT_KB_X1AS_S % BITS_PER_BYTE)
+#define ICE_XLT_KB_X1AS GENMASK_ULL(34 - ICE_XLT_KB_X1AS_S, \
+ 32 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_X2AS GENMASK_ULL(37 - ICE_XLT_KB_X1AS_S, \
+ 35 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL00 GENMASK_ULL(46 - ICE_XLT_KB_X1AS_S, \
+ 38 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL01 GENMASK_ULL(55 - ICE_XLT_KB_X1AS_S, \
+ 47 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL02 GENMASK_ULL(64 - ICE_XLT_KB_X1AS_S, \
+ 56 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL03 GENMASK_ULL(73 - ICE_XLT_KB_X1AS_S, \
+ 65 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL04 GENMASK_ULL(82 - ICE_XLT_KB_X1AS_S, \
+ 74 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL05 GENMASK_ULL(91 - ICE_XLT_KB_X1AS_S, \
+ 83 - ICE_XLT_KB_X1AS_S)
+#define ICE_XLT_KB_FL06_S	92	/* offset for the 2nd 64-bit field */
+#define ICE_XLT_KB_FL06_IDD (ICE_XLT_KB_FL06_S / BITS_PER_BYTE)
+#define ICE_XLT_KB_FL06_OFF (ICE_XLT_KB_FL06_S % BITS_PER_BYTE)
+#define ICE_XLT_KB_FL06 GENMASK_ULL(100 - ICE_XLT_KB_FL06_S, \
+ 92 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL07 GENMASK_ULL(109 - ICE_XLT_KB_FL06_S, \
+ 101 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL08 GENMASK_ULL(118 - ICE_XLT_KB_FL06_S, \
+ 110 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL09 GENMASK_ULL(127 - ICE_XLT_KB_FL06_S, \
+ 119 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL10 GENMASK_ULL(136 - ICE_XLT_KB_FL06_S, \
+ 128 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL11 GENMASK_ULL(145 - ICE_XLT_KB_FL06_S, \
+ 137 - ICE_XLT_KB_FL06_S)
+#define ICE_XLT_KB_FL12_S	146	/* offset for the 3rd 64-bit field */
+#define ICE_XLT_KB_FL12_IDD (ICE_XLT_KB_FL12_S / BITS_PER_BYTE)
+#define ICE_XLT_KB_FL12_OFF (ICE_XLT_KB_FL12_S % BITS_PER_BYTE)
+#define ICE_XLT_KB_FL12 GENMASK_ULL(154 - ICE_XLT_KB_FL12_S, \
+ 146 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_FL13 GENMASK_ULL(163 - ICE_XLT_KB_FL12_S, \
+ 155 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_FL14 GENMASK_ULL(181 - ICE_XLT_KB_FL12_S, \
+ 164 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_X1MS GENMASK_ULL(186 - ICE_XLT_KB_FL12_S, \
+ 182 - ICE_XLT_KB_FL12_S)
+#define ICE_XLT_KB_X2MS GENMASK_ULL(191 - ICE_XLT_KB_FL12_S, \
+ 187 - ICE_XLT_KB_FL12_S)
+
+/**
+ * ice_kb_entry_init - parse 192 bits of XLT Key Builder entry
+ * @entry: pointer to the XLT Key Builder entry structure
+ * @data: XLT Key Builder entry data to be parsed
+ */
+static void ice_kb_entry_init(struct ice_xlt_kb_entry *entry, u8 *data)
+{
+ u8 i = 0;
+ u64 d64;
+
+ d64 = *((u64 *)&data[ICE_XLT_KB_X1AS_IDD]) >> ICE_XLT_KB_X1AS_OFF;
+
+ entry->xlt1_ad_sel = FIELD_GET(ICE_XLT_KB_X1AS, d64);
+ entry->xlt2_ad_sel = FIELD_GET(ICE_XLT_KB_X2AS, d64);
+
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL00, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL01, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL02, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL03, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL04, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL05, d64);
+
+ d64 = *((u64 *)&data[ICE_XLT_KB_FL06_IDD]) >> ICE_XLT_KB_FL06_OFF;
+
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL06, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL07, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL08, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL09, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL10, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL11, d64);
+
+ d64 = *((u64 *)&data[ICE_XLT_KB_FL12_IDD]) >> ICE_XLT_KB_FL12_OFF;
+
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL12, d64);
+ entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL13, d64);
+ entry->flg0_14_sel[i] = FIELD_GET(ICE_XLT_KB_FL14, d64);
+
+ entry->xlt1_md_sel = FIELD_GET(ICE_XLT_KB_X1MS, d64);
+ entry->xlt2_md_sel = FIELD_GET(ICE_XLT_KB_X2MS, d64);
+}
+
+#define ICE_XLT_KB_X1PM_OFF 0
+#define ICE_XLT_KB_X2PM_OFF 1
+#define ICE_XLT_KB_PIPM_OFF 2
+#define ICE_XLT_KB_FL15_OFF 4
+#define ICE_XLT_KB_TBL_OFF 12
+
+/**
+ * ice_parse_kb_data - parse 204 bits of XLT Key Build Table
+ * @hw: pointer to the hardware structure
+ * @kb: pointer to the XLT Key Build Table structure
+ * @data: XLT Key Build Table data to be parsed
+ */
+static void ice_parse_kb_data(struct ice_hw *hw, struct ice_xlt_kb *kb,
+ void *data)
+{
+ u8 *buf = data;
+ int i;
+
+ kb->xlt1_pm = buf[ICE_XLT_KB_X1PM_OFF];
+ kb->xlt2_pm = buf[ICE_XLT_KB_X2PM_OFF];
+ kb->prof_id_pm = buf[ICE_XLT_KB_PIPM_OFF];
+
+ kb->flag15 = *(u64 *)&buf[ICE_XLT_KB_FL15_OFF];
+ for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++)
+ ice_kb_entry_init(&kb->entries[i],
+ &buf[ICE_XLT_KB_TBL_OFF +
+ i * ICE_XLT_KB_TBL_ENTRY_SIZE]);
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_xlt_kb_dump(hw, kb);
+}
+
+static struct ice_xlt_kb *ice_xlt_kb_get(struct ice_hw *hw, u32 sect_type)
+{
+ struct ice_pkg_enum state = {};
+ struct ice_seg *seg = hw->seg;
+ struct ice_xlt_kb *kb;
+ void *data;
+
+ if (!seg)
+ return ERR_PTR(-EINVAL);
+
+ kb = kzalloc(sizeof(*kb), GFP_KERNEL);
+ if (!kb)
+ return ERR_PTR(-ENOMEM);
+
+ data = ice_pkg_enum_section(seg, &state, sect_type);
+ if (!data) {
+ ice_debug(hw, ICE_DBG_PARSER, "failed to find section type %d.\n",
+ sect_type);
+ kfree(kb);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ice_parse_kb_data(hw, kb, data);
+
+ return kb;
+}
+
+/**
+ * ice_xlt_kb_get_sw - create switch xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for Switch.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_SW);
+}
+
+/**
+ * ice_xlt_kb_get_acl - create acl xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for ACL.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_ACL);
+}
+
+/**
+ * ice_xlt_kb_get_fd - create fdir xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for Flow Director.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_FD);
+}
+
+/**
+ * ice_xlt_kb_get_rss - create rss xlt key build
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated Key Builder table for RSS.
+ */
+static struct ice_xlt_kb *ice_xlt_kb_get_rss(struct ice_hw *hw)
+{
+ return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_RSS);
+}
+
+#define ICE_XLT_KB_MASK GENMASK_ULL(5, 0)
+
+/**
+ * ice_xlt_kb_flag_get - aggregate 64-bit packet flags into a 16-bit XLT flag
+ * @kb: xlt key build
+ * @pkt_flag: 64-bit packet flags
+ *
+ * Return: the aggregated XLT flag, or 0 if @pkt_flag is 0.
+ */
+u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag)
+{
+ struct ice_xlt_kb_entry *entry = &kb->entries[0];
+ u16 flag = 0;
+ int i;
+
+ /* check flag 15 */
+ if (kb->flag15 & pkt_flag)
+ flag = BIT(ICE_XLT_KB_FLAG0_14_CNT);
+
+ /* check flag 0 - 14 */
+ for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++) {
+ /* only check first entry */
+ u16 idx = entry->flg0_14_sel[i] & ICE_XLT_KB_MASK;
+
+ if (pkt_flag & BIT(idx))
+ flag |= (u16)BIT(i);
+ }
+
+ return flag;
+}
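+
+/* For illustration (hypothetical selector values): if
+ * entries[0].flg0_14_sel[2] is 40, then packet flag bit 40 is mapped to
+ * XLT flag bit 2; any packet flag bit covered by kb->flag15 sets XLT
+ * flag bit 15.
+ */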
+
+/*** Parser API ***/
+/**
+ * ice_parser_create - create a parser instance
+ * @hw: pointer to the hardware structure
+ *
+ * Return: a pointer to the allocated parser instance or ERR_PTR
+ * in case of error.
+ */
+struct ice_parser *ice_parser_create(struct ice_hw *hw)
+{
+ struct ice_parser *p;
+ void *err;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ p->hw = hw;
+ p->rt.psr = p;
+
+ p->imem_table = ice_imem_table_get(hw);
+ if (IS_ERR(p->imem_table)) {
+ err = p->imem_table;
+ goto err;
+ }
+
+ p->mi_table = ice_metainit_table_get(hw);
+ if (IS_ERR(p->mi_table)) {
+ err = p->mi_table;
+ goto err;
+ }
+
+ p->pg_cam_table = ice_pg_cam_table_get(hw);
+ if (IS_ERR(p->pg_cam_table)) {
+ err = p->pg_cam_table;
+ goto err;
+ }
+
+ p->pg_sp_cam_table = ice_pg_sp_cam_table_get(hw);
+ if (IS_ERR(p->pg_sp_cam_table)) {
+ err = p->pg_sp_cam_table;
+ goto err;
+ }
+
+ p->pg_nm_cam_table = ice_pg_nm_cam_table_get(hw);
+ if (IS_ERR(p->pg_nm_cam_table)) {
+ err = p->pg_nm_cam_table;
+ goto err;
+ }
+
+ p->pg_nm_sp_cam_table = ice_pg_nm_sp_cam_table_get(hw);
+ if (IS_ERR(p->pg_nm_sp_cam_table)) {
+ err = p->pg_nm_sp_cam_table;
+ goto err;
+ }
+
+ p->bst_tcam_table = ice_bst_tcam_table_get(hw);
+ if (IS_ERR(p->bst_tcam_table)) {
+ err = p->bst_tcam_table;
+ goto err;
+ }
+
+ p->bst_lbl_table = ice_bst_lbl_table_get(hw);
+ if (IS_ERR(p->bst_lbl_table)) {
+ err = p->bst_lbl_table;
+ goto err;
+ }
+
+ p->ptype_mk_tcam_table = ice_ptype_mk_tcam_table_get(hw);
+ if (IS_ERR(p->ptype_mk_tcam_table)) {
+ err = p->ptype_mk_tcam_table;
+ goto err;
+ }
+
+ p->mk_grp_table = ice_mk_grp_table_get(hw);
+ if (IS_ERR(p->mk_grp_table)) {
+ err = p->mk_grp_table;
+ goto err;
+ }
+
+ p->proto_grp_table = ice_proto_grp_table_get(hw);
+ if (IS_ERR(p->proto_grp_table)) {
+ err = p->proto_grp_table;
+ goto err;
+ }
+
+ p->flg_rd_table = ice_flg_rd_table_get(hw);
+ if (IS_ERR(p->flg_rd_table)) {
+ err = p->flg_rd_table;
+ goto err;
+ }
+
+ p->xlt_kb_sw = ice_xlt_kb_get_sw(hw);
+ if (IS_ERR(p->xlt_kb_sw)) {
+ err = p->xlt_kb_sw;
+ goto err;
+ }
+
+ p->xlt_kb_acl = ice_xlt_kb_get_acl(hw);
+ if (IS_ERR(p->xlt_kb_acl)) {
+ err = p->xlt_kb_acl;
+ goto err;
+ }
+
+ p->xlt_kb_fd = ice_xlt_kb_get_fd(hw);
+ if (IS_ERR(p->xlt_kb_fd)) {
+ err = p->xlt_kb_fd;
+ goto err;
+ }
+
+ p->xlt_kb_rss = ice_xlt_kb_get_rss(hw);
+ if (IS_ERR(p->xlt_kb_rss)) {
+ err = p->xlt_kb_rss;
+ goto err;
+ }
+
+ return p;
+err:
+ ice_parser_destroy(p);
+ return err;
+}
+
+/**
+ * ice_parser_destroy - destroy a parser instance
+ * @psr: pointer to a parser instance
+ */
+void ice_parser_destroy(struct ice_parser *psr)
+{
+ kfree(psr->imem_table);
+ kfree(psr->mi_table);
+ kfree(psr->pg_cam_table);
+ kfree(psr->pg_sp_cam_table);
+ kfree(psr->pg_nm_cam_table);
+ kfree(psr->pg_nm_sp_cam_table);
+ kfree(psr->bst_tcam_table);
+ kfree(psr->bst_lbl_table);
+ kfree(psr->ptype_mk_tcam_table);
+ kfree(psr->mk_grp_table);
+ kfree(psr->proto_grp_table);
+ kfree(psr->flg_rd_table);
+ kfree(psr->xlt_kb_sw);
+ kfree(psr->xlt_kb_acl);
+ kfree(psr->xlt_kb_fd);
+ kfree(psr->xlt_kb_rss);
+
+ kfree(psr);
+}
+
+/**
+ * ice_parser_run - parse a packet buffer and return the result
+ * @psr: pointer to a parser instance
+ * @pkt_buf: packet data
+ * @pkt_len: packet length
+ * @rslt: output parameter to save the parser result
+ *
+ * Return: 0 on success or errno.
+ */
+int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf,
+ int pkt_len, struct ice_parser_result *rslt)
+{
+ ice_parser_rt_reset(&psr->rt);
+ ice_parser_rt_pktbuf_set(&psr->rt, pkt_buf, pkt_len);
+
+ return ice_parser_rt_execute(&psr->rt, rslt);
+}
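+
+/* Typical call sequence (a sketch; error handling trimmed; hw, pkt and
+ * pkt_len are caller-provided):
+ *
+ * struct ice_parser_result rslt;
+ * struct ice_parser *psr;
+ * int err;
+ *
+ * psr = ice_parser_create(hw);
+ * if (IS_ERR(psr))
+ * return PTR_ERR(psr);
+ * err = ice_parser_run(psr, pkt, pkt_len, &rslt);
+ * ice_parser_destroy(psr);
+ */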
+
+/**
+ * ice_parser_result_dump - dump a parser result info
+ * @hw: pointer to the hardware structure
+ * @rslt: parser result info to dump
+ */
+void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ int i;
+
+ dev_info(dev, "ptype = %d\n", rslt->ptype);
+ for (i = 0; i < rslt->po_num; i++)
+ dev_info(dev, "proto = %d, offset = %d\n",
+ rslt->po[i].proto_id, rslt->po[i].offset);
+
+ dev_info(dev, "flags_psr = 0x%016llx\n", rslt->flags_psr);
+ dev_info(dev, "flags_pkt = 0x%016llx\n", rslt->flags_pkt);
+ dev_info(dev, "flags_sw = 0x%04x\n", rslt->flags_sw);
+ dev_info(dev, "flags_fd = 0x%04x\n", rslt->flags_fd);
+ dev_info(dev, "flags_rss = 0x%04x\n", rslt->flags_rss);
+}
+
+#define ICE_BT_VLD_KEY 0xFF
+#define ICE_BT_INV_KEY 0xFE
+
+static void ice_bst_dvm_set(struct ice_parser *psr, enum ice_lbl_type type,
+ bool on)
+{
+ u16 i = 0;
+
+ while (true) {
+ struct ice_bst_tcam_item *item;
+ u8 key;
+
+ item = ice_bst_tcam_search(psr->bst_tcam_table,
+ psr->bst_lbl_table,
+ type, &i);
+ if (!item)
+ break;
+
+ key = on ? ICE_BT_VLD_KEY : ICE_BT_INV_KEY;
+ item->key[ICE_BT_VM_OFF] = key;
+ item->key_inv[ICE_BT_VM_OFF] = key;
+ i++;
+ }
+}
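+
+/* Note on the key values above: per the ternary rules earlier in this
+ * file, ICE_BT_VLD_KEY (0xFF) in both key and key_inv makes every bit of
+ * that byte "Always match", enabling the entry, while ICE_BT_INV_KEY
+ * (0xFE) clears bit 0 in both, producing a "Never match" bit that
+ * disables the entry.
+ */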
+
+/**
+ * ice_parser_dvm_set - configure double vlan mode for parser
+ * @psr: pointer to a parser instance
+ * @on: true to turn on; false to turn off
+ */
+void ice_parser_dvm_set(struct ice_parser *psr, bool on)
+{
+ ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_DVM, on);
+ ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_SVM, !on);
+}
+
+static int ice_tunnel_port_set(struct ice_parser *psr, enum ice_lbl_type type,
+ u16 udp_port, bool on)
+{
+ u8 *buf = (u8 *)&udp_port;
+ u16 i = 0;
+
+ while (true) {
+ struct ice_bst_tcam_item *item;
+
+ item = ice_bst_tcam_search(psr->bst_tcam_table,
+ psr->bst_lbl_table,
+ type, &i);
+ if (!item)
+ break;
+
+ /* found empty slot to add */
+ if (on && item->key[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY &&
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY) {
+ item->key_inv[ICE_BT_TUN_PORT_OFF_L] =
+ buf[ICE_UDP_PORT_OFF_L];
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] =
+ buf[ICE_UDP_PORT_OFF_H];
+
+ item->key[ICE_BT_TUN_PORT_OFF_L] =
+ ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_L];
+ item->key[ICE_BT_TUN_PORT_OFF_H] =
+ ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_H];
+
+ return 0;
+ /* found a matched slot to delete */
+ } else if (!on &&
+ (item->key_inv[ICE_BT_TUN_PORT_OFF_L] ==
+ buf[ICE_UDP_PORT_OFF_L] ||
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] ==
+ buf[ICE_UDP_PORT_OFF_H])) {
+ item->key_inv[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY;
+ item->key_inv[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY;
+
+ item->key[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY;
+ item->key[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY;
+
+ return 0;
+ }
+ i++;
+ }
+
+ return -EINVAL;
+}
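+
+/* The complement encoding above makes the TCAM match one exact byte: for
+ * a (hypothetical) port byte 0x12, key_inv = 0x12 and key = 0xFF - 0x12 =
+ * 0xED, so each bit is either "match on 1" (Key 0/Key_inv 1) or "match
+ * on 0" (Key 1/Key_inv 0), and only 0x12 matches.
+ */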
+
+/**
+ * ice_parser_vxlan_tunnel_set - configure vxlan tunnel for parser
+ * @psr: pointer to a parser instance
+ * @udp_port: vxlan tunnel port in UDP header
+ * @on: true to turn on; false to turn off
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_vxlan_tunnel_set(struct ice_parser *psr,
+ u16 udp_port, bool on)
+{
+ return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_VXLAN, udp_port, on);
+}
+
+/**
+ * ice_parser_geneve_tunnel_set - configure geneve tunnel for parser
+ * @psr: pointer to a parser instance
+ * @udp_port: geneve tunnel port in UDP header
+ * @on: true to turn on; false to turn off
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_geneve_tunnel_set(struct ice_parser *psr,
+ u16 udp_port, bool on)
+{
+ return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_GENEVE, udp_port, on);
+}
+
+/**
+ * ice_parser_ecpri_tunnel_set - configure ecpri tunnel for parser
+ * @psr: pointer to a parser instance
+ * @udp_port: ecpri tunnel port in UDP header
+ * @on: true to turn on; false to turn off
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_ecpri_tunnel_set(struct ice_parser *psr,
+ u16 udp_port, bool on)
+{
+ return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_UDP_ECPRI,
+ udp_port, on);
+}
+
+/**
+ * ice_nearest_proto_id - find nearest protocol ID
+ * @rslt: pointer to a parser result instance
+ * @offset: the target offset; only protocols at or before it are considered
+ * @proto_id: the protocol ID (output)
+ * @proto_off: the distance from the protocol header to @offset (output)
+ *
+ * From the protocols in @rslt, find the nearest one whose offset does not
+ * exceed @offset.
+ *
+ * Return: true if such a protocol is found and the distance is even; the
+ * ID and distance are stored in @proto_id and @proto_off.
+ */
+static bool ice_nearest_proto_id(struct ice_parser_result *rslt, u16 offset,
+ u8 *proto_id, u16 *proto_off)
+{
+ u16 dist = U16_MAX;
+ u8 proto = 0;
+ int i;
+
+ for (i = 0; i < rslt->po_num; i++) {
+ if (offset < rslt->po[i].offset)
+ continue;
+ if (offset - rslt->po[i].offset < dist) {
+ proto = rslt->po[i].proto_id;
+ dist = offset - rslt->po[i].offset;
+ }
+ }
+
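+ /* the field vector's spec/msk are consumed as 16-bit words, so an
+ * odd distance from the protocol header start cannot be used
+ */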
+ if (dist % 2)
+ return false;
+
+ *proto_id = proto;
+ *proto_off = dist;
+
+ return true;
+}
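+
+/* For illustration (hypothetical result contents): with po = {ID 4 at
+ * offset 14, ID 6 at offset 34} and @offset = 36, the nearest protocol
+ * at or before 36 is ID 6, so *proto_id = 6 and *proto_off = 2; with
+ * @offset = 35 the distance is odd and the function returns false.
+ */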
+
+/* Default flag masks to cover GTP_EH_PDU, GTP_EH_PDU_LINK and TUN2.
+ * In the future, the flag masks should be learned from the DDP package.
+ */
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW 0x4002
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL 0x0000
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD 0x6080
+#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS 0x6010
+
+/**
+ * ice_parser_profile_init - initialize an FXP profile based on parser result
+ * @rslt: an instance of a parser result
+ * @pkt_buf: packet data buffer
+ * @msk_buf: packet mask buffer
+ * @buf_len: packet buffer length
+ * @blk: FXP pipeline stage
+ * @prof: output parameter to save the profile
+ *
+ * Return: 0 on success or errno on failure.
+ */
+int ice_parser_profile_init(struct ice_parser_result *rslt,
+ const u8 *pkt_buf, const u8 *msk_buf,
+ int buf_len, enum ice_block blk,
+ struct ice_parser_profile *prof)
+{
+ u8 proto_id = U8_MAX;
+ u16 proto_off = 0;
+ u16 off;
+
+ memset(prof, 0, sizeof(*prof));
+ set_bit(rslt->ptype, prof->ptypes);
+ if (blk == ICE_BLK_SW) {
+ prof->flags = rslt->flags_sw;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW;
+ } else if (blk == ICE_BLK_ACL) {
+ prof->flags = rslt->flags_acl;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL;
+ } else if (blk == ICE_BLK_FD) {
+ prof->flags = rslt->flags_fd;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD;
+ } else if (blk == ICE_BLK_RSS) {
+ prof->flags = rslt->flags_rss;
+ prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS;
+ } else {
+ return -EINVAL;
+ }
+
+ for (off = 0; off < buf_len - 1; off++) {
+ if (msk_buf[off] == 0 && msk_buf[off + 1] == 0)
+ continue;
+ if (!ice_nearest_proto_id(rslt, off, &proto_id, &proto_off))
+ continue;
+ if (prof->fv_num >= ICE_PARSER_FV_MAX)
+ return -EINVAL;
+
+ prof->fv[prof->fv_num].proto_id = proto_id;
+ prof->fv[prof->fv_num].offset = proto_off;
+ prof->fv[prof->fv_num].spec = *(const u16 *)&pkt_buf[off];
+ prof->fv[prof->fv_num].msk = *(const u16 *)&msk_buf[off];
+ prof->fv_num++;
+ }
+
+ return 0;
+}
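+
+/* For illustration (hypothetical buffers): if @msk_buf is all zero except
+ * a 0xFFFF word at offset 34, and @rslt contains protocol ID 6 at offset
+ * 34, exactly one field vector is produced: proto_id = 6, offset = 0,
+ * spec = the packet word at offset 34 and msk = 0xFFFF.
+ */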
+
+/**
+ * ice_parser_profile_dump - dump an FXP profile info
+ * @hw: pointer to the hardware structure
+ * @prof: profile info to dump
+ */
+void ice_parser_profile_dump(struct ice_hw *hw,
+ struct ice_parser_profile *prof)
+{
+ struct device *dev = ice_hw_to_dev(hw);
+ u16 i;
+
+ dev_info(dev, "ptypes:\n");
+ for (i = 0; i < ICE_FLOW_PTYPE_MAX; i++)
+ if (test_bit(i, prof->ptypes))
+ dev_info(dev, "\t%u\n", i);
+
+ for (i = 0; i < prof->fv_num; i++)
+ dev_info(dev, "proto = %u, offset = %2u, spec = 0x%04x, mask = 0x%04x\n",
+ prof->fv[i].proto_id, prof->fv[i].offset,
+ prof->fv[i].spec, prof->fv[i].msk);
+
+ dev_info(dev, "flags = 0x%04x\n", prof->flags);
+ dev_info(dev, "flags_msk = 0x%04x\n", prof->flags_msk);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h
new file mode 100644
index 000000000000..6509d807627c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_parser.h
@@ -0,0 +1,540 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _ICE_PARSER_H_
+#define _ICE_PARSER_H_
+
+#define ICE_SEC_DATA_OFFSET 4
+#define ICE_SID_RXPARSER_IMEM_ENTRY_SIZE 48
+#define ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE 24
+#define ICE_SID_RXPARSER_CAM_ENTRY_SIZE 16
+#define ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE 17
+#define ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE 12
+#define ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE 13
+#define ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE 88
+#define ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE 24
+#define ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE 8
+#define ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE 24
+#define ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE 1
+
+#define ICE_SEC_LBL_DATA_OFFSET 2
+#define ICE_SID_LBL_ENTRY_SIZE 66
+
+/*** ICE_SID_RXPARSER_IMEM section ***/
+#define ICE_IMEM_TABLE_SIZE 192
+
+/* Boost TCAM main control; when a bit is set and the Boost TCAM hits, the
+ * TCAM output overrides the iMEM output.
+ */
+struct ice_bst_main {
+ bool alu0;
+ bool alu1;
+ bool alu2;
+ bool pg;
+};
+
+struct ice_bst_keybuilder {
+ u8 prio; /* 0-3: PG precedence within ALUs (3 highest) */
+ bool tsr_ctrl; /* TCAM Search Register control */
+};
+
+/* Next protocol Key builder */
+struct ice_np_keybuilder {
+ u8 opc;
+ u8 start_reg0;
+ u8 len_reg1;
+};
+
+enum ice_np_keybuilder_opcode {
+ ICE_NPKB_OPC_EXTRACT = 0,
+ ICE_NPKB_OPC_BUILD = 1,
+ ICE_NPKB_OPC_BYPASS = 2,
+};
+
+/* Parse Graph Key builder */
+struct ice_pg_keybuilder {
+ bool flag0_ena;
+ bool flag1_ena;
+ bool flag2_ena;
+ bool flag3_ena;
+ u8 flag0_idx;
+ u8 flag1_idx;
+ u8 flag2_idx;
+ u8 flag3_idx;
+ u8 alu_reg_idx;
+};
+
+enum ice_alu_idx {
+ ICE_ALU0_IDX = 0,
+ ICE_ALU1_IDX = 1,
+ ICE_ALU2_IDX = 2,
+};
+
+enum ice_alu_opcode {
+ ICE_ALU_PARK = 0,
+ ICE_ALU_MOV_ADD = 1,
+ ICE_ALU_ADD = 2,
+ ICE_ALU_MOV_AND = 4,
+ ICE_ALU_AND = 5,
+ ICE_ALU_AND_IMM = 6,
+ ICE_ALU_MOV_OR = 7,
+ ICE_ALU_OR = 8,
+ ICE_ALU_MOV_XOR = 9,
+ ICE_ALU_XOR = 10,
+ ICE_ALU_NOP = 11,
+ ICE_ALU_BR = 12,
+ ICE_ALU_BREQ = 13,
+ ICE_ALU_BRNEQ = 14,
+ ICE_ALU_BRGT = 15,
+ ICE_ALU_BRLT = 16,
+ ICE_ALU_BRGEQ = 17,
+ ICE_ALU_BRLEG = 18,
+ ICE_ALU_SETEQ = 19,
+ ICE_ALU_ANDEQ = 20,
+ ICE_ALU_OREQ = 21,
+ ICE_ALU_SETNEQ = 22,
+ ICE_ALU_ANDNEQ = 23,
+ ICE_ALU_ORNEQ = 24,
+ ICE_ALU_SETGT = 25,
+ ICE_ALU_ANDGT = 26,
+ ICE_ALU_ORGT = 27,
+ ICE_ALU_SETLT = 28,
+ ICE_ALU_ANDLT = 29,
+ ICE_ALU_ORLT = 30,
+ ICE_ALU_MOV_SUB = 31,
+ ICE_ALU_SUB = 32,
+ ICE_ALU_INVALID = 64,
+};
+
+enum ice_proto_off_opcode {
+ ICE_PO_OFF_REMAIN = 0,
+ ICE_PO_OFF_HDR_ADD = 1,
+ ICE_PO_OFF_HDR_SUB = 2,
+};
+
+struct ice_alu {
+ enum ice_alu_opcode opc;
+ u8 src_start;
+ u8 src_len;
+ bool shift_xlate_sel;
+ u8 shift_xlate_key;
+ u8 src_reg_id;
+ u8 dst_reg_id;
+ bool inc0;
+ bool inc1;
+ u8 proto_offset_opc;
+ u8 proto_offset;
+ u8 branch_addr;
+ u16 imm;
+ bool dedicate_flags_ena;
+ u8 dst_start;
+ u8 dst_len;
+ bool flags_extr_imm;
+ u8 flags_start_imm;
+};
+
+/* Parser program code (iMEM) */
+struct ice_imem_item {
+ u16 idx;
+ struct ice_bst_main b_m;
+ struct ice_bst_keybuilder b_kb;
+ u8 pg_prio;
+ struct ice_np_keybuilder np_kb;
+ struct ice_pg_keybuilder pg_kb;
+ struct ice_alu alu0;
+ struct ice_alu alu1;
+ struct ice_alu alu2;
+};
+
+/*** ICE_SID_RXPARSER_METADATA_INIT section ***/
+#define ICE_METAINIT_TABLE_SIZE 16
+
+/* Metadata Initialization item */
+struct ice_metainit_item {
+ u16 idx;
+
+ u8 tsr; /* TCAM Search key Register */
+ u16 ho; /* Header Offset register */
+ u16 pc; /* Program Counter register */
+ u16 pg_rn; /* Parse Graph Root Node */
+ u8 cd; /* Control Domain ID */
+
+ /* General Purpose Registers */
+ bool gpr_a_ctrl;
+ u8 gpr_a_data_mdid;
+ u8 gpr_a_data_start;
+ u8 gpr_a_data_len;
+ u8 gpr_a_id;
+
+ bool gpr_b_ctrl;
+ u8 gpr_b_data_mdid;
+ u8 gpr_b_data_start;
+ u8 gpr_b_data_len;
+ u8 gpr_b_id;
+
+ bool gpr_c_ctrl;
+ u8 gpr_c_data_mdid;
+ u8 gpr_c_data_start;
+ u8 gpr_c_data_len;
+ u8 gpr_c_id;
+
+ bool gpr_d_ctrl;
+ u8 gpr_d_data_mdid;
+ u8 gpr_d_data_start;
+ u8 gpr_d_data_len;
+ u8 gpr_d_id;
+
+ u64 flags; /* Initial value for all flags */
+};
+
+/*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL,
+ * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_SPILL
+ * sections ***/
+#define ICE_PG_CAM_TABLE_SIZE 2048
+#define ICE_PG_SP_CAM_TABLE_SIZE 128
+#define ICE_PG_NM_CAM_TABLE_SIZE 1024
+#define ICE_PG_NM_SP_CAM_TABLE_SIZE 64
+
+struct ice_pg_cam_key {
+ bool valid;
+ struct_group_attr(val, __packed,
+ u16 node_id; /* Node ID of protocol in parse graph */
+ bool flag0;
+ bool flag1;
+ bool flag2;
+ bool flag3;
+ u8 boost_idx; /* Boost TCAM match index */
+ u16 alu_reg;
+ u32 next_proto; /* next Protocol value (must be last) */
+ );
+};
+
+struct ice_pg_nm_cam_key {
+ bool valid;
+ struct_group_attr(val, __packed,
+ u16 node_id;
+ bool flag0;
+ bool flag1;
+ bool flag2;
+ bool flag3;
+ u8 boost_idx;
+ u16 alu_reg;
+ );
+};
+
+struct ice_pg_cam_action {
+ u16 next_node; /* Parser Node ID for the next round */
+ u8 next_pc; /* next Program Counter */
+ bool is_pg; /* is protocol group */
+ u8 proto_id; /* protocol ID or proto group ID */
+ bool is_mg; /* is marker group */
+ u8 marker_id; /* marker ID or marker group ID */
+ bool is_last_round;
+ bool ho_polarity; /* header offset polarity */
+ u16 ho_inc;
+};
+
+/* Parse Graph item */
+struct ice_pg_cam_item {
+ u16 idx;
+ struct ice_pg_cam_key key;
+ struct ice_pg_cam_action action;
+};
+
+/* Parse Graph No Match item */
+struct ice_pg_nm_cam_item {
+ u16 idx;
+ struct ice_pg_nm_cam_key key;
+ struct ice_pg_cam_action action;
+};
+
+struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table,
+ int size, struct ice_pg_cam_key *key);
+struct ice_pg_nm_cam_item *
+ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
+ struct ice_pg_cam_key *key);
+
+/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
+#define ICE_BST_TCAM_TABLE_SIZE 256
+#define ICE_BST_TCAM_KEY_SIZE 20
+#define ICE_BST_KEY_TCAM_SIZE 19
+
+/* Boost TCAM item */
+struct ice_bst_tcam_item {
+ u16 addr;
+ u8 key[ICE_BST_TCAM_KEY_SIZE];
+ u8 key_inv[ICE_BST_TCAM_KEY_SIZE];
+ u8 hit_idx_grp;
+ u8 pg_prio;
+ struct ice_np_keybuilder np_kb;
+ struct ice_pg_keybuilder pg_kb;
+ struct ice_alu alu0;
+ struct ice_alu alu1;
+ struct ice_alu alu2;
+};
+
+#define ICE_LBL_LEN 64
+#define ICE_LBL_BST_DVM "BOOST_MAC_VLAN_DVM"
+#define ICE_LBL_BST_SVM "BOOST_MAC_VLAN_SVM"
+#define ICE_LBL_TNL_VXLAN "TNL_VXLAN"
+#define ICE_LBL_TNL_GENEVE "TNL_GENEVE"
+#define ICE_LBL_TNL_UDP_ECPRI "TNL_UDP_ECPRI"
+
+enum ice_lbl_type {
+ ICE_LBL_BST_TYPE_UNKNOWN,
+ ICE_LBL_BST_TYPE_DVM,
+ ICE_LBL_BST_TYPE_SVM,
+ ICE_LBL_BST_TYPE_VXLAN,
+ ICE_LBL_BST_TYPE_GENEVE,
+ ICE_LBL_BST_TYPE_UDP_ECPRI,
+};
+
+struct ice_lbl_item {
+ u16 idx;
+ char label[ICE_LBL_LEN];
+
+ /* must be at the end, not part of the DDP section */
+ enum ice_lbl_type type;
+};
+
+struct ice_bst_tcam_item *
+ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat);
+struct ice_bst_tcam_item *
+ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table,
+ struct ice_lbl_item *lbl_table,
+ enum ice_lbl_type type, u16 *start);
+
+/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/
+#define ICE_PTYPE_MK_TCAM_TABLE_SIZE 1024
+#define ICE_PTYPE_MK_TCAM_KEY_SIZE 10
+
+struct ice_ptype_mk_tcam_item {
+ u16 address;
+ u16 ptype;
+ u8 key[ICE_PTYPE_MK_TCAM_KEY_SIZE];
+ u8 key_inv[ICE_PTYPE_MK_TCAM_KEY_SIZE];
+} __packed;
+
+struct ice_ptype_mk_tcam_item *
+ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table,
+ u8 *pat, int len);
+/*** ICE_SID_RXPARSER_MARKER_GRP section ***/
+#define ICE_MK_GRP_TABLE_SIZE 128
+#define ICE_MK_COUNT_PER_GRP 8
+
+/* Marker Group item */
+struct ice_mk_grp_item {
+ int idx;
+ u8 markers[ICE_MK_COUNT_PER_GRP];
+};
+
+/*** ICE_SID_RXPARSER_PROTO_GRP section ***/
+#define ICE_PROTO_COUNT_PER_GRP 8
+#define ICE_PROTO_GRP_TABLE_SIZE 192
+#define ICE_PROTO_GRP_ITEM_SIZE 22
+struct ice_proto_off {
+ bool polarity; /* true: positive, false: negative */
+ u8 proto_id;
+ u16 offset; /* 10 bit protocol offset */
+};
+
+/* Protocol Group item */
+struct ice_proto_grp_item {
+ u16 idx;
+ struct ice_proto_off po[ICE_PROTO_COUNT_PER_GRP];
+};
+
+/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/
+#define ICE_FLG_RD_TABLE_SIZE 64
+#define ICE_FLG_RDT_SIZE 64
+
+/* Flags Redirection item */
+struct ice_flg_rd_item {
+ u16 idx;
+ bool expose;
+ u8 intr_flg_id; /* Internal Flag ID */
+};
+
+u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg);
+
+/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL,
+ * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS
+ * sections ***/
+#define ICE_XLT_KB_FLAG0_14_CNT 15
+#define ICE_XLT_KB_TBL_CNT 8
+#define ICE_XLT_KB_TBL_ENTRY_SIZE 24
+
+struct ice_xlt_kb_entry {
+ u8 xlt1_ad_sel;
+ u8 xlt2_ad_sel;
+ u16 flg0_14_sel[ICE_XLT_KB_FLAG0_14_CNT];
+ u8 xlt1_md_sel;
+ u8 xlt2_md_sel;
+};
+
+/* XLT Key Builder */
+struct ice_xlt_kb {
+ u8 xlt1_pm; /* XLT1 Partition Mode */
+ u8 xlt2_pm; /* XLT2 Partition Mode */
+ u8 prof_id_pm; /* Profile ID Partition Mode */
+ u64 flag15;
+
+ struct ice_xlt_kb_entry entries[ICE_XLT_KB_TBL_CNT];
+};
+
+u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag);
+
+/*** Parser API ***/
+#define ICE_GPR_HV_IDX 64
+#define ICE_GPR_HV_SIZE 32
+#define ICE_GPR_ERR_IDX 84
+#define ICE_GPR_FLG_IDX 104
+#define ICE_GPR_FLG_SIZE 16
+
+#define ICE_GPR_TSR_IDX 108 /* TSR: TCAM Search Register */
+#define ICE_GPR_NN_IDX 109 /* NN: Next Parsing Cycle Node ID */
+#define ICE_GPR_HO_IDX 110 /* HO: Next Parsing Cycle hdr Offset */
+#define ICE_GPR_NP_IDX 111 /* NP: Next Parsing Cycle's PC */
+
+#define ICE_PARSER_MAX_PKT_LEN 504
+#define ICE_PARSER_PKT_REV 32
+#define ICE_PARSER_GPR_NUM 128
+#define ICE_PARSER_FLG_NUM 64
+#define ICE_PARSER_ERR_NUM 16
+#define ICE_BST_KEY_SIZE 10
+#define ICE_MARKER_ID_SIZE 9
+#define ICE_MARKER_MAX_SIZE \
+ (ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1)
+#define ICE_MARKER_ID_NUM 8
+#define ICE_PO_PAIR_SIZE 256
+
+struct ice_gpr_pu {
+ /* array of flags to indicate if a GPR needs to be updated */
+ bool gpr_val_upd[ICE_PARSER_GPR_NUM];
+ u16 gpr_val[ICE_PARSER_GPR_NUM];
+ u64 flg_msk;
+ u64 flg_val;
+ u16 err_msk;
+ u16 err_val;
+};
+
+enum ice_pg_prio {
+ ICE_PG_P0 = 0,
+ ICE_PG_P1 = 1,
+ ICE_PG_P2 = 2,
+ ICE_PG_P3 = 3,
+};
+
+struct ice_parser_rt {
+ struct ice_parser *psr;
+ u16 gpr[ICE_PARSER_GPR_NUM];
+ u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV];
+ u16 pkt_len;
+ u16 po;
+ u8 bst_key[ICE_BST_TCAM_KEY_SIZE]; /* byte 19: TSR/prio, 0-18: packet */
+ struct ice_pg_cam_key pg_key;
+ struct ice_alu *alu0;
+ struct ice_alu *alu1;
+ struct ice_alu *alu2;
+ struct ice_pg_cam_action *action;
+ u8 pg_prio;
+ struct ice_gpr_pu pu;
+ u8 markers[ICE_MARKER_ID_SIZE];
+ bool protocols[ICE_PO_PAIR_SIZE];
+ u16 offsets[ICE_PO_PAIR_SIZE];
+};
+
+struct ice_parser_proto_off {
+ u8 proto_id; /* hardware protocol ID */
+ u16 offset; /* offset from the start of the protocol header */
+};
+
+#define ICE_PARSER_PROTO_OFF_PAIR_SIZE 16
+#define ICE_PARSER_FLAG_PSR_SIZE 8
+#define ICE_PARSER_FV_SIZE 48
+#define ICE_PARSER_FV_MAX 24
+#define ICE_BT_TUN_PORT_OFF_H 16
+#define ICE_BT_TUN_PORT_OFF_L 15
+#define ICE_BT_VM_OFF 0
+#define ICE_UDP_PORT_OFF_H 1
+#define ICE_UDP_PORT_OFF_L 0
+
+struct ice_parser_result {
+ u16 ptype; /* 16-bit hardware PTYPE */
+ /* array of protocol and header offset pairs */
+ struct ice_parser_proto_off po[ICE_PARSER_PROTO_OFF_PAIR_SIZE];
+ int po_num; /* # of protocol-offset pairs (must be <= 16) */
+ u64 flags_psr; /* parser flags */
+ u64 flags_pkt; /* packet flags */
+ u16 flags_sw; /* key builder flags for SW */
+ u16 flags_acl; /* key builder flags for ACL */
+ u16 flags_fd; /* key builder flags for FD */
+ u16 flags_rss; /* key builder flags for RSS */
+};
+
+void ice_parser_rt_reset(struct ice_parser_rt *rt);
+void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf,
+ int pkt_len);
+int ice_parser_rt_execute(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt);
+
+struct ice_parser {
+ struct ice_hw *hw; /* pointer to the hardware structure */
+
+ struct ice_imem_item *imem_table;
+ struct ice_metainit_item *mi_table;
+
+ struct ice_pg_cam_item *pg_cam_table;
+ struct ice_pg_cam_item *pg_sp_cam_table;
+ struct ice_pg_nm_cam_item *pg_nm_cam_table;
+ struct ice_pg_nm_cam_item *pg_nm_sp_cam_table;
+
+ struct ice_bst_tcam_item *bst_tcam_table;
+ struct ice_lbl_item *bst_lbl_table;
+ struct ice_ptype_mk_tcam_item *ptype_mk_tcam_table;
+ struct ice_mk_grp_item *mk_grp_table;
+ struct ice_proto_grp_item *proto_grp_table;
+ struct ice_flg_rd_item *flg_rd_table;
+
+ struct ice_xlt_kb *xlt_kb_sw;
+ struct ice_xlt_kb *xlt_kb_acl;
+ struct ice_xlt_kb *xlt_kb_fd;
+ struct ice_xlt_kb *xlt_kb_rss;
+
+ struct ice_parser_rt rt;
+};
+
+struct ice_parser *ice_parser_create(struct ice_hw *hw);
+void ice_parser_destroy(struct ice_parser *psr);
+void ice_parser_dvm_set(struct ice_parser *psr, bool on);
+int ice_parser_vxlan_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
+int ice_parser_geneve_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
+int ice_parser_ecpri_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on);
+int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf,
+ int pkt_len, struct ice_parser_result *rslt);
+void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt);
+
+struct ice_parser_fv {
+ u8 proto_id; /* hardware protocol ID */
+ u16 offset; /* offset from the start of the protocol header */
+ u16 spec; /* pattern to match */
+ u16 msk; /* pattern mask */
+};
+
+struct ice_parser_profile {
+ /* array of field vectors */
+ struct ice_parser_fv fv[ICE_PARSER_FV_SIZE];
+ int fv_num; /* # of field vectors (must be <= 48) */
+ u16 flags; /* key builder flags */
+ u16 flags_msk; /* key builder flag mask */
+
+ DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); /* PTYPE bitmap */
+};
+
+int ice_parser_profile_init(struct ice_parser_result *rslt,
+ const u8 *pkt_buf, const u8 *msk_buf,
+ int buf_len, enum ice_block blk,
+ struct ice_parser_profile *prof);
+void ice_parser_profile_dump(struct ice_hw *hw,
+ struct ice_parser_profile *prof);
+#endif /* _ICE_PARSER_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
new file mode 100644
index 000000000000..dedf5e854e4b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
@@ -0,0 +1,861 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Intel Corporation */
+
+#include "ice_common.h"
+
+static void ice_rt_tsr_set(struct ice_parser_rt *rt, u16 tsr)
+{
+ rt->gpr[ICE_GPR_TSR_IDX] = tsr;
+}
+
+static void ice_rt_ho_set(struct ice_parser_rt *rt, u16 ho)
+{
+ rt->gpr[ICE_GPR_HO_IDX] = ho;
+ memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE);
+}
+
+static void ice_rt_np_set(struct ice_parser_rt *rt, u16 pc)
+{
+ rt->gpr[ICE_GPR_NP_IDX] = pc;
+}
+
+static void ice_rt_nn_set(struct ice_parser_rt *rt, u16 node)
+{
+ rt->gpr[ICE_GPR_NN_IDX] = node;
+}
+
+static void
+ice_rt_flag_set(struct ice_parser_rt *rt, unsigned int idx, bool set)
+{
+ struct ice_hw *hw = rt->psr->hw;
+ unsigned int word, id;
+
+ word = idx / ICE_GPR_FLG_SIZE;
+ id = idx % ICE_GPR_FLG_SIZE;
+
+ if (set) {
+ rt->gpr[ICE_GPR_FLG_IDX + word] |= (u16)BIT(id);
+ ice_debug(hw, ICE_DBG_PARSER, "Set parser flag %u\n", idx);
+ } else {
+ rt->gpr[ICE_GPR_FLG_IDX + word] &= ~(u16)BIT(id);
+ ice_debug(hw, ICE_DBG_PARSER, "Clear parser flag %u\n", idx);
+ }
+}
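The word/bit split above packs the parser flags into consecutive 16-bit GPRs starting at ICE_GPR_FLG_IDX, with ICE_GPR_FLG_SIZE bits per word. For example, assuming ICE_GPR_FLG_SIZE is 16 (the GPR width used throughout this file), flag 37 maps as:

    unsigned int idx = 37;
    unsigned int word = idx / 16;   /* 2: third flag GPR */
    unsigned int id = idx % 16;     /* 5: bit 5 of that GPR */
    /* i.e. gpr[ICE_GPR_FLG_IDX + 2] |= BIT(5) */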
+
+static void ice_rt_gpr_set(struct ice_parser_rt *rt, int idx, u16 val)
+{
+ struct ice_hw *hw = rt->psr->hw;
+
+ if (idx == ICE_GPR_HO_IDX)
+ ice_rt_ho_set(rt, val);
+ else
+ rt->gpr[idx] = val;
+
+ ice_debug(hw, ICE_DBG_PARSER, "Set GPR %d value %d\n", idx, val);
+}
+
+static void ice_rt_err_set(struct ice_parser_rt *rt, unsigned int idx, bool set)
+{
+ struct ice_hw *hw = rt->psr->hw;
+
+ if (set) {
+ rt->gpr[ICE_GPR_ERR_IDX] |= (u16)BIT(idx);
+ ice_debug(hw, ICE_DBG_PARSER, "Set parser error %u\n", idx);
+ } else {
+ rt->gpr[ICE_GPR_ERR_IDX] &= ~(u16)BIT(idx);
+ ice_debug(hw, ICE_DBG_PARSER, "Reset parser error %u\n", idx);
+ }
+}
+
+/**
+ * ice_parser_rt_reset - reset the parser runtime
+ * @rt: pointer to the parser runtime
+ */
+void ice_parser_rt_reset(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+ struct ice_metainit_item *mi;
+ unsigned int i;
+
+ mi = &psr->mi_table[0];
+
+ memset(rt, 0, sizeof(*rt));
+ rt->psr = psr;
+
+ ice_rt_tsr_set(rt, mi->tsr);
+ ice_rt_ho_set(rt, mi->ho);
+ ice_rt_np_set(rt, mi->pc);
+ ice_rt_nn_set(rt, mi->pg_rn);
+
+ for (i = 0; i < ICE_PARSER_FLG_NUM; i++) {
+ if (mi->flags & BIT(i))
+ ice_rt_flag_set(rt, i, true);
+ }
+}
+
+/**
+ * ice_parser_rt_pktbuf_set - load a packet into the parser runtime
+ * @rt: pointer to the parser runtime
+ * @pkt_buf: buffer with packet data
+ * @pkt_len: packet buffer length
+ */
+void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf,
+ int pkt_len)
+{
+ int len = min(ICE_PARSER_MAX_PKT_LEN, pkt_len);
+ u16 ho = rt->gpr[ICE_GPR_HO_IDX];
+
+ memcpy(rt->pkt_buf, pkt_buf, len);
+ rt->pkt_len = pkt_len;
+
+ memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE);
+}
+
+static void ice_bst_key_init(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ u8 tsr = (u8)rt->gpr[ICE_GPR_TSR_IDX];
+ u16 ho = rt->gpr[ICE_GPR_HO_IDX];
+ u8 *key = rt->bst_key;
+ int idd, i;
+
+ idd = ICE_BST_TCAM_KEY_SIZE - 1;
+ if (imem->b_kb.tsr_ctrl)
+ key[idd] = tsr;
+ else
+ key[idd] = imem->b_kb.prio;
+
+ idd = ICE_BST_KEY_TCAM_SIZE - 1;
+ for (i = idd; i >= 0; i--) {
+ int j;
+
+ j = ho + idd - i;
+ if (j < ICE_PARSER_MAX_PKT_LEN)
+ key[i] = rt->pkt_buf[j];
+ else
+ key[i] = 0;
+ }
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n");
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ key[0], key[1], key[2], key[3], key[4],
+ key[5], key[6], key[7], key[8], key[9]);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n");
+}
+
+static u16 ice_bit_rev_u16(u16 v, int len)
+{
+ return bitrev16(v) >> (BITS_PER_TYPE(v) - len);
+}
+
+static u32 ice_bit_rev_u32(u32 v, int len)
+{
+ return bitrev32(v) >> (BITS_PER_TYPE(v) - len);
+}
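Both helpers rely on the same trick: reverse the whole register with bitrev16()/bitrev32(), then shift right by (width - len) so only the reversed low len bits survive. A worked example, assuming the usual <linux/bitrev.h> semantics:

    /* ice_bit_rev_u16(0x0003, 3):
     *   bitrev16(0x0003) == 0xC000   (whole 16-bit word mirrored)
     *   0xC000 >> (16 - 3) == 0x6    (0b011 reversed to 0b110)
     */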
+
+static u32 ice_hv_bit_sel(struct ice_parser_rt *rt, int start, int len)
+{
+ int offset;
+ u32 buf[2];
+ u64 val;
+
+ offset = ICE_GPR_HV_IDX + (start / BITS_PER_TYPE(u16));
+
+ memcpy(buf, &rt->gpr[offset], sizeof(buf));
+
+ buf[0] = bitrev8x4(buf[0]);
+ buf[1] = bitrev8x4(buf[1]);
+
+ val = *(u64 *)buf;
+ val >>= start % BITS_PER_TYPE(u16);
+
+ return ice_bit_rev_u32(val, len);
+}
+
+static u32 ice_pk_build(struct ice_parser_rt *rt,
+ struct ice_np_keybuilder *kb)
+{
+ if (kb->opc == ICE_NPKB_OPC_EXTRACT)
+ return ice_hv_bit_sel(rt, kb->start_reg0, kb->len_reg1);
+ else if (kb->opc == ICE_NPKB_OPC_BUILD)
+ return rt->gpr[kb->start_reg0] |
+ ((u32)rt->gpr[kb->len_reg1] << BITS_PER_TYPE(u16));
+ else if (kb->opc == ICE_NPKB_OPC_BYPASS)
+ return 0;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported OP Code %u\n",
+ kb->opc);
+ return U32_MAX;
+}
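Of the three key-builder opcodes, ICE_NPKB_OPC_EXTRACT pulls bits out of the header vector, ICE_NPKB_OPC_BYPASS yields zero, and ICE_NPKB_OPC_BUILD concatenates two 16-bit GPRs into the 32-bit key. Illustrating the BUILD case with assumed register contents:

    /* gpr[kb->start_reg0] == 0x0800 (low half)
     * gpr[kb->len_reg1]   == 0x0001 (high half)
     * next_proto == 0x0800 | (0x0001 << 16) == 0x00010800
     */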
+
+static bool ice_flag_get(struct ice_parser_rt *rt, unsigned int index)
+{
+ int word = index / ICE_GPR_FLG_SIZE;
+ int id = index % ICE_GPR_FLG_SIZE;
+
+ return !!(rt->gpr[ICE_GPR_FLG_IDX + word] & (u16)BIT(id));
+}
+
+static int ice_imem_pgk_init(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ memset(&rt->pg_key, 0, sizeof(rt->pg_key));
+ rt->pg_key.next_proto = ice_pk_build(rt, &imem->np_kb);
+ if (rt->pg_key.next_proto == U32_MAX)
+ return -EINVAL;
+
+ if (imem->pg_kb.flag0_ena)
+ rt->pg_key.flag0 = ice_flag_get(rt, imem->pg_kb.flag0_idx);
+ if (imem->pg_kb.flag1_ena)
+ rt->pg_key.flag1 = ice_flag_get(rt, imem->pg_kb.flag1_idx);
+ if (imem->pg_kb.flag2_ena)
+ rt->pg_key.flag2 = ice_flag_get(rt, imem->pg_kb.flag2_idx);
+ if (imem->pg_kb.flag3_ena)
+ rt->pg_key.flag3 = ice_flag_get(rt, imem->pg_kb.flag3_idx);
+
+ rt->pg_key.alu_reg = rt->gpr[imem->pg_kb.alu_reg_idx];
+ rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX];
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n",
+ rt->pg_key.node_id,
+ rt->pg_key.flag0,
+ rt->pg_key.flag1,
+ rt->pg_key.flag2,
+ rt->pg_key.flag3,
+ rt->pg_key.boost_idx,
+ rt->pg_key.alu_reg,
+ rt->pg_key.next_proto);
+
+ return 0;
+}
+
+static void ice_imem_alu0_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->alu0 = &imem->alu0;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from imem pc %d\n",
+ imem->idx);
+}
+
+static void ice_imem_alu1_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->alu1 = &imem->alu1;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from imem pc %d\n",
+ imem->idx);
+}
+
+static void ice_imem_alu2_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->alu2 = &imem->alu2;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from imem pc %d\n",
+ imem->idx);
+}
+
+static void ice_imem_pgp_set(struct ice_parser_rt *rt,
+ struct ice_imem_item *imem)
+{
+ rt->pg_prio = imem->pg_prio;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from imem pc %d\n",
+ rt->pg_prio, imem->idx);
+}
+
+static int ice_bst_pgk_init(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ memset(&rt->pg_key, 0, sizeof(rt->pg_key));
+ rt->pg_key.boost_idx = bst->hit_idx_grp;
+ rt->pg_key.next_proto = ice_pk_build(rt, &bst->np_kb);
+ if (rt->pg_key.next_proto == U32_MAX)
+ return -EINVAL;
+
+ if (bst->pg_kb.flag0_ena)
+ rt->pg_key.flag0 = ice_flag_get(rt, bst->pg_kb.flag0_idx);
+ if (bst->pg_kb.flag1_ena)
+ rt->pg_key.flag1 = ice_flag_get(rt, bst->pg_kb.flag1_idx);
+ if (bst->pg_kb.flag2_ena)
+ rt->pg_key.flag2 = ice_flag_get(rt, bst->pg_kb.flag2_idx);
+ if (bst->pg_kb.flag3_ena)
+ rt->pg_key.flag3 = ice_flag_get(rt, bst->pg_kb.flag3_idx);
+
+ rt->pg_key.alu_reg = rt->gpr[bst->pg_kb.alu_reg_idx];
+ rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX];
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n",
+ rt->pg_key.node_id,
+ rt->pg_key.flag0,
+ rt->pg_key.flag1,
+ rt->pg_key.flag2,
+ rt->pg_key.flag3,
+ rt->pg_key.boost_idx,
+ rt->pg_key.alu_reg,
+ rt->pg_key.next_proto);
+
+ return 0;
+}
+
+static void ice_bst_alu0_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->alu0 = &bst->alu0;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from boost address %d\n",
+ bst->addr);
+}
+
+static void ice_bst_alu1_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->alu1 = &bst->alu1;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from boost address %d\n",
+ bst->addr);
+}
+
+static void ice_bst_alu2_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->alu2 = &bst->alu2;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from boost address %d\n",
+ bst->addr);
+}
+
+static void ice_bst_pgp_set(struct ice_parser_rt *rt,
+ struct ice_bst_tcam_item *bst)
+{
+ rt->pg_prio = bst->pg_prio;
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from boost address %d\n",
+ rt->pg_prio, bst->addr);
+}
+
+static struct ice_pg_cam_item *ice_rt_pg_cam_match(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+ struct ice_pg_cam_item *item;
+
+ item = ice_pg_cam_match(psr->pg_cam_table, ICE_PG_CAM_TABLE_SIZE,
+ &rt->pg_key);
+ if (!item)
+ item = ice_pg_cam_match(psr->pg_sp_cam_table,
+ ICE_PG_SP_CAM_TABLE_SIZE, &rt->pg_key);
+ return item;
+}
+
+static
+struct ice_pg_nm_cam_item *ice_rt_pg_nm_cam_match(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+ struct ice_pg_nm_cam_item *item;
+
+ item = ice_pg_nm_cam_match(psr->pg_nm_cam_table,
+ ICE_PG_NM_CAM_TABLE_SIZE, &rt->pg_key);
+
+ if (!item)
+ item = ice_pg_nm_cam_match(psr->pg_nm_sp_cam_table,
+ ICE_PG_NM_SP_CAM_TABLE_SIZE,
+ &rt->pg_key);
+ return item;
+}
+
+static void ice_gpr_add(struct ice_parser_rt *rt, int idx, u16 val)
+{
+ rt->pu.gpr_val_upd[idx] = true;
+ rt->pu.gpr_val[idx] = val;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for register %d value %d\n",
+ idx, val);
+}
+
+static void ice_pg_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action ...\n");
+
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, rt->action->next_pc);
+ ice_gpr_add(rt, ICE_GPR_NN_IDX, rt->action->next_node);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action done.\n");
+}
+
+static void ice_flg_add(struct ice_parser_rt *rt, int idx, bool val)
+{
+ rt->pu.flg_msk |= BIT_ULL(idx);
+ if (val)
+ rt->pu.flg_val |= BIT_ULL(idx);
+ else
+ rt->pu.flg_val &= ~BIT_ULL(idx);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for flag %d value %d\n",
+ idx, val);
+}
+
+static void ice_flg_update(struct ice_parser_rt *rt, struct ice_alu *alu)
+{
+ u32 hv_bit_sel;
+ int i;
+
+ if (!alu->dedicate_flags_ena)
+ return;
+
+ if (alu->flags_extr_imm) {
+ for (i = 0; i < alu->dst_len; i++)
+ ice_flg_add(rt, alu->dst_start + i,
+ !!(alu->flags_start_imm & BIT(i)));
+ } else {
+ for (i = 0; i < alu->dst_len; i++) {
+ hv_bit_sel = ice_hv_bit_sel(rt,
+ alu->flags_start_imm + i,
+ 1);
+ ice_flg_add(rt, alu->dst_start + i, !!hv_bit_sel);
+ }
+ }
+}
+
+static void ice_po_update(struct ice_parser_rt *rt, struct ice_alu *alu)
+{
+ if (alu->proto_offset_opc == ICE_PO_OFF_HDR_ADD)
+ rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] + alu->proto_offset);
+ else if (alu->proto_offset_opc == ICE_PO_OFF_HDR_SUB)
+ rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] - alu->proto_offset);
+ else if (alu->proto_offset_opc == ICE_PO_OFF_REMAIN)
+ rt->po = rt->gpr[ICE_GPR_HO_IDX];
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Update Protocol Offset = %d\n",
+ rt->po);
+}
+
+static u16 ice_reg_bit_sel(struct ice_parser_rt *rt, int reg_idx,
+ int start, int len)
+{
+ int offset;
+ u32 val;
+
+ offset = reg_idx + (start / BITS_PER_TYPE(u16));
+
+ memcpy(&val, &rt->gpr[offset], sizeof(val));
+
+ val = bitrev8x4(val);
+ val >>= start % BITS_PER_TYPE(u16);
+
+ return ice_bit_rev_u16(val, len);
+}
+
+static void ice_err_add(struct ice_parser_rt *rt, int idx, bool val)
+{
+ rt->pu.err_msk |= (u16)BIT(idx);
+ if (val)
+ rt->pu.err_val |= (u64)BIT_ULL(idx);
+ else
+ rt->pu.err_val &= ~(u64)BIT_ULL(idx);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for error %d value %d\n",
+ idx, val);
+}
+
+static void ice_dst_reg_bit_set(struct ice_parser_rt *rt, struct ice_alu *alu,
+ bool val)
+{
+ u16 flg_idx;
+
+ if (alu->dedicate_flags_ena) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "DedicatedFlagsEnable should not be enabled in opcode %d\n",
+ alu->opc);
+ return;
+ }
+
+ if (alu->dst_reg_id == ICE_GPR_ERR_IDX) {
+ if (alu->dst_start >= ICE_PARSER_ERR_NUM) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid error %d\n",
+ alu->dst_start);
+ return;
+ }
+ ice_err_add(rt, alu->dst_start, val);
+ } else if (alu->dst_reg_id >= ICE_GPR_FLG_IDX) {
+ flg_idx = (u16)(((alu->dst_reg_id - ICE_GPR_FLG_IDX) << 4) +
+ alu->dst_start);
+
+ if (flg_idx >= ICE_PARSER_FLG_NUM) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid flag %d\n",
+ flg_idx);
+ return;
+ }
+ ice_flg_add(rt, flg_idx, val);
+ } else {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unexpected Dest Register Bit set, RegisterID %d Start %d\n",
+ alu->dst_reg_id, alu->dst_start);
+ }
+}
+
+static void ice_alu_exe(struct ice_parser_rt *rt, struct ice_alu *alu)
+{
+ u16 dst, src, shift, imm;
+
+ if (alu->shift_xlate_sel) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "shift_xlate_sel != 0 is not expected\n");
+ return;
+ }
+
+ ice_po_update(rt, alu);
+ ice_flg_update(rt, alu);
+
+ dst = rt->gpr[alu->dst_reg_id];
+ src = ice_reg_bit_sel(rt, alu->src_reg_id,
+ alu->src_start, alu->src_len);
+ shift = alu->shift_xlate_key;
+ imm = alu->imm;
+
+ switch (alu->opc) {
+ case ICE_ALU_PARK:
+ break;
+ case ICE_ALU_MOV_ADD:
+ dst = (src << shift) + imm;
+ ice_gpr_add(rt, alu->dst_reg_id, dst);
+ break;
+ case ICE_ALU_ADD:
+ dst += (src << shift) + imm;
+ ice_gpr_add(rt, alu->dst_reg_id, dst);
+ break;
+ case ICE_ALU_ORLT:
+ if (src < imm)
+ ice_dst_reg_bit_set(rt, alu, true);
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
+ break;
+ case ICE_ALU_OREQ:
+ if (src == imm)
+ ice_dst_reg_bit_set(rt, alu, true);
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
+ break;
+ case ICE_ALU_SETEQ:
+ ice_dst_reg_bit_set(rt, alu, src == imm);
+ ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr);
+ break;
+ case ICE_ALU_MOV_XOR:
+ dst = (src << shift) ^ imm;
+ ice_gpr_add(rt, alu->dst_reg_id, dst);
+ break;
+ default:
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported ALU instruction %d\n",
+ alu->opc);
+ break;
+ }
+}
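The ALU is thus a small accumulator machine: the MOV/ADD/XOR forms write a shifted source combined with the immediate into the destination GPR, while ORLT/OREQ/SETEQ set a single destination bit and redirect the program counter. Two illustrative cases with assumed operand values:

    /* ICE_ALU_MOV_ADD, src == 0x3, shift == 4, imm == 0x10:
     *   dst = (0x3 << 4) + 0x10 == 0x40, queued via ice_gpr_add()
     * ICE_ALU_SETEQ, src == imm:
     *   destination bit set to 1 and next PC redirected to branch_addr
     */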
+
+static void ice_alu0_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 ...\n");
+ ice_alu_exe(rt, rt->alu0);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 done.\n");
+}
+
+static void ice_alu1_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 ...\n");
+ ice_alu_exe(rt, rt->alu1);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 done.\n");
+}
+
+static void ice_alu2_exe(struct ice_parser_rt *rt)
+{
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 ...\n");
+ ice_alu_exe(rt, rt->alu2);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 done.\n");
+}
+
+static void ice_pu_exe(struct ice_parser_rt *rt)
+{
+ struct ice_gpr_pu *pu = &rt->pu;
+ unsigned int i;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers ...\n");
+
+ for (i = 0; i < ICE_PARSER_GPR_NUM; i++) {
+ if (pu->gpr_val_upd[i])
+ ice_rt_gpr_set(rt, i, pu->gpr_val[i]);
+ }
+
+ for (i = 0; i < ICE_PARSER_FLG_NUM; i++) {
+ if (pu->flg_msk & BIT(i))
+ ice_rt_flag_set(rt, i, pu->flg_val & BIT(i));
+ }
+
+ for (i = 0; i < ICE_PARSER_ERR_NUM; i++) {
+ if (pu->err_msk & BIT(i))
+ ice_rt_err_set(rt, i, pu->err_val & BIT(i));
+ }
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers done.\n");
+}
+
+static void ice_alu_pg_exe(struct ice_parser_rt *rt)
+{
+ memset(&rt->pu, 0, sizeof(rt->pu));
+
+ switch (rt->pg_prio) {
+ case (ICE_PG_P0):
+ ice_pg_exe(rt);
+ ice_alu0_exe(rt);
+ ice_alu1_exe(rt);
+ ice_alu2_exe(rt);
+ break;
+ case (ICE_PG_P1):
+ ice_alu0_exe(rt);
+ ice_pg_exe(rt);
+ ice_alu1_exe(rt);
+ ice_alu2_exe(rt);
+ break;
+ case (ICE_PG_P2):
+ ice_alu0_exe(rt);
+ ice_alu1_exe(rt);
+ ice_pg_exe(rt);
+ ice_alu2_exe(rt);
+ break;
+ case (ICE_PG_P3):
+ ice_alu0_exe(rt);
+ ice_alu1_exe(rt);
+ ice_alu2_exe(rt);
+ ice_pg_exe(rt);
+ break;
+ }
+
+ ice_pu_exe(rt);
+
+ if (rt->action->ho_inc == 0)
+ return;
+
+ if (rt->action->ho_polarity)
+ ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] + rt->action->ho_inc);
+ else
+ ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] - rt->action->ho_inc);
+}
+
+static void ice_proto_off_update(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+
+ if (rt->action->is_pg) {
+ struct ice_proto_grp_item *proto_grp =
+ &psr->proto_grp_table[rt->action->proto_id];
+ u16 po;
+ int i;
+
+ for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) {
+ struct ice_proto_off *entry = &proto_grp->po[i];
+
+ if (entry->proto_id == U8_MAX)
+ break;
+
+ if (!entry->polarity)
+ po = rt->po + entry->offset;
+ else
+ po = rt->po - entry->offset;
+
+ rt->protocols[entry->proto_id] = true;
+ rt->offsets[entry->proto_id] = po;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
+ entry->proto_id, po);
+ }
+ } else {
+ rt->protocols[rt->action->proto_id] = true;
+ rt->offsets[rt->action->proto_id] = rt->po;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n",
+ rt->action->proto_id, rt->po);
+ }
+}
+
+static void ice_marker_set(struct ice_parser_rt *rt, int idx)
+{
+ unsigned int byte = idx / BITS_PER_BYTE;
+ unsigned int bit = idx % BITS_PER_BYTE;
+
+ rt->markers[byte] |= (u8)BIT(bit);
+}
+
+static void ice_marker_update(struct ice_parser_rt *rt)
+{
+ struct ice_parser *psr = rt->psr;
+
+ if (rt->action->is_mg) {
+ struct ice_mk_grp_item *mk_grp =
+ &psr->mk_grp_table[rt->action->marker_id];
+ int i;
+
+ for (i = 0; i < ICE_MARKER_ID_NUM; i++) {
+ u8 marker = mk_grp->markers[i];
+
+ if (marker == ICE_MARKER_MAX_SIZE)
+ break;
+
+ ice_marker_set(rt, marker);
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
+ marker);
+ }
+ } else {
+ if (rt->action->marker_id != ICE_MARKER_MAX_SIZE)
+ ice_marker_set(rt, rt->action->marker_id);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n",
+ rt->action->marker_id);
+ }
+}
+
+static u16 ice_ptype_resolve(struct ice_parser_rt *rt)
+{
+ struct ice_ptype_mk_tcam_item *item;
+ struct ice_parser *psr = rt->psr;
+
+ item = ice_ptype_mk_tcam_match(psr->ptype_mk_tcam_table,
+ rt->markers, ICE_MARKER_ID_SIZE);
+ if (item)
+ return item->ptype;
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Could not resolve PTYPE\n");
+ return U16_MAX;
+}
+
+static void ice_proto_off_resolve(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt)
+{
+ int i;
+
+ for (i = 0; i < ICE_PO_PAIR_SIZE - 1; i++) {
+ if (rt->protocols[i]) {
+ rslt->po[rslt->po_num].proto_id = (u8)i;
+ rslt->po[rslt->po_num].offset = rt->offsets[i];
+ rslt->po_num++;
+ }
+ }
+}
+
+static void ice_result_resolve(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt)
+{
+ struct ice_parser *psr = rt->psr;
+
+ memset(rslt, 0, sizeof(*rslt));
+
+ memcpy(&rslt->flags_psr, &rt->gpr[ICE_GPR_FLG_IDX],
+ ICE_PARSER_FLAG_PSR_SIZE);
+ rslt->flags_pkt = ice_flg_redirect(psr->flg_rd_table, rslt->flags_psr);
+ rslt->flags_sw = ice_xlt_kb_flag_get(psr->xlt_kb_sw, rslt->flags_pkt);
+ rslt->flags_fd = ice_xlt_kb_flag_get(psr->xlt_kb_fd, rslt->flags_pkt);
+ rslt->flags_rss = ice_xlt_kb_flag_get(psr->xlt_kb_rss, rslt->flags_pkt);
+
+ ice_proto_off_resolve(rt, rslt);
+ rslt->ptype = ice_ptype_resolve(rt);
+}
+
+/**
+ * ice_parser_rt_execute - parser execution routine
+ * @rt: pointer to the parser runtime
+ * @rslt: output parameter to save the parser result
+ *
+ * Return: 0 on success or a negative errno.
+ */
+int ice_parser_rt_execute(struct ice_parser_rt *rt,
+ struct ice_parser_result *rslt)
+{
+ struct ice_pg_nm_cam_item *pg_nm_cam;
+ struct ice_parser *psr = rt->psr;
+ struct ice_pg_cam_item *pg_cam;
+ int status = 0;
+ u16 node;
+ u16 pc;
+
+ node = rt->gpr[ICE_GPR_NN_IDX];
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Start with Node: %u\n", node);
+
+ while (true) {
+ struct ice_bst_tcam_item *bst;
+ struct ice_imem_item *imem;
+
+ pc = rt->gpr[ICE_GPR_NP_IDX];
+ imem = &psr->imem_table[pc];
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load imem at pc: %u\n",
+ pc);
+
+ ice_bst_key_init(rt, imem);
+ bst = ice_bst_tcam_match(psr->bst_tcam_table, rt->bst_key);
+ if (!bst) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "No Boost TCAM Match\n");
+ status = ice_imem_pgk_init(rt, imem);
+ if (status)
+ break;
+ ice_imem_alu0_set(rt, imem);
+ ice_imem_alu1_set(rt, imem);
+ ice_imem_alu2_set(rt, imem);
+ ice_imem_pgp_set(rt, imem);
+ } else {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Boost TCAM Match address: %u\n",
+ bst->addr);
+ if (imem->b_m.pg) {
+ status = ice_bst_pgk_init(rt, bst);
+ if (status)
+ break;
+ ice_bst_pgp_set(rt, bst);
+ } else {
+ status = ice_imem_pgk_init(rt, imem);
+ if (status)
+ break;
+ ice_imem_pgp_set(rt, imem);
+ }
+
+ if (imem->b_m.alu0)
+ ice_bst_alu0_set(rt, bst);
+ else
+ ice_imem_alu0_set(rt, imem);
+
+ if (imem->b_m.alu1)
+ ice_bst_alu1_set(rt, bst);
+ else
+ ice_imem_alu1_set(rt, imem);
+
+ if (imem->b_m.alu2)
+ ice_bst_alu2_set(rt, bst);
+ else
+ ice_imem_alu2_set(rt, imem);
+ }
+
+ rt->action = NULL;
+ pg_cam = ice_rt_pg_cam_match(rt);
+ if (!pg_cam) {
+ pg_nm_cam = ice_rt_pg_nm_cam_match(rt);
+ if (pg_nm_cam) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph Nomatch CAM Address %u\n",
+ pg_nm_cam->idx);
+ rt->action = &pg_nm_cam->action;
+ }
+ } else {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph CAM Address %u\n",
+ pg_cam->idx);
+ rt->action = &pg_cam->action;
+ }
+
+ if (!rt->action) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Failed to match ParseGraph CAM, stop parsing.\n");
+ status = -EINVAL;
+ break;
+ }
+
+ ice_alu_pg_exe(rt);
+ ice_marker_update(rt);
+ ice_proto_off_update(rt);
+
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Go to node %u\n",
+ rt->action->next_node);
+
+ if (rt->action->is_last_round) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Last Round in ParseGraph Action, stop parsing.\n");
+ break;
+ }
+
+ if (rt->gpr[ICE_GPR_HO_IDX] >= rt->pkt_len) {
+ ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Header Offset (%u) reached or exceeded packet len (%u), stop parsing\n",
+ rt->gpr[ICE_GPR_HO_IDX], rt->pkt_len);
+ break;
+ }
+ }
+
+ ice_result_resolve(rt, rslt);
+
+ return status;
+}
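ice_parser_run() itself falls outside this excerpt, but given the runtime API it plausibly reduces to a reset, a packet load, and one execute call. A sketch under that assumption:

    /* Assumed shape of ice_parser_run(); the real body is not shown here. */
    int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf,
                       int pkt_len, struct ice_parser_result *rslt)
    {
            ice_parser_rt_reset(&psr->rt);
            ice_parser_rt_pktbuf_set(&psr->rt, pkt_buf, pkt_len);

            return ice_parser_rt_execute(&psr->rt, rslt);
    }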
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index ecf8f5d60292..6ca13c5dcb14 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -28,9 +28,8 @@ ice_sched_add_root_node(struct ice_port_info *pi,
if (!root)
return -ENOMEM;
- /* coverity[suspicious_sizeof] */
root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
- sizeof(*root), GFP_KERNEL);
+ sizeof(*root->children), GFP_KERNEL);
if (!root->children) {
devm_kfree(ice_hw_to_dev(hw), root);
return -ENOMEM;
@@ -186,10 +185,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
if (!node)
return -ENOMEM;
if (hw->max_children[layer]) {
- /* coverity[suspicious_sizeof] */
node->children = devm_kcalloc(ice_hw_to_dev(hw),
hw->max_children[layer],
- sizeof(*node), GFP_KERNEL);
+ sizeof(*node->children), GFP_KERNEL);
if (!node->children) {
devm_kfree(ice_hw_to_dev(hw), node);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 96037bef3e78..b9e443232335 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -61,6 +61,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
ICE_DBG_AQ_DESC | \
ICE_DBG_AQ_DESC_BUF | \
ICE_DBG_AQ_CMD)
+#define ICE_DBG_PARSER BIT_ULL(28)
#define ICE_DBG_USER BIT_ULL(31)
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index fec16919ec19..be4266899690 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -12,6 +12,7 @@
#include <net/devlink.h>
#include <linux/avf/virtchnl.h>
#include "ice_type.h"
+#include "ice_flow.h"
#include "ice_virtchnl_fdir.h"
#include "ice_vsi_vlan_ops.h"
@@ -52,6 +53,12 @@ struct ice_mdd_vf_events {
u16 last_printed;
};
+/* Structure to store fdir fv entry */
+struct ice_fdir_prof_info {
+ struct ice_parser_profile prof;
+ u64 fdir_active_cnt;
+};
+
/* VF operations */
struct ice_vf_ops {
enum ice_disq_rst_src reset_type;
@@ -91,6 +98,7 @@ struct ice_vf {
u16 lan_vsi_idx; /* index into PF struct */
u16 ctrl_vsi_idx;
struct ice_vf_fdir fdir;
+ struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
/* first vector index of this VF in the PF space */
int first_vector_idx;
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 1c6ce0c4ed4e..59f62306b9cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -461,6 +461,10 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_TC_U32 &&
+ vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_TC_U32;
+
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index b4feb0927687..14e3f0f89c78 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -26,6 +26,15 @@ enum ice_fdir_tunnel_type {
ICE_FDIR_TUNNEL_TYPE_NONE = 0,
ICE_FDIR_TUNNEL_TYPE_GTPU,
ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
+ ICE_FDIR_TUNNEL_TYPE_ECPRI,
+ ICE_FDIR_TUNNEL_TYPE_GTPU_INNER,
+ ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER,
+ ICE_FDIR_TUNNEL_TYPE_GRE,
+ ICE_FDIR_TUNNEL_TYPE_GTPOGRE,
+ ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER,
+ ICE_FDIR_TUNNEL_TYPE_GRE_INNER,
+ ICE_FDIR_TUNNEL_TYPE_L2TPV2,
+ ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER,
};
struct virtchnl_fdir_fltr_conf {
@@ -33,6 +42,11 @@ struct virtchnl_fdir_fltr_conf {
enum ice_fdir_tunnel_type ttype;
u64 inset_flag;
u32 flow_id;
+
+ struct ice_parser_profile *prof;
+ bool parser_ena;
+ u8 *pkt_buf;
+ u16 pkt_len;
};
struct virtchnl_fdir_inset_map {
@@ -787,6 +801,107 @@ err_exit:
}
/**
+ * ice_vc_fdir_is_raw_flow - check if FDIR flow is raw (binary)
+ * @proto: virtchnl protocol headers
+ *
+ * Check if the FDIR rule is a raw (protocol-agnostic) flow. A common FDIR
+ * rule must have a non-zero proto->count, so the tunnel_level and count of
+ * proto serve as the indicators: if both are zero, the rule is regarded as
+ * a raw flow.
+ *
+ * Return: true if the headers describe a raw flow, false otherwise.
+ */
+static bool
+ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto)
+{
+ return (proto->tunnel_level == 0 && proto->count == 0);
+}
+
+/**
+ * ice_vc_fdir_parse_raw - parse a virtchnl raw FDIR rule
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ * @conf: FDIR configuration for each filter
+ *
+ * Parse the virtual channel filter's raw flow and store it in @conf
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int
+ice_vc_fdir_parse_raw(struct ice_vf *vf,
+ struct virtchnl_proto_hdrs *proto,
+ struct virtchnl_fdir_fltr_conf *conf)
+{
+ u8 *pkt_buf, *msk_buf __free(kfree);
+ struct ice_parser_result rslt;
+ struct ice_pf *pf = vf->pf;
+ struct ice_parser *psr;
+ int status = -ENOMEM;
+ struct ice_hw *hw;
+ u16 udp_port = 0;
+
+ pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
+ msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
+ if (!pkt_buf || !msk_buf)
+ goto err_mem_alloc;
+
+ memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len);
+ memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len);
+
+ hw = &pf->hw;
+
+ /* Get raw profile info via Parser Lib */
+ psr = ice_parser_create(hw);
+ if (IS_ERR(psr)) {
+ status = PTR_ERR(psr);
+ goto err_mem_alloc;
+ }
+
+ ice_parser_dvm_set(psr, ice_is_dvm_ena(hw));
+
+ if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
+ ice_parser_vxlan_tunnel_set(psr, udp_port, true);
+
+ status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt);
+ if (status)
+ goto err_parser_destroy;
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_parser_result_dump(hw, &rslt);
+
+ conf->prof = kzalloc(sizeof(*conf->prof), GFP_KERNEL);
+ if (!conf->prof) {
+ status = -ENOMEM;
+ goto err_parser_destroy;
+ }
+
+ status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
+ proto->raw.pkt_len, ICE_BLK_FD,
+ conf->prof);
+ if (status)
+ goto err_parser_profile_init;
+
+ if (hw->debug_mask & ICE_DBG_PARSER)
+ ice_parser_profile_dump(hw, conf->prof);
+
+ /* Store raw flow info into @conf */
+ conf->pkt_len = proto->raw.pkt_len;
+ conf->pkt_buf = pkt_buf;
+ conf->parser_ena = true;
+
+ ice_parser_destroy(psr);
+ return 0;
+
+err_parser_profile_init:
+ kfree(conf->prof);
+err_parser_destroy:
+ ice_parser_destroy(psr);
+err_mem_alloc:
+ kfree(pkt_buf);
+ return status;
+}
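The msk_buf declaration above uses the scope-based cleanup attribute from <linux/cleanup.h>: __free(kfree) arranges for kfree() to run automatically when the variable leaves scope, which is why only pkt_buf is released explicitly on the error paths. A minimal sketch of the pattern:

    /* Minimal sketch of scope-based freeing (see <linux/cleanup.h>). */
    static int example_scoped_alloc(size_t len)
    {
            u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;

            /* ... use buf; kfree(buf) runs automatically on return ... */
            return 0;
    }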
+
+/**
* ice_vc_fdir_parse_pattern
* @vf: pointer to the VF info
* @fltr: virtual channel add cmd buffer
@@ -813,6 +928,10 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
return -EINVAL;
}
+ /* For raw FDIR filters created by the parser */
+ if (ice_vc_fdir_is_raw_flow(proto))
+ return ice_vc_fdir_parse_raw(vf, proto, conf);
+
for (i = 0; i < proto->count; i++) {
struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
struct ip_esp_hdr *esph;
@@ -1101,8 +1220,10 @@ ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
int ret;
- if (!ice_vc_validate_pattern(vf, proto))
- return -EINVAL;
+ /* For raw FDIR filters created by the parser */
+ if (!ice_vc_fdir_is_raw_flow(proto))
+ if (!ice_vc_validate_pattern(vf, proto))
+ return -EINVAL;
ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
if (ret)
@@ -1295,11 +1416,15 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
return -ENOMEM;
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
- if (ret) {
- dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
- vf->vf_id, input->flow_type);
- goto err_free_pkt;
+ if (conf->parser_ena) {
+ memcpy(pkt, conf->pkt_buf, conf->pkt_len);
+ } else {
+ ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ if (ret) {
+ dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
+ vf->vf_id, input->flow_type);
+ goto err_free_pkt;
+ }
}
ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
@@ -1521,6 +1646,16 @@ err_exit:
return ret;
}
+static int ice_fdir_is_tunnel(enum ice_fdir_tunnel_type ttype)
+{
+ return (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI ||
+ ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER);
+}
+
/**
* ice_vc_add_fdir_fltr_post
* @vf: pointer to the VF structure
@@ -1782,6 +1917,158 @@ static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
}
/**
+ * ice_vc_parser_fv_check_diff - compare two parsed FDIR profile fv contexts
+ * @fv_a: struct of parsed FDIR profile field vector
+ * @fv_b: struct of parsed FDIR profile field vector
+ *
+ * Check if the two parsed FDIR profile field vector contexts differ in
+ * proto_id, offset or mask.
+ *
+ * Return: true if they differ, false otherwise.
+ */
+static bool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a,
+ struct ice_parser_fv *fv_b)
+{
+ return (fv_a->proto_id != fv_b->proto_id ||
+ fv_a->offset != fv_b->offset ||
+ fv_a->msk != fv_b->msk);
+}
+
+/**
+ * ice_vc_parser_fv_save - save parsed FDIR profile fv context
+ * @fv: struct of parsed FDIR profile field vector
+ * @fv_src: parsed FDIR profile field vector context to save
+ *
+ * Save the parsed FDIR profile field vector context, including proto_id,
+ * offset and mask.
+ *
+ * Return: Void.
+ */
+static void ice_vc_parser_fv_save(struct ice_parser_fv *fv,
+ struct ice_parser_fv *fv_src)
+{
+ fv->proto_id = fv_src->proto_id;
+ fv->offset = fv_src->offset;
+ fv->msk = fv_src->msk;
+ fv->spec = 0;
+}
+
+/**
+ * ice_vc_add_fdir_raw - add a raw FDIR filter for VF
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @v_ret: the final VIRTCHNL code
+ * @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER
+ * @len: length of the stat
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int
+ice_vc_add_fdir_raw(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf,
+ enum virtchnl_status_code *v_ret,
+ struct virtchnl_fdir_add *stat, int len)
+{
+ struct ice_vsi *vf_vsi, *ctrl_vsi;
+ struct ice_fdir_prof_info *pi;
+ struct ice_pf *pf = vf->pf;
+ int ret, ptg, id, i;
+ struct device *dev;
+ struct ice_hw *hw;
+ bool fv_found;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ *v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+
+ id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
+ ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
+
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id);
+ return -ENODEV;
+ }
+
+ ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
+ if (!ctrl_vsi) {
+ dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n",
+ vf->vf_id);
+ return -ENODEV;
+ }
+
+ fv_found = false;
+
+ /* Check if profile info already exists, then update the counter */
+ pi = &vf->fdir_prof_info[ptg];
+ if (pi->fdir_active_cnt != 0) {
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++)
+ if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i],
+ &conf->prof->fv[i]))
+ break;
+ if (i == ICE_MAX_FV_WORDS) {
+ fv_found = true;
+ pi->fdir_active_cnt++;
+ }
+ }
+
+ /* HW profile setting is only required for the first time */
+ if (!fv_found) {
+ ret = ice_flow_set_parser_prof(hw, vf_vsi->idx,
+ ctrl_vsi->idx, conf->prof,
+ ICE_BLK_FD);
+
+ if (ret) {
+ *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "VF %d: insert hw prof failed\n",
+ vf->vf_id);
+ return ret;
+ }
+ }
+
+ ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
+ if (ret) {
+ *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ dev_dbg(dev, "VF %d: insert FDIR list failed\n",
+ vf->vf_id);
+ return ret;
+ }
+
+ ret = ice_vc_fdir_set_irq_ctx(vf, conf,
+ VIRTCHNL_OP_ADD_FDIR_FILTER);
+ if (ret) {
+ dev_dbg(dev, "VF %d: set FDIR context failed\n",
+ vf->vf_id);
+ goto err_rem_entry;
+ }
+
+ ret = ice_vc_fdir_write_fltr(vf, conf, true, false);
+ if (ret) {
+ dev_err(dev, "VF %d: adding FDIR raw flow rule failed, ret:%d\n",
+ vf->vf_id, ret);
+ goto err_clr_irq;
+ }
+
+ /* Save parsed profile fv info of the FDIR rule for the first time */
+ if (!fv_found) {
+ for (i = 0; i < conf->prof->fv_num; i++)
+ ice_vc_parser_fv_save(&pi->prof.fv[i],
+ &conf->prof->fv[i]);
+ pi->prof.fv_num = conf->prof->fv_num;
+ pi->fdir_active_cnt = 1;
+ }
+
+ return 0;
+
+err_clr_irq:
+ ice_vc_fdir_clear_irq_ctx(vf);
+err_rem_entry:
+ ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ return ret;
+}
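The fv_found logic above amounts to reference-counting one HW profile per packet-type group: the first matching raw rule programs the profile and sets fdir_active_cnt to 1, identical later rules only bump the counter, and ice_vc_del_fdir_raw() below removes the profile flows once the counter drains. Schematically:

    /* Per-PTG profile lifetime (illustration of the logic above):
     *   add:    cnt == 0 ? program HW profile, cnt = 1 : cnt++
     *   delete: --cnt == 0 ? remove profile flows from both VSIs
     */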
+
+/**
* ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1846,7 +2133,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
len = sizeof(*stat);
ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
if (ret) {
- v_ret = VIRTCHNL_STATUS_SUCCESS;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
goto err_free_conf;
@@ -1861,6 +2148,15 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto exit;
}
+ /* For raw FDIR filters created by the parser */
+ if (conf->parser_ena) {
+ ret = ice_vc_add_fdir_raw(vf, conf, &v_ret, stat, len);
+ if (ret)
+ goto err_free_conf;
+ goto exit;
+ }
+
+ is_tun = ice_fdir_is_tunnel(conf->ttype);
ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
@@ -1922,6 +2218,78 @@ err_exit:
}
/**
+ * ice_vc_del_fdir_raw - delete a raw FDIR filter for VF
+ * @vf: pointer to the VF info
+ * @conf: FDIR configuration for each filter
+ * @v_ret: the final VIRTCHNL code
+ * @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER
+ * @len: length of the stat
+ *
+ * Return: 0 on success or negative errno on failure.
+ */
+static int
+ice_vc_del_fdir_raw(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf,
+ enum virtchnl_status_code *v_ret,
+ struct virtchnl_fdir_del *stat, int len)
+{
+ struct ice_vsi *vf_vsi, *ctrl_vsi;
+ enum ice_block blk = ICE_BLK_FD;
+ struct ice_fdir_prof_info *pi;
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+ struct ice_hw *hw;
+ unsigned long id;
+ u16 vsi_num;
+ int ptg;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ *v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+
+ id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX);
+ ptg = hw->blk[ICE_BLK_FD].xlt1.t[id];
+
+ ret = ice_vc_fdir_write_fltr(vf, conf, false, false);
+ if (ret) {
+ dev_err(dev, "VF %u: deleting FDIR raw flow rule failed: %d\n",
+ vf->vf_id, ret);
+ return ret;
+ }
+
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
+ return -ENODEV;
+ }
+
+ ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
+ if (!ctrl_vsi) {
+ dev_err(dev, "Can not get FDIR ctrl_vsi for VF %u\n",
+ vf->vf_id);
+ return -ENODEV;
+ }
+
+ pi = &vf->fdir_prof_info[ptg];
+ if (pi->fdir_active_cnt != 0) {
+ pi->fdir_active_cnt--;
+ /* Remove the profile id flow if no active FDIR rule left */
+ if (!pi->fdir_active_cnt) {
+ vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, id);
+
+ vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
+ ice_rem_prof_id_flow(hw, blk, vsi_num, id);
+ }
+ }
+
+ conf->parser_ena = false;
+ return 0;
+}
+
+/**
* ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1933,7 +2301,10 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
struct virtchnl_fdir_del *stat = NULL;
struct virtchnl_fdir_fltr_conf *conf;
+ struct ice_vf_fdir *fdir = &vf->fdir;
enum virtchnl_status_code v_ret;
+ struct ice_fdir_fltr *input;
+ enum ice_fltr_ptype flow;
struct device *dev;
struct ice_pf *pf;
int is_tun = 0;
@@ -1983,6 +2354,15 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto err_exit;
}
+ /* For raw FDIR filters created by the parser */
+ if (conf->parser_ena) {
+ ret = ice_vc_del_fdir_raw(vf, conf, &v_ret, stat, len);
+ if (ret)
+ goto err_del_tmr;
+ goto exit;
+ }
+
+ is_tun = ice_fdir_is_tunnel(conf->ttype);
ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
@@ -1992,6 +2372,13 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
goto err_del_tmr;
}
+ /* Remove unused profiles to avoid unexpected behaviors */
+ input = &conf->input;
+ flow = input->flow_type;
+ if (fdir->fdir_fltr_cnt[flow][is_tun] == 1)
+ ice_vc_fdir_rem_prof(vf, flow, is_tun);
+
+exit:
kfree(stat);
return ret;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 0b6c8fd5bc90..4f20343e49a9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -357,24 +357,11 @@ int idpf_intr_req(struct idpf_adapter *adapter)
goto free_msix;
}
- if (adapter->req_vec_chunks) {
- struct virtchnl2_vector_chunks *vchunks;
- struct virtchnl2_alloc_vectors *ac;
-
- ac = adapter->req_vec_chunks;
- vchunks = &ac->vchunks;
-
- num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
- vchunks);
- if (num_vec_ids < v_actual) {
- err = -EINVAL;
- goto free_vecids;
- }
- } else {
- int i;
-
- for (i = 0; i < v_actual; i++)
- vecids[i] = i;
+ num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
+ &adapter->req_vec_chunks->vchunks);
+ if (num_vec_ids < v_actual) {
+ err = -EINVAL;
+ goto free_vecids;
}
for (vector = 0; vector < v_actual; vector++) {
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 7b83678ba83a..6ad35a00a287 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -282,7 +282,6 @@ enum igbvf_state_t {
extern char igbvf_driver_name[];
-void igbvf_check_options(struct igbvf_adapter *);
void igbvf_set_ethtool_ops(struct net_device *);
int igbvf_up(struct igbvf_adapter *);
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index e5b31818d565..7637d21445bf 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -49,7 +49,6 @@
#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
-void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
s32 e1000_init_mbx_params_vf(struct e1000_hw *);
#endif /* _E1000_MBX_H_ */
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 9e6984815386..3c289bfe0a09 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -95,7 +95,6 @@ struct ltq_etop_priv {
struct mii_bus *mii_bus;
struct ltq_etop_chan ch[MAX_DMA_CHAN];
- int tx_free[MAX_DMA_CHAN >> 1];
int tx_burst_len;
int rx_burst_len;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 41894834fb53..d72b2d5f96db 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1781,7 +1781,7 @@ static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
}
/* Set TXQ descriptors fields relevant for CSUM calculation */
-static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvneta_txq_desc_csum(int l3_offs, __be16 l3_proto,
int ip_hdr_len, int l4_proto)
{
u32 command;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index e809f91c08fb..9e02e4367bec 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1088,7 +1088,7 @@ struct mvpp2 {
unsigned int max_port_rxqs;
/* Workqueue to gather hardware statistics */
- char queue_name[30];
+ char queue_name[31];
struct workqueue_struct *stats_queue;
/* Debugfs root entry */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 40aeaa7bd739..1641791a2d5b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1522,29 +1522,19 @@ static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
return 0;
}
-int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 port_ctx)
{
u32 rss_ctx;
- int ret, i;
+ int ret;
ret = mvpp22_rss_context_create(port, &rss_ctx);
if (ret)
return ret;
- /* Find the first available context number in the port, starting from 1.
- * Context 0 on each port is reserved for the default context.
- */
- for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
- if (port->rss_ctx[i] < 0)
- break;
- }
-
- if (i == MVPP22_N_RSS_TABLES)
+ if (WARN_ON_ONCE(port->rss_ctx[port_ctx] >= 0))
return -EINVAL;
- port->rss_ctx[i] = rss_ctx;
- *port_ctx = i;
-
+ port->rss_ctx[port_ctx] = rss_ctx;
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 663157dc8062..85c9c6e80678 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -264,7 +264,7 @@ int mvpp22_port_rss_init(struct mvpp2_port *port);
int mvpp22_port_rss_enable(struct mvpp2_port *port);
int mvpp22_port_rss_disable(struct mvpp2_port *port);
-int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *rss_ctx);
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 rss_ctx);
int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 rss_ctx);
int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 0d62a33afa80..2fe8bae4eb3c 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5696,40 +5696,82 @@ static int mvpp2_ethtool_get_rxfh(struct net_device *dev,
return ret;
}
-static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
- struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
+static bool mvpp2_ethtool_rxfh_okay(struct mvpp2_port *port,
+ const struct ethtool_rxfh_param *rxfh)
{
- struct mvpp2_port *port = netdev_priv(dev);
- u32 *rss_context = &rxfh->rss_context;
- int ret = 0;
-
if (!mvpp22_rss_is_supported(port))
- return -EOPNOTSUPP;
+ return false;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_CRC32)
- return -EOPNOTSUPP;
+ return false;
if (rxfh->key)
+ return false;
+
+ return true;
+}
+
+static int mvpp2_create_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp2_ethtool_rxfh_okay(port, rxfh))
return -EOPNOTSUPP;
- if (*rss_context && rxfh->rss_delete)
- return mvpp22_port_rss_ctx_delete(port, *rss_context);
+ ctx->hfunc = ETH_RSS_HASH_CRC32;
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- ret = mvpp22_port_rss_ctx_create(port, rss_context);
- if (ret)
- return ret;
- }
+ ret = mvpp22_port_rss_ctx_create(port, rxfh->rss_context);
+ if (ret)
+ return ret;
- if (rxfh->indir)
- ret = mvpp22_port_rss_ctx_indir_set(port, *rss_context,
+ if (!rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_get(port, rxfh->rss_context,
+ ethtool_rxfh_context_indir(ctx));
+ else
+ ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context,
rxfh->indir);
+ return ret;
+}
+static int mvpp2_modify_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret = 0;
+
+ if (!mvpp2_ethtool_rxfh_okay(port, rxfh))
+ return -EOPNOTSUPP;
+
+ if (rxfh->indir)
+ ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context,
+ rxfh->indir);
return ret;
}
+static int mvpp2_remove_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return mvpp22_port_rss_ctx_delete(port, rss_context);
+}
+
+static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack);
+}
+
/* Device ops */
static const struct net_device_ops mvpp2_netdev_ops = {
@@ -5749,7 +5791,7 @@ static const struct net_device_ops mvpp2_netdev_ops = {
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
- .cap_rss_ctx_supported = true,
+ .rxfh_max_num_contexts = MVPP22_N_RSS_TABLES,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvpp2_ethtool_nway_reset,
@@ -5772,6 +5814,9 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
+ .create_rxfh_context = mvpp2_create_rxfh_context,
+ .modify_rxfh_context = mvpp2_modify_rxfh_context,
+ .remove_rxfh_context = mvpp2_remove_rxfh_context,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
@@ -7417,8 +7462,6 @@ static int mvpp2_get_sram(struct platform_device *pdev,
static int mvpp2_probe(struct platform_device *pdev)
{
- struct fwnode_handle *fwnode = pdev->dev.fwnode;
- struct fwnode_handle *port_fwnode;
struct mvpp2 *priv;
struct resource *res;
void __iomem *base;
@@ -7591,7 +7634,7 @@ static int mvpp2_probe(struct platform_device *pdev)
}
/* Map DTS-active ports. Should be done before FIFO mvpp2_init */
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ device_for_each_child_node_scoped(&pdev->dev, port_fwnode) {
if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
priv->port_map |= BIT(i);
}
@@ -7614,7 +7657,7 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_axi_clk;
/* Initialize ports */
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ device_for_each_child_node_scoped(&pdev->dev, port_fwnode) {
err = mvpp2_port_probe(pdev, port_fwnode, priv);
if (err < 0)
goto err_port_probe;
@@ -7653,14 +7696,8 @@ static int mvpp2_probe(struct platform_device *pdev)
return 0;
err_port_probe:
- fwnode_handle_put(port_fwnode);
-
- i = 0;
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
- if (priv->port_list[i])
- mvpp2_port_remove(priv->port_list[i]);
- i++;
- }
+ for (i = 0; i < priv->port_count; i++)
+ mvpp2_port_remove(priv->port_list[i]);
err_axi_clk:
clk_disable_unprepare(priv->axi_clk);
err_mg_core_clk:
@@ -7677,18 +7714,13 @@ err_pp_clk:
static void mvpp2_remove(struct platform_device *pdev)
{
struct mvpp2 *priv = platform_get_drvdata(pdev);
- struct fwnode_handle *fwnode = pdev->dev.fwnode;
- int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
- struct fwnode_handle *port_fwnode;
+ int i, poolnum = MVPP2_BM_POOLS_NUM;
mvpp2_dbgfs_cleanup(priv);
- fwnode_for_each_available_child_node(fwnode, port_fwnode) {
- if (priv->port_list[i]) {
- mutex_destroy(&priv->port_list[i]->gather_stats_lock);
- mvpp2_port_remove(priv->port_list[i]);
- }
- i++;
+ for (i = 0; i < priv->port_count; i++) {
+ mutex_destroy(&priv->port_list[i]->gather_stats_lock);
+ mvpp2_port_remove(priv->port_list[i]);
}
destroy_workqueue(priv->stats_queue);
@@ -7711,7 +7743,7 @@ static void mvpp2_remove(struct platform_device *pdev)
aggr_txq->descs_dma);
}
- if (is_acpi_node(port_fwnode))
+ if (!dev_of_node(&pdev->dev))
return;
clk_disable_unprepare(priv->axi_clk);
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 1c5b85a86df1..930f180688e5 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -18,6 +18,7 @@
#include <uapi/linux/ppp_defs.h>
#define AIROHA_MAX_NUM_GDM_PORTS 1
+#define AIROHA_MAX_NUM_QDMA 2
#define AIROHA_MAX_NUM_RSTS 3
#define AIROHA_MAX_NUM_XSI_RSTS 5
#define AIROHA_MAX_MTU 2000
@@ -66,9 +67,11 @@
#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
#define FE_RST_CORE_MASK BIT(0)
+#define REG_FE_WAN_MAC_H 0x0030
#define REG_FE_LAN_MAC_H 0x0040
-#define REG_FE_LAN_MAC_LMIN 0x0044
-#define REG_FE_LAN_MAC_LMAX 0x0048
+
+#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
+#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
#define REG_FE_CDM1_OQ_MAP0 0x0050
#define REG_FE_CDM1_OQ_MAP1 0x0054
@@ -727,7 +730,7 @@ struct airoha_queue_entry {
};
struct airoha_queue {
- struct airoha_eth *eth;
+ struct airoha_qdma *qdma;
/* protect concurrent queue accesses */
spinlock_t lock;
@@ -746,7 +749,7 @@ struct airoha_queue {
};
struct airoha_tx_irq_queue {
- struct airoha_eth *eth;
+ struct airoha_qdma *qdma;
struct napi_struct napi;
u32 *q;
@@ -782,9 +785,30 @@ struct airoha_hw_stats {
u64 rx_len[7];
};
+struct airoha_qdma {
+ struct airoha_eth *eth;
+ void __iomem *regs;
+
+ /* protect concurrent irqmask accesses */
+ spinlock_t irq_lock;
+ u32 irqmask[QDMA_INT_REG_MAX];
+ int irq;
+
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+
+ /* descriptor and packet buffers for qdma hw forward */
+ struct {
+ void *desc;
+ void *q;
+ } hfwd;
+};
+
struct airoha_gdm_port {
+ struct airoha_qdma *qdma;
struct net_device *dev;
- struct airoha_eth *eth;
int id;
struct airoha_hw_stats stats;
@@ -794,31 +818,15 @@ struct airoha_eth {
struct device *dev;
unsigned long state;
-
- void __iomem *qdma_regs;
void __iomem *fe_regs;
- /* protect concurrent irqmask accesses */
- spinlock_t irq_lock;
- u32 irqmask[QDMA_INT_REG_MAX];
- int irq;
-
struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
- struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-
struct net_device *napi_dev;
- struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
- struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-
- struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
- /* descriptor and packet buffers for qdma hw forward */
- struct {
- void *desc;
- void *q;
- } hfwd;
+ struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
};
static u32 airoha_rr(void __iomem *base, u32 offset)
@@ -850,60 +858,72 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
#define airoha_fe_clear(eth, offset, val) \
airoha_rmw((eth)->fe_regs, (offset), (val), 0)
-#define airoha_qdma_rr(eth, offset) \
- airoha_rr((eth)->qdma_regs, (offset))
-#define airoha_qdma_wr(eth, offset, val) \
- airoha_wr((eth)->qdma_regs, (offset), (val))
-#define airoha_qdma_rmw(eth, offset, mask, val) \
- airoha_rmw((eth)->qdma_regs, (offset), (mask), (val))
-#define airoha_qdma_set(eth, offset, val) \
- airoha_rmw((eth)->qdma_regs, (offset), 0, (val))
-#define airoha_qdma_clear(eth, offset, val) \
- airoha_rmw((eth)->qdma_regs, (offset), (val), 0)
-
-static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
+#define airoha_qdma_rr(qdma, offset) \
+ airoha_rr((qdma)->regs, (offset))
+#define airoha_qdma_wr(qdma, offset, val) \
+ airoha_wr((qdma)->regs, (offset), (val))
+#define airoha_qdma_rmw(qdma, offset, mask, val) \
+ airoha_rmw((qdma)->regs, (offset), (mask), (val))
+#define airoha_qdma_set(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), 0, (val))
+#define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
+static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
u32 clear, u32 set)
{
unsigned long flags;
- if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask)))
+ if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
return;
- spin_lock_irqsave(&eth->irq_lock, flags);
+ spin_lock_irqsave(&qdma->irq_lock, flags);
- eth->irqmask[index] &= ~clear;
- eth->irqmask[index] |= set;
- airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]);
+ qdma->irqmask[index] &= ~clear;
+ qdma->irqmask[index] |= set;
+ airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
/* Read irq_enable register in order to guarantee the update above
* completes in the spinlock critical section.
*/
- airoha_qdma_rr(eth, REG_INT_ENABLE(index));
+ airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
- spin_unlock_irqrestore(&eth->irq_lock, flags);
+ spin_unlock_irqrestore(&qdma->irq_lock, flags);
}
-static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index,
+static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
u32 mask)
{
- airoha_qdma_set_irqmask(eth, index, 0, mask);
+ airoha_qdma_set_irqmask(qdma, index, 0, mask);
}
-static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index,
+static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
u32 mask)
{
- airoha_qdma_set_irqmask(eth, index, mask, 0);
+ airoha_qdma_set_irqmask(qdma, index, mask, 0);
}
-static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
+static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
{
- u32 val;
+ /* The GDM1 port on the EN7581 SoC is connected to the LAN DSA switch.
+ * GDM{2,3,4} can be used as WAN ports connected to an external
+ * PHY module.
+ */
+ return port->id == 1;
+}
+static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 val, reg;
+
+ reg = airhoa_is_lan_gdm_port(port) ? REG_FE_LAN_MAC_H
+ : REG_FE_WAN_MAC_H;
val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
- airoha_fe_wr(eth, REG_FE_LAN_MAC_H, val);
+ airoha_fe_wr(eth, reg, val);
val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
- airoha_fe_wr(eth, REG_FE_LAN_MAC_LMIN, val);
- airoha_fe_wr(eth, REG_FE_LAN_MAC_LMAX, val);
+ airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
+ airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
}
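The parameterized macros reproduce the removed fixed defines when the LAN base is plugged back in, and the same arithmetic now also serves the WAN block. Checking the offsets:

    /* REG_FE_MAC_LMIN(REG_FE_LAN_MAC_H) == 0x0040 + 0x04 == 0x0044
     * REG_FE_MAC_LMAX(REG_FE_LAN_MAC_H) == 0x0040 + 0x08 == 0x0048
     * and the WAN variant starts from REG_FE_WAN_MAC_H (0x0030).
     */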
static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
@@ -1383,8 +1403,9 @@ static int airoha_fe_init(struct airoha_eth *eth)
static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
{
enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
- struct airoha_eth *eth = q->eth;
- int qid = q - &eth->q_rx[0];
+ struct airoha_qdma *qdma = q->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0];
int nframes = 0;
while (q->queued < q->ndesc - 1) {
@@ -1420,7 +1441,8 @@ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
WRITE_ONCE(desc->msg2, 0);
WRITE_ONCE(desc->msg3, 0);
- airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
+ airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
+ RX_RING_CPU_IDX_MASK,
FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
}
@@ -1450,8 +1472,9 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
{
enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
- struct airoha_eth *eth = q->eth;
- int qid = q - &eth->q_rx[0];
+ struct airoha_qdma *qdma = q->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0];
int done = 0;
while (done < budget) {
@@ -1513,7 +1536,6 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
{
struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
- struct airoha_eth *eth = q->eth;
int cur, done = 0;
do {
@@ -1522,14 +1544,14 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
} while (cur && done < budget);
if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1,
+ airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
RX_DONE_INT_MASK);
return done;
}
-static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
- struct airoha_queue *q, int ndesc)
+static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int ndesc)
{
const struct page_pool_params pp_params = {
.order = 0,
@@ -1538,15 +1560,16 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
.dma_dir = DMA_FROM_DEVICE,
.max_len = PAGE_SIZE,
.nid = NUMA_NO_NODE,
- .dev = eth->dev,
+ .dev = qdma->eth->dev,
.napi = &q->napi,
};
- int qid = q - &eth->q_rx[0], thr;
+ struct airoha_eth *eth = qdma->eth;
+ int qid = q - &qdma->q_rx[0], thr;
dma_addr_t dma_addr;
q->buf_size = PAGE_SIZE / 2;
q->ndesc = ndesc;
- q->eth = eth;
+ q->qdma = qdma;
q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
GFP_KERNEL);
@@ -1568,14 +1591,15 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
- airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr);
- airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK,
+ airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
+ RX_RING_SIZE_MASK,
FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
thr = clamp(ndesc >> 3, 1, 32);
- airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
+ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
FIELD_PREP(RX_RING_THR_MASK, thr));
- airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
+ airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
airoha_qdma_fill_rx_queue(q);
@@ -1585,7 +1609,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
{
- struct airoha_eth *eth = q->eth;
+ struct airoha_eth *eth = q->qdma->eth;
while (q->queued) {
struct airoha_queue_entry *e = &q->entry[q->tail];
@@ -1599,11 +1623,11 @@ static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
}
}
-static int airoha_qdma_init_rx(struct airoha_eth *eth)
+static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
{
int i;
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
int err;
if (!(RX_DONE_INT_MASK & BIT(i))) {
@@ -1611,7 +1635,7 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth)
continue;
}
- err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
+ err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
RX_DSCP_NUM(i));
if (err)
return err;
@@ -1623,12 +1647,14 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth)
static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
{
struct airoha_tx_irq_queue *irq_q;
+ struct airoha_qdma *qdma;
struct airoha_eth *eth;
int id, done = 0;
irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
- eth = irq_q->eth;
- id = irq_q - &eth->q_tx_irq[0];
+ qdma = irq_q->qdma;
+ id = irq_q - &qdma->q_tx_irq[0];
+ eth = qdma->eth;
while (irq_q->queued > 0 && done < budget) {
u32 qid, last, val = irq_q->q[irq_q->head];
@@ -1645,10 +1671,10 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
- if (qid >= ARRAY_SIZE(eth->q_tx))
+ if (qid >= ARRAY_SIZE(qdma->q_tx))
continue;
- q = &eth->q_tx[qid];
+ q = &qdma->q_tx[qid];
if (!q->ndesc)
continue;
@@ -1697,28 +1723,29 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
int i, len = done >> 7;
for (i = 0; i < len; i++)
- airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+ airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
IRQ_CLEAR_LEN_MASK, 0x80);
- airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+ airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
IRQ_CLEAR_LEN_MASK, (done & 0x7f));
}
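Worked example of the clear-length split above: IRQ_CLEAR_LEN_MASK evidently holds at most 7 bits, so for done = 300 the loop performs len = 300 >> 7 = 2 writes of 0x80 (2 * 128 = 256 entries), and the final write clears the remaining 300 & 0x7f = 44, acknowledging all 300 completed descriptors.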
if (done < budget && napi_complete(napi))
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(id));
return done;
}
-static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
- struct airoha_queue *q, int size)
+static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int size)
{
- int i, qid = q - &eth->q_tx[0];
+ struct airoha_eth *eth = qdma->eth;
+ int i, qid = q - &qdma->q_tx[0];
dma_addr_t dma_addr;
spin_lock_init(&q->lock);
q->ndesc = size;
- q->eth = eth;
+ q->qdma = qdma;
q->free_thr = 1 + MAX_SKB_FRAGS;
q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
@@ -1738,20 +1765,20 @@ static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
}
- airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr);
- airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
- airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+ airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
return 0;
}
-static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
- struct airoha_tx_irq_queue *irq_q,
- int size)
+static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
+ struct airoha_qdma *qdma, int size)
{
- int id = irq_q - &eth->q_tx_irq[0];
+ int id = irq_q - &qdma->q_tx_irq[0];
+ struct airoha_eth *eth = qdma->eth;
dma_addr_t dma_addr;
netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
@@ -1763,30 +1790,30 @@ static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
memset(irq_q->q, 0xff, size * sizeof(u32));
irq_q->size = size;
- irq_q->eth = eth;
+ irq_q->qdma = qdma;
- airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr);
- airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
+ airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
+ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
- airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
+ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
FIELD_PREP(TX_IRQ_THR_MASK, 1));
return 0;
}
-static int airoha_qdma_init_tx(struct airoha_eth *eth)
+static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
{
int i, err;
- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
- err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
IRQ_QUEUE_LEN(i));
if (err)
return err;
}
- for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
- err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
TX_DSCP_NUM);
if (err)
return err;
@@ -1797,7 +1824,7 @@ static int airoha_qdma_init_tx(struct airoha_eth *eth)
static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
{
- struct airoha_eth *eth = q->eth;
+ struct airoha_eth *eth = q->qdma->eth;
spin_lock_bh(&q->lock);
while (q->queued) {
@@ -1814,34 +1841,35 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
spin_unlock_bh(&q->lock);
}
-static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
+static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
+ struct airoha_eth *eth = qdma->eth;
dma_addr_t dma_addr;
u32 status;
int size;
size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
- eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!eth->hfwd.desc)
+ qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL);
+ if (!qdma->hfwd.desc)
return -ENOMEM;
- airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr);
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
- eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
- GFP_KERNEL);
- if (!eth->hfwd.q)
+ qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL);
+ if (!qdma->hfwd.q)
return -ENOMEM;
- airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr);
+ airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
- airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG,
+ airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
- airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
+ airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
- airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG,
+ airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
HW_FWD_DESC_NUM_MASK,
FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
@@ -1849,87 +1877,87 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
return read_poll_timeout(airoha_qdma_rr, status,
!(status & LMGR_INIT_START), USEC_PER_MSEC,
- 30 * USEC_PER_MSEC, true, eth,
+ 30 * USEC_PER_MSEC, true, qdma,
REG_LMGR_INIT_CFG);
}
-static void airoha_qdma_init_qos(struct airoha_eth *eth)
+static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
{
- airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
- airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
+ airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
+ airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
- airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG,
+ airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
PSE_BUF_ESTIMATE_EN_MASK);
- airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG,
+ airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
EGRESS_RATE_METER_EN_MASK |
EGRESS_RATE_METER_EQ_RATE_EN_MASK);
/* 2047us x 31 = 63.457ms */
- airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+ airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
EGRESS_RATE_METER_WINDOW_SZ_MASK,
FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
- airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+ airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
EGRESS_RATE_METER_TIMESLICE_MASK,
FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
/* ratelimit init */
- airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
+ airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
/* fast-tick 25us */
- airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
+ airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
FIELD_PREP(GLB_FAST_TICK_MASK, 25));
- airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
+ airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
- airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
- airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
+ airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
+ airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
- airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG,
+ airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
EGRESS_SLOW_TICK_RATIO_MASK,
FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
- airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
- airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG,
+ airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
+ airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
INGRESS_TRTCM_MODE_MASK);
- airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
+ airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
- airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG,
+ airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
INGRESS_SLOW_TICK_RATIO_MASK,
FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
- airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
- airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
+ airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
+ airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
FIELD_PREP(SLA_FAST_TICK_MASK, 25));
- airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
+ airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}
-static int airoha_qdma_hw_init(struct airoha_eth *eth)
+static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
int i;
/* clear pending irqs */
- for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
- airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff);
+ for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
/* setup irqs */
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
- airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
+ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
+ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
+ airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
/* setup irq binding */
- for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
- if (!eth->q_tx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
continue;
if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
- airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i),
+ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
TX_RING_IRQ_BLOCKING_CFG_MASK);
else
- airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i),
+ airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
TX_RING_IRQ_BLOCKING_CFG_MASK);
}
- airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG,
+ airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
GLOBAL_CFG_RX_2B_OFFSET_MASK |
FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
GLOBAL_CFG_CPU_TXR_RR_MASK |
@@ -1940,18 +1968,18 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth)
GLOBAL_CFG_TX_WB_DONE_MASK |
FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
- airoha_qdma_init_qos(eth);
+ airoha_qdma_init_qos(qdma);
/* disable qdma rx delay interrupt */
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
- if (!eth->q_rx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
continue;
- airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i),
+ airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
RX_DELAY_INT_MASK);
}
- airoha_qdma_set(eth, REG_TXQ_CNGST_CFG,
+ airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
return 0;
@@ -1959,150 +1987,180 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth)
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
{
- struct airoha_eth *eth = dev_instance;
- u32 intr[ARRAY_SIZE(eth->irqmask)];
+ struct airoha_qdma *qdma = dev_instance;
+ u32 intr[ARRAY_SIZE(qdma->irqmask)];
int i;
- for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
- intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i));
- intr[i] &= eth->irqmask[i];
- airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]);
+ for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
+ intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
+ intr[i] &= qdma->irqmask[i];
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
}
- if (!test_bit(DEV_STATE_INITIALIZED, &eth->state))
+ if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
return IRQ_NONE;
if (intr[1] & RX_DONE_INT_MASK) {
- airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
+ airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
RX_DONE_INT_MASK);
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
- if (!eth->q_rx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
continue;
if (intr[1] & BIT(i))
- napi_schedule(&eth->q_rx[i].napi);
+ napi_schedule(&qdma->q_rx[i].napi);
}
}
if (intr[0] & INT_TX_MASK) {
- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
- struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i];
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
u32 status, head;
if (!(intr[0] & TX_DONE_INT_MASK(i)))
continue;
- airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
+ airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(i));
- status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i));
+ status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
irq_q->head = head % irq_q->size;
irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
- napi_schedule(&eth->q_tx_irq[i].napi);
+ napi_schedule(&qdma->q_tx_irq[i].napi);
}
}
return IRQ_HANDLED;
}
-static int airoha_qdma_init(struct airoha_eth *eth)
+static int airoha_qdma_init(struct platform_device *pdev,
+ struct airoha_eth *eth,
+ struct airoha_qdma *qdma)
{
- int err;
+ int err, id = qdma - &eth->qdma[0];
+ const char *res;
- err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, eth);
- if (err)
- return err;
+ spin_lock_init(&qdma->irq_lock);
+ qdma->eth = eth;
- err = airoha_qdma_init_rx(eth);
+ res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
+ if (!res)
+ return -ENOMEM;
+
+ qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
+ if (IS_ERR(qdma->regs))
+ return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
+ "failed to iomap qdma%d regs\n", id);
+
+ qdma->irq = platform_get_irq(pdev, 4 * id);
+ if (qdma->irq < 0)
+ return qdma->irq;
+
+ err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, qdma);
if (err)
return err;
- err = airoha_qdma_init_tx(eth);
+ err = airoha_qdma_init_rx(qdma);
if (err)
return err;
- err = airoha_qdma_init_hfwd_queues(eth);
+ err = airoha_qdma_init_tx(qdma);
if (err)
return err;
- err = airoha_qdma_hw_init(eth);
+ err = airoha_qdma_init_hfwd_queues(qdma);
if (err)
return err;
- set_bit(DEV_STATE_INITIALIZED, &eth->state);
-
- return 0;
+ return airoha_qdma_hw_init(qdma);
}
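A note on the interrupt lookup above: the platform_get_irq(pdev, 4 * id) stride suggests each QDMA block reserves four interrupt lines in the device tree, with only the first line per block requested so far. That reading is inferred from the index arithmetic, not from a binding document quoted in this patch.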
-static int airoha_hw_init(struct airoha_eth *eth)
+static int airoha_hw_init(struct platform_device *pdev,
+ struct airoha_eth *eth)
{
- int err;
+ int err, i;
/* disable xsi */
- reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts);
+ err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
+ eth->xsi_rsts);
+ if (err)
+ return err;
+
+ err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ if (err)
+ return err;
- reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
- msleep(20);
- reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
msleep(20);
+ err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ if (err)
+ return err;
+ msleep(20);
err = airoha_fe_init(eth);
if (err)
return err;
- return airoha_qdma_init(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+ err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
+ if (err)
+ return err;
+ }
+
+ set_bit(DEV_STATE_INITIALIZED, &eth->state);
+
+ return 0;
}
-static void airoha_hw_cleanup(struct airoha_eth *eth)
+static void airoha_hw_cleanup(struct airoha_qdma *qdma)
{
int i;
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
- if (!eth->q_rx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
continue;
- napi_disable(&eth->q_rx[i].napi);
- netif_napi_del(&eth->q_rx[i].napi);
- airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]);
- if (eth->q_rx[i].page_pool)
- page_pool_destroy(eth->q_rx[i].page_pool);
+ napi_disable(&qdma->q_rx[i].napi);
+ netif_napi_del(&qdma->q_rx[i].napi);
+ airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
+ if (qdma->q_rx[i].page_pool)
+ page_pool_destroy(qdma->q_rx[i].page_pool);
}
- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
- napi_disable(&eth->q_tx_irq[i].napi);
- netif_napi_del(&eth->q_tx_irq[i].napi);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ napi_disable(&qdma->q_tx_irq[i].napi);
+ netif_napi_del(&qdma->q_tx_irq[i].napi);
}
- for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
- if (!eth->q_tx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
continue;
- airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]);
+ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
}
}
-static void airoha_qdma_start_napi(struct airoha_eth *eth)
+static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
{
int i;
- for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++)
- napi_enable(&eth->q_tx_irq[i].napi);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ napi_enable(&qdma->q_tx_irq[i].napi);
- for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
- if (!eth->q_rx[i].ndesc)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
continue;
- napi_enable(&eth->q_rx[i].napi);
+ napi_enable(&qdma->q_rx[i].napi);
}
}
static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
- struct airoha_eth *eth = port->eth;
+ struct airoha_eth *eth = port->qdma->eth;
u32 val, i = 0;
spin_lock(&port->stats.lock);
@@ -2247,23 +2305,24 @@ static void airoha_update_hw_stats(struct airoha_gdm_port *port)
static int airoha_dev_open(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
- struct airoha_eth *eth = port->eth;
+ struct airoha_qdma *qdma = port->qdma;
int err;
netif_tx_start_all_queues(dev);
- err = airoha_set_gdm_ports(eth, true);
+ err = airoha_set_gdm_ports(qdma->eth, true);
if (err)
return err;
if (netdev_uses_dsa(dev))
- airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id),
+ airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
else
- airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
+ airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
- airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
- airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+ airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
return 0;
}
@@ -2271,16 +2330,17 @@ static int airoha_dev_open(struct net_device *dev)
static int airoha_dev_stop(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
- struct airoha_eth *eth = port->eth;
+ struct airoha_qdma *qdma = port->qdma;
int err;
netif_tx_disable(dev);
- err = airoha_set_gdm_ports(eth, false);
+ err = airoha_set_gdm_ports(qdma->eth, false);
if (err)
return err;
- airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
- airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+ airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
return 0;
}
@@ -2294,7 +2354,7 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
if (err)
return err;
- airoha_set_macaddr(port->eth, dev->dev_addr);
+ airoha_set_macaddr(port, dev->dev_addr);
return 0;
}
@@ -2303,7 +2363,7 @@ static int airoha_dev_init(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
- airoha_set_macaddr(port->eth, dev->dev_addr);
+ airoha_set_macaddr(port, dev->dev_addr);
return 0;
}
@@ -2337,7 +2397,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct airoha_gdm_port *port = netdev_priv(dev);
u32 msg0 = 0, msg1, len = skb_headlen(skb);
int i, qid = skb_get_queue_mapping(skb);
- struct airoha_eth *eth = port->eth;
+ struct airoha_qdma *qdma = port->qdma;
u32 nr_frags = 1 + sinfo->nr_frags;
struct netdev_queue *txq;
struct airoha_queue *q;
@@ -2367,7 +2427,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
- q = &eth->q_tx[qid];
+ q = &qdma->q_tx[qid];
if (WARN_ON_ONCE(!q->ndesc))
goto error;
@@ -2411,7 +2471,8 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
e->dma_addr = addr;
e->dma_len = len;
- airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
+ TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
data = skb_frag_address(frag);
@@ -2448,7 +2509,7 @@ static void airoha_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct airoha_gdm_port *port = netdev_priv(dev);
- struct airoha_eth *eth = port->eth;
+ struct airoha_eth *eth = port->qdma->eth;
strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
@@ -2529,6 +2590,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
{
const __be32 *id_ptr = of_get_property(np, "reg", NULL);
struct airoha_gdm_port *port;
+ struct airoha_qdma *qdma;
struct net_device *dev;
int err, index;
u32 id;
@@ -2558,6 +2620,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
return -ENOMEM;
}
+ qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
dev->netdev_ops = &airoha_netdev_ops;
dev->ethtool_ops = &airoha_ethtool_ops;
dev->max_mtu = AIROHA_MAX_MTU;
@@ -2567,6 +2630,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
NETIF_F_SG | NETIF_F_TSO;
dev->features |= dev->hw_features;
dev->dev.of_node = np;
+ dev->irq = qdma->irq;
SET_NETDEV_DEV(dev, eth->dev);
err = of_get_ethdev_address(np, dev);
@@ -2582,8 +2646,8 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
port = netdev_priv(dev);
u64_stats_init(&port->stats.syncp);
spin_lock_init(&port->stats.lock);
+ port->qdma = qdma;
port->dev = dev;
- port->eth = eth;
port->id = id;
eth->ports[index] = port;
@@ -2613,11 +2677,6 @@ static int airoha_probe(struct platform_device *pdev)
return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
"failed to iomap fe regs\n");
- eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0");
- if (IS_ERR(eth->qdma_regs))
- return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs),
- "failed to iomap qdma regs\n");
-
eth->rsts[0].id = "fe";
eth->rsts[1].id = "pdma";
eth->rsts[2].id = "qdma";
@@ -2642,11 +2701,6 @@ static int airoha_probe(struct platform_device *pdev)
return err;
}
- spin_lock_init(&eth->irq_lock);
- eth->irq = platform_get_irq(pdev, 0);
- if (eth->irq < 0)
- return eth->irq;
-
eth->napi_dev = alloc_netdev_dummy(0);
if (!eth->napi_dev)
return -ENOMEM;
@@ -2656,11 +2710,13 @@ static int airoha_probe(struct platform_device *pdev)
strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
platform_set_drvdata(pdev, eth);
- err = airoha_hw_init(eth);
+ err = airoha_hw_init(pdev, eth);
if (err)
goto error;
- airoha_qdma_start_napi(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_qdma_start_napi(&eth->qdma[i]);
+
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_device_is_compatible(np, "airoha,eth-mac"))
continue;
@@ -2678,7 +2734,9 @@ static int airoha_probe(struct platform_device *pdev)
return 0;
error:
- airoha_hw_cleanup(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_hw_cleanup(&eth->qdma[i]);
+
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
@@ -2696,7 +2754,9 @@ static void airoha_remove(struct platform_device *pdev)
struct airoha_eth *eth = platform_get_drvdata(pdev);
int i;
- airoha_hw_cleanup(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_hw_cleanup(&eth->qdma[i]);
+
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
@@ -2715,6 +2775,7 @@ static const struct of_device_id of_airoha_match[] = {
{ .compatible = "airoha,en7581-eth" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, of_airoha_match);
static struct platform_driver airoha_driver = {
.probe = airoha_probe,
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index eb1708b43aa3..0d5225f1d3ee 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -724,12 +724,8 @@ enum mtk_clks_map {
MTK_CLK_ETHWARP_WOCPU2,
MTK_CLK_ETHWARP_WOCPU1,
MTK_CLK_ETHWARP_WOCPU0,
- MTK_CLK_TOP_USXGMII_SBUS_0_SEL,
- MTK_CLK_TOP_USXGMII_SBUS_1_SEL,
MTK_CLK_TOP_SGM_0_SEL,
MTK_CLK_TOP_SGM_1_SEL,
- MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL,
- MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL,
MTK_CLK_TOP_ETH_GMII_SEL,
MTK_CLK_TOP_ETH_REFCK_50M_SEL,
MTK_CLK_TOP_ETH_SYS_200M_SEL,
@@ -800,19 +796,9 @@ enum mtk_clks_map {
BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \
BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \
BIT_ULL(MTK_CLK_CRYPTO) | \
- BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
- BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
- BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
- BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \
BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \
- BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_0_SEL) | \
- BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_1_SEL) | \
- BIT_ULL(MTK_CLK_TOP_SGM_0_SEL) | \
- BIT_ULL(MTK_CLK_TOP_SGM_1_SEL) | \
- BIT_ULL(MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL) | \
- BIT_ULL(MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \
BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 0acee405a749..ada852adc5f7 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -8,8 +8,11 @@
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+
#include <net/dst_metadata.h>
#include <net/dsa.h>
+#include <net/ipv6.h>
+
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"
@@ -338,7 +341,6 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
{
int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
u32 *src, *dest;
- int i;
switch (type) {
case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
@@ -359,10 +361,8 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
return -EINVAL;
}
- for (i = 0; i < 4; i++)
- src[i] = be32_to_cpu(src_addr[i]);
- for (i = 0; i < 4; i++)
- dest[i] = be32_to_cpu(dest_addr[i]);
+ ipv6_addr_be32_to_cpu(src, src_addr);
+ ipv6_addr_be32_to_cpu(dest, dest_addr);
return 0;
}
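ipv6_addr_be32_to_cpu() and ipv6_addr_cpu_to_be32() are include/net/ipv6.h helpers introduced alongside this cleanup. A sketch of what they presumably expand to, assuming a plain IPV6_ADDR_WORDS (four) word byte-order conversion; the upstream versions may be unrolled rather than looped:

    /* Hypothetical sketches of the helpers used above. */
    static inline void ipv6_addr_be32_to_cpu(u32 *dst, const __be32 *src)
    {
            int i;

            for (i = 0; i < 4; i++)                 /* IPV6_ADDR_WORDS */
                    dst[i] = be32_to_cpu(src[i]);   /* wire -> host order */
    }

    static inline void ipv6_addr_cpu_to_be32(__be32 *dst, const u32 *src)
    {
            int i;

            for (i = 0; i < 4; i++)                 /* IPV6_ADDR_WORDS */
                    dst[i] = cpu_to_be32(src[i]);   /* host -> wire order */
    }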
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index 1a97feca77f2..570ebf91f693 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -3,6 +3,9 @@
#include <linux/kernel.h>
#include <linux/debugfs.h>
+
+#include <net/ipv6.h>
+
#include "mtk_eth_soc.h"
struct mtk_flow_addr_info
@@ -47,16 +50,14 @@ static const char *mtk_foe_pkt_type_str(int type)
static void
mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
{
- __be32 n_addr[4];
- int i;
+ __be32 n_addr[IPV6_ADDR_WORDS];
if (!ipv6) {
seq_printf(m, "%pI4h", addr);
return;
}
- for (i = 0; i < ARRAY_SIZE(n_addr); i++)
- n_addr[i] = htonl(addr[i]);
+ ipv6_addr_cpu_to_be32(n_addr, addr);
seq_printf(m, "%pI6", n_addr);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 943d6918c2ec..cd17a3f4faf8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2036,20 +2036,20 @@ static int mlx4_en_get_module_info(struct net_device *dev,
switch (data[0] /* identifier */) {
case MLX4_MODULE_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
break;
case MLX4_MODULE_ID_QSFP_PLUS:
if (data[1] >= 0x3) { /* revision id */
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
}
break;
case MLX4_MODULE_ID_QSFP28:
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
break;
case MLX4_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index d9e241423bc5..da0a1c65ec4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1173,14 +1173,16 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param);
+ struct ethtool_ringparam *param,
+ struct netlink_ext_ack *extack);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
- struct kernel_ethtool_coalesce *kernel_coal);
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
index bb6b1a979ba1..62b3f7ff5562 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
@@ -25,6 +25,8 @@ struct mlx5_ct_fs_ops {
struct mlx5_flow_attr *attr,
struct flow_rule *flow_rule);
void (*ct_rule_del)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule);
+ int (*ct_rule_update)(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr);
size_t priv_size;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
index ae4f55be48ce..64a82aafaaca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c
@@ -65,9 +65,30 @@ mlx5_ct_fs_dmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru
kfree(dmfs_rule);
}
+static int mlx5_ct_fs_dmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_dmfs_rule *dmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_dmfs_rule,
+ fs_rule);
+ struct mlx5e_priv *priv = netdev_priv(fs->netdev);
+ struct mlx5_flow_handle *rule;
+
+ rule = mlx5_tc_rule_insert(priv, spec, attr);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
+ mlx5_tc_rule_delete(priv, dmfs_rule->rule, dmfs_rule->attr);
+
+ dmfs_rule->rule = rule;
+ dmfs_rule->attr = attr;
+
+ return 0;
+}
+
static struct mlx5_ct_fs_ops dmfs_ops = {
.ct_rule_add = mlx5_ct_fs_dmfs_ct_rule_add,
.ct_rule_del = mlx5_ct_fs_dmfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_dmfs_ct_rule_update,
.init = mlx5_ct_fs_dmfs_init,
.destroy = mlx5_ct_fs_dmfs_destroy,
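Note the ordering in ct_rule_update here (and in the smfs variant below): the replacement rule is inserted before the old one is deleted, so the hardware never sees a window with no matching rule. That make-before-break intent is inferred from the code order; the patch itself does not state it.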
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 8c531f4ec912..1c062a2e8996 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -368,9 +368,35 @@ mlx5_ct_fs_smfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_ru
kfree(smfs_rule);
}
+static int mlx5_ct_fs_smfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_smfs_rule *smfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_smfs_rule,
+ fs_rule);
+ struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
+ struct mlx5dr_action *actions[3]; /* We only need to create 3 actions, see below. */
+ struct mlx5dr_rule *rule;
+
+ actions[0] = smfs_rule->count_action;
+ actions[1] = attr->modify_hdr->action.dr_action;
+ actions[2] = fs_smfs->fwd_action;
+
+ rule = mlx5_smfs_rule_create(smfs_rule->smfs_matcher->dr_matcher, spec,
+ ARRAY_SIZE(actions), actions, spec->flow_context.flow_source);
+ if (!rule)
+ return -EINVAL;
+
+ mlx5_smfs_rule_destroy(smfs_rule->rule);
+ smfs_rule->rule = rule;
+
+ return 0;
+}
+
static struct mlx5_ct_fs_ops fs_smfs_ops = {
.ct_rule_add = mlx5_ct_fs_smfs_ct_rule_add,
.ct_rule_del = mlx5_ct_fs_smfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_smfs_ct_rule_update,
.init = mlx5_ct_fs_smfs_init,
.destroy = mlx5_ct_fs_smfs_destroy,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 71a168746ebe..dcfccaaa8d91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -876,15 +876,14 @@ err_attr:
}
static int
-mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
- struct flow_rule *flow_rule,
- struct mlx5_ct_entry *entry,
- bool nat, u8 zone_restore_id)
+mlx5_tc_ct_entry_update_rule(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ bool nat, u8 zone_restore_id)
{
struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr;
struct mlx5e_mod_hdr_handle *mh;
- struct mlx5_ct_fs_rule *rule;
struct mlx5_flow_spec *spec;
int err;
@@ -902,29 +901,26 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
nat, mlx5_tc_ct_entry_in_ct_nat_table(entry));
if (err) {
- ct_dbg("Failed to create ct entry mod hdr");
+ ct_dbg("Failed to create ct entry mod hdr, err: %d", err);
goto err_mod_hdr;
}
mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
- rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- ct_dbg("Failed to add replacement ct entry rule, nat: %d", nat);
+ err = ct_priv->fs_ops->ct_rule_update(ct_priv->fs, zone_rule->rule, spec, attr);
+ if (err) {
+ ct_dbg("Failed to update ct entry rule, nat: %d, err: %d", nat, err);
goto err_rule;
}
- ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
- zone_rule->rule = rule;
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
zone_rule->mh = mh;
mlx5_put_label_mapping(ct_priv, old_attr->ct_attr.ct_labels_id);
kfree(old_attr);
kvfree(spec);
- ct_dbg("Replaced ct entry rule in zone %d", entry->tuple.zone);
+ ct_dbg("Updated ct entry rule in zone %d", entry->tuple.zone);
return 0;
@@ -1141,23 +1137,23 @@ err_orig:
}
static int
-mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
- struct flow_rule *flow_rule,
- struct mlx5_ct_entry *entry,
- u8 zone_restore_id)
+mlx5_tc_ct_entry_update_rules(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ u8 zone_restore_id)
{
int err = 0;
if (mlx5_tc_ct_entry_in_ct_table(entry)) {
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
- zone_restore_id);
+ err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
if (err)
return err;
}
if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
- zone_restore_id);
+ err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
if (err && mlx5_tc_ct_entry_in_ct_table(entry))
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
}
@@ -1165,13 +1161,13 @@ mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
}
static int
-mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
- struct mlx5_ct_entry *entry, unsigned long cookie)
+mlx5_tc_ct_block_flow_offload_update(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry, unsigned long cookie)
{
struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
int err;
- err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
+ err = mlx5_tc_ct_entry_update_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
if (!err)
return 0;
@@ -1216,7 +1212,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
entry->restore_cookie = meta_action->ct_metadata.cookie;
spin_unlock_bh(&ct_priv->ht_lock);
- err = mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie);
+ err = mlx5_tc_ct_block_flow_offload_update(ft, flow_rule, entry, cookie);
mlx5_tc_ct_entry_put(entry);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 6cc23af66b5b..efb34de4cb7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -109,6 +109,7 @@ struct mlx5e_tc_flow {
struct completion init_done;
struct completion del_hw_done;
struct mlx5_flow_attr *attr;
+ struct mlx5_flow_attr *extra_split_attr;
struct list_head attrs;
u32 chain_mapping;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 797db853de36..53cfa39188cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -127,6 +127,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
attrs->lft.hard_packet_limit);
MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
+ MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
}
if (attrs->lft.soft_packet_limit != XFRM_INF) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 36845872ae94..4d123dae912c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -83,17 +83,15 @@ struct ptys2ethtool_config ptys2ext_ethtool_table[MLX5E_EXT_LINK_MODES_NUMBER];
({ \
struct ptys2ethtool_config *cfg; \
const unsigned int modes[] = { __VA_ARGS__ }; \
- unsigned int i, bit, idx; \
+ unsigned int i; \
cfg = &ptys2##table##_ethtool_table[reg_]; \
bitmap_zero(cfg->supported, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \
bitmap_zero(cfg->advertised, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \
for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
- bit = modes[i] % 64; \
- idx = modes[i] / 64; \
- __set_bit(bit, &cfg->supported[idx]); \
- __set_bit(bit, &cfg->advertised[idx]); \
+ bitmap_set(cfg->supported, modes[i], 1); \
+ bitmap_set(cfg->advertised, modes[i], 1); \
} \
})
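The removed open-coded version hardwired a 64-bit unsigned long (modes[i] % 64 and modes[i] / 64), which would index the wrong word where BITS_PER_LONG is 32; bitmap_set(map, n, 1) performs the same word/bit split portably. Portability plus readability is presumably the motivation, inferred from the code rather than a quoted changelog.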
@@ -354,35 +352,25 @@ static void mlx5e_get_ringparam(struct net_device *dev,
}
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_params new_params;
u8 log_rq_size;
u8 log_sq_size;
int err = 0;
- if (param->rx_jumbo_pending) {
- netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n",
- __func__);
- return -EINVAL;
- }
- if (param->rx_mini_pending) {
- netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n",
- __func__);
- return -EINVAL;
- }
-
if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
- netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n",
- __func__, param->rx_pending,
- 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "rx (%d) < min (%d)",
+ param->rx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
return -EINVAL;
}
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
- netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%d)\n",
- __func__, param->tx_pending,
- 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "tx (%d) < min (%d)",
+ param->tx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL;
}
@@ -418,7 +406,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- return mlx5e_ethtool_set_ringparam(priv, param);
+ return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
@@ -445,7 +433,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
unsigned int count = ch->combined_count;
struct mlx5e_params new_params;
bool arfs_enabled;
- int rss_cnt;
bool opened;
int err = 0;
@@ -499,17 +486,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
goto out;
}
- /* Don't allow changing the number of channels if non-default RSS contexts exist,
- * the kernel doesn't protect against set_channels operations that break them.
- */
- rss_cnt = mlx5e_rx_res_rss_cnt(priv->rx_res) - 1;
- if (rss_cnt) {
- err = -EINVAL;
- netdev_err(priv->netdev, "%s: Non-default RSS contexts exist (%d), cannot change the number of channels\n",
- __func__, rss_cnt);
- goto out;
- }
-
/* Don't allow changing the number of channels if MQPRIO mode channel offload is active,
* because it defines a partition over the channels queues.
*/
@@ -557,12 +533,15 @@ static int mlx5e_set_channels(struct net_device *dev,
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
- struct kernel_ethtool_coalesce *kernel_coal)
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct dim_cq_moder *rx_moder, *tx_moder;
- if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+ if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) {
+ NL_SET_ERR_MSG_MOD(extack, "CQ moderation not supported");
return -EOPNOTSUPP;
+ }
rx_moder = &priv->channels.params.rx_cq_moderation;
coal->rx_coalesce_usecs = rx_moder->usec;
@@ -586,7 +565,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5e_ethtool_get_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue,
@@ -708,26 +687,34 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
int err = 0;
if (!MLX5_CAP_GEN(mdev, cq_moderation) ||
- !MLX5_CAP_GEN(mdev, cq_period_mode_modify))
+ !MLX5_CAP_GEN(mdev, cq_period_mode_modify)) {
+ NL_SET_ERR_MSG_MOD(extack, "CQ moderation not supported");
return -EOPNOTSUPP;
+ }
if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
- netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n",
- __func__, MLX5E_MAX_COAL_TIME);
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Max coalesce time %lu usecs, tx-usecs (%u) rx-usecs (%u)",
+ MLX5E_MAX_COAL_TIME, coal->tx_coalesce_usecs,
+ coal->rx_coalesce_usecs);
return -ERANGE;
}
if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
- netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n",
- __func__, MLX5E_MAX_COAL_FRAMES);
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Max coalesce frames %lu, tx-frames (%u) rx-frames (%u)",
+ MLX5E_MAX_COAL_FRAMES, coal->tx_max_coalesced_frames,
+ coal->rx_max_coalesced_frames);
return -ERANGE;
}
if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
!MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) {
- NL_SET_ERR_MSG_MOD(extack, "cqe_mode_rx/tx is not supported on this device");
+ NL_SET_ERR_MSG_MOD(extack, "cqe-mode-rx/tx is not supported on this device");
return -EOPNOTSUPP;
}
@@ -1299,7 +1286,8 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
u32 i, ptys_modes = 0;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (*ptys2legacy_ethtool_table[i].advertised == 0)
+ if (bitmap_empty(ptys2legacy_ethtool_table[i].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
continue;
if (bitmap_intersects(ptys2legacy_ethtool_table[i].advertised,
link_modes,
@@ -1313,18 +1301,18 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
- unsigned long modes[2];
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) {
- if (ptys2ext_ethtool_table[i].advertised[0] == 0 &&
- ptys2ext_ethtool_table[i].advertised[1] == 0)
+ if (bitmap_empty(ptys2ext_ethtool_table[i].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
continue;
- memset(modes, 0, sizeof(modes));
+ bitmap_zero(modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(modes, ptys2ext_ethtool_table[i].advertised,
link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
- if (modes[0] == ptys2ext_ethtool_table[i].advertised[0] &&
- modes[1] == ptys2ext_ethtool_table[i].advertised[1])
+ if (bitmap_equal(modes, ptys2ext_ethtool_table[i].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
ptys_modes |= MLX5E_PROT_MASK(i);
}
return ptys_modes;
@@ -2015,8 +2003,10 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
if (size_read == -EINVAL)
return -EINVAL;
if (size_read < 0) {
- netdev_err(priv->netdev, "%s: mlx5_query_module_eeprom_by_page failed:0x%x\n",
- __func__, size_read);
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Query module eeprom by page failed, read %u bytes, err %d\n",
+ i, size_read);
return i;
}
@@ -2605,6 +2595,7 @@ static void mlx5e_get_ts_stats(struct net_device *netdev,
const struct ethtool_ops mlx5e_ethtool_ops = {
.cap_rss_ctx_supported = true,
+ .rxfh_per_ctx_key = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 8790d57dc6db..b885042eef14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -360,7 +360,7 @@ mlx5e_rep_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- return mlx5e_ethtool_set_ringparam(priv, param);
+ return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
static void mlx5e_rep_get_channels(struct net_device *dev,
@@ -386,7 +386,7 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 30673292e15f..6b3b1afe8312 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1740,10 +1740,118 @@ has_encap_dests(struct mlx5_flow_attr *attr)
}
static int
+extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
+{
+ bool int_dest = false, ext_dest = false;
+ struct mlx5_esw_flow_attr *esw_attr;
+ int i;
+
+ if (flow->attr != attr ||
+ !list_is_first(&attr->list, &flow->attrs))
+ return 0;
+
+ if (flow_flag_test(flow, SLOW))
+ return 0;
+
+ esw_attr = attr->esw_attr;
+ if (!esw_attr->split_count ||
+ esw_attr->split_count == esw_attr->out_count - 1)
+ return 0;
+
+ if (esw_attr->dest_int_port &&
+ (esw_attr->dests[esw_attr->split_count].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE))
+ return esw_attr->split_count + 1;
+
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
+ /* an external dest with encap is considered internal by the firmware */
+ if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK &&
+ !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID))
+ ext_dest = true;
+ else
+ int_dest = true;
+
+ if (ext_dest && int_dest)
+ return esw_attr->split_count;
+ }
+
+ return 0;
+}
+
+static int
+extra_split_attr_dests(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr, int split_count)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5e_tc_flow_parse_attr *parse_attr, *parse_attr2;
+ struct mlx5_esw_flow_attr *esw_attr, *esw_attr2;
+ struct mlx5e_post_act_handle *handle;
+ struct mlx5_flow_attr *attr2;
+ int i, j, err;
+
+ if (IS_ERR(post_act))
+ return PTR_ERR(post_act);
+
+ attr2 = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
+ parse_attr2 = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
+ if (!attr2 || !parse_attr2) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+ attr2->parse_attr = parse_attr2;
+
+ handle = mlx5e_tc_post_act_add(post_act, attr2);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto err_free;
+ }
+
+ esw_attr = attr->esw_attr;
+ esw_attr2 = attr2->esw_attr;
+ esw_attr2->in_rep = esw_attr->in_rep;
+
+ parse_attr = attr->parse_attr;
+ parse_attr2->filter_dev = parse_attr->filter_dev;
+
+ for (i = split_count, j = 0; i < esw_attr->out_count; i++, j++)
+ esw_attr2->dests[j] = esw_attr->dests[i];
+
+ esw_attr2->out_count = j;
+ attr2->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ err = mlx5e_tc_post_act_offload(post_act, handle);
+ if (err)
+ goto err_post_act_offload;
+
+ err = mlx5e_tc_post_act_set_handle(flow->priv->mdev, handle,
+ &parse_attr->mod_hdr_acts);
+ if (err)
+ goto err_post_act_set_handle;
+
+ esw_attr->out_count = split_count;
+ attr->extra_split_ft = mlx5e_tc_post_act_get_ft(post_act);
+ flow->extra_split_attr = attr2;
+
+ attr2->post_act_handle = handle;
+
+ return 0;
+
+err_post_act_set_handle:
+ mlx5e_tc_post_act_unoffload(post_act, handle);
+err_post_act_offload:
+ mlx5e_tc_post_act_del(post_act, handle);
+err_free:
+ kvfree(parse_attr2);
+ kfree(attr2);
+ return err;
+}
+
+static int
post_process_attr(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack)
{
+ int extra_split;
bool vf_tun;
int err = 0;
@@ -1757,6 +1865,13 @@ post_process_attr(struct mlx5e_tc_flow *flow,
goto err_out;
}
+ extra_split = extra_split_attr_dests_needed(flow, attr);
+ if (extra_split > 0) {
+ err = extra_split_attr_dests(flow, attr, extra_split);
+ if (err)
+ goto err_out;
+ }
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
if (err)
@@ -1971,6 +2086,11 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);
free_flow_post_acts(flow);
+ if (flow->extra_split_attr) {
+ mlx5_free_flow_attr_actions(flow, flow->extra_split_attr);
+ kvfree(flow->extra_split_attr->parse_attr);
+ kfree(flow->extra_split_attr);
+ }
mlx5_free_flow_attr_actions(flow, attr);
kvfree(attr->esw_attr->rx_tun_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index c24bda56b2b5..e1b8cb78369f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -86,6 +86,7 @@ struct mlx5_flow_attr {
u32 dest_chain;
struct mlx5_flow_table *ft;
struct mlx5_flow_table *dest_ft;
+ struct mlx5_flow_table *extra_split_ft;
u8 inner_match_level;
u8 outer_match_level;
u8 tun_ip_version;
@@ -139,7 +140,7 @@ struct mlx5_rx_tun_attr {
#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
-#define MLX5E_TC_MAX_INT_PORT_NUM (8)
+#define MLX5E_TC_MAX_INT_PORT_NUM (32)
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index cb7e7e4104af..f15ecaef1331 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -835,28 +835,9 @@ static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
mlx5_irq_release_vector(irq);
}
-static int mlx5_cpumask_default_spread(int numa_node, int index)
+static int mlx5_cpumask_default_spread(struct mlx5_core_dev *dev, int index)
{
- const struct cpumask *prev = cpu_none_mask;
- const struct cpumask *mask;
- int found_cpu = 0;
- int i = 0;
- int cpu;
-
- rcu_read_lock();
- for_each_numa_hop_mask(mask, numa_node) {
- for_each_cpu_andnot(cpu, mask, prev) {
- if (i++ == index) {
- found_cpu = cpu;
- goto spread_done;
- }
- }
- prev = mask;
- }
-
-spread_done:
- rcu_read_unlock();
- return found_cpu;
+ return cpumask_local_spread(index, dev->priv.numa_node);
}
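cpumask_local_spread() performs the same NUMA-aware walk the removed loop open-coded: since its rework on top of sched_numa_find_nth_cpu(), it returns the index-th CPU ordered by increasing NUMA distance from the given node. The one-line replacement therefore appears behavior-preserving, modulo tie-breaking details.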
static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
@@ -880,7 +861,7 @@ static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
int cpu;
rmap = mlx5_eq_table_get_pci_rmap(dev);
- cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
+ cpu = mlx5_cpumask_default_spread(dev, vecidx);
irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
if (IS_ERR(irq))
return PTR_ERR(irq);
@@ -1145,7 +1126,7 @@ int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
if (mask)
cpu = cpumask_first(mask);
else
- cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
+ cpu = mlx5_cpumask_default_spread(dev, vector);
return cpu;
}
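
The open-coded NUMA-hop walk above is replaced by cpumask_local_spread(), which hands out node-local CPUs first and then falls back to remote ones. A minimal userspace sketch of that spreading policy, with hypothetical CPU ids (not the kernel implementation):

/* Sketch of the spreading policy: node-local CPUs first, then remote. */
#include <stdio.h>

static int local_spread(int index, const int *local, int nlocal,
			const int *remote, int nremote)
{
	index %= nlocal + nremote;	/* wrap around like the kernel helper */
	return index < nlocal ? local[index] : remote[index - nlocal];
}

int main(void)
{
	const int local[] = { 0, 1, 2, 3 };	/* hypothetical node-0 CPUs */
	const int remote[] = { 4, 5, 6, 7 };	/* hypothetical node-1 CPUs */
	int i;

	for (i = 0; i < 10; i++)
		printf("vector %d -> cpu %d\n", i,
		       local_spread(i, local, 4, remote, 4));
	return 0;
}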
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 578466d69f21..f44b4c7ebcfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -887,9 +887,6 @@ int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_v
bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
u16 vport_num);
-void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw);
-void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw);
-
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 768199d2255a..f24f91d213f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -613,6 +613,13 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
}
}
+ if (attr->extra_split_ft) {
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[*i].ft = attr->extra_split_ft;
+ (*i)++;
+ }
+
out:
return err;
}
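
When attr->extra_split_ft is set, the rule chains to an extra flow table (with FLOW_ACT_IGNORE_FLOW_LEVEL) that carries the destinations which did not fit in the original entry. A loose userspace sketch of this split-and-chain idea, with a hypothetical per-entry destination limit:

/* Hypothetical limit standing in for the per-entry destination budget. */
#include <stdio.h>

#define MAX_DESTS_PER_ENTRY 2

int main(void)
{
	int ndests = 5, programmed = 0, level = 0;

	while (ndests - programmed > MAX_DESTS_PER_ENTRY) {
		printf("entry %d: %d dests + goto table %d\n",
		       level, MAX_DESTS_PER_ENTRY, level + 1);
		programmed += MAX_DESTS_PER_ENTRY;
		level++;
	}
	printf("entry %d: %d dests\n", level, ndests - programmed);
	return 0;
}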
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index b61b7d966114..76ad46bf477d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -224,6 +224,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, mcam_reg)) {
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
+ mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9180_0x91FF);
}
if (MLX5_CAP_GEN(dev, qcam_reg))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 26f8a11b8906..9772327d5124 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -74,7 +74,7 @@ static int mlx5i_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- return mlx5e_ethtool_set_ringparam(priv, param);
+ return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
static void mlx5i_get_ringparam(struct net_device *dev,
@@ -132,7 +132,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5i_get_ts_info(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 0361741632a6..b306ae79bf97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -38,6 +38,10 @@
#include "lib/eq.h"
#include "en.h"
#include "clock.h"
+#ifdef CONFIG_X86
+#include <linux/timekeeping.h>
+#include <linux/cpufeature.h>
+#endif /* CONFIG_X86 */
enum {
MLX5_PIN_MODE_IN = 0x0,
@@ -148,6 +152,87 @@ static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
MLX5_REG_MTUTC, 0, 1);
}
+#ifdef CONFIG_X86
+static bool mlx5_is_ptm_source_time_available(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtptm_reg)] = {0};
+ int err;
+
+ if (!MLX5_CAP_MCAM_REG3(dev, mtptm))
+ return false;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTPTM,
+ 0, 0);
+ if (err)
+ return false;
+
+ return !!MLX5_GET(mtptm_reg, out, psta);
+}
+
+static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
+ struct system_counterval_t *sys_counterval,
+ void *ctx)
+{
+ u32 out[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
+ struct mlx5_core_dev *mdev = ctx;
+ bool real_time_mode;
+ u64 host, device;
+ int err;
+
+ real_time_mode = mlx5_real_time_mode(mdev);
+
+ MLX5_SET(mtctr_reg, in, first_clock_timestamp_request,
+ MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK);
+ MLX5_SET(mtctr_reg, in, second_clock_timestamp_request,
+ real_time_mode ? MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK :
+ MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTCTR,
+ 0, 0);
+ if (err)
+ return err;
+
+ if (!MLX5_GET(mtctr_reg, out, first_clock_valid) ||
+ !MLX5_GET(mtctr_reg, out, second_clock_valid))
+ return -EINVAL;
+
+ host = MLX5_GET64(mtctr_reg, out, first_clock_timestamp);
+ *sys_counterval = (struct system_counterval_t) {
+ .cycles = host,
+ .cs_id = CSID_X86_ART,
+ .use_nsecs = true,
+ };
+
+ device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);
+ if (real_time_mode)
+ *device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
+ else
+ *device_time = mlx5_timecounter_cyc2time(&mdev->clock, device);
+
+ return 0;
+}
+
+static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct system_time_snapshot history_begin = {0};
+ struct mlx5_core_dev *mdev;
+
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+ if (!mlx5_is_ptm_source_time_available(mdev))
+ return -EBUSY;
+
+ ktime_get_snapshot(&history_begin);
+
+ return get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev,
+ &history_begin, cts);
+}
+#endif /* CONFIG_X86 */
+
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
struct ptp_system_timestamp *sts,
bool real_time)
@@ -1034,6 +1119,12 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
if (MLX5_CAP_MCAM_REG(mdev, mtutc))
mlx5_init_timer_max_freq_adjustment(mdev);
+#ifdef CONFIG_X86
+ if (MLX5_CAP_MCAM_REG3(mdev, mtptm) &&
+ MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART))
+ clock->ptp_info.getcrosststamp = mlx5_ptp_getcrosststamp;
+#endif /* CONFIG_X86 */
+
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
mlx5_init_overflow_period(clock);
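
mlx5_mtctr_syncdevicetime() reads the host ART counter and the device clock in a single register access, and get_device_system_crosststamp() turns that pair into a correlated (system time, device time) sample. A userspace analogy of how such a sample is consumed, with hypothetical nanosecond values:

/* Hypothetical clock readings taken "at the same instant". */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sys_ns = 1000000000ULL;	/* system clock at snapshot */
	uint64_t dev_ns = 1000000700ULL;	/* device clock at snapshot */
	int64_t offset = (int64_t)(dev_ns - sys_ns);

	/* A later device timestamp, converted into system time. */
	uint64_t dev_event = 2000000700ULL;

	printf("event in system time: %llu ns\n",
	       (unsigned long long)(dev_event - offset));
	return 0;
}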
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 5b7e6f4b5c7e..4b88d969a66c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -923,6 +923,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
}
mlx5_pci_vsc_init(dev);
+
+ err = pci_enable_ptm(pdev, NULL);
+ if (err)
+ mlx5_core_info(dev, "PTM is not supported by PCIe\n");
+
return 0;
err_clr_master:
@@ -939,6 +944,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev)
* before removing the pci bars
*/
mlx5_drain_health_wq(dev);
+ pci_disable_ptm(dev->pdev);
iounmap(dev->iseg);
release_bar(dev->pdev);
mlx5_pci_disable_device(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d894a88fa9f2..972e8e9df585 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -608,6 +608,11 @@ enum {
RELEASE_ALL_PAGES_MASK = 0x4000,
};
+/* This limit is based on a capability of the firmware: it cannot release
+ * more than 50000 pages back to the host in one go.
+ */
+#define MAX_RECLAIM_NPAGES (-50000)
+
static int req_pages_handler(struct notifier_block *nb,
unsigned long type, void *data)
{
@@ -639,7 +644,16 @@ static int req_pages_handler(struct notifier_block *nb,
req->dev = dev;
req->func_id = func_id;
- req->npages = npages;
+
+ /* npages > 0 means the HCA is asking the host to allocate/give pages,
+ * npages < 0 means the HCA is asking the host to reclaim pages it
+ * handed out earlier. Here we restrict the maximum number of pages
+ * that can be reclaimed in one go to MAX_RECLAIM_NPAGES. Note that
+ * MAX_RECLAIM_NPAGES is a negative value, so max() (and not min())
+ * is the right way to bound req->npages.
+ */
+ req->npages = max_t(s32, npages, MAX_RECLAIM_NPAGES);
req->ec_function = ec_function;
req->release_all = release_all;
INIT_WORK(&req->work, pages_work_handler);
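
A standalone demo of the clamping above: since MAX_RECLAIM_NPAGES is negative, max() (not min()) bounds a reclaim request. max_s32() below is a stand-in for the kernel's max_t(s32, ...), and the request values are hypothetical:

#include <stdio.h>

#define MAX_RECLAIM_NPAGES (-50000)

static int max_s32(int a, int b)	/* stand-in for max_t(s32, ...) */
{
	return a > b ? a : b;
}

int main(void)
{
	int requests[] = { 4096, -1000, -80000 };	/* hypothetical */
	int i;

	for (i = 0; i < 3; i++)
		printf("npages %6d -> clamped %6d\n", requests[i],
		       max_s32(requests[i], MAX_RECLAIM_NPAGES));
	/* 4096 -> 4096, -1000 -> -1000, -80000 -> -50000 */
	return 0;
}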
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index d61478c0c632..303d2ce4dc1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -411,7 +411,7 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = {
static int
mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
{
- char tz_name[THERMAL_NAME_LENGTH];
+ char tz_name[40];
int err;
if (module_tz->slot_index)
@@ -445,17 +445,13 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev)
thermal_zone_device_unregister(tzdev);
}
-static void
-mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal *thermal,
+static int
+mlxsw_thermal_module_init(struct mlxsw_thermal *thermal,
struct mlxsw_thermal_area *area, u8 module)
{
struct mlxsw_thermal_module *module_tz;
module_tz = &area->tz_module_arr[module];
- /* Skip if parent is already set (case of port split). */
- if (module_tz->parent)
- return;
module_tz->module = module;
module_tz->slot_index = area->slot_index;
module_tz->parent = thermal;
@@ -465,15 +461,13 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
sizeof(thermal->trips));
memcpy(module_tz->cooling_states, default_cooling_states,
sizeof(thermal->cooling_states));
+
+ return mlxsw_thermal_module_tz_init(module_tz);
}
static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
{
- if (module_tz && module_tz->tzdev) {
- mlxsw_thermal_module_tz_fini(module_tz->tzdev);
- module_tz->tzdev = NULL;
- module_tz->parent = NULL;
- }
+ mlxsw_thermal_module_tz_fini(module_tz->tzdev);
}
static int
@@ -481,7 +475,6 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal *thermal,
struct mlxsw_thermal_area *area)
{
- struct mlxsw_thermal_module *module_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
int i, err;
@@ -503,22 +496,16 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core,
if (!area->tz_module_arr)
return -ENOMEM;
- for (i = 0; i < area->tz_module_num; i++)
- mlxsw_thermal_module_init(dev, core, thermal, area, i);
-
for (i = 0; i < area->tz_module_num; i++) {
- module_tz = &area->tz_module_arr[i];
- if (!module_tz->parent)
- continue;
- err = mlxsw_thermal_module_tz_init(module_tz);
+ err = mlxsw_thermal_module_init(thermal, area, i);
if (err)
- goto err_thermal_module_tz_init;
+ goto err_thermal_module_init;
}
return 0;
-err_thermal_module_tz_init:
- for (i = area->tz_module_num - 1; i >= 0; i--)
+err_thermal_module_init:
+ for (i--; i >= 0; i--)
mlxsw_thermal_module_fini(&area->tz_module_arr[i]);
kfree(area->tz_module_arr);
return err;
@@ -821,10 +808,7 @@ err_linecards_event_ops_register:
err_thermal_gearboxes_init:
mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
err_thermal_modules_init:
- if (thermal->tzdev) {
- thermal_zone_device_unregister(thermal->tzdev);
- thermal->tzdev = NULL;
- }
+ thermal_zone_device_unregister(thermal->tzdev);
err_thermal_zone_device_register:
err_thermal_cooling_device_register:
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
@@ -845,10 +829,7 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
thermal);
mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]);
mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]);
- if (thermal->tzdev) {
- thermal_zone_device_unregister(thermal->tzdev);
- thermal->tzdev = NULL;
- }
+ thermal_zone_device_unregister(thermal->tzdev);
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
thermal_cooling_device_unregister(thermal->cdevs[i].cdev);
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
index c002ede36402..85519690b837 100644
--- a/drivers/net/ethernet/meta/Kconfig
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -23,6 +23,8 @@ config FBNIC
depends on !S390
depends on MAX_SKB_FRAGS < 22
depends on PCI_MSI
+ select NET_DEVLINK
+ select PAGE_POOL
select PHYLINK
help
This driver supports Meta Platforms Host Network Interface.
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index b7ce6da68543..571374361259 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -4,6 +4,7 @@
#include <linux/etherdevice.h>
#include <linux/ipv6.h>
#include <linux/types.h>
+#include <net/netdev_queues.h>
#include "fbnic.h"
#include "fbnic_netdev.h"
@@ -316,6 +317,74 @@ void fbnic_clear_rx_mode(struct net_device *netdev)
__dev_mc_unsync(netdev, NULL);
}
+static void fbnic_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats64)
+{
+ u64 tx_bytes, tx_packets, tx_dropped = 0;
+ u64 rx_bytes, rx_packets, rx_dropped = 0;
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_queue_stats *stats;
+ unsigned int start, i;
+
+ stats = &fbn->tx_stats;
+
+ tx_bytes = stats->bytes;
+ tx_packets = stats->packets;
+ tx_dropped = stats->dropped;
+
+ stats64->tx_bytes = tx_bytes;
+ stats64->tx_packets = tx_packets;
+ stats64->tx_dropped = tx_dropped;
+
+ for (i = 0; i < fbn->num_tx_queues; i++) {
+ struct fbnic_ring *txr = fbn->tx[i];
+
+ if (!txr)
+ continue;
+
+ stats = &txr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tx_bytes = stats->bytes;
+ tx_packets = stats->packets;
+ tx_dropped = stats->dropped;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats64->tx_bytes += tx_bytes;
+ stats64->tx_packets += tx_packets;
+ stats64->tx_dropped += tx_dropped;
+ }
+
+ stats = &fbn->rx_stats;
+
+ rx_bytes = stats->bytes;
+ rx_packets = stats->packets;
+ rx_dropped = stats->dropped;
+
+ stats64->rx_bytes = rx_bytes;
+ stats64->rx_packets = rx_packets;
+ stats64->rx_dropped = rx_dropped;
+
+ for (i = 0; i < fbn->num_rx_queues; i++) {
+ struct fbnic_ring *rxr = fbn->rx[i];
+
+ if (!rxr)
+ continue;
+
+ stats = &rxr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ rx_bytes = stats->bytes;
+ rx_packets = stats->packets;
+ rx_dropped = stats->dropped;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats64->rx_bytes += rx_bytes;
+ stats64->rx_packets += rx_packets;
+ stats64->rx_dropped += rx_dropped;
+ }
+}
+
static const struct net_device_ops fbnic_netdev_ops = {
.ndo_open = fbnic_open,
.ndo_stop = fbnic_stop,
@@ -324,6 +393,72 @@ static const struct net_device_ops fbnic_netdev_ops = {
.ndo_features_check = fbnic_features_check,
.ndo_set_mac_address = fbnic_set_mac,
.ndo_set_rx_mode = fbnic_set_rx_mode,
+ .ndo_get_stats64 = fbnic_get_stats64,
+};
+
+static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *rx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_ring *rxr = fbn->rx[idx];
+ struct fbnic_queue_stats *stats;
+ unsigned int start;
+ u64 bytes, packets;
+
+ if (!rxr)
+ return;
+
+ stats = &rxr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ bytes = stats->bytes;
+ packets = stats->packets;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ rx->bytes = bytes;
+ rx->packets = packets;
+}
+
+static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_ring *txr = fbn->tx[idx];
+ struct fbnic_queue_stats *stats;
+ unsigned int start;
+ u64 bytes, packets;
+
+ if (!txr)
+ return;
+
+ stats = &txr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ bytes = stats->bytes;
+ packets = stats->packets;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ tx->bytes = bytes;
+ tx->packets = packets;
+}
+
+static void fbnic_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+
+ tx->bytes = fbn->tx_stats.bytes;
+ tx->packets = fbn->tx_stats.packets;
+
+ rx->bytes = fbn->rx_stats.bytes;
+ rx->packets = fbn->rx_stats.packets;
+}
+
+static const struct netdev_stat_ops fbnic_stat_ops = {
+ .get_queue_stats_rx = fbnic_get_queue_stats_rx,
+ .get_queue_stats_tx = fbnic_get_queue_stats_tx,
+ .get_base_stats = fbnic_get_base_stats,
};
void fbnic_reset_queues(struct fbnic_net *fbn,
@@ -384,6 +519,7 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbd->netdev = netdev;
netdev->netdev_ops = &fbnic_netdev_ops;
+ netdev->stat_ops = &fbnic_stat_ops;
fbn = netdev_priv(netdev);
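
The stats readers above all use the u64_stats_fetch_begin()/u64_stats_fetch_retry() loop so a 64-bit {packets, bytes} pair is read consistently without blocking the datapath writer. A simplified userspace sketch of that seqcount-style retry loop (not the kernel primitives themselves):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
	atomic_uint seq;	/* odd while a writer is mid-update */
	uint64_t packets, bytes;
};

static void read_stats(struct stats *s, uint64_t *p, uint64_t *b)
{
	unsigned int start;

	do {
		while ((start = atomic_load(&s->seq)) & 1)
			;	/* writer in progress, spin */
		*p = s->packets;
		*b = s->bytes;
	} while (atomic_load(&s->seq) != start);	/* retry if it moved */
}

int main(void)
{
	struct stats s = { .packets = 10, .bytes = 1500 };
	uint64_t p, b;

	read_stats(&s, &p, &b);
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}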
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index 6bc0ebeb8182..60199e634468 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -40,6 +40,9 @@ struct fbnic_net {
u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];
u32 rss_flow_hash[FBNIC_NUM_HASH_OPT];
+ /* Storage for stats after ring destruction */
+ struct fbnic_queue_stats tx_stats;
+ struct fbnic_queue_stats rx_stats;
u64 link_down_events;
struct list_head napis;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index 0ed4c9fff5d8..4d0406af297f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -273,6 +273,9 @@ fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
err_free:
dev_kfree_skb_any(skb);
err_count:
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped++;
+ u64_stats_update_end(&ring->stats.syncp);
return NETDEV_TX_OK;
}
@@ -363,10 +366,19 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
txq = txring_txq(nv->napi.dev, ring);
if (unlikely(discard)) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+
netdev_tx_completed_queue(txq, total_packets, total_bytes);
return;
}
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.bytes += total_bytes;
+ ring->stats.packets += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+
netif_txq_completed_wake(txq, total_packets, total_bytes,
fbnic_desc_unused(ring),
FBNIC_TX_DESC_WAKEUP);
@@ -730,12 +742,12 @@ static bool fbnic_rcd_metadata_err(u64 rcd)
static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt, int budget)
{
+ unsigned int packets = 0, bytes = 0, dropped = 0;
struct fbnic_ring *rcq = &qt->cmpl;
struct fbnic_pkt_buff *pkt;
s32 head0 = -1, head1 = -1;
__le64 *raw_rcd, done;
u32 head = rcq->head;
- u64 packets = 0;
done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0;
raw_rcd = &rcq->desc[head & rcq->size_mask];
@@ -780,9 +792,11 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
fbnic_populate_skb_fields(nv, rcd, skb, qt);
packets++;
+ bytes += skb->len;
napi_gro_receive(&nv->napi, skb);
} else {
+ dropped++;
fbnic_put_pkt_buff(nv, pkt, 1);
}
@@ -799,6 +813,14 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
}
}
+ u64_stats_update_begin(&rcq->stats.syncp);
+ rcq->stats.packets += packets;
+ rcq->stats.bytes += bytes;
+ /* Re-add Ethernet header length (removed in fbnic_build_skb) */
+ rcq->stats.bytes += ETH_HLEN * packets;
+ rcq->stats.dropped += dropped;
+ u64_stats_update_end(&rcq->stats.syncp);
+
/* Unmap and free processed buffers */
if (head0 >= 0)
fbnic_clean_bdq(nv, budget, &qt->sub0, head0);
@@ -865,12 +887,36 @@ static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ struct fbnic_queue_stats *stats = &rxr->stats;
+
+ /* Capture stats from queues before dissasociating them */
+ fbn->rx_stats.bytes += stats->bytes;
+ fbn->rx_stats.packets += stats->packets;
+ fbn->rx_stats.dropped += stats->dropped;
+}
+
+static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ struct fbnic_queue_stats *stats = &txr->stats;
+
+ /* Capture stats from queues before disassociating them */
+ fbn->tx_stats.bytes += stats->bytes;
+ fbn->tx_stats.packets += stats->packets;
+ fbn->tx_stats.dropped += stats->dropped;
+}
+
static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
struct fbnic_ring *txr)
{
if (!(txr->flags & FBNIC_RING_F_STATS))
return;
+ fbnic_aggregate_ring_tx_counters(fbn, txr);
+
/* Remove pointer to the Tx ring */
WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr);
fbn->tx[txr->q_idx] = NULL;
@@ -882,6 +928,8 @@ static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
if (!(rxr->flags & FBNIC_RING_F_STATS))
return;
+ fbnic_aggregate_ring_rx_counters(fbn, rxr);
+
/* Remove pointer to the Rx ring */
WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr);
fbn->rx[rxr->q_idx] = NULL;
@@ -974,6 +1022,7 @@ static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
int q_idx, u8 flags)
{
+ u64_stats_init(&ring->stats.syncp);
ring->doorbell = doorbell;
ring->q_idx = q_idx;
ring->flags = flags;
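
fbnic_aggregate_ring_{rx,tx}_counters() implement an aggregate-before-teardown pattern: per-ring counters are folded into the device-level fbn->{rx,tx}_stats when a ring is removed, so totals survive queue reconfiguration. A minimal sketch of the pattern with simplified types:

#include <stdint.h>
#include <stdio.h>

struct counters {
	uint64_t packets, bytes, dropped;
};

/* Fold a dying ring's counters into the device-wide accumulator. */
static void aggregate(struct counters *total, const struct counters *ring)
{
	total->packets += ring->packets;
	total->bytes += ring->bytes;
	total->dropped += ring->dropped;
}

int main(void)
{
	struct counters dev_total = { 0 };
	struct counters ring = { 100, 64000, 2 };	/* hypothetical */

	aggregate(&dev_total, &ring);	/* called from the ring-remove path */
	printf("%llu packets survive the ring teardown\n",
	       (unsigned long long)dev_total.packets);
	return 0;
}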
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 4a206c0e7192..2f91f68d11d5 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
#include <net/xdp.h>
struct fbnic_net;
@@ -51,6 +52,13 @@ struct fbnic_pkt_buff {
u16 nr_frags;
};
+struct fbnic_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 dropped;
+ struct u64_stats_sync syncp;
+};
+
/* Pagecnt bias is long max to reserve the last bit to catch overflow
* cases where if we overcharge the bias it will flip over to be negative.
*/
@@ -77,6 +85,8 @@ struct fbnic_ring {
u32 head, tail; /* Head/Tail of ring */
+ struct fbnic_queue_stats stats;
+
/* Slow path fields follow */
dma_addr_t dma; /* Phys addr of descriptor memory */
size_t size; /* Size of descriptor ring in memory */
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 39f56973746d..3e865985340e 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -511,7 +511,7 @@ static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
}
/* Release pre-allocated RX buffers */
-static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
+void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
{
struct device *dev;
int i;
@@ -608,7 +608,7 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
*datasize = mtu + ETH_HLEN;
}
-static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
+int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
{
struct device *dev;
struct page *page;
@@ -622,7 +622,7 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
dev = mpc->ac->gdma_dev->gdma_context->dev;
- num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;
+ num_rxb = mpc->num_queues * mpc->rx_queue_size;
WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
@@ -1909,15 +1909,17 @@ static int mana_create_txq(struct mana_port_context *apc,
return -ENOMEM;
/* The minimum size of the WQE is 32 bytes, hence
- * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
+ * apc->tx_queue_size represents the maximum number of WQEs
* the SQ can store. This value is then used to size other queues
* to prevent overflow.
+ * Also note that txq_size is always MANA_PAGE_ALIGNED: the minimum
+ * value of apc->tx_queue_size is 128, which makes txq_size
+ * 128 * 32 = 4096, and all larger values of apc->tx_queue_size are
+ * powers of two.
*/
- txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
- BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size));
+ txq_size = apc->tx_queue_size * 32;
- cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
- cq_size = MANA_PAGE_ALIGN(cq_size);
+ cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
gc = gd->gdma_context;
@@ -2155,10 +2157,11 @@ static int mana_push_wqe(struct mana_rxq *rxq)
static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
{
+ struct mana_port_context *mpc = netdev_priv(rxq->ndev);
struct page_pool_params pprm = {};
int ret;
- pprm.pool_size = RX_BUFFERS_PER_QUEUE;
+ pprm.pool_size = mpc->rx_queue_size;
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
pprm.netdev = rxq->ndev;
@@ -2190,13 +2193,13 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc = gd->gdma_context;
- rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
+ rxq = kzalloc(struct_size(rxq, rx_oobs, apc->rx_queue_size),
GFP_KERNEL);
if (!rxq)
return NULL;
rxq->ndev = ndev;
- rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
+ rxq->num_rx_buf = apc->rx_queue_size;
rxq->rxq_idx = rxq_idx;
rxq->rxobj = INVALID_MANA_HANDLE;
@@ -2744,6 +2747,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
apc->ndev = ndev;
apc->max_queues = gc->max_num_queues;
apc->num_queues = gc->max_num_queues;
+ apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE;
+ apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE;
apc->port_handle = INVALID_MANA_HANDLE;
apc->pf_filter_handle = INVALID_MANA_HANDLE;
apc->port_idx = port_idx;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 146d5db1792f..d6a35fbda447 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -369,6 +369,78 @@ static int mana_set_channels(struct net_device *ndev,
return err;
}
+static void mana_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+
+ ring->rx_pending = apc->rx_queue_size;
+ ring->tx_pending = apc->tx_queue_size;
+ ring->rx_max_pending = MAX_RX_BUFFERS_PER_QUEUE;
+ ring->tx_max_pending = MAX_TX_BUFFERS_PER_QUEUE;
+}
+
+static int mana_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ u32 new_tx, new_rx;
+ u32 old_tx, old_rx;
+ int err;
+
+ old_tx = apc->tx_queue_size;
+ old_rx = apc->rx_queue_size;
+
+ if (ring->tx_pending < MIN_TX_BUFFERS_PER_QUEUE) {
+ NL_SET_ERR_MSG_FMT(extack, "tx:%d less than the min:%d", ring->tx_pending,
+ MIN_TX_BUFFERS_PER_QUEUE);
+ return -EINVAL;
+ }
+
+ if (ring->rx_pending < MIN_RX_BUFFERS_PER_QUEUE) {
+ NL_SET_ERR_MSG_FMT(extack, "rx:%d less than the min:%d", ring->rx_pending,
+ MIN_RX_BUFFERS_PER_QUEUE);
+ return -EINVAL;
+ }
+
+ new_rx = roundup_pow_of_two(ring->rx_pending);
+ new_tx = roundup_pow_of_two(ring->tx_pending);
+ netdev_info(ndev, "Using nearest power of 2 values for Txq:%d Rxq:%d\n",
+ new_tx, new_rx);
+
+ /* Pre-allocate new buffers to prevent failures in mana_attach() later */
+ apc->rx_queue_size = new_rx;
+ err = mana_pre_alloc_rxbufs(apc, ndev->mtu);
+ apc->rx_queue_size = old_rx;
+ if (err) {
+ netdev_err(ndev, "Insufficient memory for new allocations\n");
+ return err;
+ }
+
+ err = mana_detach(ndev, false);
+ if (err) {
+ netdev_err(ndev, "mana_detach failed: %d\n", err);
+ goto out;
+ }
+
+ apc->tx_queue_size = new_tx;
+ apc->rx_queue_size = new_rx;
+
+ err = mana_attach(ndev);
+ if (err) {
+ netdev_err(ndev, "mana_attach failed: %d\n", err);
+ apc->tx_queue_size = old_tx;
+ apc->rx_queue_size = old_rx;
+ }
+out:
+ mana_pre_dealloc_rxbufs(apc);
+ return err;
+}
+
const struct ethtool_ops mana_ethtool_ops = {
.get_ethtool_stats = mana_get_ethtool_stats,
.get_sset_count = mana_get_sset_count,
@@ -380,4 +452,6 @@ const struct ethtool_ops mana_ethtool_ops = {
.set_rxfh = mana_set_rxfh,
.get_channels = mana_get_channels,
.set_channels = mana_set_channels,
+ .get_ringparam = mana_get_ringparam,
+ .set_ringparam = mana_set_ringparam,
};
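
mana_set_ringparam() rounds the requested ring sizes up to powers of two before applying them. A small demo of that rounding (roundup_pow2() below is a stand-in for the kernel's roundup_pow_of_two(); the request values are hypothetical):

#include <stdio.h>

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int reqs[] = { 128, 500, 1024, 1500 };	/* hypothetical */
	int i;

	for (i = 0; i < 4; i++)
		printf("requested %4u -> used %4u\n", reqs[i],
		       roundup_pow2(reqs[i]));
	return 0;
}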
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index df2ab5cbd49b..3a02eef58cc6 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -4537,8 +4537,8 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
u64 *prog;
int err;
- prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
- GFP_KERNEL);
+ prog = kmemdup_array(nfp_prog->prog, nfp_prog->prog_len, sizeof(u64),
+ GFP_KERNEL);
if (!prog)
return ERR_PTR(-ENOMEM);
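
kmemdup_array() is preferred over an open-coded n * size kmemdup() because the array helper checks the element-count multiplication for overflow before allocating. A userspace sketch of the guard (dup_array() is a hypothetical stand-in, not the kernel API):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate an array of n elements, refusing requests whose byte count
 * would overflow size_t -- the check kmemdup_array() adds over kmemdup().
 */
static void *dup_array(const void *src, size_t n, size_t size)
{
	void *p;

	if (size && n > SIZE_MAX / size)
		return NULL;
	p = malloc(n * size);
	if (p)
		memcpy(p, src, n * size);
	return p;
}

int main(void)
{
	uint64_t prog[4] = { 1, 2, 3, 4 };
	void *copy = dup_array(prog, 4, sizeof(prog[0]));

	free(copy);
	return 0;
}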
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
index 2dd37557185e..7276e44a21d0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -41,6 +41,8 @@ struct nfp_dump_tl {
);
char data[];
};
+static_assert(offsetof(struct nfp_dump_tl, data) == sizeof(struct nfp_dump_tl_hdr),
+ "struct member likely outside of struct_group_tagged()");
/* NFP CPP parameters */
struct nfp_dumpspec_cpp_isl_id {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index ed24d6af7487..9cff0a8ffb2c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -3185,8 +3185,7 @@ netxen_list_config_ip(struct netxen_adapter *adapter,
struct list_head *head;
bool ret = false;
- dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
-
+ dev = ifa->ifa_dev->dev;
if (dev == NULL)
goto out;
@@ -3379,7 +3378,7 @@ netxen_inetaddr_event(struct notifier_block *this,
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
unsigned long ip_event;
- dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+ dev = ifa->ifa_dev->dev;
ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
recheck:
if (dev == NULL)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 90df4a0909fa..b3588a1ebc25 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -4146,7 +4146,7 @@ qlcnic_inetaddr_event(struct notifier_block *this,
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
- dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+ dev = ifa->ifa_dev->dev;
recheck:
if (dev == NULL)
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 896ffca4aee2..5c2551369812 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -37,7 +37,6 @@ ef100_ethtool_get_ringparam(struct net_device *net_dev,
/* Ethtool options available
*/
const struct ethtool_ops ef100_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.get_drvinfo = efx_ethtool_get_drvinfo,
.get_msglevel = efx_ethtool_get_msglevel,
.set_msglevel = efx_ethtool_set_msglevel,
@@ -59,6 +58,7 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_per_ctx_key = true,
.rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 7c887160e2ef..848b1923133a 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -240,7 +240,6 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
}
const struct ethtool_ops efx_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -268,6 +267,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_per_ctx_key = true,
.rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index cd36ff4da68c..684489156dce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -29,6 +29,7 @@
/* Synopsys Core versions */
#define DWMAC_CORE_3_40 0x34
#define DWMAC_CORE_3_50 0x35
+#define DWMAC_CORE_3_70 0x37
#define DWMAC_CORE_4_00 0x40
#define DWMAC_CORE_4_10 0x41
#define DWMAC_CORE_5_00 0x50
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 9e40c28d453a..bfe6e2d631bd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -8,15 +8,90 @@
#include <linux/device.h>
#include <linux/of_irq.h>
#include "stmmac.h"
+#include "dwmac_dma.h"
+#include "dwmac1000.h"
+
+/* Normal Loongson Tx Summary */
+#define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000
+/* Normal Loongson Rx Summary */
+#define DMA_INTR_ENA_NIE_RX_LOONGSON 0x00020000
+
+#define DMA_INTR_NORMAL_LOONGSON (DMA_INTR_ENA_NIE_TX_LOONGSON | \
+ DMA_INTR_ENA_NIE_RX_LOONGSON | \
+ DMA_INTR_ENA_RIE | DMA_INTR_ENA_TIE)
+
+/* Abnormal Loongson Tx Summary */
+#define DMA_INTR_ENA_AIE_TX_LOONGSON 0x00010000
+/* Abnormal Loongson Rx Summary */
+#define DMA_INTR_ENA_AIE_RX_LOONGSON 0x00008000
+
+#define DMA_INTR_ABNORMAL_LOONGSON (DMA_INTR_ENA_AIE_TX_LOONGSON | \
+ DMA_INTR_ENA_AIE_RX_LOONGSON | \
+ DMA_INTR_ENA_FBE | DMA_INTR_ENA_UNE)
+
+#define DMA_INTR_DEFAULT_MASK_LOONGSON (DMA_INTR_NORMAL_LOONGSON | \
+ DMA_INTR_ABNORMAL_LOONGSON)
+
+/* Normal Loongson Tx Interrupt Summary */
+#define DMA_STATUS_NIS_TX_LOONGSON 0x00040000
+/* Normal Loongson Rx Interrupt Summary */
+#define DMA_STATUS_NIS_RX_LOONGSON 0x00020000
+
+/* Abnormal Loongson Tx Interrupt Summary */
+#define DMA_STATUS_AIS_TX_LOONGSON 0x00010000
+/* Abnormal Loongson Rx Interrupt Summary */
+#define DMA_STATUS_AIS_RX_LOONGSON 0x00008000
+
+/* Fatal Loongson Tx Bus Error Interrupt */
+#define DMA_STATUS_FBI_TX_LOONGSON 0x00002000
+/* Fatal Loongson Rx Bus Error Interrupt */
+#define DMA_STATUS_FBI_RX_LOONGSON 0x00001000
+
+#define DMA_STATUS_MSK_COMMON_LOONGSON (DMA_STATUS_NIS_TX_LOONGSON | \
+ DMA_STATUS_NIS_RX_LOONGSON | \
+ DMA_STATUS_AIS_TX_LOONGSON | \
+ DMA_STATUS_AIS_RX_LOONGSON | \
+ DMA_STATUS_FBI_TX_LOONGSON | \
+ DMA_STATUS_FBI_RX_LOONGSON)
+
+#define DMA_STATUS_MSK_RX_LOONGSON (DMA_STATUS_ERI | DMA_STATUS_RWT | \
+ DMA_STATUS_RPS | DMA_STATUS_RU | \
+ DMA_STATUS_RI | DMA_STATUS_OVF | \
+ DMA_STATUS_MSK_COMMON_LOONGSON)
+
+#define DMA_STATUS_MSK_TX_LOONGSON (DMA_STATUS_ETI | DMA_STATUS_UNF | \
+ DMA_STATUS_TJT | DMA_STATUS_TU | \
+ DMA_STATUS_TPS | DMA_STATUS_TI | \
+ DMA_STATUS_MSK_COMMON_LOONGSON)
+
+#define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03
+#define PCI_DEVICE_ID_LOONGSON_GNET 0x7a13
+#define DWMAC_CORE_LS_MULTICHAN 0x10 /* Loongson custom ID */
+#define CHANNEL_NUM 8
+
+struct loongson_data {
+ u32 loongson_id;
+ struct device *dev;
+};
+
+struct stmmac_pci_info {
+ int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
+};
-static int loongson_default_data(struct plat_stmmacenet_data *plat)
+static void loongson_default_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
{
+ /* Get the default bus_id; it may be overwritten later */
+ plat->bus_id = pci_dev_id(pdev);
+
plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
/* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
+ plat->multicast_filter_bins = 256;
+
+ plat->mac_interface = PHY_INTERFACE_MODE_NA;
/* Set default value for unicast filter entries */
plat->unicast_filter_entries = 1;
@@ -24,10 +99,6 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat)
/* Set the maxmtu to a default of JUMBO_LEN */
plat->maxmtu = JUMBO_LEN;
- /* Set default number of RX and TX queues to use */
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
-
/* Disable Priority config by default */
plat->tx_queues_cfg[0].use_prio = false;
plat->rx_queues_cfg[0].use_prio = false;
@@ -35,30 +106,424 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat)
/* Disable RX queues routing by default */
plat->rx_queues_cfg[0].pkt_route = 0x0;
+ plat->clk_ref_rate = 125000000;
+ plat->clk_ptp_rate = 125000000;
+
/* Default to phy auto-detection */
plat->phy_addr = -1;
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
+}
+
+static int loongson_gmac_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct loongson_data *ld;
+ int i;
+
+ ld = plat->bsp_priv;
+
+ loongson_default_data(pdev, plat);
+
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
+ plat->rx_queues_to_use = CHANNEL_NUM;
+ plat->tx_queues_to_use = CHANNEL_NUM;
+
+ /* Only channel 0 supports checksum offload, so turn it off
+ * on the remaining channels to enable multi-channel operation.
+ */
+ for (i = 1; i < CHANNEL_NUM; i++)
+ plat->tx_queues_cfg[i].coe_unsupported = 1;
+ } else {
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
+ }
+
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
- plat->multicast_filter_bins = 256;
return 0;
}
-static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static struct stmmac_pci_info loongson_gmac_pci_info = {
+ .setup = loongson_gmac_data,
+};
+
+static void loongson_gnet_fix_speed(void *priv, unsigned int speed,
+ unsigned int mode)
{
- struct plat_stmmacenet_data *plat;
- struct stmmac_resources res;
- struct device_node *np;
- int ret, i, phy_mode;
+ struct loongson_data *ld = (struct loongson_data *)priv;
+ struct net_device *ndev = dev_get_drvdata(ld->dev);
+ struct stmmac_priv *ptr = netdev_priv(ndev);
+
+ /* The integrated PHY has a problem switching from the low speeds
+ * to 1000Mbps mode: the speed-up procedure requires a PHY link
+ * re-negotiation.
+ */
+ if (speed == SPEED_1000) {
+ if (readl(ptr->ioaddr + MAC_CTRL_REG) &
+ GMAC_CONTROL_PS)
+ /* Work around the hardware bug: restart autoneg */
+ phy_restart_aneg(ndev->phydev);
+ }
+}
- np = dev_of_node(&pdev->dev);
+static int loongson_gnet_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct loongson_data *ld;
+ int i;
- if (!np) {
- pr_info("dwmac_loongson_pci: No OF node\n");
- return -ENODEV;
+ ld = plat->bsp_priv;
+
+ loongson_default_data(pdev, plat);
+
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
+ plat->rx_queues_to_use = CHANNEL_NUM;
+ plat->tx_queues_to_use = CHANNEL_NUM;
+
+ /* Only channel 0 supports checksum offload, so turn it off
+ * on the remaining channels to enable multi-channel operation.
+ */
+ for (i = 1; i < CHANNEL_NUM; i++)
+ plat->tx_queues_cfg[i].coe_unsupported = 1;
+ } else {
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
}
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;
+ plat->mdio_bus_data->phy_mask = ~(u32)BIT(2);
+ plat->fix_mac_speed = loongson_gnet_fix_speed;
+
+ return 0;
+}
+
+static struct stmmac_pci_info loongson_gnet_pci_info = {
+ .setup = loongson_gnet_data,
+};
+
+static void loongson_dwmac_dma_init_channel(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ u32 chan)
+{
+ int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+ int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 value;
+
+ value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+ if (dma_cfg->pblx8)
+ value |= DMA_BUS_MODE_MAXPBL;
+
+ value |= DMA_BUS_MODE_USP;
+ value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
+ value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+ value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+
+ /* Set the Fixed burst mode */
+ if (dma_cfg->fixed_burst)
+ value |= DMA_BUS_MODE_FB;
+
+ /* Mixed Burst has no effect when fb is set */
+ if (dma_cfg->mixed_burst)
+ value |= DMA_BUS_MODE_MB;
+
+ if (dma_cfg->atds)
+ value |= DMA_BUS_MODE_ATDS;
+
+ if (dma_cfg->aal)
+ value |= DMA_BUS_MODE_AAL;
+
+ writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK_LOONGSON, ioaddr +
+ DMA_CHAN_INTR_ENA(chan));
+}
+
+static int loongson_dwmac_dma_interrupt(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
+ struct stmmac_extra_stats *x,
+ u32 chan, u32 dir)
+{
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
+ u32 abnor_intr_status;
+ u32 nor_intr_status;
+ u32 fb_intr_status;
+ u32 intr_status;
+ int ret = 0;
+
+ /* read the status register (CSR5) */
+ intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
+
+ if (dir == DMA_DIR_RX)
+ intr_status &= DMA_STATUS_MSK_RX_LOONGSON;
+ else if (dir == DMA_DIR_TX)
+ intr_status &= DMA_STATUS_MSK_TX_LOONGSON;
+
+ nor_intr_status = intr_status & (DMA_STATUS_NIS_TX_LOONGSON |
+ DMA_STATUS_NIS_RX_LOONGSON);
+ abnor_intr_status = intr_status & (DMA_STATUS_AIS_TX_LOONGSON |
+ DMA_STATUS_AIS_RX_LOONGSON);
+ fb_intr_status = intr_status & (DMA_STATUS_FBI_TX_LOONGSON |
+ DMA_STATUS_FBI_RX_LOONGSON);
+
+ /* ABNORMAL interrupts */
+ if (unlikely(abnor_intr_status)) {
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ ret = tx_hard_error_bump_tc;
+ x->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT))
+ x->tx_jabber_irq++;
+ if (unlikely(intr_status & DMA_STATUS_OVF))
+ x->rx_overflow_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RU))
+ x->rx_buf_unav_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RPS))
+ x->rx_process_stopped_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RWT))
+ x->rx_watchdog_irq++;
+ if (unlikely(intr_status & DMA_STATUS_ETI))
+ x->tx_early_irq++;
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(fb_intr_status)) {
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (likely(nor_intr_status)) {
+ if (likely(intr_status & DMA_STATUS_RI)) {
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+ /* to schedule NAPI on real RIE event. */
+ if (likely(value & DMA_INTR_ENA_RIE)) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & DMA_STATUS_TI)) {
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
+ ret |= handle_tx;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ERI))
+ x->rx_early_irq++;
+ }
+ /* Optional hardware blocks, interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_warn("%s: unexpected status %08x\n", __func__, intr_status);
+
+ /* Clear the interrupt by writing a logic 1 to the CSR5[19-0] */
+ writel((intr_status & 0x7ffff), ioaddr + DMA_CHAN_STATUS(chan));
+
+ return ret;
+}
+
+static struct mac_device_info *loongson_dwmac_setup(void *apriv)
+{
+ struct stmmac_priv *priv = apriv;
+ struct mac_device_info *mac;
+ struct stmmac_dma_ops *dma;
+ struct loongson_data *ld;
+ struct pci_dev *pdev;
+
+ ld = priv->plat->bsp_priv;
+ pdev = to_pci_dev(priv->device);
+
+ mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ dma = devm_kzalloc(priv->device, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return NULL;
+
+ /* The Loongson GMAC and GNET devices are based on the DW GMAC
+ * v3.50a and v3.73a IP-cores. The HW designers changed the
+ * GMAC_VERSION.SNPSVER field to the custom value 0x10 on the
+ * network controllers that have the multi-channel feature, to
+ * emphasize the differences: multiple DMA channels, the AV
+ * feature and the GMAC_INT_STATUS CSR flags layout. Restore the
+ * original value here so the correct HW interface is selected.
+ */
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
+ priv->synopsys_id = DWMAC_CORE_3_70;
+ *dma = dwmac1000_dma_ops;
+ dma->init_chan = loongson_dwmac_dma_init_channel;
+ dma->dma_interrupt = loongson_dwmac_dma_interrupt;
+ mac->dma = dma;
+ }
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Pre-initialize the respective "mac" fields as it's done in
+ * dwmac1000_setup()
+ */
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+ /* The Loongson GMAC doesn't support flow control. The LS2K2000
+ * GNET doesn't support half-duplex link modes.
+ */
+ if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) {
+ mac->link.caps = MAC_10 | MAC_100 | MAC_1000;
+ } else {
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
+ else
+ mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD;
+ }
+
+ mac->link.duplex = GMAC_CONTROL_DM;
+ mac->link.speed10 = GMAC_CONTROL_PS;
+ mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+ mac->mii.addr = GMAC_MII_ADDR;
+ mac->mii.data = GMAC_MII_DATA;
+ mac->mii.addr_shift = 11;
+ mac->mii.addr_mask = 0x0000F800;
+ mac->mii.reg_shift = 6;
+ mac->mii.reg_mask = 0x000007C0;
+ mac->mii.clk_csr_shift = 2;
+ mac->mii.clk_csr_mask = GENMASK(5, 2);
+
+ return mac;
+}
+
+static int loongson_dwmac_msi_config(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ int i, ret, vecs;
+
+ vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1);
+ ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_warn(&pdev->dev, "Failed to allocate MSI IRQs\n");
+ return ret;
+ }
+
+ res->irq = pci_irq_vector(pdev, 0);
+
+ for (i = 0; i < plat->rx_queues_to_use; i++) {
+ res->rx_irq[CHANNEL_NUM - 1 - i] =
+ pci_irq_vector(pdev, 1 + i * 2);
+ }
+
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ res->tx_irq[CHANNEL_NUM - 1 - i] =
+ pci_irq_vector(pdev, 2 + i * 2);
+ }
+
+ plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;
+
+ return 0;
+}
+
+static void loongson_dwmac_msi_clear(struct pci_dev *pdev)
+{
+ pci_free_irq_vectors(pdev);
+}
+
+static int loongson_dwmac_dt_config(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ struct device_node *np = dev_of_node(&pdev->dev);
+ int ret;
+
+ plat->mdio_node = of_get_child_by_name(np, "mdio");
+ if (plat->mdio_node) {
+ dev_info(&pdev->dev, "Found MDIO subnode\n");
+ plat->mdio_bus_data->needs_reset = true;
+ }
+
+ ret = of_alias_get_id(np, "ethernet");
+ if (ret >= 0)
+ plat->bus_id = ret;
+
+ res->irq = of_irq_get_byname(np, "macirq");
+ if (res->irq < 0) {
+ dev_err(&pdev->dev, "IRQ macirq not found\n");
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ res->wol_irq = of_irq_get_byname(np, "eth_wake_irq");
+ if (res->wol_irq < 0) {
+ dev_info(&pdev->dev,
+ "IRQ eth_wake_irq not found, using macirq\n");
+ res->wol_irq = res->irq;
+ }
+
+ res->lpi_irq = of_irq_get_byname(np, "eth_lpi");
+ if (res->lpi_irq < 0) {
+ dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ ret = device_get_phy_mode(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "phy_mode not found\n");
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ plat->phy_interface = ret;
+
+ return 0;
+
+err_put_node:
+ of_node_put(plat->mdio_node);
+
+ return ret;
+}
+
+static void loongson_dwmac_dt_clear(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ of_node_put(plat->mdio_node);
+}
+
+static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ if (!pdev->irq)
+ return -EINVAL;
+
+ res->irq = pdev->irq;
+
+ return 0;
+}
+
+static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_pci_info *info;
+ struct stmmac_resources res;
+ struct loongson_data *ld;
+ int ret, i;
+
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
return -ENOMEM;
@@ -69,25 +534,23 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (!plat->mdio_bus_data)
return -ENOMEM;
- plat->mdio_node = of_get_child_by_name(np, "mdio");
- if (plat->mdio_node) {
- dev_info(&pdev->dev, "Found MDIO subnode\n");
- plat->mdio_bus_data->needs_reset = true;
- }
-
plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
- if (!plat->dma_cfg) {
- ret = -ENOMEM;
- goto err_put_node;
- }
+ if (!plat->dma_cfg)
+ return -ENOMEM;
+
+ ld = devm_kzalloc(&pdev->dev, sizeof(*ld), GFP_KERNEL);
+ if (!ld)
+ return -ENOMEM;
/* Enable pci device */
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
- goto err_put_node;
+ return ret;
}
+ pci_set_master(pdev);
+
/* Get the base address of device */
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
@@ -98,59 +561,43 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
break;
}
- plat->bus_id = of_alias_get_id(np, "ethernet");
- if (plat->bus_id < 0)
- plat->bus_id = pci_dev_id(pdev);
-
- phy_mode = device_get_phy_mode(&pdev->dev);
- if (phy_mode < 0) {
- dev_err(&pdev->dev, "phy_mode not found\n");
- ret = phy_mode;
- goto err_disable_device;
- }
-
- plat->phy_interface = phy_mode;
- plat->mac_interface = PHY_INTERFACE_MODE_GMII;
-
- pci_set_master(pdev);
-
- loongson_default_data(plat);
- pci_enable_msi(pdev);
memset(&res, 0, sizeof(res));
res.addr = pcim_iomap_table(pdev)[0];
- res.irq = of_irq_get_byname(np, "macirq");
- if (res.irq < 0) {
- dev_err(&pdev->dev, "IRQ macirq not found\n");
- ret = -ENODEV;
- goto err_disable_msi;
- }
+ plat->bsp_priv = ld;
+ plat->setup = loongson_dwmac_setup;
+ ld->dev = &pdev->dev;
+ ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
- res.wol_irq = of_irq_get_byname(np, "eth_wake_irq");
- if (res.wol_irq < 0) {
- dev_info(&pdev->dev, "IRQ eth_wake_irq not found, using macirq\n");
- res.wol_irq = res.irq;
- }
+ info = (struct stmmac_pci_info *)id->driver_data;
+ ret = info->setup(pdev, plat);
+ if (ret)
+ goto err_disable_device;
- res.lpi_irq = of_irq_get_byname(np, "eth_lpi");
- if (res.lpi_irq < 0) {
- dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
- ret = -ENODEV;
- goto err_disable_msi;
- }
+ if (dev_of_node(&pdev->dev))
+ ret = loongson_dwmac_dt_config(pdev, plat, &res);
+ else
+ ret = loongson_dwmac_acpi_config(pdev, plat, &res);
+ if (ret)
+ goto err_disable_device;
+
+ /* Fall back to the common MAC IRQ if per-channel MSI allocation fails */
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ loongson_dwmac_msi_config(pdev, plat, &res);
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (ret)
- goto err_disable_msi;
+ goto err_plat_clear;
- return ret;
+ return 0;
-err_disable_msi:
- pci_disable_msi(pdev);
+err_plat_clear:
+ if (dev_of_node(&pdev->dev))
+ loongson_dwmac_dt_clear(pdev, plat);
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ loongson_dwmac_msi_clear(pdev);
err_disable_device:
pci_disable_device(pdev);
-err_put_node:
- of_node_put(plat->mdio_node);
return ret;
}
@@ -158,11 +605,18 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
{
struct net_device *ndev = dev_get_drvdata(&pdev->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
+ struct loongson_data *ld;
int i;
- of_node_put(priv->plat->mdio_node);
+ ld = priv->plat->bsp_priv;
stmmac_dvr_remove(&pdev->dev);
+ if (dev_of_node(&pdev->dev))
+ loongson_dwmac_dt_clear(pdev, priv->plat);
+
+ if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
+ loongson_dwmac_msi_clear(pdev);
+
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
@@ -170,7 +624,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
break;
}
- pci_disable_msi(pdev);
pci_disable_device(pdev);
}
@@ -213,7 +666,8 @@ static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
loongson_dwmac_resume);
static const struct pci_device_id loongson_dwmac_id_table[] = {
- { PCI_VDEVICE(LOONGSON, 0x7a03) },
+ { PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) },
+ { PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) },
{}
};
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
@@ -232,4 +686,5 @@ module_pci_driver(loongson_dwmac_driver);
MODULE_DESCRIPTION("Loongson DWMAC PCI driver");
MODULE_AUTHOR("Qing Zhang <zhangqing@loongson.cn>");
+MODULE_AUTHOR("Yanteng Si <siyanteng@loongson.cn>");
MODULE_LICENSE("GPL v2");
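
loongson_dwmac_msi_config() lays the vectors out as: vector 0 for the common MAC IRQ, odd vectors for RX channels and even vectors (from 2) for TX channels, with the per-channel entries stored in reverse order. A small demo printing that mapping for CHANNEL_NUM == 8:

#include <stdio.h>

#define CHANNEL_NUM 8

int main(void)
{
	int i;

	printf("common MAC IRQ -> vector 0\n");
	for (i = 0; i < CHANNEL_NUM; i++)
		printf("rx[%d] -> vector %d, tx[%d] -> vector %d\n",
		       CHANNEL_NUM - 1 - i, 1 + i * 2,
		       CHANNEL_NUM - 1 - i, 2 + i * 2);
	return 0;
}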
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 7ae04d8d291c..50073bdade46 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1116,6 +1116,161 @@ static const struct rk_gmac_ops rk3568_ops = {
},
};
+/* VCCIO0_1_3_IOC */
+#define RK3576_VCCIO0_1_3_IOC_CON2 0X6408
+#define RK3576_VCCIO0_1_3_IOC_CON3 0X640c
+#define RK3576_VCCIO0_1_3_IOC_CON4 0X6410
+#define RK3576_VCCIO0_1_3_IOC_CON5 0X6414
+
+#define RK3576_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3576_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3576_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
+#define RK3576_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
+
+#define RK3576_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3576_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* SDGMAC_GRF */
+#define RK3576_GRF_GMAC_CON0 0X0020
+#define RK3576_GRF_GMAC_CON1 0X0024
+
+#define RK3576_GMAC_RMII_MODE GRF_BIT(3)
+#define RK3576_GMAC_RGMII_MODE GRF_CLR_BIT(3)
+
+#define RK3576_GMAC_CLK_SELECT_IO GRF_BIT(7)
+#define RK3576_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(7)
+
+#define RK3576_GMAC_CLK_RMII_DIV2 GRF_BIT(5)
+#define RK3576_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(5)
+
+#define RK3576_GMAC_CLK_RGMII_DIV1 \
+ (GRF_CLR_BIT(6) | GRF_CLR_BIT(5))
+#define RK3576_GMAC_CLK_RGMII_DIV5 \
+ (GRF_BIT(6) | GRF_BIT(5))
+#define RK3576_GMAC_CLK_RGMII_DIV50 \
+ (GRF_BIT(6) | GRF_CLR_BIT(5))
+
+#define RK3576_GMAC_CLK_RMII_GATE GRF_BIT(4)
+#define RK3576_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(4)
+
+static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int offset_con;
+
+ if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
+ dev_err(dev, "Missing rockchip,grf or rockchip,php-grf property\n");
+ return;
+ }
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RGMII_MODE);
+
+ offset_con = bsp_priv->id == 1 ? RK3576_VCCIO0_1_3_IOC_CON4 :
+ RK3576_VCCIO0_1_3_IOC_CON2;
+
+ /* m0 && m1 delay enabled */
+ regmap_write(bsp_priv->php_grf, offset_con,
+ DELAY_ENABLE(RK3576, tx_delay, rx_delay));
+ regmap_write(bsp_priv->php_grf, offset_con + 0x4,
+ DELAY_ENABLE(RK3576, tx_delay, rx_delay));
+
+ /* m0 && m1 delay value */
+ regmap_write(bsp_priv->php_grf, offset_con,
+ RK3576_GMAC_CLK_TX_DL_CFG(tx_delay) |
+ RK3576_GMAC_CLK_RX_DL_CFG(rx_delay));
+ regmap_write(bsp_priv->php_grf, offset_con + 0x4,
+ RK3576_GMAC_CLK_TX_DL_CFG(tx_delay) |
+ RK3576_GMAC_CLK_RX_DL_CFG(rx_delay));
+}
+
+static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int offset_con;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RMII_MODE);
+}
+
+static void rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int val = 0, offset_con;
+
+ switch (speed) {
+ case 10:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3576_GMAC_CLK_RMII_DIV20;
+ else
+ val = RK3576_GMAC_CLK_RGMII_DIV50;
+ break;
+ case 100:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3576_GMAC_CLK_RMII_DIV2;
+ else
+ val = RK3576_GMAC_CLK_RGMII_DIV5;
+ break;
+ case 1000:
+ if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
+ val = RK3576_GMAC_CLK_RGMII_DIV1;
+ else
+ goto err;
+ break;
+ default:
+ goto err;
+ }
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, val);
+
+ return;
+err:
+	dev_err(dev, "unknown speed value for GMAC speed=%d\n", speed);
+}
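The divider choices above can be sanity-checked against the usual MAC clock rates, assuming a 125 MHz RGMII clock source and a 50 MHz RMII reference (rates implied by the divider names, not stated in this patch): RGMII wants 125/25/2.5 MHz for 1000/100/10 Mbit/s, RMII wants 25/2.5 MHz for 100/10.

#include <stdio.h>

int main(void)
{
	/* RGMII source assumed 125 MHz: DIV1/DIV5/DIV50 */
	printf("RGMII: 1000M -> %g MHz, 100M -> %g MHz, 10M -> %g MHz\n",
	       125.0 / 1, 125.0 / 5, 125.0 / 50);
	/* RMII reference assumed 50 MHz: DIV2/DIV20 */
	printf("RMII:  100M -> %g MHz, 10M -> %g MHz\n",
	       50.0 / 2, 50.0 / 20);
	return 0;
}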
+
+static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
+ bool enable)
+{
+ unsigned int val = input ? RK3576_GMAC_CLK_SELECT_IO :
+ RK3576_GMAC_CLK_SELECT_CRU;
+ unsigned int offset_con;
+
+ val |= enable ? RK3576_GMAC_CLK_RMII_NOGATE :
+ RK3576_GMAC_CLK_RMII_GATE;
+
+ offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(bsp_priv->grf, offset_con, val);
+}
+
+static const struct rk_gmac_ops rk3576_ops = {
+ .set_to_rgmii = rk3576_set_to_rgmii,
+ .set_to_rmii = rk3576_set_to_rmii,
+ .set_rgmii_speed = rk3576_set_gmac_speed,
+ .set_rmii_speed = rk3576_set_gmac_speed,
+ .set_clock_selection = rk3576_set_clock_selection,
+ .regs_valid = true,
+ .regs = {
+ 0x2a220000, /* gmac0 */
+ 0x2a230000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
/* sys_grf */
#define RK3588_GRF_GMAC_CON7 0x031c
#define RK3588_GRF_GMAC_CON8 0x0320
@@ -1141,8 +1296,8 @@ static const struct rk_gmac_ops rk3568_ops = {
#define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id))
#define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id))
-#define RK3588_GMAC_CLK_SELET_CRU(id) GRF_BIT(5 * (id) + 4)
-#define RK3588_GMAC_CLK_SELET_IO(id) GRF_CLR_BIT(5 * (id) + 4)
+#define RK3588_GMAC_CLK_SELECT_CRU(id) GRF_BIT(5 * (id) + 4)
+#define RK3588_GMAC_CLK_SELECT_IO(id) GRF_CLR_BIT(5 * (id) + 4)
#define RK3588_GMA_CLK_RMII_DIV2(id) GRF_BIT(5 * (id) + 2)
#define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2)
@@ -1240,8 +1395,8 @@ err:
static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
bool enable)
{
- unsigned int val = input ? RK3588_GMAC_CLK_SELET_IO(bsp_priv->id) :
- RK3588_GMAC_CLK_SELET_CRU(bsp_priv->id);
+ unsigned int val = input ? RK3588_GMAC_CLK_SELECT_IO(bsp_priv->id) :
+ RK3588_GMAC_CLK_SELECT_CRU(bsp_priv->id);
val |= enable ? RK3588_GMAC_CLK_RMII_NOGATE(bsp_priv->id) :
RK3588_GMAC_CLK_RMII_GATE(bsp_priv->id);
@@ -1908,6 +2063,7 @@ static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
{ .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
+ { .compatible = "rockchip,rk3576-gmac", .data = &rk3576_ops },
{ .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
{ .compatible = "rockchip,rv1108-gmac", .data = &rv1108_ops },
{ .compatible = "rockchip,rv1126-gmac", .data = &rv1126_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index e1b761dcfa1d..cc93f73a380e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -299,7 +299,7 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr)
* Called from stmmac via stmmac_dma_ops->init
*/
static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
@@ -395,7 +395,7 @@ static void sun8i_dwmac_dma_start_tx(struct stmmac_priv *priv,
writel(v, ioaddr + EMAC_TX_CTL1);
}
-static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr)
+static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
{
u32 v;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index adccdd816ea9..118a22406a2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -70,15 +70,17 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(value, ioaddr + DMA_AXI_BUS_MODE);
}
-static void dwmac1000_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+static void dwmac1000_dma_init_channel(struct stmmac_priv *priv,
+ void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 value;
- /*
- * Set the DMA PBL (Programmable Burst Length) mode.
+ value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));
+
+ /* Set the DMA PBL (Programmable Burst Length) mode.
*
* Note: before stmmac core 3.50 this mode bit was 4xPBL, and
* post 3.5 mode bit acts as 8*PBL.
@@ -98,16 +100,16 @@ static void dwmac1000_dma_init(void __iomem *ioaddr,
if (dma_cfg->mixed_burst)
value |= DMA_BUS_MODE_MB;
- if (atds)
+ if (dma_cfg->atds)
value |= DMA_BUS_MODE_ATDS;
if (dma_cfg->aal)
value |= DMA_BUS_MODE_AAL;
- writel(value, ioaddr + DMA_BUS_MODE);
+ writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));
/* Mask interrupts by writing to CSR7 */
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
@@ -116,7 +118,7 @@ static void dwmac1000_dma_init_rx(struct stmmac_priv *priv,
dma_addr_t dma_rx_phy, u32 chan)
{
/* RX descriptor base address list must be written into DMA CSR3 */
- writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
+ writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RCV_BASE_ADDR(chan));
}
static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
@@ -125,7 +127,7 @@ static void dwmac1000_dma_init_tx(struct stmmac_priv *priv,
dma_addr_t dma_tx_phy, u32 chan)
{
/* TX descriptor base address list must be written into DMA CSR4 */
- writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
+ writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
@@ -153,7 +155,7 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
@@ -175,14 +177,14 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
/* Configure flow control based on rx fifo size */
csr6 = dwmac1000_configure_fc(csr6, fifosz);
- writel(csr6, ioaddr + DMA_CONTROL);
+ writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
}
static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
- u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel));
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable TX store and forward mode\n");
@@ -209,7 +211,7 @@ static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv,
csr6 |= DMA_CONTROL_TTC_256;
}
- writel(csr6, ioaddr + DMA_CONTROL);
+ writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel));
}
static void dwmac1000_dump_dma_regs(struct stmmac_priv *priv,
@@ -271,12 +273,12 @@ static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
static void dwmac1000_rx_watchdog(struct stmmac_priv *priv,
void __iomem *ioaddr, u32 riwt, u32 queue)
{
- writel(riwt, ioaddr + DMA_RX_WATCHDOG);
+ writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue));
}
const struct stmmac_dma_ops dwmac1000_dma_ops = {
.reset = dwmac_dma_reset,
- .init = dwmac1000_dma_init,
+ .init_chan = dwmac1000_dma_init_channel,
.init_rx_chan = dwmac1000_dma_init_rx,
.init_tx_chan = dwmac1000_dma_init_tx,
.axi = dwmac1000_dma_axi,
@@ -294,3 +296,4 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = {
.get_hw_feature = dwmac1000_get_hw_feature,
.rx_watchdog = dwmac1000_rx_watchdog,
};
+EXPORT_SYMBOL_GPL(dwmac1000_dma_ops);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index b402fb54f613..82957db47c99 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -19,7 +19,7 @@
#include "dwmac_dma.h"
static void dwmac100_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
/* Enable Application Access by writing to DMA CSR0 */
writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 84d3a8551b03..e0165358c4ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -153,7 +153,7 @@ static void dwmac410_dma_init_channel(struct stmmac_priv *priv,
}
static void dwmac4_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 72672391675f..5d9c18f5bbf5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -22,6 +22,31 @@
#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+/* The following DMA defines are channel-oriented */
+#define DMA_CHAN_BASE_OFFSET 0x100
+
+static inline u32 dma_chan_base_addr(u32 base, u32 chan)
+{
+ return base + chan * DMA_CHAN_BASE_OFFSET;
+}
+
+#define DMA_CHAN_BUS_MODE(chan) dma_chan_base_addr(DMA_BUS_MODE, chan)
+#define DMA_CHAN_XMT_POLL_DEMAND(chan) \
+ dma_chan_base_addr(DMA_XMT_POLL_DEMAND, chan)
+#define DMA_CHAN_RCV_POLL_DEMAND(chan) \
+ dma_chan_base_addr(DMA_RCV_POLL_DEMAND, chan)
+#define DMA_CHAN_RCV_BASE_ADDR(chan) \
+ dma_chan_base_addr(DMA_RCV_BASE_ADDR, chan)
+#define DMA_CHAN_TX_BASE_ADDR(chan) \
+ dma_chan_base_addr(DMA_TX_BASE_ADDR, chan)
+#define DMA_CHAN_STATUS(chan) dma_chan_base_addr(DMA_STATUS, chan)
+#define DMA_CHAN_CONTROL(chan) dma_chan_base_addr(DMA_CONTROL, chan)
+#define DMA_CHAN_INTR_ENA(chan) dma_chan_base_addr(DMA_INTR_ENA, chan)
+#define DMA_CHAN_MISSED_FRAME_CTR(chan) \
+ dma_chan_base_addr(DMA_MISSED_FRAME_CTR, chan)
+#define DMA_CHAN_RX_WATCHDOG(chan) \
+ dma_chan_base_addr(DMA_RX_WATCHDOG, chan)
+
/* SW Reset */
#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
@@ -152,7 +177,7 @@
#define NUM_DWMAC1000_DMA_REGS 23
#define NUM_DWMAC4_DMA_REGS 27
-void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan);
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
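The new DMA_CHAN_*() accessors simply replicate channel 0's CSR block at 0x100 intervals, which is what lets the legacy dwmac1000/dwmac100 code paths above become per-channel. A standalone sketch of the address math, using the CSR0/CSR6 offsets defined in dwmac_dma.h (DMA_BUS_MODE is 0x1000, DMA_CONTROL is 0x1018):

#include <stdio.h>

#define DMA_BUS_MODE		0x00001000	/* CSR0 */
#define DMA_CONTROL		0x00001018	/* CSR6 */
#define DMA_CHAN_BASE_OFFSET	0x100

static unsigned int dma_chan_base_addr(unsigned int base, unsigned int chan)
{
	return base + chan * DMA_CHAN_BASE_OFFSET;
}

int main(void)
{
	for (unsigned int chan = 0; chan < 2; chan++)
		printf("chan %u: bus_mode @ 0x%04x, control @ 0x%04x\n",
		       chan,
		       dma_chan_base_addr(DMA_BUS_MODE, chan),
		       dma_chan_base_addr(DMA_CONTROL, chan));
	return 0;
}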
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 85e18f9a22f9..4846bf49c576 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -28,65 +28,65 @@ int dwmac_dma_reset(void __iomem *ioaddr)
}
/* CSR1 enables the transmit DMA to check for new descriptor */
-void dwmac_enable_dma_transmission(void __iomem *ioaddr)
+void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
{
- writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+ writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan));
}
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
- u32 value = readl(ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
if (rx)
value |= DMA_INTR_DEFAULT_RX;
if (tx)
value |= DMA_INTR_DEFAULT_TX;
- writel(value, ioaddr + DMA_INTR_ENA);
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
- u32 value = readl(ioaddr + DMA_INTR_ENA);
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
if (rx)
value &= ~DMA_INTR_DEFAULT_RX;
if (tx)
value &= ~DMA_INTR_DEFAULT_TX;
- writel(value, ioaddr + DMA_INTR_ENA);
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
}
void dwmac_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value |= DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value &= ~DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value |= DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan)
{
- u32 value = readl(ioaddr + DMA_CONTROL);
+ u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
value &= ~DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
}
#ifdef DWMAC_DMA_DEBUG
@@ -165,7 +165,7 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
/* read the status register (CSR5) */
- u32 intr_status = readl(ioaddr + DMA_STATUS);
+ u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
#ifdef DWMAC_DMA_DEBUG
/* Enable it to monitor DMA rx/tx status in case of critical problems */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index f196cd99d510..cbf2dd976ab1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -846,42 +846,41 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
-#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
-#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
-
+static const char dpp_rx_err[] = "Read Rx Descriptor Parity checker Error";
+static const char dpp_tx_err[] = "Read Tx Descriptor Parity checker Error";
static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
- { true, "TDPES0", DPP_TX_ERR },
- { true, "TDPES1", DPP_TX_ERR },
- { true, "TDPES2", DPP_TX_ERR },
- { true, "TDPES3", DPP_TX_ERR },
- { true, "TDPES4", DPP_TX_ERR },
- { true, "TDPES5", DPP_TX_ERR },
- { true, "TDPES6", DPP_TX_ERR },
- { true, "TDPES7", DPP_TX_ERR },
- { true, "TDPES8", DPP_TX_ERR },
- { true, "TDPES9", DPP_TX_ERR },
- { true, "TDPES10", DPP_TX_ERR },
- { true, "TDPES11", DPP_TX_ERR },
- { true, "TDPES12", DPP_TX_ERR },
- { true, "TDPES13", DPP_TX_ERR },
- { true, "TDPES14", DPP_TX_ERR },
- { true, "TDPES15", DPP_TX_ERR },
- { true, "RDPES0", DPP_RX_ERR },
- { true, "RDPES1", DPP_RX_ERR },
- { true, "RDPES2", DPP_RX_ERR },
- { true, "RDPES3", DPP_RX_ERR },
- { true, "RDPES4", DPP_RX_ERR },
- { true, "RDPES5", DPP_RX_ERR },
- { true, "RDPES6", DPP_RX_ERR },
- { true, "RDPES7", DPP_RX_ERR },
- { true, "RDPES8", DPP_RX_ERR },
- { true, "RDPES9", DPP_RX_ERR },
- { true, "RDPES10", DPP_RX_ERR },
- { true, "RDPES11", DPP_RX_ERR },
- { true, "RDPES12", DPP_RX_ERR },
- { true, "RDPES13", DPP_RX_ERR },
- { true, "RDPES14", DPP_RX_ERR },
- { true, "RDPES15", DPP_RX_ERR },
+ { true, "TDPES0", dpp_tx_err },
+ { true, "TDPES1", dpp_tx_err },
+ { true, "TDPES2", dpp_tx_err },
+ { true, "TDPES3", dpp_tx_err },
+ { true, "TDPES4", dpp_tx_err },
+ { true, "TDPES5", dpp_tx_err },
+ { true, "TDPES6", dpp_tx_err },
+ { true, "TDPES7", dpp_tx_err },
+ { true, "TDPES8", dpp_tx_err },
+ { true, "TDPES9", dpp_tx_err },
+ { true, "TDPES10", dpp_tx_err },
+ { true, "TDPES11", dpp_tx_err },
+ { true, "TDPES12", dpp_tx_err },
+ { true, "TDPES13", dpp_tx_err },
+ { true, "TDPES14", dpp_tx_err },
+ { true, "TDPES15", dpp_tx_err },
+ { true, "RDPES0", dpp_rx_err },
+ { true, "RDPES1", dpp_rx_err },
+ { true, "RDPES2", dpp_rx_err },
+ { true, "RDPES3", dpp_rx_err },
+ { true, "RDPES4", dpp_rx_err },
+ { true, "RDPES5", dpp_rx_err },
+ { true, "RDPES6", dpp_rx_err },
+ { true, "RDPES7", dpp_rx_err },
+ { true, "RDPES8", dpp_rx_err },
+ { true, "RDPES9", dpp_rx_err },
+ { true, "RDPES10", dpp_rx_err },
+ { true, "RDPES11", dpp_rx_err },
+ { true, "RDPES12", dpp_rx_err },
+ { true, "RDPES13", dpp_rx_err },
+ { true, "RDPES14", dpp_rx_err },
+ { true, "RDPES15", dpp_rx_err },
};
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index dd2ab6185c40..7840bc403788 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -20,7 +20,7 @@ static int dwxgmac2_dma_reset(void __iomem *ioaddr)
}
static void dwxgmac2_dma_init(void __iomem *ioaddr,
- struct stmmac_dma_cfg *dma_cfg, int atds)
+ struct stmmac_dma_cfg *dma_cfg)
{
u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index e53c32362774..7e90f34b8c88 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -175,8 +175,7 @@ struct dma_features;
struct stmmac_dma_ops {
/* DMA core initialization */
int (*reset)(void __iomem *ioaddr);
- void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
- int atds);
+ void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg);
void (*init_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_dma_cfg *dma_cfg, u32 chan);
void (*init_rx_chan)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -198,7 +197,7 @@ struct stmmac_dma_ops {
/* To track extra statistic (if supported) */
void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x,
void __iomem *ioaddr);
- void (*enable_dma_transmission) (void __iomem *ioaddr);
+ void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan);
void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f3a1b179aaea..d9fca8d1227c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2367,9 +2367,11 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
- /* Adjust for real per queue fifo size */
- rxfifosz /= rx_channels_count;
- txfifosz /= tx_channels_count;
+ /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
+ rxfifosz /= rx_channels_count;
+ txfifosz /= tx_channels_count;
+ }
if (priv->plat->force_thresh_dma_mode) {
txmode = tc;
@@ -2553,7 +2555,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
true, priv->mode, true, true,
xdp_desc.len);
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
xsk_tx_metadata_to_compl(meta,
&tx_q->tx_skbuff_dma[entry].xsk_meta);
@@ -3003,7 +3005,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
struct stmmac_rx_queue *rx_q;
struct stmmac_tx_queue *tx_q;
u32 chan = 0;
- int atds = 0;
int ret = 0;
if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
@@ -3012,7 +3013,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
}
if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
- atds = 1;
+ priv->plat->dma_cfg->atds = 1;
ret = stmmac_reset(priv, priv->ioaddr);
if (ret) {
@@ -3021,7 +3022,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
}
/* DMA Configuration */
- stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
if (priv->plat->axi)
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
@@ -4754,7 +4755,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
stmmac_flush_tx_descriptors(priv, queue);
stmmac_tx_timer_arm(priv, queue);
@@ -4981,7 +4982,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
u64_stats_update_end(&txq_stats->q_syncp);
}
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
+ stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 2f30715e9b67..1e887d951a04 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -114,37 +114,23 @@ static void vnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct vnet *vp = (struct vnet *)netdev_priv(dev);
struct vnet_port *port;
- char *p = (char *)buf;
switch (stringset) {
case ETH_SS_STATS:
memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
- p += sizeof(ethtool_stats_keys);
+ buf += sizeof(ethtool_stats_keys);
rcu_read_lock();
list_for_each_entry_rcu(port, &vp->port_list, list) {
- snprintf(p, ETH_GSTRING_LEN, "p%u.%s-%pM",
- port->q_index, port->switch_port ? "s" : "q",
- port->raddr);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.rx_packets",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.tx_packets",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.rx_bytes",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.tx_bytes",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.event_up",
- port->q_index);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "p%u.event_reset",
- port->q_index);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&buf, "p%u.%s-%pM", port->q_index,
+ port->switch_port ? "s" : "q",
+ port->raddr);
+ ethtool_sprintf(&buf, "p%u.rx_packets", port->q_index);
+ ethtool_sprintf(&buf, "p%u.tx_packets", port->q_index);
+ ethtool_sprintf(&buf, "p%u.rx_bytes", port->q_index);
+ ethtool_sprintf(&buf, "p%u.tx_bytes", port->q_index);
+ ethtool_sprintf(&buf, "p%u.event_up", port->q_index);
+ ethtool_sprintf(&buf, "p%u.event_reset", port->q_index);
}
rcu_read_unlock();
break;
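ethtool_sprintf() replaces the snprintf()-plus-pointer-bump pattern removed above: it formats into the current ETH_GSTRING_LEN slot and advances the caller's cursor one slot per call (a puts-style variant, ethtool_puts(), appears in the icssg patch further down). A userspace sketch of the semantics; the real helper lives in net/ethtool/ioctl.c:

#include <stdarg.h>
#include <stdio.h>

#define ETH_GSTRING_LEN 32

static void ethtool_sprintf(char **data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);

	*data += ETH_GSTRING_LEN;	/* one string slot per call */
}

int main(void)
{
	char buf[2 * ETH_GSTRING_LEN] = { 0 };
	char *p = buf;

	ethtool_sprintf(&p, "p%u.rx_packets", 0u);
	ethtool_sprintf(&p, "p%u.tx_packets", 0u);
	printf("%s / %s\n", buf, buf + ETH_GSTRING_LEN);
	return 0;
}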
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 81d9f21086ec..555aca4ffa24 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2761,7 +2761,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
}
phylink = phylink_create(&port->slave.phylink_config,
- of_node_to_fwnode(port->slave.port_np),
+ of_fwnode_handle(port->slave.port_np),
port->slave.phy_if,
&am65_cpsw_phylink_mac_ops);
if (IS_ERR(phylink))
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index 5688f054cec5..5073ec195854 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -83,13 +83,11 @@ static void emac_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) {
- if (!icssg_all_stats[i].standard_stats) {
- memcpy(p, icssg_all_stats[i].name,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++)
+ if (!icssg_all_miig_stats[i].standard_stats)
+ ethtool_puts(&p, icssg_all_miig_stats[i].name);
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
+ ethtool_puts(&p, icssg_all_pa_stats[i].name);
break;
default:
break;
@@ -104,9 +102,12 @@ static void emac_get_ethtool_stats(struct net_device *ndev,
emac_update_hardware_stats(emac);
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++)
- if (!icssg_all_stats[i].standard_stats)
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++)
+ if (!icssg_all_miig_stats[i].standard_stats)
*(data++) = emac->stats[i];
+
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++)
+ *(data++) = emac->pa_stats[i];
}
static int emac_get_ts_info(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index e3451beed323..becdda143c19 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -857,6 +857,7 @@ static int prueth_netdev_init(struct prueth *prueth,
}
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+ ndev->dev.of_node = eth_node;
ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
ndev->max_mtu = PRUETH_MAX_MTU;
ndev->netdev_ops = &emac_netdev_ops;
@@ -1181,6 +1182,12 @@ static int prueth_probe(struct platform_device *pdev)
return -ENODEV;
}
+ prueth->pa_stats = syscon_regmap_lookup_by_phandle(np, "ti,pa-stats");
+ if (IS_ERR(prueth->pa_stats)) {
+ dev_err(dev, "couldn't get ti,pa-stats syscon regmap\n");
+ return -ENODEV;
+ }
+
if (eth0_node) {
ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
if (ret)
@@ -1271,8 +1278,8 @@ static int prueth_probe(struct platform_device *pdev)
goto exit_iep;
}
- if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL))
- prueth->emac[PRUETH_MAC0]->half_duplex = 1;
+ prueth->emac[PRUETH_MAC0]->half_duplex =
+ of_property_read_bool(eth0_node, "ti,half-duplex-capable");
prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
}
@@ -1285,8 +1292,8 @@ static int prueth_probe(struct platform_device *pdev)
goto netdev_exit;
}
- if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL))
- prueth->emac[PRUETH_MAC1]->half_duplex = 1;
+ prueth->emac[PRUETH_MAC1]->half_duplex =
+ of_property_read_bool(eth1_node, "ti,half-duplex-capable");
prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index f678d656a3ed..786bd1ba34ab 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -50,8 +50,10 @@
#define ICSSG_MAX_RFLOWS 8 /* per slice */
+#define ICSSG_NUM_PA_STATS 4
+#define ICSSG_NUM_MIIG_STATS 60
/* Number of ICSSG related stats */
-#define ICSSG_NUM_STATS 60
+#define ICSSG_NUM_STATS (ICSSG_NUM_MIIG_STATS + ICSSG_NUM_PA_STATS)
#define ICSSG_NUM_STANDARD_STATS 31
#define ICSSG_NUM_ETHTOOL_STATS (ICSSG_NUM_STATS - ICSSG_NUM_STANDARD_STATS)
@@ -190,7 +192,8 @@ struct prueth_emac {
int port_vlan;
struct delayed_work stats_work;
- u64 stats[ICSSG_NUM_STATS];
+ u64 stats[ICSSG_NUM_MIIG_STATS];
+ u64 pa_stats[ICSSG_NUM_PA_STATS];
/* RX IRQ Coalescing Related */
struct hrtimer rx_hrtimer;
@@ -230,6 +233,7 @@ struct icssg_firmwares {
* @registered_netdevs: list of registered netdevs
* @miig_rt: regmap to mii_g_rt block
* @mii_rt: regmap to mii_rt block
+ * @pa_stats: regmap to pa_stats block
* @pru_id: ID for each of the PRUs
* @pdev: pointer to ICSSG platform device
* @pdata: pointer to platform data for ICSSG driver
@@ -263,6 +267,7 @@ struct prueth {
struct net_device *registered_netdevs[PRUETH_NUM_MACS];
struct regmap *miig_rt;
struct regmap *mii_rt;
+ struct regmap *pa_stats;
enum pruss_pru_id pru_id[PRUSS_NUM_PRUS];
struct platform_device *pdev;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index e180c1166170..292f04d29f4f 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -847,6 +847,7 @@ static int prueth_netdev_init(struct prueth *prueth,
}
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+ ndev->dev.of_node = eth_node;
ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
ndev->max_mtu = PRUETH_MAX_MTU;
ndev->netdev_ops = &emac_netdev_ops;
@@ -1045,8 +1046,8 @@ static int prueth_probe(struct platform_device *pdev)
goto exit_iep;
}
- if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL))
- prueth->emac[PRUETH_MAC0]->half_duplex = 1;
+ prueth->emac[PRUETH_MAC0]->half_duplex =
+ of_property_read_bool(eth0_node, "ti,half-duplex-capable");
prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
}
@@ -1059,8 +1060,8 @@ static int prueth_probe(struct platform_device *pdev)
goto netdev_exit;
}
- if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL))
- prueth->emac[PRUETH_MAC1]->half_duplex = 1;
+ prueth->emac[PRUETH_MAC1]->half_duplex =
+ of_property_read_bool(eth1_node, "ti,half-duplex-capable");
prueth->emac[PRUETH_MAC1]->iep = prueth->iep1;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 2fb150c13078..06a15c0b2acc 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -11,6 +11,7 @@
#define ICSSG_TX_PACKET_OFFSET 0xA0
#define ICSSG_TX_BYTE_OFFSET 0xEC
+#define ICSSG_FW_STATS_BASE 0x0248
static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */
0xb18, /* Slice 1 stats start */
@@ -22,24 +23,31 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
int slice = prueth_emac_slice(emac);
u32 base = stats_base[slice];
u32 tx_pkt_cnt = 0;
- u32 val;
+ u32 val, reg;
int i;
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
regmap_read(prueth->miig_rt,
- base + icssg_all_stats[i].offset,
+ base + icssg_all_miig_stats[i].offset,
&val);
regmap_write(prueth->miig_rt,
- base + icssg_all_stats[i].offset,
+ base + icssg_all_miig_stats[i].offset,
val);
- if (icssg_all_stats[i].offset == ICSSG_TX_PACKET_OFFSET)
+ if (icssg_all_miig_stats[i].offset == ICSSG_TX_PACKET_OFFSET)
tx_pkt_cnt = val;
emac->stats[i] += val;
- if (icssg_all_stats[i].offset == ICSSG_TX_BYTE_OFFSET)
+ if (icssg_all_miig_stats[i].offset == ICSSG_TX_BYTE_OFFSET)
emac->stats[i] -= tx_pkt_cnt * 8;
}
+
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
+ reg = ICSSG_FW_STATS_BASE + icssg_all_pa_stats[i].offset *
+ PRUETH_NUM_MACS + slice * sizeof(u32);
+ regmap_read(prueth->pa_stats, reg, &val);
+ emac->pa_stats[i] += val;
+ }
}
void icssg_stats_work_handler(struct work_struct *work)
@@ -57,9 +65,14 @@ int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name)
{
int i;
- for (i = 0; i < ARRAY_SIZE(icssg_all_stats); i++) {
- if (!strcmp(icssg_all_stats[i].name, stat_name))
- return emac->stats[icssg_all_stats[i].offset / sizeof(u32)];
+ for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
+ if (!strcmp(icssg_all_miig_stats[i].name, stat_name))
+ return emac->stats[icssg_all_miig_stats[i].offset / sizeof(u32)];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(icssg_all_pa_stats); i++) {
+ if (!strcmp(icssg_all_pa_stats[i].name, stat_name))
+ return emac->pa_stats[icssg_all_pa_stats[i].offset / sizeof(u32)];
}
netdev_err(emac->ndev, "Invalid stats %s\n", stat_name);
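The register layout assumed by the new PA-stats loop above is structure-of-arrays: each counter from struct pa_stats_regs is stored as a pair of u32 slots (one per slice) starting at ICSSG_FW_STATS_BASE, hence reg = base + offset * PRUETH_NUM_MACS + slice * 4. A standalone check of the resulting addresses:

#include <stdio.h>

#define ICSSG_FW_STATS_BASE	0x0248
#define PRUETH_NUM_MACS		2

int main(void)
{
	static const char *const name[] = {
		"fw_rx_cnt", "fw_tx_cnt",
		"fw_tx_pre_overflow", "fw_tx_exp_overflow",
	};

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int offset = i * 4;	/* offsetof() in pa_stats_regs */

		for (unsigned int slice = 0; slice < PRUETH_NUM_MACS; slice++)
			printf("%-19s slice%u @ 0x%04x\n", name[i], slice,
			       ICSSG_FW_STATS_BASE + offset * PRUETH_NUM_MACS +
			       slice * 4);
	}
	return 0;
}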
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.h b/drivers/net/ethernet/ti/icssg/icssg_stats.h
index 999a4a91276c..e88b919f532c 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.h
@@ -77,82 +77,114 @@ struct miig_stats_regs {
u32 tx_bytes;
};
-#define ICSSG_STATS(field, stats_type) \
+#define ICSSG_MIIG_STATS(field, stats_type) \
{ \
#field, \
offsetof(struct miig_stats_regs, field), \
stats_type \
}
-struct icssg_stats {
+struct icssg_miig_stats {
char name[ETH_GSTRING_LEN];
u32 offset;
bool standard_stats;
};
-static const struct icssg_stats icssg_all_stats[] = {
+static const struct icssg_miig_stats icssg_all_miig_stats[] = {
/* Rx */
- ICSSG_STATS(rx_packets, true),
- ICSSG_STATS(rx_broadcast_frames, false),
- ICSSG_STATS(rx_multicast_frames, true),
- ICSSG_STATS(rx_crc_errors, true),
- ICSSG_STATS(rx_mii_error_frames, false),
- ICSSG_STATS(rx_odd_nibble_frames, false),
- ICSSG_STATS(rx_frame_max_size, true),
- ICSSG_STATS(rx_max_size_error_frames, false),
- ICSSG_STATS(rx_frame_min_size, true),
- ICSSG_STATS(rx_min_size_error_frames, false),
- ICSSG_STATS(rx_over_errors, true),
- ICSSG_STATS(rx_class0_hits, false),
- ICSSG_STATS(rx_class1_hits, false),
- ICSSG_STATS(rx_class2_hits, false),
- ICSSG_STATS(rx_class3_hits, false),
- ICSSG_STATS(rx_class4_hits, false),
- ICSSG_STATS(rx_class5_hits, false),
- ICSSG_STATS(rx_class6_hits, false),
- ICSSG_STATS(rx_class7_hits, false),
- ICSSG_STATS(rx_class8_hits, false),
- ICSSG_STATS(rx_class9_hits, false),
- ICSSG_STATS(rx_class10_hits, false),
- ICSSG_STATS(rx_class11_hits, false),
- ICSSG_STATS(rx_class12_hits, false),
- ICSSG_STATS(rx_class13_hits, false),
- ICSSG_STATS(rx_class14_hits, false),
- ICSSG_STATS(rx_class15_hits, false),
- ICSSG_STATS(rx_smd_frags, false),
- ICSSG_STATS(rx_bucket1_size, true),
- ICSSG_STATS(rx_bucket2_size, true),
- ICSSG_STATS(rx_bucket3_size, true),
- ICSSG_STATS(rx_bucket4_size, true),
- ICSSG_STATS(rx_64B_frames, true),
- ICSSG_STATS(rx_bucket1_frames, true),
- ICSSG_STATS(rx_bucket2_frames, true),
- ICSSG_STATS(rx_bucket3_frames, true),
- ICSSG_STATS(rx_bucket4_frames, true),
- ICSSG_STATS(rx_bucket5_frames, true),
- ICSSG_STATS(rx_bytes, true),
- ICSSG_STATS(rx_tx_total_bytes, false),
+ ICSSG_MIIG_STATS(rx_packets, true),
+ ICSSG_MIIG_STATS(rx_broadcast_frames, false),
+ ICSSG_MIIG_STATS(rx_multicast_frames, true),
+ ICSSG_MIIG_STATS(rx_crc_errors, true),
+ ICSSG_MIIG_STATS(rx_mii_error_frames, false),
+ ICSSG_MIIG_STATS(rx_odd_nibble_frames, false),
+ ICSSG_MIIG_STATS(rx_frame_max_size, true),
+ ICSSG_MIIG_STATS(rx_max_size_error_frames, false),
+ ICSSG_MIIG_STATS(rx_frame_min_size, true),
+ ICSSG_MIIG_STATS(rx_min_size_error_frames, false),
+ ICSSG_MIIG_STATS(rx_over_errors, true),
+ ICSSG_MIIG_STATS(rx_class0_hits, false),
+ ICSSG_MIIG_STATS(rx_class1_hits, false),
+ ICSSG_MIIG_STATS(rx_class2_hits, false),
+ ICSSG_MIIG_STATS(rx_class3_hits, false),
+ ICSSG_MIIG_STATS(rx_class4_hits, false),
+ ICSSG_MIIG_STATS(rx_class5_hits, false),
+ ICSSG_MIIG_STATS(rx_class6_hits, false),
+ ICSSG_MIIG_STATS(rx_class7_hits, false),
+ ICSSG_MIIG_STATS(rx_class8_hits, false),
+ ICSSG_MIIG_STATS(rx_class9_hits, false),
+ ICSSG_MIIG_STATS(rx_class10_hits, false),
+ ICSSG_MIIG_STATS(rx_class11_hits, false),
+ ICSSG_MIIG_STATS(rx_class12_hits, false),
+ ICSSG_MIIG_STATS(rx_class13_hits, false),
+ ICSSG_MIIG_STATS(rx_class14_hits, false),
+ ICSSG_MIIG_STATS(rx_class15_hits, false),
+ ICSSG_MIIG_STATS(rx_smd_frags, false),
+ ICSSG_MIIG_STATS(rx_bucket1_size, true),
+ ICSSG_MIIG_STATS(rx_bucket2_size, true),
+ ICSSG_MIIG_STATS(rx_bucket3_size, true),
+ ICSSG_MIIG_STATS(rx_bucket4_size, true),
+ ICSSG_MIIG_STATS(rx_64B_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket1_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket2_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket3_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket4_frames, true),
+ ICSSG_MIIG_STATS(rx_bucket5_frames, true),
+ ICSSG_MIIG_STATS(rx_bytes, true),
+ ICSSG_MIIG_STATS(rx_tx_total_bytes, false),
/* Tx */
- ICSSG_STATS(tx_packets, true),
- ICSSG_STATS(tx_broadcast_frames, false),
- ICSSG_STATS(tx_multicast_frames, false),
- ICSSG_STATS(tx_odd_nibble_frames, false),
- ICSSG_STATS(tx_underflow_errors, false),
- ICSSG_STATS(tx_frame_max_size, true),
- ICSSG_STATS(tx_max_size_error_frames, false),
- ICSSG_STATS(tx_frame_min_size, true),
- ICSSG_STATS(tx_min_size_error_frames, false),
- ICSSG_STATS(tx_bucket1_size, true),
- ICSSG_STATS(tx_bucket2_size, true),
- ICSSG_STATS(tx_bucket3_size, true),
- ICSSG_STATS(tx_bucket4_size, true),
- ICSSG_STATS(tx_64B_frames, true),
- ICSSG_STATS(tx_bucket1_frames, true),
- ICSSG_STATS(tx_bucket2_frames, true),
- ICSSG_STATS(tx_bucket3_frames, true),
- ICSSG_STATS(tx_bucket4_frames, true),
- ICSSG_STATS(tx_bucket5_frames, true),
- ICSSG_STATS(tx_bytes, true),
+ ICSSG_MIIG_STATS(tx_packets, true),
+ ICSSG_MIIG_STATS(tx_broadcast_frames, false),
+ ICSSG_MIIG_STATS(tx_multicast_frames, false),
+ ICSSG_MIIG_STATS(tx_odd_nibble_frames, false),
+ ICSSG_MIIG_STATS(tx_underflow_errors, false),
+ ICSSG_MIIG_STATS(tx_frame_max_size, true),
+ ICSSG_MIIG_STATS(tx_max_size_error_frames, false),
+ ICSSG_MIIG_STATS(tx_frame_min_size, true),
+ ICSSG_MIIG_STATS(tx_min_size_error_frames, false),
+ ICSSG_MIIG_STATS(tx_bucket1_size, true),
+ ICSSG_MIIG_STATS(tx_bucket2_size, true),
+ ICSSG_MIIG_STATS(tx_bucket3_size, true),
+ ICSSG_MIIG_STATS(tx_bucket4_size, true),
+ ICSSG_MIIG_STATS(tx_64B_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket1_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket2_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket3_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket4_frames, true),
+ ICSSG_MIIG_STATS(tx_bucket5_frames, true),
+ ICSSG_MIIG_STATS(tx_bytes, true),
+};
+
+/**
+ * struct pa_stats_regs - ICSSG firmware-maintained PA stats registers
+ * @fw_rx_cnt: Number of valid packets sent by Rx PRU to Host on PSI
+ * @fw_tx_cnt: Number of valid packets copied by RTU0 to Tx queues
+ * @fw_tx_pre_overflow: Host Egress Q (Pre-emptible) Overflow Counter
+ * @fw_tx_exp_overflow: Host Egress Q (Express) Overflow Counter
+ */
+struct pa_stats_regs {
+ u32 fw_rx_cnt;
+ u32 fw_tx_cnt;
+ u32 fw_tx_pre_overflow;
+ u32 fw_tx_exp_overflow;
+};
+
+#define ICSSG_PA_STATS(field) \
+{ \
+ #field, \
+ offsetof(struct pa_stats_regs, field), \
+}
+
+struct icssg_pa_stats {
+ char name[ETH_GSTRING_LEN];
+ u32 offset;
+};
+
+static const struct icssg_pa_stats icssg_all_pa_stats[] = {
+ ICSSG_PA_STATS(fw_rx_cnt),
+ ICSSG_PA_STATS(fw_tx_cnt),
+ ICSSG_PA_STATS(fw_tx_pre_overflow),
+ ICSSG_PA_STATS(fw_tx_exp_overflow),
};
#endif /* __NET_TI_ICSSG_STATS_H */
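The ICSSG_MIIG_STATS()/ICSSG_PA_STATS() macros above use two standard tricks: #field stringizes the struct member into its ethtool name, and offsetof() ties the table entry to the register layout so the two cannot drift apart. A minimal standalone version of the pattern:

#include <stddef.h>
#include <stdio.h>

struct pa_stats_regs {
	unsigned int fw_rx_cnt;
	unsigned int fw_tx_cnt;
};

struct stat_desc {
	const char *name;
	size_t offset;
};

#define PA_STAT(field) \
	{ #field, offsetof(struct pa_stats_regs, field) }

static const struct stat_desc stats[] = {
	PA_STAT(fw_rx_cnt),
	PA_STAT(fw_tx_cnt),
};

int main(void)
{
	for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++)
		printf("%s @ +%zu\n", stats[i].name, stats[i].offset);
	return 0;
}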
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index edd8b59680e5..a04d4073def9 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -377,8 +377,8 @@ static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
int ret;
bool first = true;
- if (txb->len < 60)
- pad = 60 - txb->len;
+ if (txb->len < ETH_ZLEN)
+ pad = ETH_ZLEN - txb->len;
while (1) {
mse102x_tx_cmd_spi(mse, CMD_RTS | (txb->len + pad));
@@ -451,7 +451,7 @@ static void mse102x_tx_work(struct work_struct *work)
if (ret == -ETIMEDOUT) {
if (netif_msg_timer(mse))
- netdev_err(mse->ndev, "tx work timeout\n");
+ netdev_err_once(mse->ndev, "tx work timeout\n");
mse->stats.tx_timeout++;
}
@@ -485,8 +485,8 @@ static void mse102x_init_mac(struct mse102x_net *mse, struct device_node *np)
if (ret) {
eth_hw_addr_random(ndev);
- netdev_err(ndev, "Using random MAC address: %pM\n",
- ndev->dev_addr);
+ dev_warn(ndev->dev.parent, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
}
}
@@ -622,8 +622,6 @@ static const struct ethtool_ops mse102x_ethtool_ops = {
/* driver bus management functions */
-#ifdef CONFIG_PM_SLEEP
-
static int mse102x_suspend(struct device *dev)
{
struct mse102x_net *mse = dev_get_drvdata(dev);
@@ -649,9 +647,8 @@ static int mse102x_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume);
static int mse102x_probe_spi(struct spi_device *spi)
{
@@ -736,9 +733,6 @@ static void mse102x_remove_spi(struct spi_device *spi)
struct mse102x_net *mse = dev_get_drvdata(&spi->dev);
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
- if (netif_msg_drv(mse))
- dev_info(&spi->dev, "remove\n");
-
mse102x_remove_device_debugfs(mses);
unregister_netdev(mse->ndev);
}
@@ -761,7 +755,7 @@ static struct spi_driver mse102x_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = mse102x_match_table,
- .pm = &mse102x_pm_ops,
+ .pm = pm_sleep_ptr(&mse102x_pm_ops),
},
.probe = mse102x_probe_spi,
.remove = mse102x_remove_spi,
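The mse102x change swaps the #ifdef CONFIG_PM_SLEEP guards for DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr(): the ops struct and its suspend/resume callbacks stay visible to the compiler in every configuration (no unused-function warnings, no ifdeffery) and are dropped by dead-code elimination when sleep support is off. pm_sleep_ptr() behaves roughly like this simplified model (the real macro in include/linux/pm.h is built on PTR_IF()/IS_ENABLED()):

/* Simplified model of pm_sleep_ptr(): a ternary rather than an #ifdef, so
 * the ops struct stays referenced while still being constant-folded to
 * NULL when CONFIG_PM_SLEEP is disabled.
 */
#define pm_sleep_ptr(_ptr)	(IS_ENABLED(CONFIG_PM_SLEEP) ? (_ptr) : NULL)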
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 1eecba984f3b..2b3d6586f44a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -251,10 +251,7 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
rx_buffer->page_offset;
/* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+ net_prefetch(page_addr);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256);
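net_prefetch() wraps exactly the open-coded pattern deleted above; networking wants the first 128 bytes of a frame warm in cache because packet headers usually span more than one 64-byte line. Roughly, from include/linux/netdevice.h:

static inline void net_prefetch(void *p)
{
	prefetch(p);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}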
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index d6b2b3c781b6..cd1372da92a9 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -103,8 +103,7 @@ static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum)
if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
*checksum += local_buffer[i];
- if (eeprom_ptrs)
- kvfree(eeprom_ptrs);
+ kvfree(eeprom_ptrs);
*checksum = TXGBE_EEPROM_SUM - *checksum;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 5f502265f0a6..67b61afdde96 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -688,8 +688,7 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe)
mii_bus->parent = &pdev->dev;
mii_bus->phy_mask = GENMASK(31, 1);
mii_bus->priv = wx;
- snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x",
- (pdev->bus->number << 8) | pdev->devfn);
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "txgbe-%x", pci_dev_id(pdev));
ret = devm_mdiobus_register(&pdev->dev, mii_bus);
if (ret) {
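pci_dev_id() folds the open-coded (bus->number << 8) | devfn into one helper; roughly, from include/linux/pci.h:

#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))

static inline u16 pci_dev_id(struct pci_dev *dev)
{
	return PCI_DEVID(dev->bus->number, dev->devfn);
}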
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 09c9f9787180..c301dd2ee083 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -29,26 +29,26 @@
/* Configuration options */
/* Accept all incoming packets. Default: disabled (cleared) */
-#define XAE_OPTION_PROMISC (1 << 0)
+#define XAE_OPTION_PROMISC BIT(0)
/* Jumbo frame support for Tx & Rx. Default: disabled (cleared) */
-#define XAE_OPTION_JUMBO (1 << 1)
+#define XAE_OPTION_JUMBO BIT(1)
/* VLAN Rx & Tx frame support. Default: disabled (cleared) */
-#define XAE_OPTION_VLAN (1 << 2)
+#define XAE_OPTION_VLAN BIT(2)
/* Enable recognition of flow control frames on Rx. Default: enabled (set) */
-#define XAE_OPTION_FLOW_CONTROL (1 << 4)
+#define XAE_OPTION_FLOW_CONTROL BIT(4)
/* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
* stripped. Default: disabled (set)
*/
-#define XAE_OPTION_FCS_STRIP (1 << 5)
+#define XAE_OPTION_FCS_STRIP BIT(5)
/* Generate FCS field and add PAD automatically for outgoing frames.
* Default: enabled (set)
*/
-#define XAE_OPTION_FCS_INSERT (1 << 6)
+#define XAE_OPTION_FCS_INSERT BIT(6)
/* Enable Length/Type error checking for incoming frames. When this option is
* set, the MAC will filter frames that have a mismatched type/length field
@@ -56,13 +56,13 @@
* types of frames are encountered. When this option is cleared, the MAC will
* allow these types of frames to be received. Default: enabled (set)
*/
-#define XAE_OPTION_LENTYPE_ERR (1 << 7)
+#define XAE_OPTION_LENTYPE_ERR BIT(7)
/* Enable the transmitter. Default: enabled (set) */
-#define XAE_OPTION_TXEN (1 << 11)
+#define XAE_OPTION_TXEN BIT(11)
/* Enable the receiver. Default: enabled (set) */
-#define XAE_OPTION_RXEN (1 << 12)
+#define XAE_OPTION_RXEN BIT(12)
/* Default options set when device is initialized or reset */
#define XAE_OPTION_DEFAULTS \
@@ -156,6 +156,7 @@
#define XAE_TPID0_OFFSET 0x00000028 /* VLAN TPID0 register */
#define XAE_TPID1_OFFSET 0x0000002C /* VLAN TPID1 register */
#define XAE_PPST_OFFSET 0x00000030 /* PCS PMA Soft Temac Status Reg */
+#define XAE_STATS_OFFSET 0x00000200 /* Statistics counters */
#define XAE_RCW0_OFFSET 0x00000400 /* Rx Configuration Word 0 */
#define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */
#define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */
@@ -163,6 +164,7 @@
#define XAE_EMMC_OFFSET 0x00000410 /* MAC speed configuration */
#define XAE_PHYC_OFFSET 0x00000414 /* RX Max Frame Configuration */
#define XAE_ID_OFFSET 0x000004F8 /* Identification register */
+#define XAE_ABILITY_OFFSET 0x000004FC /* Ability Register offset */
#define XAE_MDIO_MC_OFFSET 0x00000500 /* MDIO Setup */
#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MDIO Control */
#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MDIO Write Data */
@@ -173,6 +175,8 @@
#define XAE_FFE_OFFSET 0x0000070C /* Frame Filter Enable */
#define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */
#define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */
+#define XAE_AM0_OFFSET 0x00000750 /* Frame Filter Mask Value Bytes 3-0 */
+#define XAE_AM1_OFFSET 0x00000754 /* Frame Filter Mask Value Bytes 7-4 */
#define XAE_TX_VLAN_DATA_OFFSET 0x00004000 /* TX VLAN data table address */
#define XAE_RX_VLAN_DATA_OFFSET 0x00008000 /* RX VLAN data table address */
@@ -284,6 +288,16 @@
#define XAE_PHYC_SGLINKSPD_100 0x40000000 /* SGMII link 100 Mbit */
#define XAE_PHYC_SGLINKSPD_1000 0x80000000 /* SGMII link 1000 Mbit */
+/* Bit masks for Axi Ethernet ability register */
+#define XAE_ABILITY_PFC BIT(16)
+#define XAE_ABILITY_FRAME_FILTER BIT(10)
+#define XAE_ABILITY_HALF_DUPLEX BIT(9)
+#define XAE_ABILITY_STATS BIT(8)
+#define XAE_ABILITY_2_5G BIT(3)
+#define XAE_ABILITY_1G BIT(2)
+#define XAE_ABILITY_100M BIT(1)
+#define XAE_ABILITY_10M BIT(0)
+
/* Bit masks for Axi Ethernet MDIO interface MC register */
#define XAE_MDIO_MC_MDIOEN_MASK 0x00000040 /* MII management enable */
#define XAE_MDIO_MC_CLOCK_DIVIDE_MAX 0x3F /* Maximum MDIO divisor */
@@ -327,11 +341,12 @@
#define XAE_MULTICAST_CAM_TABLE_NUM 4
/* Axi Ethernet Synthesis features */
-#define XAE_FEATURE_PARTIAL_RX_CSUM (1 << 0)
-#define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1)
-#define XAE_FEATURE_FULL_RX_CSUM (1 << 2)
-#define XAE_FEATURE_FULL_TX_CSUM (1 << 3)
-#define XAE_FEATURE_DMA_64BIT (1 << 4)
+#define XAE_FEATURE_PARTIAL_RX_CSUM BIT(0)
+#define XAE_FEATURE_PARTIAL_TX_CSUM BIT(1)
+#define XAE_FEATURE_FULL_RX_CSUM BIT(2)
+#define XAE_FEATURE_FULL_TX_CSUM BIT(3)
+#define XAE_FEATURE_DMA_64BIT BIT(4)
+#define XAE_FEATURE_STATS BIT(5)
#define XAE_NO_CSUM_OFFLOAD 0
@@ -345,6 +360,61 @@
#define XLNX_MII_STD_SELECT_REG 0x11
#define XLNX_MII_STD_SELECT_SGMII BIT(0)
+/* enum temac_stat - TEMAC statistics counters
+ *
+ * Index of statistics counters within the TEMAC. This must match the
+ * order/offset of hardware registers exactly.
+ */
+enum temac_stat {
+ STAT_RX_BYTES = 0,
+ STAT_TX_BYTES,
+ STAT_UNDERSIZE_FRAMES,
+ STAT_FRAGMENT_FRAMES,
+ STAT_RX_64_BYTE_FRAMES,
+ STAT_RX_65_127_BYTE_FRAMES,
+ STAT_RX_128_255_BYTE_FRAMES,
+ STAT_RX_256_511_BYTE_FRAMES,
+ STAT_RX_512_1023_BYTE_FRAMES,
+ STAT_RX_1024_MAX_BYTE_FRAMES,
+ STAT_RX_OVERSIZE_FRAMES,
+ STAT_TX_64_BYTE_FRAMES,
+ STAT_TX_65_127_BYTE_FRAMES,
+ STAT_TX_128_255_BYTE_FRAMES,
+ STAT_TX_256_511_BYTE_FRAMES,
+ STAT_TX_512_1023_BYTE_FRAMES,
+ STAT_TX_1024_MAX_BYTE_FRAMES,
+ STAT_TX_OVERSIZE_FRAMES,
+ STAT_RX_GOOD_FRAMES,
+ STAT_RX_FCS_ERRORS,
+ STAT_RX_BROADCAST_FRAMES,
+ STAT_RX_MULTICAST_FRAMES,
+ STAT_RX_CONTROL_FRAMES,
+ STAT_RX_LENGTH_ERRORS,
+ STAT_RX_VLAN_FRAMES,
+ STAT_RX_PAUSE_FRAMES,
+ STAT_RX_CONTROL_OPCODE_ERRORS,
+ STAT_TX_GOOD_FRAMES,
+ STAT_TX_BROADCAST_FRAMES,
+ STAT_TX_MULTICAST_FRAMES,
+ STAT_TX_UNDERRUN_ERRORS,
+ STAT_TX_CONTROL_FRAMES,
+ STAT_TX_VLAN_FRAMES,
+ STAT_TX_PAUSE_FRAMES,
+ STAT_TX_SINGLE_COLLISION_FRAMES,
+ STAT_TX_MULTIPLE_COLLISION_FRAMES,
+ STAT_TX_DEFERRED_FRAMES,
+ STAT_TX_LATE_COLLISIONS,
+ STAT_TX_EXCESS_COLLISIONS,
+ STAT_TX_EXCESS_DEFERRAL,
+ STAT_RX_ALIGNMENT_ERRORS,
+ STAT_TX_PFC_FRAMES,
+ STAT_RX_PFC_FRAMES,
+ STAT_USER_DEFINED0,
+ STAT_USER_DEFINED1,
+ STAT_USER_DEFINED2,
+ STAT_COUNT,
+};
+
/**
* struct axidma_bd - Axi Dma buffer descriptor layout
* @next: MM2S/S2MM Next Descriptor Pointer
@@ -435,6 +505,16 @@ struct skbuf_dma_descriptor {
* @tx_packets: TX packet count for statistics
* @tx_bytes: TX byte count for statistics
* @tx_stat_sync: Synchronization object for TX stats
+ * @hw_stat_base: Base offset for statistics counters. This may be nonzero if
+ *		  the statistics counters were reset or wrapped around.
+ * @hw_last_counter: Last-seen value of each statistic counter
+ * @reset_in_progress: Set while we are performing a reset and statistics
+ * counters may be invalid
+ * @hw_stats_seqcount: Sequence counter for @hw_stat_base, @hw_last_counter,
+ * and @reset_in_progress.
+ * @stats_lock: Lock for @hw_stats_seqcount
+ * @stats_work: Work for reading the hardware statistics counters often enough
+ * to catch overflows.
* @dma_err_task: Work structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
@@ -506,6 +586,13 @@ struct axienet_local {
u64_stats_t tx_bytes;
struct u64_stats_sync tx_stat_sync;
+ u64 hw_stat_base[STAT_COUNT];
+ u32 hw_last_counter[STAT_COUNT];
+ seqcount_mutex_t hw_stats_seqcount;
+ struct mutex stats_lock;
+ struct delayed_work stats_work;
+ bool reset_in_progress;
+
struct work_struct dma_err_task;
int tx_irq;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9aeb7b9f3ae4..fe6a0e2e463f 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -415,6 +415,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
struct sockaddr *addr = p;
+
axienet_set_mac_address(ndev, addr->sa_data);
return 0;
}
@@ -436,24 +437,27 @@ static void axienet_set_multicast_list(struct net_device *ndev)
u32 reg, af0reg, af1reg;
struct axienet_local *lp = netdev_priv(ndev);
- if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
- netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
- /* We must make the kernel realize we had to move into
- * promiscuous mode. If it was a promiscuous mode request
- * the flag is already set. If not we set it.
- */
- ndev->flags |= IFF_PROMISC;
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
+ reg = axienet_ior(lp, XAE_FMI_OFFSET);
+ reg &= ~XAE_FMI_PM_MASK;
+ if (ndev->flags & IFF_PROMISC)
reg |= XAE_FMI_PM_MASK;
+ else
+ reg &= ~XAE_FMI_PM_MASK;
+ axienet_iow(lp, XAE_FMI_OFFSET, reg);
+
+ if (ndev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
+ reg &= 0xFFFFFF00;
axienet_iow(lp, XAE_FMI_OFFSET, reg);
- dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
+ axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
+ axienet_iow(lp, XAE_AF1_OFFSET, 0);
+ axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
+ axienet_iow(lp, XAE_AM1_OFFSET, 0);
+ axienet_iow(lp, XAE_FFE_OFFSET, 1);
+ i = 1;
} else if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
- reg &= ~XAE_FMI_PM_MASK;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
-
netdev_for_each_mc_addr(ha, ndev) {
if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
break;
@@ -466,25 +470,21 @@ static void axienet_set_multicast_list(struct net_device *ndev)
af1reg = (ha->addr[4]);
af1reg |= (ha->addr[5] << 8);
- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg &= 0xFFFFFF00;
reg |= i;
axienet_iow(lp, XAE_FMI_OFFSET, reg);
axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
+ axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
+ axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
axienet_iow(lp, XAE_FFE_OFFSET, 1);
i++;
}
- } else {
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
- reg &= ~XAE_FMI_PM_MASK;
-
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
- dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg &= 0xFFFFFF00;
reg |= i;
axienet_iow(lp, XAE_FMI_OFFSET, reg);
axienet_iow(lp, XAE_FFE_OFFSET, 0);
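For reference, the CAM programming repeated in the branches above always has the same shape: the low byte of FMI selects the table entry, AF0/AF1 hold the 48-bit address, the AM0/AM1 registers introduced by this patch mask which address bits must match, and FFE arms or disarms the entry. A hypothetical helper (axienet_prog_cam() is not part of this patch) capturing that sequence:

static void axienet_prog_cam(struct axienet_local *lp, unsigned int entry,
			     u32 af0, u32 af1, u32 am0, u32 am1)
{
	u32 reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;

	/* Select the CAM entry, then load address, mask, and enable it */
	axienet_iow(lp, XAE_FMI_OFFSET, reg | entry);
	axienet_iow(lp, XAE_AF0_OFFSET, af0);
	axienet_iow(lp, XAE_AF1_OFFSET, af1);
	axienet_iow(lp, XAE_AM0_OFFSET, am0);
	axienet_iow(lp, XAE_AM1_OFFSET, am1);
	axienet_iow(lp, XAE_FFE_OFFSET, 1);
}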
@@ -519,11 +519,55 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
+static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
+{
+ u32 counter;
+
+ if (lp->reset_in_progress)
+ return lp->hw_stat_base[stat];
+
+ counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
+ return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
+}
+
+static void axienet_stats_update(struct axienet_local *lp, bool reset)
+{
+ enum temac_stat stat;
+
+ write_seqcount_begin(&lp->hw_stats_seqcount);
+ lp->reset_in_progress = reset;
+ for (stat = 0; stat < STAT_COUNT; stat++) {
+ u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
+
+ lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
+ lp->hw_last_counter[stat] = counter;
+ }
+ write_seqcount_end(&lp->hw_stats_seqcount);
+}
+
+static void axienet_refresh_stats(struct work_struct *work)
+{
+ struct axienet_local *lp = container_of(work, struct axienet_local,
+ stats_work.work);
+
+ mutex_lock(&lp->stats_lock);
+ axienet_stats_update(lp, false);
+ mutex_unlock(&lp->stats_lock);
+
+ /* Just less than 2^32 bytes at 2.5 GBit/s */
+ schedule_delayed_work(&lp->stats_work, 13 * HZ);
+}
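The axienet_stat()/axienet_stats_update() pair above widens the hardware's 32-bit counters to 64 bits without locking the read side: keep a u64 base plus the last-seen raw value, and let unsigned 32-bit subtraction absorb a single wrap. The 13-second refresh guarantees each counter is sampled at least once per wrap period (just under 2^32 bytes at 2.5 Gbit/s). A standalone sketch of the arithmetic:

#include <stdint.h>
#include <stdio.h>

struct stat64 {
	uint64_t base;	/* accumulated total as of 'last' */
	uint32_t last;	/* raw counter value at last update */
};

static void stat_update(struct stat64 *s, uint32_t raw)
{
	s->base += (uint32_t)(raw - s->last);	/* wrap-safe delta */
	s->last = raw;
}

static uint64_t stat_read(const struct stat64 *s, uint32_t raw)
{
	return s->base + (uint32_t)(raw - s->last);
}

int main(void)
{
	struct stat64 s = { .base = 0xfffffff0u, .last = 0xfffffff0u };

	stat_update(&s, 0x10);	/* counter wrapped; delta is 0x20 */
	printf("total = 0x%llx\n",
	       (unsigned long long)stat_read(&s, 0x10));	/* 0x100000010 */
	return 0;
}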
+
static int __axienet_device_reset(struct axienet_local *lp)
{
u32 value;
int ret;
+ /* Save statistics counters in case they will be reset */
+ mutex_lock(&lp->stats_lock);
+ if (lp->features & XAE_FEATURE_STATS)
+ axienet_stats_update(lp, true);
+
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
@@ -538,7 +582,7 @@ static int __axienet_device_reset(struct axienet_local *lp)
XAXIDMA_TX_CR_OFFSET);
if (ret) {
dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
- return ret;
+ goto out;
}
/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
@@ -548,10 +592,29 @@ static int __axienet_device_reset(struct axienet_local *lp)
XAE_IS_OFFSET);
if (ret) {
dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
- return ret;
+ goto out;
}
- return 0;
+ /* Update statistics counters with new values */
+ if (lp->features & XAE_FEATURE_STATS) {
+ enum temac_stat stat;
+
+ write_seqcount_begin(&lp->hw_stats_seqcount);
+ lp->reset_in_progress = false;
+ for (stat = 0; stat < STAT_COUNT; stat++) {
+ u32 counter =
+ axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
+
+ lp->hw_stat_base[stat] +=
+ lp->hw_last_counter[stat] - counter;
+ lp->hw_last_counter[stat] = counter;
+ }
+ write_seqcount_end(&lp->hw_stats_seqcount);
+ }
+
+out:
+ mutex_unlock(&lp->stats_lock);
+ return ret;
}
/**
@@ -614,8 +677,7 @@ static int axienet_device_reset(struct net_device *ndev)
lp->options |= XAE_OPTION_VLAN;
lp->options &= (~XAE_OPTION_JUMBO);
- if ((ndev->mtu > XAE_MTU) &&
- (ndev->mtu <= XAE_JUMBO_MTU)) {
+ if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
XAE_TRL_SIZE;
@@ -1297,7 +1359,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
ndev->stats.rx_missed_errors++;
if (pending & XAE_INT_RXRJECT_MASK)
- ndev->stats.rx_frame_errors++;
+ ndev->stats.rx_dropped++;
axienet_iow(lp, XAE_IS_OFFSET, pending);
return IRQ_HANDLED;
@@ -1515,8 +1577,6 @@ static int axienet_open(struct net_device *ndev)
int ret;
struct axienet_local *lp = netdev_priv(ndev);
- dev_dbg(&ndev->dev, "%s\n", __func__);
-
/* When we do an Axi Ethernet reset, it resets the complete core
* including the MDIO. MDIO must be disabled before resetting.
* Hold MDIO bus lock to avoid MDIO accesses during the reset.
@@ -1533,6 +1593,9 @@ static int axienet_open(struct net_device *ndev)
phylink_start(lp->phylink);
+ /* Start the statistics refresh work */
+ schedule_delayed_work(&lp->stats_work, 0);
+
if (lp->use_dmaengine) {
/* Enable interrupts for Axi Ethernet core (if defined) */
if (lp->eth_irq > 0) {
@@ -1557,6 +1620,7 @@ err_free_eth_irq:
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
err_phy:
+ cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
return ret;
@@ -1577,13 +1641,13 @@ static int axienet_stop(struct net_device *ndev)
struct axienet_local *lp = netdev_priv(ndev);
int i;
- dev_dbg(&ndev->dev, "axienet_close()\n");
-
if (!lp->use_dmaengine) {
napi_disable(&lp->napi_tx);
napi_disable(&lp->napi_rx);
}
+ cancel_delayed_work_sync(&lp->stats_work);
+
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
@@ -1658,6 +1722,7 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
static void axienet_poll_controller(struct net_device *ndev)
{
struct axienet_local *lp = netdev_priv(ndev);
+
disable_irq(lp->tx_irq);
disable_irq(lp->rx_irq);
axienet_rx_irq(lp->tx_irq, ndev);
@@ -1696,6 +1761,35 @@ axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_packets = u64_stats_read(&lp->tx_packets);
stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ stats->rx_length_errors =
+ axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
+ stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
+ stats->rx_frame_errors =
+ axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
+ stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
+ axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
+ stats->rx_length_errors +
+ stats->rx_crc_errors +
+ stats->rx_frame_errors;
+ stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
+
+ stats->tx_aborted_errors =
+ axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
+ stats->tx_fifo_errors =
+ axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
+ stats->tx_window_errors =
+ axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
+ stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
+ stats->tx_aborted_errors +
+ stats->tx_fifo_errors +
+ stats->tx_window_errors;
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
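
The read side follows the usual seqcount pattern: take a snapshot, then retry if a writer interleaved. A minimal sketch for a single counter (hypothetical helper, not in the patch):

	static u64 axienet_read_one_stat(struct axienet_local *lp,
					 enum temac_stat stat)
	{
		unsigned int start;
		u64 val;

		do {
			start = read_seqcount_begin(&lp->hw_stats_seqcount);
			val = axienet_stat(lp, stat);
		} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));

		return val;
	}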
static const struct net_device_ops axienet_netdev_ops = {
@@ -1988,6 +2082,213 @@ static int axienet_ethtools_nway_reset(struct net_device *dev)
return phylink_ethtool_nway_reset(lp->phylink);
}
+static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ data[0] = axienet_stat(lp, STAT_RX_BYTES);
+ data[1] = axienet_stat(lp, STAT_TX_BYTES);
+ data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
+ data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
+ data[4] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
+ data[5] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
+ data[6] = axienet_stat(lp, STAT_USER_DEFINED0);
+ data[7] = axienet_stat(lp, STAT_USER_DEFINED1);
+ data[8] = axienet_stat(lp, STAT_USER_DEFINED2);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
+ "Received bytes",
+ "Transmitted bytes",
+ "RX Good VLAN Tagged Frames",
+ "TX Good VLAN Tagged Frames",
+ "TX Good PFC Frames",
+ "RX Good PFC Frames",
+ "User Defined Counter 0",
+ "User Defined Counter 1",
+ "User Defined Counter 2",
+};
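
The data[] indices written in axienet_ethtools_get_ethtool_stats() must map one-to-one onto this array, and get_sset_count() must return its size; a compile-time guard along these lines (not in the patch) would catch a mismatch:

	static_assert(ARRAY_SIZE(axienet_ethtool_stats_strings) == 9,
		      "ethtool stats strings out of sync with data[] indices");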
+
+static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, axienet_ethtool_stats_strings,
+ sizeof(axienet_ethtool_stats_strings));
+ break;
+ }
+}
+
+static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (lp->features & XAE_FEATURE_STATS)
+ return ARRAY_SIZE(axienet_ethtool_stats_strings);
+ fallthrough;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+axienet_ethtools_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ pause_stats->tx_pause_frames =
+ axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
+ pause_stats->rx_pause_frames =
+ axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static void
+axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ mac_stats->FramesTransmittedOK =
+ axienet_stat(lp, STAT_TX_GOOD_FRAMES);
+ mac_stats->SingleCollisionFrames =
+ axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
+ mac_stats->MultipleCollisionFrames =
+ axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
+ mac_stats->FramesReceivedOK =
+ axienet_stat(lp, STAT_RX_GOOD_FRAMES);
+ mac_stats->FrameCheckSequenceErrors =
+ axienet_stat(lp, STAT_RX_FCS_ERRORS);
+ mac_stats->AlignmentErrors =
+ axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
+ mac_stats->FramesWithDeferredXmissions =
+ axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
+ mac_stats->LateCollisions =
+ axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
+ mac_stats->FramesAbortedDueToXSColls =
+ axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
+ mac_stats->MulticastFramesXmittedOK =
+ axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
+ mac_stats->BroadcastFramesXmittedOK =
+ axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
+ mac_stats->FramesWithExcessiveDeferral =
+ axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
+ mac_stats->MulticastFramesReceivedOK =
+ axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
+ mac_stats->BroadcastFramesReceivedOK =
+ axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
+ mac_stats->InRangeLengthErrors =
+ axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static void
+axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ ctrl_stats->MACControlFramesTransmitted =
+ axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
+ ctrl_stats->MACControlFramesReceived =
+ axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
+ ctrl_stats->UnsupportedOpcodesReceived =
+ axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+}
+
+static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 16384 },
+ { },
+};
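
Each range above corresponds positionally to one hist[]/hist_tx[] slot filled in axienet_ethtool_get_rmon_stats() below; the empty trailing entry is the zero terminator the ethtool core stops on when it walks the array.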
+
+static void
+axienet_ethtool_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ if (!(lp->features & XAE_FEATURE_STATS))
+ return;
+
+ do {
+ start = read_seqcount_begin(&lp->hw_stats_seqcount);
+ rmon_stats->undersize_pkts =
+ axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
+ rmon_stats->oversize_pkts =
+ axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
+ rmon_stats->fragments =
+ axienet_stat(lp, STAT_FRAGMENT_FRAMES);
+
+ rmon_stats->hist[0] =
+ axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
+ rmon_stats->hist[1] =
+ axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
+ rmon_stats->hist[2] =
+ axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
+ rmon_stats->hist[3] =
+ axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
+ rmon_stats->hist[4] =
+ axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
+ rmon_stats->hist[5] =
+ axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
+ rmon_stats->hist[6] =
+ rmon_stats->oversize_pkts;
+
+ rmon_stats->hist_tx[0] =
+ axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
+ rmon_stats->hist_tx[1] =
+ axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
+ rmon_stats->hist_tx[2] =
+ axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
+ rmon_stats->hist_tx[3] =
+ axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
+ rmon_stats->hist_tx[4] =
+ axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
+ rmon_stats->hist_tx[5] =
+ axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
+ rmon_stats->hist_tx[6] =
+ axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
+ } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
+
+ *ranges = axienet_rmon_ranges;
+}
+
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS,
@@ -2004,6 +2305,13 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.get_link_ksettings = axienet_ethtools_get_link_ksettings,
.set_link_ksettings = axienet_ethtools_set_link_ksettings,
.nway_reset = axienet_ethtools_nway_reset,
+ .get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
+ .get_strings = axienet_ethtools_get_strings,
+ .get_sset_count = axienet_ethtools_get_sset_count,
+ .get_pause_stats = axienet_ethtools_get_pause_stats,
+ .get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
+ .get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
+ .get_rmon_stats = axienet_ethtool_get_rmon_stats,
};
static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
@@ -2272,6 +2580,10 @@ static int axienet_probe(struct platform_device *pdev)
u64_stats_init(&lp->rx_stat_sync);
u64_stats_init(&lp->tx_stat_sync);
+ mutex_init(&lp->stats_lock);
+ seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
+ INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
+
lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
if (!lp->axi_clk) {
/* For backward compatibility, if named AXI clock is not present,
@@ -2312,6 +2624,9 @@ static int axienet_probe(struct platform_device *pdev)
/* Setup checksum offload, but default to off if not specified */
lp->features = 0;
+ if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
+ lp->features |= XAE_FEATURE_STATS;
+
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
if (!ret) {
switch (value) {
@@ -2397,7 +2712,7 @@ static int axienet_probe(struct platform_device *pdev)
goto cleanup_clk;
}
- if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
+ if (!of_property_present(pdev->dev.of_node, "dmas")) {
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 810977952f95..e690b95b1bbb 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -882,7 +882,7 @@ struct nvsp_message {
#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */
#define VRSS_CHANNEL_MAX 64
-#define VRSS_CHANNEL_DEFAULT 8
+#define VRSS_CHANNEL_DEFAULT 16
#define RNDIS_MAX_PKT_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 4a9522689fa4..e01c5997a551 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -183,7 +183,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
- ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);
+ ret = dev_xdp_propagate(vf_netdev, &xdp);
if (ret && prog)
bpf_prog_put(prog);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 44142245343d..153b97f8ec0d 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -987,7 +987,8 @@ struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
dev_info->bprog = prog;
}
} else {
- dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
+ dev_info->num_chn = max(VRSS_CHANNEL_DEFAULT,
+ netif_get_num_default_rss_queues());
dev_info->send_sections = NETVSC_DEFAULT_TX;
dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
dev_info->recv_sections = NETVSC_DEFAULT_RX;
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index 65fd14da0f86..c572da9e9bc4 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -242,11 +242,8 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
int ret;
clk = clk_get(dev, "core");
- if (IS_ERR(clk)) {
- dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");
-
- return ERR_CAST(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_cast_probe(dev, clk, "error getting core clock\n");
ret = clk_set_rate(clk, data->core_clock_rate);
if (ret) {
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 08e607f62e10..2f4fc664d2e1 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -390,7 +390,7 @@ EXPORT_SYMBOL(of_phy_get_and_connect);
bool of_phy_is_fixed_link(struct device_node *np)
{
struct device_node *dn;
- int len, err;
+ int err;
const char *managed;
/* New binding */
@@ -405,8 +405,7 @@ bool of_phy_is_fixed_link(struct device_node *np)
return true;
/* Old binding */
- if (of_get_property(np, "fixed-link", &len) &&
- len == (5 * sizeof(__be32)))
+ if (of_property_count_u32_elems(np, "fixed-link") == 5)
return true;
return false;
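
For context, the legacy binding that the element count above detects encodes five u32 cells; a sketch of such a node (illustrative values):

	/* fixed-link = <0 1 1000 0 0>;
	 *               |  |   |   |  `- asym pause
	 *               |  |   |   `- pause
	 *               |  |   `- link speed
	 *               |  `- duplex (1 = full)
	 *               `- emulated PHY id
	 */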
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 9c09293b5258..01cf33fa7503 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -37,8 +37,9 @@
#include <linux/configfs.h>
#include <linux/etherdevice.h>
#include <linux/utsname.h>
+#include <linux/rtnetlink.h>
-MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>");
+MODULE_AUTHOR("Matt Mackall <mpm@selenic.com>");
MODULE_DESCRIPTION("Console driver for network interfaces");
MODULE_LICENSE("GPL");
@@ -72,9 +73,16 @@ __setup("netconsole=", option_setup);
/* Linked list of all configured targets */
static LIST_HEAD(target_list);
+/* target_cleanup_list is used to track targets that need to be cleaned up
+ * outside of target_list_lock. It should be drained in the same function
+ * that populates it.
+ */
+static LIST_HEAD(target_cleanup_list);
/* This needs to be a spinlock because write_msg() cannot sleep */
static DEFINE_SPINLOCK(target_list_lock);
+/* This needs to be a mutex because netpoll_cleanup might sleep */
+static DEFINE_MUTEX(target_cleanup_list_lock);
/*
* Console driver for extended netconsoles. Registered on the first use to
@@ -210,6 +218,33 @@ static struct netconsole_target *alloc_and_init(void)
return nt;
}
+/* Clean up every target in the cleanup_list and move the cleaned targets back
+ * to the main target_list.
+ */
+static void netconsole_process_cleanups_core(void)
+{
+ struct netconsole_target *nt, *tmp;
+ unsigned long flags;
+
+ /* The cleanup needs RTNL locked */
+ ASSERT_RTNL();
+
+ mutex_lock(&target_cleanup_list_lock);
+ list_for_each_entry_safe(nt, tmp, &target_cleanup_list, list) {
+ /* All entries in the cleanup list need to be disabled */
+ WARN_ON_ONCE(nt->enabled);
+ do_netpoll_cleanup(&nt->np);
+ /* Move the cleaned target back to target_list. Both locks
+ * must be held
+ */
+ spin_lock_irqsave(&target_list_lock, flags);
+ list_move(&nt->list, &target_list);
+ spin_unlock_irqrestore(&target_list_lock, flags);
+ }
+ WARN_ON_ONCE(!list_empty(&target_cleanup_list));
+ mutex_unlock(&target_cleanup_list_lock);
+}
+
#ifdef CONFIG_NETCONSOLE_DYNAMIC
/*
@@ -246,6 +281,19 @@ static struct netconsole_target *to_target(struct config_item *item)
struct netconsole_target, group);
}
+/* Do the list cleanup with the rtnl lock held. The rtnl lock is necessary
+ * because a netdev might be cleaned up by calling __netpoll_cleanup().
+ */
+static void netconsole_process_cleanups(void)
+{
+ /* The rtnl lock is taken first because it must be acquired
+ * before the target_cleanup_list_lock mutex in the lock ordering
+ */
+ rtnl_lock();
+ netconsole_process_cleanups_core();
+ rtnl_unlock();
+}
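
Together with netconsole_netdev_event() below, this fixes the lock ordering as:

	/* Lock ordering (outermost first):
	 *   rtnl_lock
	 *     -> target_cleanup_list_lock  (mutex; cleanup may sleep)
	 *       -> target_list_lock        (spinlock; shared with write_msg())
	 */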
+
/* Get rid of possible trailing newline, returning the new length */
static void trim_newline(char *s, size_t maxlen)
{
@@ -336,14 +384,14 @@ static ssize_t enabled_store(struct config_item *item,
struct netconsole_target *nt = to_target(item);
unsigned long flags;
bool enabled;
- int err;
+ ssize_t ret;
mutex_lock(&dynamic_netconsole_mutex);
- err = kstrtobool(buf, &enabled);
- if (err)
+ ret = kstrtobool(buf, &enabled);
+ if (ret)
goto out_unlock;
- err = -EINVAL;
+ ret = -EINVAL;
if (enabled == nt->enabled) {
pr_info("network logging has already %s\n",
nt->enabled ? "started" : "stopped");
@@ -365,8 +413,8 @@ static ssize_t enabled_store(struct config_item *item,
*/
netpoll_print_options(&nt->np);
- err = netpoll_setup(&nt->np);
- if (err)
+ ret = netpoll_setup(&nt->np);
+ if (ret)
goto out_unlock;
nt->enabled = true;
@@ -376,17 +424,23 @@ static ssize_t enabled_store(struct config_item *item,
* otherwise we might end up in write_msg() with
* nt->np.dev == NULL and nt->enabled == true
*/
+ mutex_lock(&target_cleanup_list_lock);
spin_lock_irqsave(&target_list_lock, flags);
nt->enabled = false;
+ /* Remove the target from the list, while holding
+ * target_list_lock
+ */
+ list_move(&nt->list, &target_cleanup_list);
spin_unlock_irqrestore(&target_list_lock, flags);
- netpoll_cleanup(&nt->np);
+ mutex_unlock(&target_cleanup_list_lock);
}
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
+ /* Deferred cleanup */
+ netconsole_process_cleanups();
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return err;
+ return ret;
}
static ssize_t release_store(struct config_item *item, const char *buf,
@@ -394,27 +448,26 @@ static ssize_t release_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
bool release;
- int err;
+ ssize_t ret;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
- err = -EINVAL;
+ ret = -EINVAL;
goto out_unlock;
}
- err = kstrtobool(buf, &release);
- if (err)
+ ret = kstrtobool(buf, &release);
+ if (ret)
goto out_unlock;
nt->release = release;
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return err;
+ return ret;
}
static ssize_t extended_store(struct config_item *item, const char *buf,
@@ -422,27 +475,25 @@ static ssize_t extended_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
bool extended;
- int err;
+ ssize_t ret;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
config_item_name(&nt->group.cg_item));
- err = -EINVAL;
+ ret = -EINVAL;
goto out_unlock;
}
- err = kstrtobool(buf, &extended);
- if (err)
+ ret = kstrtobool(buf, &extended);
+ if (ret)
goto out_unlock;
nt->extended = extended;
-
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return err;
+ return ret;
}
static ssize_t dev_name_store(struct config_item *item, const char *buf,
@@ -469,7 +520,7 @@ static ssize_t local_port_store(struct config_item *item, const char *buf,
size_t count)
{
struct netconsole_target *nt = to_target(item);
- int rv = -EINVAL;
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -478,21 +529,20 @@ static ssize_t local_port_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- rv = kstrtou16(buf, 10, &nt->np.local_port);
- if (rv < 0)
+ ret = kstrtou16(buf, 10, &nt->np.local_port);
+ if (ret < 0)
goto out_unlock;
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return rv;
+ return ret;
}
static ssize_t remote_port_store(struct config_item *item,
const char *buf, size_t count)
{
struct netconsole_target *nt = to_target(item);
- int rv = -EINVAL;
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -501,20 +551,20 @@ static ssize_t remote_port_store(struct config_item *item,
goto out_unlock;
}
- rv = kstrtou16(buf, 10, &nt->np.remote_port);
- if (rv < 0)
+ ret = kstrtou16(buf, 10, &nt->np.remote_port);
+ if (ret < 0)
goto out_unlock;
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return rv;
+ return ret;
}
static ssize_t local_ip_store(struct config_item *item, const char *buf,
size_t count)
{
struct netconsole_target *nt = to_target(item);
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -541,17 +591,17 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return -EINVAL;
+ return ret;
}
static ssize_t remote_ip_store(struct config_item *item, const char *buf,
size_t count)
{
struct netconsole_target *nt = to_target(item);
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -578,11 +628,10 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return -EINVAL;
+ return ret;
}
static ssize_t remote_mac_store(struct config_item *item, const char *buf,
@@ -590,6 +639,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
u8 remote_mac[ETH_ALEN];
+ ssize_t ret = -EINVAL;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -604,11 +654,10 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
goto out_unlock;
memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN);
- mutex_unlock(&dynamic_netconsole_mutex);
- return strnlen(buf, count);
+ ret = strnlen(buf, count);
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
- return -EINVAL;
+ return ret;
}
struct userdatum {
@@ -685,7 +734,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
struct userdatum *udm = to_userdatum(item);
struct netconsole_target *nt;
struct userdata *ud;
- int ret;
+ ssize_t ret;
if (count > MAX_USERDATA_VALUE_LENGTH)
return -EMSGSIZE;
@@ -700,9 +749,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
ud = to_userdata(item->ci_parent);
nt = userdata_to_target(ud);
update_userdata(nt);
-
- mutex_unlock(&dynamic_netconsole_mutex);
- return count;
+ ret = count;
out_unlock:
mutex_unlock(&dynamic_netconsole_mutex);
return ret;
@@ -778,7 +825,7 @@ static struct configfs_group_operations userdata_ops = {
.drop_item = userdatum_drop,
};
-static struct config_item_type userdata_type = {
+static const struct config_item_type userdata_type = {
.ct_item_ops = &userdatum_ops,
.ct_group_ops = &userdata_ops,
.ct_attrs = userdata_attrs,
@@ -950,7 +997,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
unsigned long flags;
- struct netconsole_target *nt;
+ struct netconsole_target *nt, *tmp;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
bool stopped = false;
@@ -958,9 +1005,9 @@ static int netconsole_netdev_event(struct notifier_block *this,
event == NETDEV_RELEASE || event == NETDEV_JOIN))
goto done;
+ mutex_lock(&target_cleanup_list_lock);
spin_lock_irqsave(&target_list_lock, flags);
-restart:
- list_for_each_entry(nt, &target_list, list) {
+ list_for_each_entry_safe(nt, tmp, &target_list, list) {
netconsole_target_get(nt);
if (nt->np.dev == dev) {
switch (event) {
@@ -970,25 +1017,16 @@ restart:
case NETDEV_RELEASE:
case NETDEV_JOIN:
case NETDEV_UNREGISTER:
- /* rtnl_lock already held
- * we might sleep in __netpoll_cleanup()
- */
nt->enabled = false;
- spin_unlock_irqrestore(&target_list_lock, flags);
-
- __netpoll_cleanup(&nt->np);
-
- spin_lock_irqsave(&target_list_lock, flags);
- netdev_put(nt->np.dev, &nt->np.dev_tracker);
- nt->np.dev = NULL;
+ list_move(&nt->list, &target_cleanup_list);
stopped = true;
- netconsole_target_put(nt);
- goto restart;
}
}
netconsole_target_put(nt);
}
spin_unlock_irqrestore(&target_list_lock, flags);
+ mutex_unlock(&target_cleanup_list_lock);
+
if (stopped) {
const char *msg = "had an event";
@@ -1007,6 +1045,11 @@ restart:
dev->name, msg);
}
+ /* Process target_cleanup_list entries. By the end, target_cleanup_list
+ * should be empty
+ */
+ netconsole_process_cleanups_core();
+
done:
return NOTIFY_DONE;
}
@@ -1215,11 +1258,18 @@ static struct netconsole_target *alloc_param_target(char *target_config,
goto fail;
err = netpoll_setup(&nt->np);
- if (err)
- goto fail;
-
+ if (err) {
+ pr_err("Not enabling netconsole for %s%d. Netpoll setup failed\n",
+ NETCONSOLE_PARAM_TARGET_PREFIX, cmdline_count);
+ if (!IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC))
+ /* Only fail when dynamic reconfiguration is not
+ * available; otherwise keep the target in the list,
+ * but disabled.
+ */
+ goto fail;
+ } else {
+ nt->enabled = true;
+ }
populate_configfs_item(nt, cmdline_count);
- nt->enabled = true;
return nt;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 7fddc8306d82..f530fcd092fe 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -44,6 +44,9 @@ config LED_TRIGGER_PHY
<Speed in megabits>Mbps OR <Speed in gigabits>Gbps OR link
for any speed known to the PHY.
+config OPEN_ALLIANCE_HELPERS
+ bool
+
config PHYLIB_LEDS
def_bool OF
depends on LEDS_CLASS=y || LEDS_CLASS=PHYLIB
@@ -414,6 +417,7 @@ config DP83TD510_PHY
config DP83TG720_PHY
tristate "Texas Instruments DP83TG720 Ethernet 1000Base-T1 PHY"
+ select OPEN_ALLIANCE_HELPERS
help
The DP83TG720S-Q1 is an automotive Ethernet physical layer
transceiver compliant with IEEE 802.3bp and Open Alliance
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 202ed7f450da..33adb3df5f64 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -2,7 +2,7 @@
# Makefile for Linux PHY drivers
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
- linkmode.o
+ linkmode.o phy_link_topology.o
mdio-bus-y += mdio_bus.o mdio_device.o
ifdef CONFIG_MDIO_DEVICE
@@ -22,6 +22,7 @@ endif
obj-$(CONFIG_MDIO_DEVRES) += mdio_devres.o
libphy-$(CONFIG_SWPHY) += swphy.o
libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
+libphy-$(CONFIG_OPEN_ALLIANCE_HELPERS) += open_alliance_helpers.o
obj-$(CONFIG_PHYLINK) += phylink.o
obj-$(CONFIG_PHYLIB) += libphy.o
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
index 551e37786c2d..92aa3a2b9744 100644
--- a/drivers/net/phy/dp83td510.c
+++ b/drivers/net/phy/dp83td510.c
@@ -58,6 +58,10 @@ static const u16 dp83td510_mse_sqi_map[] = {
0x0000 /* 24dB =< SNR */
};
+struct dp83td510_priv {
+ bool alcd_test_active;
+};
+
/* Time Domain Reflectometry (TDR) Functionality of DP83TD510 PHY
*
* I assume that this PHY is using a variation of Spread Spectrum Time Domain
@@ -169,6 +173,10 @@ static const u16 dp83td510_mse_sqi_map[] = {
#define DP83TD510E_UNKN_030E 0x30e
#define DP83TD510E_030E_VAL 0x2520
+#define DP83TD510E_ALCD_STAT 0xa9f
+#define DP83TD510E_ALCD_COMPLETE BIT(15)
+#define DP83TD510E_ALCD_CABLE_LENGTH GENMASK(10, 0)
+
static int dp83td510_config_intr(struct phy_device *phydev)
{
int ret;
@@ -325,8 +333,23 @@ static int dp83td510_get_sqi_max(struct phy_device *phydev)
*/
static int dp83td510_cable_test_start(struct phy_device *phydev)
{
+ struct dp83td510_priv *priv = phydev->priv;
int ret;
+ /* If link partner is active, we won't be able to use TDR, since
+ * we can't force link partner to be silent. The autonegotiation
+ * pulses will be too frequent and the TDR sequence will be
+ * too long. So, TDR will always fail. Since the link is established
+ * we already know that the cable is working, so we can get some
+ * extra information like the cable length using ALCD.
+ */
+ if (phydev->link) {
+ priv->alcd_test_active = true;
+ return 0;
+ }
+
+ priv->alcd_test_active = false;
+
ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_CTRL,
DP83TD510E_CTRL_HW_RESET);
if (ret)
@@ -402,8 +425,8 @@ static int dp83td510_cable_test_start(struct phy_device *phydev)
}
/**
- * dp83td510_cable_test_get_status - Get the status of the cable test for the
- * DP83TD510 PHY.
+ * dp83td510_cable_test_get_tdr_status - Get the status of the TDR test for the
+ * DP83TD510 PHY.
* @phydev: Pointer to the phy_device structure.
* @finished: Pointer to a boolean that indicates whether the test is finished.
*
@@ -411,13 +434,11 @@ static int dp83td510_cable_test_start(struct phy_device *phydev)
*
* Returns: 0 on success or a negative error code on failure.
*/
-static int dp83td510_cable_test_get_status(struct phy_device *phydev,
- bool *finished)
+static int dp83td510_cable_test_get_tdr_status(struct phy_device *phydev,
+ bool *finished)
{
int ret, stat;
- *finished = false;
-
ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG);
if (ret < 0)
return ret;
@@ -459,6 +480,77 @@ static int dp83td510_cable_test_get_status(struct phy_device *phydev,
return phy_init_hw(phydev);
}
+/**
+ * dp83td510_cable_test_get_alcd_status - Get the status of the ALCD test for the
+ * DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ * @finished: Pointer to a boolean that indicates whether the test is finished.
+ *
+ * The function sets the @finished flag to true if the test is complete.
+ * The function reads the cable length and reports it to the user.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83td510_cable_test_get_alcd_status(struct phy_device *phydev,
+ bool *finished)
+{
+ unsigned int location;
+ int ret, phy_sts;
+
+ phy_sts = phy_read(phydev, DP83TD510E_PHY_STS);
+
+ if (!(phy_sts & DP83TD510E_LINK_STATUS)) {
+ /* If the link is down, we can't do any thing usable now */
+ ethnl_cable_test_result_with_src(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC,
+ ETHTOOL_A_CABLE_INF_SRC_ALCD);
+ *finished = true;
+ return 0;
+ }
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_ALCD_STAT);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & DP83TD510E_ALCD_COMPLETE))
+ return 0;
+
+ location = FIELD_GET(DP83TD510E_ALCD_CABLE_LENGTH, ret) * 100;
+
+ ethnl_cable_test_fault_length_with_src(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ location,
+ ETHTOOL_A_CABLE_INF_SRC_ALCD);
+
+ ethnl_cable_test_result_with_src(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_OK,
+ ETHTOOL_A_CABLE_INF_SRC_ALCD);
+ *finished = true;
+
+ return 0;
+}
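
ethnl_cable_test_fault_length_with_src() takes a length in centimeters, hence the * 100 scaling above; the ALCD length field itself appears to be reported by the PHY in meters.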
+
+/**
+ * dp83td510_cable_test_get_status - Get the status of the cable test for the
+ * DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ * @finished: Pointer to a boolean that indicates whether the test is finished.
+ *
+ * The function sets the @finished flag to true if the test is complete.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83td510_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ struct dp83td510_priv *priv = phydev->priv;
+ *finished = false;
+
+ if (priv->alcd_test_active)
+ return dp83td510_cable_test_get_alcd_status(phydev, finished);
+
+ return dp83td510_cable_test_get_tdr_status(phydev, finished);
+}
+
static int dp83td510_get_features(struct phy_device *phydev)
{
/* This PHY can't respond on MDIO bus if no RMII clock is enabled.
@@ -477,12 +569,27 @@ static int dp83td510_get_features(struct phy_device *phydev)
return 0;
}
+static int dp83td510_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct dp83td510_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static struct phy_driver dp83td510_driver[] = {
{
PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID),
.name = "TI DP83TD510E",
.flags = PHY_POLL_CABLE_TEST,
+ .probe = dp83td510_probe,
.config_aneg = dp83td510_config_aneg,
.read_status = dp83td510_read_status,
.get_features = dp83td510_get_features,
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index c706429b225a..0ef4d7dba065 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -3,10 +3,13 @@
* Copyright (c) 2023 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
*/
#include <linux/bitfield.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
+#include "open_alliance_helpers.h"
+
#define DP83TG720S_PHY_ID 0x2000a284
/* MDIO_MMD_VEND2 registers */
@@ -14,6 +17,17 @@
#define DP83TG720S_STS_MII_INT BIT(7)
#define DP83TG720S_LINK_STATUS BIT(0)
+/* TDR Configuration Register (0x1E) */
+#define DP83TG720S_TDR_CFG 0x1e
+/* 1b = TDR start, 0b = No TDR */
+#define DP83TG720S_TDR_START BIT(15)
+/* 1b = TDR auto on link down, 0b = Manual TDR start */
+#define DP83TG720S_CFG_TDR_AUTO_RUN BIT(14)
+/* 1b = TDR done, 0b = TDR in progress */
+#define DP83TG720S_TDR_DONE BIT(1)
+/* 1b = TDR fail, 0b = TDR success */
+#define DP83TG720S_TDR_FAIL BIT(0)
+
#define DP83TG720S_PHY_RESET 0x1f
#define DP83TG720S_HW_RESET BIT(15)
@@ -22,18 +36,155 @@
/* Power Mode 0 is Normal mode */
#define DP83TG720S_LPS_CFG3_PWR_MODE_0 BIT(0)
+/* Open Alliance 1000BaseT1 compatible HDD.TDR Fault Status Register */
+#define DP83TG720S_TDR_FAULT_STATUS 0x30f
+
+/* Register 0x0301: TDR Configuration 2 */
+#define DP83TG720S_TDR_CFG2 0x301
+
+/* Register 0x0303: TDR Configuration 3 */
+#define DP83TG720S_TDR_CFG3 0x303
+
+/* Register 0x0304: TDR Configuration 4 */
+#define DP83TG720S_TDR_CFG4 0x304
+
+/* Register 0x0405: Unknown Register */
+#define DP83TG720S_UNKNOWN_0405 0x405
+
+/* Register 0x0576: TDR Master Link Down Control */
+#define DP83TG720S_TDR_MASTER_LINK_DOWN 0x576
+
#define DP83TG720S_RGMII_DELAY_CTRL 0x602
/* In RGMII mode, Enable or disable the internal delay for RXD */
#define DP83TG720S_RGMII_RX_CLK_SEL BIT(1)
/* In RGMII mode, Enable or disable the internal delay for TXD */
#define DP83TG720S_RGMII_TX_CLK_SEL BIT(0)
+/* Register 0x083F: Unknown Register */
+#define DP83TG720S_UNKNOWN_083F 0x83f
+
#define DP83TG720S_SQI_REG_1 0x871
#define DP83TG720S_SQI_OUT_WORST GENMASK(7, 5)
#define DP83TG720S_SQI_OUT GENMASK(3, 1)
#define DP83TG720_SQI_MAX 7
+/**
+ * dp83tg720_cable_test_start - Start the cable test for the DP83TG720 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * This sequence is based on the documented procedure for the DP83TG720 PHY.
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ */
+static int dp83tg720_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Initialize the PHY to run the TDR test as described in the
+ * "DP83TG720S-Q1: Configuring for Open Alliance Specification
+ * Compliance (Rev. B)" application note.
+ * Most of the registers are not documented. Some of the register names
+ * were guessed by comparing the register offsets with the DP83TD510E.
+ */
+
+ /* Force master link down */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TG720S_TDR_MASTER_LINK_DOWN, 0x0400);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG2,
+ 0xa008);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG3,
+ 0x0928);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG4,
+ 0x0004);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_UNKNOWN_0405,
+ 0x6400);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_UNKNOWN_083F,
+ 0x3003);
+ if (ret)
+ return ret;
+
+ /* Start the TDR */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG,
+ DP83TG720S_TDR_START);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * dp83tg720_cable_test_get_status - Get the status of the cable test for the
+ * DP83TG720 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ * @finished: Pointer to a boolean that indicates whether the test is finished.
+ *
+ * The function sets the @finished flag to true if the test is complete.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83tg720_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret, stat;
+
+ *finished = false;
+
+ /* Read the TDR status */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_TDR_CFG);
+ if (ret < 0)
+ return ret;
+
+ /* Check if the TDR test is done */
+ if (!(ret & DP83TG720S_TDR_DONE))
+ return 0;
+
+ /* TDR completed without failure; decode the result */
+ if (!(ret & DP83TG720S_TDR_FAIL)) {
+ int location;
+
+ /* Read fault status */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TG720S_TDR_FAULT_STATUS);
+ if (ret < 0)
+ return ret;
+
+ /* Get fault type */
+ stat = oa_1000bt1_get_ethtool_cable_result_code(ret);
+
+ /* Determine fault location */
+ location = oa_1000bt1_get_tdr_distance(ret);
+ if (location > 0)
+ ethnl_cable_test_fault_length(phydev,
+ ETHTOOL_A_CABLE_PAIR_A,
+ location);
+ } else {
+ /* Active link partner or other issues */
+ stat = ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+
+ *finished = true;
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, stat);
+
+ return phy_init_hw(phydev);
+}
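
Returning phy_init_hw() here, as the DP83TD510 driver also does, presumably restores normal operation after the mostly-undocumented TDR setup in dp83tg720_cable_test_start() left the PHY in a test configuration.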
+
static int dp83tg720_config_aneg(struct phy_device *phydev)
{
int ret;
@@ -195,12 +346,15 @@ static struct phy_driver dp83tg720_driver[] = {
PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID),
.name = "TI DP83TG720S",
+ .flags = PHY_POLL_CABLE_TEST,
.config_aneg = dp83tg720_config_aneg,
.read_status = dp83tg720_read_status,
.get_features = genphy_c45_pma_read_ext_abilities,
.config_init = dp83tg720_config_init,
.get_sqi = dp83tg720_get_sqi,
.get_sqi_max = dp83tg720_get_sqi_max,
+ .cable_test_start = dp83tg720_cable_test_start,
+ .cable_test_get_status = dp83tg720_cable_test_get_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index b88398e6872b..0b777cdd7078 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -553,6 +553,8 @@ static const struct sfp_upstream_ops sfp_phy_ops = {
.link_down = mv2222_sfp_link_down,
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
};
static int mv2222_probe(struct phy_device *phydev)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b89fbffa6a93..9964bf3dea2f 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -3613,6 +3613,8 @@ static const struct sfp_upstream_ops m88e1510_sfp_ops = {
.module_remove = m88e1510_sfp_remove,
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
};
static int m88e1510_probe(struct phy_device *phydev)
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index ad43e280930c..6642eb642d4b 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -503,6 +503,8 @@ static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
static const struct sfp_upstream_ops mv3310_sfp_ops = {
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
.module_insert = mv3310_sfp_insert,
};
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index a35528497a57..5732ad65e7f9 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -12,6 +12,7 @@
#define PHY_ID_LAN87XX 0x0007c150
#define PHY_ID_LAN937X 0x0007c180
+#define PHY_ID_LAN887X 0x0007c1f0
/* External Register Control Register */
#define LAN87XX_EXT_REG_CTL (0x14)
@@ -94,8 +95,101 @@
/* SQI defines */
#define LAN87XX_MAX_SQI 0x07
+/* Chiptop registers */
+#define LAN887X_PMA_EXT_ABILITY_2 0x12
+#define LAN887X_PMA_EXT_ABILITY_2_1000T1 BIT(1)
+#define LAN887X_PMA_EXT_ABILITY_2_100T1 BIT(0)
+
+/* DSP 100M registers */
+#define LAN887x_CDR_CONFIG1_100 0x0405
+#define LAN887x_LOCK1_EQLSR_CONFIG_100 0x0411
+#define LAN887x_SLV_HD_MUFAC_CONFIG_100 0x0417
+#define LAN887x_PLOCK_MUFAC_CONFIG_100 0x041c
+#define LAN887x_PROT_DISABLE_100 0x0425
+#define LAN887x_KF_LOOP_SAT_CONFIG_100 0x0454
+
+/* DSP 1000M registers */
+#define LAN887X_LOCK1_EQLSR_CONFIG 0x0811
+#define LAN887X_LOCK3_EQLSR_CONFIG 0x0813
+#define LAN887X_PROT_DISABLE 0x0825
+#define LAN887X_FFE_GAIN6 0x0843
+#define LAN887X_FFE_GAIN7 0x0844
+#define LAN887X_FFE_GAIN8 0x0845
+#define LAN887X_FFE_GAIN9 0x0846
+#define LAN887X_ECHO_DELAY_CONFIG 0x08ec
+#define LAN887X_FFE_MAX_CONFIG 0x08ee
+
+/* PCS 1000M registers */
+#define LAN887X_SCR_CONFIG_3 0x8043
+#define LAN887X_INFO_FLD_CONFIG_5 0x8048
+
+/* T1 afe registers */
+#define LAN887X_ZQCAL_CONTROL_1 0x8080
+#define LAN887X_AFE_PORT_TESTBUS_CTRL2 0x8089
+#define LAN887X_AFE_PORT_TESTBUS_CTRL4 0x808b
+#define LAN887X_AFE_PORT_TESTBUS_CTRL6 0x808d
+#define LAN887X_TX_AMPLT_1000T1_REG 0x80b0
+#define LAN887X_INIT_COEFF_DFE1_100 0x0422
+
+/* PMA registers */
+#define LAN887X_DSP_PMA_CONTROL 0x810e
+#define LAN887X_DSP_PMA_CONTROL_LNK_SYNC BIT(4)
+
+/* PCS 100M registers */
+#define LAN887X_IDLE_ERR_TIMER_WIN 0x8204
+#define LAN887X_IDLE_ERR_CNT_THRESH 0x8213
+
+/* Misc registers */
+#define LAN887X_REG_REG26 0x001a
+#define LAN887X_REG_REG26_HW_INIT_SEQ_EN BIT(8)
+
+/* Mis registers */
+#define LAN887X_MIS_CFG_REG0 0xa00
+#define LAN887X_MIS_CFG_REG0_RCLKOUT_DIS BIT(5)
+#define LAN887X_MIS_CFG_REG0_MAC_MODE_SEL GENMASK(1, 0)
+
+#define LAN887X_MAC_MODE_RGMII 0x01
+#define LAN887X_MAC_MODE_SGMII 0x03
+
+#define LAN887X_MIS_DLL_CFG_REG0 0xa01
+#define LAN887X_MIS_DLL_CFG_REG1 0xa02
+
+#define LAN887X_MIS_DLL_DELAY_EN BIT(15)
+#define LAN887X_MIS_DLL_EN BIT(0)
+#define LAN887X_MIS_DLL_CONF (LAN887X_MIS_DLL_DELAY_EN |\
+ LAN887X_MIS_DLL_EN)
+
+#define LAN887X_MIS_CFG_REG2 0xa03
+#define LAN887X_MIS_CFG_REG2_FE_LPBK_EN BIT(2)
+
+#define LAN887X_MIS_PKT_STAT_REG0 0xa06
+#define LAN887X_MIS_PKT_STAT_REG1 0xa07
+#define LAN887X_MIS_PKT_STAT_REG3 0xa09
+#define LAN887X_MIS_PKT_STAT_REG4 0xa0a
+#define LAN887X_MIS_PKT_STAT_REG5 0xa0b
+#define LAN887X_MIS_PKT_STAT_REG6 0xa0c
+
+/* Chiptop common registers */
+#define LAN887X_COMMON_LED3_LED2 0xc05
+#define LAN887X_COMMON_LED2_MODE_SEL_MASK GENMASK(4, 0)
+#define LAN887X_LED_LINK_ACT_ANY_SPEED 0x0
+
+/* MX chip top registers */
+#define LAN887X_CHIP_SOFT_RST 0xf03f
+#define LAN887X_CHIP_SOFT_RST_RESET BIT(0)
+
+#define LAN887X_SGMII_CTL 0xf01a
+#define LAN887X_SGMII_CTL_SGMII_MUX_EN BIT(0)
+
+#define LAN887X_SGMII_PCS_CFG 0xf034
+#define LAN887X_SGMII_PCS_CFG_PCS_ENA BIT(9)
+
+#define LAN887X_EFUSE_READ_DAT9 0xf209
+#define LAN887X_EFUSE_READ_DAT9_SGMII_DIS BIT(9)
+#define LAN887X_EFUSE_READ_DAT9_MAC_MODE GENMASK(1, 0)
+
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
-#define DRIVER_DESC "Microchip LAN87XX/LAN937x T1 PHY driver"
+#define DRIVER_DESC "Microchip LAN87XX/LAN937x/LAN887x T1 PHY driver"
struct access_ereg_val {
u8 mode;
@@ -105,6 +199,32 @@ struct access_ereg_val {
u16 mask;
};
+struct lan887x_hw_stat {
+ const char *string;
+ u8 mmd;
+ u16 reg;
+ u8 bits;
+};
+
+static const struct lan887x_hw_stat lan887x_hw_stats[] = {
+ { "TX Good Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG0, 14},
+ { "RX Good Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG1, 14},
+ { "RX ERR Count detected by PCS", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG3, 16},
+ { "TX CRC ERR Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG4, 8},
+ { "RX CRC ERR Count", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG5, 8},
+ { "RX ERR Count for SGMII MII2GMII", MDIO_MMD_VEND1, LAN887X_MIS_PKT_STAT_REG6, 8},
+};
+
+struct lan887x_regwr_map {
+ u8 mmd;
+ u16 reg;
+ u16 val;
+};
+
+struct lan887x_priv {
+ u64 stats[ARRAY_SIZE(lan887x_hw_stats)];
+};
+
static int lan937x_dsp_workaround(struct phy_device *phydev, u16 ereg, u8 bank)
{
u8 prev_bank;
@@ -860,6 +980,446 @@ static int lan87xx_get_sqi_max(struct phy_device *phydev)
return LAN87XX_MAX_SQI;
}
+static int lan887x_rgmii_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* SGMII mux disable */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_SGMII_CTL,
+ LAN887X_SGMII_CTL_SGMII_MUX_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Select MAC_MODE as RGMII */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_CFG_REG0,
+ LAN887X_MIS_CFG_REG0_MAC_MODE_SEL,
+ LAN887X_MAC_MODE_RGMII);
+ if (ret < 0)
+ return ret;
+
+ /* Disable PCS */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_SGMII_PCS_CFG,
+ LAN887X_SGMII_PCS_CFG_PCS_ENA);
+ if (ret < 0)
+ return ret;
+
+ /* LAN887x Errata: the RGMII rx clock is active in SGMII mode.
+ * It is disabled for SGMII mode, so re-enable it here for RGMII
+ * mode
+ */
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_MIS_CFG_REG0,
+ LAN887X_MIS_CFG_REG0_RCLKOUT_DIS);
+}
+
+static int lan887x_sgmii_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* SGMII mux enable */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_SGMII_CTL,
+ LAN887X_SGMII_CTL_SGMII_MUX_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Select MAC_MODE as SGMII */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_CFG_REG0,
+ LAN887X_MIS_CFG_REG0_MAC_MODE_SEL,
+ LAN887X_MAC_MODE_SGMII);
+ if (ret < 0)
+ return ret;
+
+ /* LAN887x Errata: the RGMII rx clock is active in SGMII mode,
+ * so disable it for SGMII mode
+ */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_CFG_REG0,
+ LAN887X_MIS_CFG_REG0_RCLKOUT_DIS);
+ if (ret < 0)
+ return ret;
+
+ /* Enable PCS */
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_SGMII_PCS_CFG,
+ LAN887X_SGMII_PCS_CFG_PCS_ENA);
+}
+
+static int lan887x_config_rgmii_en(struct phy_device *phydev)
+{
+ int txc;
+ int rxc;
+ int ret;
+
+ ret = lan887x_rgmii_init(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Control bit to enable/disable TX DLL delay line in signal path */
+ txc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG0);
+ if (txc < 0)
+ return txc;
+
+ /* Control bit to enable/disable RX DLL delay line in signal path */
+ rxc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG1);
+ if (rxc < 0)
+ return rxc;
+
+ /* Configures the phy to enable RX/TX delay
+ * RGMII - TX & RX delays are either added by MAC or not needed,
+ * phy should not add
+ * RGMII_ID - Configures phy to enable TX & RX delays, MAC shouldn't add
+ * RGMII_RX_ID - Configures the PHY to enable the RX delay.
+ * The MAC shouldn't add the RX delay
+ * RGMII_TX_ID - Configures the PHY to enable the TX delay.
+ * The MAC shouldn't add the TX delay in this case
+ */
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ txc &= ~LAN887X_MIS_DLL_CONF;
+ rxc &= ~LAN887X_MIS_DLL_CONF;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ txc |= LAN887X_MIS_DLL_CONF;
+ rxc |= LAN887X_MIS_DLL_CONF;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ txc &= ~LAN887X_MIS_DLL_CONF;
+ rxc |= LAN887X_MIS_DLL_CONF;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ txc |= LAN887X_MIS_DLL_CONF;
+ rxc &= ~LAN887X_MIS_DLL_CONF;
+ break;
+ default:
+ WARN_ONCE(1, "Invalid phydev interface %d\n", phydev->interface);
+ return 0;
+ }
+
+ /* Configures the PHY to enable/disable RX delay in signal path */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG1,
+ LAN887X_MIS_DLL_CONF, rxc);
+ if (ret < 0)
+ return ret;
+
+ /* Configures the PHY to enable/disable the TX delay in signal path */
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_MIS_DLL_CFG_REG0,
+ LAN887X_MIS_DLL_CONF, txc);
+}
+
+static int lan887x_config_phy_interface(struct phy_device *phydev)
+{
+ int interface_mode;
+ int sgmii_dis;
+ int ret;
+
+ /* Read sku efuse data for interfaces supported by sku */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_EFUSE_READ_DAT9);
+ if (ret < 0)
+ return ret;
+
+ /* If interface_mode is 1 then the efuse selects RGMII operation.
+ * If interface_mode is 3 then the efuse selects SGMII operation.
+ */
+ interface_mode = ret & LAN887X_EFUSE_READ_DAT9_MAC_MODE;
+ /* SGMII disable is set for RGMII operations */
+ sgmii_dis = ret & LAN887X_EFUSE_READ_DAT9_SGMII_DIS;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* Reject RGMII settings for SGMII only sku */
+ ret = -EOPNOTSUPP;
+
+ if (!((interface_mode & LAN887X_MAC_MODE_SGMII) ==
+ LAN887X_MAC_MODE_SGMII))
+ ret = lan887x_config_rgmii_en(phydev);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ /* Reject SGMII setting for RGMII only sku */
+ ret = -EOPNOTSUPP;
+
+ if (!sgmii_dis)
+ ret = lan887x_sgmii_init(phydev);
+ break;
+ default:
+ /* Reject setting for unsupported interfaces */
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int lan887x_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Enable twisted pair */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
+
+ /* First patch only supports 100Mbps and 1000Mbps force-mode.
+ * T1 Auto-Negotiation (Clause 98 of IEEE 802.3) will be added later.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+
+ return 0;
+}
+
+static int lan887x_phy_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Clear loopback */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_MIS_CFG_REG2,
+ LAN887X_MIS_CFG_REG2_FE_LPBK_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Configure default behavior of led to link and activity for any
+ * speed
+ */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_COMMON_LED3_LED2,
+ LAN887X_COMMON_LED2_MODE_SEL_MASK,
+ LAN887X_LED_LINK_ACT_ANY_SPEED);
+ if (ret < 0)
+ return ret;
+
+ /* PHY interface setup */
+ return lan887x_config_phy_interface(phydev);
+}
+
+static int lan887x_phy_config(struct phy_device *phydev,
+ const struct lan887x_regwr_map *reg_map, int cnt)
+{
+ int ret;
+
+ for (int i = 0; i < cnt; i++) {
+ ret = phy_write_mmd(phydev, reg_map[i].mmd,
+ reg_map[i].reg, reg_map[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan887x_phy_setup(struct phy_device *phydev)
+{
+ static const struct lan887x_regwr_map phy_cfg[] = {
+ /* PORT_AFE writes */
+ {MDIO_MMD_PMAPMD, LAN887X_ZQCAL_CONTROL_1, 0x4008},
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL2, 0x0000},
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL6, 0x0040},
+ /* 100T1_PCS_VENDOR writes */
+ {MDIO_MMD_PCS, LAN887X_IDLE_ERR_CNT_THRESH, 0x0008},
+ {MDIO_MMD_PCS, LAN887X_IDLE_ERR_TIMER_WIN, 0x800d},
+ /* 100T1 DSP writes */
+ {MDIO_MMD_VEND1, LAN887x_CDR_CONFIG1_100, 0x0ab1},
+ {MDIO_MMD_VEND1, LAN887x_LOCK1_EQLSR_CONFIG_100, 0x5274},
+ {MDIO_MMD_VEND1, LAN887x_SLV_HD_MUFAC_CONFIG_100, 0x0d74},
+ {MDIO_MMD_VEND1, LAN887x_PLOCK_MUFAC_CONFIG_100, 0x0aea},
+ {MDIO_MMD_VEND1, LAN887x_PROT_DISABLE_100, 0x0360},
+ {MDIO_MMD_VEND1, LAN887x_KF_LOOP_SAT_CONFIG_100, 0x0c30},
+ /* 1000T1 DSP writes */
+ {MDIO_MMD_VEND1, LAN887X_LOCK1_EQLSR_CONFIG, 0x2a78},
+ {MDIO_MMD_VEND1, LAN887X_LOCK3_EQLSR_CONFIG, 0x1368},
+ {MDIO_MMD_VEND1, LAN887X_PROT_DISABLE, 0x1354},
+ {MDIO_MMD_VEND1, LAN887X_FFE_GAIN6, 0x3C84},
+ {MDIO_MMD_VEND1, LAN887X_FFE_GAIN7, 0x3ca5},
+ {MDIO_MMD_VEND1, LAN887X_FFE_GAIN8, 0x3ca5},
+ {MDIO_MMD_VEND1, LAN887X_FFE_GAIN9, 0x3ca5},
+ {MDIO_MMD_VEND1, LAN887X_ECHO_DELAY_CONFIG, 0x0024},
+ {MDIO_MMD_VEND1, LAN887X_FFE_MAX_CONFIG, 0x227f},
+ /* 1000T1 PCS writes */
+ {MDIO_MMD_PCS, LAN887X_SCR_CONFIG_3, 0x1e00},
+ {MDIO_MMD_PCS, LAN887X_INFO_FLD_CONFIG_5, 0x0fa1},
+ };
+
+ return lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg));
+}
+
+static int lan887x_100M_setup(struct phy_device *phydev)
+{
+ int ret;
+
+ /* (Re)configure the speed/mode dependent T1 settings */
+ if (phydev->master_slave_set == MASTER_SLAVE_CFG_MASTER_FORCE ||
+ phydev->master_slave_set == MASTER_SLAVE_CFG_MASTER_PREFERRED) {
+ static const struct lan887x_regwr_map phy_cfg[] = {
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL4, 0x00b8},
+ {MDIO_MMD_PMAPMD, LAN887X_TX_AMPLT_1000T1_REG, 0x0038},
+ {MDIO_MMD_VEND1, LAN887X_INIT_COEFF_DFE1_100, 0x000f},
+ };
+
+ ret = lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg));
+ } else {
+ static const struct lan887x_regwr_map phy_cfg[] = {
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL4, 0x0038},
+ {MDIO_MMD_VEND1, LAN887X_INIT_COEFF_DFE1_100, 0x0014},
+ };
+
+ ret = lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg));
+ }
+ if (ret < 0)
+ return ret;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_REG_REG26,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN);
+}
+
+static int lan887x_1000M_setup(struct phy_device *phydev)
+{
+ static const struct lan887x_regwr_map phy_cfg[] = {
+ {MDIO_MMD_PMAPMD, LAN887X_TX_AMPLT_1000T1_REG, 0x003f},
+ {MDIO_MMD_PMAPMD, LAN887X_AFE_PORT_TESTBUS_CTRL4, 0x00b8},
+ };
+ int ret;
+
+ /* (Re)configure the speed/mode dependent T1 settings */
+ ret = lan887x_phy_config(phydev, phy_cfg, ARRAY_SIZE(phy_cfg));
+ if (ret < 0)
+ return ret;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, LAN887X_DSP_PMA_CONTROL,
+ LAN887X_DSP_PMA_CONTROL_LNK_SYNC);
+}
+
+static int lan887x_link_setup(struct phy_device *phydev)
+{
+ int ret = -EINVAL;
+
+ if (phydev->speed == SPEED_1000)
+ ret = lan887x_1000M_setup(phydev);
+ else if (phydev->speed == SPEED_100)
+ ret = lan887x_100M_setup(phydev);
+
+ return ret;
+}
+
+/* LAN887x Errata: speed configuration changes require soft reset
+ * and chip soft reset
+ */
+static int lan887x_phy_reset(struct phy_device *phydev)
+{
+ int ret, val;
+
+ /* Clear 1000M link sync */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, LAN887X_DSP_PMA_CONTROL,
+ LAN887X_DSP_PMA_CONTROL_LNK_SYNC);
+ if (ret < 0)
+ return ret;
+
+ /* Clear 100M link sync */
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, LAN887X_REG_REG26,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN);
+ if (ret < 0)
+ return ret;
+
+ /* Chiptop soft-reset to allow the speed/mode change */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, LAN887X_CHIP_SOFT_RST,
+ LAN887X_CHIP_SOFT_RST_RESET);
+ if (ret < 0)
+ return ret;
+
+ /* CL22 soft-reset to let the link re-train */
+ ret = phy_modify(phydev, MII_BMCR, BMCR_RESET, BMCR_RESET);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for reset complete or timeout if > 10ms */
+ return phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+ 5000, 10000, true);
+}
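
Decoding the poll arguments (per phy_read_poll_timeout()'s sleep_us, timeout_us and sleep_before_read parameters): BMCR is polled every 5 ms with a 10 ms timeout, sleeping once before the first read.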
+
+static int lan887x_phy_reconfig(struct phy_device *phydev)
+{
+ int ret;
+
+ linkmode_zero(phydev->advertising);
+
+ ret = genphy_c45_pma_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ return lan887x_link_setup(phydev);
+}
+
+static int lan887x_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ /* LAN887x Errata: speed configuration changes require soft reset
+ * and chip soft reset
+ */
+ ret = lan887x_phy_reset(phydev);
+ if (ret < 0)
+ return ret;
+
+ return lan887x_phy_reconfig(phydev);
+}
+
+static int lan887x_probe(struct phy_device *phydev)
+{
+ struct lan887x_priv *priv;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return lan887x_phy_setup(phydev);
+}
+
+static u64 lan887x_get_stat(struct phy_device *phydev, int i)
+{
+ struct lan887x_hw_stat stat = lan887x_hw_stats[i];
+ struct lan887x_priv *priv = phydev->priv;
+ int val;
+ u64 ret;
+
+ if (stat.mmd)
+ val = phy_read_mmd(phydev, stat.mmd, stat.reg);
+ else
+ val = phy_read(phydev, stat.reg);
+
+ if (val < 0) {
+ ret = U64_MAX;
+ } else {
+ val = val & ((1 << stat.bits) - 1);
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ return ret;
+}
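
The accumulation above assumes these MIS packet-status counters are clear-on-read, so each masked sample is a fresh increment; U64_MAX flags a failed MDIO read, following common phylib practice.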
+
+static void lan887x_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ for (int i = 0; i < ARRAY_SIZE(lan887x_hw_stats); i++)
+ data[i] = lan887x_get_stat(phydev, i);
+}
+
+static int lan887x_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(lan887x_hw_stats);
+}
+
+static void lan887x_get_strings(struct phy_device *phydev, u8 *data)
+{
+ for (int i = 0; i < ARRAY_SIZE(lan887x_hw_stats); i++)
+ ethtool_puts(&data, lan887x_hw_stats[i].string);
+}
+
static struct phy_driver microchip_t1_phy_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX),
@@ -894,6 +1454,20 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.get_sqi_max = lan87xx_get_sqi_max,
.cable_test_start = lan87xx_cable_test_start,
.cable_test_get_status = lan87xx_cable_test_get_status,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN887X),
+ .name = "Microchip LAN887x T1 PHY",
+ .probe = lan887x_probe,
+ .get_features = lan887x_get_features,
+ .config_init = lan887x_phy_init,
+ .config_aneg = lan887x_config_aneg,
+ .get_stats = lan887x_get_stats,
+ .get_sset_count = lan887x_get_sset_count,
+ .get_strings = lan887x_get_strings,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_status = genphy_c45_read_status,
}
};
@@ -902,6 +1476,7 @@ module_phy_driver(microchip_t1_phy_driver);
static struct mdio_device_id __maybe_unused microchip_t1_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX) },
{ PHY_ID_MATCH_MODEL(PHY_ID_LAN937X) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN887X) },
{ }
};
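
The statistics path above accumulates narrow hardware counters into 64-bit
totals: each read is masked to the counter's bit width and added to a
per-PHY running sum, so ethtool always sees a monotonically increasing
value even when the hardware register is small. A minimal userspace sketch
of that accumulation, assuming clear-on-read counters narrower than 32
bits (all names here are hypothetical, not driver API):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t total;	/* stands in for priv->stats[i] */

	static uint64_t accumulate(int raw, unsigned int bits)
	{
		if (raw < 0)		/* MDIO read error: report all-ones */
			return UINT64_MAX;

		total += raw & ((1u << bits) - 1);	/* keep only the valid field */
		return total;
	}

	int main(void)
	{
		/* two reads of a hypothetical 10-bit clear-on-read counter */
		printf("%llu\n", (unsigned long long)accumulate(0x3ff, 10));
		printf("%llu\n", (unsigned long long)accumulate(0x005, 10));
		return 0;
	}
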
diff --git a/drivers/net/phy/open_alliance_helpers.c b/drivers/net/phy/open_alliance_helpers.c
new file mode 100644
index 000000000000..36a70451d7da
--- /dev/null
+++ b/drivers/net/phy/open_alliance_helpers.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * open_alliance_helpers.c - OPEN Alliance specific PHY diagnostic helpers
+ *
+ * This file contains helper functions for implementing advanced diagnostic
+ * features as specified by the OPEN Alliance for automotive Ethernet PHYs.
+ * These helpers include functionality for Time Domain Reflectometry (TDR),
+ * dynamic channel quality assessment, and other PHY diagnostics.
+ *
+ * For more information on the specifications, refer to the OPEN Alliance
+ * documentation: https://opensig.org/automotive-ethernet-specifications/
+ * Currently the following specifications are partially or fully implemented:
+ * - Advanced diagnostic features for 1000BASE-T1 automotive Ethernet PHYs.
+ * TC12 - advanced PHY features.
+ * https://opensig.org/wp-content/uploads/2024/03/Advanced_PHY_features_for_automotive_Ethernet_v2.0_fin.pdf
+ */
+
+#include <linux/bitfield.h>
+#include <linux/ethtool_netlink.h>
+
+#include "open_alliance_helpers.h"
+
+/**
+ * oa_1000bt1_get_ethtool_cable_result_code - Convert TDR status to ethtool
+ * result code
+ * @reg_value: Value read from the TDR register
+ *
+ * This function takes a register value from the HDD.TDR register and converts
+ * the TDR status to the corresponding ethtool cable test result code.
+ *
+ * Return: The appropriate ethtool result code based on the TDR status
+ */
+int oa_1000bt1_get_ethtool_cable_result_code(u16 reg_value)
+{
+ u8 tdr_status = FIELD_GET(OA_1000BT1_HDD_TDR_STATUS_MASK, reg_value);
+ u8 dist_val = FIELD_GET(OA_1000BT1_HDD_TDR_DISTANCE_MASK, reg_value);
+
+ switch (tdr_status) {
+ case OA_1000BT1_HDD_TDR_STATUS_CABLE_OK:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case OA_1000BT1_HDD_TDR_STATUS_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case OA_1000BT1_HDD_TDR_STATUS_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case OA_1000BT1_HDD_TDR_STATUS_NOISE:
+ return ETHTOOL_A_CABLE_RESULT_CODE_NOISE;
+ default:
+ if (dist_val == OA_1000BT1_HDD_TDR_DISTANCE_RESOLUTION_NOT_POSSIBLE)
+ return ETHTOOL_A_CABLE_RESULT_CODE_RESOLUTION_NOT_POSSIBLE;
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+EXPORT_SYMBOL_GPL(oa_1000bt1_get_ethtool_cable_result_code);
+
+/**
+ * oa_1000bt1_get_tdr_distance - Get distance to the main fault from TDR
+ * register value
+ * @reg_value: Value read from the TDR register
+ *
+ * This function takes a register value from the HDD.TDR register and extracts
+ * the distance to the main fault detected by the TDR feature. The distance is
+ * measured in centimeters and ranges from 0 to 3100 centimeters. If the
+ * distance is not available (0x3f), the function returns -ERANGE.
+ *
+ * Return: The distance to the main fault in centimeters, or -ERANGE if the
+ * resolution is not possible.
+ */
+int oa_1000bt1_get_tdr_distance(u16 reg_value)
+{
+ u8 dist_val = FIELD_GET(OA_1000BT1_HDD_TDR_DISTANCE_MASK, reg_value);
+
+ if (dist_val == OA_1000BT1_HDD_TDR_DISTANCE_RESOLUTION_NOT_POSSIBLE)
+ return -ERANGE;
+
+ return dist_val * 100;
+}
+EXPORT_SYMBOL_GPL(oa_1000bt1_get_tdr_distance);
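
The distance field encodes one metre per unit in bits 13:8, with 0x3f
reserved for "resolution not possible". A self-contained userspace sketch
of the same decoding, with GENMASK() and FIELD_GET() re-implemented here
purely for illustration (the kernel versions live in <linux/bits.h> and
<linux/bitfield.h>; -1 stands in for -ERANGE):

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
	#define FIELD_GET(mask, val)	(((val) & (mask)) / ((mask) & -(mask)))

	#define TDR_DISTANCE_MASK		GENMASK(13, 8)
	#define TDR_DISTANCE_NOT_POSSIBLE	0x3f

	static int tdr_distance_cm(uint16_t reg)
	{
		unsigned int dist = FIELD_GET(TDR_DISTANCE_MASK, reg);

		if (dist == TDR_DISTANCE_NOT_POSSIBLE)
			return -1;	/* resolution not possible */

		return dist * 100;	/* one unit per metre, reported in cm */
	}

	int main(void)
	{
		printf("%d\n", tdr_distance_cm(0x0500));	/* 5 m -> 500 cm */
		printf("%d\n", tdr_distance_cm(0x3f00));	/* not possible */
		return 0;
	}
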
diff --git a/drivers/net/phy/open_alliance_helpers.h b/drivers/net/phy/open_alliance_helpers.h
new file mode 100644
index 000000000000..8b7d97bc6f18
--- /dev/null
+++ b/drivers/net/phy/open_alliance_helpers.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef OPEN_ALLIANCE_HELPERS_H
+#define OPEN_ALLIANCE_HELPERS_H
+
+/*
+ * These defines reflect the TDR (Time Domain Reflectometry) diagnostic feature
+ * for 1000BASE-T1 automotive Ethernet PHYs as specified by the OPEN Alliance.
+ *
+ * The register values are part of the HDD.TDR register, which provides
+ * information about the cable status and faults. The exact register offset
+ * is device-specific and should be provided by the driver.
+ */
+#define OA_1000BT1_HDD_TDR_ACTIVATION_MASK GENMASK(1, 0)
+#define OA_1000BT1_HDD_TDR_ACTIVATION_OFF 1
+#define OA_1000BT1_HDD_TDR_ACTIVATION_ON 2
+
+#define OA_1000BT1_HDD_TDR_STATUS_MASK GENMASK(7, 4)
+#define OA_1000BT1_HDD_TDR_STATUS_SHORT 3
+#define OA_1000BT1_HDD_TDR_STATUS_OPEN 6
+#define OA_1000BT1_HDD_TDR_STATUS_NOISE 5
+#define OA_1000BT1_HDD_TDR_STATUS_CABLE_OK 7
+#define OA_1000BT1_HDD_TDR_STATUS_TEST_IN_PROGRESS 8
+#define OA_1000BT1_HDD_TDR_STATUS_TEST_NOT_POSSIBLE 13
+
+/*
+ * OA_1000BT1_HDD_TDR_DISTANCE_MASK:
+ * This mask is used to extract the distance to the first/main fault
+ * detected by the TDR feature. Each step of the field represents an
+ * approximate distance of 1 meter, ranging from 0 to 31 meters. The exact
+ * interpretation of the values may vary, but generally:
+ * 000000 = no error
+ * 000001 = error about 0-1m away
+ * 000010 = error between 1-2m away
+ * ...
+ * 011111 = error about 30-31m away
+ * 111111 = resolution not possible / out of distance
+ */
+#define OA_1000BT1_HDD_TDR_DISTANCE_MASK GENMASK(13, 8)
+#define OA_1000BT1_HDD_TDR_DISTANCE_NO_ERROR 0
+#define OA_1000BT1_HDD_TDR_DISTANCE_RESOLUTION_NOT_POSSIBLE 0x3f
+
+int oa_1000bt1_get_ethtool_cable_result_code(u16 reg_value);
+int oa_1000bt1_get_tdr_distance(u16 reg_value);
+
+#endif /* OPEN_ALLIANCE_HELPERS_H */
+
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 785182fa5fe0..cba3af926429 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1089,7 +1089,10 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
+ if (autoneg == AUTONEG_ENABLE &&
+ (linkmode_empty(advertising) ||
+ !linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported)))
return -EINVAL;
if (autoneg == AUTONEG_DISABLE &&
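
The extra check rejects enabling autoneg on a PHY whose supported mask
lacks the Autoneg bit, in addition to the existing empty-advertising
check. The rule in isolation, as a toy sketch with plain bit flags
standing in for the kernel's linkmode bitmaps (names are hypothetical):

	#include <stdbool.h>
	#include <stdio.h>

	#define LM_AUTONEG	(1u << 0)
	#define LM_1000FULL	(1u << 1)

	static bool ksettings_autoneg_ok(unsigned int advertising,
					 unsigned int supported)
	{
		/* non-empty advertising AND a PHY that can autonegotiate */
		return advertising != 0 && (supported & LM_AUTONEG);
	}

	int main(void)
	{
		printf("%d\n", ksettings_autoneg_ok(LM_1000FULL,
						    LM_AUTONEG | LM_1000FULL)); /* 1 */
		printf("%d\n", ksettings_autoneg_ok(LM_1000FULL,
						    LM_1000FULL));              /* 0 */
		return 0;
	}
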
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 7752e9386b40..8f5314c1fecc 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -29,6 +29,7 @@
#include <linux/phy.h>
#include <linux/phylib_stubs.h>
#include <linux/phy_led_triggers.h>
+#include <linux/phy_link_topology.h>
#include <linux/pse-pd/pse.h>
#include <linux/property.h>
#include <linux/rtnetlink.h>
@@ -279,6 +280,15 @@ static struct phy_driver genphy_driver;
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
+static bool phy_drv_wol_enabled(struct phy_device *phydev)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phy_ethtool_get_wol(phydev, &wol);
+
+ return wol.wolopts != 0;
+}
+
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
struct device_driver *drv = phydev->mdio.dev.driver;
@@ -288,6 +298,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (!drv || !phydrv->suspend)
return false;
+ /* If the PHY on the MDIO bus is not attached but has WOL enabled,
+ * we cannot suspend the PHY.
+ */
+ if (!netdev && phy_drv_wol_enabled(phydev))
+ return false;
+
/* PHY not attached? May suspend if the PHY has not already been
* suspended as part of a prior call to phy_disconnect() ->
* phy_detach() -> phy_suspend() because the parent netdev might be the
@@ -1370,6 +1386,48 @@ phy_standalone_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(phy_standalone);
/**
+ * phy_sfp_connect_phy - Connect the SFP module's PHY to the upstream PHY
+ * @upstream: pointer to the upstream phy device
+ * @phy: pointer to the SFP module's phy device
+ *
+ * This helper allows keeping track of PHY devices on the link. It adds the
+ * SFP module's phy to the phy namespace of the upstream phy.
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int phy_sfp_connect_phy(void *upstream, struct phy_device *phy)
+{
+ struct phy_device *phydev = upstream;
+ struct net_device *dev = phydev->attached_dev;
+
+ if (dev)
+ return phy_link_topo_add_phy(dev, phy, PHY_UPSTREAM_PHY, phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_sfp_connect_phy);
+
+/**
+ * phy_sfp_disconnect_phy - Disconnect the SFP module's PHY from the upstream PHY
+ * @upstream: pointer to the upstream phy device
+ * @phy: pointer to the SFP module's phy device
+ *
+ * This helper allows keeping track of PHY devices on the link. It removes the
+ * SFP module's phy from the phy namespace of the upstream phy. As the module phy
+ * will be destroyed, re-inserting the same module will add a new phy with a
+ * new index.
+ */
+void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy)
+{
+ struct phy_device *phydev = upstream;
+ struct net_device *dev = phydev->attached_dev;
+
+ if (dev)
+ phy_link_topo_del_phy(dev, phy);
+}
+EXPORT_SYMBOL(phy_sfp_disconnect_phy);
+
+/**
* phy_sfp_attach - attach the SFP bus to the PHY upstream network device
* @upstream: pointer to the phy device
* @bus: sfp bus representing cage being attached
@@ -1511,6 +1569,10 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (phydev->sfp_bus_attached)
dev->sfp_bus = phydev->sfp_bus;
+
+ err = phy_link_topo_add_phy(dev, phydev, PHY_UPSTREAM_MAC, dev);
+ if (err)
+ goto error;
}
/* Some Ethernet drivers try to connect to a PHY device before
@@ -1938,6 +2000,7 @@ void phy_detach(struct phy_device *phydev)
if (dev) {
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
+ phy_link_topo_del_phy(dev, phydev);
}
phydev->phylink = NULL;
@@ -1975,7 +2038,6 @@ EXPORT_SYMBOL(phy_detach);
int phy_suspend(struct phy_device *phydev)
{
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
struct net_device *netdev = phydev->attached_dev;
const struct phy_driver *phydrv = phydev->drv;
int ret;
@@ -1983,8 +2045,7 @@ int phy_suspend(struct phy_device *phydev)
if (phydev->suspended || !phydrv)
return 0;
- phy_ethtool_get_wol(phydev, &wol);
- phydev->wol_enabled = wol.wolopts ||
+ phydev->wol_enabled = phy_drv_wol_enabled(phydev) ||
(netdev && netdev->ethtool->wol_enabled);
/* If the device has WOL enabled, we cannot suspend the PHY */
if (phydev->wol_enabled && !(phydrv->flags & PHY_ALWAYS_CALL_SUSPEND))
@@ -2095,22 +2156,20 @@ EXPORT_SYMBOL(phy_reset_after_clk_enable);
/**
* genphy_config_advert - sanitize and advertise auto-negotiation parameters
* @phydev: target phy_device struct
+ * @advert: auto-negotiation parameters to advertise
*
* Description: Writes MII_ADVERTISE with the appropriate values,
* after sanitizing the values to make sure we only advertise
* what is supported. Returns < 0 on error, 0 if the PHY's advertisement
* hasn't changed, and > 0 if it has changed.
*/
-static int genphy_config_advert(struct phy_device *phydev)
+static int genphy_config_advert(struct phy_device *phydev,
+ const unsigned long *advert)
{
int err, bmsr, changed = 0;
u32 adv;
- /* Only allow advertising what this PHY supports */
- linkmode_and(phydev->advertising, phydev->advertising,
- phydev->supported);
-
- adv = linkmode_adv_to_mii_adv_t(phydev->advertising);
+ adv = linkmode_adv_to_mii_adv_t(advert);
/* Setup standard advertisement */
err = phy_modify_changed(phydev, MII_ADVERTISE,
@@ -2133,7 +2192,7 @@ static int genphy_config_advert(struct phy_device *phydev)
if (!(bmsr & BMSR_ESTATEN))
return changed;
- adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+ adv = linkmode_adv_to_mii_ctrl1000_t(advert);
err = phy_modify_changed(phydev, MII_CTRL1000,
ADVERTISE_1000FULL | ADVERTISE_1000HALF,
@@ -2357,6 +2416,9 @@ EXPORT_SYMBOL(genphy_check_and_restart_aneg);
*/
int __genphy_config_aneg(struct phy_device *phydev, bool changed)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(fixed_advert);
+ const struct phy_setting *set;
+ unsigned long *advert;
int err;
err = genphy_c45_an_config_eee_aneg(phydev);
@@ -2371,10 +2433,25 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
else if (err)
changed = true;
- if (AUTONEG_ENABLE != phydev->autoneg)
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ /* Only allow advertising what this PHY supports */
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+ advert = phydev->advertising;
+ } else if (phydev->speed < SPEED_1000) {
return genphy_setup_forced(phydev);
+ } else {
+ linkmode_zero(fixed_advert);
+
+ set = phy_lookup_setting(phydev->speed, phydev->duplex,
+ phydev->supported, true);
+ if (set)
+ linkmode_set_bit(set->bit, fixed_advert);
+
+ advert = fixed_advert;
+ }
- err = genphy_config_advert(phydev);
+ err = genphy_config_advert(phydev, advert);
if (err < 0) /* error */
return err;
else if (err)
diff --git a/drivers/net/phy/phy_link_topology.c b/drivers/net/phy/phy_link_topology.c
new file mode 100644
index 000000000000..4a5d73002a1a
--- /dev/null
+++ b/drivers/net/phy/phy_link_topology.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Infrastructure to handle all PHY devices connected to a given netdev,
+ * either directly or indirectly attached.
+ *
+ * Copyright (c) 2023 Maxime Chevallier <maxime.chevallier@bootlin.com>
+ */
+
+#include <linux/phy_link_topology.h>
+#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include <linux/xarray.h>
+
+static int netdev_alloc_phy_link_topology(struct net_device *dev)
+{
+ struct phy_link_topology *topo;
+
+ topo = kzalloc(sizeof(*topo), GFP_KERNEL);
+ if (!topo)
+ return -ENOMEM;
+
+ xa_init_flags(&topo->phys, XA_FLAGS_ALLOC1);
+ topo->next_phy_index = 1;
+
+ dev->link_topo = topo;
+
+ return 0;
+}
+
+int phy_link_topo_add_phy(struct net_device *dev,
+ struct phy_device *phy,
+ enum phy_upstream upt, void *upstream)
+{
+ struct phy_link_topology *topo = dev->link_topo;
+ struct phy_device_node *pdn;
+ int ret;
+
+ if (!topo) {
+ ret = netdev_alloc_phy_link_topology(dev);
+ if (ret)
+ return ret;
+
+ topo = dev->link_topo;
+ }
+
+ pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
+ if (!pdn)
+ return -ENOMEM;
+
+ pdn->phy = phy;
+ switch (upt) {
+ case PHY_UPSTREAM_MAC:
+ pdn->upstream.netdev = (struct net_device *)upstream;
+ if (phy_on_sfp(phy))
+ pdn->parent_sfp_bus = pdn->upstream.netdev->sfp_bus;
+ break;
+ case PHY_UPSTREAM_PHY:
+ pdn->upstream.phydev = (struct phy_device *)upstream;
+ if (phy_on_sfp(phy))
+ pdn->parent_sfp_bus = pdn->upstream.phydev->sfp_bus;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ pdn->upstream_type = upt;
+
+ /* Attempt to re-use a previously allocated phy_index */
+ if (phy->phyindex)
+ ret = xa_insert(&topo->phys, phy->phyindex, pdn, GFP_KERNEL);
+ else
+ ret = xa_alloc_cyclic(&topo->phys, &phy->phyindex, pdn,
+ xa_limit_32b, &topo->next_phy_index,
+ GFP_KERNEL);
+
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(pdn);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_link_topo_add_phy);
+
+void phy_link_topo_del_phy(struct net_device *dev,
+ struct phy_device *phy)
+{
+ struct phy_link_topology *topo = dev->link_topo;
+ struct phy_device_node *pdn;
+
+ if (!topo)
+ return;
+
+ pdn = xa_erase(&topo->phys, phy->phyindex);
+
+ /* We delete the PHY from the topology, but we don't reset the
+ * phy->phyindex field. If the PHY isn't gone, it can be re-assigned
+ * the same index the next time it's added back to the topology.
+ */
+
+ kfree(pdn);
+}
+EXPORT_SYMBOL_GPL(phy_link_topo_del_phy);
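
phy_link_topo_add_phy() first tries xa_insert() at the PHY's previous
index and only falls back to xa_alloc_cyclic() for PHYs that were never
added, so a PHY that is detached and re-attached keeps a stable phyindex.
A toy sketch of that policy, with a plain array standing in for the
xarray (hypothetical helpers, not kernel API):

	#include <stdio.h>

	#define MAX_PHYS 8

	struct phy { unsigned int index; };

	static struct phy *slots[MAX_PHYS];
	static unsigned int next_index = 1;	/* index 0 means "never added" */

	static int topo_add(struct phy *p)
	{
		if (!p->index)			/* first attach: allocate a fresh index */
			p->index = next_index++;

		if (p->index >= MAX_PHYS || slots[p->index])
			return -1;		/* slot taken or table full */
		slots[p->index] = p;		/* re-attach keeps the old index */
		return 0;
	}

	static void topo_del(struct phy *p)
	{
		slots[p->index] = NULL;		/* index deliberately left intact */
	}

	int main(void)
	{
		struct phy a = { 0 };

		topo_add(&a);
		topo_del(&a);
		topo_add(&a);			/* re-uses index 1 */
		printf("index %u\n", a.index);
		return 0;
	}
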
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 51c526d227fa..ab4e9fc03017 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -3423,7 +3423,8 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
return ret;
}
-static void phylink_sfp_disconnect_phy(void *upstream)
+static void phylink_sfp_disconnect_phy(void *upstream,
+ struct phy_device *phydev)
{
phylink_disconnect_phy(upstream);
}
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index c8f83e5f78ab..105602581a03 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -770,6 +770,8 @@ static const struct sfp_upstream_ops at8031_sfp_ops = {
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
.module_insert = at8031_sfp_insert,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
};
static int at8031_parse_dt(struct phy_device *phydev)
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 672c6929119a..bd8a51ec0ecd 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -699,6 +699,8 @@ static const struct sfp_upstream_ops qca807x_sfp_ops = {
.detach = phy_sfp_detach,
.module_insert = qca807x_sfp_insert,
.module_remove = qca807x_sfp_remove,
+ .connect_phy = phy_sfp_connect_phy,
+ .disconnect_phy = phy_sfp_disconnect_phy,
};
static int qca807x_probe(struct phy_device *phydev)
@@ -733,16 +735,6 @@ static int qca807x_probe(struct phy_device *phydev)
"qcom,dac-disable-bias-current-tweak");
#if IS_ENABLED(CONFIG_GPIOLIB)
- /* Make sure we don't have mixed leds node and gpio-controller
- * to prevent registering leds and having gpio-controller usage
- * conflicting with them.
- */
- if (of_find_property(node, "leds", NULL) &&
- of_find_property(node, "gpio-controller", NULL)) {
- phydev_err(phydev, "Invalid property detected. LEDs and gpio-controller are mutually exclusive.");
- return -EINVAL;
- }
-
/* Do not register a GPIO controller unless flagged for it */
if (of_property_read_bool(node, "gpio-controller")) {
ret = qca807x_gpio(phydev);
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 2f44fc51848f..f13c00b5b449 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -487,7 +487,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
bus->socket_ops->stop(bus->sfp);
bus->socket_ops->detach(bus->sfp);
if (bus->phydev && ops && ops->disconnect_phy)
- ops->disconnect_phy(bus->upstream);
+ ops->disconnect_phy(bus->upstream, bus->phydev);
}
bus->registered = false;
}
@@ -722,6 +722,28 @@ void sfp_bus_del_upstream(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_bus_del_upstream);
+/**
+ * sfp_get_name() - Get the SFP device name
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Gets the SFP device's name, if @bus has a registered socket. Callers must
+ * hold RTNL, and the returned name is only valid until RTNL is released.
+ *
+ * Returns:
+ * - The name of the SFP device registered with sfp_register_socket()
+ * - %NULL if no device was registered on @bus
+ */
+const char *sfp_get_name(struct sfp_bus *bus)
+{
+ ASSERT_RTNL();
+
+ if (bus->sfp_dev)
+ return dev_name(bus->sfp_dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sfp_get_name);
+
/* Socket driver entry points */
int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev)
{
@@ -743,7 +765,7 @@ void sfp_remove_phy(struct sfp_bus *bus)
const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
if (ops && ops->disconnect_phy)
- ops->disconnect_phy(bus->upstream);
+ ops->disconnect_phy(bus->upstream, bus->phydev);
bus->phydev = NULL;
}
EXPORT_SYMBOL_GPL(sfp_remove_phy);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 3b5fcaf0dd36..2377179de017 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -10,8 +10,10 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/bitfield.h>
/* Vitesse Extended Page Magic Register(s) */
+#define MII_VSC73XX_EXT_PAGE_1E 0x01
#define MII_VSC82X4_EXT_PAGE_16E 0x10
#define MII_VSC82X4_EXT_PAGE_17E 0x11
#define MII_VSC82X4_EXT_PAGE_18E 0x12
@@ -60,6 +62,28 @@
/* Vitesse Extended Page Access Register */
#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f
+/* Vitesse VSC73XX Extended Control Register */
+#define MII_VSC73XX_PHY_CTRL_EXT3 0x14
+
+#define MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN BIT(4)
+#define MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT GENMASK(3, 2)
+#define MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_STA BIT(1)
+#define MII_VSC73XX_DOWNSHIFT_MAX 5
+#define MII_VSC73XX_DOWNSHIFT_INVAL 1
+
+/* VSC73XX PHY_BYPASS_CTRL register */
+#define MII_VSC73XX_PHY_BYPASS_CTRL MII_DCOUNTER
+#define MII_VSC73XX_PBC_TX_DIS BIT(15)
+#define MII_VSC73XX_PBC_FOR_SPD_AUTO_MDIX_DIS BIT(7)
+#define MII_VSC73XX_PBC_PAIR_SWAP_DIS BIT(5)
+#define MII_VSC73XX_PBC_POL_INV_DIS BIT(4)
+#define MII_VSC73XX_PBC_PARALLEL_DET_DIS BIT(3)
+#define MII_VSC73XX_PBC_AUTO_NP_EXCHANGE_DIS BIT(1)
+
+/* VSC73XX PHY_AUX_CTRL_STAT register */
+#define MII_VSC73XX_PHY_AUX_CTRL_STAT MII_NCONFIG
+#define MII_VSC73XX_PACS_NO_MDI_X_IND BIT(13)
+
/* Vitesse VSC8601 Extended PHY Control Register 1 */
#define MII_VSC8601_EPHY_CTL 0x17
#define MII_VSC8601_EPHY_CTL_RGMII_SKEW (1 << 8)
@@ -128,6 +152,74 @@ static int vsc73xx_write_page(struct phy_device *phydev, int page)
return __phy_write(phydev, VSC73XX_EXT_PAGE_ACCESS, page);
}
+static int vsc73xx_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, enable, cnt;
+
+ val = phy_read_paged(phydev, MII_VSC73XX_EXT_PAGE_1E,
+ MII_VSC73XX_PHY_CTRL_EXT3);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT, val) + 2;
+
+ *data = enable ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int vsc73xx_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ u16 mask, val;
+ int ret;
+
+ if (cnt > MII_VSC73XX_DOWNSHIFT_MAX)
+ return -E2BIG;
+ else if (cnt == MII_VSC73XX_DOWNSHIFT_INVAL)
+ return -EINVAL;
+
+ mask = MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN;
+
+ if (!cnt) {
+ val = 0;
+ } else {
+ mask |= MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT;
+ val = MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_EN |
+ FIELD_PREP(MII_VSC73XX_PHY_CTRL_EXT3_DOWNSHIFT_CNT,
+ cnt - 2);
+ }
+
+ ret = phy_modify_paged(phydev, MII_VSC73XX_EXT_PAGE_1E,
+ MII_VSC73XX_PHY_CTRL_EXT3, mask, val);
+ if (ret < 0)
+ return ret;
+
+ return genphy_soft_reset(phydev);
+}
+
+static int vsc73xx_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return vsc73xx_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int vsc73xx_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return vsc73xx_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static void vsc73xx_config_init(struct phy_device *phydev)
{
/* Receiver init */
@@ -137,6 +229,12 @@ static void vsc73xx_config_init(struct phy_device *phydev)
/* Config LEDs 0x61 */
phy_modify(phydev, MII_TPISTATUS, 0xff00, 0x0061);
+
+ /* Enable downshift by default */
+ vsc73xx_set_downshift(phydev, MII_VSC73XX_DOWNSHIFT_MAX);
+
+ /* Set Auto MDI-X by default */
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
}
static int vsc738x_config_init(struct phy_device *phydev)
@@ -237,6 +335,75 @@ static int vsc739x_config_init(struct phy_device *phydev)
return 0;
}
+static int vsc73xx_mdix_set(struct phy_device *phydev, u8 mdix)
+{
+ int ret;
+ u16 val;
+
+ val = phy_read(phydev, MII_VSC73XX_PHY_BYPASS_CTRL);
+
+ switch (mdix) {
+ case ETH_TP_MDI:
+ val |= MII_VSC73XX_PBC_FOR_SPD_AUTO_MDIX_DIS |
+ MII_VSC73XX_PBC_PAIR_SWAP_DIS |
+ MII_VSC73XX_PBC_POL_INV_DIS;
+ break;
+ case ETH_TP_MDI_X:
+ /* When MDI-X auto configuration is disabled, only MDI mode
+ * can be forced. Use auto configuration for the forced
+ * MDI-X mode as well.
+ */
+ case ETH_TP_MDI_AUTO:
+ val &= ~(MII_VSC73XX_PBC_FOR_SPD_AUTO_MDIX_DIS |
+ MII_VSC73XX_PBC_PAIR_SWAP_DIS |
+ MII_VSC73XX_PBC_POL_INV_DIS);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = phy_write(phydev, MII_VSC73XX_PHY_BYPASS_CTRL, val);
+ if (ret)
+ return ret;
+
+ return genphy_restart_aneg(phydev);
+}
+
+static int vsc73xx_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = vsc73xx_mdix_set(phydev, phydev->mdix_ctrl);
+ if (ret)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
+static int vsc73xx_mdix_get(struct phy_device *phydev, u8 *mdix)
+{
+ u16 reg_val;
+
+ reg_val = phy_read(phydev, MII_VSC73XX_PHY_AUX_CTRL_STAT);
+ if (reg_val & MII_VSC73XX_PACS_NO_MDI_X_IND)
+ *mdix = ETH_TP_MDI;
+ else
+ *mdix = ETH_TP_MDI_X;
+
+ return 0;
+}
+
+static int vsc73xx_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = vsc73xx_mdix_get(phydev, &phydev->mdix);
+ if (ret < 0)
+ return ret;
+
+ return genphy_read_status(phydev);
+}
+
/* This adds a skew for both TX and RX clocks, so the skew should only be
* applied to "rgmii-id" interfaces. It may not work as expected
* on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces.
@@ -434,32 +601,48 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_status = vsc73xx_read_status,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
+ .get_tunable = vsc73xx_get_tunable,
+ .set_tunable = vsc73xx_set_tunable,
}, {
.phy_id = PHY_ID_VSC7388,
.name = "Vitesse VSC7388",
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_status = vsc73xx_read_status,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
+ .get_tunable = vsc73xx_get_tunable,
+ .set_tunable = vsc73xx_set_tunable,
}, {
.phy_id = PHY_ID_VSC7395,
.name = "Vitesse VSC7395",
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_status = vsc73xx_read_status,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
+ .get_tunable = vsc73xx_get_tunable,
+ .set_tunable = vsc73xx_set_tunable,
}, {
.phy_id = PHY_ID_VSC7398,
.name = "Vitesse VSC7398",
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
+ .config_aneg = vsc73xx_config_aneg,
+ .read_status = vsc73xx_read_status,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
+ .get_tunable = vsc73xx_get_tunable,
+ .set_tunable = vsc73xx_set_tunable,
}, {
.phy_id = PHY_ID_VSC8662,
.name = "Vitesse VSC8662",
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
index 2ea75686a319..5c4e88be46ee 100644
--- a/drivers/net/pse-pd/tps23881.c
+++ b/drivers/net/pse-pd/tps23881.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -737,6 +738,7 @@ static int tps23881_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tps23881_priv *priv;
+ struct gpio_desc *reset;
int ret;
u8 val;
@@ -749,6 +751,25 @@ static int tps23881_i2c_probe(struct i2c_client *client)
if (!priv)
return -ENOMEM;
+ reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset))
+ return dev_err_probe(&client->dev, PTR_ERR(reset), "Failed to get reset GPIO\n");
+
+ if (reset) {
+ /* TPS23880 datasheet (Rev G) indicates minimum reset pulse is 5us */
+ usleep_range(5, 10);
+ gpiod_set_value_cansleep(reset, 0); /* De-assert reset */
+
+ /* The TPS23880 datasheet indicates the minimum time after power-on
+ * reset is 20ms, but the document describing how to load the SRAM
+ * ("How to Load TPS2388x SRAM and Parity Code over I2C" (Rev E))
+ * indicates that programming should be delayed by at least 50ms. So
+ * wait the entire 50ms here to be safe before starting the SRAM
+ * loading procedure.
+ */
+ msleep(50);
+ }
+
ret = i2c_smbus_read_byte_data(client, TPS23881_REG_DEVID);
if (ret < 0)
return ret;
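
The reset handshake above is purely a timing contract: hold the reset line
for at least 5us, release it, then give the part 50ms before any SRAM
programming. A userspace sketch of the same sequence using POSIX
nanosleep() in place of the kernel delay helpers (set_reset() is a
hypothetical stand-in for the GPIO write):

	#define _POSIX_C_SOURCE 199309L
	#include <stdio.h>
	#include <time.h>

	static void delay_us(long us)
	{
		struct timespec ts = { us / 1000000, (us % 1000000) * 1000L };

		nanosleep(&ts, NULL);
	}

	static void set_reset(int asserted)
	{
		printf("reset %s\n", asserted ? "asserted" : "released");
	}

	int main(void)
	{
		set_reset(1);
		delay_us(5);		/* >= 5us reset pulse */
		set_reset(0);
		delay_us(50000);	/* 50ms before SRAM programming may start */
		return 0;
	}
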
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index d591e33268e5..55aa8d0c8e1f 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -893,7 +893,7 @@ static const struct mii_phy_ops bcm5201_phy_ops = {
.read_link = genmii_read_link,
};
-static struct mii_phy_def bcm5201_phy_def = {
+static const struct mii_phy_def bcm5201_phy_def = {
.phy_id = 0x00406210,
.phy_id_mask = 0xfffffff0,
.name = "BCM5201",
@@ -912,7 +912,7 @@ static const struct mii_phy_ops bcm5221_phy_ops = {
.read_link = genmii_read_link,
};
-static struct mii_phy_def bcm5221_phy_def = {
+static const struct mii_phy_def bcm5221_phy_def = {
.phy_id = 0x004061e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5221",
@@ -930,7 +930,8 @@ static const struct mii_phy_ops bcm5241_phy_ops = {
.poll_link = genmii_poll_link,
.read_link = genmii_read_link,
};
-static struct mii_phy_def bcm5241_phy_def = {
+
+static const struct mii_phy_def bcm5241_phy_def = {
.phy_id = 0x0143bc30,
.phy_id_mask = 0xfffffff0,
.name = "BCM5241",
@@ -949,7 +950,7 @@ static const struct mii_phy_ops bcm5400_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5400_phy_def = {
+static const struct mii_phy_def bcm5400_phy_def = {
.phy_id = 0x00206040,
.phy_id_mask = 0xfffffff0,
.name = "BCM5400",
@@ -968,7 +969,7 @@ static const struct mii_phy_ops bcm5401_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5401_phy_def = {
+static const struct mii_phy_def bcm5401_phy_def = {
.phy_id = 0x00206050,
.phy_id_mask = 0xfffffff0,
.name = "BCM5401",
@@ -987,7 +988,7 @@ static const struct mii_phy_ops bcm5411_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5411_phy_def = {
+static const struct mii_phy_def bcm5411_phy_def = {
.phy_id = 0x00206070,
.phy_id_mask = 0xfffffff0,
.name = "BCM5411",
@@ -1007,7 +1008,7 @@ static const struct mii_phy_ops bcm5421_phy_ops = {
.enable_fiber = bcm5421_enable_fiber,
};
-static struct mii_phy_def bcm5421_phy_def = {
+static const struct mii_phy_def bcm5421_phy_def = {
.phy_id = 0x002060e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5421",
@@ -1026,7 +1027,7 @@ static const struct mii_phy_ops bcm5421k2_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5421k2_phy_def = {
+static const struct mii_phy_def bcm5421k2_phy_def = {
.phy_id = 0x002062e0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5421-K2",
@@ -1045,7 +1046,7 @@ static const struct mii_phy_ops bcm5461_phy_ops = {
.enable_fiber = bcm5461_enable_fiber,
};
-static struct mii_phy_def bcm5461_phy_def = {
+static const struct mii_phy_def bcm5461_phy_def = {
.phy_id = 0x002060c0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5461",
@@ -1064,7 +1065,7 @@ static const struct mii_phy_ops bcm5462V_phy_ops = {
.read_link = bcm54xx_read_link,
};
-static struct mii_phy_def bcm5462V_phy_def = {
+static const struct mii_phy_def bcm5462V_phy_def = {
.phy_id = 0x002060d0,
.phy_id_mask = 0xfffffff0,
.name = "BCM5462-Vesta",
@@ -1094,7 +1095,7 @@ static const struct mii_phy_ops marvell88e1111_phy_ops = {
/* two revs in darwin for the 88e1101 ... I could use a datasheet
* to get the proper names...
*/
-static struct mii_phy_def marvell88e1101v1_phy_def = {
+static const struct mii_phy_def marvell88e1101v1_phy_def = {
.phy_id = 0x01410c20,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1101v1",
@@ -1102,7 +1103,8 @@ static struct mii_phy_def marvell88e1101v1_phy_def = {
.magic_aneg = 1,
.ops = &marvell88e1101_phy_ops
};
-static struct mii_phy_def marvell88e1101v2_phy_def = {
+
+static const struct mii_phy_def marvell88e1101v2_phy_def = {
.phy_id = 0x01410c60,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1101v2",
@@ -1110,7 +1112,8 @@ static struct mii_phy_def marvell88e1101v2_phy_def = {
.magic_aneg = 1,
.ops = &marvell88e1101_phy_ops
};
-static struct mii_phy_def marvell88e1111_phy_def = {
+
+static const struct mii_phy_def marvell88e1111_phy_def = {
.phy_id = 0x01410cc0,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1111",
@@ -1127,7 +1130,7 @@ static const struct mii_phy_ops generic_phy_ops = {
.read_link = genmii_read_link
};
-static struct mii_phy_def genmii_phy_def = {
+static const struct mii_phy_def genmii_phy_def = {
.phy_id = 0x00000000,
.phy_id_mask = 0x00000000,
.name = "Generic MII",
@@ -1136,7 +1139,7 @@ static struct mii_phy_def genmii_phy_def = {
.ops = &generic_phy_ops
};
-static struct mii_phy_def* mii_phy_table[] = {
+static const struct mii_phy_def *mii_phy_table[] = {
&bcm5201_phy_def,
&bcm5221_phy_def,
&bcm5241_phy_def,
@@ -1156,9 +1159,9 @@ static struct mii_phy_def* mii_phy_table[] = {
int sungem_phy_probe(struct mii_phy *phy, int mii_id)
{
+ const struct mii_phy_def *def;
int rc;
u32 id;
- struct mii_phy_def* def;
int i;
/* We do not reset the mii_phy structure as the driver
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 6d61052353f0..a6469235d904 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -418,7 +418,8 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
event->wValue ? "on" : "off");
- usbnet_link_change(dev, !!event->wValue, 0);
+ if (netif_carrier_ok(dev->net) != !!event->wValue)
+ usbnet_link_change(dev, !!event->wValue, 0);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n",
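
The added guard suppresses redundant link events: usbnet_link_change() is
now called only when the notification actually flips the carrier state.
The dedup logic in isolation, as a toy sketch with hypothetical names:

	#include <stdbool.h>
	#include <stdio.h>

	static bool carrier_ok;

	static void link_change(bool up)
	{
		carrier_ok = up;
		printf("link %s\n", up ? "up" : "down");
	}

	static void cdc_notify(bool up)
	{
		if (carrier_ok != up)	/* drop duplicate notifications */
			link_change(up);
	}

	int main(void)
	{
		cdc_notify(true);
		cdc_notify(true);	/* duplicate: ignored */
		cdc_notify(false);
		return 0;
	}
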
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 426e68a95067..34499b91a8bd 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1696,6 +1696,7 @@ static void veth_setup(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->priv_flags |= IFF_NO_QUEUE;
dev->priv_flags |= IFF_PHONY_HEADROOM;
+ dev->priv_flags |= IFF_DISABLE_NETPOLL;
dev->netdev_ops = &veth_netdev_ops;
dev->xdp_metadata_ops = &veth_xdp_metadata_ops;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c6af18948092..61ffa8851d7b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2885,6 +2885,25 @@ static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim)
net_dim_work_cancel(dim);
}
+static void virtnet_update_settings(struct virtnet_info *vi)
+{
+ u32 speed;
+ u8 duplex;
+
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
+ return;
+
+ virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
+
+ if (ethtool_validate_speed(speed))
+ vi->speed = speed;
+
+ virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
+
+ if (ethtool_validate_duplex(duplex))
+ vi->duplex = duplex;
+}
+
static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -2903,6 +2922,15 @@ static int virtnet_open(struct net_device *dev)
goto err_enable_qp;
}
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+ if (vi->status & VIRTIO_NET_S_LINK_UP)
+ netif_carrier_on(vi->dev);
+ virtio_config_driver_enable(vi->vdev);
+ } else {
+ vi->status = VIRTIO_NET_S_LINK_UP;
+ netif_carrier_on(dev);
+ }
+
return 0;
err_enable_qp:
@@ -3381,12 +3409,22 @@ static int virtnet_close(struct net_device *dev)
disable_delayed_refill(vi);
/* Make sure refill_work doesn't re-enable napi! */
cancel_delayed_work_sync(&vi->refill);
+ /* Prevent the config change callback from changing carrier
+ * after close
+ */
+ virtio_config_driver_disable(vi->vdev);
+ /* Stop getting status/speed updates: we don't care until next
+ * open
+ */
+ cancel_work_sync(&vi->config_work);
for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_disable_queue_pair(vi, i);
virtnet_cancel_dim(vi, &vi->rq[i].dim);
}
+ netif_carrier_off(dev);
+
return 0;
}
@@ -5095,25 +5133,6 @@ static void virtnet_init_settings(struct net_device *dev)
vi->duplex = DUPLEX_UNKNOWN;
}
-static void virtnet_update_settings(struct virtnet_info *vi)
-{
- u32 speed;
- u8 duplex;
-
- if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
- return;
-
- virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
-
- if (ethtool_validate_speed(speed))
- vi->speed = speed;
-
- virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
-
- if (ethtool_validate_duplex(duplex))
- vi->duplex = duplex;
-}
-
static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
{
return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
@@ -6524,6 +6543,9 @@ static int virtnet_probe(struct virtio_device *vdev)
goto free_failover;
}
+ /* Disable config change notification until ndo_open. */
+ virtio_config_driver_disable(vi->vdev);
+
virtio_device_ready(vdev);
virtnet_set_queues(vi, vi->curr_queue_pairs);
@@ -6573,19 +6595,11 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->device_stats_cap = le64_to_cpu(v);
}
- rtnl_unlock();
-
- err = virtnet_cpu_notif_add(vi);
- if (err) {
- pr_debug("virtio_net: registering cpu notifier failed\n");
- goto free_unregister_netdev;
- }
-
/* Assume link up if device can't report link status,
otherwise get link status from config. */
netif_carrier_off(dev);
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
- schedule_work(&vi->config_work);
+ virtnet_config_changed_work(&vi->config_work);
} else {
vi->status = VIRTIO_NET_S_LINK_UP;
virtnet_update_settings(vi);
@@ -6597,6 +6611,14 @@ static int virtnet_probe(struct virtio_device *vdev)
set_bit(guest_offloads[i], &vi->guest_offloads);
vi->guest_offloads_capable = vi->guest_offloads;
+ rtnl_unlock();
+
+ err = virtnet_cpu_notif_add(vi);
+ if (err) {
+ pr_debug("virtio_net: registering cpu notifier failed\n");
+ goto free_unregister_netdev;
+ }
+
pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
dev->name, max_queue_pairs);
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index ba59e92ab941..34391c18bba7 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -277,8 +277,7 @@ static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
@@ -2690,11 +2689,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *rdst, *fdst = NULL;
const struct ip_tunnel_info *info;
- bool did_rsc = false;
struct vxlan_fdb *f;
struct ethhdr *eth;
__be32 vni = 0;
u32 nhid = 0;
+ bool did_rsc;
info = skb_tunnel_info(skb);
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
index 8d864d4ed77f..79f17100f70b 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -53,6 +53,7 @@
#define RGU_RESET_DELAY_MS 10
#define PORT_RESET_DELAY_MS 2000
+#define FASTBOOT_RESET_DELAY_MS 2000
#define EX_HS_TIMEOUT_MS 5000
#define EX_HS_POLL_DELAY_MS 10
@@ -167,19 +168,52 @@ static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
}
kfree(buffer.pointer);
+#else
+ struct device *dev = &t7xx_dev->pdev->dev;
+ int ret;
+ ret = pci_reset_function(t7xx_dev->pdev);
+ if (ret) {
+ dev_err(dev, "Failed to reset device, error:%d\n", ret);
+ return ret;
+ }
#endif
return 0;
}
-int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
+static void t7xx_host_event_notify(struct t7xx_pci_dev *t7xx_dev, unsigned int event_id)
{
- return t7xx_acpi_reset(t7xx_dev, "_RST");
+ u32 value;
+
+ value = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ value &= ~HOST_EVENT_MASK;
+ value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
+ iowrite32(value, IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
}
-int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev)
+int t7xx_reset_device(struct t7xx_pci_dev *t7xx_dev, enum reset_type type)
{
- return t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+ int ret = 0;
+
+ pci_save_state(t7xx_dev->pdev);
+ t7xx_pci_reprobe_early(t7xx_dev);
+ t7xx_mode_update(t7xx_dev, T7XX_RESET);
+
+ if (type == FLDR) {
+ ret = t7xx_acpi_reset(t7xx_dev, "_RST");
+ } else if (type == PLDR) {
+ ret = t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+ } else if (type == FASTBOOT) {
+ t7xx_host_event_notify(t7xx_dev, FASTBOOT_DL_NOTIFY);
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
+ msleep(FASTBOOT_RESET_DELAY_MS);
+ }
+
+ pci_restore_state(t7xx_dev->pdev);
+ if (ret)
+ return ret;
+
+ return t7xx_pci_reprobe(t7xx_dev, true);
}
static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
@@ -188,16 +222,15 @@ static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
if (val & MISC_RESET_TYPE_PLDR)
- t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+ t7xx_reset_device(t7xx_dev, PLDR);
else if (val & MISC_RESET_TYPE_FLDR)
- t7xx_acpi_fldr_func(t7xx_dev);
+ t7xx_reset_device(t7xx_dev, FLDR);
}
static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
struct t7xx_pci_dev *t7xx_dev = data;
- t7xx_mode_update(t7xx_dev, T7XX_RESET);
msleep(RGU_RESET_DELAY_MS);
t7xx_reset_device_via_pmic(t7xx_dev);
return IRQ_HANDLED;
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
index b39e945a92e0..39ed0000fbba 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
@@ -78,14 +78,19 @@ struct t7xx_modem {
spinlock_t exp_lock; /* Protects exception events */
};
+enum reset_type {
+ FLDR,
+ PLDR,
+ FASTBOOT,
+};
+
void t7xx_md_exception_handshake(struct t7xx_modem *md);
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id);
int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev);
int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev);
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev);
void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev);
-int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev);
-int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_reset_device(struct t7xx_pci_dev *t7xx_dev, enum reset_type type);
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev);
#endif /* __T7XX_MODEM_OPS_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 10a8c1080b10..e556e5bd49ab 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -69,6 +69,7 @@ static ssize_t t7xx_mode_store(struct device *dev,
{
struct t7xx_pci_dev *t7xx_dev;
struct pci_dev *pdev;
+ enum t7xx_mode mode;
int index = 0;
pdev = to_pci_dev(dev);
@@ -76,12 +77,22 @@ static ssize_t t7xx_mode_store(struct device *dev,
if (!t7xx_dev)
return -ENODEV;
+ mode = READ_ONCE(t7xx_dev->mode);
+
index = sysfs_match_string(t7xx_mode_names, buf);
+ if (index == mode)
+ return -EBUSY;
+
if (index == T7XX_FASTBOOT_SWITCHING) {
+ if (mode == T7XX_FASTBOOT_DOWNLOAD)
+ return count;
+
WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING);
+ pm_runtime_resume(dev);
+ t7xx_reset_device(t7xx_dev, FASTBOOT);
} else if (index == T7XX_RESET) {
- WRITE_ONCE(t7xx_dev->mode, T7XX_RESET);
- t7xx_acpi_pldr_func(t7xx_dev);
+ pm_runtime_resume(dev);
+ t7xx_reset_device(t7xx_dev, PLDR);
}
return count;
@@ -446,7 +457,7 @@ static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
if (is_d3) {
t7xx_mhccif_init(t7xx_dev);
- return t7xx_pci_pm_reinit(t7xx_dev);
+ t7xx_pci_pm_reinit(t7xx_dev);
}
return 0;
@@ -481,6 +492,33 @@ static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
return ret;
}
+int t7xx_pci_reprobe_early(struct t7xx_pci_dev *t7xx_dev)
+{
+ enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
+ int ret;
+
+ if (mode == T7XX_FASTBOOT_DOWNLOAD)
+ pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
+
+ ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int t7xx_pci_reprobe(struct t7xx_pci_dev *t7xx_dev, bool boot)
+{
+ int ret;
+
+ ret = t7xx_pcie_reinit(t7xx_dev, boot);
+ if (ret)
+ return ret;
+
+ t7xx_clear_rgu_irq(t7xx_dev);
+ return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
+}
+
static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
{
struct t7xx_pci_dev *t7xx_dev;
@@ -507,16 +545,11 @@ static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
if (prev_state == PM_RESUME_REG_STATE_L3 ||
(prev_state == PM_RESUME_REG_STATE_INIT &&
atr_reg_val == ATR_SRC_ADDR_INVALID)) {
- ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
- if (ret)
- return ret;
-
- ret = t7xx_pcie_reinit(t7xx_dev, true);
+ ret = t7xx_pci_reprobe_early(t7xx_dev);
if (ret)
return ret;
- t7xx_clear_rgu_irq(t7xx_dev);
- return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
+ return t7xx_pci_reprobe(t7xx_dev, true);
}
if (prev_state == PM_RESUME_REG_STATE_EXP ||
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h
index 49a11586d8d8..cd8ea17c2644 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -133,4 +133,7 @@ int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_en
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);
void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev);
void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode);
+int t7xx_pci_reprobe(struct t7xx_pci_dev *t7xx_dev, bool boot);
+int t7xx_pci_reprobe_early(struct t7xx_pci_dev *t7xx_dev);
+
#endif /* __T7XX_PCI_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
index 7d6388bf1d7c..35743e7de0c3 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -553,7 +553,6 @@ static int t7xx_proxy_alloc(struct t7xx_modem *md)
md->port_prox = port_prox;
port_prox->dev = dev;
- t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
return 0;
}
diff --git a/drivers/net/wwan/t7xx/t7xx_port_trace.c b/drivers/net/wwan/t7xx/t7xx_port_trace.c
index 6a3f36385865..4ed8b4e29bf1 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_trace.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_trace.c
@@ -59,6 +59,7 @@ static void t7xx_trace_port_uninit(struct t7xx_port *port)
relay_close(relaych);
debugfs_remove_recursive(debugfs_dir);
+ port->log.relaych = NULL;
}
static int t7xx_trace_port_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
index 9889ca4621cf..3931c7a13f5a 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -213,16 +213,6 @@ static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comm
fsm_finish_command(ctl, cmd, 0);
}
-static void t7xx_host_event_notify(struct t7xx_modem *md, unsigned int event_id)
-{
- u32 value;
-
- value = ioread32(IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
- value &= ~HOST_EVENT_MASK;
- value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
- iowrite32(value, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
-}
-
static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status)
{
struct t7xx_modem *md = ctl->md;
@@ -264,8 +254,14 @@ static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int
static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
+ enum t7xx_mode mode;
+
ctl->curr_state = FSM_STATE_STOPPED;
+ mode = READ_ONCE(ctl->md->t7xx_dev->mode);
+ if (mode == T7XX_FASTBOOT_DOWNLOAD || mode == T7XX_FASTBOOT_DUMP)
+ return 0;
+
t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
return t7xx_md_reset(ctl->md->t7xx_dev);
}
@@ -284,8 +280,6 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma
{
struct cldma_ctrl *md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev;
- enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
- int err;
if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
fsm_finish_command(ctl, cmd, -EINVAL);
@@ -296,21 +290,10 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
t7xx_cldma_stop(md_ctrl);
- if (mode == T7XX_FASTBOOT_SWITCHING)
- t7xx_host_event_notify(ctl->md, FASTBOOT_DL_NOTIFY);
-
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
/* Wait for the DRM disable to take effect */
msleep(FSM_DRM_DISABLE_DELAY_MS);
- if (mode == T7XX_FASTBOOT_SWITCHING) {
- t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
- } else {
- err = t7xx_acpi_fldr_func(t7xx_dev);
- if (err)
- t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
- }
-
fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}
@@ -414,7 +397,9 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
case T7XX_DEV_STAGE_LK:
dev_dbg(dev, "LK_STAGE Entered\n");
+ t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
t7xx_lk_stage_event_handling(ctl, status);
+
break;
case T7XX_DEV_STAGE_LINUX:
@@ -436,6 +421,9 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
}
finish_command:
+ if (ret)
+ t7xx_mode_update(md->t7xx_dev, T7XX_UNKNOWN);
+
fsm_finish_command(ctl, cmd, ret);
}
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index ff96f22648ef..45ddce35f6d2 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -95,7 +95,7 @@ static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
static void xenvif_flush_hash(struct xenvif *vif)
{
- struct xenvif_hash_cache_entry *entry;
+ struct xenvif_hash_cache_entry *entry, *n;
unsigned long flags;
if (xenvif_hash_cache_size == 0)
@@ -103,8 +103,7 @@ static void xenvif_flush_hash(struct xenvif *vif)
spin_lock_irqsave(&vif->hash.cache.lock, flags);
- list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
- lockdep_is_held(&vif->hash.cache.lock)) {
+ list_for_each_entry_safe(entry, n, &vif->hash.cache.list, link) {
list_del_rcu(&entry->link);
vif->hash.cache.count--;
kfree_rcu(entry, rcu);
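
The switch to list_for_each_entry_safe() matters because each entry is
unlinked and queued for freeing inside the loop body; the _safe variant
caches the successor before the current node disappears. The same pattern
on a self-contained userspace list (toy sketch, no kernel API):

	#include <stdio.h>
	#include <stdlib.h>

	struct node { int v; struct node *next; };

	static void flush(struct node **head)
	{
		struct node *entry = *head, *n;

		while (entry) {
			n = entry->next;	/* grab the successor first */
			free(entry);		/* now the entry may go away safely */
			entry = n;
		}
		*head = NULL;
	}

	int main(void)
	{
		struct node *head = NULL;

		for (int i = 0; i < 3; i++) {
			struct node *e = malloc(sizeof(*e));

			e->v = i;
			e->next = head;
			head = e;
		}
		flush(&head);
		printf("flushed\n");
		return 0;
	}
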
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index a187f0e0b0f7..ffd7367ce119 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -254,7 +254,6 @@ struct pn533_acr122_ccid_hdr {
* byte for response msg
*/
u8 params[3];
- u8 data[]; /* payload */
} __packed;
struct pn533_acr122_apdu_hdr {
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bf664ec9341b..802153e23073 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -244,7 +244,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
restart_tx = true;
}
- consume_skb(skb);
+ virtio_transport_consume_skb_sent(skb, true);
}
} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
if (added)
@@ -451,6 +451,8 @@ static struct virtio_transport vhost_transport = {
.notify_buffer_size = virtio_transport_notify_buffer_size,
.notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,
+ .unsent_bytes = virtio_transport_unsent_bytes,
+
.read_skb = virtio_transport_read_skb,
},
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index bc1f962e483b..b9095751e43b 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -127,10 +127,12 @@ static void __virtio_config_changed(struct virtio_device *dev)
{
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- if (!dev->config_enabled)
+ if (!dev->config_core_enabled || dev->config_driver_disabled)
dev->config_change_pending = true;
- else if (drv && drv->config_changed)
+ else if (drv && drv->config_changed) {
drv->config_changed(dev);
+ dev->config_change_pending = false;
+ }
}
void virtio_config_changed(struct virtio_device *dev)
@@ -143,20 +145,51 @@ void virtio_config_changed(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(virtio_config_changed);
-static void virtio_config_disable(struct virtio_device *dev)
+/**
+ * virtio_config_driver_disable - disable config change reporting by drivers
+ * @dev: the device to reset
+ *
+ * This is only allowed to be called by a driver and disabling can't
+ * be nested.
+ */
+void virtio_config_driver_disable(struct virtio_device *dev)
{
spin_lock_irq(&dev->config_lock);
- dev->config_enabled = false;
+ dev->config_driver_disabled = true;
spin_unlock_irq(&dev->config_lock);
}
+EXPORT_SYMBOL_GPL(virtio_config_driver_disable);
-static void virtio_config_enable(struct virtio_device *dev)
+/**
+ * virtio_config_driver_enable - enable config change reporting by drivers
+ * @dev: the virtio device
+ *
+ * This is only allowed to be called by a driver and enabling can't
+ * be nested.
+ */
+void virtio_config_driver_enable(struct virtio_device *dev)
{
spin_lock_irq(&dev->config_lock);
- dev->config_enabled = true;
+ dev->config_driver_disabled = false;
+ if (dev->config_change_pending)
+ __virtio_config_changed(dev);
+ spin_unlock_irq(&dev->config_lock);
+}
+EXPORT_SYMBOL_GPL(virtio_config_driver_enable);
+
+static void virtio_config_core_disable(struct virtio_device *dev)
+{
+ spin_lock_irq(&dev->config_lock);
+ dev->config_core_enabled = false;
+ spin_unlock_irq(&dev->config_lock);
+}
+
+static void virtio_config_core_enable(struct virtio_device *dev)
+{
+ spin_lock_irq(&dev->config_lock);
+ dev->config_core_enabled = true;
if (dev->config_change_pending)
__virtio_config_changed(dev);
- dev->config_change_pending = false;
spin_unlock_irq(&dev->config_lock);
}
@@ -316,7 +349,7 @@ static int virtio_dev_probe(struct device *_d)
if (drv->scan)
drv->scan(dev);
- virtio_config_enable(dev);
+ virtio_config_core_enable(dev);
return 0;
@@ -331,7 +364,7 @@ static void virtio_dev_remove(struct device *_d)
struct virtio_device *dev = dev_to_virtio(_d);
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
- virtio_config_disable(dev);
+ virtio_config_core_disable(dev);
drv->remove(dev);
@@ -443,7 +476,7 @@ int register_virtio_device(struct virtio_device *dev)
goto out_ida_remove;
spin_lock_init(&dev->config_lock);
- dev->config_enabled = false;
+ dev->config_core_enabled = false;
dev->config_change_pending = false;
INIT_LIST_HEAD(&dev->vqs);
@@ -500,14 +533,14 @@ int virtio_device_freeze(struct virtio_device *dev)
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
int ret;
- virtio_config_disable(dev);
+ virtio_config_core_disable(dev);
dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
if (drv && drv->freeze) {
ret = drv->freeze(dev);
if (ret) {
- virtio_config_enable(dev);
+ virtio_config_core_enable(dev);
return ret;
}
}
@@ -557,7 +590,7 @@ int virtio_device_restore(struct virtio_device *dev)
if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
virtio_device_ready(dev);
- virtio_config_enable(dev);
+ virtio_config_core_enable(dev);
return 0;
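
Taken together, the virtio core now keeps two independent gates in front
of the driver's config_changed callback: a core gate toggled around
probe/remove/freeze, and a driver gate (used by virtio-net around
open/close). A change arriving while either gate is closed is latched in
config_change_pending and replayed on re-enable. A toy sketch of the
gating state machine, without the spinlocks (hypothetical names):

	#include <stdbool.h>
	#include <stdio.h>

	struct vdev {
		bool core_enabled;
		bool driver_disabled;
		bool pending;
	};

	static void deliver(struct vdev *d)
	{
		printf("config_changed delivered\n");
		d->pending = false;	/* cleared only on actual delivery */
	}

	static void config_changed(struct vdev *d)
	{
		if (!d->core_enabled || d->driver_disabled)
			d->pending = true;	/* latch for later */
		else
			deliver(d);
	}

	static void driver_enable(struct vdev *d)
	{
		d->driver_disabled = false;
		if (d->pending)
			deliver(d);		/* replay the latched event */
	}

	int main(void)
	{
		struct vdev d = { .core_enabled = true, .driver_disabled = true };

		config_changed(&d);	/* masked: latched only */
		driver_enable(&d);	/* replayed here */
		return 0;
	}
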