Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c | 37
-rw-r--r--  drivers/isdn/mISDN/core.c | 14
-rw-r--r--  drivers/isdn/mISDN/core.h | 1
-rw-r--r--  drivers/net/bareudp.c | 16
-rw-r--r--  drivers/net/can/dev/dev.c | 2
-rw-r--r--  drivers/net/can/grcan.c | 3
-rw-r--r--  drivers/net/can/kvaser_pciefd.c | 81
-rw-r--r--  drivers/net/can/m_can/m_can.c | 22
-rw-r--r--  drivers/net/can/m_can/m_can.h | 1
-rw-r--r--  drivers/net/can/m_can/tcan4x5x-core.c | 30
-rw-r--r--  drivers/net/can/m_can/tcan4x5x.h | 2
-rw-r--r--  drivers/net/can/sja1000/sja1000_platform.c | 15
-rw-r--r--  drivers/net/can/sun4i_can.c | 2
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 3
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 133
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 38
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 14
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 2
-rw-r--r--  drivers/net/dsa/b53/b53_serdes.c | 4
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c | 4
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 118
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 3
-rw-r--r--  drivers/net/dsa/microchip/ksz_spi.c | 4
-rw-r--r--  drivers/net/dsa/mt7530.c | 16
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 58
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx/pcs-6185.c | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx/pcs-6352.c | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx/pcs-639x.c | 5
-rw-r--r--  drivers/net/dsa/qca/qca8k-8xxx.c | 10
-rw-r--r--  drivers/net/dsa/qca/qca8k-common.c | 7
-rw-r--r--  drivers/net/dsa/qca/qca8k.h | 3
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.c | 8
-rw-r--r--  drivers/net/ethernet/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 11
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c | 39
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 103
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 25
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 112
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 64
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_main.c | 28
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 330
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 29
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc4_hw.h | 23
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 31
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf_common.c | 13
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_vf.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 35
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 604
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.h | 22
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 74
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c | 14
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h | 1
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx_dqo.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/Makefile | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h | 29
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c | 160
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c | 134
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c | 181
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c | 48
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c | 199
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c | 15
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h | 39
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 109
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 28
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 67
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.c | 2
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 120
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 458
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c | 13
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 231
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 40
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 30
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 79
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/health.c | 550
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/health.h | 71
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/port.c (renamed from drivers/net/ethernet/intel/ice/devlink/devlink_port.c) | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/port.h (renamed from drivers/net/ethernet/intel/ice/devlink/devlink_port.h) | 0
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 98
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 444
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 49
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 117
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 130
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 164
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 40
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sf_eth.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 17
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 32
-rw-r--r--  drivers/net/ethernet/intel/igb/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 58
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 270
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_xsk.c | 562
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 118
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_nvm.c | 50
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_nvm.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_xdp.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 25
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c | 2658
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h | 81
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 459
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 72
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h | 1074
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 35
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 4
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 25
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 34
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 27
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 39
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c | 23
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 68
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c | 1056
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h | 265
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 114
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 28
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 19
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 66
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 1
-rw-r--r--  drivers/net/ethernet/mediatek/airoha_eth.c | 567
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c | 292
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 88
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 309
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c | 195
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 365
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c | 77
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rl.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c | 159
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c | 72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c | 95
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c | 1377
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h | 80
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c | 450
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h | 73
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c | 218
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c | 143
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c | 207
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h | 147
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c | 169
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h | 168
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c | 221
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 2
-rw-r--r--  drivers/net/ethernet/meta/fbnic/Makefile | 1
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic.h | 20
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c | 543
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 160
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw.h | 28
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c | 81
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_irq.c | 53
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_mac.c | 72
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_mac.h | 7
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_netdev.c | 12
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_netdev.h | 7
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_pci.c | 5
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_phylink.c | 2
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_rpc.c | 7
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 238
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_txrx.h | 16
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.h | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c | 3
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_port.c | 4
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/Makefile | 4
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c | 9
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h | 17
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c | 406
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c | 224
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c | 68
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_main.c | 48
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_main.h | 35
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h | 145
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_packet.c | 11
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c | 16
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_port.c | 57
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_port.h | 5
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic.h | 3
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 40
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_if.h | 22
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 9
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 20
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 69
-rw-r--r--  drivers/net/ethernet/realtek/r8169.h | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 115
-rw-r--r--  drivers/net/ethernet/realtek/r8169_phy_config.c | 38
-rw-r--r--  drivers/net/ethernet/realtek/rtase/rtase.h | 1
-rw-r--r--  drivers/net/ethernet/realtek/rtase/rtase_main.c | 14
-rw-r--r--  drivers/net/ethernet/renesas/rswitch.c | 119
-rw-r--r--  drivers/net/ethernet/renesas/rswitch.h | 48
-rw-r--r--  drivers/net/ethernet/sfc/io.h | 24
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/siena/net_driver.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/siena/rx_common.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 16
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 11
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 37
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 30
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c | 202
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c | 19
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 35
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 19
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 21
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_est.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 64
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 277
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 11
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h | 1
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 22
-rw-r--r--  drivers/net/ethernet/sunplus/spl2sw_driver.c | 2
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 21
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 5
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 5
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.h | 1
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth.c | 175
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth.h | 8
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c | 2
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 6
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 3
-rw-r--r--  drivers/net/geneve.c | 12
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 6
-rw-r--r--  drivers/net/loopback.c | 5
-rw-r--r--  drivers/net/mctp/mctp-i2c.c | 3
-rw-r--r--  drivers/net/mdio/mdio-octeon.c | 25
-rw-r--r--  drivers/net/netconsole.c | 62
-rw-r--r--  drivers/net/netdevsim/ethtool.c | 16
-rw-r--r--  drivers/net/netdevsim/netdev.c | 268
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 8
-rw-r--r--  drivers/net/netkit.c | 66
-rw-r--r--  drivers/net/pcs/pcs-lynx.c | 39
-rw-r--r--  drivers/net/pcs/pcs-mtk-lynxi.c | 25
-rw-r--r--  drivers/net/pcs/pcs-xpcs.c | 40
-rw-r--r--  drivers/net/phy/Kconfig | 14
-rw-r--r--  drivers/net/phy/Makefile | 3
-rw-r--r--  drivers/net/phy/adin.c | 2
-rw-r--r--  drivers/net/phy/adin1100.c | 2
-rw-r--r--  drivers/net/phy/air_en8811h.c | 2
-rw-r--r--  drivers/net/phy/amd.c | 2
-rw-r--r--  drivers/net/phy/aquantia/aquantia_main.c | 2
-rw-r--r--  drivers/net/phy/ax88796b.c | 2
-rw-r--r--  drivers/net/phy/bcm-cygnus.c | 2
-rw-r--r--  drivers/net/phy/bcm54140.c | 2
-rw-r--r--  drivers/net/phy/bcm63xx.c | 2
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 2
-rw-r--r--  drivers/net/phy/bcm84881.c | 12
-rw-r--r--  drivers/net/phy/broadcom.c | 2
-rw-r--r--  drivers/net/phy/cicada.c | 2
-rw-r--r--  drivers/net/phy/cortina.c | 2
-rw-r--r--  drivers/net/phy/davicom.c | 2
-rw-r--r--  drivers/net/phy/dp83640.c | 2
-rw-r--r--  drivers/net/phy/dp83822.c | 385
-rw-r--r--  drivers/net/phy/dp83848.c | 2
-rw-r--r--  drivers/net/phy/dp83867.c | 2
-rw-r--r--  drivers/net/phy/dp83869.c | 2
-rw-r--r--  drivers/net/phy/dp83tc811.c | 2
-rw-r--r--  drivers/net/phy/dp83td510.c | 114
-rw-r--r--  drivers/net/phy/dp83tg720.c | 163
-rw-r--r--  drivers/net/phy/et1011c.c | 2
-rw-r--r--  drivers/net/phy/icplus.c | 2
-rw-r--r--  drivers/net/phy/intel-xway.c | 2
-rw-r--r--  drivers/net/phy/lxt.c | 2
-rw-r--r--  drivers/net/phy/marvell-88q2xxx.c | 2
-rw-r--r--  drivers/net/phy/marvell-88x2222.c | 2
-rw-r--r--  drivers/net/phy/marvell.c | 54
-rw-r--r--  drivers/net/phy/marvell10g.c | 2
-rw-r--r--  drivers/net/phy/mediatek/mtk-ge-soc.c | 2
-rw-r--r--  drivers/net/phy/mediatek/mtk-ge.c | 2
-rw-r--r--  drivers/net/phy/meson-gxl.c | 2
-rw-r--r--  drivers/net/phy/micrel.c | 4
-rw-r--r--  drivers/net/phy/microchip.c | 2
-rw-r--r--  drivers/net/phy/microchip_rds_ptp.c | 1039
-rw-r--r--  drivers/net/phy/microchip_rds_ptp.h | 223
-rw-r--r--  drivers/net/phy/microchip_t1.c | 43
-rw-r--r--  drivers/net/phy/microchip_t1s.c | 2
-rw-r--r--  drivers/net/phy/mscc/mscc_main.c | 2
-rw-r--r--  drivers/net/phy/mxl-gpy.c | 2
-rw-r--r--  drivers/net/phy/national.c | 2
-rw-r--r--  drivers/net/phy/ncn26000.c | 2
-rw-r--r--  drivers/net/phy/nxp-c45-tja11xx.c | 2
-rw-r--r--  drivers/net/phy/nxp-cbtx.c | 2
-rw-r--r--  drivers/net/phy/nxp-tja11xx.c | 2
-rw-r--r--  drivers/net/phy/phy-c45.c | 14
-rw-r--r--  drivers/net/phy/phy.c | 152
-rw-r--r--  drivers/net/phy/phy_device.c | 29
-rw-r--r--  drivers/net/phy/phylink.c | 430
-rw-r--r--  drivers/net/phy/qcom/at803x.c | 2
-rw-r--r--  drivers/net/phy/qcom/qca807x.c | 2
-rw-r--r--  drivers/net/phy/qcom/qca808x.c | 2
-rw-r--r--  drivers/net/phy/qcom/qca83xx.c | 2
-rw-r--r--  drivers/net/phy/qsemi.c | 2
-rw-r--r--  drivers/net/phy/realtek/Kconfig | 11
-rw-r--r--  drivers/net/phy/realtek/Makefile | 4
-rw-r--r--  drivers/net/phy/realtek/realtek.h | 10
-rw-r--r--  drivers/net/phy/realtek/realtek_hwmon.c | 79
-rw-r--r--  drivers/net/phy/realtek/realtek_main.c (renamed from drivers/net/phy/realtek.c) | 24
-rw-r--r--  drivers/net/phy/rockchip.c | 2
-rw-r--r--  drivers/net/phy/smsc.c | 2
-rw-r--r--  drivers/net/phy/spi_ks8995.c | 8
-rw-r--r--  drivers/net/phy/ste10Xp.c | 2
-rw-r--r--  drivers/net/phy/teranetics.c | 2
-rw-r--r--  drivers/net/phy/uPD60620.c | 2
-rw-r--r--  drivers/net/phy/vitesse.c | 2
-rw-r--r--  drivers/net/pse-pd/pd692x0.c | 224
-rw-r--r--  drivers/net/pse-pd/pse_core.c | 183
-rw-r--r--  drivers/net/pse-pd/pse_regulator.c | 23
-rw-r--r--  drivers/net/pse-pd/tps23881.c | 449
-rw-r--r--  drivers/net/tap.c | 6
-rw-r--r--  drivers/net/team/team_core.c | 7
-rw-r--r--  drivers/net/tun.c | 20
-rw-r--r--  drivers/net/usb/lan78xx.c | 938
-rw-r--r--  drivers/net/veth.c | 4
-rw-r--r--  drivers/net/vrf.c | 49
-rw-r--r--  drivers/net/vxlan/vxlan_core.c | 187
-rw-r--r--  drivers/net/vxlan/vxlan_mdb.c | 2
-rw-r--r--  drivers/net/wan/framer/framer-core.c | 23
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.c | 132
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_rx.c | 14
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal_rx.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath11k/hw.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath11k/mhi.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath11k/pcic.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath11k/qmi.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath11k/wow.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath12k/core.c | 482
-rw-r--r--  drivers/net/wireless/ath/ath12k/core.h | 134
-rw-r--r--  drivers/net/wireless/ath/ath12k/debug.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath12k/debug.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c | 681
-rw-r--r--  drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h | 218
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp.c | 70
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_rx.c | 96
-rw-r--r--  drivers/net/wireless/ath/ath12k/dp_rx.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal_desc.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath12k/hal_rx.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.c | 1959
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.h | 16
-rw-r--r--  drivers/net/wireless/ath/ath12k/pci.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath12k/peer.c | 223
-rw-r--r--  drivers/net/wireless/ath/ath12k/peer.h | 23
-rw-r--r--  drivers/net/wireless/ath/ath12k/qmi.c | 167
-rw-r--r--  drivers/net/wireless/ath/ath12k/qmi.h | 20
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.c | 207
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.h | 115
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/antenna.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_hw.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_hw.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mci.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/channel.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/common-spectral.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/dfs.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/link.c | 33
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/wow.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 2
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 5
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 5
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 8
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c | 6
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mac80211.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76.h | 2
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.h | 2
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/core.c | 20
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/base.c | 13
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/base.h | 1
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/pci.c | 61
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c | 7
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/usb.c | 12
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/wifi.h | 12
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8812au.c | 68
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8821au.c | 52
-rw-r--r--  drivers/net/wireless/realtek/rtw88/rtw8822bu.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtw88/usb.c | 72
-rw-r--r--  drivers/net/wireless/realtek/rtw89/acpi.c | 47
-rw-r--r--  drivers/net/wireless/realtek/rtw89/acpi.h | 9
-rw-r--r--  drivers/net/wireless/realtek/rtw89/cam.c | 32
-rw-r--r--  drivers/net/wireless/realtek/rtw89/cam.h | 5
-rw-r--r--  drivers/net/wireless/realtek/rtw89/core.c | 164
-rw-r--r--  drivers/net/wireless/realtek/rtw89/core.h | 145
-rw-r--r--  drivers/net/wireless/realtek/rtw89/debug.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtw89/fw.c | 266
-rw-r--r--  drivers/net/wireless/realtek/rtw89/fw.h | 69
-rw-r--r--  drivers/net/wireless/realtek/rtw89/mac.c | 79
-rw-r--r--  drivers/net/wireless/realtek/rtw89/mac.h | 43
-rw-r--r--  drivers/net/wireless/realtek/rtw89/mac80211.c | 289
-rw-r--r--  drivers/net/wireless/realtek/rtw89/mac_be.c | 15
-rw-r--r--  drivers/net/wireless/realtek/rtw89/pci.c | 16
-rw-r--r--  drivers/net/wireless/realtek/rtw89/pci.h | 9
-rw-r--r--  drivers/net/wireless/realtek/rtw89/pci_be.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw89/phy.c | 313
-rw-r--r--  drivers/net/wireless/realtek/rtw89/phy.h | 33
-rw-r--r--  drivers/net/wireless/realtek/rtw89/ps.c | 42
-rw-r--r--  drivers/net/wireless/realtek/rtw89/ps.h | 4
-rw-r--r--  drivers/net/wireless/realtek/rtw89/reg.h | 4
-rw-r--r--  drivers/net/wireless/realtek/rtw89/regd.c | 57
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8851b.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852a.c | 7
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852b.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852b_common.c | 50
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852bt.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852c.c | 54
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c | 6
-rw-r--r--  drivers/net/wireless/realtek/rtw89/rtw8922a.c | 21
-rw-r--r--  drivers/net/wireless/realtek/rtw89/sar.c | 57
-rw-r--r--  drivers/net/wireless/realtek/rtw89/ser.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw89/txrx.h | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw89/wow.c | 11
-rw-r--r--  drivers/net/wireless/ti/wlcore/sysfs.c | 4
-rw-r--r--  drivers/net/wireless/ti/wlcore/testmode.c | 2
-rw-r--r--  drivers/net/wireless/virtual/mac80211_hwsim.c | 39
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pcie.c | 56
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_pci.c | 23
-rw-r--r--  drivers/nfc/st21nfca/dep.c | 18
-rw-r--r--  drivers/ptp/ptp_ocp.c | 16
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c | 3
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c | 4
574 files changed, 28997 insertions(+), 7669 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 520034acf73a..162814ae8cb4 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -943,7 +943,7 @@ int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
}
dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dst.counter_id = mlx5_fc_id(opfc->fc);
+ dst.counter = opfc->fc;
flow_act.action =
MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
@@ -1113,8 +1113,8 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
handler->ibcounters = flow_act.counters;
dest_arr[dest_num].type =
MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest_arr[dest_num].counter_id =
- mlx5_fc_id(mcounters->hw_cntrs_hndl);
+ dest_arr[dest_num].counter =
+ mcounters->hw_cntrs_hndl;
dest_num++;
}
@@ -1603,7 +1603,7 @@ static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
static struct mlx5_ib_flow_handler *raw_fs_rule_add(
struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act,
- u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type)
+ struct mlx5_fc *counter, void *cmd_in, int inlen, int dest_id, int dest_type)
{
struct mlx5_flow_destination *dst;
struct mlx5_ib_flow_prio *ft_prio;
@@ -1652,8 +1652,12 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
}
if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (WARN_ON(!counter)) {
+ err = -EINVAL;
+ goto unlock;
+ }
dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dst[dst_num].counter_id = counter_id;
+ dst[dst_num].counter = counter;
dst_num++;
}
@@ -1878,7 +1882,8 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
return 0;
}
-static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id)
+static bool
+is_flow_counter(void *obj, u32 offset, u32 *counter_id, u32 *fc_bulk_size)
{
struct devx_obj *devx_obj = obj;
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
@@ -1888,6 +1893,7 @@ static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id)
if (offset && offset >= devx_obj->flow_counter_bulk_size)
return false;
+ *fc_bulk_size = devx_obj->flow_counter_bulk_size;
*counter_id = MLX5_GET(dealloc_flow_counter_in,
devx_obj->dinbox,
flow_counter_id);
@@ -1904,13 +1910,13 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
{
struct mlx5_flow_context flow_context = {.flow_tag =
MLX5_FS_DEFAULT_FLOW_TAG};
- u32 *offset_attr, offset = 0, counter_id = 0;
int dest_id, dest_type = -1, inlen, len, ret, i;
struct mlx5_ib_flow_handler *flow_handler;
struct mlx5_ib_flow_matcher *fs_matcher;
struct ib_uobject **arr_flow_actions;
struct ib_uflow_resources *uflow_res;
struct mlx5_flow_act flow_act = {};
+ struct mlx5_fc *counter = NULL;
struct ib_qp *qp = NULL;
void *devx_obj, *cmd_in;
struct ib_uobject *uobj;
@@ -1937,6 +1943,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
len = uverbs_attr_get_uobjs_arr(attrs,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
if (len) {
+ u32 *offset_attr, fc_bulk_size, offset = 0, counter_id = 0;
devx_obj = arr_flow_actions[0]->object;
if (uverbs_attr_is_valid(attrs,
@@ -1956,8 +1963,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
offset = *offset_attr;
}
- if (!is_flow_counter(devx_obj, offset, &counter_id))
+ if (!is_flow_counter(devx_obj, offset, &counter_id, &fc_bulk_size))
return -EINVAL;
+ counter = mlx5_fc_local_create(counter_id, offset, fc_bulk_size);
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
}
@@ -1968,8 +1978,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
- if (!uflow_res)
- return -ENOMEM;
+ if (!uflow_res) {
+ ret = -ENOMEM;
+ goto destroy_counter;
+ }
len = uverbs_attr_get_uobjs_arr(attrs,
MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
@@ -1996,7 +2008,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
flow_handler =
raw_fs_rule_add(dev, fs_matcher, &flow_context, &flow_act,
- counter_id, cmd_in, inlen, dest_id, dest_type);
+ counter, cmd_in, inlen, dest_id, dest_type);
if (IS_ERR(flow_handler)) {
ret = PTR_ERR(flow_handler);
goto err_out;
@@ -2007,6 +2019,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
return 0;
err_out:
ib_uverbs_flow_resources_free(uflow_res);
+destroy_counter:
+ if (counter)
+ mlx5_fc_local_destroy(counter);
return ret;
}
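
Note on the mlx5 hunks above: the flow-counter destination now carries a struct mlx5_fc pointer instead of a raw counter id, and DEVX-backed counters are wrapped in a local counter object. A minimal caller-side sketch of the resulting lifecycle, assuming only the mlx5_fc_local_create()/mlx5_fc_local_destroy() signatures visible in this diff (error paths abbreviated):

    /* Sketch: create a local counter over a DEVX bulk, attach it as a
     * flow destination, and destroy it on teardown or error.
     */
    struct mlx5_fc *counter;

    counter = mlx5_fc_local_create(counter_id, offset, fc_bulk_size);
    if (IS_ERR(counter))
            return PTR_ERR(counter);

    dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
    dst.counter = counter;      /* was: dst.counter_id = mlx5_fc_id(fc) */

    /* ... create the rule referencing dst ... */

    mlx5_fc_local_destroy(counter);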
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index e34a7a46754e..8ec2d4d4f135 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -294,20 +294,6 @@ get_Bprotocol4mask(u_int m)
return NULL;
}
-struct Bprotocol *
-get_Bprotocol4id(u_int id)
-{
- u_int m;
-
- if (id < ISDN_P_B_START || id > 63) {
- printk(KERN_WARNING "%s id not in range %d\n",
- __func__, id);
- return NULL;
- }
- m = 1 << (id & ISDN_P_B_MASK);
- return get_Bprotocol4mask(m);
-}
-
int
mISDN_register_Bprotocol(struct Bprotocol *bp)
{
diff --git a/drivers/isdn/mISDN/core.h b/drivers/isdn/mISDN/core.h
index 42599f49c189..5617c06de8e4 100644
--- a/drivers/isdn/mISDN/core.h
+++ b/drivers/isdn/mISDN/core.h
@@ -55,7 +55,6 @@ extern void __add_layer2(struct mISDNchannel *, struct mISDNstack *);
extern u_int get_all_Bprotocols(void);
struct Bprotocol *get_Bprotocol4mask(u_int);
-struct Bprotocol *get_Bprotocol4id(u_int);
extern int mISDN_inittimer(u_int *);
extern void mISDN_timer_cleanup(void);
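
The removed get_Bprotocol4id() had no remaining callers. Should one reappear, the same lookup is a one-liner via the surviving helper (mapping and range check copied from the deleted function):

    /* Equivalent of the removed helper, via get_Bprotocol4mask() */
    struct Bprotocol *bp = NULL;

    if (id >= ISDN_P_B_START && id <= 63)
            bp = get_Bprotocol4mask(1 << (id & ISDN_P_B_MASK));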
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index a2abfade82dd..70814303aab8 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -84,7 +84,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
sizeof(ipversion))) {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
ipversion >>= 4;
@@ -94,7 +94,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
} else if (ipversion == 6 && bareudp->multi_proto_mode) {
proto = htons(ETH_P_IPV6);
} else {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
@@ -108,7 +108,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
ipv4_is_multicast(tunnel_hdr->daddr)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
} else {
@@ -124,7 +124,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
(addr_type & IPV6_ADDR_MULTICAST)) {
proto = htons(ETH_P_MPLS_MC);
} else {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
}
@@ -136,7 +136,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
proto,
!net_eq(bareudp->net,
dev_net(bareudp->dev)))) {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
@@ -144,7 +144,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
if (!tun_dst) {
- dev_core_stats_rx_dropped_inc(bareudp->dev);
+ dev_dstats_rx_dropped(bareudp->dev);
goto drop;
}
skb_dst_set(skb, &tun_dst->dst);
@@ -194,7 +194,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
len = skb->len;
err = gro_cells_receive(&bareudp->gro_cells, skb);
if (likely(err == NET_RX_SUCCESS))
- dev_sw_netstats_rx_add(bareudp->dev, len);
+ dev_dstats_rx_add(bareudp->dev, len);
return 0;
drop:
@@ -589,7 +589,7 @@ static void bareudp_setup(struct net_device *dev)
dev->priv_flags |= IFF_NO_QUEUE;
dev->lltx = true;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
- dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
}
static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
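
Note on the bareudp hunks: the device switches from TSTATS to per-CPU DSTATS so that RX byte counts and RX drops are tracked in one structure. The pairing, reduced to a sketch using the helpers from the diff above:

    /* In ->setup(): let the core allocate dev->dstats */
    dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;

    /* In the datapath: per-CPU, lockless updates */
    dev_dstats_rx_add(dev, skb->len);   /* delivered packet */
    dev_dstats_rx_dropped(dev);         /* any drop path */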
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 681643ab3780..5ec3170b896a 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -85,8 +85,6 @@ const char *can_get_state_str(const enum can_state state)
default:
return "<unknown>";
}
-
- return "<unknown>";
}
EXPORT_SYMBOL_GPL(can_get_state_str);
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index cdf0ec9fa7f3..21a61b86f67d 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1073,9 +1073,10 @@ static int grcan_open(struct net_device *dev)
if (err)
goto exit_close_candev;
+ napi_enable(&priv->napi);
+
spin_lock_irqsave(&priv->lock, flags);
- napi_enable(&priv->napi);
grcan_start(dev);
if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(dev);
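
The grcan hunk hoists napi_enable() ahead of the irqsave spinlock: napi_enable() may sleep in current kernels (it takes a mutex internally), so it must not run in atomic context. The resulting ordering, as a sketch:

    /* Enable NAPI while still in sleepable context, then take the
     * spinlock only around the hardware start.
     */
    napi_enable(&priv->napi);

    spin_lock_irqsave(&priv->lock, flags);
    grcan_start(dev);
    /* ... queue start, etc. ... */
    spin_unlock_irqrestore(&priv->lock, flags);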
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index fee012b57f33..fa04a7ced02b 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -999,7 +999,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO |
- CAN_CTRLMODE_CC_LEN8_DLC;
+ CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
@@ -1234,11 +1235,15 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
}
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
+ const struct can_berr_counter *bec,
struct can_frame *cf,
enum can_state new_state,
enum can_state tx_state,
enum can_state rx_state)
{
+ enum can_state old_state;
+
+ old_state = can->can.state;
can_change_state(can->can.dev, cf, tx_state, rx_state);
if (new_state == CAN_STATE_BUS_OFF) {
@@ -1254,6 +1259,18 @@ static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
can_bus_off(ndev);
}
}
+ if (old_state == CAN_STATE_BUS_OFF &&
+ new_state == CAN_STATE_ERROR_ACTIVE &&
+ can->can.restart_ms) {
+ can->can.can_stats.restarts++;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ if (cf && new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
}
static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
@@ -1288,7 +1305,7 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
struct can_berr_counter bec;
enum can_state old_state, new_state, tx_state, rx_state;
struct net_device *ndev = can->can.dev;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct can_frame *cf = NULL;
old_state = can->can.state;
@@ -1297,16 +1314,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]);
kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state);
- skb = alloc_can_err_skb(ndev, &cf);
+ if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(ndev, &cf);
if (new_state != old_state) {
- kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
- if (old_state == CAN_STATE_BUS_OFF &&
- new_state == CAN_STATE_ERROR_ACTIVE &&
- can->can.restart_ms) {
- can->can.can_stats.restarts++;
- if (skb)
- cf->can_id |= CAN_ERR_RESTARTED;
- }
+ kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
}
can->err_rep_cnt++;
@@ -1319,18 +1330,19 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
- if (!skb) {
- ndev->stats.rx_dropped++;
- return -ENOMEM;
+ if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ if (!skb) {
+ netdev_warn(ndev, "No memory left for err_skb\n");
+ ndev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ netif_rx(skb);
}
- kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
- cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
-
- netif_rx(skb);
-
return 0;
}
@@ -1359,6 +1371,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
{
struct can_berr_counter bec;
enum can_state old_state, new_state, tx_state, rx_state;
+ int ret = 0;
old_state = can->can.state;
@@ -1372,25 +1385,15 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
struct can_frame *cf;
skb = alloc_can_err_skb(ndev, &cf);
- if (!skb) {
+ kvaser_pciefd_change_state(can, &bec, cf, new_state, tx_state, rx_state);
+ if (skb) {
+ kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
+ netif_rx(skb);
+ } else {
ndev->stats.rx_dropped++;
- return -ENOMEM;
+ netdev_warn(ndev, "No memory left for err_skb\n");
+ ret = -ENOMEM;
}
-
- kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state);
- if (old_state == CAN_STATE_BUS_OFF &&
- new_state == CAN_STATE_ERROR_ACTIVE &&
- can->can.restart_ms) {
- can->can.can_stats.restarts++;
- cf->can_id |= CAN_ERR_RESTARTED;
- }
-
- kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp);
-
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
-
- netif_rx(skb);
}
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
@@ -1398,7 +1401,7 @@ static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
if (bec.txerr || bec.rxerr)
mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
- return 0;
+ return ret;
}
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
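
The kvaser_pciefd changes above advertise CAN_CTRLMODE_BERR_REPORTING and allocate bus-error skbs only when that mode is enabled; state bookkeeping via kvaser_pciefd_change_state() still runs either way with a possibly-NULL frame. The gate, reduced to a sketch:

    /* err-frame delivery is optional; state accounting is not */
    struct sk_buff *skb = NULL;
    struct can_frame *cf = NULL;

    if (can->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
            skb = alloc_can_err_skb(ndev, &cf);
    if (new_state != old_state)
            kvaser_pciefd_change_state(can, &bec, cf, new_state,
                                       tx_state, rx_state);
    if (skb)
            netif_rx(skb);

Userspace opts in per interface with iproute2, e.g. "ip link set can0 type can bitrate 500000 berr-reporting on".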
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 97cd8bbf2e32..d025d4163fd1 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1785,6 +1785,13 @@ static void m_can_stop(struct net_device *dev)
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
+
+ if (cdev->ops->deinit) {
+ ret = cdev->ops->deinit(cdev);
+ if (ret)
+ netdev_err(dev, "failed to deinitialize: %pe\n",
+ ERR_PTR(ret));
+ }
}
static int m_can_close(struct net_device *dev)
@@ -2466,6 +2473,7 @@ int m_can_class_suspend(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
if (netif_running(ndev)) {
netif_stop_queue(ndev);
@@ -2478,6 +2486,9 @@ int m_can_class_suspend(struct device *dev)
if (cdev->pm_wake_source) {
hrtimer_cancel(&cdev->hrtimer);
m_can_write(cdev, M_CAN_IE, IR_RF0N);
+
+ if (cdev->ops->deinit)
+ ret = cdev->ops->deinit(cdev);
} else {
m_can_stop(ndev);
}
@@ -2489,7 +2500,7 @@ int m_can_class_suspend(struct device *dev)
cdev->can.state = CAN_STATE_SLEEPING;
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_suspend);
@@ -2497,14 +2508,13 @@ int m_can_class_resume(struct device *dev)
{
struct m_can_classdev *cdev = dev_get_drvdata(dev);
struct net_device *ndev = cdev->net;
+ int ret = 0;
pinctrl_pm_select_default_state(dev);
cdev->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
- int ret;
-
ret = m_can_clk_start(cdev);
if (ret)
return ret;
@@ -2517,6 +2527,10 @@ int m_can_class_resume(struct device *dev)
* again.
*/
cdev->active_interrupts |= IR_RF0N | IR_TEFN;
+
+ if (cdev->ops->init)
+ ret = cdev->ops->init(cdev);
+
m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
} else {
ret = m_can_start(ndev);
@@ -2530,7 +2544,7 @@ int m_can_class_resume(struct device *dev)
netif_start_queue(ndev);
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(m_can_class_resume);
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index ef39e8e527ab..bd4746c63af3 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -68,6 +68,7 @@ struct m_can_ops {
int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset,
const void *val, size_t val_count);
int (*init)(struct m_can_classdev *cdev);
+ int (*deinit)(struct m_can_classdev *cdev);
};
struct m_can_tx_op {
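
The new ->deinit op in struct m_can_ops is optional and mirrors ->init; the core calls it from m_can_stop() and the wake-source suspend path above only when it is non-NULL. A hypothetical glue driver would wire it up like this (the my_glue_* names are placeholders, not part of this patch):

    static int my_glue_deinit(struct m_can_classdev *cdev)
    {
            /* e.g. drop the companion chip back into standby */
            return 0;
    }

    static const struct m_can_ops my_glue_ops = {
            .init   = my_glue_init,        /* as before */
            .deinit = my_glue_deinit,      /* new, optional */
            /* .read_reg/.write_reg/.write_fifo as before */
    };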
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 2f73bf3abad8..e5c162f8c589 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -92,6 +92,8 @@
#define TCAN4X5X_MODE_STANDBY BIT(6)
#define TCAN4X5X_MODE_NORMAL BIT(7)
+#define TCAN4X5X_NWKRQ_VOLTAGE_VIO BIT(19)
+
#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30))
#define TCAN4X5X_DISABLE_INH_MSK BIT(9)
@@ -267,9 +269,24 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
if (ret)
return ret;
+ if (tcan4x5x->nwkrq_voltage_vio) {
+ ret = regmap_set_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_NWKRQ_VOLTAGE_VIO);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
+static int tcan4x5x_deinit(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+
+ return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_STANDBY);
+}
+
static int tcan4x5x_disable_wake(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
@@ -318,6 +335,14 @@ static const struct tcan4x5x_version_info
return &tcan4x5x_versions[TCAN4X5X];
}
+static void tcan4x5x_get_dt_data(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+
+ tcan4x5x->nwkrq_voltage_vio =
+ of_property_read_bool(cdev->dev->of_node, "ti,nwkrq-voltage-vio");
+}
+
static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
const struct tcan4x5x_version_info *version_info)
{
@@ -359,6 +384,7 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
static const struct m_can_ops tcan4x5x_ops = {
.init = tcan4x5x_init,
+ .deinit = tcan4x5x_deinit,
.read_reg = tcan4x5x_read_reg,
.write_reg = tcan4x5x_write_reg,
.write_fifo = tcan4x5x_write_fifo,
@@ -392,7 +418,7 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv->power = NULL;
}
- m_can_class_get_clocks(mcan_class);
+ mcan_class->cclk = devm_clk_get(mcan_class->dev, "cclk");
if (IS_ERR(mcan_class->cclk)) {
dev_err(&spi->dev, "no CAN clock source defined\n");
freq = TCAN4X5X_EXT_CLK_DEF;
@@ -453,6 +479,8 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
goto out_power;
}
+ tcan4x5x_get_dt_data(mcan_class);
+
tcan4x5x_check_wake(priv);
ret = tcan4x5x_write_tcan_reg(mcan_class, TCAN4X5X_INT_EN, 0);
diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h
index e62c030d3e1e..203399d5e8cc 100644
--- a/drivers/net/can/m_can/tcan4x5x.h
+++ b/drivers/net/can/m_can/tcan4x5x.h
@@ -42,6 +42,8 @@ struct tcan4x5x_priv {
struct tcan4x5x_map_buf map_buf_rx;
struct tcan4x5x_map_buf map_buf_tx;
+
+ bool nwkrq_voltage_vio;
};
static inline void
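
The nwkrq_voltage_vio flag added above is driven by the "ti,nwkrq-voltage-vio" devicetree property read in tcan4x5x_get_dt_data(). A hypothetical node using it (property name taken from this diff; all other values are placeholders):

    can@0 {
            compatible = "ti,tcan4x5x";
            reg = <0>;
            spi-max-frequency = <10000000>;
            ti,nwkrq-voltage-vio;   /* drive nWKRQ at VIO level */
    };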
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index c42ebe9da55a..2d555f854008 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -230,18 +230,9 @@ static int sp_probe(struct platform_device *pdev)
return -ENODEV;
}
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_mem)
- return -ENODEV;
-
- if (!devm_request_mem_region(&pdev->dev, res_mem->start,
- resource_size(res_mem), DRV_NAME))
- return -EBUSY;
-
- addr = devm_ioremap(&pdev->dev, res_mem->start,
- resource_size(res_mem));
- if (!addr)
- return -ENOMEM;
+ addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
if (of) {
irq = platform_get_irq(pdev, 0);
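
The sja1000 conversion folds platform_get_resource()/devm_request_mem_region()/devm_ioremap() into a single devm_platform_get_and_ioremap_resource() call; note the error convention changes from NULL to ERR_PTR(), hence the IS_ERR()/PTR_ERR() pair. Where the struct resource pointer is not needed afterwards, the shorter variant does the same job:

    /* Equivalent helper when res_mem itself is not used later */
    addr = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(addr))
            return PTR_ERR(addr);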
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 4311c1f0eafd..6fcb301ef611 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -570,7 +570,7 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
- if (skb && state != CAN_STATE_BUS_OFF) {
+ if (likely(skb) && state != CAN_STATE_BUS_OFF) {
cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 7d12776ab63e..dcb0bcbe0565 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -818,7 +818,8 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
init_completion(&priv->stop_comp);
init_completion(&priv->flush_comp);
init_completion(&priv->get_busparams_comp);
- priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
priv->dev = dev;
priv->netdev = netdev;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 3764b263add3..8e88b5917796 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -926,6 +926,42 @@ kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
}
}
+static void kvaser_usb_hydra_change_state(struct kvaser_usb_net_priv *priv,
+ const struct can_berr_counter *bec,
+ struct can_frame *cf,
+ enum can_state new_state)
+{
+ struct net_device *netdev = priv->netdev;
+ enum can_state old_state = priv->can.state;
+ enum can_state tx_state, rx_state;
+
+ tx_state = (bec->txerr >= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (bec->txerr <= bec->rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ can_change_state(netdev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
+ if (priv->can.restart_ms == 0)
+ kvaser_usb_hydra_send_simple_cmd_async(priv, CMD_STOP_CHIP_REQ);
+
+ can_bus_off(netdev);
+ }
+
+ if (priv->can.restart_ms &&
+ old_state >= CAN_STATE_BUS_OFF &&
+ new_state < CAN_STATE_BUS_OFF) {
+ priv->can.can_stats.restarts++;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ if (cf && new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
+}
+
static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
u8 bus_status,
const struct can_berr_counter *bec)
@@ -951,41 +987,11 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
return;
skb = alloc_can_err_skb(netdev, &cf);
- if (skb) {
- enum can_state tx_state, rx_state;
-
- tx_state = (bec->txerr >= bec->rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- rx_state = (bec->txerr <= bec->rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- can_change_state(netdev, cf, tx_state, rx_state);
- }
-
- if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) {
- if (!priv->can.restart_ms)
- kvaser_usb_hydra_send_simple_cmd_async
- (priv, CMD_STOP_CHIP_REQ);
-
- can_bus_off(netdev);
- }
-
- if (!skb) {
+ kvaser_usb_hydra_change_state(priv, bec, cf, new_state);
+ if (skb)
+ netif_rx(skb);
+ else
netdev_warn(netdev, "No memory left for err_skb\n");
- return;
- }
-
- if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
- new_state < CAN_STATE_BUS_OFF)
- priv->can.can_stats.restarts++;
-
- if (new_state != CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_CNT;
- cf->data[6] = bec->txerr;
- cf->data[7] = bec->rxerr;
- }
-
- netif_rx(skb);
}
static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev,
@@ -1078,9 +1084,8 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
{
struct net_device *netdev = priv->netdev;
struct net_device_stats *stats = &netdev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
- struct skb_shared_hwtstamps *shhwtstamps;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb = NULL;
struct can_berr_counter bec;
enum can_state new_state, old_state;
u8 bus_status;
@@ -1096,52 +1101,26 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec,
&new_state);
- skb = alloc_can_err_skb(netdev, &cf);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (new_state != old_state)
+ kvaser_usb_hydra_change_state(priv, &bec, cf, new_state);
- if (new_state != old_state) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
if (skb) {
- enum can_state tx_state, rx_state;
-
- tx_state = (bec.txerr >= bec.rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
- rx_state = (bec.txerr <= bec.rxerr) ?
- new_state : CAN_STATE_ERROR_ACTIVE;
-
- can_change_state(netdev, cf, tx_state, rx_state);
-
- if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
- new_state < CAN_STATE_BUS_OFF)
- cf->can_id |= CAN_ERR_RESTARTED;
- }
-
- if (new_state == CAN_STATE_BUS_OFF) {
- if (!priv->can.restart_ms)
- kvaser_usb_hydra_send_simple_cmd_async
- (priv, CMD_STOP_CHIP_REQ);
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
- can_bus_off(netdev);
+ shhwtstamps->hwtstamp = hwtstamp;
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ netif_rx(skb);
+ } else {
+ stats->rx_dropped++;
+ netdev_warn(netdev, "No memory left for err_skb\n");
}
}
- if (!skb) {
- stats->rx_dropped++;
- netdev_warn(netdev, "No memory left for err_skb\n");
- return;
- }
-
- shhwtstamps = skb_hwtstamps(skb);
- shhwtstamps->hwtstamp = hwtstamp;
-
- cf->can_id |= CAN_ERR_BUSERROR;
- if (new_state != CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_CNT;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
- }
-
- netif_rx(skb);
-
priv->bec.txerr = bec.txerr;
priv->bec.rxerr = bec.rxerr;
}
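
The two hydra paths above now share kvaser_usb_hydra_change_state(), which tolerates a NULL frame so the controller state and statistics are updated even when alloc_can_err_skb() fails or berr reporting is off. A minimal sketch of that pattern, assuming a can_change_state() that accepts cf == NULL (as the dev/dev.c change in this series provides); my_change_state() itself is a placeholder, not the driver's exact code:

#include <linux/can/dev.h>

/* Update CAN state unconditionally; only fill in the error frame when
 * one was actually allocated.
 */
static void my_change_state(struct net_device *netdev, struct can_frame *cf,
                            const struct can_berr_counter *bec,
                            enum can_state new_state)
{
        enum can_state tx_state, rx_state;

        tx_state = bec->txerr >= bec->rxerr ? new_state : CAN_STATE_ERROR_ACTIVE;
        rx_state = bec->txerr <= bec->rxerr ? new_state : CAN_STATE_ERROR_ACTIVE;
        can_change_state(netdev, cf, tx_state, rx_state); /* cf may be NULL */

        if (cf && new_state != CAN_STATE_BUS_OFF) {
                cf->can_id |= CAN_ERR_CNT;
                cf->data[6] = bec->txerr;
                cf->data[7] = bec->rxerr;
        }
}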
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 6b9122ab1464..6a45adcc45bd 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -1120,10 +1120,8 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
const struct kvaser_usb_err_summary *es)
{
- struct can_frame *cf;
- struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG,
- .len = CAN_ERR_DLC };
- struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb = NULL;
struct net_device_stats *stats;
struct kvaser_usb_net_priv *priv;
struct kvaser_usb_net_leaf_priv *leaf;
@@ -1143,18 +1141,10 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
if (!netif_running(priv->netdev))
return;
- /* Update all of the CAN interface's state and error counters before
- * trying any memory allocation that can actually fail with -ENOMEM.
- *
- * We send a temporary stack-allocated error CAN frame to
- * can_change_state() for the very same reason.
- *
- * TODO: Split can_change_state() responsibility between updating the
- * CAN interface's state and counters, and the setting up of CAN error
- * frame ID and data to userspace. Remove stack allocation afterwards.
- */
old_state = priv->can.state;
- kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ skb = alloc_can_err_skb(priv->netdev, &cf);
+ kvaser_usb_leaf_rx_error_update_can_state(priv, es, cf);
new_state = priv->can.state;
/* If there are errors, request status updates periodically as we do
@@ -1168,13 +1158,6 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
schedule_delayed_work(&leaf->chip_state_req_work,
msecs_to_jiffies(500));
- skb = alloc_can_err_skb(priv->netdev, &cf);
- if (!skb) {
- stats->rx_dropped++;
- return;
- }
- memcpy(cf, &tmp_cf, sizeof(*cf));
-
if (new_state != old_state) {
if (es->status &
(M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
@@ -1187,11 +1170,20 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
if (priv->can.restart_ms &&
old_state == CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF) {
- cf->can_id |= CAN_ERR_RESTARTED;
+ if (cf)
+ cf->can_id |= CAN_ERR_RESTARTED;
netif_carrier_on(priv->netdev);
}
}
+ if (!skb) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ stats->rx_dropped++;
+ netdev_warn(priv->netdev, "No memory left for err_skb\n");
+ }
+ return;
+ }
+
switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 285785c942b0..79dc77835681 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -2224,25 +2224,19 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL(b53_eee_init);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
+bool b53_support_eee(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
- if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
-
- return 0;
+ return !is5325(dev) && !is5365(dev);
}
-EXPORT_SYMBOL(b53_get_mac_eee);
+EXPORT_SYMBOL(b53_support_eee);
int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
struct ethtool_keee *p = &dev->ports[port].eee;
- if (is5325(dev) || is5365(dev))
- return -EOPNOTSUPP;
-
p->eee_enabled = e->eee_enabled;
b53_eee_enable_set(ds, port, e->eee_enabled);
@@ -2298,7 +2292,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
.phylink_get_caps = b53_phylink_get_caps,
.port_enable = b53_enable_port,
.port_disable = b53_disable_port,
- .get_mac_eee = b53_get_mac_eee,
+ .support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 05141176daf5..9e9b5bc0c5d6 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -384,7 +384,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
void b53_disable_port(struct dsa_switch *ds, int port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
+bool b53_support_eee(struct dsa_switch *ds, int port);
int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
#endif
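
The get_mac_eee to support_eee conversion turns an errno-returning stub into a pure capability query. A hedged sketch of how a DSA driver now advertises EEE support; my_support_eee and its "always true" policy are illustrative only:

#include <net/dsa.h>

/* Chips with per-model quirks return a bool, as b53_support_eee() does
 * above; drivers that support EEE on every user port can instead point
 * the op at the generic dsa_supports_eee() helper.
 */
static bool my_support_eee(struct dsa_switch *ds, int port)
{
        return true;    /* hypothetical: every port is EEE-capable */
}

static const struct dsa_switch_ops my_ops = {
        .support_eee    = my_support_eee,       /* or dsa_supports_eee */
};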
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 3f8a491ce885..4730982b6840 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -99,8 +99,8 @@ static void b53_serdes_an_restart(struct phylink_pcs *pcs)
SERDES_MII_BLK, reg);
}
-static void b53_serdes_get_state(struct phylink_pcs *pcs,
- struct phylink_link_state *state)
+static void b53_serdes_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
+ struct phylink_link_state *state)
{
struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
u8 lane = pcs_to_b53_pcs(pcs)->lane;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 43bde1f583ff..fa2bf3fa9019 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1232,7 +1232,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.set_wol = bcm_sf2_sw_set_wol,
.port_enable = bcm_sf2_port_setup,
.port_disable = bcm_sf2_port_disable,
- .get_mac_eee = b53_get_mac_eee,
+ .support_eee = b53_support_eee,
.set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 1c6d7fc16772..a2beb27459f1 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -127,10 +127,14 @@ static const struct of_device_id ksz9477_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+static DEFINE_SIMPLE_DEV_PM_OPS(ksz_i2c_pm_ops,
+ ksz_switch_suspend, ksz_switch_resume);
+
static struct i2c_driver ksz9477_i2c_driver = {
.driver = {
.name = "ksz9477-switch",
.of_match_table = ksz9477_dt_ids,
+ .pm = &ksz_i2c_pm_ops,
},
.probe = ksz9477_i2c_probe,
.remove = ksz9477_i2c_remove,
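
DEFINE_SIMPLE_DEV_PM_OPS() generates the dev_pm_ops boilerplate for system sleep. A sketch of the pattern used here for both the I2C and SPI front ends, with placeholder callbacks standing in for ksz_switch_suspend()/ksz_switch_resume():

#include <linux/pm.h>

static int my_suspend(struct device *dev)
{
        return 0;       /* quiesce the device */
}

static int my_resume(struct device *dev)
{
        return 0;       /* bring it back up */
}

static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);
/* then: .driver.pm = &my_pm_ops in the i2c_driver or spi_driver */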
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 8a03baa6aecc..89f0796894af 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1339,6 +1339,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
.gbit_capable = {false, false, true},
+ .ptp_capable = true,
.wr_table = &ksz8563_register_set,
.rd_table = &ksz8563_register_set,
},
@@ -1550,6 +1551,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
+ .ptp_capable = true,
.wr_table = &ksz9477_register_set,
.rd_table = &ksz9477_register_set,
},
@@ -1677,6 +1679,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
.gbit_capable = {true, true, true},
+ .ptp_capable = true,
},
[KSZ8567] = {
@@ -1712,6 +1715,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, false, false},
.gbit_capable = {false, false, false, false, false,
true, true},
+ .ptp_capable = true,
},
[KSZ9567] = {
@@ -1744,6 +1748,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.internal_phy = {true, true, true, true,
true, false, false},
.gbit_capable = {true, true, true, true, true, true, true},
+ .ptp_capable = true,
},
[LAN9370] = {
@@ -1773,6 +1778,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rmii = {false, false, false, false, true},
.supports_rgmii = {false, false, false, false, true},
.internal_phy = {true, true, true, true, false},
+ .ptp_capable = true,
},
[LAN9371] = {
@@ -1802,6 +1808,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rmii = {false, false, false, false, true, true},
.supports_rgmii = {false, false, false, false, true, true},
.internal_phy = {true, true, true, true, false, false},
+ .ptp_capable = true,
},
[LAN9372] = {
@@ -1835,6 +1842,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, true,
false, false, true, true},
+ .ptp_capable = true,
},
[LAN9373] = {
@@ -1868,6 +1876,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, false,
false, false, true, true},
+ .ptp_capable = true,
},
[LAN9374] = {
@@ -1901,6 +1910,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
true, true, false, false},
.internal_phy = {true, true, true, true,
false, false, true, true},
+ .ptp_capable = true,
},
[LAN9646] = {
@@ -2544,7 +2554,11 @@ static int ksz_mdio_register(struct ksz_device *dev)
bus->read = ksz_sw_mdio_read;
bus->write = ksz_sw_mdio_write;
bus->name = "ksz user smi";
- snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ if (ds->dst->index != 0) {
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d-%d", ds->dst->index, ds->index);
+ } else {
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ }
}
ret = ksz_parse_dt_phy_config(dev, bus, mdio_np);
@@ -2805,16 +2819,21 @@ static int ksz_setup(struct dsa_switch *ds)
if (ret)
goto out_girq;
- ret = ksz_ptp_irq_setup(ds, dp->index);
- if (ret)
- goto out_pirq;
+ if (dev->info->ptp_capable) {
+ ret = ksz_ptp_irq_setup(ds, dp->index);
+ if (ret)
+ goto out_pirq;
+ }
}
}
- ret = ksz_ptp_clock_register(ds);
- if (ret) {
- dev_err(dev->dev, "Failed to register PTP clock: %d\n", ret);
- goto out_ptpirq;
+ if (dev->info->ptp_capable) {
+ ret = ksz_ptp_clock_register(ds);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register PTP clock: %d\n",
+ ret);
+ goto out_ptpirq;
+ }
}
ret = ksz_mdio_register(dev);
@@ -2834,9 +2853,10 @@ static int ksz_setup(struct dsa_switch *ds)
return 0;
out_ptp_clock_unregister:
- ksz_ptp_clock_unregister(ds);
+ if (dev->info->ptp_capable)
+ ksz_ptp_clock_unregister(ds);
out_ptpirq:
- if (dev->irq > 0)
+ if (dev->irq > 0 && dev->info->ptp_capable)
dsa_switch_for_each_user_port(dp, dev->ds)
ksz_ptp_irq_free(ds, dp->index);
out_pirq:
@@ -2855,11 +2875,13 @@ static void ksz_teardown(struct dsa_switch *ds)
struct ksz_device *dev = ds->priv;
struct dsa_port *dp;
- ksz_ptp_clock_unregister(ds);
+ if (dev->info->ptp_capable)
+ ksz_ptp_clock_unregister(ds);
if (dev->irq > 0) {
dsa_switch_for_each_user_port(dp, dev->ds) {
- ksz_ptp_irq_free(ds, dp->index);
+ if (dev->info->ptp_capable)
+ ksz_ptp_irq_free(ds, dp->index);
ksz_irq_free(&dev->ports[dp->index].pirq);
}
@@ -3444,12 +3466,12 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
return -EOPNOTSUPP;
}
-static int ksz_validate_eee(struct dsa_switch *ds, int port)
+static bool ksz_support_eee(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
if (!dev->info->internal_phy[port])
- return -EOPNOTSUPP;
+ return false;
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
@@ -3461,41 +3483,16 @@ static int ksz_validate_eee(struct dsa_switch *ds, int port)
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
case LAN9646_CHIP_ID:
- return 0;
+ return true;
}
- return -EOPNOTSUPP;
-}
-
-static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- int ret;
-
- ret = ksz_validate_eee(ds, port);
- if (ret)
- return ret;
-
- /* There is no documented control of Tx LPI configuration. */
- e->tx_lpi_enabled = true;
-
- /* There is no documented control of Tx LPI timer. According to tests
- * Tx LPI timer seems to be set by default to minimal value.
- */
- e->tx_lpi_timer = 0;
-
- return 0;
+ return false;
}
static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
struct ksz_device *dev = ds->priv;
- int ret;
-
- ret = ksz_validate_eee(ds, port);
- if (ret)
- return ret;
if (!e->tx_lpi_enabled) {
dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n");
@@ -4593,6 +4590,23 @@ static int ksz_hsr_leave(struct dsa_switch *ds, int port,
return 0;
}
+static int ksz_suspend(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+
+ cancel_delayed_work_sync(&dev->mib_read);
+ return 0;
+}
+
+static int ksz_resume(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->mib_read_interval)
+ schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
+ return 0;
+}
+
static const struct dsa_switch_ops ksz_switch_ops = {
.get_tag_protocol = ksz_get_tag_protocol,
.connect_tag_protocol = ksz_connect_tag_protocol,
@@ -4633,6 +4647,8 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.port_max_mtu = ksz_max_mtu,
.get_wol = ksz_get_wol,
.set_wol = ksz_set_wol,
+ .suspend = ksz_suspend,
+ .resume = ksz_resume,
.get_ts_info = ksz_get_ts_info,
.port_hwtstamp_get = ksz_hwtstamp_get,
.port_hwtstamp_set = ksz_hwtstamp_set,
@@ -4641,7 +4657,7 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.cls_flower_add = ksz_cls_flower_add,
.cls_flower_del = ksz_cls_flower_del,
.port_setup_tc = ksz_setup_tc,
- .get_mac_eee = ksz_get_mac_eee,
+ .support_eee = ksz_support_eee,
.set_mac_eee = ksz_set_mac_eee,
.port_get_default_prio = ksz_port_get_default_prio,
.port_set_default_prio = ksz_port_set_default_prio,
@@ -5132,6 +5148,24 @@ void ksz_switch_remove(struct ksz_device *dev)
}
EXPORT_SYMBOL(ksz_switch_remove);
+#ifdef CONFIG_PM_SLEEP
+int ksz_switch_suspend(struct device *dev)
+{
+ struct ksz_device *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_suspend(priv->ds);
+}
+EXPORT_SYMBOL(ksz_switch_suspend);
+
+int ksz_switch_resume(struct device *dev)
+{
+ struct ksz_device *priv = dev_get_drvdata(dev);
+
+ return dsa_switch_resume(priv->ds);
+}
+EXPORT_SYMBOL(ksz_switch_resume);
+#endif
+
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");
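
The new ksz_suspend()/ksz_resume() DSA hooks only pause and restart the periodic MIB counter read; the rest of the state handling is left to the DSA core. A sketch of that delayed-work pattern, with a placeholder private struct mirroring the driver's fields:

#include <linux/workqueue.h>

struct my_switch {
        struct delayed_work mib_read;
        unsigned long mib_read_interval;        /* 0 = polling disabled */
};

static void my_pause_mib(struct my_switch *sw)
{
        /* make sure no poll is in flight across suspend */
        cancel_delayed_work_sync(&sw->mib_read);
}

static void my_restart_mib(struct my_switch *sw)
{
        if (sw->mib_read_interval)
                schedule_delayed_work(&sw->mib_read, sw->mib_read_interval);
}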
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index b3bb75ca0796..af17a9c030d4 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -92,6 +92,7 @@ struct ksz_chip_data {
bool supports_rgmii[KSZ_MAX_NUM_PORTS];
bool internal_phy[KSZ_MAX_NUM_PORTS];
bool gbit_capable[KSZ_MAX_NUM_PORTS];
+ bool ptp_capable;
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
};
@@ -444,6 +445,8 @@ struct ksz_dev_ops {
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
int ksz_switch_register(struct ksz_device *dev);
void ksz_switch_remove(struct ksz_device *dev);
+int ksz_switch_suspend(struct device *dev);
+int ksz_switch_resume(struct device *dev);
void ksz_init_mib_timer(struct ksz_device *dev);
bool ksz_is_port_mac_global_usable(struct dsa_switch *ds, int port);
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 108a958dc356..b633d263098c 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -239,10 +239,14 @@ static const struct spi_device_id ksz_spi_ids[] = {
};
MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
+static DEFINE_SIMPLE_DEV_PM_OPS(ksz_spi_pm_ops,
+ ksz_switch_suspend, ksz_switch_resume);
+
static struct spi_driver ksz_spi_driver = {
.driver = {
.name = "ksz-switch",
.of_match_table = ksz_dt_ids,
+ .pm = &ksz_spi_pm_ops,
},
.id_table = ksz_spi_ids,
.probe = ksz_spi_probe,
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 086b8b3d5b40..1c83af805209 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2994,7 +2994,7 @@ static int mt753x_pcs_validate(struct phylink_pcs *pcs,
return 0;
}
-static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
+static void mt7530_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
@@ -3085,18 +3085,6 @@ mt753x_setup(struct dsa_switch *ds)
return ret;
}
-static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- struct mt7530_priv *priv = ds->priv;
- u32 eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port));
-
- e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
- e->tx_lpi_timer = LPI_THRESH_GET(eeecr);
-
- return 0;
-}
-
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
@@ -3238,7 +3226,7 @@ const struct dsa_switch_ops mt7530_switch_ops = {
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
.phylink_get_caps = mt753x_phylink_get_caps,
- .get_mac_eee = mt753x_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = mt753x_set_mac_eee,
.conduit_state_change = mt753x_conduit_state_change,
.port_setup_tc = mt753x_setup_tc,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 3a792f79270d..35ae084af166 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1289,9 +1289,6 @@ static size_t mv88e6095_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_PORT)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
MV88E6XXX_G1_STATS_OP_HIST_RX);
return 1;
@@ -1301,9 +1298,6 @@ static size_t mv88e6250_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & STATS_TYPE_BANK0))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port, 0,
MV88E6XXX_G1_STATS_OP_HIST_RX);
return 1;
@@ -1313,9 +1307,6 @@ static size_t mv88e6320_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
MV88E6XXX_G1_STATS_OP_BANK_1_BIT_9,
MV88E6XXX_G1_STATS_OP_HIST_RX);
@@ -1326,9 +1317,6 @@ static size_t mv88e6390_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
const struct mv88e6xxx_hw_stat *stat,
uint64_t *data)
{
- if (!(stat->type & (STATS_TYPE_BANK0 | STATS_TYPE_BANK1)))
- return 0;
-
*data = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
MV88E6XXX_G1_STATS_OP_BANK_1_BIT_10,
0);
@@ -1341,6 +1329,9 @@ static size_t mv88e6xxx_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
{
int ret = 0;
+ if (!(stat->type & chip->info->stats_type))
+ return 0;
+
if (chip->info->ops->stats_get_stat) {
mv88e6xxx_reg_lock(chip);
ret = chip->info->ops->stats_get_stat(chip, port, stat, data);
@@ -1522,13 +1513,6 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
mv88e6xxx_reg_unlock(chip);
}
-static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
@@ -5645,6 +5629,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 5,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ops = &mv88e6250_ops,
@@ -5665,6 +5650,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 5,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ops = &mv88e6250_ops,
@@ -5687,6 +5673,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5708,6 +5695,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6095_ops,
@@ -5730,6 +5718,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5754,6 +5743,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5776,6 +5766,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 9,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.ops = &mv88e6131_ops,
@@ -5800,6 +5791,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0x1f,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -5823,6 +5815,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5848,6 +5841,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5872,6 +5866,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5897,6 +5892,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5921,6 +5917,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5946,6 +5943,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -5968,6 +5966,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global2_addr = 0x1c,
.age_time_coeff = 15000,
.g1_irqs = 8,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -5992,6 +5991,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.atu_move_port_mask = 0x1f,
@@ -6016,6 +6016,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6039,6 +6040,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6063,6 +6065,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6087,6 +6090,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6114,6 +6118,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
@@ -6138,6 +6143,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6161,6 +6167,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0,
.atu_move_port_mask = 0xf,
.dual_chip = true,
.ptp_support = true,
@@ -6184,6 +6191,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6208,6 +6216,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6233,6 +6242,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 8,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -6259,6 +6269,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0x1f,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
@@ -6283,6 +6294,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6307,6 +6319,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6332,6 +6345,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 15000,
.g1_irqs = 9,
.g2_irqs = 10,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_PORT,
.atu_move_port_mask = 0xf,
.pvt = true,
.multi_chip = true,
@@ -6359,6 +6373,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6383,6 +6398,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6408,6 +6424,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 9,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -6433,6 +6450,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.age_time_coeff = 3750,
.g1_irqs = 10,
.g2_irqs = 14,
+ .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0x1f,
.pvt = true,
.multi_chip = true,
@@ -7074,7 +7092,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_sset_count = mv88e6xxx_get_sset_count,
.port_max_mtu = mv88e6xxx_get_max_mtu,
.port_change_mtu = mv88e6xxx_change_mtu,
- .get_mac_eee = mv88e6xxx_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = mv88e6xxx_set_mac_eee,
.get_eeprom_len = mv88e6xxx_get_eeprom_len,
.get_eeprom = mv88e6xxx_get_eeprom,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 9fe8e8a7856b..86bf113c9bfa 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -144,6 +144,7 @@ struct mv88e6xxx_info {
unsigned int age_time_coeff;
unsigned int g1_irqs;
unsigned int g2_irqs;
+ int stats_type;
bool pvt;
/* Mark certain ports as invalid. This is required for example for the
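
With stats_type added to mv88e6xxx_info, the bank filter that each family helper used to apply moves into the one common caller. A hedged sketch of the centralized check; the direct ops call is simplified from the driver's actual locking and dispatch:

/* Skip counters the chip does not implement, based on the per-chip OR
 * of STATS_TYPE_BANK0/BANK1/PORT.
 */
static size_t my_stats_get_stat(struct mv88e6xxx_chip *chip, int port,
                                const struct mv88e6xxx_hw_stat *stat,
                                uint64_t *data)
{
        if (!(stat->type & chip->info->stats_type))
                return 0;       /* counter absent on this family */

        return chip->info->ops->stats_get_stat(chip, port, stat, data);
}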
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
index 5a27d047a38e..75ed1fa500a5 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6185.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
@@ -55,6 +55,7 @@ static irqreturn_t mv88e6185_pcs_handle_irq(int irq, void *dev_id)
}
static void mv88e6185_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e6185_pcs *mpcs = pcs_to_mv88e6185_pcs(pcs);
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6352.c b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
index 88f624b65470..143fe21d1834 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6352.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
@@ -158,6 +158,7 @@ static void marvell_c22_pcs_disable(struct phylink_pcs *pcs)
}
static void marvell_c22_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct marvell_c22_pcs *mpcs = pcs_to_marvell_c22_pcs(pcs);
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
index d758a6c1b226..026b7bfb7ee5 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-639x.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
@@ -257,6 +257,7 @@ static int mv88e639x_sgmii_pcs_post_config(struct phylink_pcs *pcs,
}
static void mv88e639x_sgmii_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = sgmii_pcs_to_mv88e639x_pcs(pcs);
@@ -395,6 +396,7 @@ static void mv88e639x_xg_pcs_disable(struct mv88e639x_pcs *mpcs)
}
static void mv88e639x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
@@ -889,6 +891,7 @@ static int mv88e6393x_xg_pcs_post_config(struct phylink_pcs *pcs,
}
static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mv88e639x_pcs *mpcs = xg_pcs_to_mv88e639x_pcs(pcs);
@@ -896,7 +899,7 @@ static void mv88e6393x_xg_pcs_get_state(struct phylink_pcs *pcs,
int err;
if (state->interface != PHY_INTERFACE_MODE_USXGMII)
- return mv88e639x_xg_pcs_get_state(pcs, state);
+ return mv88e639x_xg_pcs_get_state(pcs, neg_mode, state);
state->link = false;
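
All of these PCS conversions follow the phylink .pcs_get_state signature change, which now passes the negotiation mode down so a PCS can decode link state differently for in-band versus out-of-band configurations. A minimal sketch, assuming the PHYLINK_PCS_NEG_* values from <linux/phylink.h>; the decode branches are placeholders:

#include <linux/phylink.h>

static void my_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
                             struct phylink_link_state *state)
{
        if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
                /* decode the in-band status word from hardware */
        } else {
                /* hypothetical: trust the programmed configuration */
                state->link = true;
        }
}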
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 59b4a7240b58..e8cb4da15dbe 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -342,7 +342,7 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+ QCA8K_ETHERNET_TIMEOUT);
*val = mgmt_eth_data->data[0];
if (len > QCA_HDR_MGMT_DATA1_LEN)
@@ -394,7 +394,7 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
- msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+ QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
@@ -1019,7 +1019,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
of_get_phy_mode(port, &mode);
- if (of_property_read_bool(port, "phy-handle") &&
+ if (of_property_present(port, "phy-handle") &&
mode != PHY_INTERFACE_MODE_INTERNAL)
external_mdio_mask |= BIT(reg);
else
@@ -1491,7 +1491,7 @@ static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
return container_of(pcs, struct qca8k_pcs, pcs);
}
-static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
+static void qca8k_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
@@ -2016,7 +2016,7 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
.set_ageing_time = qca8k_set_ageing_time,
- .get_mac_eee = qca8k_get_mac_eee,
+ .support_eee = dsa_supports_eee,
.set_mac_eee = qca8k_set_mac_eee,
.port_enable = qca8k_port_enable,
.port_disable = qca8k_port_disable,
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index 560c74c4ac3d..13005f10edb7 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -557,13 +557,6 @@ exit:
return ret;
}
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_keee *e)
-{
- /* Nothing to do on the port's MAC */
- return 0;
-}
-
static int qca8k_port_configure_learning(struct dsa_switch *ds, int port,
bool learning)
{
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 3664a2e2f1f6..d046679265fa 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -16,7 +16,7 @@
#define QCA8K_ETHERNET_MDIO_PRIORITY 7
#define QCA8K_ETHERNET_PHY_PRIORITY 6
-#define QCA8K_ETHERNET_TIMEOUT 5
+#define QCA8K_ETHERNET_TIMEOUT msecs_to_jiffies(5)
#define QCA8K_NUM_PORTS 7
#define QCA8K_NUM_CPU_PORTS 2
@@ -520,7 +520,6 @@ int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
/* Common eee function */
int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *eee);
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
/* Common bridge function */
void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
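
The timeout fix works because wait_for_completion_timeout() takes jiffies, not milliseconds; folding msecs_to_jiffies() into the macro definition means no call site can accidentally pass a raw millisecond count. A sketch of the corrected usage, with placeholder names:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define MY_MGMT_TIMEOUT         msecs_to_jiffies(5)     /* 5 ms, in jiffies */

static int my_wait_reply(struct completion *done)
{
        /* returns 0 on timeout, remaining jiffies otherwise */
        if (!wait_for_completion_timeout(done, MY_MGMT_TIMEOUT))
                return -ETIMEDOUT;
        return 0;
}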
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index baba204ad62f..3d790f8c6f4d 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -26,12 +26,8 @@ void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len)
pr_err("Start bit (%d) expected to be larger than end (%d)\n",
start, end);
} else if (rc == -ERANGE) {
- if ((start - end + 1) > 64)
- pr_err("Field %d-%d too large for 64 bits!\n",
- start, end);
- else
- pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
- *val, start, end);
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
}
dump_stack();
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 9a542e3c9b05..977b42bc1e8c 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -159,7 +159,7 @@ config ETHOC
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
config OA_TC6
- tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support"
+ tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support" if COMPILE_TEST
depends on SPI
select PHYLIB
help
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 63c8a2328142..c1295dfad0d0 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -74,7 +74,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
if (threshold < time_since_last_napi && napi_scheduled) {
netdev_err(dev,
"napi handler hasn't been called for a long time but is scheduled\n");
- reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
}
schedule_reset:
/* Change the state of the device to trigger reset
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 72db9f9e7bee..c6bd803f5b0c 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -462,7 +462,7 @@ static void pcnet32_netif_start(struct net_device *dev)
val = lp->a->read_csr(ioaddr, CSR3);
val &= 0x00ff;
lp->a->write_csr(ioaddr, CSR3, val);
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
}
/*
@@ -889,6 +889,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -920,6 +921,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
lp->rx_ring_size, lp->tx_ring_size);
@@ -985,6 +987,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
if (netif_running(dev))
pcnet32_netif_stop(dev);
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
@@ -1122,6 +1125,7 @@ clean_up:
lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
}
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return rc;
} /* end pcnet32_loopback_test */
@@ -2101,6 +2105,7 @@ static int pcnet32_open(struct net_device *dev)
return -EAGAIN;
}
+ netdev_lock(dev);
spin_lock_irqsave(&lp->lock, flags);
/* Check for a valid station address */
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -2266,7 +2271,7 @@ static int pcnet32_open(struct net_device *dev)
goto err_free_ring;
}
- napi_enable(&lp->napi);
+ napi_enable_locked(&lp->napi);
/* Re-initialize the PCNET32, and start it when done. */
lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
@@ -2300,6 +2305,7 @@ static int pcnet32_open(struct net_device *dev)
lp->a->read_csr(ioaddr, CSR0));
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
return 0; /* Always succeed */
@@ -2315,6 +2321,7 @@ err_free_ring:
err_free_irq:
spin_unlock_irqrestore(&lp->lock, flags);
+ netdev_unlock(dev);
free_irq(dev->irq, dev);
return rc;
}
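
The pcnet32 changes follow the netdev instance-locking rules: napi_enable_locked() is the variant for callers that already hold the instance lock via netdev_lock(), as the reconfiguration paths above now do. A sketch of the pattern, assuming the helpers used in the hunks above:

#include <linux/netdevice.h>

static void my_reconfigure(struct net_device *dev, struct napi_struct *napi)
{
        netdev_lock(dev);
        /* ... reprogramming that must not race with open/close ... */
        napi_enable_locked(napi);       /* lock held, so not napi_enable() */
        netdev_unlock(dev);
}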
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index e641dbbea1e2..b854b6b42d77 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -421,18 +421,12 @@ static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
if (dev->of_node) {
struct clk *parent = clk_get_parent(pdata->clk);
+ long rate = rgmii_clock(pdata->phy_speed);
- switch (pdata->phy_speed) {
- case SPEED_10:
- clk_set_rate(parent, 2500000);
- break;
- case SPEED_100:
- clk_set_rate(parent, 25000000);
- break;
- default:
- clk_set_rate(parent, 125000000);
- break;
- }
+ if (rate < 0)
+ rate = 125000000;
+
+ clk_set_rate(parent, rate);
}
#ifdef CONFIG_ACPI
else {
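
The xgene change swaps an open-coded speed switch for the generic rgmii_clock() helper from <linux/phy.h>, which maps SPEED_10/100/1000 to 2.5/25/125 MHz and returns a negative errno for anything else. A sketch of the intended use, keeping the same 125 MHz fallback as above; my_set_rgmii_rate() is a placeholder:

#include <linux/clk.h>
#include <linux/phy.h>

static void my_set_rgmii_rate(struct clk *parent, int speed)
{
        long rate = rgmii_clock(speed); /* 2.5/25/125 MHz, or -EINVAL */

        if (rate < 0)
                rate = 125000000;       /* default to the gigabit rate */

        clk_set_rate(parent, rate);
}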
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index f93cb3da44b0..8fc75bcedb70 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -348,8 +348,6 @@ struct bcmasp_intf {
/* Used if per intf wol irq */
int wol_irq;
unsigned int wol_irq_enabled:1;
-
- struct ethtool_keee eee;
};
#define NUM_NET_FILTERS 32
@@ -601,5 +599,4 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable);
#endif
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index 9da5ae29a105..a537c121d3e2 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -348,58 +348,19 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return err;
}
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable)
-{
- u32 reg;
-
- reg = umac_rl(intf, UMC_EEE_CTRL);
- if (enable)
- reg |= EEE_EN;
- else
- reg &= ~EEE_EN;
- umac_wl(intf, reg, UMC_EEE_CTRL);
-
- intf->eee.eee_enabled = enable;
-}
-
static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_keee *p = &intf->eee;
-
if (!dev->phydev)
return -ENODEV;
- e->tx_lpi_enabled = p->tx_lpi_enabled;
- e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
-
return phy_ethtool_get_eee(dev->phydev, e);
}
static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_keee *p = &intf->eee;
- int ret;
-
if (!dev->phydev)
return -ENODEV;
- if (!p->eee_enabled) {
- bcmasp_eee_enable_set(intf, false);
- } else {
- ret = phy_init_eee(dev->phydev, 0);
- if (ret) {
- netif_err(intf, hw, dev,
- "EEE initialization failed: %d\n", ret);
- return ret;
- }
-
- umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER);
- intf->eee.tx_lpi_enabled = e->tx_lpi_enabled;
- bcmasp_eee_enable_set(intf, true);
- }
-
return phy_ethtool_set_eee(dev->phydev, e);
}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index cfd50efbdbc0..45ec1a9214a2 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -619,7 +619,6 @@ static void bcmasp_adj_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
u32 cmd_bits = 0, reg;
int changed = 0;
- bool active;
if (intf->old_link != phydev->link) {
changed = 1;
@@ -677,8 +676,13 @@ static void bcmasp_adj_link(struct net_device *dev)
}
umac_wl(intf, reg, UMC_CMD);
- active = phy_init_eee(phydev, 0) >= 0;
- bcmasp_eee_enable_set(intf, active);
+ umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER);
+ reg = umac_rl(intf, UMC_EEE_CTRL);
+ if (phydev->enable_tx_lpi)
+ reg |= EEE_EN;
+ else
+ reg &= ~EEE_EN;
+ umac_wl(intf, reg, UMC_EEE_CTRL);
}
reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
@@ -1055,6 +1059,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
/* Indicate that the MAC is responsible for PHY PM */
phydev->mac_managed_pm = true;
+
+ /* Set phylib's copy of the LPI timer */
+ phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
}
umac_reset(intf);
@@ -1331,7 +1338,8 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
ASP_WAKEUP_INTR2_MASK_CLEAR);
}
- if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
intf->parent->eee_fixup(intf, true);
netif_dbg(intf, wol, ndev, "entered WOL mode\n");
@@ -1373,7 +1381,8 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
u32 reg;
- if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
intf->parent->eee_fixup(intf, false);
reg = umac_rl(intf, UMC_MPD_CTRL);
@@ -1404,9 +1413,6 @@ int bcmasp_interface_resume(struct bcmasp_intf *intf)
bcmasp_resume_from_wol(intf);
- if (intf->eee.eee_enabled)
- bcmasp_eee_enable_set(intf, true);
-
netif_device_attach(dev);
return 0;
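
The bcmasp rework deletes the driver's private ethtool_keee shadow and lets phylib own the EEE state: the adjust-link path just mirrors phydev->enable_tx_lpi into the MAC's EEE_EN bit and programs the phylib-tracked LPI timer. A hedged sketch of that division of labour; both MAC register helpers are placeholders:

#include <linux/netdevice.h>
#include <linux/phy.h>

static void my_mac_write_lpi_timer(struct net_device *dev, u32 timer)
{
        /* placeholder: program the MAC's LPI timer register */
}

static void my_mac_enable_lpi(struct net_device *dev, bool enable)
{
        /* placeholder: set or clear the MAC's EEE enable bit */
}

static void my_adj_link(struct net_device *dev)
{
        struct phy_device *phydev = dev->phydev;

        /* phylib decides whether LPI may be used; the MAC just follows */
        my_mac_write_lpi_timer(dev, phydev->eee_cfg.tx_lpi_timer);
        my_mac_enable_lpi(dev, phydev->enable_tx_lpi);
}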
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b6f844cac80e..748c9b1ea701 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -81,7 +81,6 @@ MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
-#define BNXT_RX_COPY_THRESH 256
#define BNXT_TX_PUSH_THRESH 164
@@ -1343,13 +1342,13 @@ static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
if (!skb)
return NULL;
- dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
len + NET_IP_ALIGN);
- dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
skb_put(skb, len);
@@ -1842,7 +1841,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
@@ -2176,7 +2175,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
if (!xdp_active)
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
else
@@ -4608,6 +4607,12 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
bp->flags |= BNXT_FLAG_GRO;
}
+static void bnxt_init_ring_params(struct bnxt *bp)
+{
+ bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
+ bp->dev->ethtool->hds_thresh = BNXT_DEFAULT_RX_COPYBREAK;
+}
+
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
* be set on entry.
*/
@@ -4622,12 +4627,11 @@ void bnxt_set_ring_params(struct bnxt *bp)
rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
ring_size = bp->rx_ring_size;
bp->rx_agg_ring_size = 0;
bp->rx_agg_nr_pages = 0;
- if (bp->flags & BNXT_FLAG_TPA)
+ if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
@@ -4667,7 +4671,9 @@ void bnxt_set_ring_params(struct bnxt *bp)
ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
} else {
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_size = SKB_DATA_ALIGN(max(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak) +
+ NET_IP_ALIGN);
rx_space = rx_size + NET_SKB_PAD +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
@@ -6579,6 +6585,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
+ u16 hds_thresh = (u16)bp->dev->ethtool->hds_thresh;
struct hwrm_vnic_plcmodes_cfg_input *req;
int rc;
@@ -6588,16 +6595,14 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+ req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- if (BNXT_RX_PAGE_MODE(bp)) {
- req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- } else {
+ if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
req->enables |=
cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
- req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
- req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
}
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
return hwrm_req_send(bp, req);
@@ -8322,16 +8327,20 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (rc)
goto func_qcfg_exit;
+ flags = le16_to_cpu(resp->flags);
#ifdef CONFIG_BNXT_SRIOV
if (BNXT_VF(bp)) {
struct bnxt_vf_info *vf = &bp->vf;
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
+ vf->flags |= BNXT_VF_TRUST;
+ else
+ vf->flags &= ~BNXT_VF_TRUST;
} else {
bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
}
#endif
- flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
@@ -9160,10 +9169,18 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ena = 0;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
- extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
- /* allocate extra qps if fw supports RoCE fast qp destroy feature */
- extra_qps += fast_qpmd_qps;
- extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (BNXT_SW_RES_LMT(bp)) {
+ extra_qps = max_qps - l2_qps - qp1_qps;
+ extra_srqs = max_srqs - srqs;
+ } else {
+ extra_qps = min_t(u32, 65536,
+ max_qps - l2_qps - qp1_qps);
+ /* allocate extra qps if fw supports RoCE fast qp
+ * destroy feature
+ */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ }
if (fast_qpmd_qps)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
}
@@ -9199,14 +9216,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
goto skip_rdma;
ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
- /* 128K extra is needed to accommodate static AH context
- * allocation by f/w.
- */
- num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
- num_ah = min_t(u32, num_mr, 1024 * 128);
- ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
- if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
- ctxm->mrav_av_entries = num_ah;
+ if (BNXT_SW_RES_LMT(bp) &&
+ ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
+ num_ah = ctxm->mrav_av_entries;
+ num_mr = ctxm->max_entries - num_ah;
+ } else {
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+ }
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
if (rc)
@@ -9513,6 +9536,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
+ if (flags_ext2 &
+ FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
if (BNXT_PF(bp) &&
(flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
@@ -11571,6 +11597,26 @@ hwrm_phy_qcaps_exit:
return rc;
}
+static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_mac_qcaps_output *resp;
+ struct hwrm_port_mac_qcaps_input *req;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10a03)
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
+ if (rc)
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->mac_flags = resp->flags;
+ hwrm_req_drop(bp, req);
+}
+
static bool bnxt_support_dropped(u16 advertising, u16 supported)
{
u16 diff = advertising ^ supported;
@@ -15701,6 +15747,10 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
bp->dev->priv_flags |= IFF_SUPP_NOFCS;
else
bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
+
+ bp->mac_flags = 0;
+ bnxt_hwrm_mac_qcaps(bp);
+
if (!fw_dflt)
return 0;
@@ -16231,6 +16281,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_init_l2_fltr_tbl(bp);
__bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
+ bnxt_init_ring_params(bp);
bnxt_set_ring_params(bp);
bnxt_rdma_aux_device_init(bp);
rc = bnxt_set_dflt_rings(bp, true);
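
The HDS threshold added above is surfaced through the extended ringparam interface, as bnxt_get_ringparam() does below; with a new enough ethtool this should be reachable as, e.g., "ethtool -G eth0 tcp-data-split on hds-thresh 256" (userspace syntax assumed). A sketch of the driver side; my_get_ringparam and the 1023 limit are placeholders:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static void my_get_ringparam(struct net_device *dev,
                             struct ethtool_ringparam *ering,
                             struct kernel_ethtool_ringparam *kernel_ering,
                             struct netlink_ext_ack *extack)
{
        /* report the current split threshold and the device limit */
        kernel_ering->hds_thresh = dev->ethtool->hds_thresh;
        kernel_ering->hds_thresh_max = 1023;
}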
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index f11ed59203d9..2373f423a523 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -34,6 +34,9 @@
#include <linux/firmware/broadcom/tee_bnxt_fw.h>
#endif
+#define BNXT_DEFAULT_RX_COPYBREAK 256
+#define BNXT_MAX_RX_COPYBREAK 1024
+
extern struct list_head bnxt_block_cb_list;
struct page_pool;
@@ -2241,8 +2244,6 @@ struct bnxt {
#define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
#define BNXT_FLAG_JUMBO 0x10
#define BNXT_FLAG_STRIP_VLAN 0x20
- #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
- BNXT_FLAG_LRO)
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
@@ -2263,6 +2264,9 @@ struct bnxt {
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_TX_COAL_CMPL 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
+ #define BNXT_FLAG_HDS 0x20000000
+ #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+ BNXT_FLAG_LRO | BNXT_FLAG_HDS)
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -2270,6 +2274,11 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+#ifdef CONFIG_BNXT_SRIOV
+#define BNXT_VF_IS_TRUSTED(bp) ((bp)->vf.flags & BNXT_VF_TRUST)
+#else
+#define BNXT_VF_IS_TRUSTED(bp) 0
+#endif
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
@@ -2342,7 +2351,7 @@ struct bnxt {
enum dma_data_direction rx_dir;
u32 rx_ring_size;
u32 rx_agg_ring_size;
- u32 rx_copy_thresh;
+ u32 rx_copybreak;
u32 rx_ring_mask;
u32 rx_agg_ring_mask;
int rx_nr_pages;
@@ -2482,6 +2491,7 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
#define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
+ #define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
u32 fw_dbg_cap;
@@ -2501,6 +2511,8 @@ struct bnxt {
((bp)->fw_cap & BNXT_FW_CAP_ENABLE_RDMA_SRIOV)
#define BNXT_ROCE_VF_RESC_CAP(bp) \
((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED)
+#define BNXT_SW_RES_LMT(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
@@ -2660,6 +2672,11 @@ struct bnxt {
#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8)
+ /* copied from flags in hwrm_port_mac_qcaps_output */
+ u8 mac_flags;
+#define BNXT_MAC_FL_NO_MAC_LPBK \
+ PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
+
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2762,6 +2779,8 @@ struct bnxt {
#define SFF_MODULE_ID_QSFP28 0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
+#define BNXT_HDS_THRESHOLD_MAX 1023
+
static inline u32 bnxt_tx_avail(struct bnxt *bp,
const struct bnxt_tx_ring_info *txr)
{
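
The rx_copy_thresh to rx_copybreak rename in bnxt.c exists because bnxt_ethtool.c below exposes the value through the standard ethtool tunable interface; a tunables-aware ethtool drives it with "ethtool --set-tunable eth0 rx-copybreak 512" and reads it back with --get-tunable. A sketch of the driver-side plumbing, with placeholder names:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define MY_MAX_RX_COPYBREAK     1024

struct my_priv {
        u32 rx_copybreak;
};

static int my_set_tunable(struct net_device *dev,
                          const struct ethtool_tunable *tuna, const void *data)
{
        struct my_priv *p = netdev_priv(dev);
        u32 val;

        if (tuna->id != ETHTOOL_RX_COPYBREAK)
                return -EOPNOTSUPP;

        val = *(const u32 *)data;
        if (val > MY_MAX_RX_COPYBREAK)
                return -ERANGE;
        if (val != p->rx_copybreak && netif_running(dev))
                return -EBUSY;  /* only reconfigurable while down */

        p->rx_copybreak = val;
        return 0;
}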
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index d87681d71106..540c140d52dc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -833,6 +833,9 @@ static void bnxt_get_ringparam(struct net_device *dev,
ering->rx_pending = bp->rx_ring_size;
ering->rx_jumbo_pending = bp->rx_agg_ring_size;
ering->tx_pending = bp->tx_ring_size;
+
+ kernel_ering->hds_thresh = dev->ethtool->hds_thresh;
+ kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX;
}
static int bnxt_set_ringparam(struct net_device *dev,
@@ -840,16 +843,35 @@ static int bnxt_set_ringparam(struct net_device *dev,
struct kernel_ethtool_ringparam *kernel_ering,
struct netlink_ext_ack *extack)
{
+ u8 tcp_data_split = kernel_ering->tcp_data_split;
struct bnxt *bp = netdev_priv(dev);
+ u8 hds_config_mod;
if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
(ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
(ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
return -EINVAL;
+ hds_config_mod = tcp_data_split != dev->ethtool->hds_config;
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod)
+ return -EINVAL;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ hds_config_mod && BNXT_RX_PAGE_MODE(bp)) {
+ NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached");
+ return -EINVAL;
+ }
+
if (netif_running(dev))
bnxt_close_nic(bp, false, false);
+ if (hds_config_mod) {
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ bp->flags |= BNXT_FLAG_HDS;
+ else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ bp->flags &= ~BNXT_FLAG_HDS;
+ }
+
bp->rx_ring_size = ering->rx_pending;
bp->tx_ring_size = ering->tx_pending;
bnxt_set_ring_params(bp);
@@ -2050,7 +2072,8 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
int rc;
regs->version = 0;
- bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
+ bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
return;
@@ -4327,6 +4350,45 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
return 0;
}
+static int bnxt_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 rx_copybreak;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ rx_copybreak = *(u32 *)data;
+ if (rx_copybreak > BNXT_MAX_RX_COPYBREAK)
+ return -ERANGE;
+ if (rx_copybreak != bp->rx_copybreak) {
+ if (netif_running(dev))
+ return -EBUSY;
+ bp->rx_copybreak = rx_copybreak;
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnxt_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = bp->rx_copybreak;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
u16 page_number, u8 bank,
u16 start_addr, u16 data_length,
@@ -4375,6 +4437,9 @@ static int bnxt_get_module_info(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
/* No point in going further if phy status indicates
* module is not inserted or if it is powered down or
* if it is of type 10GBase-T
@@ -4426,6 +4491,9 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
u16 start = eeprom->offset, length = eeprom->len;
int rc = 0;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
memset(data, 0, eeprom->len);
/* Read A0 portion of the EEPROM */
@@ -4480,6 +4548,12 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Module read not permitted on untrusted VF");
+ return -EPERM;
+ }
+
rc = bnxt_get_module_status(bp, extack);
if (rc)
return rc;
@@ -4777,7 +4851,8 @@ static int bnxt_run_loopback(struct bnxt *bp)
cpr = &rxr->bnapi->cp_ring;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
cpr = rxr->rx_cpr;
- pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+ pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak));
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
return -ENOMEM;
@@ -4887,35 +4962,44 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
bnxt_close_nic(bp, true, false);
bnxt_run_fw_tests(bp, test_mask, &test_results);
- buf[BNXT_MACLPBK_TEST_IDX] = 1;
- bnxt_hwrm_mac_loopback(bp, true);
- msleep(250);
rc = bnxt_half_open_nic(bp);
if (rc) {
- bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+ buf[BNXT_MACLPBK_TEST_IDX] = 1;
+ if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK)
+ goto skip_mac_loopback;
+
+ bnxt_hwrm_mac_loopback(bp, true);
+ msleep(250);
if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
else
buf[BNXT_MACLPBK_TEST_IDX] = 0;
bnxt_hwrm_mac_loopback(bp, false);
+skip_mac_loopback:
+ buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK)
+ goto skip_phy_loopback;
+
bnxt_hwrm_phy_loopback(bp, true, false);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_PHYLPBK_TEST_IDX] = 0;
+skip_phy_loopback:
+ buf[BNXT_EXTLPBK_TEST_IDX] = 1;
if (do_ext_lpbk) {
etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
bnxt_hwrm_phy_loopback(bp, true, true);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_EXTLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_EXTLPBK_TEST_IDX] = 0;
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
@@ -5309,6 +5393,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_STATS_BLOCK_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
ETHTOOL_COALESCE_USE_CQE,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_fec_stats = bnxt_get_fec_stats,
@@ -5350,6 +5436,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ext_stats = bnxt_get_link_ext_stats,
.get_eee = bnxt_get_eee,
.set_eee = bnxt_set_eee,
+ .get_tunable = bnxt_get_tunable,
+ .set_tunable = bnxt_set_tunable,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
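
The get/set_tunable hooks added above expose bp->rx_copybreak through the generic ethtool tunable ABI. Below is a minimal user-space sketch of driving it over the classic ioctl path; the "eth0" name and the 256-byte value are assumptions, and since bnxt_set_tunable() refuses changes while the interface is running, the link must be brought down first. A plain datagram socket serves as the ioctl handle; recent ethtool binaries wrap the same operation as "ethtool --set-tunable eth0 rx-copybreak 256".

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    /* Set ETHTOOL_RX_COPYBREAK on @ifname to @val via SIOCETHTOOL. */
    static int set_rx_copybreak(int fd, const char *ifname, __u32 val)
    {
        struct ethtool_tunable *tuna;
        struct ifreq ifr = {0};
        int ret;

        tuna = calloc(1, sizeof(*tuna) + sizeof(val));
        if (!tuna)
            return -1;

        tuna->cmd = ETHTOOL_STUNABLE;
        tuna->id = ETHTOOL_RX_COPYBREAK;
        tuna->type_id = ETHTOOL_TUNABLE_U32;
        tuna->len = sizeof(val);
        memcpy(tuna->data, &val, sizeof(val));

        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)tuna;
        ret = ioctl(fd, SIOCETHTOOL, &ifr);
        free(tuna);
        return ret;
    }

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0 || set_rx_copybreak(fd, "eth0", 256) < 0)
            perror("rx-copybreak");
        return 0;
    }

Values above BNXT_MAX_RX_COPYBREAK come back as -ERANGE, per bnxt_set_tunable() above.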
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 0ed26e3a28f4..7c88b9f05c4c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -417,6 +417,8 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->flags |= BNXT_EN_FLAG_VF;
if (BNXT_ROCE_VF_RESC_CAP(bp))
edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT;
+ if (BNXT_SW_RES_LMT(bp))
+ edev->flags |= BNXT_EN_FLAG_SW_RES_LMT;
edev->chip_num = bp->chip_num;
edev->hw_ring_stats_size = bp->hw_ring_stats_size;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 5d6aac60f236..54ad9f8273d7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -65,6 +65,8 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_VF 0x10
#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
#define BNXT_EN_FLAG_ROCE_VF_RES_MGMT 0x20
+ #define BNXT_EN_FLAG_SW_RES_LMT 0x40
+#define BNXT_EN_SW_RES_LMT(edev) ((edev)->flags & BNXT_EN_FLAG_SW_RES_LMT)
struct bnxt_ulp *ulp_tbl;
int l2_db_size; /* Doorbell BAR size in
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index dc51dce209d5..e6c64e4bd66c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -395,6 +395,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
return -EOPNOTSUPP;
}
+ if (prog && bp->flags & BNXT_FLAG_HDS) {
+ netdev_warn(dev, "XDP is disallowed when HDS is enabled.\n");
+ return -EOPNOTSUPP;
+ }
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index daa416fb1724..48496209fb16 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -530,19 +530,9 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
return;
- switch (speed) {
- case SPEED_10:
- rate = 2500000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0)
return;
- }
rate_rounded = clk_round_rate(bp->tx_clk, rate);
if (rate_rounded < 0)
@@ -578,6 +568,7 @@ static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
@@ -608,7 +599,7 @@ static int macb_usx_pcs_config(struct phylink_pcs *pcs,
return 0;
}
-static void macb_pcs_get_state(struct phylink_pcs *pcs,
+static void macb_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
state->link = 0;
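
For reference, the rgmii_clock() helper that replaces macb's open-coded switch lives in <linux/phy.h>. A kernel-context sketch of the mapping it encapsulates, inferred from the removed lines and from the new negative-return check; the real helper may support additional speeds:

    /* Same speed-to-rate mapping the deleted switch implemented; a
     * negative errno for unsupported speeds matches macb's new
     * "if (rate < 0) return;" bail-out.
     */
    static long example_rgmii_clock(int speed)
    {
        switch (speed) {
        case SPEED_10:
            return 2500000;     /* 2.5 MHz */
        case SPEED_100:
            return 25000000;    /* 25 MHz */
        case SPEED_1000:
            return 125000000;   /* 125 MHz */
        default:
            return -EINVAL;
        }
    }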
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 604dcfd49aa4..2f0b3e389e62 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6562,6 +6562,9 @@ static void cxgb4_advance_esn_state(struct xfrm_state *x)
{
struct adapter *adap = netdev2adap(x->xso.dev);
+ if (x->xso.dir != XFRM_DEV_OFFLOAD_IN)
+ return;
+
if (!mutex_trylock(&uld_mutex)) {
dev_dbg(adap->pdev_dev,
"crypto uld critical resource is under use\n");
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9913952ccb42..49f6cab01ed5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -109,7 +109,7 @@ static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
{0, 0}, /* 0 - 4 Gbps */
{0, 3}, /* 4 - 10 Gbps */
- {3, 6}, /* 10 - 40 Gbps */
+ {3, 6}, /* 10+ Gbps */
};
static void enic_init_affinity_hint(struct enic *enic)
@@ -428,6 +428,36 @@ static void enic_mtu_check(struct enic *enic)
}
}
+static void enic_set_rx_coal_setting(struct enic *enic)
+{
+ unsigned int speed;
+ int index = -1;
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+ /* 1. Read the link speed from fw
+ * 2. Pick the default range for the speed
+ * 3. Update it in enic->rx_coalesce_setting
+ */
+ speed = vnic_dev_port_speed(enic->vdev);
+ if (speed > ENIC_LINK_SPEED_10G)
+ index = ENIC_LINK_40G_INDEX;
+ else if (speed > ENIC_LINK_SPEED_4G)
+ index = ENIC_LINK_10G_INDEX;
+ else
+ index = ENIC_LINK_4G_INDEX;
+
+ rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+ rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+ rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+ /* Start with the value provided by UCSM */
+ for (index = 0; index < enic->rq_count; index++)
+ enic->cq[index].cur_rx_coal_timeval =
+ enic->config.intr_timer_usec;
+
+ rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
static void enic_link_check(struct enic *enic)
{
int link_status = vnic_dev_link_status(enic->vdev);
@@ -436,6 +466,7 @@ static void enic_link_check(struct enic *enic)
if (link_status && !carrier_ok) {
netdev_info(enic->netdev, "Link UP\n");
netif_carrier_on(enic->netdev);
+ enic_set_rx_coal_setting(enic);
} else if (!link_status && carrier_ok) {
netdev_info(enic->netdev, "Link DOWN\n");
netif_carrier_off(enic->netdev);
@@ -1901,36 +1932,6 @@ static void enic_synchronize_irqs(struct enic *enic)
}
}
-static void enic_set_rx_coal_setting(struct enic *enic)
-{
- unsigned int speed;
- int index = -1;
- struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
-
- /* 1. Read the link speed from fw
- * 2. Pick the default range for the speed
- * 3. Update it in enic->rx_coalesce_setting
- */
- speed = vnic_dev_port_speed(enic->vdev);
- if (ENIC_LINK_SPEED_10G < speed)
- index = ENIC_LINK_40G_INDEX;
- else if (ENIC_LINK_SPEED_4G < speed)
- index = ENIC_LINK_10G_INDEX;
- else
- index = ENIC_LINK_4G_INDEX;
-
- rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
- rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
- rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
-
- /* Start with the value provided by UCSM */
- for (index = 0; index < enic->rq_count; index++)
- enic->cq[index].cur_rx_coal_timeval =
- enic->config.intr_timer_usec;
-
- rx_coal->use_adaptive_rx_coalesce = 1;
-}
-
static int enic_dev_notify_set(struct enic *enic)
{
int err;
@@ -3063,7 +3064,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&enic->notify_timer, enic_notify_timer, 0);
enic_rfs_flw_tbl_init(enic);
- enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 95a5295d0361..0d030cb0b21c 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1966,23 +1966,41 @@ failed:
static void tsnep_queue_enable(struct tsnep_queue *queue)
{
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ netif_napi_set_irq(&queue->napi, queue->irq);
napi_enable(&queue->napi);
- tsnep_enable_irq(queue->adapter, queue->irq_mask);
+ tsnep_enable_irq(adapter, queue->irq_mask);
- if (queue->tx)
+ if (queue->tx) {
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, &queue->napi);
tsnep_tx_enable(queue->tx);
+ }
- if (queue->rx)
+ if (queue->rx) {
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, &queue->napi);
tsnep_rx_enable(queue->rx);
+ }
}
static void tsnep_queue_disable(struct tsnep_queue *queue)
{
- if (queue->tx)
+ struct tsnep_adapter *adapter = queue->adapter;
+
+ if (queue->rx)
+ netif_queue_set_napi(adapter->netdev, queue->rx->queue_index,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+
+ if (queue->tx) {
tsnep_tx_disable(queue->tx, &queue->napi);
+ netif_queue_set_napi(adapter->netdev, queue->tx->queue_index,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
napi_disable(&queue->napi);
- tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ tsnep_disable_irq(adapter, queue->irq_mask);
/* disable RX after NAPI polling has been disabled, because RX can be
* enabled during NAPI polling
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 75401d2a5fb4..a2d7300925a8 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -81,8 +81,7 @@ config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
depends on QUICC_ENGINE && PPC32
select FSL_PQ_MDIO
- select PHYLIB
- select FIXED_PHY
+ select PHYLINK
help
This driver supports the Gigabit Ethernet mode of the QUICC Engine,
which is available on some Freescale SOCs.
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index bf5baef5c3e0..4948b4906584 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2281,7 +2281,7 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
new_xdpf->len = xdpf->len;
new_xdpf->headroom = priv->tx_headroom;
new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
- new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+ new_xdpf->mem_type = MEM_TYPE_PAGE_ORDER0;
/* Release the initial buffer */
xdp_return_frame_rx_napi(xdpf);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index a293b08f36d4..147a93bf9fa9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -780,13 +780,14 @@ struct ethsw_dump_ctx {
static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
struct ethsw_dump_ctx *dump)
{
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 535969fa0fdb..6a6fc819dfde 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -146,6 +146,27 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
return 0;
}
+static bool enetc_tx_csum_offload_check(struct sk_buff *skb)
+{
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ case offsetof(struct udphdr, check):
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool enetc_skb_is_ipv6(struct sk_buff *skb)
+{
+ return vlan_get_protocol(skb) == htons(ETH_P_IPV6);
+}
+
+static bool enetc_skb_is_tcp(struct sk_buff *skb)
+{
+ return skb->csum_offset == offsetof(struct tcphdr, check);
+}
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
@@ -163,6 +184,29 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_addr_t dma;
u8 flags = 0;
+ enetc_clear_tx_bd(&temp_bd);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* Can not support TSD and checksum offload at the same time */
+ if (priv->active_offloads & ENETC_F_TXCSUM &&
+ enetc_tx_csum_offload_check(skb) && !tx_ring->tsd_enable) {
+ temp_bd.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START,
+ skb_network_offset(skb));
+ temp_bd.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ skb_network_header_len(skb) / 4);
+ temp_bd.l3_aux1 |= FIELD_PREP(ENETC_TX_BD_L3T,
+ enetc_skb_is_ipv6(skb));
+ if (enetc_skb_is_tcp(skb))
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_TCP);
+ else
+ temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
+ ENETC_TXBD_L4T_UDP);
+ flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS;
+ } else if (skb_checksum_help(skb)) {
+ return 0;
+ }
+ }
+
i = tx_ring->next_to_use;
txbd = ENETC_TXBD(*tx_ring, i);
prefetchw(txbd);
@@ -173,7 +217,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
temp_bd.addr = cpu_to_le64(dma);
temp_bd.buf_len = cpu_to_le16(len);
- temp_bd.lstatus = 0;
tx_swbd = &tx_ring->tx_swbd[i];
tx_swbd->dma = dma;
@@ -489,8 +532,233 @@ static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso
}
}
+static int enetc_lso_count_descs(const struct sk_buff *skb)
+{
+ /* 4 extra BDs: 1 BD for the LSO header, 1 BD for the extended
+ * BD, 1 BD for the linear-area data excluding the LSO header,
+ * namely skb_headlen(skb) - lso_hdr_len (which may be 0, but
+ * that's fine, we only need to consider the worst case), and
+ * 1 BD for the gap.
+ */
+ return skb_shinfo(skb)->nr_frags + 4;
+}
+
+static int enetc_lso_get_hdr_len(const struct sk_buff *skb)
+{
+ int hdr_len, tlen;
+
+ tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr);
+ hdr_len = skb_transport_offset(skb) + tlen;
+
+ return hdr_len;
+}
+
+static void enetc_lso_start(struct sk_buff *skb, struct enetc_lso_t *lso)
+{
+ lso->lso_seg_size = skb_shinfo(skb)->gso_size;
+ lso->ipv6 = enetc_skb_is_ipv6(skb);
+ lso->tcp = skb_is_gso_tcp(skb);
+ lso->l3_hdr_len = skb_network_header_len(skb);
+ lso->l3_start = skb_network_offset(skb);
+ lso->hdr_len = enetc_lso_get_hdr_len(skb);
+ lso->total_len = skb->len - lso->hdr_len;
+}
+
+static void enetc_lso_map_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso)
+{
+ union enetc_tx_bd txbd_tmp, *txbd;
+ struct enetc_tx_swbd *tx_swbd;
+ u16 frm_len, frm_len_ext;
+ u8 flags, e_flags = 0;
+ dma_addr_t addr;
+ char *hdr;
+
+ /* Get the first BD of the LSO BDs chain */
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ /* Prepare LSO header: MAC + IP + TCP/UDP */
+ hdr = tx_ring->tso_headers + *i * TSO_HEADER_SIZE;
+ memcpy(hdr, skb->data, lso->hdr_len);
+ addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+
+ /* {frm_len_ext, frm_len} indicates the total length of
+ * the large transmit data unit. frm_len contains the 16 least
+ * significant bits and frm_len_ext contains the 4 most
+ * significant bits.
+ */
+ frm_len = lso->total_len & 0xffff;
+ frm_len_ext = (lso->total_len >> 16) & 0xf;
+
+ /* Set the flags of the first BD */
+ flags = ENETC_TXBD_FLAGS_EX | ENETC_TXBD_FLAGS_CSUM_LSO |
+ ENETC_TXBD_FLAGS_LSO | ENETC_TXBD_FLAGS_L4CS;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.hdr_len = cpu_to_le16(lso->hdr_len);
+
+ /* first BD needs frm_len and offload flags set */
+ txbd_tmp.frm_len = cpu_to_le16(frm_len);
+ txbd_tmp.flags = flags;
+
+ txbd_tmp.l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, lso->l3_start);
+ /* l3_hdr_len in units of 32-bit words (4 bytes) */
+ txbd_tmp.l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN,
+ lso->l3_hdr_len / 4);
+ if (lso->ipv6)
+ txbd_tmp.l3_aux1 |= ENETC_TX_BD_L3T;
+ else
+ txbd_tmp.l3_aux0 |= ENETC_TX_BD_IPCS;
+
+ txbd_tmp.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, lso->tcp ?
+ ENETC_TXBD_L4T_TCP : ENETC_TXBD_L4T_UDP);
+
+ /* For the LSO header we do not set the dma address since
+ * we do not want it unmapped when we do cleanup. We still
+ * set len so that we count the bytes sent.
+ */
+ tx_swbd->len = lso->hdr_len;
+ tx_swbd->do_twostep_tstamp = false;
+ tx_swbd->check_wb = false;
+
+ /* Actually write the header in the BD */
+ *txbd = txbd_tmp;
+
+ /* Get the next BD, and the next BD is extended BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ if (skb_vlan_tag_present(skb)) {
+ /* Setup the VLAN fields */
+ txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ txbd_tmp.ext.tpid = ENETC_TPID_8021Q;
+ e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
+ }
+
+ /* Write the BD */
+ txbd_tmp.ext.e_flags = e_flags;
+ txbd_tmp.ext.lso_sg_size = cpu_to_le16(lso->lso_seg_size);
+ txbd_tmp.ext.frm_len_ext = cpu_to_le16(frm_len_ext);
+ *txbd = txbd_tmp;
+}
+
+static int enetc_lso_map_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ int *i, struct enetc_lso_t *lso, int *count)
+{
+ union enetc_tx_bd txbd_tmp, *txbd = NULL;
+ struct enetc_tx_swbd *tx_swbd;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ u8 flags = 0;
+ int len, f;
+
+ len = skb_headlen(skb) - lso->hdr_len;
+ if (len > 0) {
+ dma = dma_map_single(tx_ring->dev, skb->data + lso->hdr_len,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 0;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ frag = &skb_shinfo(skb)->frags[0];
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
+ if (txbd)
+ *txbd = txbd_tmp;
+
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(tx_ring->dev, frag);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return -ENOMEM;
+
+ /* Get the next BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+ *count += 1;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.addr = cpu_to_le64(dma);
+ txbd_tmp.buf_len = cpu_to_le16(len);
+
+ tx_swbd->dma = dma;
+ tx_swbd->len = len;
+ tx_swbd->is_dma_page = 1;
+ tx_swbd->dir = DMA_TO_DEVICE;
+ }
+
+ /* Last BD needs 'F' bit set */
+ flags |= ENETC_TXBD_FLAGS_F;
+ txbd_tmp.flags = flags;
+ *txbd = txbd_tmp;
+
+ tx_swbd->is_eof = 1;
+ tx_swbd->skb = skb;
+
+ return 0;
+}
+
+static int enetc_lso_hw_offload(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_lso_t lso = {0};
+ int err, i, count = 0;
+
+ /* Initialize the LSO handler */
+ enetc_lso_start(skb, &lso);
+ i = tx_ring->next_to_use;
+
+ enetc_lso_map_hdr(tx_ring, skb, &i, &lso);
+ /* First BD plus the extended BD */
+ count += 2;
+
+ err = enetc_lso_map_data(tx_ring, skb, &i, &lso, &count);
+ if (err)
+ goto dma_err;
+
+ /* Go to the next BD */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ tx_ring->next_to_use = i;
+ enetc_update_tx_ring_tail(tx_ring);
+
+ return count;
+
+dma_err:
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (--count);
+
+ return 0;
+}
+
static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
+ struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
int hdr_len, total_len, data_len;
struct enetc_tx_swbd *tx_swbd;
union enetc_tx_bd *txbd;
@@ -556,7 +824,7 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
bd_data_num++;
tso_build_data(skb, &tso, size);
- if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
+ if (unlikely(bd_data_num >= priv->max_frags && data_len))
goto err_chained_bd;
}
@@ -594,7 +862,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
- int count, err;
+ int count;
/* Queue one-step Sync packet if already locked */
if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
@@ -608,16 +876,28 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
tx_ring = priv->tx_ring[skb->queue_mapping];
if (skb_is_gso(skb)) {
- if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+ /* LSO data unit lengths of up to 256KB are supported */
+ if (priv->active_offloads & ENETC_F_LSO &&
+ (skb->len - enetc_lso_get_hdr_len(skb)) <=
+ ENETC_LSO_MAX_DATA_LEN) {
+ if (enetc_bd_unused(tx_ring) < enetc_lso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- enetc_lock_mdio();
- count = enetc_map_tx_tso_buffs(tx_ring, skb);
- enetc_unlock_mdio();
+ count = enetc_lso_hw_offload(tx_ring, skb);
+ } else {
+ if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
+
+ enetc_lock_mdio();
+ count = enetc_map_tx_tso_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ }
} else {
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_shinfo(skb)->nr_frags > priv->max_frags))
if (unlikely(skb_linearize(skb)))
goto drop_packet_err;
@@ -627,11 +907,6 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- err = skb_checksum_help(skb);
- if (err)
- goto drop_packet_err;
- }
enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb);
enetc_unlock_mdio();
@@ -640,7 +915,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
if (unlikely(!count))
goto drop_packet_err;
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED(priv->max_frags))
netif_stop_subqueue(ndev, tx_ring->index);
return NETDEV_TX_OK;
@@ -908,7 +1183,8 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
!test_bit(ENETC_TX_DOWN, &priv->flags) &&
- (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
+ (enetc_bd_unused(tx_ring) >=
+ ENETC_TXBDS_MAX_NEEDED(priv->max_frags)))) {
netif_wake_subqueue(ndev, tx_ring->index);
}
@@ -1759,6 +2035,9 @@ void enetc_get_si_caps(struct enetc_si *si)
rss = enetc_rd(hw, ENETC_SIRSSCAPR);
si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
}
+
+ if (val & ENETC_SIPCAPR0_LSO)
+ si->hw_features |= ENETC_SI_F_LSO;
}
EXPORT_SYMBOL_GPL(enetc_get_si_caps);
@@ -2055,6 +2334,14 @@ static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
return 0;
}
+static void enetc_set_lso_flags_mask(struct enetc_hw *hw)
+{
+ enetc_wr(hw, ENETC4_SILSOSFMR0,
+ SILSOSFMR0_VAL_SET(ENETC4_TCP_NL_SEG_FLAGS_DMASK,
+ ENETC4_TCP_NL_SEG_FLAGS_DMASK));
+ enetc_wr(hw, ENETC4_SILSOSFMR1, 0);
+}
+
int enetc_configure_si(struct enetc_ndev_priv *priv)
{
struct enetc_si *si = priv->si;
@@ -2068,6 +2355,9 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
/* enable SI */
enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
+ if (si->hw_features & ENETC_SI_F_LSO)
+ enetc_set_lso_flags_mask(hw);
+
/* TODO: RSS support for i.MX95 will be supported later, and the
* is_enetc_rev1() condition will be removed
*/
@@ -3269,17 +3559,21 @@ EXPORT_SYMBOL_GPL(enetc_pci_remove);
static const struct enetc_drvdata enetc_pf_data = {
.sysclk_freq = ENETC_CLK_400M,
.pmac_offset = ENETC_PMAC_OFFSET,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
.eth_ops = &enetc_pf_ethtool_ops,
};
static const struct enetc_drvdata enetc4_pf_data = {
.sysclk_freq = ENETC_CLK_333M,
+ .tx_csum = true,
+ .max_frags = ENETC4_MAX_SKB_FRAGS,
.pmac_offset = ENETC4_PMAC_OFFSET,
.eth_ops = &enetc4_pf_ethtool_ops,
};
static const struct enetc_drvdata enetc_vf_data = {
.sysclk_freq = ENETC_CLK_400M,
+ .max_frags = ENETC_MAX_SKB_FRAGS,
.eth_ops = &enetc_vf_ethtool_ops,
};
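
The 20-bit {frm_len_ext, frm_len} split that enetc_lso_map_hdr() performs is easy to verify by hand. A standalone example with an assumed 150000-byte LSO data unit; anything up to ENETC_LSO_MAX_DATA_LEN (256 KB) fits in the 20 bits:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t total_len = 150000;                    /* 0x249f0, assumed */
        uint16_t frm_len = total_len & 0xffff;          /* 16 LSBs: 0x49f0 */
        uint16_t frm_len_ext = (total_len >> 16) & 0xf; /* next 4 bits: 0x2 */

        printf("frm_len=0x%04x frm_len_ext=0x%x\n", frm_len, frm_len_ext);
        return 0;
    }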
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 72fa03dbc2dd..4ad4eb5c5a74 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -41,6 +41,18 @@ struct enetc_tx_swbd {
u8 qbv_en:1;
};
+struct enetc_lso_t {
+ bool ipv6;
+ bool tcp;
+ u8 l3_hdr_len;
+ u8 hdr_len; /* LSO header length */
+ u8 l3_start;
+ u16 lso_seg_size;
+ int total_len; /* total data length, excluding the LSO header */
+};
+
+#define ENETC_LSO_MAX_DATA_LEN SZ_256K
+
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
@@ -59,9 +71,16 @@ struct enetc_rx_swbd {
/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val) ((val) + 2)
-/* max # of chained Tx BDs is 15, including head and extension BD */
+/* For LS1028A, max # of chained Tx BDs is 15, including head and
+ * extension BD.
+ */
#define ENETC_MAX_SKB_FRAGS 13
-#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
+/* For ENETC v4 and later versions, max # of chained Tx BDs is 63,
+ * including head and extension BD, but since MAX_SKB_FRAGS ranges
+ * from 17 to 45, ENETC4_MAX_SKB_FRAGS is simply set to MAX_SKB_FRAGS.
+ */
+#define ENETC4_MAX_SKB_FRAGS MAX_SKB_FRAGS
+#define ENETC_TXBDS_MAX_NEEDED(x) ENETC_TXBDS_NEEDED((x) + 1)
struct enetc_ring_stats {
unsigned int packets;
@@ -231,9 +250,12 @@ enum enetc_errata {
#define ENETC_SI_F_PSFP BIT(0)
#define ENETC_SI_F_QBV BIT(1)
#define ENETC_SI_F_QBU BIT(2)
+#define ENETC_SI_F_LSO BIT(3)
struct enetc_drvdata {
u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */
+ u8 tx_csum:1;
+ u8 max_frags;
u64 sysclk_freq;
const struct ethtool_ops *eth_ops;
};
@@ -341,6 +363,8 @@ enum enetc_active_offloads {
ENETC_F_QBV = BIT(9),
ENETC_F_QCI = BIT(10),
ENETC_F_QBU = BIT(11),
+ ENETC_F_TXCSUM = BIT(12),
+ ENETC_F_LSO = BIT(13),
};
enum enetc_flags_bit {
@@ -375,6 +399,7 @@ struct enetc_ndev_priv {
u16 msg_enable;
u8 preemptible_tcs;
+ u8 max_frags; /* The maximum number of BDs for fragments */
enum enetc_active_offloads active_offloads;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
index 26b220677448..695cb07c74bc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -12,6 +12,29 @@
#define NXP_ENETC_VENDOR_ID 0x1131
#define NXP_ENETC_PF_DEV_ID 0xe101
+/**********************Station interface registers************************/
+/* Station interface LSO segmentation flag mask register 0/1 */
+#define ENETC4_SILSOSFMR0 0x1300
+#define SILSOSFMR0_TCP_MID_SEG GENMASK(27, 16)
+#define SILSOSFMR0_TCP_1ST_SEG GENMASK(11, 0)
+#define SILSOSFMR0_VAL_SET(first, mid) (FIELD_PREP(SILSOSFMR0_TCP_MID_SEG, mid) | \
+ FIELD_PREP(SILSOSFMR0_TCP_1ST_SEG, first))
+
+#define ENETC4_SILSOSFMR1 0x1304
+#define SILSOSFMR1_TCP_LAST_SEG GENMASK(11, 0)
+#define ENETC4_TCP_FLAGS_FIN BIT(0)
+#define ENETC4_TCP_FLAGS_SYN BIT(1)
+#define ENETC4_TCP_FLAGS_RST BIT(2)
+#define ENETC4_TCP_FLAGS_PSH BIT(3)
+#define ENETC4_TCP_FLAGS_ACK BIT(4)
+#define ENETC4_TCP_FLAGS_URG BIT(5)
+#define ENETC4_TCP_FLAGS_ECE BIT(6)
+#define ENETC4_TCP_FLAGS_CWR BIT(7)
+#define ENETC4_TCP_FLAGS_NS BIT(8)
+/* According to tso_build_hdr(), clear all special flags on every segment but the last. */
+#define ENETC4_TCP_NL_SEG_FLAGS_DMASK (ENETC4_TCP_FLAGS_FIN | \
+ ENETC4_TCP_FLAGS_RST | ENETC4_TCP_FLAGS_PSH)
+
/***************************ENETC port registers**************************/
#define ENETC4_ECAPR0 0x0
#define ECAPR0_RFS BIT(2)
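
With these definitions, the value that enetc_set_lso_flags_mask() programs into ENETC4_SILSOSFMR0 expands as follows:

    /* ENETC4_TCP_NL_SEG_FLAGS_DMASK = FIN | RST | PSH = 0x00d, so:
     *
     *   SILSOSFMR0_VAL_SET(0xd, 0xd)
     *     = FIELD_PREP(GENMASK(27, 16), 0xd) | FIELD_PREP(GENMASK(11, 0), 0xd)
     *     = 0x000d000d
     *
     * i.e. the same mask is applied to the first and middle segments,
     * while ENETC4_SILSOSFMR1 (the last-segment mask) is written as 0,
     * matching tso_build_hdr(), which strips these flags from every
     * segment except the last.
     */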
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 55ba949230ff..4098f01479bc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -25,6 +25,7 @@
#define ENETC_SIPCAPR0 0x20
#define ENETC_SIPCAPR0_RSS BIT(8)
#define ENETC_SIPCAPR0_RFS BIT(2)
+#define ENETC_SIPCAPR0_LSO BIT(1)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
#define ENETC_SIRBGCR 0x38
@@ -554,11 +555,23 @@ static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
union enetc_tx_bd {
struct {
__le64 addr;
- __le16 buf_len;
+ union {
+ __le16 buf_len;
+ __le16 hdr_len; /* For LSO, ENETC 4.1 and later */
+ };
__le16 frm_len;
union {
struct {
- u8 reserved[3];
+ u8 l3_aux0;
+#define ENETC_TX_BD_L3_START GENMASK(6, 0)
+#define ENETC_TX_BD_IPCS BIT(7)
+ u8 l3_aux1;
+#define ENETC_TX_BD_L3_HDR_LEN GENMASK(6, 0)
+#define ENETC_TX_BD_L3T BIT(7)
+ u8 l4_aux;
+#define ENETC_TX_BD_L4T GENMASK(7, 5)
+#define ENETC_TXBD_L4T_UDP 1
+#define ENETC_TXBD_L4T_TCP 2
u8 flags;
}; /* default layout */
__le32 txstart;
@@ -569,23 +582,27 @@ union enetc_tx_bd {
__le32 tstamp;
__le16 tpid;
__le16 vid;
- u8 reserved[6];
+ __le16 lso_sg_size; /* For ENETC 4.1 and later */
+ __le16 frm_len_ext; /* For ENETC 4.1 and later */
+ u8 reserved[2];
u8 e_flags;
u8 flags;
} ext; /* Tx BD extension */
struct {
__le32 tstamp;
- u8 reserved[10];
+ u8 reserved[8];
+ __le16 lso_err_count; /* For ENETC 4.1 and later */
u8 status;
u8 flags;
} wb; /* writeback descriptor */
};
enum enetc_txbd_flags {
- ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */
+ ENETC_TXBD_FLAGS_L4CS = BIT(0), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TSE = BIT(1),
+ ENETC_TXBD_FLAGS_LSO = BIT(1), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_W = BIT(2),
- ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */
+ ENETC_TXBD_FLAGS_CSUM_LSO = BIT(3), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TXSTART = BIT(4),
ENETC_TXBD_FLAGS_EX = BIT(6),
ENETC_TXBD_FLAGS_F = BIT(7)
@@ -654,6 +671,8 @@ union enetc_rx_bd {
#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */
#define ENETC_CBD_STATUS_MASK 0xf
+#define ENETC_TPID_8021Q 0
+
struct enetc_cmd_rfse {
u8 smac_h[6];
u8 smac_m[6];
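
A kernel-context sketch (not standalone) of how the new l3_aux0/l3_aux1/l4_aux fields might be filled for an IPv4/TCP frame, mirroring enetc_lso_map_hdr(); the 14-byte L3 offset and 20-byte IP header length are assumed values:

    static void example_fill_csum_fields(union enetc_tx_bd *txbd)
    {
        /* L3 header starts after a plain Ethernet header */
        txbd->l3_aux0 = FIELD_PREP(ENETC_TX_BD_L3_START, 14);
        /* IPv4: request IP header checksum; L3T stays clear */
        txbd->l3_aux0 |= ENETC_TX_BD_IPCS;
        /* IP header length in 32-bit words: 20 / 4 = 5 */
        txbd->l3_aux1 = FIELD_PREP(ENETC_TX_BD_L3_HDR_LEN, 20 / 4);
        txbd->l4_aux = FIELD_PREP(ENETC_TX_BD_L4T, ENETC_TXBD_L4T_TCP);
        txbd->flags = ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS;
    }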
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
index 0eecfc833164..3fd9b0727875 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
@@ -101,6 +101,7 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
priv->msg_enable = (NETIF_MSG_WOL << 1) - 1;
priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
@@ -109,16 +110,24 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
ndev->priv_flags |= IFF_UNICAST_FLT;
+ if (si->drvdata->tx_csum)
+ priv->active_offloads |= ENETC_F_TXCSUM;
+
+ if (si->hw_features & ENETC_SI_F_LSO)
+ priv->active_offloads |= ENETC_F_LSO;
+
/* TODO: currently, i.MX95 ENETC driver does not support advanced features */
if (!is_enetc_rev1(si)) {
ndev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index a5f8ce576b6e..3768752b6008 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -136,6 +136,7 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
priv->sysclk_freq = si->drvdata->sysclk_freq;
+ priv->max_frags = si->drvdata->max_frags;
ndev->netdev_ops = ndev_ops;
enetc_set_ethtool_ops(ndev);
ndev->watchdog_timeo = 5 * HZ;
@@ -144,11 +145,13 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1cca0425d493..c81f2ea588f2 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -671,8 +671,6 @@ struct fec_enet_private {
unsigned int tx_time_itr;
unsigned int itr_clk_rate;
- /* tx lpi eee mode */
- struct ethtool_keee eee;
unsigned int clk_ref_rate;
/* ptp clock period in ns*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 4566848e1d7c..68725506a095 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2054,14 +2054,14 @@ static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
return us * (fep->clk_ref_rate / 1000) / 1000;
}
-static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
+static int fec_enet_eee_mode_set(struct net_device *ndev, u32 lpi_timer,
+ bool enable)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
unsigned int sleep_cycle, wake_cycle;
if (enable) {
- sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, lpi_timer);
wake_cycle = sleep_cycle;
} else {
sleep_cycle = 0;
@@ -2114,7 +2114,9 @@ static void fec_enet_adjust_link(struct net_device *ndev)
napi_enable(&fep->napi);
}
if (fep->quirks & FEC_QUIRK_HAS_EEE)
- fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi);
+ fec_enet_eee_mode_set(ndev,
+ phy_dev->eee_cfg.tx_lpi_timer,
+ phy_dev->enable_tx_lpi);
} else {
if (fep->link) {
netif_stop_queue(ndev);
@@ -3190,7 +3192,6 @@ static int
fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3198,8 +3199,6 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- edata->tx_lpi_timer = p->tx_lpi_timer;
-
return phy_ethtool_get_eee(ndev->phydev, edata);
}
@@ -3207,7 +3206,6 @@ static int
fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3215,8 +3213,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- p->tx_lpi_timer = edata->tx_lpi_timer;
-
return phy_ethtool_set_eee(ndev->phydev, edata);
}
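
fec_enet_us_to_tx_cycle(), visible in the hunk above, converts the LPI timer from microseconds into reference-clock cycles. A quick standalone check of the arithmetic with an assumed 50 MHz clk_ref_rate and a 100 us timer:

    #include <stdio.h>

    int main(void)
    {
        unsigned int clk_ref_rate = 50000000;   /* assumed 50 MHz */
        unsigned int us = 100;                  /* assumed LPI timer */

        /* same expression as fec_enet_us_to_tx_cycle() */
        printf("%u cycles\n", us * (clk_ref_rate / 1000) / 1000); /* 5000 */
        return 0;
    }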
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index fb416d60dcd7..11887458f050 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2690,13 +2690,12 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
{
struct fman *fman;
struct device_node *fm_node, *muram_node;
+ void __iomem *base_addr;
struct resource *res;
u32 val, range[2];
int err, irq;
struct clk *clk;
u32 clk_rate;
- phys_addr_t phys_base_addr;
- resource_size_t mem_size;
fman = kzalloc(sizeof(*fman), GFP_KERNEL);
if (!fman)
@@ -2724,18 +2723,6 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
goto fman_node_put;
fman->dts_params.err_irq = err;
- /* Get the FM address */
- res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -EINVAL;
- dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
- __func__);
- goto fman_node_put;
- }
-
- phys_base_addr = res->start;
- mem_size = resource_size(res);
-
clk = of_clk_get(fm_node, 0);
if (IS_ERR(clk)) {
err = PTR_ERR(clk);
@@ -2803,24 +2790,16 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
}
}
- fman->dts_params.res =
- devm_request_mem_region(&of_dev->dev, phys_base_addr,
- mem_size, "fman");
- if (!fman->dts_params.res) {
- err = -EBUSY;
- dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
- __func__);
- goto fman_free;
- }
-
- fman->dts_params.base_addr =
- devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
- if (!fman->dts_params.base_addr) {
- err = -ENOMEM;
+ base_addr = devm_platform_get_and_ioremap_resource(of_dev, 0, &res);
+ if (IS_ERR(base_addr)) {
+ err = PTR_ERR(base_addr);
dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
goto fman_free;
}
+ fman->dts_params.base_addr = base_addr;
+ fman->dts_params.res = res;
+
fman->dev = &of_dev->dev;
err = of_platform_populate(fm_node, NULL, NULL, &of_dev->dev);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 85617bb94959..b3e2a596ad2c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -755,12 +755,12 @@ static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs)
return container_of(pcs, struct fman_mac, pcs);
}
-static void dtsec_pcs_get_state(struct phylink_pcs *pcs,
+static void dtsec_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct fman_mac *dtsec = pcs_to_dtsec(pcs);
- phylink_mii_c22_pcs_get_state(dtsec->tbidev, state);
+ phylink_mii_c22_pcs_get_state(dtsec->tbidev, neg_mode, state);
}
static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 6663c1768089..88510f822759 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -34,6 +34,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
@@ -132,7 +133,6 @@ static const struct ucc_geth_info ugeth_primary_info = {
.transmitFlowControl = 1,
.maxGroupAddrInHash = 4,
.maxIndAddrInHash = 4,
- .prel = 7,
.maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
.minFrameLength = 64,
.maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
@@ -1205,34 +1205,6 @@ static int init_mac_station_addr_regs(u8 address_byte_0,
return 0;
}
-static int init_check_frame_length_mode(int length_check,
- u32 __iomem *maccfg2_register)
-{
- u32 value = 0;
-
- value = in_be32(maccfg2_register);
-
- if (length_check)
- value |= MACCFG2_LC;
- else
- value &= ~MACCFG2_LC;
-
- out_be32(maccfg2_register, value);
- return 0;
-}
-
-static int init_preamble_length(u8 preamble_length,
- u32 __iomem *maccfg2_register)
-{
- if ((preamble_length < 3) || (preamble_length > 7))
- return -EINVAL;
-
- clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
- preamble_length << MACCFG2_PREL_SHIFT);
-
- return 0;
-}
-
static int init_rx_parameters(int reject_broadcast,
int receive_short_frames,
int promiscuous, u32 __iomem *upsmr_register)
@@ -1287,94 +1259,11 @@ static int init_min_frame_len(u16 min_frame_length,
return 0;
}
-static int adjust_enet_interface(struct ucc_geth_private *ugeth)
+static bool phy_interface_mode_is_reduced(phy_interface_t interface)
{
- struct ucc_geth_info *ug_info;
- struct ucc_geth __iomem *ug_regs;
- struct ucc_fast __iomem *uf_regs;
- int ret_val;
- u32 upsmr, maccfg2;
- u16 value;
-
- ugeth_vdbg("%s: IN", __func__);
-
- ug_info = ugeth->ug_info;
- ug_regs = ugeth->ug_regs;
- uf_regs = ugeth->uccf->uf_regs;
-
- /* Set MACCFG2 */
- maccfg2 = in_be32(&ug_regs->maccfg2);
- maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
- if ((ugeth->max_speed == SPEED_10) ||
- (ugeth->max_speed == SPEED_100))
- maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
- else if (ugeth->max_speed == SPEED_1000)
- maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
- maccfg2 |= ug_info->padAndCrc;
- out_be32(&ug_regs->maccfg2, maccfg2);
-
- /* Set UPSMR */
- upsmr = in_be32(&uf_regs->upsmr);
- upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
- UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
- upsmr |= UCC_GETH_UPSMR_RPM;
- switch (ugeth->max_speed) {
- case SPEED_10:
- upsmr |= UCC_GETH_UPSMR_R10M;
- fallthrough;
- case SPEED_100:
- if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
- upsmr |= UCC_GETH_UPSMR_RMM;
- }
- }
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- upsmr |= UCC_GETH_UPSMR_TBIM;
- }
- if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
- upsmr |= UCC_GETH_UPSMR_SGMM;
-
- out_be32(&uf_regs->upsmr, upsmr);
-
- /* Disable autonegotiation in tbi mode, because by default it
- comes up in autonegotiation mode. */
- /* Note that this depends on proper setting in utbipar register. */
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- struct ucc_geth_info *ug_info = ugeth->ug_info;
- struct phy_device *tbiphy;
-
- if (!ug_info->tbi_node)
- pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
-
- tbiphy = of_phy_find_device(ug_info->tbi_node);
- if (!tbiphy)
- pr_warn("Could not get TBI device\n");
-
- value = phy_read(tbiphy, ENET_TBI_MII_CR);
- value &= ~0x1000; /* Turn off autonegotiation */
- phy_write(tbiphy, ENET_TBI_MII_CR, value);
-
- put_device(&tbiphy->mdio.dev);
- }
-
- init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
-
- ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
- if (ret_val != 0) {
- if (netif_msg_probe(ugeth))
- pr_err("Preamble length must be between 3 and 7 inclusive\n");
- return ret_val;
- }
-
- return 0;
+ return phy_interface_mode_is_rgmii(interface) ||
+ interface == PHY_INTERFACE_MODE_RMII ||
+ interface == PHY_INTERFACE_MODE_RTBI;
}
static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
@@ -1545,108 +1434,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
/* allow to xmit again */
netif_tx_wake_all_queues(ugeth->ndev);
- __netdev_watchdog_up(ugeth->ndev);
-}
-
-/* Called every time the controller might need to be made
- * aware of new link state. The PHY code conveys this
- * information through variables in the ugeth structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-
-static void adjust_link(struct net_device *dev)
-{
- struct ucc_geth_private *ugeth = netdev_priv(dev);
- struct ucc_geth __iomem *ug_regs;
- struct ucc_fast __iomem *uf_regs;
- struct phy_device *phydev = ugeth->phydev;
- int new_state = 0;
-
- ug_regs = ugeth->ug_regs;
- uf_regs = ugeth->uccf->uf_regs;
-
- if (phydev->link) {
- u32 tempval = in_be32(&ug_regs->maccfg2);
- u32 upsmr = in_be32(&uf_regs->upsmr);
- /* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
- if (phydev->duplex != ugeth->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
- tempval &= ~(MACCFG2_FDX);
- else
- tempval |= MACCFG2_FDX;
- ugeth->oldduplex = phydev->duplex;
- }
-
- if (phydev->speed != ugeth->oldspeed) {
- new_state = 1;
- switch (phydev->speed) {
- case SPEED_1000:
- tempval = ((tempval &
- ~(MACCFG2_INTERFACE_MODE_MASK)) |
- MACCFG2_INTERFACE_MODE_BYTE);
- break;
- case SPEED_100:
- case SPEED_10:
- tempval = ((tempval &
- ~(MACCFG2_INTERFACE_MODE_MASK)) |
- MACCFG2_INTERFACE_MODE_NIBBLE);
- /* if reduced mode, re-set UPSMR.R10M */
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
- (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- if (phydev->speed == SPEED_10)
- upsmr |= UCC_GETH_UPSMR_R10M;
- else
- upsmr &= ~UCC_GETH_UPSMR_R10M;
- }
- break;
- default:
- if (netif_msg_link(ugeth))
- pr_warn(
- "%s: Ack! Speed (%d) is not 10/100/1000!",
- dev->name, phydev->speed);
- break;
- }
- ugeth->oldspeed = phydev->speed;
- }
-
- if (!ugeth->oldlink) {
- new_state = 1;
- ugeth->oldlink = 1;
- }
-
- if (new_state) {
- /*
- * To change the MAC configuration we need to disable
- * the controller. To do so, we have to either grab
- * ugeth->lock, which is a bad idea since 'graceful
- * stop' commands might take quite a while, or we can
- * quiesce driver's activity.
- */
- ugeth_quiesce(ugeth);
- ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
-
- out_be32(&ug_regs->maccfg2, tempval);
- out_be32(&uf_regs->upsmr, upsmr);
-
- ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- ugeth_activate(ugeth);
- }
- } else if (ugeth->oldlink) {
- new_state = 1;
- ugeth->oldlink = 0;
- ugeth->oldspeed = 0;
- ugeth->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(ugeth))
- phy_print_status(phydev);
+ netdev_watchdog_up(ugeth->ndev);
}
/* Initialize TBI PHY interface for communicating with the
@@ -1664,8 +1452,7 @@ static void uec_configure_serdes(struct net_device *dev)
struct phy_device *tbiphy;
if (!ug_info->tbi_node) {
- dev_warn(&dev->dev, "SGMII mode requires that the device "
- "tree specify a tbi-handle\n");
+ dev_warn(&dev->dev, "SGMII mode requires that the device tree specify a tbi-handle\n");
return;
}
@@ -1696,34 +1483,145 @@ static void uec_configure_serdes(struct net_device *dev)
put_device(&tbiphy->mdio.dev);
}
-/* Configure the PHY for dev.
- * returns 0 if success. -1 if failure
- */
-static int init_phy(struct net_device *dev)
+static void ugeth_mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct ucc_geth_private *priv = netdev_priv(dev);
- struct ucc_geth_info *ug_info = priv->ug_info;
- struct phy_device *phydev;
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
+ struct ucc_fast __iomem *uf_regs = ugeth->uccf->uf_regs;
+ u32 old_maccfg2, maccfg2 = in_be32(&ug_regs->maccfg2);
+ u32 old_upsmr, upsmr = in_be32(&uf_regs->upsmr);
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
+ old_maccfg2 = maccfg2;
+ old_upsmr = upsmr;
- phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
- priv->phy_interface);
- if (!phydev) {
- dev_err(&dev->dev, "Could not attach to PHY\n");
- return -ENODEV;
+ /* No length check */
+ maccfg2 &= ~MACCFG2_LC;
+ maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
+ upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
+ UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
+
+ if (speed == SPEED_10 || speed == SPEED_100)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ else if (speed == SPEED_1000)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+
+ maccfg2 |= ug_info->padAndCrc;
+
+ if (phy_interface_mode_is_reduced(interface)) {
+ if (interface != PHY_INTERFACE_MODE_RMII)
+ upsmr |= UCC_GETH_UPSMR_RPM;
+
+ switch (speed) {
+ case SPEED_10:
+ upsmr |= UCC_GETH_UPSMR_R10M;
+ fallthrough;
+ case SPEED_100:
+ if (interface != PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_RMM;
+ }
}
- if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
- uec_configure_serdes(dev);
+ if (interface == PHY_INTERFACE_MODE_TBI ||
+ interface == PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_TBIM;
- phy_set_max_speed(phydev, priv->max_speed);
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ upsmr |= UCC_GETH_UPSMR_SGMM;
- priv->phydev = phydev;
+ if (duplex == DUPLEX_HALF)
+ maccfg2 &= ~(MACCFG2_FDX);
+ else
+ maccfg2 |= MACCFG2_FDX;
- return 0;
+ if (maccfg2 != old_maccfg2 || upsmr != old_upsmr) {
+ /*
+ * To change the MAC configuration we need to disable
+ * the controller. To do so, we have to either grab
+ * ugeth->lock, which is a bad idea since 'graceful
+ * stop' commands might take quite a while, or we can
+ * quiesce driver's activity.
+ */
+ ugeth_quiesce(ugeth);
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ out_be32(&ug_regs->maccfg2, maccfg2);
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ ugeth_activate(ugeth);
+ }
+
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ uec_configure_serdes(ndev);
+
+ if (!phylink_autoneg_inband(mode)) {
+ ug_info->aufc = 0;
+ ug_info->receiveFlowControl = rx_pause;
+ ug_info->transmitFlowControl = tx_pause;
+
+ init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ ug_info->transmitFlowControl,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+}
+
+static void ugeth_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+}
+
+static void ugeth_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ u16 value;
+
+ if (state->interface == PHY_INTERFACE_MODE_TBI ||
+ state->interface == PHY_INTERFACE_MODE_RTBI) {
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node)
+ pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy)
+ pr_warn("Could not get TBI device\n");
+
+ value = phy_read(tbiphy, ENET_TBI_MII_CR);
+ value &= ~0x1000; /* Turn off autonegotiation */
+ phy_write(tbiphy, ENET_TBI_MII_CR, value);
+
+ put_device(&tbiphy->mdio.dev);
+ }
+
+ if (phylink_autoneg_inband(mode)) {
+ ug_info->aufc = 1;
+
+ init_flow_control_params(ug_info->aufc, 1, 1,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
}
static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
@@ -1995,7 +1893,6 @@ static void ucc_geth_set_multi(struct net_device *dev)
static void ucc_geth_stop(struct ucc_geth_private *ugeth)
{
struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
- struct phy_device *phydev = ugeth->phydev;
ugeth_vdbg("%s: IN", __func__);
@@ -2004,7 +1901,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
* Must be done before disabling the controller
* or deadlock may happen.
*/
- phy_stop(phydev);
+ phylink_stop(ugeth->phylink);
/* Disable the controller */
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
@@ -3246,12 +3143,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
goto err;
}
- err = adjust_enet_interface(ugeth);
- if (err) {
- netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
- goto err;
- }
-
/* Set MACSTNADDR1, MACSTNADDR2 */
/* For more details see the hardware spec. */
init_mac_station_addr_regs(dev->dev_addr[0],
@@ -3263,12 +3154,6 @@ static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
&ugeth->ug_regs->macstnaddr1,
&ugeth->ug_regs->macstnaddr2);
- err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
- if (err) {
- netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n");
- goto err;
- }
-
return 0;
err:
ucc_geth_stop(ugeth);
@@ -3291,10 +3176,10 @@ static int ucc_geth_open(struct net_device *dev)
return -EINVAL;
}
- err = init_phy(dev);
+ err = phylink_of_phy_connect(ugeth->phylink, ugeth->dev->of_node, 0);
if (err) {
- netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n");
- return err;
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
}
err = ucc_geth_init_mac(ugeth);
@@ -3310,13 +3195,13 @@ static int ucc_geth_open(struct net_device *dev)
goto err;
}
- phy_start(ugeth->phydev);
+ phylink_start(ugeth->phylink);
napi_enable(&ugeth->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
device_set_wakeup_capable(&dev->dev,
- qe_alive_during_sleep() || ugeth->phydev->irq);
+ qe_alive_during_sleep() || (dev->phydev && dev->phydev->irq));
device_set_wakeup_enable(&dev->dev, ugeth->wol_en);
return err;
@@ -3337,8 +3222,7 @@ static int ucc_geth_close(struct net_device *dev)
cancel_work_sync(&ugeth->timeout_work);
ucc_geth_stop(ugeth);
- phy_disconnect(ugeth->phydev);
- ugeth->phydev = NULL;
+ phylink_disconnect_phy(ugeth->phylink);
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
@@ -3372,7 +3256,7 @@ static void ucc_geth_timeout_work(struct work_struct *work)
ucc_geth_stop(ugeth);
ucc_geth_init_mac(ugeth);
/* Must start PHY here */
- phy_start(ugeth->phydev);
+ phylink_start(ugeth->phylink);
netif_tx_start_all_queues(dev);
}
@@ -3397,6 +3281,7 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
{
struct net_device *ndev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ bool mac_wol = false;
if (!netif_running(ndev))
return 0;
@@ -3410,14 +3295,17 @@ static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
*/
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
- if (ugeth->wol_en & WAKE_MAGIC) {
+ if (ugeth->wol_en & WAKE_MAGIC && !ugeth->phy_wol_en) {
setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
- } else if (!(ugeth->wol_en & WAKE_PHY)) {
- phy_stop(ugeth->phydev);
+ mac_wol = true;
}
+ rtnl_lock();
+ phylink_suspend(ugeth->phylink, mac_wol);
+ rtnl_unlock();
+
return 0;
}
@@ -3451,12 +3339,9 @@ static int ucc_geth_resume(struct platform_device *ofdev)
}
}
- ugeth->oldlink = 0;
- ugeth->oldspeed = 0;
- ugeth->oldduplex = -1;
-
- phy_stop(ugeth->phydev);
- phy_start(ugeth->phydev);
+ rtnl_lock();
+ phylink_resume(ugeth->phylink);
+ rtnl_unlock();
napi_enable(&ugeth->napi);
netif_device_attach(ndev);
@@ -3469,32 +3354,6 @@ static int ucc_geth_resume(struct platform_device *ofdev)
#define ucc_geth_resume NULL
#endif
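phylink_suspend() and phylink_resume() must be called under the RTNL, hence the explicit lock/unlock around them above; the mac_wol argument tells phylink that the MAC itself stays powered to receive wake-up frames (here: when the QE, not the PHY, handles WAKE_MAGIC), so phylink keeps the link running instead of stopping it. A condensed sketch of the pairing, assuming only <linux/phylink.h> and <linux/rtnetlink.h> (helper names are illustrative):

    static void sketch_wol_suspend(struct phylink *pl, bool mac_wol)
    {
            rtnl_lock();
            phylink_suspend(pl, mac_wol);   /* mac_wol: MAC stays up for WoL */
            rtnl_unlock();
    }

    static void sketch_wol_resume(struct phylink *pl)
    {
            rtnl_lock();
            phylink_resume(pl);             /* undoes phylink_suspend() */
            rtnl_unlock();
    }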
-static phy_interface_t to_phy_interface(const char *phy_connection_type)
-{
- if (strcasecmp(phy_connection_type, "mii") == 0)
- return PHY_INTERFACE_MODE_MII;
- if (strcasecmp(phy_connection_type, "gmii") == 0)
- return PHY_INTERFACE_MODE_GMII;
- if (strcasecmp(phy_connection_type, "tbi") == 0)
- return PHY_INTERFACE_MODE_TBI;
- if (strcasecmp(phy_connection_type, "rmii") == 0)
- return PHY_INTERFACE_MODE_RMII;
- if (strcasecmp(phy_connection_type, "rgmii") == 0)
- return PHY_INTERFACE_MODE_RGMII;
- if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
- return PHY_INTERFACE_MODE_RGMII_ID;
- if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
- return PHY_INTERFACE_MODE_RGMII_TXID;
- if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
- return PHY_INTERFACE_MODE_RGMII_RXID;
- if (strcasecmp(phy_connection_type, "rtbi") == 0)
- return PHY_INTERFACE_MODE_RTBI;
- if (strcasecmp(phy_connection_type, "sgmii") == 0)
- return PHY_INTERFACE_MODE_SGMII;
-
- return PHY_INTERFACE_MODE_MII;
-}
-
static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
@@ -3502,10 +3361,7 @@ static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
- if (!ugeth->phydev)
- return -ENODEV;
-
- return phy_mii_ioctl(ugeth->phydev, rq, cmd);
+ return phylink_mii_ioctl(ugeth->phylink, rq, cmd);
}
static const struct net_device_ops ucc_geth_netdev_ops = {
@@ -3513,7 +3369,6 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_stop = ucc_geth_close,
.ndo_start_xmit = ucc_geth_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = ucc_geth_set_mac_addr,
.ndo_set_rx_mode = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
@@ -3553,6 +3408,12 @@ static int ucc_geth_parse_clock(struct device_node *np, const char *which,
return 0;
}
+static const struct phylink_mac_ops ugeth_mac_ops = {
+ .mac_link_up = ugeth_mac_link_up,
+ .mac_link_down = ugeth_mac_link_down,
+ .mac_config = ugeth_mac_config,
+};
+
static int ucc_geth_probe(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
@@ -3560,23 +3421,12 @@ static int ucc_geth_probe(struct platform_device* ofdev)
struct net_device *dev = NULL;
struct ucc_geth_private *ugeth = NULL;
struct ucc_geth_info *ug_info;
+ struct device_node *phy_node;
+ struct phylink *phylink;
struct resource res;
- int err, ucc_num, max_speed = 0;
+ int err, ucc_num;
const unsigned int *prop;
phy_interface_t phy_interface;
- static const int enet_to_speed[] = {
- SPEED_10, SPEED_10, SPEED_10,
- SPEED_100, SPEED_100, SPEED_100,
- SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
- };
- static const phy_interface_t enet_to_phy_interface[] = {
- PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
- PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
- PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
- PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
- PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
- PHY_INTERFACE_MODE_SGMII,
- };
ugeth_vdbg("%s: IN", __func__);
@@ -3612,57 +3462,35 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info->uf_info.regs = res.start;
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
- ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
- if (!ug_info->phy_node && of_phy_is_fixed_link(np)) {
- /*
- * In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- err = of_phy_register_fixed_link(np);
- if (err)
- return err;
- ug_info->phy_node = of_node_get(np);
- }
-
/* Find the TBI PHY node. If it's not there, we don't support SGMII */
ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
- /* get the phy interface type, or default to MII */
- prop = of_get_property(np, "phy-connection-type", NULL);
- if (!prop) {
- /* handle interface property present in old trees */
- prop = of_get_property(ug_info->phy_node, "interface", NULL);
- if (prop != NULL) {
- phy_interface = enet_to_phy_interface[*prop];
- max_speed = enet_to_speed[*prop];
- } else
- phy_interface = PHY_INTERFACE_MODE_MII;
- } else {
- phy_interface = to_phy_interface((const char *)prop);
- }
-
- /* get speed, or derive from PHY interface */
- if (max_speed == 0)
- switch (phy_interface) {
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_TBI:
- case PHY_INTERFACE_MODE_RTBI:
- case PHY_INTERFACE_MODE_SGMII:
- max_speed = SPEED_1000;
- break;
- default:
- max_speed = SPEED_100;
- break;
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (phy_node) {
+ prop = of_get_property(phy_node, "interface", NULL);
+ if (prop) {
+ dev_err(&ofdev->dev,
+ "Device-tree property 'interface' is no longer supported. Please use 'phy-connection-type' instead.");
+ of_node_put(phy_node);
+ err = -EINVAL;
+ goto err_put_tbi;
}
+ of_node_put(phy_node);
+ }
+
+ err = of_get_phy_mode(np, &phy_interface);
+ if (err) {
+ dev_err(&ofdev->dev, "Invalid phy-connection-type");
+ goto err_put_tbi;
+ }
- if (max_speed == SPEED_1000) {
+ if (phy_interface == PHY_INTERFACE_MODE_GMII ||
+ phy_interface_mode_is_rgmii(phy_interface) ||
+ phy_interface == PHY_INTERFACE_MODE_TBI ||
+ phy_interface == PHY_INTERFACE_MODE_RTBI ||
+ phy_interface == PHY_INTERFACE_MODE_SGMII) {
unsigned int snums = qe_get_num_of_snums();
- /* configure muram FIFOs for gigabit operation */
ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
@@ -3691,7 +3519,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev = devm_alloc_etherdev(&ofdev->dev, sizeof(*ugeth));
if (!dev) {
err = -ENOMEM;
- goto err_deregister_fixed_link;
+ goto err_put_tbi;
}
ugeth = netdev_priv(dev);
@@ -3718,23 +3546,50 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
- ugeth->phy_interface = phy_interface;
- ugeth->max_speed = max_speed;
- /* Carrier starts down, phylib will bring it up */
- netif_carrier_off(dev);
+ ugeth->phylink_config.dev = &dev->dev;
+ ugeth->phylink_config.type = PHYLINK_NETDEV;
+
+ ugeth->phylink_config.mac_capabilities =
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ ugeth->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(ugeth->phylink_config.supported_interfaces);
+
+ if (ug_info->tbi_node) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_TBI,
+ ugeth->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RTBI,
+ ugeth->phylink_config.supported_interfaces);
+ }
+
+ phylink = phylink_create(&ugeth->phylink_config, dev_fwnode(&dev->dev),
+ phy_interface, &ugeth_mac_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto err_put_tbi;
+ }
+
+ ugeth->phylink = phylink;
err = devm_register_netdev(&ofdev->dev, dev);
if (err) {
if (netif_msg_probe(ugeth))
pr_err("%s: Cannot register net device, aborting\n",
dev->name);
- goto err_deregister_fixed_link;
+ goto err_destroy_phylink;
}
err = of_get_ethdev_address(np, dev);
if (err == -EPROBE_DEFER)
- goto err_deregister_fixed_link;
+ goto err_destroy_phylink;
ugeth->ug_info = ug_info;
ugeth->dev = device;
@@ -3743,11 +3598,11 @@ static int ucc_geth_probe(struct platform_device* ofdev)
return 0;
-err_deregister_fixed_link:
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
+err_destroy_phylink:
+ phylink_destroy(phylink);
+err_put_tbi:
of_node_put(ug_info->tbi_node);
- of_node_put(ug_info->phy_node);
+
return err;
}
@@ -3755,13 +3610,10 @@ static void ucc_geth_remove(struct platform_device* ofdev)
{
struct net_device *dev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(dev);
- struct device_node *np = ofdev->dev.of_node;
ucc_geth_memclean(ugeth);
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
+ phylink_destroy(ugeth->phylink);
of_node_put(ugeth->ug_info->tbi_node);
- of_node_put(ugeth->ug_info->phy_node);
}
static const struct of_device_id ucc_geth_match[] = {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 4294ed096ebb..38789faae706 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/phylink.h>
#include <linux/if_ether.h>
#include <soc/fsl/qe/immap_qe.h>
@@ -921,7 +922,8 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1
#define UCC_GETH_MACCFG1_INIT 0
-#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
+#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1 | \
+ (7 << MACCFG2_PREL_SHIFT))
/* Ethernet Address Type. */
enum enet_addr_type {
@@ -1073,6 +1075,9 @@ struct ucc_geth_tad_params {
u16 vid;
};
+struct phylink;
+struct phylink_config;
+
/* GETH protocol initialization structure */
struct ucc_geth_info {
struct ucc_fast_info uf_info;
@@ -1088,7 +1093,6 @@ struct ucc_geth_info {
u8 miminumInterFrameGapEnforcement;
u8 backToBackInterFrameGap;
int ipAddressAlignment;
- int lengthCheckRx;
u32 mblinterval;
u16 nortsrbytetime;
u8 fracsiz;
@@ -1114,7 +1118,6 @@ struct ucc_geth_info {
int transmitFlowControl;
u8 maxGroupAddrInHash;
u8 maxIndAddrInHash;
- u8 prel;
u16 maxFrameLength;
u16 minFrameLength;
u16 maxD1Length;
@@ -1125,7 +1128,6 @@ struct ucc_geth_info {
u32 eventRegMask;
u16 pausePeriod;
u16 extensionField;
- struct device_node *phy_node;
struct device_node *tbi_node;
u8 weightfactor[NUM_TX_QUEUES];
u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
@@ -1210,14 +1212,12 @@ struct ucc_geth_private {
u16 skb_dirtytx[NUM_TX_QUEUES];
struct ugeth_mii_info *mii_info;
- struct phy_device *phydev;
- phy_interface_t phy_interface;
- int max_speed;
uint32_t msg_enable;
- int oldspeed;
- int oldduplex;
- int oldlink;
- int wol_en;
+ u32 wol_en;
+ u32 phy_wol_en;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
struct device_node *node;
};
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 699f346faf5c..1fb49e5a414a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -104,14 +104,8 @@ static int
uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (!phydev)
- return -ENODEV;
-
- phy_ethtool_ksettings_get(phydev, cmd);
-
- return 0;
+ return phylink_ethtool_ksettings_get(ugeth->phylink, cmd);
}
static int
@@ -119,12 +113,8 @@ uec_set_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_ksettings_set(phydev, cmd);
+ return phylink_ethtool_ksettings_set(ugeth->phylink, cmd);
}
static void
@@ -133,12 +123,7 @@ uec_get_pauseparam(struct net_device *netdev,
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- pause->autoneg = ugeth->phydev->autoneg;
-
- if (ugeth->ug_info->receiveFlowControl)
- pause->rx_pause = 1;
- if (ugeth->ug_info->transmitFlowControl)
- pause->tx_pause = 1;
+ return phylink_ethtool_get_pauseparam(ugeth->phylink, pause);
}
static int
@@ -146,30 +131,11 @@ uec_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- int ret = 0;
ugeth->ug_info->receiveFlowControl = pause->rx_pause;
ugeth->ug_info->transmitFlowControl = pause->tx_pause;
- if (ugeth->phydev->autoneg) {
- if (netif_running(netdev)) {
- /* FIXME: automatically restart */
- netdev_info(netdev, "Please re-open the interface\n");
- }
- } else {
- struct ucc_geth_info *ug_info = ugeth->ug_info;
-
- ret = init_flow_control_params(ug_info->aufc,
- ug_info->receiveFlowControl,
- ug_info->transmitFlowControl,
- ug_info->pausePeriod,
- ug_info->extensionField,
- &ugeth->uccf->uf_regs->upsmr,
- &ugeth->ug_regs->uempr,
- &ugeth->ug_regs->maccfg1);
- }
-
- return ret;
+ return phylink_ethtool_set_pauseparam(ugeth->phylink, pause);
}
static uint32_t
@@ -343,28 +309,42 @@ uec_get_drvinfo(struct net_device *netdev,
static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
- if (phydev && phydev->irq)
- wol->supported |= WAKE_PHY;
+ phylink_ethtool_get_wol(ugeth->phylink, wol);
+
if (qe_alive_during_sleep())
wol->supported |= WAKE_MAGIC;
- wol->wolopts = ugeth->wol_en;
+ wol->wolopts |= ugeth->wol_en;
}
static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
- struct phy_device *phydev = ugeth->phydev;
+ int ret = 0;
- if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
- return -EINVAL;
- else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq))
+ ret = phylink_ethtool_set_wol(ugeth->phylink, wol);
+ if (ret == -EOPNOTSUPP) {
+ ugeth->phy_wol_en = 0;
+ } else if (ret) {
+ return ret;
+ } else {
+ ugeth->phy_wol_en = wol->wolopts;
+ goto out;
+ }
+
+ /* If the PHY isn't handling the WoL and the MAC is asked to handle
+ * more than WAKE_MAGIC, return an error.
+ */
+ if (!ugeth->phy_wol_en && wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep())
+
+ if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep())
return -EINVAL;
+out:
ugeth->wol_en = wol->wolopts;
device_set_wakeup_enable(&netdev->dev, ugeth->wol_en);
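The set_wol path above delegates to the PHY first and falls back to the MAC only when the PHY reports -EOPNOTSUPP, in which case the MAC can handle WAKE_MAGIC alone, and only if the QE stays alive across sleep. A condensed, hypothetical restatement of that decision (the helper name and the qe_alive parameter are illustrative, assuming <linux/ethtool.h> and <linux/phylink.h>):

    static int sketch_set_wol(struct phylink *pl, struct ethtool_wolinfo *wol,
                              u32 *phy_wol_en, bool qe_alive)
    {
            int ret = phylink_ethtool_set_wol(pl, wol);

            if (!ret) {                     /* PHY handles everything requested */
                    *phy_wol_en = wol->wolopts;
                    return 0;
            }
            if (ret != -EOPNOTSUPP)         /* genuine failure */
                    return ret;

            *phy_wol_en = 0;                /* fall back to the MAC */
            if (wol->wolopts & ~WAKE_MAGIC) /* MAC only does magic packets */
                    return -EINVAL;
            if ((wol->wolopts & WAKE_MAGIC) && !qe_alive)
                    return -EINVAL;
            return 0;
    }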
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 060e0e674938..aa7d723011d0 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -1128,20 +1128,6 @@ int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
return gve_adminq_execute_cmd(priv, &cmd);
}
-int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
-{
- union gve_adminq_command cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
- cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
- .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
- .parameter_value = cpu_to_be64(mtu),
- };
-
- return gve_adminq_execute_cmd(priv, &cmd);
-}
-
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval)
{
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 863683de9694..228217458275 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -612,7 +612,6 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
-int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval);
int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index f879426cb552..394debc62268 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -1146,8 +1146,7 @@ static void gve_handle_miss_completion(struct gve_priv *priv,
/* jiffies can wraparound but time comparisons can handle overflows. */
pending_packet->timeout_jiffies =
jiffies +
- msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT *
- MSEC_PER_SEC);
+ secs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT);
add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet);
*bytes += pending_packet->skb->len;
@@ -1191,8 +1190,7 @@ static void remove_miss_completions(struct gve_priv *priv,
pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
pending_packet->timeout_jiffies =
jiffies +
- msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT *
- MSEC_PER_SEC);
+ secs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT);
/* Maintain pending packet in another list so the packet can be
* unallocated at a later time.
*/
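The two gve hunks above are behaviour-preserving: for whole-second timeouts, secs_to_jiffies(n) is equivalent to msecs_to_jiffies(n * MSEC_PER_SEC), without the open-coded multiplication. Illustrative only, assuming <linux/jiffies.h>:

    unsigned long t_old = msecs_to_jiffies(60 * MSEC_PER_SEC);  /* old form */
    unsigned long t_new = secs_to_jiffies(60);                  /* new form */
    /* t_old == t_new for any whole number of seconds */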
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
index ae58ac38c206..7ea15f9ef849 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -5,4 +5,5 @@
obj-$(CONFIG_HIBMCGE) += hibmcge.o
-hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o
+hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \
+ hbg_debugfs.o hbg_err.o
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index 96daf058d387..b4300d8ea4ad 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -4,6 +4,7 @@
#ifndef __HBG_COMMON_H
#define __HBG_COMMON_H
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include "hbg_reg.h"
@@ -33,6 +34,14 @@ enum hbg_tx_state {
enum hbg_nic_state {
HBG_NIC_STATE_EVENT_HANDLING = 0,
+ HBG_NIC_STATE_RESETTING,
+ HBG_NIC_STATE_RESET_FAIL,
+};
+
+enum hbg_reset_type {
+ HBG_RESET_TYPE_NONE = 0,
+ HBG_RESET_TYPE_FLR,
+ HBG_RESET_TYPE_FUNCTION,
};
struct hbg_buffer {
@@ -84,6 +93,7 @@ struct hbg_dev_specs {
u32 vlan_layers;
u32 max_mtu;
u32 min_mtu;
+ u32 uc_mac_num;
u32 max_frame_len;
u32 rx_buf_size;
@@ -114,6 +124,22 @@ struct hbg_mac {
u32 duplex;
u32 autoneg;
u32 link_status;
+ u32 pause_autoneg;
+};
+
+struct hbg_mac_table_entry {
+ u8 addr[ETH_ALEN];
+};
+
+struct hbg_mac_filter {
+ struct hbg_mac_table_entry *mac_table;
+ u32 table_max_len;
+ bool enabled;
+};
+
+/* saved for restore after reset */
+struct hbg_user_def {
+ struct ethtool_pauseparam pause_param;
};
struct hbg_priv {
@@ -126,6 +152,9 @@ struct hbg_priv {
struct hbg_vector vectors;
struct hbg_ring tx_ring;
struct hbg_ring rx_ring;
+ struct hbg_mac_filter filter;
+ enum hbg_reset_type reset_type;
+ struct hbg_user_def user_def;
};
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
new file mode 100644
index 000000000000..8473c43d171a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/seq_file.h>
+#include <linux/string_choices.h>
+#include "hbg_common.h"
+#include "hbg_debugfs.h"
+#include "hbg_hw.h"
+#include "hbg_irq.h"
+#include "hbg_txrx.h"
+
+static struct dentry *hbg_dbgfs_root;
+
+struct hbg_dbg_info {
+ const char *name;
+ int (*read)(struct seq_file *seq, void *data);
+};
+
+#define state_str_true_false(p, s) str_true_false(test_bit(s, &(p)->state))
+
+static void hbg_dbg_ring(struct hbg_priv *priv, struct hbg_ring *ring,
+ struct seq_file *s)
+{
+ u32 irq_mask = ring->dir == HBG_DIR_TX ? HBG_INT_MSK_TX_B :
+ HBG_INT_MSK_RX_B;
+
+ seq_printf(s, "ring used num: %u\n",
+ hbg_get_queue_used_num(ring));
+ seq_printf(s, "ring max num: %u\n", ring->len);
+ seq_printf(s, "ring head: %u, tail: %u\n", ring->head, ring->tail);
+ seq_printf(s, "fifo used num: %u\n",
+ hbg_hw_get_fifo_used_num(priv, ring->dir));
+ seq_printf(s, "fifo max num: %u\n",
+ hbg_get_spec_fifo_max_num(priv, ring->dir));
+ seq_printf(s, "irq enabled: %s\n",
+ str_true_false(hbg_hw_irq_is_enabled(priv, irq_mask)));
+}
+
+static int hbg_dbg_tx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->tx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_rx_ring(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_dbg_ring(priv, &priv->rx_ring, s);
+ return 0;
+}
+
+static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_irq_info *info;
+ u32 i;
+
+ for (i = 0; i < priv->vectors.info_array_len; i++) {
+ info = &priv->vectors.info_array[i];
+ seq_printf(s,
+ "%-20s: enabled: %-5s, logged: %-5s, count: %llu\n",
+ info->name,
+ str_true_false(hbg_hw_irq_is_enabled(priv,
+ info->mask)),
+ str_true_false(info->need_print),
+ info->count);
+ }
+
+ return 0;
+}
+
+static int hbg_dbg_mac_table(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_mac_filter *filter;
+ u32 i;
+
+ filter = &priv->filter;
+ seq_printf(s, "mac addr max count: %u\n", filter->table_max_len);
+ seq_printf(s, "filter enabled: %s\n", str_true_false(filter->enabled));
+
+ for (i = 0; i < filter->table_max_len; i++) {
+ if (is_zero_ether_addr(filter->mac_table[i].addr))
+ continue;
+
+ seq_printf(s, "[%u] %pM\n", i, filter->mac_table[i].addr);
+ }
+
+ return 0;
+}
+
+static const char * const reset_type_str[] = {"None", "FLR", "Function"};
+
+static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
+{
+ struct net_device *netdev = dev_get_drvdata(s->private);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ seq_printf(s, "event handling state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_EVENT_HANDLING));
+ seq_printf(s, "resetting state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESETTING));
+ seq_printf(s, "reset fail state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL));
+ seq_printf(s, "last reset type: %s\n",
+ reset_type_str[priv->reset_type]);
+
+ return 0;
+}
+
+static const struct hbg_dbg_info hbg_dbg_infos[] = {
+ { "tx_ring", hbg_dbg_tx_ring },
+ { "rx_ring", hbg_dbg_rx_ring },
+ { "irq_info", hbg_dbg_irq_info },
+ { "mac_table", hbg_dbg_mac_table },
+ { "nic_state", hbg_dbg_nic_state },
+};
+
+static void hbg_debugfs_uninit(void *data)
+{
+ debugfs_remove_recursive((struct dentry *)data);
+}
+
+void hbg_debugfs_init(struct hbg_priv *priv)
+{
+ const char *name = pci_name(priv->pdev);
+ struct device *dev = &priv->pdev->dev;
+ struct dentry *root;
+ u32 i;
+
+ root = debugfs_create_dir(name, hbg_dbgfs_root);
+
+ for (i = 0; i < ARRAY_SIZE(hbg_dbg_infos); i++)
+ debugfs_create_devm_seqfile(dev, hbg_dbg_infos[i].name,
+ root, hbg_dbg_infos[i].read);
+
+ /* Ignore the failure because debugfs is not a key feature. */
+ devm_add_action_or_reset(dev, hbg_debugfs_uninit, root);
+}
+
+void hbg_debugfs_register(void)
+{
+ hbg_dbgfs_root = debugfs_create_dir("hibmcge", NULL);
+}
+
+void hbg_debugfs_unregister(void)
+{
+ debugfs_remove_recursive(hbg_dbgfs_root);
+ hbg_dbgfs_root = NULL;
+}
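Each file above is registered through debugfs_create_devm_seqfile(), so the seq_file plumbing and the per-device directory are torn down by the devm action rather than by explicit cleanup in the unbind path. Given the directory layout created here, the files should appear under /sys/kernel/debug/hibmcge/<pci-address>/ as tx_ring, rx_ring, irq_info, mac_table and nic_state (the exact location depends on where debugfs is mounted).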
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
new file mode 100644
index 000000000000..80670d66bbeb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_DEBUGFS_H
+#define __HBG_DEBUGFS_H
+
+void hbg_debugfs_register(void);
+void hbg_debugfs_unregister(void);
+
+void hbg_debugfs_init(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
new file mode 100644
index 000000000000..4d1f4a33391a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
+#include "hbg_hw.h"
+
+static void hbg_restore_mac_table(struct hbg_priv *priv)
+{
+ struct hbg_mac_filter *filter = &priv->filter;
+ u64 addr;
+ u32 i;
+
+ for (i = 0; i < filter->table_max_len; i++)
+ if (!is_zero_ether_addr(filter->mac_table[i].addr)) {
+ addr = ether_addr_to_u64(filter->mac_table[i].addr);
+ hbg_hw_set_uc_addr(priv, addr, i);
+ }
+
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_restore_user_def_settings(struct hbg_priv *priv)
+{
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ hbg_restore_mac_table(priv);
+ hbg_hw_set_mtu(priv, priv->netdev->mtu);
+ hbg_hw_set_pause_enable(priv, pause_param->tx_pause,
+ pause_param->rx_pause);
+}
+
+int hbg_rebuild(struct hbg_priv *priv)
+{
+ int ret;
+
+ ret = hbg_hw_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_restore_user_def_settings(priv);
+ return 0;
+}
+
+static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (netif_running(priv->netdev)) {
+ dev_warn(&priv->pdev->dev,
+ "failed to reset because port is up\n");
+ return -EBUSY;
+ }
+
+ priv->reset_type = type;
+ set_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
+ if (ret) {
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ }
+
+ return ret;
+}
+
+static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
+{
+ int ret;
+
+ if (!test_bit(HBG_NIC_STATE_RESETTING, &priv->state) ||
+ type != priv->reset_type)
+ return 0;
+
+ ASSERT_RTNL();
+
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
+ ret = hbg_rebuild(priv);
+ if (ret) {
+ set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ dev_err(&priv->pdev->dev, "failed to rebuild after reset\n");
+ return ret;
+ }
+
+ dev_info(&priv->pdev->dev, "reset done\n");
+ return ret;
+}
+
+/* must be protected by rtnl lock */
+int hbg_reset(struct hbg_priv *priv)
+{
+ int ret;
+
+ ASSERT_RTNL();
+ ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION);
+ if (ret)
+ return ret;
+
+ return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION);
+}
+
+static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ rtnl_lock();
+ hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR);
+}
+
+static void hbg_pci_err_reset_done(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ hbg_reset_done(priv, HBG_RESET_TYPE_FLR);
+ rtnl_unlock();
+}
+
+static const struct pci_error_handlers hbg_pci_err_handler = {
+ .reset_prepare = hbg_pci_err_reset_prepare,
+ .reset_done = hbg_pci_err_reset_done,
+};
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv)
+{
+ pdrv->err_handler = &hbg_pci_err_handler;
+}
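The reset_prepare/reset_done pair is called by the PCI core around a function-level reset: prepare takes the RTNL and quiesces the device, done rebuilds it and drops the RTNL, so the whole reset is serialized against netdev state changes. Holding the lock across the core's reset this way is a pattern used by several NIC drivers, which is presumably why hbg_reset() itself only asserts the RTNL rather than taking it.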
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
new file mode 100644
index 000000000000..d7828e446308
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef __HBG_ERR_H
+#define __HBG_ERR_H
+
+#include <linux/pci.h>
+
+void hbg_set_pci_err_handler(struct pci_driver *pdrv);
+int hbg_reset(struct hbg_priv *priv);
+int hbg_rebuild(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
index c3370114aef3..00364a438ec2 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -3,12 +3,193 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/rtnetlink.h>
+#include "hbg_common.h"
+#include "hbg_err.h"
#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+
+enum hbg_reg_dump_type {
+ HBG_DUMP_REG_TYPE_SPEC = 0,
+ HBG_DUMP_REG_TYPE_MDIO,
+ HBG_DUMP_REG_TYPE_GMAC,
+ HBG_DUMP_REG_TYPE_PCU,
+};
+
+struct hbg_reg_info {
+ u32 type;
+ u32 offset;
+ u32 val;
+};
+
+#define HBG_DUMP_SPEC_I(offset) {HBG_DUMP_REG_TYPE_SPEC, offset, 0}
+#define HBG_DUMP_MDIO_I(offset) {HBG_DUMP_REG_TYPE_MDIO, offset, 0}
+#define HBG_DUMP_GMAC_I(offset) {HBG_DUMP_REG_TYPE_GMAC, offset, 0}
+#define HBG_DUMP_PCU_I(offset) {HBG_DUMP_REG_TYPE_PCU, offset, 0}
+
+static const struct hbg_reg_info hbg_dump_reg_infos[] = {
+ /* dev specs */
+ HBG_DUMP_SPEC_I(HBG_REG_SPEC_VALID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_EVENT_REQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_PHY_ID_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAC_ADDR_HIGH_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_UC_MAC_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MDIO_FREQ_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MAX_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_MIN_MTU_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_TX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_RX_FIFO_NUM_ADDR),
+ HBG_DUMP_SPEC_I(HBG_REG_VLAN_LAYERS_ADDR),
+
+ /* mdio */
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_COMMAND_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_ADDR_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_WDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_RDATA_ADDR),
+ HBG_DUMP_MDIO_I(HBG_REG_MDIO_STA_ADDR),
+
+ /* gmac */
+ HBG_DUMP_GMAC_I(HBG_REG_DUPLEX_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_TYPE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FC_TX_TIMER_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_LOW_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_FD_FC_ADDR_HIGH_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MAX_FRAME_SIZE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_MODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PORT_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_PAUSE_ENABLE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_AN_NEG_STATE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_TRANSMIT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_REC_FILT_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LINE_LOOP_BACK_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_CF_CRC_STRIP_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_MODE_CHANGE_EN_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_LOOP_REG_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_RECV_CTRL_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_VLAN_CODE_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_0_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_1_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_2_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_3_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_4_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_LOW_5_ADDR),
+ HBG_DUMP_GMAC_I(HBG_REG_STATION_ADDR_HIGH_5_ADDR),
+
+ /* pcu */
+ HBG_DUMP_PCU_I(HBG_REG_TX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CFG_FIFO_THRSLD_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_INTRPT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_TX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUS_ERR_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_MAX_FRAME_LEN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DEBUG_ST_MCH_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_CURR_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_FIFO_HIST_STATUS_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_CFF_DATA_NUM_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_TX_PAUSE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CFF_ADDR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_BUF_SIZE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_CTRL_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_RX_PKT_MODE_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST0_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST1_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_DBG_ST2_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_BUS_RST_EN_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_TXINT_CLR_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_MSK_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_STAT_ADDR),
+ HBG_DUMP_PCU_I(HBG_REG_CF_IND_RXINT_CLR_ADDR),
+};
+
+static const u32 hbg_dump_type_base_array[] = {
+ [HBG_DUMP_REG_TYPE_SPEC] = 0,
+ [HBG_DUMP_REG_TYPE_MDIO] = HBG_REG_MDIO_BASE,
+ [HBG_DUMP_REG_TYPE_GMAC] = HBG_REG_SGMII_BASE,
+ [HBG_DUMP_REG_TYPE_PCU] = HBG_REG_SGMII_BASE,
+};
+
+static int hbg_ethtool_get_regs_len(struct net_device *netdev)
+{
+ return ARRAY_SIZE(hbg_dump_reg_infos) * sizeof(struct hbg_reg_info);
+}
+
+static void hbg_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_reg_info *info;
+ u32 i, offset = 0;
+
+ regs->version = 0;
+ for (i = 0; i < ARRAY_SIZE(hbg_dump_reg_infos); i++) {
+ info = data + offset;
+
+ *info = hbg_dump_reg_infos[i];
+ info->val = hbg_reg_read(priv, info->offset);
+ info->offset -= hbg_dump_type_base_array[info->type];
+
+ offset += sizeof(*info);
+ }
+}
+
+static void hbg_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &param->tx_pause, &param->rx_pause);
+}
+
+static int hbg_ethtool_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hbg_priv *priv = netdev_priv(net_dev);
+
+ priv->mac.pause_autoneg = param->autoneg;
+ phy_set_asym_pause(priv->mac.phydev, param->rx_pause, param->tx_pause);
+
+ if (!param->autoneg)
+ hbg_hw_set_pause_enable(priv, param->tx_pause, param->rx_pause);
+
+ priv->user_def.pause_param = *param;
+ return 0;
+}
+
+static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (*flags != ETH_RESET_DEDICATED)
+ return -EOPNOTSUPP;
+
+ *flags = 0;
+ return hbg_reset(priv);
+}
static const struct ethtool_ops hbg_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_regs_len = hbg_ethtool_get_regs_len,
+ .get_regs = hbg_ethtool_get_regs,
+ .get_pauseparam = hbg_ethtool_get_pauseparam,
+ .set_pauseparam = hbg_ethtool_set_pauseparam,
+ .reset = hbg_ethtool_reset,
+ .nway_reset = phy_ethtool_nway_reset,
};
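hbg_ethtool_reset() accepts only ETH_RESET_DEDICATED, i.e. a reset of all components dedicated to this interface, which from userspace should correspond to `ethtool --reset <iface> dedicated`. Clearing *flags before returning signals to the ethtool core that every requested component was handled.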
void hbg_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index 05295c2ad439..e7798f213645 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include "hbg_common.h"
@@ -67,6 +68,8 @@ static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
+ specs->uc_mac_num = hbg_reg_read(priv, HBG_REG_UC_MAC_NUM_ADDR);
+
mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);
@@ -135,9 +138,13 @@ void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value);
}
-void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr)
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index)
{
- hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_2_ADDR, mac_addr);
+ u32 addr;
+
+ /* each MAC address is stored as a u64, so entries are 0x8 apart */
+ addr = HBG_REG_STATION_ADDR_LOW_2_ADDR + (index * 0x8);
+ hbg_reg_write64(priv, addr, mac_addr);
}
static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
@@ -161,8 +168,13 @@ static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
{
- hbg_hw_set_pcu_max_frame_len(priv, mtu);
- hbg_hw_set_mac_max_frame_len(priv, mtu);
+ u32 frame_len;
+
+ frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
+ ETH_HLEN + ETH_FCS_LEN;
+
+ hbg_hw_set_pcu_max_frame_len(priv, frame_len);
+ hbg_hw_set_mac_max_frame_len(priv, frame_len);
}
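Worked example of the frame-length arithmetic above: with VLAN_HLEN = 4, ETH_HLEN = 14 and ETH_FCS_LEN = 4, an MTU of 1500 on a device reporting vlan_layers = 2 gives 1500 + 2*4 + 14 + 4 = 1526 bytes, programmed into both the PCU and MAC maximum-frame-length registers.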
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
@@ -207,6 +219,34 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
HBG_REG_DUPLEX_B, duplex);
}
+/* only support uc filter */
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable)
+{
+ hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
+ HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable);
+}
+
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en)
+{
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B, tx_en);
+ hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B, rx_en);
+}
+
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en)
+{
+ *tx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_TX_B);
+ *rx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
+ HBG_REG_PAUSE_ENABLE_RX_B);
+}
+
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr)
+{
+ hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr);
+}
+
static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
{
u32 ctrl = 0;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
index 14fb39241c93..a4a049b5121d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
@@ -51,9 +51,13 @@ bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask);
void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable);
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu);
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable);
-void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr);
+void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index);
u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir);
void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc);
void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr);
+void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable);
+void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en);
+void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en);
+void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index 75505fb5cc4a..bb0f25ac9760 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -6,13 +6,13 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include "hbg_common.h"
+#include "hbg_err.h"
#include "hbg_ethtool.h"
#include "hbg_hw.h"
#include "hbg_irq.h"
#include "hbg_mdio.h"
#include "hbg_txrx.h"
-
-static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu);
+#include "hbg_debugfs.h"
static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
{
@@ -55,11 +55,7 @@ static int hbg_hw_txrx_clear(struct hbg_priv *priv)
return ret;
/* After reset, regs need to be reconfigured */
- hbg_hw_init(priv);
- hbg_hw_set_uc_addr(priv, ether_addr_to_u64(priv->netdev->dev_addr));
- hbg_change_mtu(priv, priv->netdev->mtu);
-
- return 0;
+ return hbg_rebuild(priv);
}
static int hbg_net_stop(struct net_device *netdev)
@@ -74,31 +70,127 @@ static int hbg_net_stop(struct net_device *netdev)
return hbg_hw_txrx_clear(priv);
}
+static void hbg_update_promisc_mode(struct net_device *netdev, bool overflow)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ /* The MAC filter is enabled only if the address table has not
+ * overflowed and IFF_PROMISC is not set in netdev->flags.
+ * Otherwise the filter is disabled.
+ */
+ priv->filter.enabled = !(overflow || (netdev->flags & IFF_PROMISC));
+ hbg_hw_set_mac_filter_enable(priv, priv->filter.enabled);
+}
+
+static void hbg_set_mac_to_mac_table(struct hbg_priv *priv,
+ u32 index, const u8 *addr)
+{
+ if (addr) {
+ ether_addr_copy(priv->filter.mac_table[index].addr, addr);
+ hbg_hw_set_uc_addr(priv, ether_addr_to_u64(addr), index);
+ } else {
+ eth_zero_addr(priv->filter.mac_table[index].addr);
+ hbg_hw_set_uc_addr(priv, 0, index);
+ }
+}
+
+static int hbg_get_index_from_mac_table(struct hbg_priv *priv,
+ const u8 *addr, u32 *index)
+{
+ u32 i;
+
+ for (i = 0; i < priv->filter.table_max_len; i++)
+ if (ether_addr_equal(priv->filter.mac_table[i].addr, addr)) {
+ *index = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hbg_add_mac_to_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+ /* already exists */
+ if (!hbg_get_index_from_mac_table(priv, addr, &index))
+ return 0;
+
+ for (index = 0; index < priv->filter.table_max_len; index++)
+ if (is_zero_ether_addr(priv->filter.mac_table[index].addr)) {
+ hbg_set_mac_to_mac_table(priv, index, addr);
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static void hbg_del_mac_from_filter(struct hbg_priv *priv, const u8 *addr)
+{
+ u32 index;
+
+ /* does not exist */
+ if (hbg_get_index_from_mac_table(priv, addr, &index))
+ return;
+
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+}
+
+static int hbg_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ return hbg_add_mac_to_filter(priv, addr);
+}
+
+static int hbg_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (ether_addr_equal(netdev->dev_addr, (u8 *)addr))
+ return 0;
+
+ hbg_del_mac_from_filter(priv, addr);
+ return 0;
+}
+
+static void hbg_net_set_rx_mode(struct net_device *netdev)
+{
+ int ret;
+
+ ret = __dev_uc_sync(netdev, hbg_uc_sync, hbg_uc_unsync);
+
+ /* A nonzero return means the unicast table overflowed */
+ hbg_update_promisc_mode(netdev, !!ret);
+}
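__dev_uc_sync() walks the netdev's unicast address list, invoking hbg_uc_sync() for addresses not yet programmed and hbg_uc_unsync() for addresses that were removed; a nonzero return means at least one add failed (only -ENOSPC from a full table here), which the driver treats as overflow and answers by disabling the hardware filter so that no traffic is silently dropped.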
+
static int hbg_net_set_mac_address(struct net_device *netdev, void *addr)
{
struct hbg_priv *priv = netdev_priv(netdev);
u8 *mac_addr;
+ bool exists;
+ u32 index;
mac_addr = ((struct sockaddr *)addr)->sa_data;
if (!is_valid_ether_addr(mac_addr))
return -EADDRNOTAVAIL;
- hbg_hw_set_uc_addr(priv, ether_addr_to_u64(mac_addr));
- dev_addr_set(netdev, mac_addr);
+ /* The host MAC address always occupies index 0.
+ * If the new address already exists elsewhere in the table,
+ * install it at index 0 and clear the old entry.
+ */
+ exists = !hbg_get_index_from_mac_table(priv, mac_addr, &index);
+ hbg_set_mac_to_mac_table(priv, 0, mac_addr);
+ if (exists)
+ hbg_set_mac_to_mac_table(priv, index, NULL);
+ hbg_hw_set_rx_pause_mac_addr(priv, ether_addr_to_u64(mac_addr));
+ dev_addr_set(netdev, mac_addr);
return 0;
}
-static void hbg_change_mtu(struct hbg_priv *priv, int new_mtu)
-{
- u32 frame_len;
-
- frame_len = new_mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
- ETH_HLEN + ETH_FCS_LEN;
- hbg_hw_set_mtu(priv, frame_len);
-}
-
static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hbg_priv *priv = netdev_priv(netdev);
@@ -106,7 +198,7 @@ static int hbg_net_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
return -EBUSY;
- hbg_change_mtu(priv, new_mtu);
+ hbg_hw_set_mtu(priv, new_mtu);
WRITE_ONCE(netdev->mtu, new_mtu);
dev_dbg(&priv->pdev->dev,
@@ -142,8 +234,39 @@ static const struct net_device_ops hbg_netdev_ops = {
.ndo_set_mac_address = hbg_net_set_mac_address,
.ndo_change_mtu = hbg_net_change_mtu,
.ndo_tx_timeout = hbg_net_tx_timeout,
+ .ndo_set_rx_mode = hbg_net_set_rx_mode,
};
+static int hbg_mac_filter_init(struct hbg_priv *priv)
+{
+ struct hbg_dev_specs *dev_specs = &priv->dev_specs;
+ struct hbg_mac_filter *filter = &priv->filter;
+ struct hbg_mac_table_entry *tmp_table;
+
+ tmp_table = devm_kcalloc(&priv->pdev->dev, dev_specs->uc_mac_num,
+ sizeof(*tmp_table), GFP_KERNEL);
+ if (!tmp_table)
+ return -ENOMEM;
+
+ filter->mac_table = tmp_table;
+ filter->table_max_len = dev_specs->uc_mac_num;
+ filter->enabled = true;
+
+ hbg_hw_set_mac_filter_enable(priv, filter->enabled);
+ return 0;
+}
+
+static void hbg_init_user_def(struct hbg_priv *priv)
+{
+ struct ethtool_pauseparam *pause_param = &priv->user_def.pause_param;
+
+ priv->mac.pause_autoneg = HBG_STATUS_ENABLE;
+
+ pause_param->autoneg = priv->mac.pause_autoneg;
+ hbg_hw_get_pause_enable(priv, &pause_param->tx_pause,
+ &pause_param->rx_pause);
+}
+
static int hbg_init(struct hbg_priv *priv)
{
int ret;
@@ -160,7 +283,17 @@ static int hbg_init(struct hbg_priv *priv)
if (ret)
return ret;
- return hbg_mdio_init(priv);
+ ret = hbg_mdio_init(priv);
+ if (ret)
+ return ret;
+
+ ret = hbg_mac_filter_init(priv);
+ if (ret)
+ return ret;
+
+ hbg_debugfs_init(priv);
+ hbg_init_user_def(priv);
+ return 0;
}
static int hbg_pci_init(struct pci_dev *pdev)
@@ -216,13 +349,15 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netdev->max_mtu = priv->dev_specs.max_mtu;
netdev->min_mtu = priv->dev_specs.min_mtu;
netdev->netdev_ops = &hbg_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
- hbg_change_mtu(priv, ETH_DATA_LEN);
+ hbg_hw_set_mtu(priv, ETH_DATA_LEN);
hbg_net_set_mac_address(priv->netdev, &priv->dev_specs.mac_addr);
hbg_ethtool_set_ops(netdev);
@@ -245,7 +380,27 @@ static struct pci_driver hbg_driver = {
.id_table = hbg_pci_tbl,
.probe = hbg_probe,
};
-module_pci_driver(hbg_driver);
+
+static int __init hbg_module_init(void)
+{
+ int ret;
+
+ hbg_debugfs_register();
+ hbg_set_pci_err_handler(&hbg_driver);
+ ret = pci_register_driver(&hbg_driver);
+ if (ret)
+ hbg_debugfs_unregister();
+
+ return ret;
+}
+module_init(hbg_module_init);
+
+static void __exit hbg_module_exit(void)
+{
+ pci_unregister_driver(&hbg_driver);
+ hbg_debugfs_unregister();
+}
+module_exit(hbg_module_exit);
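The open-coded init/exit replaces module_pci_driver() so that the shared debugfs root exists before any probe tries to create a per-device directory beneath it, and is removed only after the driver, and with it every device directory, is gone; the PCI error handlers likewise have to be installed before pci_register_driver().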
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index a3479fba8501..db6bc4cfb971 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -114,6 +114,19 @@ static void hbg_mdio_init_hw(struct hbg_priv *priv)
hbg_mdio_set_command(mac, cmd);
}
+static void hbg_flowctrl_cfg(struct hbg_priv *priv)
+{
+ struct phy_device *phydev = priv->mac.phydev;
+ bool rx_pause;
+ bool tx_pause;
+
+ if (!priv->mac.pause_autoneg)
+ return;
+
+ phy_get_pause(phydev, &tx_pause, &rx_pause);
+ hbg_hw_set_pause_enable(priv, tx_pause, rx_pause);
+}
+
static void hbg_phy_adjust_link(struct net_device *netdev)
{
struct hbg_priv *priv = netdev_priv(netdev);
@@ -140,6 +153,7 @@ static void hbg_phy_adjust_link(struct net_device *netdev)
priv->mac.duplex = phydev->duplex;
priv->mac.autoneg = phydev->autoneg;
hbg_hw_adjust_link(priv, speed, phydev->duplex);
+ hbg_flowctrl_cfg(priv);
}
priv->mac.link_status = phydev->link;
@@ -168,6 +182,7 @@ static int hbg_phy_connect(struct hbg_priv *priv)
return ret;
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_support_asym_pause(phydev);
phy_attached_info(phydev);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index 57d81c6d7633..f12efc12f3c5 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -10,6 +10,8 @@
#define HBG_REG_MAC_ID_ADDR 0x0008
#define HBG_REG_PHY_ID_ADDR 0x000C
#define HBG_REG_MAC_ADDR_ADDR 0x0010
+#define HBG_REG_MAC_ADDR_HIGH_ADDR 0x0014
+#define HBG_REG_UC_MAC_NUM_ADDR 0x0018
#define HBG_REG_MDIO_FREQ_ADDR 0x0024
#define HBG_REG_MAX_MTU_ADDR 0x0028
#define HBG_REG_MIN_MTU_ADDR 0x002C
@@ -28,6 +30,7 @@
#define HBG_REG_MDIO_COMMAND_OP_M GENMASK(11, 10)
#define HBG_REG_MDIO_COMMAND_PRTAD_M GENMASK(9, 5)
#define HBG_REG_MDIO_COMMAND_DEVAD_M GENMASK(4, 0)
+#define HBG_REG_MDIO_ADDR_ADDR (HBG_REG_MDIO_BASE + 0x0004)
#define HBG_REG_MDIO_WDATA_ADDR (HBG_REG_MDIO_BASE + 0x0008)
#define HBG_REG_MDIO_WDATA_M GENMASK(15, 0)
#define HBG_REG_MDIO_RDATA_ADDR (HBG_REG_MDIO_BASE + 0x000C)
@@ -36,6 +39,10 @@
/* GMAC */
#define HBG_REG_SGMII_BASE 0x10000
#define HBG_REG_DUPLEX_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x0008)
+#define HBG_REG_FD_FC_TYPE_ADDR (HBG_REG_SGMII_BASE + 0x000C)
+#define HBG_REG_FC_TX_TIMER_ADDR (HBG_REG_SGMII_BASE + 0x001C)
+#define HBG_REG_FD_FC_ADDR_LOW_ADDR (HBG_REG_SGMII_BASE + 0x0020)
+#define HBG_REG_FD_FC_ADDR_HIGH_ADDR (HBG_REG_SGMII_BASE + 0x0024)
#define HBG_REG_DUPLEX_B BIT(0)
#define HBG_REG_MAX_FRAME_SIZE_ADDR (HBG_REG_SGMII_BASE + 0x003C)
#define HBG_REG_PORT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x0040)
@@ -43,20 +50,42 @@
#define HBG_REG_PORT_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0044)
#define HBG_REG_PORT_ENABLE_RX_B BIT(1)
#define HBG_REG_PORT_ENABLE_TX_B BIT(2)
+#define HBG_REG_PAUSE_ENABLE_ADDR (HBG_REG_SGMII_BASE + 0x0048)
+#define HBG_REG_PAUSE_ENABLE_RX_B BIT(0)
+#define HBG_REG_PAUSE_ENABLE_TX_B BIT(1)
+#define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058)
#define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060)
#define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7)
#define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6)
#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5)
+#define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064)
+#define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0)
+#define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 0x01A8)
#define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0)
#define HBG_REG_CF_CRC_STRIP_B BIT(0)
#define HBG_REG_MODE_CHANGE_EN_ADDR (HBG_REG_SGMII_BASE + 0x01B4)
#define HBG_REG_MODE_CHANGE_EN_B BIT(0)
+#define HBG_REG_LOOP_REG_ADDR (HBG_REG_SGMII_BASE + 0x01DC)
#define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0)
#define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3)
+#define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8)
+#define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200)
+#define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204)
+#define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208)
+#define HBG_REG_STATION_ADDR_HIGH_1_ADDR (HBG_REG_SGMII_BASE + 0x020C)
#define HBG_REG_STATION_ADDR_LOW_2_ADDR (HBG_REG_SGMII_BASE + 0x0210)
#define HBG_REG_STATION_ADDR_HIGH_2_ADDR (HBG_REG_SGMII_BASE + 0x0214)
+#define HBG_REG_STATION_ADDR_LOW_3_ADDR (HBG_REG_SGMII_BASE + 0x0218)
+#define HBG_REG_STATION_ADDR_HIGH_3_ADDR (HBG_REG_SGMII_BASE + 0x021C)
+#define HBG_REG_STATION_ADDR_LOW_4_ADDR (HBG_REG_SGMII_BASE + 0x0220)
+#define HBG_REG_STATION_ADDR_HIGH_4_ADDR (HBG_REG_SGMII_BASE + 0x0224)
+#define HBG_REG_STATION_ADDR_LOW_5_ADDR (HBG_REG_SGMII_BASE + 0x0228)
+#define HBG_REG_STATION_ADDR_HIGH_5_ADDR (HBG_REG_SGMII_BASE + 0x022C)
/* PCU */
+#define HBG_REG_TX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0420)
+#define HBG_REG_RX_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0424)
+#define HBG_REG_CFG_FIFO_THRSLD_ADDR (HBG_REG_SGMII_BASE + 0x0428)
#define HBG_REG_CF_INTRPT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x042C)
#define HBG_INT_MSK_WE_ERR_B BIT(31)
#define HBG_INT_MSK_RBREQ_ERR_B BIT(30)
@@ -78,11 +107,17 @@
#define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */
#define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434)
#define HBG_REG_CF_INTRPT_CLR_ADDR (HBG_REG_SGMII_BASE + 0x0438)
+#define HBG_REG_TX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x043C)
+#define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440)
#define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444)
#define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0)
+#define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450)
+#define HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454)
+#define HBG_REG_FIFO_HIST_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0458)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16)
+#define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470)
#define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488)
#define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C)
#define HBG_REG_TX_CFF_ADDR_2_ADDR (HBG_REG_SGMII_BASE + 0x0490)
@@ -101,6 +136,10 @@
#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0)
#define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4)
#define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21)
+#define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4)
+#define HBG_REG_DBG_ST1_ADDR (HBG_REG_SGMII_BASE + 0x05E8)
+#define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC)
+#define HBG_REG_BUS_RST_EN_ADDR (HBG_REG_SGMII_BASE + 0x0688)
#define HBG_REG_CF_IND_TXINT_MSK_ADDR (HBG_REG_SGMII_BASE + 0x0694)
#define HBG_REG_IND_INTR_MASK_B BIT(0)
#define HBG_REG_CF_IND_TXINT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0698)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 851490346261..6b6ced37e490 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3019,115 +3019,6 @@ static struct platform_driver g_dsaf_driver = {
module_platform_driver(g_dsaf_driver);
-/**
- * hns_dsaf_roce_reset - reset dsaf and roce
- * @dsaf_fwnode: Pointer to framework node for the dasf
- * @dereset: false - request reset , true - drop reset
- * return 0 - success , negative -fail
- */
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
-{
- struct dsaf_device *dsaf_dev;
- struct platform_device *pdev;
- u32 mp;
- u32 sl;
- u32 credit;
- int i;
- static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
- };
- static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
- {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
- {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
- };
-
- /* find the platform device corresponding to fwnode */
- if (is_of_node(dsaf_fwnode)) {
- pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
- } else if (is_acpi_device_node(dsaf_fwnode)) {
- pdev = hns_dsaf_find_platform_device(dsaf_fwnode);
- } else {
- pr_err("fwnode is neither OF or ACPI type\n");
- return -EINVAL;
- }
-
- /* check if we were a success in fetching pdev */
- if (!pdev) {
- pr_err("couldn't find platform device for node\n");
- return -ENODEV;
- }
-
- /* retrieve the dsaf_device from the driver data */
- dsaf_dev = dev_get_drvdata(&pdev->dev);
- if (!dsaf_dev) {
- dev_err(&pdev->dev, "dsaf_dev is NULL\n");
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* now, make sure we are running on compatible SoC */
- if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
- dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
- dsaf_dev->ae_dev.name);
- put_device(&pdev->dev);
- return -ENODEV;
- }
-
- /* do reset or de-reset according to the flag */
- if (!dereset) {
- /* reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- false);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);
- } else {
- /* configure dsaf tx roce correspond to port map and sl map */
- mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(mp, 7 << i * 3, i * 3,
- port_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_set_field(mp, 3 << i * 3, i * 3, 0);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp);
-
- sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG);
- for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
- dsaf_set_field(sl, 3 << i * 2, i * 2,
- sl_map[i][DSAF_ROCE_6PORT_MODE]);
- dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
-
- /* de-reset rocee-channels in dsaf and rocee */
- dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
- true);
- msleep(SRST_TIME_INTERVAL);
- dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
-
- /* enable dsaf channel rocee credit */
- credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
-
- dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
- dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
- }
-
- put_device(&pdev->dev);
-
- return 0;
-}
-EXPORT_SYMBOL(hns_dsaf_roce_reset);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HNS DSAF driver");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0eb03dff1a8b..653dfbb25d1b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -42,29 +42,6 @@ struct hns_mac_cb;
#define HNS_MAX_WAIT_CNT 10000
-enum dsaf_roce_port_mode {
- DSAF_ROCE_6PORT_MODE,
- DSAF_ROCE_4PORT_MODE,
- DSAF_ROCE_2PORT_MODE,
- DSAF_ROCE_CHAN_MODE_NUM,
-};
-
-enum dsaf_roce_port_num {
- DSAF_ROCE_PORT_0,
- DSAF_ROCE_PORT_1,
- DSAF_ROCE_PORT_2,
- DSAF_ROCE_PORT_3,
- DSAF_ROCE_PORT_4,
- DSAF_ROCE_PORT_5,
-};
-
-enum dsaf_roce_qos_sl {
- DSAF_ROCE_SL_0,
- DSAF_ROCE_SL_1,
- DSAF_ROCE_SL_2,
- DSAF_ROCE_SL_3,
-};
-
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
@@ -307,9 +284,6 @@ struct dsaf_misc_op {
void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
- void (*hns_dsaf_srst_chns)(struct dsaf_device *dsaf_dev, u32 msk,
- bool dereset);
- void (*hns_dsaf_roce_srst)(struct dsaf_device *dsaf_dev, bool dereset);
phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
@@ -463,6 +437,4 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
-
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 5df19c604d09..91391a49fcea 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -326,69 +326,6 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
HNS_XGE_RESET_FUNC, port, dereset);
}
-/**
- * hns_dsaf_srst_chns - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- u32 reg_addr;
-
- if (!dereset)
- reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
- else
- reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
-
- dsaf_write_sub(dsaf_dev, reg_addr, msk);
-}
-
-/**
- * hns_dsaf_srst_chns_acpi - reset dsaf channels
- * @dsaf_dev: dsaf device struct pointer
- * @msk: xbar channels mask value:
- * @dereset: false - request reset , true - drop reset
- *
- * bit0-5 for xge0-5
- * bit6-11 for ppe0-5
- * bit12-17 for roce0-5
- * bit18-19 for com/dfx
- */
-static void
-hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_DSAF_CHN_RESET_FUNC,
- msk, dereset);
-}
-
-static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
-{
- if (!dereset) {
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
- } else {
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1);
- dsaf_write_sub(dsaf_dev,
- DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1);
- msleep(20);
- dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1);
- }
-}
-
-static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
-{
- hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
- HNS_ROCE_RESET_FUNC, 0, dereset);
-}
-
static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
bool dereset)
{
@@ -729,8 +666,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
misc_op->ppe_srst = hns_ppe_srst_by_port;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst;
misc_op->get_phy_if = hns_mac_get_phy_if;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
@@ -746,8 +681,6 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
misc_op->ppe_comm_srst = hns_ppe_com_srst;
- misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi;
- misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
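The deleted hns_dsaf_srst_chns() also documents a HiSilicon sub-controller convention worth keeping in mind: writing the channel mask to a *_RESET_REQ_REG asserts reset, and the same mask written to the paired *_RESET_DREQ_REG releases it. A hedged sketch of that request/de-request pattern (generic names and offsets, not the driver's API; assumes <linux/io.h>):

static void srst_by_mask(void __iomem *subctrl, u32 req_off, u32 dreq_off,
                         u32 msk, bool dereset)
{
        /* REQ asserts reset for the masked channels, DREQ drops it */
        writel(msk, subctrl + (dereset ? dreq_off : req_off));
}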
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 46af467aa596..635b3a95dd82 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -195,11 +195,6 @@ void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}
-void hns_rcb_start(struct hnae_queue *q, u32 val)
-{
- hns_rcb_ring_enable_hw(q, val);
-}
-
/**
*hns_rcb_common_init_commit_hw - make rcb common init completed
*@rcb_common: rcb common device
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 0f4cc184ef39..68f81547dfb4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -116,7 +116,6 @@ int hns_rcb_buf_size2type(u32 buf_size);
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
-void hns_rcb_start(struct hnae_queue *q, u32 val);
int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
u16 *max_vfn, u16 *max_q_per_vf);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index f81a43d2cdfc..486fb0e20bef 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -469,7 +469,7 @@ int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
err = HINIC_MGMT_CMD_UNSUPPORTED;
} else if (err || !out_size || vlan_filter.status) {
dev_err(&pdev->dev,
- "Failed to set vlan fliter, err: %d, status: 0x%x, out size: 0x%x\n",
+ "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n",
err, vlan_filter.status, out_size);
err = -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 20bc40eec487..24ec9a4f1ffa 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -292,6 +292,7 @@ config ICE
select DIMLIB
select LIBIE
select NET_DEVLINK
+ select PACKING
select PLDMFW
select DPLL
help
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 98861cc6df7c..b9dd7b719832 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1180,126 +1180,6 @@ s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
}
/**
- * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
- * @hw: Pointer to hardware structure
- * @results: Pointer array to message, results[0] is pointer to message
- * @mbx: Pointer to mailbox information structure
- *
- * This function is a default handler for MAC/VLAN requests from the VF.
- * The assumption is that in this case it is acceptable to just directly
- * hand off the message from the VF to the underlying shared code.
- **/
-s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
-{
- struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
- u8 mac[ETH_ALEN];
- u32 *result;
- int err = 0;
- bool set;
- u16 vlan;
- u32 vid;
-
- /* we shouldn't be updating rules on a disabled interface */
- if (!FM10K_VF_FLAG_ENABLED(vf_info))
- err = FM10K_ERR_PARAM;
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
- result = results[FM10K_MAC_VLAN_MSG_VLAN];
-
- /* record VLAN id requested */
- err = fm10k_tlv_attr_get_u32(result, &vid);
- if (err)
- return err;
-
- set = !(vid & FM10K_VLAN_CLEAR);
- vid &= ~FM10K_VLAN_CLEAR;
-
- /* if the length field has been set, this is a multi-bit
- * update request. For multi-bit requests, simply disallow
- * them when the pf_vid has been set. In this case, the PF
- * should have already cleared the VLAN_TABLE, and if we
- * allowed them, it could allow a rogue VF to receive traffic
- * on a VLAN it was not assigned. In the single-bit case, we
- * need to modify requests for VLAN 0 to use the default PF or
- * SW vid when assigned.
- */
-
- if (vid >> 16) {
- /* prevent multi-bit requests when PF has
- * administratively set the VLAN for this VF
- */
- if (vf_info->pf_vid)
- return FM10K_ERR_PARAM;
- } else {
- err = fm10k_iov_select_vid(vf_info, (u16)vid);
- if (err < 0)
- return err;
-
- vid = err;
- }
-
- /* update VSI info for VF in regards to VLAN table */
- err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
- }
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
- result = results[FM10K_MAC_VLAN_MSG_MAC];
-
- /* record unicast MAC address requested */
- err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
- if (err)
- return err;
-
- /* block attempts to set MAC for a locked device */
- if (is_valid_ether_addr(vf_info->mac) &&
- !ether_addr_equal(mac, vf_info->mac))
- return FM10K_ERR_PARAM;
-
- set = !(vlan & FM10K_VLAN_CLEAR);
- vlan &= ~FM10K_VLAN_CLEAR;
-
- err = fm10k_iov_select_vid(vf_info, vlan);
- if (err < 0)
- return err;
-
- vlan = (u16)err;
-
- /* notify switch of request for new unicast address */
- err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
- mac, vlan, set, 0);
- }
-
- if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
- result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
-
- /* record multicast MAC address requested */
- err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
- if (err)
- return err;
-
- /* verify that the VF is allowed to request multicast */
- if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
- return FM10K_ERR_PARAM;
-
- set = !(vlan & FM10K_VLAN_CLEAR);
- vlan &= ~FM10K_VLAN_CLEAR;
-
- err = fm10k_iov_select_vid(vf_info, vlan);
- if (err < 0)
- return err;
-
- vlan = (u16)err;
-
- /* notify switch of request for new multicast address */
- err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
- mac, vlan, set);
- }
-
- return err;
-}
-
-/**
* fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
* @vf_info: VF info structure containing capability flags
* @mode: Requested xcast mode
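One detail of the removed fm10k handler worth spelling out: the mailbox encodes add-versus-remove in the VLAN word itself, with the FM10K_VLAN_CLEAR bit marking a removal and the remaining bits carrying the VID. A minimal decode sketch of that convention (decode_vlan_word() is illustrative, not driver code):

/* Decode an fm10k mailbox VLAN word: the CLEAR bit selects add vs remove */
static void decode_vlan_word(u32 vid_word, bool *set, u32 *vid)
{
        *set = !(vid_word & FM10K_VLAN_CLEAR);
        *vid = vid_word & ~FM10K_VLAN_CLEAR;
}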
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index 8e814df709d2..ad3696893cb1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -99,8 +99,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid);
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
-s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
- struct fm10k_mbx_info *);
s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d4255c2706fa..c67963bfe14e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -88,6 +88,7 @@ enum i40e_state {
__I40E_SERVICE_SCHED,
__I40E_ADMINQ_EVENT_PENDING,
__I40E_MDD_EVENT_PENDING,
+ __I40E_MDD_VF_PRINT_PENDING,
__I40E_VFLR_EVENT_PENDING,
__I40E_RESET_RECOVERY_PENDING,
__I40E_TIMEOUT_RECOVERY_PENDING,
@@ -191,6 +192,7 @@ enum i40e_pf_flags {
*/
I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
I40E_FLAG_VF_VLAN_PRUNING_ENA,
+ I40E_FLAG_MDD_AUTO_RESET_VF,
I40E_PF_FLAGS_NBITS, /* must be last */
};
@@ -572,7 +574,7 @@ struct i40e_pf {
int num_alloc_vfs; /* actual number of VFs allocated */
u32 vf_aq_requests;
u32 arq_overflows; /* Not fatal, possibly indicative of problems */
-
+ struct ratelimit_state mdd_message_rate_limit;
/* DCBx/DCBNL capability for PF that indicates
* whether DCBx is managed by firmware or host
* based agent (LLDPAD). Also, indicates what
@@ -1189,7 +1191,6 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
u32 i40e_get_current_fd_count(struct i40e_pf *pf);
-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
u32 i40e_get_global_fd_count(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
@@ -1197,7 +1198,6 @@ void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
const u8 *macaddr, s16 vlan);
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
-void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1);
@@ -1313,7 +1313,6 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
int i40e_get_partition_bw_setting(struct i40e_pf *pf);
int i40e_set_partition_bw_setting(struct i40e_pf *pf);
-int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
void i40e_set_fec_in_flags(u8 fec_cfg, unsigned long *flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index f73f5930fc58..175c1320c143 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1016,16 +1016,6 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
return status;
}
-int
-i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */ u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status)
-{
- return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
- cmd_details, true, aq_status);
-}
-
/**
* i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index e8031f1a9b4f..370b4bddee44 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1805,37 +1805,6 @@ int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
}
/**
- * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
- * @hw: pointer to the hw struct
- * @seid: vsi number
- * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
- * @cmd_details: pointer to command details structure or NULL
- **/
-int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
- (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- u16 flags = 0;
- int status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_vsi_promiscuous_modes);
- if (enable)
- flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
-
- cmd->promiscuous_flags = cpu_to_le16(flags);
- cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
- cmd->seid = cpu_to_le16(seid);
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_get_vsi_params - get VSI configuration info
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -2436,136 +2405,6 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
}
/**
- * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
- * @hw: pointer to the hw struct
- * @opcode: AQ opcode for add or delete mirror rule
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @id: Destination VSI SEID or Rule ID
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
- * VEBs/VEPA elements only
- **/
-static int i40e_mirrorrule_op(struct i40e_hw *hw,
- u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
- u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_add_delete_mirror_rule *cmd =
- (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
- struct i40e_aqc_add_delete_mirror_rule_completion *resp =
- (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
- u16 buf_size;
- int status;
-
- buf_size = count * sizeof(*mr_list);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, opcode);
- cmd->seid = cpu_to_le16(sw_seid);
- cmd->rule_type = cpu_to_le16(rule_type &
- I40E_AQC_MIRROR_RULE_TYPE_MASK);
- cmd->num_entries = cpu_to_le16(count);
- /* Dest VSI for add, rule_id for delete */
- cmd->destination = cpu_to_le16(id);
- if (mr_list) {
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
- I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- }
-
- status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
- cmd_details);
- if (!status ||
- hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
- if (rule_id)
- *rule_id = le16_to_cpu(resp->rule_id);
- if (rules_used)
- *rules_used = le16_to_cpu(resp->mirror_rules_used);
- if (rules_free)
- *rules_free = le16_to_cpu(resp->mirror_rules_free);
- }
- return status;
-}
-
-/**
- * i40e_aq_add_mirrorrule - add a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @dest_vsi: SEID of VSI to which packets will be mirrored
- * @count: length of the list
- * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
- * @cmd_details: pointer to command details structure or NULL
- * @rule_id: Rule ID returned from FW
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
- **/
-int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count,
- __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
-{
- if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
- rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
- if (count == 0 || !mr_list)
- return -EINVAL;
- }
-
- return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
- rule_type, dest_vsi, count, mr_list,
- cmd_details, rule_id, rules_used, rules_free);
-}
-
-/**
- * i40e_aq_delete_mirrorrule - delete a mirror rule
- * @hw: pointer to the hw struct
- * @sw_seid: Switch SEID (to which rule refers)
- * @rule_type: Rule Type (ingress/egress/VLAN)
- * @count: length of the list
- * @rule_id: Rule ID that is returned in the receive desc as part of
- * add_mirrorrule.
- * @mr_list: list of mirrored VLAN IDs to be removed
- * @cmd_details: pointer to command details structure or NULL
- * @rules_used: Number of rules used in internal switch
- * @rules_free: Number of rules free in internal switch
- *
- * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
- **/
-int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count,
- __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free)
-{
- /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
- if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- /* count and mr_list shall be valid for rule_type INGRESS VLAN
- * mirroring. For other rule_type, count and rule_type should
- * not matter.
- */
- if (count == 0 || !mr_list)
- return -EINVAL;
- }
-
- return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
- rule_type, rule_id, count, mr_list,
- cmd_details, NULL, rules_used, rules_free);
-}
-
-/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: VF id to send msg
@@ -3180,41 +3019,6 @@ i40e_aq_update_nvm_exit:
}
/**
- * i40e_aq_rearrange_nvm
- * @hw: pointer to the hw struct
- * @rearrange_nvm: defines direction of rearrangement
- * @cmd_details: pointer to command details structure or NULL
- *
- * Rearrange NVM structure, available only for transition FW
- **/
-int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aqc_nvm_update *cmd;
- struct i40e_aq_desc desc;
- int status;
-
- cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
-
- rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
- I40E_AQ_NVM_REARRANGE_TO_STRUCT);
-
- if (!rearrange_nvm) {
- status = -EINVAL;
- goto i40e_aq_rearrange_nvm_exit;
- }
-
- cmd->command_flags |= rearrange_nvm;
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
-i40e_aq_rearrange_nvm_exit:
- return status;
-}
-
-/**
* i40e_aq_get_lldp_mib
* @hw: pointer to the hw struct
* @bridge_type: type of bridge requested
@@ -3335,44 +3139,6 @@ int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
}
/**
- * i40e_aq_restore_lldp
- * @hw: pointer to the hw struct
- * @setting: pointer to factory setting variable or NULL
- * @restore: True if factory settings should be restored
- * @cmd_details: pointer to command details structure or NULL
- *
- * Restore LLDP Agent factory settings if @restore set to True. In other case
- * only returns factory setting in AQ response.
- **/
-int
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_restore *cmd =
- (struct i40e_aqc_lldp_restore *)&desc.params.raw;
- int status;
-
- if (!test_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps)) {
- i40e_debug(hw, I40E_DEBUG_ALL,
- "Restore LLDP not supported by current FW version.\n");
- return -ENODEV;
- }
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
-
- if (restore)
- cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- if (setting)
- *setting = cmd->command & 1;
-
- return status;
-}
-
-/**
* i40e_aq_stop_lldp
* @hw: pointer to the hw struct
* @shutdown_agent: True if LLDP Agent needs to be Shutdown
@@ -4570,84 +4336,6 @@ phy_write_end:
}
/**
- * i40e_write_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Writes value to specified PHY register
- **/
-int i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
-{
- int status;
-
- switch (hw->device_id) {
- case I40E_DEV_ID_1G_BASE_T_X722:
- status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
- value);
- break;
- case I40E_DEV_ID_1G_BASE_T_BC:
- case I40E_DEV_ID_5G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_10G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- status = i40e_write_phy_register_clause45(hw, page, reg,
- phy_addr, value);
- break;
- default:
- status = -EIO;
- break;
- }
-
- return status;
-}
-
-/**
- * i40e_read_phy_register
- * @hw: pointer to the HW structure
- * @page: registers page number
- * @reg: register address in the page
- * @phy_addr: PHY address on MDIO interface
- * @value: PHY register value
- *
- * Reads specified PHY register value
- **/
-int i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
-{
- int status;
-
- switch (hw->device_id) {
- case I40E_DEV_ID_1G_BASE_T_X722:
- status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
- value);
- break;
- case I40E_DEV_ID_1G_BASE_T_BC:
- case I40E_DEV_ID_5G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- case I40E_DEV_ID_10G_BASE_T_BC:
- case I40E_DEV_ID_10G_BASE_T_X722:
- case I40E_DEV_ID_25G_B:
- case I40E_DEV_ID_25G_SFP28:
- status = i40e_read_phy_register_clause45(hw, page, reg,
- phy_addr, value);
- break;
- default:
- status = -EIO;
- break;
- }
-
- return status;
-}
-
-/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
@@ -4663,80 +4351,6 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
}
/**
- * i40e_blink_phy_link_led
- * @hw: pointer to the HW structure
- * @time: time how long led will blinks in secs
- * @interval: gap between LED on and off in msecs
- *
- * Blinks PHY link LED
- **/
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval)
-{
- u16 led_addr = I40E_PHY_LED_PROV_REG_1;
- u16 gpio_led_port;
- u8 phy_addr = 0;
- int status = 0;
- u16 led_ctl;
- u8 port_num;
- u16 led_reg;
- u32 i;
-
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
-
- for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
- led_addr++) {
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- &led_reg);
- if (status)
- goto phy_blinking_end;
- led_ctl = led_reg;
- if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
- led_reg = 0;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
- if (status)
- goto phy_blinking_end;
- break;
- }
- }
-
- if (time > 0 && interval > 0) {
- for (i = 0; i < time * 1000; i += interval) {
- status = i40e_read_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
- if (status)
- goto restore_config;
- if (led_reg & I40E_PHY_LED_MANUAL_ON)
- led_reg = 0;
- else
- led_reg = I40E_PHY_LED_MANUAL_ON;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
- if (status)
- goto restore_config;
- msleep(interval);
- }
- }
-
-restore_config:
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
-
-phy_blinking_end:
- return status;
-}
-
-/**
* i40e_led_get_reg - read LED register
* @hw: pointer to the HW structure
* @led_addr: LED register address
@@ -5269,39 +4883,6 @@ i40e_find_segment_in_package(u32 segment_type,
(struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
/**
- * i40e_find_section_in_profile
- * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
- * @profile: pointer to the i40e segment header to be searched
- *
- * This function searches i40e segment for a particular section type. On
- * success it returns a pointer to the section header, otherwise it will
- * return NULL.
- **/
-struct i40e_profile_section_header *
-i40e_find_section_in_profile(u32 section_type,
- struct i40e_profile_segment *profile)
-{
- struct i40e_profile_section_header *sec;
- struct i40e_section_table *sec_tbl;
- u32 sec_off;
- u32 i;
-
- if (profile->header.type != SEGMENT_TYPE_I40E)
- return NULL;
-
- I40E_SECTION_TABLE(profile, sec_tbl);
-
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec_off = sec_tbl->section_offset[i];
- sec = I40E_SECTION_HEADER(profile, sec_off);
- if (sec->section.type == section_type)
- return sec;
- }
-
- return NULL;
-}
-
-/**
* i40e_ddp_exec_aq_section - Execute generic AQ for DDP
* @hw: pointer to the hw struct
* @aq: command buffer containing all data to execute AQ
@@ -5524,45 +5105,6 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
}
/**
- * i40e_add_pinfo_to_list
- * @hw: pointer to the hardware structure
- * @profile: pointer to the profile segment of the package
- * @profile_info_sec: buffer for information section
- * @track_id: package tracking id
- *
- * Register a profile to the list of loaded profiles.
- */
-int
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id)
-{
- struct i40e_profile_section_header *sec = NULL;
- struct i40e_profile_info *pinfo;
- u32 offset = 0, info = 0;
- int status = 0;
-
- sec = (struct i40e_profile_section_header *)profile_info_sec;
- sec->tbl_size = 1;
- sec->data_end = sizeof(struct i40e_profile_section_header) +
- sizeof(struct i40e_profile_info);
- sec->section.type = SECTION_TYPE_INFO;
- sec->section.offset = sizeof(struct i40e_profile_section_header);
- sec->section.size = sizeof(struct i40e_profile_info);
- pinfo = (struct i40e_profile_info *)(profile_info_sec +
- sec->section.offset);
- pinfo->track_id = track_id;
- pinfo->version = profile->version;
- pinfo->op = I40E_DDP_ADD_TRACKID;
- memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
-
- status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
- track_id, &offset, &info, NULL);
-
- return status;
-}
-
-/**
* i40e_aq_add_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to add cloud filters from
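A recurring idiom in the AQ helpers removed above: an indirect command that carries a buffer must mark the descriptor with the BUF flag (plus RD when the buffer flows host-to-device), and with LB once the buffer exceeds the large-buffer threshold. Distilled from the deleted i40e_mirrorrule_op(), not new logic:

/* flag setup for an indirect (buffered) admin queue command */
if (buff) {
        desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
        if (buff_size > I40E_AQ_LARGE_BUF)
                desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
}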
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 8db1eb0c1768..352e957443fd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1491,19 +1491,6 @@ void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc)
}
/**
- * i40e_dcb_hw_get_num_tc
- * @hw: pointer to the hw struct
- *
- * Returns number of traffic classes configured in HW
- **/
-u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw)
-{
- u32 reg = rd32(hw, I40E_PRTDCB_GENC);
-
- return FIELD_GET(I40E_PRTDCB_GENC_NUMTC_MASK, reg);
-}
-
-/**
* i40e_dcb_hw_rx_ets_bw_config
* @hw: pointer to the hw struct
* @bw_share: Bandwidth share indexed per traffic class
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index d76497566e40..d5662c639c41 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -253,7 +253,6 @@ void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
u8 pfc_en, u8 *prio_tc);
void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc);
-u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw);
void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
u8 *mode, u8 *prio_type);
void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 208c2f0857b6..6cd9da662ae1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -722,7 +722,7 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
dev_info(&pf->pdev->dev, " num MDD=%lld\n",
- vf->num_mdd_events);
+ vf->mdd_tx_events.count + vf->mdd_rx_events.count);
} else {
dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index bce5b76f1e7a..8a7a83f83ee5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -459,6 +459,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
I40E_PRIV_FLAG("vf-vlan-pruning",
I40E_FLAG_VF_VLAN_PRUNING_ENA, 0),
+ I40E_PRIV_FLAG("mdd-auto-reset-vf",
+ I40E_FLAG_MDD_AUTO_RESET_VF, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 0e1d9e2fbf38..65a702668e21 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1666,9 +1666,8 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
* @vsi: VSI to remove from
* @f: the filter to remove from the list
*
- * This function should be called instead of i40e_del_filter only if you know
- * the exact filter you will remove already, such as via i40e_find_filter or
- * i40e_find_mac.
+ * This function requires that you have already found the exact filter you
+ * will remove, such as via i40e_find_filter or i40e_find_mac.
*
* NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
@@ -1698,29 +1697,6 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
}
/**
- * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
- * @vsi: the VSI to be searched
- * @macaddr: the MAC address
- * @vlan: the VLAN
- *
- * NOTE: This function is expected to be called with mac_filter_hash_lock
- * being held.
- * ANOTHER NOTE: This function MUST be called from within the context of
- * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
- * instead of list_for_each_entry().
- **/
-void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
-{
- struct i40e_mac_filter *f;
-
- if (!vsi || !macaddr)
- return;
-
- f = i40e_find_filter(vsi, macaddr, vlan);
- __i40e_del_filter(vsi, f);
-}
-
-/**
* i40e_add_mac_filter - Add a MAC filter for all active VLANs
* @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered
@@ -9629,19 +9605,6 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
}
/**
- * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
- * @pf: board private structure
- **/
-u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
-{
- u32 val, fcnt_prog;
-
- val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
- fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
- return fcnt_prog;
-}
-
-/**
* i40e_get_current_fd_count - Get total FD filters programmed for this PF
* @pf: board private structure
**/
@@ -11217,6 +11180,67 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
}
/**
+ * i40e_print_vf_mdd_event - print VF Tx/Rx malicious driver detect event
+ * @pf: board private structure
+ * @vf: pointer to the VF structure
+ * @is_tx: true - for Tx event, false - for Rx event
+ */
+static void i40e_print_vf_mdd_event(struct i40e_pf *pf, struct i40e_vf *vf,
+ bool is_tx)
+{
+ dev_err(&pf->pdev->dev, is_tx ?
+ "%lld Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vf=%s\n" :
+ "%lld Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vf=%s\n",
+ is_tx ? vf->mdd_tx_events.count : vf->mdd_rx_events.count,
+ pf->hw.pf_id,
+ vf->vf_id,
+ vf->default_lan_addr.addr,
+ str_on_off(test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)));
+}
+
+/**
+ * i40e_print_vfs_mdd_events - print VFs malicious driver detect event
+ * @pf: pointer to the PF structure
+ *
+ * Called from i40e_handle_mdd_event to rate limit and print VFs MDD events.
+ */
+static void i40e_print_vfs_mdd_events(struct i40e_pf *pf)
+{
+ unsigned int i;
+
+ /* check that there are pending MDD events to print */
+ if (!test_and_clear_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state))
+ return;
+
+ if (!__ratelimit(&pf->mdd_message_rate_limit))
+ return;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ struct i40e_vf *vf = &pf->vf[i];
+ bool is_printed = false;
+
+ /* only print Rx MDD event message if there are new events */
+ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+ vf->mdd_rx_events.last_printed = vf->mdd_rx_events.count;
+ i40e_print_vf_mdd_event(pf, vf, false);
+ is_printed = true;
+ }
+
+ /* only print Tx MDD event message if there are new events */
+ if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+ vf->mdd_tx_events.last_printed = vf->mdd_tx_events.count;
+ i40e_print_vf_mdd_event(pf, vf, true);
+ is_printed = true;
+ }
+
+ if (is_printed && !test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF #%d\n",
+ i);
+ }
+}
+
+/**
* i40e_handle_mdd_event
* @pf: pointer to the PF structure
*
@@ -11230,8 +11254,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u32 reg;
int i;
- if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
+ if (!test_and_clear_bit(__I40E_MDD_EVENT_PENDING, pf->state)) {
+ /* Since the VF MDD event logging is rate limited, check if
+ * there are pending MDD events.
+ */
+ i40e_print_vfs_mdd_events(pf);
return;
+ }
/* find what triggered the MDD event */
reg = rd32(hw, I40E_GL_MDET_TX);
@@ -11275,36 +11304,48 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
/* see if one of the VFs needs its hand slapped */
for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ bool is_mdd_on_tx = false;
+ bool is_mdd_on_rx = false;
+
vf = &(pf->vf[i]);
reg = rd32(hw, I40E_VP_MDET_TX(i));
if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+ set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state);
wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
- vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
- i);
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
+ vf->mdd_tx_events.count++;
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ is_mdd_on_tx = true;
}
reg = rd32(hw, I40E_VP_MDET_RX(i));
if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+ set_bit(__I40E_MDD_VF_PRINT_PENDING, pf->state);
wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
- vf->num_mdd_events++;
- dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
- i);
- dev_info(&pf->pdev->dev,
- "Use PF Control I/F to re-enable the VF\n");
+ vf->mdd_rx_events.count++;
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
+ is_mdd_on_rx = true;
+ }
+
+ if ((is_mdd_on_tx || is_mdd_on_rx) &&
+ test_bit(I40E_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
+ /* VF MDD event counters will be cleared by
+ * reset, so print the event prior to reset.
+ */
+ if (is_mdd_on_rx)
+ i40e_print_vf_mdd_event(pf, vf, false);
+ if (is_mdd_on_tx)
+ i40e_print_vf_mdd_event(pf, vf, true);
+
+ i40e_vc_reset_vf(vf, true);
}
}
- /* re-enable mdd interrupt cause */
- clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
i40e_flush(hw);
+
+ i40e_print_vfs_mdd_events(pf);
}
/**
@@ -12614,89 +12655,6 @@ int i40e_set_partition_bw_setting(struct i40e_pf *pf)
}
/**
- * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
- * @pf: board private structure
- **/
-int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
-{
- /* Commit temporary BW setting to permanent NVM image */
- enum i40e_admin_queue_err last_aq_status;
- u16 nvm_word;
- int ret;
-
- if (pf->hw.partition_id != 1) {
- dev_info(&pf->pdev->dev,
- "Commit BW only works on partition 1! This is partition %d",
- pf->hw.partition_id);
- ret = -EOPNOTSUPP;
- goto bw_commit_out;
- }
-
- /* Acquire NVM for read access */
- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
- last_aq_status = pf->hw.aq.asq_last_status;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for read access, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
-
- /* Read word 0x10 of NVM - SW compatibility word 1 */
- ret = i40e_aq_read_nvm(&pf->hw,
- I40E_SR_NVM_CONTROL_WORD,
- 0x10, sizeof(nvm_word), &nvm_word,
- false, NULL);
- /* Save off last admin queue command status before releasing
- * the NVM
- */
- last_aq_status = pf->hw.aq.asq_last_status;
- i40e_release_nvm(&pf->hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
-
- /* Wait a bit for NVM release to complete */
- msleep(50);
-
- /* Acquire NVM for write access */
- ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
- last_aq_status = pf->hw.aq.asq_last_status;
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for write access, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
- goto bw_commit_out;
- }
- /* Write it back out unchanged to initiate update NVM,
- * which will force a write of the shadow (alt) RAM to
- * the NVM - thus storing the bandwidth values permanently.
- */
- ret = i40e_aq_update_nvm(&pf->hw,
- I40E_SR_NVM_CONTROL_WORD,
- 0x10, sizeof(nvm_word),
- &nvm_word, true, 0, NULL);
- /* Save off last admin queue command status before releasing
- * the NVM
- */
- last_aq_status = pf->hw.aq.asq_last_status;
- i40e_release_nvm(&pf->hw);
- if (ret)
- dev_info(&pf->pdev->dev,
- "BW settings NOT SAVED, err %pe aq_err %s\n",
- ERR_PTR(ret),
- i40e_aq_str(&pf->hw, last_aq_status));
-bw_commit_out:
-
- return ret;
-}
-
-/**
* i40e_is_total_port_shutdown_enabled - read NVM and return value
* if total port shutdown feature is enabled for this PF
* @pf: board private structure
@@ -15998,6 +15956,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* VF MDD event logs are rate limited to one-second intervals */
+ ratelimit_state_init(&pf->mdd_message_rate_limit, 1 * HZ, 1);
+
/* Reconfigure hardware for allowing smaller MSS in the case
* of TSO, so that we avoid the MDD being fired and causing
* a reset in the case of small MSS+TSO.
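The one-second policy initialized above uses the kernel's generic ratelimit helper: ratelimit_state_init(&rs, interval, burst) configures the window, and __ratelimit(&rs) returns nonzero while the caller is still within budget. A self-contained sketch of the same policy outside the driver (names are illustrative):

#include <linux/ratelimit.h>

static struct ratelimit_state mdd_rs;

static void mdd_log_init(void)
{
        /* allow a burst of 1 message per 1-second interval */
        ratelimit_state_init(&mdd_rs, 1 * HZ, 1);
}

static void mdd_log_event(void)
{
        if (!__ratelimit(&mdd_rs))
                return; /* suppressed; counters still advance elsewhere */
        pr_info("MDD event\n");
}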
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 5a0699ca7ce5..099bb8ab7d70 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -27,13 +27,6 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
int
-i40e_asq_send_command_v2(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details,
- enum i40e_admin_queue_err *aq_status);
-int
i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -72,8 +65,6 @@ int i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode);
int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 *val);
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
/* admin send queue commands */
@@ -141,9 +132,6 @@ int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
@@ -176,14 +164,6 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
enum i40e_admin_queue_err *aq_status);
-int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free);
-int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free);
int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
@@ -220,9 +200,6 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
-int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
@@ -234,9 +211,6 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
-int
-i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
- struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
bool persist,
struct i40e_asq_cmd_details *cmd_details);
@@ -458,13 +432,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value);
int i40e_write_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
-int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 *value);
-int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-int i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
u16 buff_size, u32 track_id,
u32 *error_offset, u32 *error_info,
@@ -477,20 +445,12 @@ int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
-struct i40e_profile_section_header *
-i40e_find_section_in_profile(u32 section_type,
- struct i40e_profile_segment *profile);
int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-int
-i40e_add_pinfo_to_list(struct i40e_hw *hw,
- struct i40e_profile_segment *profile,
- u8 *profile_info_sec, u32 track_id);
-
/* i40e_ddp */
int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index dfa785e39458..1120f8e4bb67 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -216,7 +216,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
* @notify_vf: notify vf about reset or not
* Reset VF handler.
**/
-static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
+void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
struct i40e_pf *pf = vf->pf;
int i;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 66f95e2f3146..5cf74f16f433 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -64,6 +64,12 @@ struct i40evf_channel {
u64 max_tx_rate; /* bandwidth rate allocation for VSIs */
};
+struct i40e_mdd_vf_events {
+ u64 count; /* total count of Rx|Tx events */
+ /* value of @count at the time of the last printed event */
+ u64 last_printed;
+};
+
/* VF information structure */
struct i40e_vf {
struct i40e_pf *pf;
@@ -92,7 +98,9 @@ struct i40e_vf {
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u8 num_req_queues; /* num of requested qps */
- u64 num_mdd_events; /* num of mdd events detected */
+ /* num of mdd tx and rx events detected */
+ struct i40e_mdd_vf_events mdd_rx_events;
+ struct i40e_mdd_vf_events mdd_tx_events;
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
@@ -120,6 +128,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
+void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf);
bool i40e_reset_vf(struct i40e_vf *vf, bool flr);
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
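The count/last_printed pair introduced above is a simple print-deduplication device: the hot path only bumps count, and the rate-limited printer speaks only when count has moved past the last snapshot. A minimal sketch of the pattern (generic names):

struct event_counter {
        u64 count;        /* bumped in the event path */
        u64 last_printed; /* snapshot taken when a message is emitted */
};

static bool event_needs_print(struct event_counter *ec)
{
        if (ec->count == ec->last_printed)
                return false;   /* nothing new since the last message */
        ec->last_printed = ec->count;
        return true;
}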
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 4e885df789ef..e28f1905a4a0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -395,32 +395,6 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
WARN_ON_ONCE(1);
}
-static int
-i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
- struct xdp_buff *xdp, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
- if (!xdp_buff_has_frags(first)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(first);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- xsk_buff_free(first);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start),
- XDP_PACKET_HEADROOM, size);
- sinfo->xdp_frags_size += size;
- xsk_buff_add_frag(xdp);
-
- return 0;
-}
-
/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
@@ -486,8 +460,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
if (!first)
first = bi;
- else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
+ else if (!xsk_buff_add_frag(first, bi)) {
+ xsk_buff_free(first);
break;
+ }
if (++next_to_process == count)
next_to_process = 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index a9e54866ae6b..cbfaaa5b7d02 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1180,7 +1180,7 @@ static void iavf_napi_enable_all(struct iavf_adapter *adapter)
q_vector = &adapter->q_vectors[q_idx];
napi = &q_vector->napi;
- napi_enable(napi);
+ napi_enable_locked(napi);
}
}
@@ -1196,7 +1196,7 @@ static void iavf_napi_disable_all(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = &adapter->q_vectors[q_idx];
- napi_disable(&q_vector->napi);
+ napi_disable_locked(&q_vector->napi);
}
}
@@ -1800,8 +1800,8 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
q_vector->v_idx = q_idx;
q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
- netif_napi_add(adapter->netdev, &q_vector->napi,
- iavf_napi_poll);
+ netif_napi_add_locked(adapter->netdev, &q_vector->napi,
+ iavf_napi_poll);
}
return 0;
@@ -1827,7 +1827,7 @@ static void iavf_free_q_vectors(struct iavf_adapter *adapter)
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
- netif_napi_del(&q_vector->napi);
+ netif_napi_del_locked(&q_vector->napi);
}
kfree(adapter->q_vectors);
adapter->q_vectors = NULL;
@@ -1968,6 +1968,7 @@ err:
static void iavf_finish_config(struct work_struct *work)
{
struct iavf_adapter *adapter;
+ bool netdev_released = false;
int pairs, err;
adapter = container_of(work, struct iavf_adapter, finish_config);
@@ -1976,7 +1977,7 @@ static void iavf_finish_config(struct work_struct *work)
* The dev->lock is needed to update the queue number
*/
rtnl_lock();
- mutex_lock(&adapter->netdev->lock);
+ netdev_lock(adapter->netdev);
mutex_lock(&adapter->crit_lock);
if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
@@ -1988,7 +1989,16 @@ static void iavf_finish_config(struct work_struct *work)
switch (adapter->state) {
case __IAVF_DOWN:
+ /* Set the real number of queues when reset occurs while
+ * state == __IAVF_DOWN
+ */
+ pairs = adapter->num_active_queues;
+ netif_set_real_num_rx_queues(adapter->netdev, pairs);
+ netif_set_real_num_tx_queues(adapter->netdev, pairs);
+
if (adapter->netdev->reg_state != NETREG_REGISTERED) {
+ netdev_unlock(adapter->netdev);
+ netdev_released = true;
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
@@ -2003,11 +2013,7 @@ static void iavf_finish_config(struct work_struct *work)
goto out;
}
}
-
- /* Set the real number of queues when reset occurs while
- * state == __IAVF_DOWN
- */
- fallthrough;
+ break;
case __IAVF_RUNNING:
pairs = adapter->num_active_queues;
netif_set_real_num_rx_queues(adapter->netdev, pairs);
@@ -2020,7 +2026,8 @@ static void iavf_finish_config(struct work_struct *work)
out:
mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&adapter->netdev->lock);
+ if (!netdev_released)
+ netdev_unlock(adapter->netdev);
rtnl_unlock();
}
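The iavf_finish_config() rework above also pins down a locking order worth stating explicitly: rtnl_lock first, then the per-device instance lock via netdev_lock(), then the adapter's crit_lock — and the instance lock is dropped before register_netdevice(), which this patch appears to assume acquires it internally. A hedged sketch of the nesting:

rtnl_lock();
netdev_lock(netdev);                    /* per-netdev instance lock */
mutex_lock(&adapter->crit_lock);
/* ... reconfigure queue counts, update state ... */
mutex_unlock(&adapter->crit_lock);
netdev_unlock(netdev);
rtnl_unlock();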
@@ -2713,12 +2720,16 @@ static void iavf_watchdog_task(struct work_struct *work)
struct iavf_adapter *adapter = container_of(work,
struct iavf_adapter,
watchdog_task.work);
+ struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
+ netdev_lock(netdev);
if (!mutex_trylock(&adapter->crit_lock)) {
- if (adapter->state == __IAVF_REMOVE)
+ if (adapter->state == __IAVF_REMOVE) {
+ netdev_unlock(netdev);
return;
+ }
goto restart_watchdog;
}
@@ -2730,30 +2741,35 @@ static void iavf_watchdog_task(struct work_struct *work)
case __IAVF_STARTUP:
iavf_startup(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_VERSION_CHECK:
iavf_init_version_check(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_GET_RESOURCES:
iavf_init_get_resources(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_EXTENDED_CAPS:
iavf_init_process_extended_caps(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_CONFIG_ADAPTER:
iavf_init_config_adapter(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
@@ -2765,6 +2781,7 @@ static void iavf_watchdog_task(struct work_struct *work)
* as it can loop forever
*/
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
@@ -2773,6 +2790,7 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
iavf_shutdown_adminq(hw);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq,
&adapter->watchdog_task, (5 * HZ));
return;
@@ -2780,6 +2798,7 @@ static void iavf_watchdog_task(struct work_struct *work)
/* Try again from failed step*/
iavf_change_state(adapter, adapter->last_state);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
return;
case __IAVF_COMM_FAILED:
@@ -2792,6 +2811,7 @@ static void iavf_watchdog_task(struct work_struct *work)
iavf_change_state(adapter, __IAVF_INIT_FAILED);
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
@@ -2811,12 +2831,14 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
return;
case __IAVF_RESETTING:
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
HZ * 2);
return;
@@ -2847,6 +2869,7 @@ static void iavf_watchdog_task(struct work_struct *work)
case __IAVF_REMOVE:
default:
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return;
}
@@ -2858,12 +2881,14 @@ static void iavf_watchdog_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
queue_delayed_work(adapter->wq,
&adapter->watchdog_task, HZ * 2);
return;
}
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
restart_watchdog:
if (adapter->state >= __IAVF_DOWN)
queue_work(adapter->wq, &adapter->adminq_task);
@@ -2990,12 +3015,12 @@ static void iavf_reset_task(struct work_struct *work)
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
- mutex_lock(&netdev->lock);
+ netdev_lock(netdev);
if (!mutex_trylock(&adapter->crit_lock)) {
if (adapter->state != __IAVF_REMOVE)
queue_work(adapter->wq, &adapter->reset_task);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return;
}
@@ -3043,7 +3068,7 @@ static void iavf_reset_task(struct work_struct *work)
reg_val);
iavf_disable_vf(adapter);
mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -3184,7 +3209,7 @@ continue_reset:
wake_up(&adapter->reset_waitqueue);
mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return;
reset_err:
@@ -3195,7 +3220,7 @@ reset_err:
iavf_disable_vf(adapter);
mutex_unlock(&adapter->crit_lock);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
}
@@ -3667,10 +3692,10 @@ exit:
if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return 0;
- mutex_lock(&netdev->lock);
+ netdev_lock(netdev);
netif_set_real_num_rx_queues(netdev, total_qps);
netif_set_real_num_tx_queues(netdev, total_qps);
- mutex_unlock(&netdev->lock);
+ netdev_unlock(netdev);
return ret;
}
@@ -4340,14 +4365,17 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
+ netdev_lock(netdev);
while (!mutex_trylock(&adapter->crit_lock)) {
/* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
* is already taken and iavf_open is called from an upper
* device's notifier reacting on NETDEV_REGISTER event.
* We have to leave here to avoid dead lock.
*/
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) {
+ netdev_unlock(netdev);
return -EBUSY;
+ }
usleep_range(500, 1000);
}
@@ -4396,6 +4424,7 @@ static int iavf_open(struct net_device *netdev)
iavf_irq_enable(adapter, true);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return 0;
@@ -4408,6 +4437,7 @@ err_setup_tx:
iavf_free_all_tx_resources(adapter);
err_unlock:
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return err;
}
@@ -4429,10 +4459,12 @@ static int iavf_close(struct net_device *netdev)
u64 aq_to_restore;
int status;
+ netdev_lock(netdev);
mutex_lock(&adapter->crit_lock);
if (adapter->state <= __IAVF_DOWN_PENDING) {
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return 0;
}
@@ -4466,6 +4498,7 @@ static int iavf_close(struct net_device *netdev)
iavf_free_traffic_irqs(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
/* We explicitly don't free resources here because the hardware is
* still active and can DMA into memory. Resources are cleared in
@@ -5342,6 +5375,7 @@ static int iavf_suspend(struct device *dev_d)
netif_device_detach(netdev);
+ netdev_lock(netdev);
mutex_lock(&adapter->crit_lock);
if (netif_running(netdev)) {
@@ -5353,6 +5387,7 @@ static int iavf_suspend(struct device *dev_d)
iavf_reset_interrupt_capability(adapter);
mutex_unlock(&adapter->crit_lock);
+ netdev_unlock(netdev);
return 0;
}
@@ -5451,6 +5486,7 @@ static void iavf_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ netdev_lock(netdev);
mutex_lock(&adapter->crit_lock);
dev_info(&adapter->pdev->dev, "Removing device\n");
iavf_change_state(adapter, __IAVF_REMOVE);
@@ -5487,6 +5523,7 @@ static void iavf_remove(struct pci_dev *pdev)
mutex_destroy(&hw->aq.asq_mutex);
mutex_unlock(&adapter->crit_lock);
mutex_destroy(&adapter->crit_lock);
+ netdev_unlock(netdev);
iounmap(hw->hw_addr);
pci_release_regions(pdev);
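
Taken together, the iavf changes above swap the open-coded mutex_lock(&netdev->lock) calls for the netdev instance-lock helpers and move NAPI add/del and enable/disable to the _locked variants, which expect the caller to already hold that lock. A minimal sketch of the resulting pattern (adapter layout illustrative, using only helpers that appear in the diff):

	static void drv_restart_napi(struct my_adapter *adapter)
	{
		struct net_device *netdev = adapter->netdev;

		netdev_lock(netdev);		/* instance lock, not RTNL */
		mutex_lock(&adapter->crit_lock);

		napi_disable_locked(&adapter->q_vectors[0].napi);
		/* ... reconfigure rings ... */
		napi_enable_locked(&adapter->q_vectors[0].napi);

		mutex_unlock(&adapter->crit_lock);
		netdev_unlock(netdev);
	}

Note how iavf_finish_config() has to drop the instance lock (tracked by the netdev_released flag) before calling register_netdevice(), which otherwise could take the same lock on its own; the flag keeps the unlock path balanced on every exit.
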
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 3307d551f431..9e0d9f710441 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -32,7 +32,8 @@ ice-y := ice_main.o \
ice_parser_rt.o \
ice_idc.o \
devlink/devlink.o \
- devlink/devlink_port.o \
+ devlink/health.o \
+ devlink/port.o \
ice_sf_eth.o \
ice_sf_vsi_vlan_ops.o \
ice_ddp.o \
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 415445cefdb2..d1b9ccec5e05 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -6,7 +6,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "devlink.h"
-#include "devlink_port.h"
+#include "port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
@@ -1207,9 +1207,15 @@ static int ice_devlink_reinit_up(struct ice_pf *pf)
struct ice_vsi *vsi = ice_get_main_vsi(pf);
int err;
+ err = ice_init_hw(&pf->hw);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
err = ice_init_dev(pf);
if (err)
- return err;
+ goto unroll_hw_init;
vsi->flags = ICE_VSI_FLAG_INIT;
@@ -1232,6 +1238,8 @@ err_load:
rtnl_unlock();
err_vsi_cfg:
ice_deinit_dev(pf);
+unroll_hw_init:
+ ice_deinit_hw(&pf->hw);
return err;
}
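
The reinit hunk above follows the usual staged-init unwind: the newly added ice_init_hw() call gains a matching unroll_hw_init label, so any later failure tears the stages down in reverse order. The general shape, with hypothetical stand-in stages:

	/* Illustrative only: the stub stages stand in for the real
	 * init/deinit calls; each stage added at the top gets a
	 * cleanup label at the bottom.
	 */
	struct demo_pf { int dummy; };
	static int init_hw(struct demo_pf *pf) { return 0; }
	static void deinit_hw(struct demo_pf *pf) { }
	static int init_dev(struct demo_pf *pf) { return 0; }

	static int demo_reinit(struct demo_pf *pf)
	{
		int err;

		err = init_hw(pf);
		if (err)
			return err;		/* nothing to unwind yet */

		err = init_dev(pf);
		if (err)
			goto unroll_hw_init;	/* undo in reverse order */

		return 0;

	unroll_hw_init:
		deinit_hw(pf);
		return err;
	}
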
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c
new file mode 100644
index 000000000000..ea40f7941259
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_adminq_cmd.h" /* for enum ice_aqc_health_status_elem */
+#include "health.h"
+
+#define ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, obj, name) \
+ devlink_fmsg_put(fmsg, #name, (obj)->name)
+
+#define ICE_HEALTH_STATUS_DATA_SIZE 2
+
+struct ice_health_status {
+ enum ice_aqc_health_status code;
+ const char *description;
+ const char *solution;
+ const char *data_label[ICE_HEALTH_STATUS_DATA_SIZE];
+};
+
+/*
+ * In addition to the health status codes provided below, the firmware might
+ * generate Health Status Codes that are not pertinent to the end-user.
+ * For instance, Health Code 0x1002 is triggered when an admin queue command
+ * fails. Such codes should be disregarded by the end-user.
+ * The lookup table below must be kept sorted by code.
+ */
+
+static const char *const ice_common_port_solutions =
+ "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.";
+static const char *const ice_port_number_label = "Port Number";
+static const char *const ice_update_nvm_solution = "Update to the latest NVM image.";
+
+static const struct ice_health_status ice_health_status_lookup[] = {
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE, "Module type is not supported.",
+ "Change or replace the module or cable.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL, "Module is not qualified.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM,
+	 "Device cannot communicate with the module.",
+	 ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT, "Unresolved module conflict.",
+ "Manually set speed/duplex or change the port option. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT, "Module is not present.",
+ "Check that the module is inserted correctly. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED, "Underutilized module.",
+ "Change or replace the module or cable. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG, "Invalid link configuration.",
+ NULL, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS, "Port hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE, "A port is unreachable.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED, "Port speed is limited due to module.",
+ "Change the module or configure the port option to match the current module speed. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT,
+ "All configured link modes were attempted but failed to establish link. The device will restart the process to establish link.",
+ "Check link partner connection and configuration.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED,
+ "Port speed is limited by PHY capabilities.",
+ "Change the module to align to port option.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO, "LOM topology netlist is corrupted.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST, "Unrecoverable netlist error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT, "Port topology conflict.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS, "Unrecoverable hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME, "Unrecoverable runtime error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT, "Link management engine failed to initialize.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD,
+ "Failed to load the firmware image in the external PHY.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_RECOVERY, "The device is in firmware recovery mode.",
+ ice_update_nvm_solution, {"Extended Error"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS, "The flash chip cannot be accessed.",
+ "If issue persists, call customer support.", {"Access Type"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH, "NVM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH, "Option ROM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH, "DDP package authentication failed.",
+ "Update to latest base driver and DDP package."},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT, "NVM image is incompatible.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT, "Option ROM is incompatible.",
+ ice_update_nvm_solution, {"Expected PCI Device ID", "Expected Module ID"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB,
+ "Supplied MIB file is invalid. DCB reverted to default configuration.",
+ "Disable FW-LLDP and check DCBx system configuration.",
+ {ice_port_number_label, "MIB ID"}},
+};
+
+static int ice_health_status_lookup_compare(const void *a, const void *b)
+{
+ return ((struct ice_health_status *)a)->code - ((struct ice_health_status *)b)->code;
+}
+
+static const struct ice_health_status *ice_get_health_status(u16 code)
+{
+ struct ice_health_status key = { .code = code };
+
+ return bsearch(&key, ice_health_status_lookup, ARRAY_SIZE(ice_health_status_lookup),
+ sizeof(struct ice_health_status), ice_health_status_lookup_compare);
+}
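
Because ice_get_health_status() binary-searches the table, lookups are O(log n) but silently return NULL if ice_health_status_lookup[] ever falls out of ascending code order, which is why the comment above the table insists on it. A userspace-style sketch of the same comparator and lookup, as one might unit-test it (hypothetical, not driver code):

	#include <stdlib.h>

	struct status { unsigned short code; const char *desc; };

	static int cmp(const void *a, const void *b)
	{
		/* u16 codes promoted to int: the subtraction cannot overflow */
		return ((const struct status *)a)->code -
		       ((const struct status *)b)->code;
	}

	static const struct status *find(const struct status *tbl, size_t n,
					 unsigned short code)
	{
		struct status key = { .code = code };

		return bsearch(&key, tbl, n, sizeof(*tbl), cmp);
	}
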
+
+static void ice_describe_status_code(struct devlink_fmsg *fmsg,
+ struct ice_aqc_health_status_elem *hse)
+{
+ static const char *const aux_label[] = { "Aux Data 1", "Aux Data 2" };
+ const struct ice_health_status *health_code;
+ u32 internal_data[2];
+ u16 status_code;
+
+ status_code = le16_to_cpu(hse->health_status_code);
+
+ devlink_fmsg_put(fmsg, "Syndrome", status_code);
+ if (status_code) {
+ internal_data[0] = le32_to_cpu(hse->internal_data1);
+ internal_data[1] = le32_to_cpu(hse->internal_data2);
+
+ health_code = ice_get_health_status(status_code);
+ if (!health_code)
+ return;
+
+ devlink_fmsg_string_pair_put(fmsg, "Description", health_code->description);
+ if (health_code->solution)
+ devlink_fmsg_string_pair_put(fmsg, "Possible Solution",
+ health_code->solution);
+
+ for (size_t i = 0; i < ICE_HEALTH_STATUS_DATA_SIZE; i++) {
+ if (internal_data[i] != ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA)
+ devlink_fmsg_u32_pair_put(fmsg,
+ health_code->data_label[i] ?
+ health_code->data_label[i] :
+ aux_label[i],
+ internal_data[i]);
+ }
+ }
+}
+
+static int
+ice_port_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_port_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack __always_unused *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
+
+static void ice_config_health_events(struct ice_pf *pf, bool enable)
+{
+ u8 enable_bits = 0;
+ int ret;
+
+ if (enable)
+ enable_bits = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK |
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK;
+
+ ret = ice_aq_set_health_status_cfg(&pf->hw, enable_bits);
+ if (ret)
+ dev_err(ice_pf_to_dev(pf), "Failed to %s firmware health events, err %d aq_err %s\n",
+ str_enable_disable(enable), ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status));
+}
+
+/**
+ * ice_process_health_status_event - Process the health status event from FW
+ * @pf: pointer to the PF structure
+ * @event: event structure containing the Health Status Event opcode
+ *
+ * Decode the Health Status Events and print the associated messages
+ */
+void ice_process_health_status_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ const struct ice_aqc_health_status_elem *health_info;
+ u16 count;
+
+ health_info = (struct ice_aqc_health_status_elem *)event->msg_buf;
+ count = le16_to_cpu(event->desc.params.get_health_status.health_status_count);
+
+ if (count > (event->buf_len / sizeof(*health_info))) {
+ dev_err(ice_pf_to_dev(pf), "Received a health status event with invalid element count\n");
+ return;
+ }
+
+ for (size_t i = 0; i < count; i++) {
+ const struct ice_health_status *health_code;
+ u16 status_code;
+
+ status_code = le16_to_cpu(health_info->health_status_code);
+ health_code = ice_get_health_status(status_code);
+
+ if (health_code) {
+ switch (le16_to_cpu(health_info->event_source)) {
+ case ICE_AQC_HEALTH_STATUS_GLOBAL:
+ pf->health_reporters.fw_status = *health_info;
+ devlink_health_report(pf->health_reporters.fw,
+ "FW syndrome reported", NULL);
+ break;
+ case ICE_AQC_HEALTH_STATUS_PF:
+ case ICE_AQC_HEALTH_STATUS_PORT:
+ pf->health_reporters.port_status = *health_info;
+ devlink_health_report(pf->health_reporters.port,
+ "Port syndrome reported", NULL);
+ break;
+ default:
+ dev_err(ice_pf_to_dev(pf), "Health code with unknown source\n");
+ }
+ } else {
+ u32 data1, data2;
+ u16 source;
+
+ source = le16_to_cpu(health_info->event_source);
+ data1 = le32_to_cpu(health_info->internal_data1);
+ data2 = le32_to_cpu(health_info->internal_data2);
+ dev_dbg(ice_pf_to_dev(pf),
+				"Received internal health status code 0x%08x, source: 0x%08x, data1: 0x%08x, data2: 0x%08x\n",
+ status_code, source, data1, data2);
+ }
+ health_info++;
+ }
+}
+
+/**
+ * ice_devlink_health_report - boilerplate to call given @reporter
+ *
+ * @reporter: devlink health reporter to call, do nothing on NULL
+ * @msg: message to pass up, "event name" is fine
+ * @priv_ctx: typically some event struct
+ */
+static void ice_devlink_health_report(struct devlink_health_reporter *reporter,
+ const char *msg, void *priv_ctx)
+{
+ if (!reporter)
+ return;
+
+	/* We do not do auto recovery, so the return value of the function
+	 * below is always 0; ignore it.
+	 */
+ devlink_health_report(reporter, msg, priv_ctx);
+}
+
+struct ice_mdd_event {
+ enum ice_mdd_src src;
+ u16 vf_num;
+ u16 queue;
+ u8 pf_num;
+ u8 event;
+};
+
+static const char *ice_mdd_src_to_str(enum ice_mdd_src src)
+{
+ switch (src) {
+ case ICE_MDD_SRC_TX_PQM:
+ return "tx_pqm";
+ case ICE_MDD_SRC_TX_TCLAN:
+ return "tx_tclan";
+ case ICE_MDD_SRC_TX_TDPU:
+ return "tx_tdpu";
+ case ICE_MDD_SRC_RX:
+ return "rx";
+ default:
+ return "invalid";
+ }
+}
+
+static int
+ice_mdd_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_mdd_event *mdd_event = priv_ctx;
+ const char *src;
+
+ if (!mdd_event)
+ return 0;
+
+ src = ice_mdd_src_to_str(mdd_event->src);
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_put(fmsg, "src", src);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, pf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, vf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, event);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, queue);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
+
+/**
+ * ice_report_mdd_event - Report an MDD event through devlink health
+ * @pf: the PF device structure
+ * @src: the HW block that was the source of this MDD event
+ * @pf_num: the pf_num on which the MDD event occurred
+ * @vf_num: the vf_num on which the MDD event occurred
+ * @event: the event type of the MDD event
+ * @queue: the queue on which the MDD event occurred
+ *
+ * Report an MDD event that has occurred on this PF.
+ */
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue)
+{
+ struct ice_mdd_event ev = {
+ .src = src,
+ .pf_num = pf_num,
+ .vf_num = vf_num,
+ .event = event,
+ .queue = queue,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.mdd, "MDD event", &ev);
+}
+
+/**
+ * ice_fmsg_put_ptr - put hex value of pointer into fmsg
+ *
+ * @fmsg: devlink fmsg under construction
+ * @name: name to pass
+ * @ptr: 64 bit value to print as hex and put into fmsg
+ */
+static void ice_fmsg_put_ptr(struct devlink_fmsg *fmsg, const char *name,
+ void *ptr)
+{
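+	/* room for "0x", 16 hex digits and the terminating NUL */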
+ char buf[sizeof(ptr) * 3];
+
+ sprintf(buf, "%p", ptr);
+ devlink_fmsg_put(fmsg, name, buf);
+}
+
+struct ice_tx_hang_event {
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ u16 queue;
+ u16 next_to_clean;
+ u16 next_to_use;
+ struct ice_tx_ring *tx_ring;
+};
+
+static int ice_tx_hang_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_tx_hang_event *event = priv_ctx;
+ struct sk_buff *skb;
+
+ if (!event)
+ return 0;
+
+ skb = event->tx_ring->tx_buf->skb;
+ devlink_fmsg_obj_nest_start(fmsg);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, head);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, intr);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, vsi_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, queue);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_clean);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_use);
+ devlink_fmsg_put(fmsg, "irq-mapping", event->tx_ring->q_vector->name);
+ ice_fmsg_put_ptr(fmsg, "desc-ptr", event->tx_ring->desc);
+ ice_fmsg_put_ptr(fmsg, "dma-ptr", (void *)(long)event->tx_ring->dma);
+ ice_fmsg_put_ptr(fmsg, "skb-ptr", skb);
+ devlink_fmsg_binary_pair_put(fmsg, "desc", event->tx_ring->desc,
+ event->tx_ring->count * sizeof(struct ice_tx_desc));
+ devlink_fmsg_dump_skb(fmsg, skb);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
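
ICE_DEVLINK_FMSG_PUT_FIELD() combines the stringizing operator with the type-generic devlink_fmsg_put() helper, so the fmsg key always matches the struct member name. The head line above, for instance, expands roughly to (illustrative):

	/* ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, head) becomes: */
	devlink_fmsg_put(fmsg, "head", (event)->head);
	/* ...and, head being a u32, the _Generic dispatch resolves to
	 * devlink_fmsg_u32_pair_put(fmsg, "head", (event)->head);
	 */
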
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+
+ buf->tx_ring = tx_ring;
+ buf->vsi_num = vsi_num;
+ buf->head = head;
+ buf->intr = intr;
+}
+
+void ice_report_tx_hang(struct ice_pf *pf)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+ struct ice_tx_ring *tx_ring = buf->tx_ring;
+
+ struct ice_tx_hang_event ev = {
+ .head = buf->head,
+ .intr = buf->intr,
+ .vsi_num = buf->vsi_num,
+ .queue = tx_ring->q_index,
+ .next_to_clean = tx_ring->next_to_clean,
+ .next_to_use = tx_ring->next_to_use,
+ .tx_ring = tx_ring,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.tx_hang, "Tx hang", &ev);
+}
+
+static struct devlink_health_reporter *
+ice_init_devlink_rep(struct ice_pf *pf,
+ const struct devlink_health_reporter_ops *ops)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct devlink_health_reporter *rep;
+ const u64 graceful_period = 0;
+
+ rep = devl_health_reporter_create(devlink, ops, graceful_period, pf);
+ if (IS_ERR(rep)) {
+ struct device *dev = ice_pf_to_dev(pf);
+
+		dev_err(dev, "failed to create devlink %s health reporter\n",
+			ops->name);
+ return NULL;
+ }
+ return rep;
+}
+
+#define ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field) \
+ ._field = ice_##_name##_reporter_##_field,
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_1(_name, _field1) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ }
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_2(_name, _field1, _field2) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field2) \
+ }
+
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(mdd, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(tx_hang, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(fw, dump, diagnose);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(port, dump, diagnose);
+
+/**
+ * ice_health_init - allocate and init all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_init(struct ice_pf *pf)
+{
+ struct ice_health *reps = &pf->health_reporters;
+
+ reps->mdd = ice_init_devlink_rep(pf, &ice_mdd_reporter_ops);
+ reps->tx_hang = ice_init_devlink_rep(pf, &ice_tx_hang_reporter_ops);
+
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ reps->fw = ice_init_devlink_rep(pf, &ice_fw_reporter_ops);
+ reps->port = ice_init_devlink_rep(pf, &ice_port_reporter_ops);
+ ice_config_health_events(pf, true);
+ }
+}
+
+/**
+ * ice_deinit_devl_reporter - destroy given devlink health reporter
+ * @reporter: reporter to destroy
+ */
+static void ice_deinit_devl_reporter(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devl_health_reporter_destroy(reporter);
+}
+
+/**
+ * ice_health_deinit - deallocate all ice devlink health reporters and
+ * accompanying data
+ *
+ * @pf: PF struct
+ */
+void ice_health_deinit(struct ice_pf *pf)
+{
+ ice_deinit_devl_reporter(pf->health_reporters.mdd);
+ ice_deinit_devl_reporter(pf->health_reporters.tx_hang);
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ ice_deinit_devl_reporter(pf->health_reporters.fw);
+ ice_deinit_devl_reporter(pf->health_reporters.port);
+ ice_config_health_events(pf, false);
+ }
+}
+
+static
+void ice_health_assign_healthy_state(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devlink_health_reporter_state_update(reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+}
+
+/**
+ * ice_health_clear - clear devlink health issues after a reset
+ * @pf: the PF device structure
+ *
+ * Mark the PF in healthy state again after a reset has completed.
+ */
+void ice_health_clear(struct ice_pf *pf)
+{
+ ice_health_assign_healthy_state(pf->health_reporters.mdd);
+ ice_health_assign_healthy_state(pf->health_reporters.tx_hang);
+}
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.h b/drivers/net/ethernet/intel/ice/devlink/health.h
new file mode 100644
index 000000000000..5edfc4d2adce
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _HEALTH_H_
+#define _HEALTH_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: health.h
+ *
+ * This header file declares everything needed by the devlink health
+ * mechanism of the ice driver.
+ */
+
+struct ice_aqc_health_status_elem;
+struct ice_pf;
+struct ice_tx_ring;
+struct ice_rq_event_info;
+
+enum ice_mdd_src {
+ ICE_MDD_SRC_TX_PQM,
+ ICE_MDD_SRC_TX_TCLAN,
+ ICE_MDD_SRC_TX_TDPU,
+ ICE_MDD_SRC_RX,
+};
+
+/**
+ * struct ice_health - stores ice devlink health reporters and accompanying data
+ * @fw: devlink health reporter for FW Health Status events
+ * @mdd: devlink health reporter for MDD detection event
+ * @port: devlink health reporter for Port Health Status events
+ * @tx_hang: devlink health reporter for tx_hang event
+ * @tx_hang_buf: pre-allocated place to put info for Tx hang reporter from
+ * non-sleeping context
+ * @tx_ring: ring that the hang occurred on
+ * @head: descriptor head
+ * @intr: interrupt register value
+ * @vsi_num: VSI owning the queue that the hang occurred on
+ * @fw_status: buffer for last received FW Status event
+ * @port_status: buffer for last received Port Status event
+ */
+struct ice_health {
+ struct devlink_health_reporter *fw;
+ struct devlink_health_reporter *mdd;
+ struct devlink_health_reporter *port;
+ struct devlink_health_reporter *tx_hang;
+ struct_group_tagged(ice_health_tx_hang_buf, tx_hang_buf,
+ struct ice_tx_ring *tx_ring;
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ );
+ struct ice_aqc_health_status_elem fw_status;
+ struct ice_aqc_health_status_elem port_status;
+};
+
+void ice_process_health_status_event(struct ice_pf *pf,
+ struct ice_rq_event_info *event);
+
+void ice_health_init(struct ice_pf *pf);
+void ice_health_deinit(struct ice_pf *pf);
+void ice_health_clear(struct ice_pf *pf);
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr);
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue);
+void ice_report_tx_hang(struct ice_pf *pf);
+
+#endif /* _HEALTH_H_ */
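
The struct_group_tagged() above gives the four Tx-hang members a second identity as struct ice_health_tx_hang_buf while keeping them directly addressable from struct ice_health; that is what lets ice_prep_tx_hang_report() fill the whole group through one pointer. Its approximate expansion, following the <linux/stddef.h> definition:

	/* approximate expansion of the struct_group_tagged() use above */
	union {
		struct {
			struct ice_tx_ring *tx_ring;
			u32 head;
			u32 intr;
			u16 vsi_num;
		};
		struct ice_health_tx_hang_buf {
			struct ice_tx_ring *tx_ring;
			u32 head;
			u32 intr;
			u16 vsi_num;
		} tx_hang_buf;
	};
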
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/port.c
index c6779d9dffff..767419a67fef 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/port.c
@@ -5,7 +5,7 @@
#include "ice.h"
#include "devlink.h"
-#include "devlink_port.h"
+#include "port.h"
#include "ice_lib.h"
#include "ice_fltr.h"
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/port.h
index d60efc340945..d60efc340945 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
+++ b/drivers/net/ethernet/intel/ice/devlink/port.h
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2f5d6f974185..71e05d30f0fd 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -78,6 +78,7 @@
#include "ice_irq.h"
#include "ice_dpll.h"
#include "ice_adapter.h"
+#include "devlink/health.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -665,6 +666,7 @@ struct ice_pf {
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
struct ice_dplls dplls;
struct device *hwmon_dev;
+ struct ice_health health_reporters;
u8 num_quanta_prof_used;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 46f9726d9a8a..d28ced107e34 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -12,6 +12,13 @@
#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
+#define ICE_RXQ_CTX_SIZE_DWORDS 8
+#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TXQ_CTX_SZ 22
+
+typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t;
+typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t;
+
struct ice_aqc_generic {
__le32 param0;
__le32 param1;
@@ -2085,10 +2092,10 @@ struct ice_aqc_add_txqs_perq {
__le16 txq_id;
u8 rsvd[2];
__le32 q_teid;
- u8 txq_ctx[22];
+ ice_txq_ctx_buf_t txq_ctx;
u8 rsvd2[2];
struct ice_aqc_txsched_elem info;
-};
+} __packed;
/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
* is an array of the following structs. Please note that the length of
@@ -2511,6 +2518,87 @@ enum ice_aqc_fw_logging_mod {
ICE_AQC_FW_LOG_ID_MAX,
};
+enum ice_aqc_health_status_mask {
+ ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK = BIT(0),
+ ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK = BIT(1),
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK = BIT(2),
+};
+
+/* Set Health Status (direct 0xFF20) */
+struct ice_aqc_set_health_status_cfg {
+ u8 event_source;
+ u8 reserved[15];
+};
+
+enum ice_aqc_health_status {
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT = 0x101,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE = 0x102,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL = 0x103,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM = 0x104,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT = 0x105,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT = 0x106,
+ ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED = 0x107,
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT = 0x108,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE = 0x109,
+ ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG = 0x10B,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS = 0x10C,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE = 0x10D,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED = 0x10F,
+ ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT = 0x110,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED = 0x111,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO = 0x112,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST = 0x113,
+ ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT = 0x114,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS = 0x115,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME = 0x116,
+ ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT = 0x117,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_NVM_PROG = 0x120,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD = 0x121,
+ ICE_AQC_HEALTH_STATUS_INFO_RECOVERY = 0x500,
+ ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS = 0x501,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH = 0x502,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH = 0x503,
+ ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH = 0x504,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT = 0x505,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT = 0x506,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION = 0x507,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION = 0x508,
+ ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB = 0x509,
+ ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT = 0x50A,
+ ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET = 0x50B,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL = 0x50C,
+ ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL = 0x50D,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP = 0x1000,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL = 0x1001,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ = 0x1002,
+};
+
+/* Get Health Status (indirect 0xFF22) */
+struct ice_aqc_get_health_status {
+ __le16 health_status_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+enum ice_aqc_health_status_scope {
+ ICE_AQC_HEALTH_STATUS_PF = 0x1,
+ ICE_AQC_HEALTH_STATUS_PORT = 0x2,
+ ICE_AQC_HEALTH_STATUS_GLOBAL = 0x3,
+};
+
+#define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA 0xDEADBEEF
+
+/* Get Health Status event buffer entry (0xFF22),
+ * repeated per reported health status.
+ */
+struct ice_aqc_health_status_elem {
+ __le16 health_status_code;
+ __le16 event_source;
+ __le32 internal_data1;
+ __le32 internal_data2;
+};
+
/* Set FW Logging configuration (indirect 0xFF30)
* Register for FW Logging (indirect 0xFF31)
* Query FW Logging (indirect 0xFF32)
@@ -2651,6 +2739,8 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
+ struct ice_aqc_set_health_status_cfg set_health_status_cfg;
+ struct ice_aqc_get_health_status get_health_status;
struct ice_aqc_dnl_call_command dnl_call;
struct ice_aqc_i2c read_write_i2c;
struct ice_aqc_read_i2c_resp read_i2c_resp;
@@ -2853,6 +2943,10 @@ enum ice_adminq_opc {
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
+ /* System Diagnostic commands */
+ ice_aqc_opc_set_health_status_cfg = 0xFF20,
+ ice_aqc_opc_get_health_status = 0xFF22,
+
/* FW Logging Commands */
ice_aqc_opc_fw_logs_config = 0xFF30,
ice_aqc_opc_fw_logs_register = 0xFF31,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 82a9cd4ec7ae..b2af8e3586f7 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -454,6 +454,9 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
+ /* Enable descriptor prefetch */
+ rlan_ctx.prefena = 1;
+
/* PF acts as uplink for switchdev; set flex descriptor with src_vsi
* metadata and flags to allow redirecting to PR netdev
*/
@@ -910,8 +913,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
+ ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx);
/* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell.
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 532024f34ce4..7a2a2e8da8fa 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -6,6 +6,7 @@
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"
+#include <linux/packing.h>
#define ICE_PF_RESET_WAIT_COUNT 300
#define ICE_MAX_NETLIST_SIZE 10
@@ -308,6 +309,42 @@ bool ice_is_e825c(struct ice_hw *hw)
}
/**
+ * ice_is_pf_c827 - check if the PF uses a C827 PHY
+ * @hw: pointer to the hw struct
+ *
+ * Return: true if the device has a C827 PHY.
+ */
+static bool ice_is_pf_c827(struct ice_hw *hw)
+{
+ struct ice_aqc_get_link_topo cmd = {};
+ u8 node_part_number;
+ u16 node_handle;
+ int status;
+
+ if (hw->mac_type != ICE_MAC_E810)
+ return false;
+
+ if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
+ return true;
+
+ cmd.addr.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
+ cmd.addr.topo_params.index = 0;
+
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
+
+ if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
+ return false;
+
+ if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
+ return true;
+
+ return false;
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -1025,6 +1062,33 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
/**
+ * ice_wait_for_fw - wait for full FW readiness
+ * @hw: pointer to the hardware structure
+ * @timeout: milliseconds that can elapse before timing out
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout.
+ */
+static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+{
+ int fw_loading;
+ u32 elapsed = 0;
+
+ while (elapsed <= timeout) {
+ fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+
+		/* firmware not loaded yet, keep waiting */
+ if (fw_loading) {
+ elapsed += 100;
+ msleep(100);
+ continue;
+ }
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
@@ -1173,8 +1237,19 @@ int ice_init_hw(struct ice_hw *hw)
mutex_init(&hw->tnl_lock);
ice_init_chk_recipe_reuse_support(hw);
- return 0;
+ /* Some cards require longer initialization times
+	 * due to the need to load FW from an external source.
+	 * This can take up to half a minute.
+ */
+ if (ice_is_pf_c827(hw)) {
+ status = ice_wait_for_fw(hw, 30000);
+ if (status) {
+			dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out\n");
+ goto err_unroll_fltr_mgmt_struct;
+ }
+ }
+ return 0;
err_unroll_fltr_mgmt_struct:
ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
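
ice_wait_for_fw() above is a plain sleep-and-poll loop. The same wait could be written with the generic read_poll_timeout() helper from <linux/iopoll.h>; a sketch only, not what the patch uses:

	static int ice_wait_for_fw_poll(struct ice_hw *hw)
	{
		u32 reg;

		/* re-read GL_MNG_FWSM every 10 ms, give up after 30 s */
		return read_poll_timeout(rd32, reg,
					 !(reg & GL_MNG_FWSM_FW_LOADING_M),
					 10 * USEC_PER_MSEC, 30 * USEC_PER_SEC,
					 false, hw, GL_MNG_FWSM);
	}
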
@@ -1360,39 +1435,31 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
}
/**
- * ice_copy_rxq_ctx_to_hw
+ * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
* @hw: pointer to the hardware structure
- * @ice_rxq_ctx: pointer to the rxq context
+ * @rxq_ctx: pointer to the packed Rx queue context
* @rxq_index: the index of the Rx queue
- *
- * Copies rxq context from dense structure to HW register space
*/
-static int
-ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
+static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
+ const ice_rxq_ctx_buf_t *rxq_ctx,
+ u32 rxq_index)
{
- u8 i;
-
- if (!ice_rxq_ctx)
- return -EINVAL;
-
- if (rxq_index > QRX_CTRL_MAX_INDEX)
- return -EINVAL;
-
/* Copy each dword separately to HW */
- for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
- wr32(hw, QRX_CONTEXT(i, rxq_index),
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+	for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
+		u32 ctx = ((const u32 *)rxq_ctx)[i];
 
-		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
-			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
-	}
+		wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);
 
-	return 0;
+		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
+	}
}
+#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
+ PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)
+
/* LAN Rx Queue Context */
-static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
- /* Field Width LSB */
+static const struct packed_field_u8 ice_rlan_ctx_fields[] = {
+ /* Field Width LSB */
ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
@@ -1413,35 +1480,50 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
- { 0 }
};
/**
- * ice_write_rxq_ctx
+ * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
+ * @ctx: the Rx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout.
+ */
+static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
+ ice_rxq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_write_rxq_ctx - Write Rx Queue context to hardware
* @hw: pointer to the hardware structure
- * @rlan_ctx: pointer to the rxq context
+ * @rlan_ctx: pointer to the unpacked Rx queue context
* @rxq_index: the index of the Rx queue
*
- * Converts rxq context from sparse to dense structure and then writes
- * it to HW register space and enables the hardware to prefetch descriptors
- * instead of only fetching them on demand
+ * Pack the sparse Rx Queue context into dense hardware format and write it
+ * into the HW register space.
+ *
+ * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
*/
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
-	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+	ice_rxq_ctx_buf_t buf = {};
 
-	if (!rlan_ctx)
+	if (rxq_index > QRX_CTRL_MAX_INDEX)
 		return -EINVAL;
 
-	rlan_ctx->prefena = 1;
+	ice_pack_rxq_ctx(rlan_ctx, &buf);
+	ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);
 
-	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
-	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+	return 0;
}
/* LAN Tx Queue Context */
-const struct ice_ctx_ele ice_tlan_ctx_info[] = {
+static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
/* Field Width LSB */
ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
@@ -1470,10 +1552,22 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
- ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
- { 0 }
};
+/**
+ * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer
+ * @ctx: the Tx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout.
+ */
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
/* Sideband Queue command wrappers */
/**
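
The conversion above retires the driver's byte/word/dword/qword packing helpers in favor of the generic <linux/packing.h> engine: each PACKED_FIELD() entry names the packed bit range and the backing struct member, and the quirk flags describe the hardware's little-endian, LSW-first buffer layout. A minimal self-contained sketch of the same mechanism (field positions invented for illustration):

	#include <linux/packing.h>
	#include <linux/types.h>

	struct demo_ctx {
		u16 head;	/* occupies bits 12:0 of the buffer */
		u8 cpuid;	/* occupies bits 20:13 */
	};

	typedef struct __packed { u8 buf[8]; } demo_ctx_buf_t;

	static const struct packed_field_u8 demo_fields[] = {
		/*	     Hi  Lo */
		PACKED_FIELD(12,  0, struct demo_ctx, head),
		PACKED_FIELD(20, 13, struct demo_ctx, cpuid),
	};

	static void demo_pack(const struct demo_ctx *ctx, demo_ctx_buf_t *buf)
	{
		pack_fields(buf, sizeof(*buf), ctx, demo_fields,
			    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
	}
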
@@ -2547,6 +2641,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
+ info->ll_phy_tmr_update = ((number & ICE_TS_LL_PHY_TMR_UPDATE_M) != 0);
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2569,6 +2664,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
info->ts_ll_int_read);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ll_phy_tmr_update = %u\n",
+ info->ll_phy_tmr_update);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2709,40 +2806,6 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
- * ice_is_pf_c827 - check if pf contains c827 phy
- * @hw: pointer to the hw struct
- */
-bool ice_is_pf_c827(struct ice_hw *hw)
-{
- struct ice_aqc_get_link_topo cmd = {};
- u8 node_part_number;
- u16 node_handle;
- int status;
-
- if (hw->mac_type != ICE_MAC_E810)
- return false;
-
- if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
- return true;
-
- cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
- cmd.addr.topo_params.index = 0;
-
- status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
- &node_handle);
-
- if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
- return false;
-
- if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
- return true;
-
- return false;
-}
-
-/**
* ice_is_phy_rclk_in_netlist
* @hw: pointer to the hw struct
*
@@ -4609,205 +4672,6 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
/* End of FW Admin Queue command wrappers */
/**
- * ice_pack_ctx_byte - write a byte to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u8 src_byte, dest_byte, mask;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- src_byte = *from;
- src_byte <<= shift_width;
- src_byte &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_byte, dest, sizeof(dest_byte));
-
- dest_byte &= ~mask; /* get the bits not changing */
- dest_byte |= src_byte; /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_byte, sizeof(dest_byte));
-}
-
-/**
- * ice_pack_ctx_word - write a word to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u16 src_word, mask;
- __le16 dest_word;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_word = *(u16 *)from;
- src_word <<= shift_width;
- src_word &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_word, dest, sizeof(dest_word));
-
- dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
- dest_word |= cpu_to_le16(src_word); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_word, sizeof(dest_word));
-}
-
-/**
- * ice_pack_ctx_dword - write a dword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u32 src_dword, mask;
- __le32 dest_dword;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_dword = *(u32 *)from;
- src_dword <<= shift_width;
- src_dword &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_dword, dest, sizeof(dest_dword));
-
- dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
- dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_dword, sizeof(dest_dword));
-}
-
-/**
- * ice_pack_ctx_qword - write a qword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u64 src_qword, mask;
- __le64 dest_qword;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_qword = *(u64 *)from;
- src_qword <<= shift_width;
- src_qword &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_qword, dest, sizeof(dest_qword));
-
- dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
- dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_qword, sizeof(dest_qword));
-}
-
-/**
- * ice_set_ctx - set context bits in packed structure
- * @hw: pointer to the hardware structure
- * @src_ctx: pointer to a generic non-packed context structure
- * @dest_ctx: pointer to memory for the packed structure
- * @ce_info: List of Rx context elements
- */
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- int f;
-
- for (f = 0; ce_info[f].width; f++) {
- /* We have to deal with each element of the FW response
- * using the correct size so that we are correct regardless
- * of the endianness of the machine.
- */
- if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
- ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
- f, ce_info[f].width, ce_info[f].size_of);
- continue;
- }
- switch (ce_info[f].size_of) {
- case sizeof(u8):
- ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u16):
- ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u32):
- ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u64):
- ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/**
* ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
* @hw: pointer to the HW struct
* @vsi_handle: software VSI handle
@@ -6083,6 +5947,44 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
}
/**
+ * ice_is_fw_health_report_supported - checks if firmware supports health events
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if firmware supports health status reports,
+ * false otherwise
+ */
+bool ice_is_fw_health_report_supported(struct ice_hw *hw)
+{
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ,
+ ICE_FW_API_HEALTH_REPORT_MIN,
+ ICE_FW_API_HEALTH_REPORT_PATCH);
+}
+
+/**
+ * ice_aq_set_health_status_cfg - Configure FW health events
+ * @hw: pointer to the HW struct
+ * @event_source: type of diagnostic events to enable
+ *
+ * Configure the health status event types that the firmware will send to this
+ * PF. The supported event types are: PF-specific, all PFs, and global.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source)
+{
+ struct ice_aqc_set_health_status_cfg *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_health_status_cfg;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg);
+
+ cmd->event_source = event_source;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
* ice_aq_set_lldp_mib - Set the LLDP MIB
* @hw: pointer to the HW struct
* @mib_type: Local, Remote or both Local and Remote MIBs
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index fe6f88cfd948..15ba38543738 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -92,9 +92,8 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
-extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info);
+
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf);
extern struct mutex ice_global_cfg_lock_sw;
@@ -113,7 +112,6 @@ int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-bool ice_is_pf_c827(struct ice_hw *hw);
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw);
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw);
bool ice_is_cgu_in_netlist(struct ice_hw *hw);
@@ -143,6 +141,8 @@ int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+bool ice_is_fw_health_report_supported(struct ice_hw *hw);
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source);
int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
u8 serdes_num, int *output);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index ac7db100e2cd..5c7dcf21b222 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -5,7 +5,7 @@
#define _ICE_ESWITCH_H_
#include <net/devlink.h>
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#ifdef CONFIG_ICE_SWITCHDEV
void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 611577ebc29d..1479b45738af 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -371,29 +371,21 @@ enum ice_rx_flex_desc_status_error_1_bits {
ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
-#define ICE_RXQ_CTX_SIZE_DWORDS 8
-#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
-/* RLAN Rx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* RLAN Rx queue context data */
struct ice_rlan_ctx {
u16 head;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
#define ICE_RLAN_BASE_S 7
u64 base;
u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
- u16 dbuf; /* bigger than needed, see above for reason */
+ u8 dbuf;
#define ICE_RLAN_CTX_HBUF_S 6
- u16 hbuf; /* bigger than needed, see above for reason */
+ u8 hbuf;
u8 dtype;
u8 dsize;
u8 crcstrip;
@@ -401,29 +393,15 @@ struct ice_rlan_ctx {
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
- u32 rxmax; /* bigger than needed, see above for reason */
+ u16 rxmax;
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
- u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 lrxqthresh;
u8 prefena; /* NOTE: normally must be set to 1 at init */
};
-struct ice_ctx_ele {
- u16 offset;
- u16 size_of;
- u16 width;
- u16 lsb;
-};
-
-#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
- .offset = offsetof(struct _struct, _ele), \
- .size_of = sizeof_field(struct _struct, _ele), \
- .width = _width, \
- .lsb = _lsb, \
-}
-
/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
@@ -551,18 +529,12 @@ enum ice_tx_ctx_desc_eipt_offload {
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023
-/* Tx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* Tx queue context data */
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
u64 base; /* base is defined in 128-byte units */
u8 port_num;
- u16 cgd_num; /* bigger than needed, see above for reason */
+ u8 cgd_num;
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
@@ -573,7 +545,7 @@ struct ice_tlan_ctx {
u8 tsyn_ena;
u8 internal_usage_flag;
u8 alt_vlan;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
u8 wb_mode;
u8 tphrd_desc;
u8 tphrd;
@@ -582,7 +554,7 @@ struct ice_tlan_ctx {
u16 qnum_in_func;
u8 itr_notification_mode;
u8 adjust_prof_id;
- u32 qlen; /* bigger than needed, see above for reason */
+ u16 qlen;
u8 quanta_prof_idx;
u8 tso_ena;
u16 tso_qnum;
@@ -590,7 +562,6 @@ struct ice_tlan_ctx {
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
#endif /* _ICE_LAN_TX_RX_H_ */
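
The narrowed field widths above work because packing is now centralized in ice_pack_txq_ctx() instead of being driven by the removed ICE_CTX_STORE table, so a field no longer needs slack bits to survive byte-boundary shifts. As an illustration only (not the driver's packer), placing an exact-width value at an arbitrary bit offset in a little-endian buffer can be done bit by bit:

static void pack_bits_le(u8 *buf, u64 val, unsigned int lsb,
			 unsigned int width)
{
	unsigned int i;

	for (i = 0; i < width; i++) {
		unsigned int bit = lsb + i;

		if (val & BIT_ULL(i))
			buf[bit / 8] |= BIT(bit % 8);
		else
			buf[bit / 8] &= ~BIT(bit % 8);
	}
}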
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 89fa3d53d317..8fd522093dfe 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -14,7 +14,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
@@ -1567,6 +1567,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
+ case ice_aqc_opc_get_health_status:
+ ice_process_health_status_event(pf, &event);
+ break;
default:
dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
@@ -1816,6 +1819,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
}
@@ -1829,6 +1834,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
}
@@ -1842,6 +1849,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event,
+ queue);
wr32(hw, GL_MDET_RX, 0xffffffff);
}
@@ -2364,9 +2373,11 @@ static void ice_service_task(struct work_struct *work)
struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
unsigned long start_time = jiffies;
- /* subtasks */
+ if (pf->health_reporters.tx_hang_buf.tx_ring) {
+ ice_report_tx_hang(pf);
+ pf->health_reporters.tx_hang_buf.tx_ring = NULL;
+ }
- /* process reset requests first */
ice_reset_subtask(pf);
/* bail if a reset/recovery cycle is pending or rebuild failed */
@@ -4741,55 +4752,12 @@ static void ice_decfg_netdev(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
-/**
- * ice_wait_for_fw - wait for full FW readiness
- * @hw: pointer to the hardware structure
- * @timeout: milliseconds that can elapse before timing out
- */
-static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
-{
- int fw_loading;
- u32 elapsed = 0;
-
- while (elapsed <= timeout) {
- fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
-
- /* firmware was not yet loaded, we have to wait more */
- if (fw_loading) {
- elapsed += 100;
- msleep(100);
- continue;
- }
- return 0;
- }
-
- return -ETIMEDOUT;
-}
-
int ice_init_dev(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int err;
- err = ice_init_hw(hw);
- if (err) {
- dev_err(dev, "ice_init_hw failed: %d\n", err);
- return err;
- }
-
- /* Some cards require longer initialization times
- * due to necessity of loading FW from an external source.
- * This can take even half a minute.
- */
- if (ice_is_pf_c827(hw)) {
- err = ice_wait_for_fw(hw, 30000);
- if (err) {
- dev_err(dev, "ice_wait_for_fw timed out");
- return err;
- }
- }
-
ice_init_feature_support(pf);
err = ice_init_ddp_config(hw, pf);
@@ -4810,7 +4778,7 @@ int ice_init_dev(struct ice_pf *pf)
err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
- goto err_init_pf;
+ return err;
}
pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
@@ -4834,7 +4802,7 @@ int ice_init_dev(struct ice_pf *pf)
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_interrupt_scheme;
+ goto unroll_pf_init;
}
/* In case of MSIX we are going to setup the misc vector right here
@@ -4845,17 +4813,15 @@ int ice_init_dev(struct ice_pf *pf)
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_req_irq_msix_misc;
+ goto unroll_irq_scheme_init;
}
return 0;
-err_req_irq_msix_misc:
+unroll_irq_scheme_init:
ice_clear_interrupt_scheme(pf);
-err_init_interrupt_scheme:
+unroll_pf_init:
ice_deinit_pf(pf);
-err_init_pf:
- ice_deinit_hw(hw);
return err;
}
@@ -5087,6 +5053,7 @@ static int ice_init_devlink(struct ice_pf *pf)
return err;
ice_devlink_init_regions(pf);
+ ice_health_init(pf);
ice_devlink_register(pf);
return 0;
@@ -5095,6 +5062,7 @@ static int ice_init_devlink(struct ice_pf *pf)
static void ice_deinit_devlink(struct ice_pf *pf)
{
ice_devlink_unregister(pf);
+ ice_health_deinit(pf);
ice_devlink_destroy_regions(pf);
ice_devlink_unregister_params(pf);
}
@@ -5312,13 +5280,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
}
pci_set_master(pdev);
-
- adapter = ice_adapter_get(pdev);
- if (IS_ERR(adapter))
- return PTR_ERR(adapter);
-
pf->pdev = pdev;
- pf->adapter = adapter;
pci_set_drvdata(pdev, pf);
set_bit(ICE_DOWN, pf->state);
/* Disable service task until DOWN bit is cleared */
@@ -5346,29 +5308,44 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
hw->debug_mask = debug;
#endif
+ err = ice_init_hw(hw);
+ if (err) {
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
+ adapter = ice_adapter_get(pdev);
+ if (IS_ERR(adapter)) {
+ err = PTR_ERR(adapter);
+ goto unroll_hw_init;
+ }
+ pf->adapter = adapter;
+
err = ice_init(pf);
if (err)
- goto err_init;
+ goto unroll_adapter;
devl_lock(priv_to_devlink(pf));
err = ice_load(pf);
if (err)
- goto err_load;
+ goto unroll_init;
err = ice_init_devlink(pf);
if (err)
- goto err_init_devlink;
+ goto unroll_load;
devl_unlock(priv_to_devlink(pf));
return 0;
-err_init_devlink:
+unroll_load:
ice_unload(pf);
-err_load:
+unroll_init:
devl_unlock(priv_to_devlink(pf));
ice_deinit(pf);
-err_init:
+unroll_adapter:
ice_adapter_put(pdev);
+unroll_hw_init:
+ ice_deinit_hw(hw);
return err;
}
@@ -7793,6 +7770,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* if we get here, reset flow is successful */
clear_bit(ICE_RESET_FAILED, pf->state);
+ ice_health_clear(pf);
+
ice_plug_aux_dev(pf);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
ice_lag_rebuild(pf);
@@ -8283,16 +8262,18 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
if (tx_ring) {
struct ice_hw *hw = &pf->hw;
- u32 head, val = 0;
+ u32 head, intr = 0;
head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
/* Read interrupt register */
- val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
+ intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, txqueue, tx_ring->next_to_clean,
- head, tx_ring->next_to_use, val);
+ head, tx_ring->next_to_use, intr);
+
+ ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr);
}
pf->tx_timeout_last_recovery = jiffies;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index efd770dfec44..c27f28c1cf04 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -16,28 +16,28 @@ static const char ice_pin_names[][64] = {
};
static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
- /* name, gpio */
- { TIME_SYNC, { 4, -1 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { TIME_SYNC, { 4, -1 }, { 0, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 11 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
- /* name, gpio */
- { SDP0, { 0, 0 }},
- { SDP1, { 1, 1 }},
- { SDP2, { 2, 2 }},
- { SDP3, { 3, 3 }},
- { TIME_SYNC, { 4, -1 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 15, 14 }},
+ { SDP1, { 1, 1 }, { 15, 14 }},
+ { SDP2, { 2, 2 }, { 15, 14 }},
+ { SDP3, { 3, 3 }, { 15, 14 }},
+ { TIME_SYNC, { 4, -1 }, { 11, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 9 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
- /* name, gpio */
- { SDP0, { 0, 0 }},
- { SDP1, { 1, 1 }},
- { SDP2, { 2, 2 }},
- { SDP3, { 3, 3 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 0, 1 }},
+ { SDP1, { 1, 1 }, { 0, 1 }},
+ { SDP2, { 2, 2 }, { 0, 1 }},
+ { SDP3, { 3, 3 }, { 0, 1 }},
+ { ONE_PPS, { -1, 5 }, { 0, 1 }},
};
static const char ice_pin_names_nvm[][64] = {
@@ -49,12 +49,12 @@ static const char ice_pin_names_nvm[][64] = {
};
static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
- /* name, gpio */
- { GNSS, { 1, -1 }},
- { SMA1, { 1, 0 }},
- { UFL1, { -1, 0 }},
- { SMA2, { 3, 2 }},
- { UFL2, { 3, -1 }},
+ /* name, gpio, delay */
+ { GNSS, { 1, -1 }, { 0, 0 }},
+ { SMA1, { 1, 0 }, { 0, 1 }},
+ { UFL1, { -1, 0 }, { 0, 1 }},
+ { SMA2, { 3, 2 }, { 0, 1 }},
+ { UFL2, { 3, -1 }, { 0, 0 }},
};
static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
@@ -464,7 +464,9 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
*/
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
+ unsigned long flags;
struct sk_buff *skb;
struct ice_pf *pf;
@@ -473,6 +475,7 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ params = &pf->hw.ptp.phy.e810;
/* Drop packets which have waited for more than 2 seconds */
if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
@@ -489,11 +492,17 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
/* Write TS index to read to the PF register so the FW can read it */
- wr32(&pf->hw, PF_SB_ATQBAL,
- TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
- TS_LL_READ_TS);
+ wr32(&pf->hw, REG_LL_PROXY_H,
+ REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
+ REG_LL_PROXY_H_EXEC);
tx->last_ll_ts_idx_read = idx;
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
}
/**
@@ -504,35 +513,52 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
struct skb_shared_hwtstamps shhwtstamps = {};
u8 idx = tx->last_ll_ts_idx_read;
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
u64 raw_tstamp, tstamp;
bool drop_ts = false;
struct sk_buff *skb;
+ unsigned long flags;
+ struct device *dev;
struct ice_pf *pf;
- u32 val;
+ u32 reg_ll_high;
if (!tx->init || tx->last_ll_ts_idx_read < 0)
return;
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ dev = ice_pf_to_dev(pf);
+ params = &pf->hw.ptp.phy.e810;
ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
- val = rd32(&pf->hw, PF_SB_ATQBAL);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
+ dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
+ __func__);
+
+ /* Read the low 32 bit value */
+ raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
+ /* Read the status together with high TS part */
+ reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);
+
+ /* Wake up threads waiting on low latency interface */
+ params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
+ wake_up_locked(&params->atqbal_wq);
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
/* When the bit is cleared, the TS is ready in the register */
- if (val & TS_LL_READ_TS) {
+ if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
return;
}
/* The high 8 bits of the TS are in bits 16:23 */
- raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
- raw_tstamp <<= 32;
-
- /* Read the low 32 bit value */
- raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
+ raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;
/* Devices using this interface always verify the timestamp differs
* relative to the last cached timestamp value.
@@ -1558,18 +1584,29 @@ void ice_ptp_extts_event(struct ice_pf *pf)
* Event is defined in GLTSYN_EVNT_0 register
*/
for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
+ int pin_desc_idx;
+
/* Check if channel is enabled */
- if (pf->ptp.ext_ts_irq & (1 << chan)) {
- lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
- hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
- event.timestamp = (((u64)hi) << 32) | lo;
- event.type = PTP_CLOCK_EXTTS;
- event.index = chan;
-
- /* Fire event */
- ptp_clock_event(pf->ptp.clock, &event);
- pf->ptp.ext_ts_irq &= ~(1 << chan);
+ if (!(pf->ptp.ext_ts_irq & (1 << chan)))
+ continue;
+
+ lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
+ hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
+ event.timestamp = (u64)hi << 32 | lo;
+
+ /* Add delay compensation */
+ pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
+ if (pin_desc_idx >= 0) {
+ const struct ice_ptp_pin_desc *desc;
+
+ desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
+ event.timestamp -= desc->delay[0];
}
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = chan;
+ pf->ptp.ext_ts_irq &= ~(1 << chan);
+ ptp_clock_event(pf->ptp.clock, &event);
}
}
@@ -1764,9 +1801,9 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
int on)
{
+ unsigned int gpio_pin, prop_delay_ns;
u64 clk, period, start, phase;
struct ice_hw *hw = &pf->hw;
- unsigned int gpio_pin;
int pin_desc_idx;
if (rq->flags & ~PTP_PEROUT_PHASE)
@@ -1777,6 +1814,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
return -EIO;
gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
+ prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
/* If we're disabling the output or period is 0, clear out CLKO and TGT
@@ -1808,11 +1846,11 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
* at the next multiple of period, maintaining phase.
*/
clk = ice_ptp_read_src_clk_reg(pf, NULL);
- if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw))
+ if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
start = div64_u64(clk + period - 1, period) * period + phase;
/* Compensate for propagation delay from the generator to the pin. */
- start -= ice_prop_delay(hw);
+ start -= prop_delay_ns;
return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index c490d98fd9c6..a1d0e988c084 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -211,6 +211,7 @@ enum ice_ptp_pin_nvm {
* struct ice_ptp_pin_desc - hardware pin description data
* @name_idx: index of the name of pin in ice_pin_names
* @gpio: the associated GPIO input and output pins
+ * @delay: input and output signal delays in nanoseconds
*
* Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array
* for the device. Device families have separate sets of available pins with
@@ -219,6 +220,7 @@ enum ice_ptp_pin_nvm {
struct ice_ptp_pin_desc {
int name_idx;
int gpio[2];
+ unsigned int delay[2];
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index a8e57cf05a9c..ac46d1183300 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -341,8 +341,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
823437500, /* 823.4375 MHz PLL */
/* nominal_incval */
0x136e44fabULL,
- /* pps_delay */
- 11,
},
/* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
@@ -351,8 +349,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
},
/* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
@@ -361,8 +357,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
796875000, /* 796.875 MHz */
/* nominal_incval */
0x141414141ULL,
- /* pps_delay */
- 12,
},
/* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
@@ -371,8 +365,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
816000000, /* 816 MHz */
/* nominal_incval */
0x139b9b9baULL,
- /* pps_delay */
- 12,
},
/* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
@@ -381,8 +373,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
830078125, /* 830.078125 MHz */
/* nominal_incval */
0x134679aceULL,
- /* pps_delay */
- 11,
},
/* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
@@ -391,8 +381,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
},
};
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 02e84f5b1d45..ec91822e9280 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -391,7 +391,7 @@ static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw24.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -469,7 +469,7 @@ static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw24.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -546,7 +546,7 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw23.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -651,7 +651,7 @@ static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
/* Log the current clock configuration */
ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
+ str_enabled_disabled(dw24.ts_pll_enable),
ice_clk_src_str(dw23.time_ref_sel),
ice_clk_freq_str(dw9.time_ref_freq_sel),
ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
@@ -4876,33 +4876,46 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
static int
ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
{
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ unsigned long flags;
u32 val;
- u8 i;
+ int err;
+
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
/* Write TS index to read to the PF register so the FW can read it */
- val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
- wr32(hw, PF_SB_ATQBAL, val);
+ val = FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
/* Read the register repeatedly until the FW provides us the TS */
- for (i = TS_LL_READ_RETRIES; i > 0; i--) {
- val = rd32(hw, PF_SB_ATQBAL);
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val), 10,
+ REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
- /* When the bit is cleared, the TS is ready in the register */
- if (!(FIELD_GET(TS_LL_READ_TS, val))) {
- /* High 8 bit value of the TS is on the bits 16:23 */
- *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+ /* The high 8 bits of the TS are in bits 16:23 */
+ *hi = FIELD_GET(REG_LL_PROXY_H_TS_HIGH, val);
- /* Read the low 32 bit value and set the TS valid bit */
- *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
- return 0;
- }
+ /* Read the low 32 bit value and set the TS valid bit */
+ *lo = rd32(hw, REG_LL_PROXY_L) | TS_VALID;
- udelay(10);
- }
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
- /* FW failed to provide the TS in time */
- ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
- return -EINVAL;
+ return 0;
}
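
The serialization pattern this rewrite adopts is worth spelling out: the wait queue's internal spinlock protects both atqbal_flags and the REG_LL_PROXY_* access, and wait_event_interruptible_locked_irq() drops and retakes that same lock while sleeping. A minimal sketch of the entry half, assuming the ice_e810_params layout added by this patch:

static int ll_iface_begin(struct ice_e810_params *params)
{
	int err;

	spin_lock_irq(&params->atqbal_wq.lock);
	err = wait_event_interruptible_locked_irq(params->atqbal_wq,
						  !(params->atqbal_flags &
						    ATQBAL_FLAGS_INTR_IN_PROGRESS));
	if (err)
		spin_unlock_irq(&params->atqbal_wq.lock);

	return err; /* on success the caller still holds the lock */
}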
/**
@@ -5085,6 +5098,55 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
}
/**
+ * ice_ptp_prep_phy_adj_ll_e810 - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment value to program
+ *
+ * Use the low latency firmware interface to program PHY time adjustment to
+ * all PHY ports.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ice_ptp_prep_phy_adj_ll_e810(struct ice_hw *hw, s32 adj)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, adj);
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_ADJ) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer adjustment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
+
+/**
* ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
* @hw: pointer to HW struct
* @adj: adjustment value to program
@@ -5102,6 +5164,9 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_adj_ll_e810(hw, adj);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Adjustments are represented as signed 2's complement values in
@@ -5125,6 +5190,56 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
}
/**
+ * ice_ptp_prep_phy_incval_ll_e810 - Prep PHY ports increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40-bit increment value to prepare
+ *
+ * Use the low latency firmware interface to program PHY time increment value
+ * for all PHY ports.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int ice_ptp_prep_phy_incval_ll_e810(struct ice_hw *hw, u64 incval)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, lower_32_bits(incval));
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_FREQ) |
+ FIELD_PREP(REG_LL_PROXY_H_TS_HIGH, (u8)upper_32_bits(incval)) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer increment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
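
The 40-bit incval split above is easy to get wrong; schematically, the low 32 bits go to REG_LL_PROXY_L and only bits 39:32 survive into the TS_HIGH field of the control word:

static void split_incval_sketch(u64 incval, u32 *lo, u8 *hi)
{
	*lo = lower_32_bits(incval);
	*hi = (u8)upper_32_bits(incval);	/* bits 39:32 of the 40-bit value */
}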
+
+/**
* ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
* @hw: pointer to HW struct
* @incval: The new 40-bit increment value to prepare
@@ -5139,6 +5254,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_incval_ll_e810(hw, incval);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
low = lower_32_bits(incval);
high = upper_32_bits(incval);
@@ -5423,6 +5541,8 @@ static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
ptp->phy_model = ICE_PHY_E810;
ptp->num_lports = 8;
ptp->ports_per_phy = 4;
+
+ init_waitqueue_head(&ptp->phy.e810.atqbal_wq);
}
/* Device agnostic functions
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 1cee0f1bba2d..6779ce120515 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -80,7 +80,6 @@ struct ice_phy_reg_info_eth56g {
* struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
* @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
- * @pps_delay: propagation delay of the PPS output signal
*
* Characteristic information for the various TIME_REF sources possible in the
* E822 devices
@@ -88,7 +87,6 @@ struct ice_phy_reg_info_eth56g {
struct ice_time_ref_info_e82x {
u64 pll_freq;
u64 nominal_incval;
- u8 pps_delay;
};
/**
@@ -326,8 +324,6 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
*/
#define ICE_E810_PLL_FREQ 812500000
#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
-#define ICE_E810_OUT_PROP_DELAY_NS 1
-#define ICE_E825C_OUT_PROP_DELAY_NS 11
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
@@ -389,11 +385,6 @@ static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
return e82x_time_ref[time_ref].nominal_incval;
}
-static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref)
-{
- return e82x_time_ref[time_ref].pps_delay;
-}
-
/* E822 Vernier calibration functions */
int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
@@ -432,20 +423,6 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
#define ICE_ETH56G_NOMINAL_THRESH4 0x7777
#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6
-static inline u64 ice_prop_delay(const struct ice_hw *hw)
-{
- switch (hw->ptp.phy_model) {
- case ICE_PHY_ETH56G:
- return ICE_E825C_OUT_PROP_DELAY_NS;
- case ICE_PHY_E810:
- return ICE_E810_OUT_PROP_DELAY_NS;
- case ICE_PHY_E82X:
- return ice_e82x_pps_delay(ice_e82x_time_ref(hw));
- default:
- return 0;
- }
-}
-
/**
* ice_get_base_incval - Get base clock increment value
* @hw: pointer to the HW struct
@@ -689,11 +666,18 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define BYTES_PER_IDX_ADDR_L 4
/* Tx timestamp low latency read definitions */
-#define TS_LL_READ_RETRIES 200
-#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
-#define TS_LL_READ_TS_IDX GENMASK(29, 24)
-#define TS_LL_READ_TS_INTR BIT(30)
-#define TS_LL_READ_TS BIT(31)
+#define REG_LL_PROXY_H_TIMEOUT_US 2000
+#define REG_LL_PROXY_H_PHY_TMR_CMD_M GENMASK(7, 6)
+#define REG_LL_PROXY_H_PHY_TMR_CMD_ADJ 0x1
+#define REG_LL_PROXY_H_PHY_TMR_CMD_FREQ 0x2
+#define REG_LL_PROXY_H_TS_HIGH GENMASK(23, 16)
+#define REG_LL_PROXY_H_PHY_TMR_IDX_M BIT(24)
+#define REG_LL_PROXY_H_TS_IDX GENMASK(29, 24)
+#define REG_LL_PROXY_H_TS_INTR_ENA BIT(30)
+#define REG_LL_PROXY_H_EXEC BIT(31)
+
+#define REG_LL_PROXY_L PF_SB_ATQBAH
+#define REG_LL_PROXY_H PF_SB_ATQBAL
/* Internal PHY timestamp address */
#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 970a99a52bf1..fb7a1b9a4313 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -4,7 +4,7 @@
#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
index 75d7147e1c01..1a2c94375ca7 100644
--- a/drivers/net/ethernet/intel/ice/ice_sf_eth.c
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -5,8 +5,8 @@
#include "ice_txrx.h"
#include "ice_fltr.h"
#include "ice_sf_eth.h"
-#include "devlink/devlink_port.h"
#include "devlink/devlink.h"
+#include "devlink/port.h"
static const struct net_device_ops ice_sf_netdev_ops = {
.ndo_open = ice_open,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 0e740342e294..4a91e0aaf0a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -4784,7 +4784,8 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
*/
if (found && recp[i].tun_type == rinfo->tun_type &&
recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
- recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
+ recp[i].allow_pass_l2 == rinfo->allow_pass_l2 &&
+ recp[i].priority == rinfo->priority)
return i; /* Return the recipe ID */
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 4a9ef722635f..33a1a5934c0d 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -18,6 +18,7 @@
#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
#include "ice_fwlog.h"
+#include <linux/wait.h>
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -368,6 +369,7 @@ struct ice_ts_func_info {
#define ICE_TS_TMR1_ENA_M BIT(26)
#define ICE_TS_LL_TX_TS_READ_M BIT(28)
#define ICE_TS_LL_TX_TS_INT_READ_M BIT(29)
+#define ICE_TS_LL_PHY_TMR_UPDATE_M BIT(30)
struct ice_ts_dev_info {
/* Device specific info */
@@ -382,6 +384,7 @@ struct ice_ts_dev_info {
u8 tmr1_ena;
u8 ts_ll_read;
u8 ts_ll_int_read;
+ u8 ll_phy_tmr_update;
};
#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
@@ -848,6 +851,14 @@ struct ice_mbx_data {
#define ICE_PORTS_PER_QUAD 4
#define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD)
+#define ATQBAL_FLAGS_INTR_IN_PROGRESS BIT(0)
+
+struct ice_e810_params {
+ /* The wait queue lock also protects the low latency interface */
+ wait_queue_head_t atqbal_wq;
+ unsigned int atqbal_flags;
+};
+
struct ice_eth56g_params {
u8 num_phys;
bool onestep_ena;
@@ -856,6 +867,7 @@ struct ice_eth56g_params {
};
union ice_phy_params {
+ struct ice_e810_params e810;
struct ice_eth56g_params eth56g;
};
@@ -1214,4 +1226,9 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
+/* AQ API version for Health Status support */
+#define ICE_FW_API_HEALTH_REPORT_MAJ 1
+#define ICE_FW_API_HEALTH_REPORT_MIN 7
+#define ICE_FW_API_HEALTH_REPORT_PATCH 6
+
#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 334ae945d640..8975d2971bc3 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -801,35 +801,6 @@ out_failure:
return result;
}
-static int
-ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
- struct xdp_buff *xdp, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
- if (!size)
- return 0;
-
- if (!xdp_buff_has_frags(first)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(first);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- xsk_buff_free(first);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start),
- XDP_PACKET_HEADROOM, size);
- sinfo->xdp_frags_size += size;
- xsk_buff_add_frag(xdp);
-
- return 0;
-}
-
/**
* ice_clean_rx_irq_zc - consumes packets from the hardware ring
* @rx_ring: AF_XDP Rx ring
@@ -895,7 +866,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
if (!first) {
first = xdp;
- } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
+ } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) {
+ xsk_buff_free(first);
break;
}
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 463c0d26b9d4..6c1b702fd992 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_IGB) += igb.o
igb-y := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o igb_hwmon.o
+ e1000_i210.o igb_ptp.o igb_hwmon.o igb_xsk.o
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 3c2dc7bdebb5..02f340280d20 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -18,8 +18,10 @@
#include <linux/i2c-algo-bit.h>
#include <linux/pci.h>
#include <linux/mdio.h>
+#include <linux/lockdep.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
struct igb_adapter;
@@ -86,6 +88,7 @@ struct igb_adapter;
#define IGB_XDP_CONSUMED BIT(0)
#define IGB_XDP_TX BIT(1)
#define IGB_XDP_REDIR BIT(2)
+#define IGB_XDP_EXIT BIT(3)
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
@@ -255,6 +258,7 @@ enum igb_tx_flags {
enum igb_tx_buf_type {
IGB_TYPE_SKB = 0,
IGB_TYPE_XDP,
+ IGB_TYPE_XSK
};
/* wrapper around a pointer to a socket buffer,
@@ -320,6 +324,7 @@ struct igb_ring {
union { /* array of buffer info structs */
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
+ struct xdp_buff **rx_buffer_info_zc;
};
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
@@ -357,6 +362,7 @@ struct igb_ring {
};
};
struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
struct igb_q_vector {
@@ -384,7 +390,8 @@ enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
- IGB_RING_FLAG_TX_DETECT_HANG
+ IGB_RING_FLAG_TX_DETECT_HANG,
+ IGB_RING_FLAG_TX_DISABLED
};
#define ring_uses_large_buffer(ring) \
@@ -731,12 +738,21 @@ int igb_setup_tx_resources(struct igb_ring *);
int igb_setup_rx_resources(struct igb_ring *);
void igb_free_tx_resources(struct igb_ring *);
void igb_free_rx_resources(struct igb_ring *);
+void igb_clean_tx_ring(struct igb_ring *tx_ring);
+void igb_clean_rx_ring(struct igb_ring *rx_ring);
void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status);
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp);
+void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *);
bool igb_has_link(struct igb_adapter *adapter);
@@ -797,6 +813,33 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}
+/* This function assumes __netif_tx_lock is held by the caller. */
+static inline void igb_xdp_ring_update_tail(struct igb_ring *ring)
+{
+ lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+}
+
+static inline struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
+{
+ unsigned int r_idx = smp_processor_id();
+
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
+static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter)
+{
+ return !!READ_ONCE(adapter->xdp_prog);
+}
+
int igb_add_filter(struct igb_adapter *adapter,
struct igb_nfc_filter *input);
int igb_erase_filter(struct igb_adapter *adapter,
@@ -807,4 +850,17 @@ int igb_add_mac_steering_filter(struct igb_adapter *adapter,
int igb_del_mac_steering_filter(struct igb_adapter *adapter,
const u8 *addr, u8 queue, u8 flags);
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring);
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid);
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count);
+void igb_clean_rx_ring_zc(struct igb_ring *rx_ring);
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
+ struct xsk_buff_pool *xsk_pool, const int budget);
+bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool);
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+
#endif /* _IGB_H_ */
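
A hedged sketch of what the igb_xsk_pool() accessor declared above is expected to do (its body lives in the new igb_xsk.c, outside this excerpt): hand back the queue's registered pool only when zero-copy can actually run, i.e. an XDP program is attached. xsk_get_pool_from_qid() is the existing core helper:

static struct xsk_buff_pool *igb_xsk_pool_sketch(struct igb_adapter *adapter,
						 struct igb_ring *ring)
{
	struct xsk_buff_pool *pool;

	pool = xsk_get_pool_from_qid(adapter->netdev, ring->queue_index);
	if (!igb_xdp_is_enabled(adapter) || !pool)
		return NULL;

	return pool;
}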
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 288a4bb2683a..d368b753a467 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -33,7 +33,6 @@
#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
-#include <linux/lockdep.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -116,8 +115,6 @@ static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
-static void igb_clean_tx_ring(struct igb_ring *);
-static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
@@ -475,12 +472,17 @@ rx_ring_summary:
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
- struct igb_rx_buffer *buffer_info;
- buffer_info = &rx_ring->rx_buffer_info[i];
+ dma_addr_t dma = (dma_addr_t)0;
+ struct igb_rx_buffer *buffer_info = NULL;
rx_desc = IGB_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ if (!rx_ring->xsk_pool) {
+ buffer_info = &rx_ring->rx_buffer_info[i];
+ dma = buffer_info->dma;
+ }
+
if (i == rx_ring->next_to_use)
next_desc = " NTU";
else if (i == rx_ring->next_to_clean)
@@ -500,11 +502,11 @@ rx_ring_summary:
"R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- (u64)buffer_info->dma,
+ (u64)dma,
next_desc);
if (netif_msg_pktdata(adapter) &&
- buffer_info->dma && buffer_info->page) {
+ buffer_info && dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
@@ -1990,7 +1992,11 @@ static void igb_configure(struct igb_adapter *adapter)
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
- igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
+ if (ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(ring, ring->xsk_pool,
+ igb_desc_unused(ring));
+ else
+ igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
}
}
@@ -2911,37 +2917,20 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
switch (xdp->command) {
case XDP_SETUP_PROG:
return igb_xdp_setup(dev, xdp);
+ case XDP_SETUP_XSK_POOL:
+ return igb_xsk_pool_setup(adapter, xdp->xsk.pool,
+ xdp->xsk.queue_id);
default:
return -EINVAL;
}
}
-/* This function assumes __netif_tx_lock is held by the caller. */
-static void igb_xdp_ring_update_tail(struct igb_ring *ring)
-{
- lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
-
- /* Force memory writes to complete before letting h/w know there
- * are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
-}
-
-static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
-{
- unsigned int r_idx = smp_processor_id();
-
- if (r_idx >= adapter->num_tx_queues)
- r_idx = r_idx % adapter->num_tx_queues;
-
- return adapter->tx_ring[r_idx];
-}
-
-static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
+int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
int cpu = smp_processor_id();
@@ -2955,7 +2944,8 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
/* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
- tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ tx_ring = igb_xdp_is_enabled(adapter) ?
+ igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return IGB_XDP_CONSUMED;
@@ -2988,10 +2978,14 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
/* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
- tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ tx_ring = igb_xdp_is_enabled(adapter) ?
+ igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return -ENXIO;
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return -ENXIO;
+
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
@@ -3042,6 +3036,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_setup_tc = igb_setup_tc,
.ndo_bpf = igb_xdp,
.ndo_xdp_xmit = igb_xdp_xmit,
+ .ndo_xsk_wakeup = igb_xsk_wakeup,
};
/**
@@ -3338,7 +3333,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -4364,6 +4360,8 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
u64 tdba = ring->dma;
int reg_idx = ring->reg_idx;
+ WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring));
+
wr32(E1000_TDLEN(reg_idx),
ring->count * sizeof(union e1000_adv_tx_desc));
wr32(E1000_TDBAL(reg_idx),
@@ -4424,7 +4422,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index, 0);
+ rx_ring->queue_index,
+ rx_ring->q_vector->napi.napi_id);
if (res < 0) {
dev_err(dev, "Failed to register xdp_rxq index %u\n",
rx_ring->queue_index);
@@ -4720,12 +4719,17 @@ void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
struct e1000_hw *hw = &adapter->hw;
int reg_idx = ring->reg_idx;
u32 srrctl = 0;
+ u32 buf_size;
- srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- if (ring_uses_large_buffer(ring))
- srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring->xsk_pool)
+ buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ else if (ring_uses_large_buffer(ring))
+ buf_size = IGB_RXBUFFER_3072;
else
- srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ buf_size = IGB_RXBUFFER_2048;
+
+ srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ srrctl |= buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
@@ -4757,8 +4761,17 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
u32 rxdctl = 0;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL));
+ WRITE_ONCE(ring->xsk_pool, igb_xsk_pool(adapter, ring));
+ if (ring->xsk_pool) {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL));
+ }
/* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4785,9 +4798,12 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
- /* initialize rx_buffer_info */
- memset(ring->rx_buffer_info, 0,
- sizeof(struct igb_rx_buffer) * ring->count);
+ if (ring->xsk_pool)
+ memset(ring->rx_buffer_info_zc, 0,
+ sizeof(*ring->rx_buffer_info_zc) * ring->count);
+ else
+ memset(ring->rx_buffer_info, 0,
+ sizeof(*ring->rx_buffer_info) * ring->count);
/* initialize Rx descriptor 0 */
rx_desc = IGB_RX_DESC(ring, 0);
@@ -4888,19 +4904,24 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
* igb_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
**/
-static void igb_clean_tx_ring(struct igb_ring *tx_ring)
+void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
u16 i = tx_ring->next_to_clean;
struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+ u32 xsk_frames = 0;
while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc;
/* Free all the Tx ring sk_buffs or xdp frames */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
dev_kfree_skb_any(tx_buffer->skb);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -4931,6 +4952,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
DMA_TO_DEVICE);
}
+skip_for_xsk:
tx_buffer->next_to_watch = NULL;
/* move us one more past the eop_desc for start of next pkt */
@@ -4945,6 +4967,9 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
/* reset BQL for queue */
netdev_tx_reset_queue(txring_txq(tx_ring));
+ if (tx_ring->xsk_pool && xsk_frames)
+ xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
+
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -4975,8 +5000,13 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
rx_ring->xdp_prog = NULL;
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
+ if (rx_ring->xsk_pool) {
+ vfree(rx_ring->rx_buffer_info_zc);
+ rx_ring->rx_buffer_info_zc = NULL;
+ } else {
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ }
/* if not set, then don't free */
if (!rx_ring->desc)
@@ -5007,13 +5037,18 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
* igb_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
**/
-static void igb_clean_rx_ring(struct igb_ring *rx_ring)
+void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
+ if (rx_ring->xsk_pool) {
+ igb_clean_rx_ring_zc(rx_ring);
+ goto skip_for_xsk;
+ }
+
/* Free all the Rx ring sk_buffs */
while (i != rx_ring->next_to_alloc) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
@@ -5041,6 +5076,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
i = 0;
}
+skip_for_xsk:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -6467,6 +6503,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
+ return NETDEV_TX_BUSY;
+
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
first->type = IGB_TYPE_SKB;
@@ -6622,7 +6661,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
- if (adapter->xdp_prog) {
+ if (igb_xdp_is_enabled(adapter)) {
int i;
for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -8195,6 +8234,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
struct igb_q_vector *q_vector = container_of(napi,
struct igb_q_vector,
napi);
+ struct xsk_buff_pool *xsk_pool;
bool clean_complete = true;
int work_done = 0;
@@ -8206,7 +8246,12 @@ static int igb_poll(struct napi_struct *napi, int budget)
clean_complete = igb_clean_tx_irq(q_vector, budget);
if (q_vector->rx.ring) {
- int cleaned = igb_clean_rx_irq(q_vector, budget);
+ int cleaned;
+
+ xsk_pool = READ_ONCE(q_vector->rx.ring->xsk_pool);
+ cleaned = xsk_pool ?
+ igb_clean_rx_irq_zc(q_vector, xsk_pool, budget) :
+ igb_clean_rx_irq(q_vector, budget);
work_done += cleaned;
if (cleaned >= budget)
@@ -8235,13 +8280,18 @@ static int igb_poll(struct napi_struct *napi, int budget)
**/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
- struct igb_adapter *adapter = q_vector->adapter;
- struct igb_ring *tx_ring = q_vector->tx.ring;
- struct igb_tx_buffer *tx_buffer;
- union e1000_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_adapter *adapter = q_vector->adapter;
unsigned int budget = q_vector->tx.work_limit;
+ struct igb_ring *tx_ring = q_vector->tx.ring;
unsigned int i = tx_ring->next_to_clean;
+ union e1000_adv_tx_desc *tx_desc;
+ struct igb_tx_buffer *tx_buffer;
+ struct xsk_buff_pool *xsk_pool;
+ int cpu = smp_processor_id();
+ bool xsk_xmit_done = true;
+ struct netdev_queue *nq;
+ u32 xsk_frames = 0;
if (test_bit(__IGB_DOWN, &adapter->state))
return true;
@@ -8272,10 +8322,14 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- if (tx_buffer->type == IGB_TYPE_SKB)
+ if (tx_buffer->type == IGB_TYPE_SKB) {
napi_consume_skb(tx_buffer->skb, napi_budget);
- else
+ } else if (tx_buffer->type == IGB_TYPE_XDP) {
xdp_return_frame(tx_buffer->xdpf);
+ } else if (tx_buffer->type == IGB_TYPE_XSK) {
+ xsk_frames++;
+ goto skip_for_xsk;
+ }
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -8307,6 +8361,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
+skip_for_xsk:
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
tx_desc++;
@@ -8335,6 +8390,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+ xsk_pool = READ_ONCE(tx_ring->xsk_pool);
+ if (xsk_pool) {
+ if (xsk_frames)
+ xsk_tx_completed(xsk_pool, xsk_frames);
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ txq_trans_cond_update(nq);
+ xsk_xmit_done = igb_xmit_zc(tx_ring, xsk_pool);
+ __netif_tx_unlock(nq);
+ }
+
if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
struct e1000_hw *hw = &adapter->hw;
@@ -8397,7 +8467,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
}
}
- return !!budget;
+ return !!budget && xsk_xmit_done;
}
/**
@@ -8588,9 +8658,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
return skb;
}
-static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
- struct igb_ring *rx_ring,
- struct xdp_buff *xdp)
+static int igb_run_xdp(struct igb_adapter *adapter, struct igb_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int err, result = IGB_XDP_PASS;
struct bpf_prog *xdp_prog;
@@ -8630,7 +8699,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
@@ -8756,10 +8825,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
if (unlikely((igb_test_staterr(rx_desc,
E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
struct net_device *netdev = rx_ring->netdev;
@@ -8786,9 +8851,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
* order to populate the hash, checksum, VLAN, timestamp, protocol, and
* other fields within the skb.
**/
-static void igb_process_skb_fields(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
@@ -8870,6 +8935,38 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
rx_buffer->page = NULL;
}
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status)
+{
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+
+ if (status & IGB_XDP_REDIR)
+ xdp_do_flush();
+
+ if (status & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
+ }
+}
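
Why the tail bump is deferred to igb_finalize_xdp(): per-packet XDP verdicts are OR-ed into a status mask during the NAPI pass, so the comparatively expensive doorbell write happens at most once per poll instead of once per packet. A self-contained user-space analogy follows; the flag values and ring_doorbell() are invented stand-ins, not driver API.

#include <stdio.h>

#define F_REDIR 0x2	/* hypothetical IGB_XDP_REDIR analogue */
#define F_TX    0x4	/* hypothetical IGB_XDP_TX analogue */

static void ring_doorbell(void)
{
	puts("tail bumped once");
}

int main(void)
{
	unsigned int status = 0;
	int pkt;

	/* per-packet verdicts accumulate into one mask */
	for (pkt = 0; pkt < 16; pkt++)
		status |= (pkt & 1) ? F_TX : F_REDIR;

	if (status & F_TX)
		ring_doorbell();	/* single MMIO-like write per poll */
	return 0;
}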
+
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+ unsigned int bytes)
+{
+ struct igb_ring *ring = q_vector->rx.ring;
+
+ u64_stats_update_begin(&ring->rx_syncp);
+ ring->rx_stats.packets += packets;
+ ring->rx_stats.bytes += bytes;
+ u64_stats_update_end(&ring->rx_syncp);
+
+ q_vector->rx.total_packets += packets;
+ q_vector->rx.total_bytes += bytes;
+}
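
igb_update_rx_stats() leans on the u64_stats seqcount scheme so 64-bit counters cannot be torn on 32-bit machines. Below is a simplified user-space sketch of the writer/reader protocol using C11 atomics; it omits the extra fences a production seqlock needs, and every name is invented for illustration.

#include <stdatomic.h>
#include <stdint.h>

struct ring_stats {
	atomic_uint seq;	/* odd while an update is in flight */
	uint64_t packets;
	uint64_t bytes;
};

static void stats_update(struct ring_stats *s, uint64_t pkts, uint64_t bytes)
{
	/* writer side: bump seq around the update, like u64_stats_update_begin/end */
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);
	s->packets += pkts;
	s->bytes += bytes;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);
}

static void stats_snapshot(struct ring_stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {	/* reader side: retry if a writer raced with us */
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}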
+
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
unsigned int total_bytes = 0, total_packets = 0;
@@ -8877,12 +8974,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
struct igb_ring *rx_ring = q_vector->rx.ring;
u16 cleaned_count = igb_desc_unused(rx_ring);
struct sk_buff *skb = rx_ring->skb;
- int cpu = smp_processor_id();
unsigned int xdp_xmit = 0;
- struct netdev_queue *nq;
struct xdp_buff xdp;
u32 frame_sz = 0;
int rx_buf_pgcnt;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -8940,12 +9036,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
#endif
- skb = igb_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = igb_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
xdp_xmit |= xdp_res;
igb_rx_buffer_flip(rx_ring, rx_buffer, size);
@@ -8964,7 +9058,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
&xdp, timestamp);
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -8978,7 +9072,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
continue;
/* verify the packet layout is correct */
- if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || igb_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -9001,24 +9095,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
- if (xdp_xmit & IGB_XDP_REDIR)
- xdp_do_flush();
-
- if (xdp_xmit & IGB_XDP_TX) {
- struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
-
- nq = txring_txq(tx_ring);
- __netif_tx_lock(nq, cpu);
- igb_xdp_ring_update_tail(tx_ring);
- __netif_tx_unlock(nq);
- }
+ if (xdp_xmit)
+ igb_finalize_xdp(adapter, xdp_xmit);
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
+ igb_update_rx_stats(q_vector, total_packets, total_bytes);
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
new file mode 100644
index 000000000000..157d43787fa0
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Intel Corporation. */
+
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "e1000_hw.h"
+#include "igb.h"
+
+static int igb_realloc_rx_buffer_info(struct igb_ring *ring, bool pool_present)
+{
+ int size = pool_present ?
+ sizeof(*ring->rx_buffer_info_zc) * ring->count :
+ sizeof(*ring->rx_buffer_info) * ring->count;
+ void *buff_info = vmalloc(size);
+
+ if (!buff_info)
+ return -ENOMEM;
+
+ if (pool_present) {
+ vfree(ring->rx_buffer_info);
+ ring->rx_buffer_info = NULL;
+ ring->rx_buffer_info_zc = buff_info;
+ } else {
+ vfree(ring->rx_buffer_info_zc);
+ ring->rx_buffer_info_zc = NULL;
+ ring->rx_buffer_info = buff_info;
+ }
+
+ return 0;
+}
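
igb_realloc_rx_buffer_info() keeps exactly one of the two buffer-info representations alive at a time: enabling a pool frees the copy-mode array and installs the zero-copy one, and vice versa, and the old array survives if the new allocation fails. A minimal user-space sketch of that exclusive swap, with invented names and plain malloc/free standing in for vmalloc/vfree:

#include <stdlib.h>

struct ring {
	void *buffer_info;	/* copy-mode metadata */
	void *buffer_info_zc;	/* zero-copy metadata */
};

static int switch_mode(struct ring *r, int zero_copy, size_t size)
{
	void *fresh = malloc(size);

	if (!fresh)
		return -1;	/* old representation survives on failure */

	if (zero_copy) {
		free(r->buffer_info);
		r->buffer_info = NULL;
		r->buffer_info_zc = fresh;
	} else {
		free(r->buffer_info_zc);
		r->buffer_info_zc = NULL;
		r->buffer_info = fresh;
	}
	return 0;
}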
+
+static void igb_txrx_ring_disable(struct igb_adapter *adapter, u16 qid)
+{
+ struct igb_ring *tx_ring = adapter->tx_ring[qid];
+ struct igb_ring *rx_ring = adapter->rx_ring[qid];
+ struct e1000_hw *hw = &adapter->hw;
+
+ set_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
+
+ wr32(E1000_TXDCTL(tx_ring->reg_idx), 0);
+ wr32(E1000_RXDCTL(rx_ring->reg_idx), 0);
+
+ synchronize_net();
+
+ /* Rx/Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
+ igb_clean_tx_ring(tx_ring);
+ igb_clean_rx_ring(rx_ring);
+
+ memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats));
+ memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats));
+}
+
+static void igb_txrx_ring_enable(struct igb_adapter *adapter, u16 qid)
+{
+ struct igb_ring *tx_ring = adapter->tx_ring[qid];
+ struct igb_ring *rx_ring = adapter->rx_ring[qid];
+
+ igb_configure_tx_ring(adapter, tx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+
+ synchronize_net();
+
+ clear_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags);
+
+ /* call igb_desc_unused which always leaves
+ * at least 1 descriptor unused to make sure
+ * next_to_use != next_to_clean
+ */
+ if (rx_ring->xsk_pool)
+ igb_alloc_rx_buffers_zc(rx_ring, rx_ring->xsk_pool,
+ igb_desc_unused(rx_ring));
+ else
+ igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
+
+ /* Rx/Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+}
+
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+ struct igb_ring *ring)
+{
+ int qid = ring->queue_index;
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+
+ if (!igb_xdp_is_enabled(adapter))
+ return NULL;
+
+ return (pool && pool->dev) ? pool : NULL;
+}
+
+static int igb_xsk_pool_enable(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct igb_ring *rx_ring;
+ bool if_running;
+ int err;
+
+ if (qid >= adapter->num_rx_queues)
+ return -EINVAL;
+
+ if (qid >= netdev->real_num_rx_queues ||
+ qid >= netdev->real_num_tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IGB_RX_DMA_ATTR);
+ if (err)
+ return err;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ if (if_running) {
+ err = igb_realloc_rx_buffer_info(rx_ring, true);
+ if (!err) {
+ igb_txrx_ring_enable(adapter, qid);
+ /* Kick start the NAPI context so that receiving will start */
+ err = igb_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
+ }
+
+ if (err) {
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int igb_xsk_pool_disable(struct igb_adapter *adapter, u16 qid)
+{
+ struct xsk_buff_pool *pool;
+ struct igb_ring *rx_ring;
+ bool if_running;
+ int err;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+ if (!pool)
+ return -EINVAL;
+
+ rx_ring = adapter->rx_ring[qid];
+ if_running = netif_running(adapter->netdev) && igb_xdp_is_enabled(adapter);
+ if (if_running)
+ igb_txrx_ring_disable(adapter, qid);
+
+ xsk_pool_dma_unmap(pool, IGB_RX_DMA_ATTR);
+
+ if (if_running) {
+ err = igb_realloc_rx_buffer_info(rx_ring, false);
+ if (err)
+ return err;
+
+ igb_txrx_ring_enable(adapter, qid);
+ }
+
+ return 0;
+}
+
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ return pool ? igb_xsk_pool_enable(adapter, pool, qid) :
+ igb_xsk_pool_disable(adapter, qid);
+}
+
+static u16 igb_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ union e1000_adv_rx_desc *rx_desc, u16 count)
+{
+ dma_addr_t dma;
+ u16 buffs;
+ int i;
+
+ /* nothing to do */
+ if (!count)
+ return 0;
+
+ buffs = xsk_buff_alloc_batch(pool, xdp, count);
+ for (i = 0; i < buffs; i++) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
+ rx_desc->read.pkt_addr = cpu_to_le64(dma);
+ rx_desc->wb.upper.length = 0;
+
+ rx_desc++;
+ xdp++;
+ }
+
+ return buffs;
+}
+
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
+{
+ u32 nb_buffs_extra = 0, nb_buffs = 0;
+ union e1000_adv_rx_desc *rx_desc;
+ u16 ntu = rx_ring->next_to_use;
+ u16 total_count = count;
+ struct xdp_buff **xdp;
+
+ rx_desc = IGB_RX_DESC(rx_ring, ntu);
+ xdp = &rx_ring->rx_buffer_info_zc[ntu];
+
+ if (ntu + count >= rx_ring->count) {
+ nb_buffs_extra = igb_fill_rx_descs(xsk_pool, xdp, rx_desc,
+ rx_ring->count - ntu);
+ if (nb_buffs_extra != rx_ring->count - ntu) {
+ ntu += nb_buffs_extra;
+ goto exit;
+ }
+ rx_desc = IGB_RX_DESC(rx_ring, 0);
+ xdp = rx_ring->rx_buffer_info_zc;
+ ntu = 0;
+ count -= nb_buffs_extra;
+ }
+
+ nb_buffs = igb_fill_rx_descs(xsk_pool, xdp, rx_desc, count);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count)
+ ntu = 0;
+
+ /* clear the length for the next_to_use descriptor */
+ rx_desc = IGB_RX_DESC(rx_ring, ntu);
+ rx_desc->wb.upper.length = 0;
+
+exit:
+ if (rx_ring->next_to_use != ntu) {
+ rx_ring->next_to_use = ntu;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(ntu, rx_ring->tail);
+ }
+
+ return total_count == (nb_buffs + nb_buffs_extra);
+}
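
The two-stage fill in igb_alloc_rx_buffers_zc() handles ring wrap-around: when the request crosses the end of the descriptor array, the chunk up to the end is filled first, then the remainder restarts at index 0. A runnable user-space restatement of just the index arithmetic (ring size and values are arbitrary):

#include <stdio.h>

#define RING_SIZE 8

static unsigned int fill_chunk(int *ring, unsigned int at, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++)
		ring[at + i] = 1;	/* stand-in for writing one descriptor */
	return n;
}

int main(void)
{
	int ring[RING_SIZE] = { 0 };
	unsigned int ntu = 6, count = 5;

	if (ntu + count >= RING_SIZE) {
		unsigned int head = RING_SIZE - ntu; /* chunk up to ring end */

		fill_chunk(ring, ntu, head);
		count -= head;
		ntu = 0;
	}
	ntu += fill_chunk(ring, ntu, count);	/* remainder from index 0 */

	printf("next_to_use is now %u\n", ntu);	/* prints 3 */
	return 0;
}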
+
+void igb_clean_rx_ring_zc(struct igb_ring *rx_ring)
+{
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
+
+ while (ntc != ntu) {
+ struct xdp_buff *xdp = rx_ring->rx_buffer_info_zc[ntc];
+
+ xsk_buff_free(xdp);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
+ }
+}
+
+static struct sk_buff *igb_construct_skb_zc(struct igb_ring *rx_ring,
+ struct xdp_buff *xdp,
+ ktime_t timestamp)
+{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ struct sk_buff *skb;
+
+ net_prefetch(xdp->data_meta);
+
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
+ if (unlikely(!skb))
+ return NULL;
+
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
+
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
+
+ return skb;
+}
+
+static int igb_run_xdp_zc(struct igb_adapter *adapter, struct igb_ring *rx_ring,
+ struct xdp_buff *xdp, struct xsk_buff_pool *xsk_pool,
+ struct bpf_prog *xdp_prog)
+{
+ int err, result = IGB_XDP_PASS;
+ u32 act;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+
+ if (likely(act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+ if (!err)
+ return IGB_XDP_REDIR;
+
+ if (xsk_uses_need_wakeup(xsk_pool) &&
+ err == -ENOBUFS)
+ result = IGB_XDP_EXIT;
+ else
+ result = IGB_XDP_CONSUMED;
+ goto out_failure;
+ }
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ result = igb_xdp_xmit_back(adapter, xdp);
+ if (result == IGB_XDP_CONSUMED)
+ goto out_failure;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ result = IGB_XDP_CONSUMED;
+ break;
+ }
+
+ return result;
+}
+
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
+ struct xsk_buff_pool *xsk_pool, const int budget)
+{
+ struct igb_adapter *adapter = q_vector->adapter;
+ unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ u32 ntc = rx_ring->next_to_clean;
+ struct bpf_prog *xdp_prog;
+ unsigned int xdp_xmit = 0;
+ bool failure = false;
+ u16 entries_to_alloc;
+ struct sk_buff *skb;
+
+ /* xdp_prog cannot be NULL in the ZC path */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ while (likely(total_packets < budget)) {
+ union e1000_adv_rx_desc *rx_desc;
+ ktime_t timestamp = 0;
+ struct xdp_buff *xdp;
+ unsigned int size;
+ int xdp_res = 0;
+
+ rx_desc = IGB_RX_DESC(rx_ring, ntc);
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ xdp = rx_ring->rx_buffer_info_zc[ntc];
+ xsk_buff_set_size(xdp, size);
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ /* pull rx packet timestamp if available and valid */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ int ts_hdr_len;
+
+ ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+ xdp->data,
+ &timestamp);
+
+ xdp->data += ts_hdr_len;
+ xdp->data_meta += ts_hdr_len;
+ size -= ts_hdr_len;
+ }
+
+ xdp_res = igb_run_xdp_zc(adapter, rx_ring, xdp, xsk_pool,
+ xdp_prog);
+
+ if (xdp_res) {
+ if (likely(xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == IGB_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == IGB_XDP_CONSUMED) {
+ xsk_buff_free(xdp);
+ }
+
+ total_packets++;
+ total_bytes += size;
+ ntc++;
+ if (ntc == rx_ring->count)
+ ntc = 0;
+ continue;
+ }
+
+ skb = igb_construct_skb_zc(rx_ring, xdp, timestamp);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ break;
+ }
+
+ xsk_buff_free(xdp);
+ ntc++;
+ if (ntc == rx_ring->count)
+ ntc = 0;
+
+ if (eth_skb_pad(skb))
+ continue;
+
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
+
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+ napi_gro_receive(&q_vector->napi, skb);
+
+ /* update budget accounting */
+ total_packets++;
+ }
+
+ rx_ring->next_to_clean = ntc;
+
+ if (xdp_xmit)
+ igb_finalize_xdp(adapter, xdp_xmit);
+
+ igb_update_rx_stats(q_vector, total_packets, total_bytes);
+
+ entries_to_alloc = igb_desc_unused(rx_ring);
+ if (entries_to_alloc >= IGB_RX_BUFFER_WRITE)
+ failure |= !igb_alloc_rx_buffers_zc(rx_ring, xsk_pool,
+ entries_to_alloc);
+
+ if (xsk_uses_need_wakeup(xsk_pool)) {
+ if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(xsk_pool);
+
+ return (int)total_packets;
+ }
+ return failure ? budget : (int)total_packets;
+}
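
The tail of igb_clean_rx_irq_zc() implements the AF_XDP need_wakeup contract: if allocation failed or the ring was fully cleaned, the Rx wakeup flag is set so user space knows it must kick the kernel; otherwise it is cleared so user space can keep producing without syscalls. A trivial stand-alone restatement of that condition (names invented):

#include <stdbool.h>
#include <stdio.h>

/* mirrors the condition at the end of igb_clean_rx_irq_zc() */
static bool rx_needs_wakeup(bool failure, unsigned int ntc, unsigned int ntu)
{
	return failure || ntc == ntu;	/* fill ring dry or fully cleaned */
}

int main(void)
{
	printf("%d\n", rx_needs_wakeup(false, 3, 3));	/* 1: ring empty */
	printf("%d\n", rx_needs_wakeup(false, 1, 3));	/* 0: work remains */
	return 0;
}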
+
+bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool)
+{
+ unsigned int budget = igb_desc_unused(tx_ring);
+ u32 cmd_type, olinfo_status, nb_pkts, i = 0;
+ struct xdp_desc *descs = xsk_pool->tx_descs;
+ union e1000_adv_tx_desc *tx_desc = NULL;
+ struct igb_tx_buffer *tx_buffer_info;
+ unsigned int total_bytes = 0;
+ dma_addr_t dma;
+
+ if (!netif_carrier_ok(tx_ring->netdev))
+ return true;
+
+ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))
+ return true;
+
+ nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
+ if (!nb_pkts)
+ return true;
+
+ while (nb_pkts-- > 0) {
+ dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ tx_buffer_info->bytecount = descs[i].len;
+ tx_buffer_info->type = IGB_TYPE_XSK;
+ tx_buffer_info->xdpf = NULL;
+ tx_buffer_info->gso_segs = 1;
+ tx_buffer_info->time_stamp = jiffies;
+
+ tx_desc = IGB_TX_DESC(tx_ring, tx_ring->next_to_use);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
+ olinfo_status = descs[i].len << E1000_ADVTXD_PAYLEN_SHIFT;
+
+ /* FIXME: This sets the Report Status (RS) bit for every
+ * descriptor. A nice-to-have optimization would be to set it
+ * only for the last descriptor in the whole batch. See the Intel
+ * ice driver for an example of how to do it.
+ */
+ cmd_type |= descs[i].len | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+
+ total_bytes += descs[i].len;
+
+ i++;
+ tx_ring->next_to_use++;
+ tx_buffer_info->next_to_watch = tx_desc;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+ }
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), total_bytes);
+ igb_xdp_ring_update_tail(tx_ring);
+
+ return nb_pkts < budget;
+}
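
The FIXME above is about requesting a completion writeback only once per batch. A hedged sketch of that nice-to-have: set the Report Status flag solely on the batch tail, so the hardware reports one completion for the whole burst. The bit value and names below are illustrative only, not the real register layout.

#include <stdint.h>
#include <stdio.h>

#define RS_BIT (1u << 27)	/* hypothetical Report Status flag */

int main(void)
{
	uint32_t cmd[4] = { 0 };
	int nb_pkts = 4, i;

	for (i = 0; i < nb_pkts; i++)
		if (i == nb_pkts - 1)
			cmd[i] |= RS_BIT; /* only the tail asks for writeback */

	printf("last cmd has RS: %d\n", !!(cmd[nb_pkts - 1] & RS_BIT));
	return 0;
}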
+
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *ring;
+ u32 eics = 0;
+
+ if (test_bit(__IGB_DOWN, &adapter->state))
+ return -ENETDOWN;
+
+ if (!igb_xdp_is_enabled(adapter))
+ return -EINVAL;
+
+ if (qid >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[qid];
+
+ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags))
+ return -ENETDOWN;
+
+ if (!READ_ONCE(ring->xsk_pool))
+ return -EINVAL;
+
+ if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
+ /* Cause software interrupt */
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+ eics |= ring->q_vector->eims_value;
+ wr32(E1000_EICS, eics);
+ } else {
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index eac0f966e0e4..b8111ad9a9a8 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -337,6 +337,8 @@ struct igc_adapter {
struct igc_led_classdev *leds;
};
+void igc_set_queue_napi(struct igc_adapter *adapter, int q_idx,
+ struct napi_struct *napi);
void igc_up(struct igc_adapter *adapter);
void igc_down(struct igc_adapter *adapter);
int igc_open(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index d9d1a1a11daf..be8a49a86d09 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -279,9 +279,4 @@ struct net_device *igc_get_hw_dev(struct igc_hw *hw);
#define hw_dbg(format, arg...) \
netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
-s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
-s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
-void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
-void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
-
#endif /* _IGC_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 27872bdea9bd..56a35d58e7a6 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2123,10 +2123,6 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
struct net_device *netdev = rx_ring->netdev;
@@ -2515,8 +2511,7 @@ out_failure:
}
}
-static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
- struct xdp_buff *xdp)
+static int igc_xdp_run_prog(struct igc_adapter *adapter, struct xdp_buff *xdp)
{
struct bpf_prog *prog;
int res;
@@ -2530,7 +2525,7 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
res = __igc_xdp_run_prog(adapter, prog, xdp);
out:
- return ERR_PTR(-res);
+ return res;
}
/* This function assumes __netif_tx_lock is held by the caller. */
@@ -2585,6 +2580,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = igc_desc_unused(rx_ring);
int xdp_status = 0, rx_buffer_pgcnt;
+ int xdp_res = 0;
while (likely(total_packets < budget)) {
struct igc_xdp_buff ctx = { .rx_ts = NULL };
@@ -2630,12 +2626,10 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
xdp_buff_clear_frags_flag(&ctx.xdp);
ctx.rx_desc = rx_desc;
- skb = igc_xdp_run_prog(adapter, &ctx.xdp);
+ xdp_res = igc_xdp_run_prog(adapter, &ctx.xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
switch (xdp_res) {
case IGC_XDP_CONSUMED:
rx_buffer->pagecnt_bias++;
@@ -2657,7 +2651,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
skb = igc_construct_skb(rx_ring, rx_buffer, &ctx);
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_failed++;
rx_buffer->pagecnt_bias++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &rx_ring->flags);
@@ -2672,7 +2666,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
continue;
/* verify the packet layout is correct */
- if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || igc_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -4948,6 +4942,22 @@ static int igc_sw_init(struct igc_adapter *adapter)
return 0;
}
+void igc_set_queue_napi(struct igc_adapter *adapter, int vector,
+ struct napi_struct *napi)
+{
+ struct igc_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->rx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->rx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ if (q_vector->tx.ring)
+ netif_queue_set_napi(adapter->netdev,
+ q_vector->tx.ring->queue_index,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
@@ -4955,6 +4965,7 @@ static int igc_sw_init(struct igc_adapter *adapter)
void igc_up(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int i = 0;
/* hardware has been reset, we need to reload some things */
@@ -4962,8 +4973,11 @@ void igc_up(struct igc_adapter *adapter)
clear_bit(__IGC_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&adapter->q_vector[i]->napi);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igc_set_queue_napi(adapter, i, napi);
+ }
if (adapter->msix_entries)
igc_configure_msix(adapter);
@@ -5192,6 +5206,7 @@ void igc_down(struct igc_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
if (adapter->q_vector[i]) {
napi_synchronize(&adapter->q_vector[i]->napi);
+ igc_set_queue_napi(adapter, i, NULL);
napi_disable(&adapter->q_vector[i]->napi);
}
}
@@ -5576,6 +5591,9 @@ static int igc_request_msix(struct igc_adapter *adapter)
q_vector);
if (err)
goto err_free;
+
+ netif_napi_set_irq(&q_vector->napi,
+ adapter->msix_entries[vector].vector);
}
igc_configure_msix(adapter);
@@ -6018,6 +6036,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
struct igc_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
+ struct napi_struct *napi;
int err = 0;
int i = 0;
@@ -6053,8 +6072,11 @@ static int __igc_open(struct net_device *netdev, bool resuming)
clear_bit(__IGC_DOWN, &adapter->state);
- for (i = 0; i < adapter->num_q_vectors; i++)
- napi_enable(&adapter->q_vector[i]->napi);
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ napi = &adapter->q_vector[i]->napi;
+ napi_enable(napi);
+ igc_set_queue_napi(adapter, i, napi);
+ }
/* Clear any pending interrupts. */
rd32(IGC_ICR);
@@ -6779,45 +6801,6 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_get_tstamp = igc_get_tstamp,
};
-/* PCIe configuration access */
-void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- pci_read_config_word(adapter->pdev, reg, value);
-}
-
-void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- pci_write_config_word(adapter->pdev, reg, *value);
-}
-
-s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- if (!pci_is_pcie(adapter->pdev))
- return -IGC_ERR_CONFIG;
-
- pcie_capability_read_word(adapter->pdev, reg, value);
-
- return IGC_SUCCESS;
-}
-
-s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
-{
- struct igc_adapter *adapter = hw->back;
-
- if (!pci_is_pcie(adapter->pdev))
- return -IGC_ERR_CONFIG;
-
- pcie_capability_write_word(adapter->pdev, reg, *value);
-
- return IGC_SUCCESS;
-}
-
u32 igc_rd32(struct igc_hw *hw, u32 reg)
{
struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
@@ -7338,7 +7321,7 @@ static void igc_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-static int igc_resume(struct device *dev)
+static int __igc_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7381,7 +7364,11 @@ static int igc_resume(struct device *dev)
wr32(IGC_WUS, ~0);
if (netif_running(netdev)) {
+ if (!rpm)
+ rtnl_lock();
err = __igc_open(netdev, true);
+ if (!rpm)
+ rtnl_unlock();
if (!err)
netif_device_attach(netdev);
}
@@ -7389,9 +7376,14 @@ static int igc_resume(struct device *dev)
return err;
}
+static int igc_resume(struct device *dev)
+{
+ return __igc_resume(dev, false);
+}
+
static int igc_runtime_resume(struct device *dev)
{
- return igc_resume(dev);
+ return __igc_resume(dev, true);
}
static int igc_suspend(struct device *dev)
@@ -7436,14 +7428,18 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct igc_adapter *adapter = netdev_priv(netdev);
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
igc_down(adapter);
pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -7454,7 +7450,7 @@ static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igc_resume routine.
+ * resembles the first-half of the __igc_resume routine.
**/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
@@ -7493,7 +7489,7 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
- * second-half of the igc_resume routine.
+ * second-half of the __igc_resume routine.
*/
static void igc_io_resume(struct pci_dev *pdev)
{
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.c b/drivers/net/ethernet/intel/igc/igc_nvm.c
index 58f81aba0144..efd121c03967 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.c
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.c
@@ -36,56 +36,6 @@ static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg)
}
/**
- * igc_acquire_nvm - Generic request for access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Set the EEPROM access request bit and wait for EEPROM access grant bit.
- * Return successful if access grant bit set, else clear the request for
- * EEPROM access and return -IGC_ERR_NVM (-1).
- */
-s32 igc_acquire_nvm(struct igc_hw *hw)
-{
- s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
- u32 eecd = rd32(IGC_EECD);
- s32 ret_val = 0;
-
- wr32(IGC_EECD, eecd | IGC_EECD_REQ);
- eecd = rd32(IGC_EECD);
-
- while (timeout) {
- if (eecd & IGC_EECD_GNT)
- break;
- udelay(5);
- eecd = rd32(IGC_EECD);
- timeout--;
- }
-
- if (!timeout) {
- eecd &= ~IGC_EECD_REQ;
- wr32(IGC_EECD, eecd);
- hw_dbg("Could not acquire NVM grant\n");
- ret_val = -IGC_ERR_NVM;
- }
-
- return ret_val;
-}
-
-/**
- * igc_release_nvm - Release exclusive access to EEPROM
- * @hw: pointer to the HW structure
- *
- * Stop any current commands to the EEPROM and clear the EEPROM request bit.
- */
-void igc_release_nvm(struct igc_hw *hw)
-{
- u32 eecd;
-
- eecd = rd32(IGC_EECD);
- eecd &= ~IGC_EECD_REQ;
- wr32(IGC_EECD, eecd);
-}
-
-/**
* igc_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.h b/drivers/net/ethernet/intel/igc/igc_nvm.h
index f9fc2e9cfb03..ab78d0c64547 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.h
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.h
@@ -4,8 +4,6 @@
#ifndef _IGC_NVM_H_
#define _IGC_NVM_H_
-s32 igc_acquire_nvm(struct igc_hw *hw);
-void igc_release_nvm(struct igc_hw *hw);
s32 igc_read_mac_addr(struct igc_hw *hw);
s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data);
s32 igc_validate_nvm_checksum(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index e27af72aada8..13bbd3346e01 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -13,6 +13,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
struct net_device *dev = adapter->netdev;
bool if_running = netif_running(dev);
struct bpf_prog *old_prog;
+ bool need_update;
if (dev->mtu > ETH_DATA_LEN) {
/* For now, the driver doesn't support XDP functionality with
@@ -22,7 +23,8 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
- if (if_running)
+ need_update = !!adapter->xdp_prog != !!prog;
+ if (if_running && need_update)
igc_close(dev);
old_prog = xchg(&adapter->xdp_prog, prog);
@@ -34,7 +36,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
else
xdp_features_clear_redirect_target(dev);
- if (if_running)
+ if (if_running && need_update)
igc_open(dev);
return 0;
@@ -84,6 +86,7 @@ static int igc_xdp_enable_pool(struct igc_adapter *adapter,
napi_disable(napi);
}
+ igc_set_queue_napi(adapter, queue_id, NULL);
set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
@@ -133,6 +136,7 @@ static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
+ igc_set_queue_napi(adapter, queue_id, napi);
if (needs_reset) {
napi_enable(napi);
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 965e5ce1b326..b456d102655a 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 1999 - 2018 Intel Corporation.
+# Copyright(c) 1999 - 2024 Intel Corporation.
#
# Makefile for the Intel(R) 10GbE PCI Express ethernet driver
#
@@ -9,7 +9,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
- ixgbe_xsk.o
+ ixgbe_xsk.o ixgbe_e610.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 559b443c409f..e6a380d4929b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBE_H_
#define _IXGBE_H_
@@ -20,6 +20,7 @@
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
+#include "ixgbe_e610.h"
#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
@@ -173,6 +174,7 @@ enum ixgbe_tx_flags {
#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
+#define IXGBE_E610_VF_DEVICE_ID 0x57AD
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
{ \
@@ -654,6 +656,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
+#define IXGBE_FLAG2_FW_ASYNC_EVENT BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
@@ -661,6 +664,9 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18)
#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19)
+#define IXGBE_FLAG2_PHY_FW_LOAD_FAILED BIT(20)
+#define IXGBE_FLAG2_NO_MEDIA BIT(21)
+#define IXGBE_FLAG2_MOD_POWER_UNSUPPORTED BIT(22)
/* Tx fast path data */
int num_tx_queues;
@@ -793,6 +799,7 @@ struct ixgbe_adapter {
u32 vferr_refcount;
struct ixgbe_mac_addr *mac_table;
struct kobject *info_kobj;
+ u16 lse_mask;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
@@ -849,6 +856,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
return IXGBE_MAX_RSS_INDICES_X550;
default:
return 0;
@@ -874,6 +882,7 @@ enum ixgbe_state_t {
__IXGBE_PTP_RUNNING,
__IXGBE_PTP_TX_IN_PROGRESS,
__IXGBE_RESET_REQUESTED,
+ __IXGBE_PHY_INIT_COMPLETE,
};
struct ixgbe_cb {
@@ -896,6 +905,7 @@ enum ixgbe_boards {
board_x550em_x_fw,
board_x550em_a,
board_x550em_a_fw,
+ board_e610,
};
extern const struct ixgbe_info ixgbe_82598_info;
@@ -906,6 +916,7 @@ extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
+extern const struct ixgbe_info ixgbe_e610_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index cdaf087b4e85..964988b4d58b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -1615,6 +1615,7 @@ int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3be1bfb16498..7beaf6ea57f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -58,6 +58,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_E610_SFP:
supported = false;
break;
default:
@@ -88,6 +89,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
supported = true;
break;
default:
@@ -469,9 +472,14 @@ int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_e610) {
if (hw->phy.id == 0)
hw->phy.ops.identify(hw);
+ }
+
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
@@ -660,7 +668,11 @@ int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
hw->bus.type = ixgbe_bus_type_pci_express;
/* Get the negotiated link width and speed from PCI config space */
- link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
+ if (hw->mac.type == ixgbe_mac_e610)
+ link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS_E610);
+ else
+ link_status = ixgbe_read_pci_cfg_word(hw,
+ IXGBE_PCI_LINK_STATUS);
hw->bus.width = ixgbe_convert_bus_width(link_status);
hw->bus.speed = ixgbe_convert_bus_speed(link_status);
@@ -2918,6 +2930,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
+ case ixgbe_mac_e610:
+ pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
default:
return 1;
}
@@ -3366,7 +3382,8 @@ int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case IXGBE_LINKS_SPEED_100_82599:
- if ((hw->mac.type >= ixgbe_mac_X550) &&
+ if ((hw->mac.type >= ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_e610) &&
(links_reg & IXGBE_LINKS_SPEED_NON_STD))
*speed = IXGBE_LINK_SPEED_5GB_FULL;
else
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index f2709b10c2e5..19d6b6fa8fb3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe.h"
#include <linux/dcbnl.h>
@@ -154,6 +154,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
for (j = 0; j < netdev->addr_len; j++, i++)
perm_addr[i] = adapter->hw.mac.san_addr[j];
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
new file mode 100644
index 000000000000..683c668672d6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -0,0 +1,2658 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Intel Corporation. */
+
+#include "ixgbe_common.h"
+#include "ixgbe_e610.h"
+#include "ixgbe_x550.h"
+#include "ixgbe_type.h"
+#include "ixgbe_x540.h"
+#include "ixgbe_mbx.h"
+#include "ixgbe_phy.h"
+
+/**
+ * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
+ * be resent
+ * @opcode: ACI opcode
+ *
+ * Check whether the ACI command should be sent again based on the provided
+ * opcode. A retry may be needed when the CSR is busy, e.g. during link
+ * state changes.
+ *
+ * Return: true if the sending command routine should be repeated,
+ * otherwise false.
+ */
+static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
+{
+ switch (opcode) {
+ case ixgbe_aci_opc_disable_rxen:
+ case ixgbe_aci_opc_get_phy_caps:
+ case ixgbe_aci_opc_get_link_status:
+ case ixgbe_aci_opc_get_link_topo:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_aci_send_cmd_execute - perform a single send of a FW Admin Command
+ * to the FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Admin Command is sent using CSR by setting descriptor and buffer in specific
+ * registers.
+ *
+ * Return: the exit code of the operation.
+ * * - 0 - success.
+ * * - -EIO - CSR mechanism is not enabled.
+ * * - -EBUSY - CSR mechanism is busy.
+ * * - -EINVAL - buf_size is too big, or buf and buf_size are inconsistent.
+ * * - -ETIME - the Admin Command timed out.
+ * * - -EIO - the HICR register is in an invalid state,
+ * a bad opcode was returned, or
+ * the Admin Command failed with an error.
+ */
+static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
+ struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u16 opcode, buf_tail_size = buf_size % 4;
+ u32 *raw_desc = (u32 *)desc;
+ u32 hicr, i, buf_tail = 0;
+ bool valid_buf = false;
+
+ hw->aci.last_status = IXGBE_ACI_RC_OK;
+
+ /* Check if the Admin Command Interface mechanism is enabled */
+ hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR);
+
+ if (!(hicr & IXGBE_PF_HICR_EN))
+ return -EIO;
+
+ if (hicr & IXGBE_PF_HICR_C) {
+ hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
+ return -EBUSY;
+ }
+
+ opcode = le16_to_cpu(desc->opcode);
+
+ if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE)
+ return -EINVAL;
+
+ if (buf)
+ desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_BUF);
+
+ if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_BUF)) {
+ if ((buf && !buf_size) ||
+ (!buf && buf_size))
+ return -EINVAL;
+ if (buf && buf_size)
+ valid_buf = true;
+ }
+
+ if (valid_buf) {
+ if (buf_tail_size)
+ memcpy(&buf_tail, buf + buf_size - buf_tail_size,
+ buf_tail_size);
+
+ if (((buf_size + 3) & ~0x3) > IXGBE_ACI_LG_BUF)
+ desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_LB);
+
+ desc->datalen = cpu_to_le16(buf_size);
+
+ if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_RD)) {
+ for (i = 0; i < buf_size / 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]);
+ if (buf_tail_size)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), buf_tail);
+ }
+ }
+
+ /* Descriptor is written to specific registers */
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HIDA(i), raw_desc[i]);
+
+ /* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
+ * PF_HICR_EV
+ */
+ hicr = (IXGBE_READ_REG(hw, IXGBE_PF_HICR) | IXGBE_PF_HICR_C) &
+ ~(IXGBE_PF_HICR_SV | IXGBE_PF_HICR_EV);
+ IXGBE_WRITE_REG(hw, IXGBE_PF_HICR, hicr);
+
+#define MAX_SLEEP_RESP_US 1000
+#define MAX_TMOUT_RESP_SYNC_US 100000000
+
+ /* Wait for sync Admin Command response */
+ read_poll_timeout(IXGBE_READ_REG, hicr,
+ (hicr & IXGBE_PF_HICR_SV) ||
+ !(hicr & IXGBE_PF_HICR_C),
+ MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_SYNC_US, true, hw,
+ IXGBE_PF_HICR);
+
+#define MAX_TMOUT_RESP_ASYNC_US 150000000
+
+ /* Wait for async Admin Command response */
+ read_poll_timeout(IXGBE_READ_REG, hicr,
+ (hicr & IXGBE_PF_HICR_EV) ||
+ !(hicr & IXGBE_PF_HICR_C),
+ MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_ASYNC_US, true, hw,
+ IXGBE_PF_HICR);
+
+ /* Read sync Admin Command response */
+ if (hicr & IXGBE_PF_HICR_SV) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA(i));
+ }
+
+ /* Read async Admin Command response */
+ if ((hicr & IXGBE_PF_HICR_EV) && !(hicr & IXGBE_PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA_2(i));
+ }
+
+ /* Handle timeout and invalid state of HICR register */
+ if (hicr & IXGBE_PF_HICR_C)
+ return -ETIME;
+
+ if (!(hicr & IXGBE_PF_HICR_SV) && !(hicr & IXGBE_PF_HICR_EV))
+ return -EIO;
+
+ /* For every command other than 0x0014, treat an opcode mismatch as an
+ * error. The response to the 0x0014 command read from HIDA_2 is an
+ * event descriptor, which is expected to carry a different opcode than
+ * the command.
+ */
+ if (desc->opcode != cpu_to_le16(opcode) &&
+ opcode != ixgbe_aci_opc_get_fw_event)
+ return -EIO;
+
+ if (desc->retval) {
+ hw->aci.last_status = (enum ixgbe_aci_err)
+ le16_to_cpu(desc->retval);
+ return -EIO;
+ }
+
+ /* Write the response values back to the caller's buffer */
+ if (valid_buf) {
+ for (i = 0; i < buf_size / 4; i++)
+ ((u32 *)buf)[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
+ if (buf_tail_size) {
+ buf_tail = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
+ memcpy(buf + buf_size - buf_tail_size, &buf_tail,
+ buf_tail_size);
+ }
+ }
+
+ return 0;
+}
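
One detail worth calling out in the routine above is the tail handling: the buffer is streamed to 32-bit registers one dword at a time, and the trailing 1-3 bytes are staged through a zero-padded dword so the copy never reads past the buffer. A self-contained user-space model of the same arithmetic (write_reg() is a printf stand-in, not hardware access):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void write_reg(unsigned int idx, uint32_t val)
{
	printf("reg[%u] = 0x%08x\n", idx, val);
}

static void push_buffer(const void *buf, uint16_t len)
{
	uint16_t tail = len % 4;
	uint32_t dword;
	unsigned int i;

	for (i = 0; i < len / 4u; i++) {
		memcpy(&dword, (const uint8_t *)buf + i * 4, 4);
		write_reg(i, dword);
	}
	if (tail) {
		dword = 0;	/* zero padding for the last partial dword */
		memcpy(&dword, (const uint8_t *)buf + len - tail, tail);
		write_reg(i, dword);
	}
}

int main(void)
{
	uint8_t msg[7] = { 1, 2, 3, 4, 5, 6, 7 };

	push_buffer(msg, sizeof(msg));
	return 0;
}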
+
+/**
+ * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Helper function to send FW Admin Commands to the FW Admin Command Interface.
+ *
+ * Retry sending the FW Admin Command multiple times to the FW ACI
+ * if the EBUSY Admin Command error is returned.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u16 opcode = le16_to_cpu(desc->opcode);
+ struct ixgbe_aci_desc desc_cpy;
+ enum ixgbe_aci_err last_status;
+ u8 idx = 0, *buf_cpy = NULL;
+ bool is_cmd_for_retry;
+ unsigned long timeout;
+ int err;
+
+ is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
+ if (is_cmd_for_retry) {
+ if (buf) {
+ buf_cpy = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf_cpy)
+ return -ENOMEM;
+ memcpy(buf_cpy, buf, buf_size);
+ }
+ desc_cpy = *desc;
+ }
+
+ timeout = jiffies + msecs_to_jiffies(IXGBE_ACI_SEND_TIMEOUT_MS);
+ do {
+ mutex_lock(&hw->aci.lock);
+ err = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
+ last_status = hw->aci.last_status;
+ mutex_unlock(&hw->aci.lock);
+
+ if (!is_cmd_for_retry || !err ||
+ last_status != IXGBE_ACI_RC_EBUSY)
+ break;
+
+ if (buf)
+ memcpy(buf, buf_cpy, buf_size);
+ *desc = desc_cpy;
+
+ msleep(IXGBE_ACI_SEND_DELAY_TIME_MS);
+ } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE &&
+ time_before(jiffies, timeout));
+
+ kfree(buf_cpy);
+
+ return err;
+}
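
The retry loop above stops on the first of: success, a non-EBUSY error, IXGBE_ACI_SEND_MAX_EXECUTE attempts, or the overall millisecond deadline. A compact user-space model of that policy (the constants and fake_send() are invented; the real code also restores desc and buf between attempts):

#include <stdio.h>
#include <time.h>
#include <stdbool.h>

#define MAX_ATTEMPTS 3	/* stands in for IXGBE_ACI_SEND_MAX_EXECUTE */

static int busy_left = 2;	/* pretend the interface is busy twice */

static int fake_send(void)
{
	return busy_left-- > 0 ? -1 : 0;
}

static bool deadline_passed(const struct timespec *end)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return now.tv_sec > end->tv_sec ||
	       (now.tv_sec == end->tv_sec && now.tv_nsec >= end->tv_nsec);
}

int main(void)
{
	struct timespec end;
	int attempt = 0, err;

	clock_gettime(CLOCK_MONOTONIC, &end);
	end.tv_sec += 1;	/* overall budget, like the ms deadline above */

	do {
		err = fake_send();
		if (!err)
			break;	/* success (or, in the driver, a non-busy error) */
	} while (++attempt < MAX_ATTEMPTS && !deadline_passed(&end));

	printf("err=%d after %d attempts\n", err, attempt + 1);
	return 0;
}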
+
+/**
+ * ixgbe_aci_check_event_pending - check if there are any pending events
+ * @hw: pointer to the HW struct
+ *
+ * Determine if there are any pending events.
+ *
+ * Return: true if there are any currently pending events, otherwise false.
+ */
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
+{
+ u32 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
+ u32 fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
+
+ return (fwsts & ep_bit_mask) ? true : false;
+}
+
+/**
+ * ixgbe_aci_get_event - get an event from ACI
+ * @hw: pointer to the HW struct
+ * @e: event information structure
+ * @pending: optional flag signaling that there are more pending events
+ *
+ * Obtain an event from the ACI and return its content through 'e' using
+ * ACI command (0x0014). Indicate through 'pending' whether further events
+ * remain to be retrieved.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending)
+{
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ if (!e || (!e->msg_buf && e->buf_len))
+ return -EINVAL;
+
+ mutex_lock(&hw->aci.lock);
+
+ /* Check if there are any events pending */
+ if (!ixgbe_aci_check_event_pending(hw)) {
+ err = -ENOENT;
+ goto aci_get_event_exit;
+ }
+
+ /* Obtain pending event */
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
+ err = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
+ if (err)
+ goto aci_get_event_exit;
+
+ /* Returned 0x0014 opcode indicates that no event was obtained */
+ if (desc.opcode == cpu_to_le16(ixgbe_aci_opc_get_fw_event)) {
+ err = -ENOENT;
+ goto aci_get_event_exit;
+ }
+
+ /* Determine size of event data */
+ e->msg_len = min_t(u16, le16_to_cpu(desc.datalen), e->buf_len);
+ /* Write event descriptor to event info structure */
+ memcpy(&e->desc, &desc, sizeof(e->desc));
+
+ /* Check if there are any further events pending */
+ if (pending)
+ *pending = ixgbe_aci_check_event_pending(hw);
+
+aci_get_event_exit:
+ mutex_unlock(&hw->aci.lock);
+
+ return err;
+}
+
+/**
+ * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Helper function to fill the descriptor desc with default values
+ * and the provided opcode.
+ */
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
+{
+ /* Zero out the desc. */
+ memset(desc, 0, sizeof(*desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI);
+}
+
+/**
+ * ixgbe_aci_req_res - request a common resource
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ *
+ * Requests a common resource using the ACI command (0x0008).
+ * Specifies the maximum time the driver may hold the resource.
+ * If the requested resource is currently held by another driver, the
+ * command returns busy and the timeout field indicates the maximum time
+ * the current owner has to free it.
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access,
+ u8 sdp_number, u32 *timeout)
+{
+ struct ixgbe_aci_cmd_req_res *cmd_resp;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd_resp = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
+
+ cmd_resp->res_id = cpu_to_le16(res);
+ cmd_resp->access_type = cpu_to_le16(access);
+ cmd_resp->res_number = cpu_to_le32(sdp_number);
+ cmd_resp->timeout = cpu_to_le32(*timeout);
+ *timeout = 0;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ /* If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
+ */
+ if (!err || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_release_res - release a common resource using ACI
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ *
+ * Release a common resource using ACI command (0x0009).
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_aci_release_res(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_ids res, u8 sdp_number)
+{
+ struct ixgbe_aci_cmd_req_res *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
+
+ cmd->res_id = cpu_to_le16(res);
+ cmd->res_number = cpu_to_le32(sdp_number);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_acquire_res - acquire the ownership of a resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * Attempt to acquire ownership of a resource via ixgbe_aci_req_res over
+ * the ACI. If another driver has already acquired the resource and
+ * performed any necessary updates, -EALREADY is returned and the caller
+ * does not obtain the resource, so it has no further work to do.
+ * If needed, the function polls until the current lock owner times out.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout)
+{
+#define IXGBE_RES_POLLING_DELAY_MS 10
+ u32 delay = IXGBE_RES_POLLING_DELAY_MS;
+ u32 res_timeout = timeout;
+ u32 retry_timeout;
+ int err;
+
+ err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* A return code of -EALREADY means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
+ */
+ if (err == -EALREADY)
+ return err;
+
+ /* If necessary, poll until the current lock owner times out.
+ * Set retry_timeout to the timeout value reported by the FW in the
+ * response to the "Request Resource Ownership" (0x0008) Admin Command
+ * as it indicates the maximum time the current owner of the resource
+ * is allowed to hold it.
+ */
+ retry_timeout = res_timeout;
+ while (err && retry_timeout && res_timeout) {
+ msleep(delay);
+ retry_timeout = (retry_timeout > delay) ?
+ retry_timeout - delay : 0;
+ err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* Success - lock acquired.
+ * -EALREADY - lock free, no work to do.
+ */
+ if (!err || err == -EALREADY)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_release_res - release a common resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * Release a common resource using ixgbe_aci_release_res.
+ */
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
+{
+ u32 total_delay = 0;
+ int err;
+
+ err = ixgbe_aci_release_res(hw, res, 0);
+
+ /* In rare cases releasing the resource can hit an admin command
+ * timeout; retry until it succeeds or the release timeout expires.
+ */
+ while (err == -ETIME &&
+ total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT) {
+ usleep_range(1000, 1500);
+ err = ixgbe_aci_release_res(hw, res, 0);
+ total_delay++;
+ }
+}
+
+/**
+ * ixgbe_parse_e610_caps - Parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Return: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_caps *caps,
+ struct ixgbe_aci_cmd_list_caps_elem *elem,
+ const char *prefix)
+{
+ u32 logical_id = le32_to_cpu(elem->logical_id);
+ u32 phys_id = le32_to_cpu(elem->phys_id);
+ u32 number = le32_to_cpu(elem->number);
+ u16 cap = le16_to_cpu(elem->cap);
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ break;
+ case IXGBE_ACI_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_VMDQ:
+ caps->vmdq = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ break;
+ case IXGBE_ACI_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_NVM_VER:
+ break;
+ case IXGBE_ACI_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ break;
+ case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ break;
+ case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
+ {
+ u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;
+
+ caps->ext_topo_dev_img_ver_high[index] = number;
+ caps->ext_topo_dev_img_ver_low[index] = logical_id;
+ caps->ext_topo_dev_img_part_num[index] =
+ FIELD_GET(IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M, phys_id);
+ caps->ext_topo_dev_img_load_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
+ caps->ext_topo_dev_img_prog_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
+ break;
+ }
+ default:
+ /* Not one of the recognized common capabilities */
+ return false;
+ }
+
+ return true;
+}
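
The EXT_TOPO_DEV_IMG* branch above extracts a sub-field with FIELD_GET(): mask out the field, then shift it down by the mask's lowest set bit. A stand-alone illustration with an example mask (the real IXGBE mask value may differ):

#include <stdint.h>
#include <stdio.h>

#define IMG_PART_NUM_M 0x00ffff00u	/* example field mask */

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t phys_id = 0x12abcd34;

	printf("part num: 0x%x\n", field_get(IMG_PART_NUM_M, phys_id));
	return 0;	/* prints 0xabcd */
}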
+
+/**
+ * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_funcs = hweight32(le32_to_cpu(cap->number));
+}
+
+/**
+ * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VF for device capabilities.
+ */
+static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_vfs_exposed = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
+ */
+static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_FD for device capabilities.
+ */
+static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ dev_p->num_flow_director_fltr = le32_to_cpu(cap->number);
+}
+
+/**
+ * ixgbe_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(dev_p, 0, sizeof(*dev_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+ ixgbe_parse_e610_caps(hw, &dev_p->common_cap, &cap_resp[i],
+ "dev caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ ixgbe_parse_valid_functions_cap(hw, dev_p,
+ &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_FD:
+ ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VF.
+ */
+static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ func_p->num_allocd_vfs = le32_to_cpu(cap->number);
+ func_p->vf_base_id = le32_to_cpu(cap->logical_id);
+}
+
+/**
+ * ixgbe_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ *
+ * Return: the number of resources per PF or 0, if no PFs are available.
+ */
+static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
+{
+#define IXGBE_CAPS_VALID_FUNCS_M GENMASK(7, 0)
+ u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
+ IXGBE_CAPS_VALID_FUNCS_M);
+
+ return funcs ? (max / funcs) : 0;
+}
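+
+/* Worked example (hypothetical firmware values): a valid_functions bitmap
+ * of 0x5 (PF0 and PF2 enabled) gives funcs = hweight8(0x5) = 2, so
+ * ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI) returns IXGBE_MAX_VSI / 2.
+ * An empty bitmap yields 0 rather than a division by zero.
+ */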
+
+/**
+ * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
+ */
+static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
+}
+
+/**
+ * ixgbe_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_e610_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
+ */
+static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(func_p, 0, sizeof(*func_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = le16_to_cpu(cap_resp[i].cap);
+
+ ixgbe_parse_e610_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_aci_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
+ *
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return -ENOMEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
+ * buffer that firmware could return) to avoid this.
+ *
+ * Return: the exit code of the operation.
+ * Exit code of -ENOMEM means the buffer size is too small.
+ */
+int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc)
+{
+ struct ixgbe_aci_cmd_list_caps *cmd;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ixgbe_aci_opc_list_func_caps &&
+ opc != ixgbe_aci_opc_list_dev_caps)
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
+ err = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (cap_count)
+ *cap_count = le32_to_cpu(cmd->count);
+
+ return err;
+}
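+
+/* Usage sketch (illustrative only; ixgbe_discover_dev_caps() and
+ * ixgbe_discover_func_caps() below are the real callers). Passing the
+ * maximum buffer size avoids the -ENOMEM case described above:
+ *
+ * u8 *buf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ * u32 cap_count;
+ * int err;
+ *
+ * if (buf) {
+ *         err = ixgbe_aci_list_caps(hw, buf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ *                                   &cap_count,
+ *                                   ixgbe_aci_opc_list_dev_caps);
+ *         kfree(buf);
+ * }
+ */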
+
+/**
+ * ixgbe_discover_dev_caps - Read and extract device capabilities
+ * @hw: pointer to the hardware structure
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps)
+{
+ u32 cap_count;
+ u8 *cbuf;
+ int err;
+
+ cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_dev_caps);
+ if (!err)
+ ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+
+ kfree(cbuf);
+
+ return err;
+}
+
+/**
+ * ixgbe_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps)
+{
+ u32 cap_count;
+ u8 *cbuf;
+ int err;
+
+ cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_func_caps);
+ if (!err)
+ ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
+
+ kfree(cbuf);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ *
+ * Retrieve both device and function capabilities.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_caps(struct ixgbe_hw *hw)
+{
+ int err;
+
+ err = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
+ if (err)
+ return err;
+
+ return ixgbe_discover_func_caps(hw, &hw->func_caps);
+}
+
+/**
+ * ixgbe_aci_disable_rxen - disable RX
+ * @hw: pointer to the HW struct
+ *
+ * Request a safe disable of Receive Enable using ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_disable_rxen *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.disable_rxen;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
+
+ cmd->lport_num = hw->bus.func;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_phy_caps - returns PHY capabilities
+ * @hw: pointer to the HW struct
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ *
+ * Returns the various PHY capabilities supported on the Port
+ * using ACI command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
+{
+ struct ixgbe_aci_cmd_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ cmd = &desc.params.get_phy;
+
+ if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
+ return -EINVAL;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= cpu_to_le16(IXGBE_ACI_GET_PHY_RQM);
+
+ cmd->param0 |= cpu_to_le16(report_mode);
+ err = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
+ if (!err && report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
+ hw->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
+ hw->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
+ memcpy(hw->link.link_info.module_type, &pcaps->module_type,
+ sizeof(hw->link.link_info.module_type));
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy data from PHY capabilities data structure
+ * to PHY configuration data structure
+ */
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ if (!caps || !cfg)
+ return;
+
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+}
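+
+/* A sketch of the intended read-modify-write flow (illustrative;
+ * ixgbe_setup_fc_e610() and ixgbe_set_phy_power_e610() below are the
+ * in-tree users of this pattern):
+ *
+ * struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
+ * struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
+ *
+ * err = ixgbe_aci_get_phy_caps(hw, false,
+ *                              IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+ * if (!err) {
+ *         ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+ *         ... adjust cfg.caps here before applying ...
+ *         err = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ * }
+ */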
+
+/**
+ * ixgbe_aci_set_phy_cfg - set PHY configuration
+ * @hw: pointer to the HW struct
+ * @cfg: structure with PHY configuration data to be set
+ *
+ * Set the various PHY configuration parameters supported on the Port
+ * using ACI command (0x0601).
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ if (!cfg)
+ return -EINVAL;
+
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
+ desc.params.set_phy.lport_num = hw->bus.func;
+ desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
+
+ err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+ if (!err)
+ hw->phy.curr_user_phy_cfg = *cfg;
+
+ return err;
+}
+
+/**
+ * ixgbe_aci_set_link_restart_an - set up link and restart AN
+ * @hw: pointer to the HW struct
+ * @ena_link: if true: enable link, if false: disable link
+ *
+ * Function sets up the link and restarts the Auto-Negotiation over the link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
+{
+ struct ixgbe_aci_cmd_restart_an *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.restart_an;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
+
+ cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
+ cmd->lport_num = hw->bus.func;
+ if (ena_link)
+ cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_is_media_cage_present - check if media cage is present
+ * @hw: pointer to the HW struct
+ *
+ * Identify presence of media cage using the ACI command (0x06E0).
+ *
+ * Return: true if media cage is present, else false. If no cage, then
+ * media type is backplane or BASE-T.
+ */
+static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_topo *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.get_link_topo;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+
+ cmd->addr.topo_params.node_type_ctx =
+ FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M,
+ IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT);
+
+ /* Set node type. */
+ cmd->addr.topo_params.node_type_ctx |=
+ FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M,
+ IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
+
+ /* Node type cage can be used to determine if the cage is present. If
+ * the ACI returns an error (ENOENT), then no cage is present and the
+ * connection type is backplane or BASE-T, so report success of the
+ * lookup as cage presence.
+ */
+ return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
+}
+
+/**
+ * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
+ * @hw: pointer to the HW struct
+ *
+ * Try to identify the media type based on the phy type.
+ * If more than one media type is set, ixgbe_media_type_unknown is returned.
+ * phy_type_low is checked first, then phy_type_high.
+ * If neither identifies the media, ixgbe_media_type_unknown is returned.
+ *
+ * Return: type of a media based on phy type in form of enum.
+ */
+static enum ixgbe_media_type
+ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
+{
+ struct ixgbe_link_status *hw_link_info;
+
+ if (!hw)
+ return ixgbe_media_type_unknown;
+
+ hw_link_info = &hw->link.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ixgbe_media_type_unknown;
+
+ if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be since SGMII is meant to be between a MAC and a PHY
+ * in a backplane. Try to detect this case and handle it
+ */
+ if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ixgbe_media_type_da;
+
+ switch (hw_link_info->phy_type_low) {
+ case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_LR:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_100BASE_TX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_T:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_T:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_T:
+ return ixgbe_media_type_copper;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_CR1:
+ return ixgbe_media_type_da;
+ case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C:
+ if (ixgbe_is_media_cage_present(hw))
+ return ixgbe_media_type_aui;
+ fallthrough;
+ case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_X:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR1:
+ case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S:
+ return ixgbe_media_type_backplane;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case IXGBE_PHY_TYPE_HIGH_10BASE_T:
+ return ixgbe_media_type_copper;
+ }
+ }
+ return ixgbe_media_type_unknown;
+}
+
+/**
+ * ixgbe_update_link_info - update status of the HW network link
+ * @hw: pointer to the HW struct
+ *
+ * Update the status of the HW network link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_update_link_info(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
+ struct ixgbe_link_status *li;
+ int err;
+
+ if (!hw)
+ return -EINVAL;
+
+ li = &hw->link.link_info;
+
+ err = ixgbe_aci_get_link_info(hw, true, NULL);
+ if (err)
+ return err;
+
+ if (!(li->link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ return 0;
+
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ pcaps);
+
+ if (!err)
+ memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type));
+
+ kfree(pcaps);
+
+ return err;
+}
+
+/**
+ * ixgbe_get_link_status - get status of the HW network link
+ * @hw: pointer to the HW struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * Variable link_up is true if the link is up, false if it is down.
+ * The value of link_up is invalid if the return status is non-zero. As a
+ * side effect of this call, link status reporting becomes enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
+{
+ if (!hw || !link_up)
+ return -EINVAL;
+
+ if (hw->link.get_link_info) {
+ int err = ixgbe_update_link_info(hw);
+
+ if (err)
+ return err;
+ }
+
+ *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_get_link_info - get the link status
+ * @hw: pointer to the HW struct
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ *
+ * Get the current Link Status using ACI command (0x607).
+ * The current link can be optionally provided to update
+ * the status.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = {};
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_link_status *li_old, *li;
+ struct ixgbe_fc_info *hw_fc_info;
+ struct ixgbe_aci_desc desc;
+ bool tx_pause, rx_pause;
+ u8 cmd_flags;
+ int err;
+
+ if (!hw)
+ return -EINVAL;
+
+ li_old = &hw->link.link_info_old;
+ li = &hw->link.link_info;
+ hw_fc_info = &hw->fc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = cpu_to_le16(cmd_flags);
+ resp->lport_num = hw->bus.func;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+ if (err)
+ return err;
+
+ /* Save off old link status information. */
+ *li_old = *li;
+
+ /* Update current link status information. */
+ li->link_speed = le16_to_cpu(link_data.link_speed);
+ li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+ li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
+ li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
+ IXGBE_ACI_CFG_PACING_TYPE_M);
+
+ /* Update fc info. */
+ tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_full;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_tx_pause;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_rx_pause;
+ else
+ hw_fc_info->current_mode = ixgbe_fc_none;
+
+ li->lse_ena = !!(le16_to_cpu(resp->cmd_flags) &
+ IXGBE_ACI_LSE_IS_ENABLED);
+
+ /* Save link status information. */
+ if (link)
+ *link = *li;
+
+ /* Flag cleared so calling functions don't call AQ again. */
+ hw->link.get_link_info = false;
+
+ return 0;
+}
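+
+/* Usage sketch (illustrative): refresh the cached link information with
+ * link status event reporting disabled, then inspect the hw context:
+ *
+ * err = ixgbe_aci_get_link_info(hw, false, NULL);
+ * if (!err && (hw->link.link_info.link_info & IXGBE_ACI_LINK_UP))
+ *         link_speed = hw->link.link_info.link_speed;
+ */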
+
+/**
+ * ixgbe_aci_set_event_mask - set event mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ *
+ * Set the event mask using ACI command (0x0613).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
+{
+ struct ixgbe_aci_cmd_set_event_mask *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
+
+ cmd->lport_num = port_num;
+
+ cmd->event_mask = cpu_to_le16(mask);
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_configure_lse - enable/disable link status events
+ * @hw: pointer to the HW struct
+ * @activate: true to enable LSE, false to disable it
+ * @mask: event mask to be set; a set bit means deactivation of the
+ * corresponding event
+ *
+ * Set the event mask and then enable or disable link status events.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
+{
+ int err;
+
+ err = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
+ if (err)
+ return err;
+
+ /* Enabling link status events generation by fw. */
+ return ixgbe_aci_get_link_info(hw, activate, NULL);
+}
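+
+/* Usage sketch (illustrative): since a set bit in @mask suppresses the
+ * corresponding event, a zero mask enables reporting of all link status
+ * events:
+ *
+ * err = ixgbe_configure_lse(hw, true, 0);
+ */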
+
+/**
+ * ixgbe_get_media_type_e610 - Gets media type
+ * @hw: pointer to the HW struct
+ *
+ * In order to get the media type, the function gets the PHY
+ * capabilities and then uses them to identify the PHY type by
+ * checking phy_type_high and phy_type_low.
+ *
+ * Return: the type of media in form of ixgbe_media_type enum
+ * or ixgbe_media_type_unknown in case of an error.
+ */
+enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ int rc;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
+ /* If there is no link but PHY (dongle) is available SW should use
+ * Get PHY Caps admin command instead of Get Link Status, find most
+ * significant bit that is set in PHY types reported by the command
+ * and use it to discover media type.
+ */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
+ int highest_bit;
+
+ /* Get PHY Capabilities */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return ixgbe_media_type_unknown;
+
+ highest_bit = fls64(le64_to_cpu(pcaps.phy_type_high));
+ if (highest_bit) {
+ hw->link.link_info.phy_type_high =
+ BIT_ULL(highest_bit - 1);
+ hw->link.link_info.phy_type_low = 0;
+ } else {
+ highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low));
+ if (highest_bit)
+ hw->link.link_info.phy_type_low =
+ BIT_ULL(highest_bit - 1);
+ }
+ }
+
+ /* Based on link status or search above try to discover media type. */
+ hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
+
+ return hw->phy.media_type;
+}
+
+/**
+ * ixgbe_setup_link_e610 - Set up link
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Set up the link with the specified speed.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ /* Simply request FW to perform proper PHY setup */
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_check_link_e610 - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Determine if the link is up and the current link speed
+ * using ACI command (0x0607).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ int err;
+ u32 i;
+
+ if (!speed || !link_up)
+ return -EINVAL;
+
+ /* Set get_link_info flag to ensure that fresh
+ * link information will be obtained from FW
+ * by sending Get Link Status admin command.
+ */
+ hw->link.get_link_info = true;
+
+ /* Update link information in adapter context. */
+ err = ixgbe_get_link_status(hw, link_up);
+ if (err)
+ return err;
+
+ /* Wait for link up if it was requested. */
+ if (link_up_wait_to_complete && !(*link_up)) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ msleep(100);
+ hw->link.get_link_info = true;
+ err = ixgbe_get_link_status(hw, link_up);
+ if (err)
+ return err;
+ if (*link_up)
+ break;
+ }
+ }
+
+ /* Use link information in adapter context updated by the call
+ * to ixgbe_get_link_status() to determine current link speed.
+ * Link speed information is valid only when link up was
+ * reported by FW.
+ */
+ if (*link_up) {
+ switch (hw->link.link_info.link_speed) {
+ case IXGBE_ACI_LINK_SPEED_10MB:
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_100MB:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_1000MB:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_2500MB:
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_5GB:
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_10GB:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ } else {
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
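+
+/* Usage sketch (illustrative): query link state once, without waiting for
+ * link-up:
+ *
+ * ixgbe_link_speed speed;
+ * bool link_up;
+ *
+ * err = ixgbe_check_link_e610(hw, &speed, &link_up, false);
+ */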
+
+/**
+ * ixgbe_get_link_capabilities_e610 - Determine link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determine speed and AN parameters of a link.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ if (!speed || !autoneg)
+ return -EINVAL;
+
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+
+ return 0;
+}
+
+/**
+ * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
+ * @hw: pointer to hardware structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
+ *
+ * Configures PHY Flow Control according to the provided configuration.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode)
+{
+ u8 pause_mask = 0x0;
+
+ if (!cfg)
+ return -EINVAL;
+
+ switch (req_mode) {
+ case ixgbe_fc_full:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* Clear the old pause settings. */
+ cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
+ IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
+
+ /* Set the new capabilities. */
+ cfg->caps |= pause_mask;
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_fc_e610 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Set up flow control. This has to be done during init time.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_fc_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
+ int err;
+
+ /* Get the current PHY config */
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+ /* Configure the set PHY data */
+ err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+ if (err)
+ return err;
+
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps.caps) {
+ cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ err = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_fc_autoneg_e610 - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configure Flow Control.
+ */
+void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw)
+{
+ int err;
+
+ /* Get current link err.
+ * Current FC mode will be stored in the hw context.
+ */
+ err = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (err)
+ goto no_autoneg;
+
+ /* Check if the link is up */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP))
+ goto no_autoneg;
+
+ /* Check if auto-negotiation has completed */
+ if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED))
+ goto no_autoneg;
+
+ hw->fc.fc_was_autonegged = true;
+ return;
+
+no_autoneg:
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+}
+
+/**
+ * ixgbe_disable_rx_e610 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disable RX DMA unit on E610 with use of ACI command (0x000C),
+ * falling back to a register write if the ACI command fails.
+ */
+void ixgbe_disable_rx_e610(struct ixgbe_hw *hw)
+{
+ u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ u32 pfdtxgswc;
+ int err;
+
+ if (!(rxctrl & IXGBE_RXCTRL_RXEN))
+ return;
+
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ err = ixgbe_aci_disable_rxen(hw);
+
+ /* If we fail - disable RX using register write */
+ if (err) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+}
+
+/**
+ * ixgbe_init_phy_ops_e610 - PHY specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY type was not known.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+ phy->ops.set_phy_power = ixgbe_set_phy_power_e610;
+ else
+ phy->ops.set_phy_power = NULL;
+
+ /* Identify the PHY */
+ return phy->ops.identify(hw);
+}
+
+/**
+ * ixgbe_identify_phy_e610 - Identify PHY
+ * @hw: pointer to hardware structure
+ *
+ * Determine PHY type, supported speeds and PHY ID.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_type_low, phy_type_high;
+ int err;
+
+ /* Set PHY type */
+ hw->phy.type = ixgbe_phy_fw;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
+ if (err)
+ return err;
+
+ if (!(pcaps.module_compliance_enforcement &
+ IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
+ /* Handle lenient mode */
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
+ &pcaps);
+ if (err)
+ return err;
+ }
+
+ /* Determine supported speeds */
+ hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
+ phy_type_high = le64_to_cpu(pcaps.phy_type_high);
+ phy_type_low = le64_to_cpu(pcaps.phy_type_low);
+
+ if (phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* 2.5 and 5 Gbps link speeds must be excluded from the
+ * auto-negotiation set used during driver initialization due to
+ * compatibility issues with certain switches. Those issues do not
+ * exist in case of E610 2.5G SKU device (0x57b1).
+ */
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ /* Set PHY ID */
+ memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
+
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+
+ return 0;
+}
+
+/**
+ * ixgbe_identify_module_e610 - Identify SFP module type
+ * @hw: pointer to hardware structure
+ *
+ * Identify the SFP module type.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_identify_module_e610(struct ixgbe_hw *hw)
+{
+ bool media_available;
+ u8 module_type;
+ int err;
+
+ err = ixgbe_update_link_info(hw);
+ if (err)
+ return err;
+
+ media_available =
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE);
+
+ if (media_available) {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+ /* Get module type from hw context updated by
+ * ixgbe_update_link_info()
+ */
+ module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
+
+ if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ }
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_setup_phy_link_e610 - Sets up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Set the parameters for the firmware-controlled PHYs.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
+ u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
+ u64 sup_phy_type_low, sup_phy_type_high;
+ u64 phy_type_low = 0, phy_type_high = 0;
+ int err;
+
+ err = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (err)
+ return err;
+
+ /* If media is not available get default config. */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ rmode = IXGBE_ACI_REPORT_DFLT_CFG;
+
+ err = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
+ if (err)
+ return err;
+
+ sup_phy_type_low = le64_to_cpu(pcaps.phy_type_low);
+ sup_phy_type_high = le64_to_cpu(pcaps.phy_type_high);
+
+ /* Get Active configuration to avoid unintended changes. */
+ err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
+
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
+ phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
+ phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
+ }
+
+ /* Mask the set values to avoid requesting unsupported link types. */
+ phy_type_low &= sup_phy_type_low;
+ pcfg.phy_type_low = cpu_to_le64(phy_type_low);
+ phy_type_high &= sup_phy_type_high;
+ pcfg.phy_type_high = cpu_to_le64(phy_type_high);
+
+ if (pcfg.phy_type_high != pcaps.phy_type_high ||
+ pcfg.phy_type_low != pcaps.phy_type_low ||
+ pcfg.caps != pcaps.caps) {
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ err = ixgbe_aci_set_phy_cfg(hw, &pcfg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbe_set_phy_power_e610 - Control power for copper PHY
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ *
+ * Set the power on/off of the PHY
+ * by getting its capabilities and setting the appropriate
+ * configuration parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
+ int err;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &phy_caps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ if (on)
+ phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
+ else
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
+
+ /* PHY is already in requested power mode. */
+ if (phy_caps.caps == phy_cfg.caps)
+ return 0;
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_enter_lplu_e610 - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
+ int err;
+
+ err = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &phy_caps);
+ if (err)
+ return err;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
+
+ return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+}
+
+/**
+ * ixgbe_init_eeprom_params_e610 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the EEPROM parameters ixgbe_eeprom_info within the ixgbe_hw
+ * struct in order to set up EEPROM access.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 gens_stat;
+ u8 sr_size;
+
+ if (eeprom->type != ixgbe_eeprom_uninitialized)
+ return 0;
+
+ eeprom->type = ixgbe_flash;
+
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
+
+ /* Switching to words (sr_size contains power of 2). */
+ eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type,
+ eeprom->word_size);
+
+ return 0;
+}
+
+/**
+ * ixgbe_aci_get_netlist_node - get a node handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node and assign it to
+ * the provided handle using ACI command (0x06E0).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+ return -EOPNOTSUPP;
+
+ if (node_handle)
+ *node_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return 0;
+}
+
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+ return 0;
+
+ return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
+ IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
+ if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
+ return;
+
+ ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+}
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram)
+{
+ struct ixgbe_aci_cmd_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+
+ if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+ return -EINVAL;
+
+ cmd = &desc.params.nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+ if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = cpu_to_le16(module_typeid);
+ cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = cpu_to_le16(length);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
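+
+/* Worked example (hypothetical offset): a read at flat byte offset 0x1ABCD
+ * is encoded as offset_low = 0xABCD and offset_high = 0x01, forming the
+ * 24-bit offset carried by the 0x0701 command above.
+ */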
+
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ int err;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+ err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ if (!err && cmd->checksum !=
+ cpu_to_le16(IXGBE_ACI_NVM_CHECKSUM_CORRECT)) {
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+
+ err = -EIO;
+ netdev_err(adapter->netdev, "Invalid Shadow Ram checksum");
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ u32 bytes = sizeof(u16);
+ u16 data_local;
+ int err;
+
+ err = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (err)
+ return err;
+
+ *data = data_local;
+ return 0;
+}
+
+/**
+ * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
+ * from being exceeded in case of Shadow RAM read requests and ensures that no
+ * single read request exceeds the maximum 4KB read for a single admin command.
+ *
+ * Note that the data pointer may be partially updated if some reads
+ * succeed before a failure.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram)
+{
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
+ int err;
+
+ /* Verify the length of the read if this is for the Shadow RAM */
+ if (read_shadow_ram && ((offset + inlen) >
+ (hw->eeprom.word_size * 2u)))
+ return -EINVAL;
+
+ do {
+ u32 read_size, sector_offset;
+
+ /* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
+ * Additionally, a read from the Shadow RAM may not cross over
+ * a sector boundary. Conveniently, the sector size is also 4KB.
+ */
+ sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
+ read_size = min_t(u32,
+ IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
+ inlen - bytes_read);
+
+ last_cmd = !(bytes_read + read_size < inlen);
+
+ /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
+ * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
+ * maximum size guarantees that it will fit within the 2 bytes.
+ */
+ err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
+ offset, (u16)read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram);
+ if (err)
+ break;
+
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
+ return err;
+}
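+
+/* Worked example (hypothetical values): with a 4KB sector size, a 0x300
+ * byte read starting at flat offset 0x0F00 is split into two commands:
+ * 0x100 bytes up to the 0x1000 sector boundary, then the remaining 0x200
+ * bytes with last_command set.
+ */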
+
+/**
+ * ixgbe_read_sr_buf_aci - Read Shadow RAM buffer via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM words to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Read 16 bit words (data buf) from the Shadow RAM. Acquire/release the NVM
+ * ownership.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data)
+{
+ u32 bytes = *words * 2;
+ int err;
+
+ err = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
+ if (err)
+ return err;
+
+ *words = bytes / 2;
+
+ for (int i = 0; i < *words; i++)
+ data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+ return 0;
+}
+
+/**
+ * ixgbe_read_ee_aci_e610 - Read EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_word_aci(hw, offset, data);
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
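+
+/* Usage sketch (illustrative): callers normally go through the EEPROM ops
+ * table installed at the bottom of this file rather than calling this
+ * helper directly:
+ *
+ * u16 word;
+ *
+ * err = hw->eeprom.ops.read(hw, E610_SR_SW_CHECKSUM_WORD, &word);
+ */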
+
+/**
+ * ixgbe_read_ee_aci_buffer_e610 - Read EEPROM words via ACI
+ * @hw: pointer to hardware structure
+ * @offset: offset of words in the EEPROM to read
+ * @words: number of words to read
+ * @data: words to read from the EEPROM
+ *
+ * Read 16 bit words from the EEPROM via the ACI. Initialize the EEPROM params
+ * prior to the read. Acquire/release the NVM ownership.
+ *
+ * Return: the operation exit code.
+ */
+int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
+ ixgbe_release_nvm(hw);
+
+ return err;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_e610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ int err;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ err = hw->eeprom.ops.init_params(hw);
+ if (err)
+ return err;
+ }
+
+ err = ixgbe_nvm_validate_checksum(hw);
+ if (err)
+ return err;
+
+ if (checksum_val) {
+ u16 tmp_checksum;
+
+ err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (err)
+ return err;
+
+ err = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
+ &tmp_checksum);
+ ixgbe_release_nvm(hw);
+
+ if (!err)
+ *checksum_val = tmp_checksum;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbe_reset_hw_e610 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ *
+ * Return: the exit code of the operation.
+ */
+int ixgbe_reset_hw_e610(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ int err;
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ err = hw->mac.ops.stop_adapter(hw);
+ if (err)
+ goto reset_hw_out;
+
+ /* Flush pending Tx transactions. */
+ ixgbe_clear_tx_pending(hw);
+
+ hw->phy.ops.init(hw);
+mac_reset_top:
+ err = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (err)
+ return -EBUSY;
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
+
+ err = -EIO;
+ netdev_err(adapter->netdev, "Reset polling failed to complete.");
+ }
+
+ /* Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ msleep(100);
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), GENMASK(18, 17));
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /* Maximum number of Receive Address Registers. */
+#define IXGBE_MAX_NUM_RAR 128
+
+ /* Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to the
+ * maximum number of Receive Address Registers, since we modify this
+ * value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = IXGBE_MAX_NUM_RAR;
+ hw->mac.ops.init_rx_addrs(hw);
+
+ /* Initialize bus function number */
+ hw->mac.ops.set_lan_id(hw);
+
+reset_hw_out:
+ return err;
+}
+
+static const struct ixgbe_mac_operations mac_ops_e610 = {
+ .init_hw = ixgbe_init_hw_generic,
+ .start_hw = ixgbe_start_hw_X540,
+ .clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic,
+ .enable_rx_dma = ixgbe_enable_rx_dma_generic,
+ .get_mac_addr = ixgbe_get_mac_addr_generic,
+ .get_device_caps = ixgbe_get_device_caps_generic,
+ .stop_adapter = ixgbe_stop_adapter_generic,
+ .set_lan_id = ixgbe_set_lan_id_multi_port_pcie,
+ .set_rxpba = ixgbe_set_rxpba_generic,
+ .check_link = ixgbe_check_link_e610,
+ .blink_led_start = ixgbe_blink_led_start_X540,
+ .blink_led_stop = ixgbe_blink_led_stop_X540,
+ .set_rar = ixgbe_set_rar_generic,
+ .clear_rar = ixgbe_clear_rar_generic,
+ .set_vmdq = ixgbe_set_vmdq_generic,
+ .set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic,
+ .clear_vmdq = ixgbe_clear_vmdq_generic,
+ .init_rx_addrs = ixgbe_init_rx_addrs_generic,
+ .update_mc_addr_list = ixgbe_update_mc_addr_list_generic,
+ .enable_mc = ixgbe_enable_mc_generic,
+ .disable_mc = ixgbe_disable_mc_generic,
+ .clear_vfta = ixgbe_clear_vfta_generic,
+ .set_vfta = ixgbe_set_vfta_generic,
+ .fc_enable = ixgbe_fc_enable_generic,
+ .set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550,
+ .init_uta_tables = ixgbe_init_uta_tables_generic,
+ .set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing,
+ .set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing,
+ .set_source_address_pruning =
+ ixgbe_set_source_address_pruning_x550,
+ .set_ethertype_anti_spoofing =
+ ixgbe_set_ethertype_anti_spoofing_x550,
+ .disable_rx_buff = ixgbe_disable_rx_buff_generic,
+ .enable_rx_buff = ixgbe_enable_rx_buff_generic,
+ .enable_rx = ixgbe_enable_rx_generic,
+ .disable_rx = ixgbe_disable_rx_e610,
+ .led_on = ixgbe_led_on_generic,
+ .led_off = ixgbe_led_off_generic,
+ .init_led_link_act = ixgbe_init_led_link_act_generic,
+ .reset_hw = ixgbe_reset_hw_e610,
+ .get_media_type = ixgbe_get_media_type_e610,
+ .setup_link = ixgbe_setup_link_e610,
+ .get_link_capabilities = ixgbe_get_link_capabilities_e610,
+ .get_bus_info = ixgbe_get_bus_info_generic,
+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540,
+ .release_swfw_sync = ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = ixgbe_init_swfw_sync_X540,
+ .prot_autoc_read = prot_autoc_read_generic,
+ .prot_autoc_write = prot_autoc_write_generic,
+ .setup_fc = ixgbe_setup_fc_e610,
+ .fc_autoneg = ixgbe_fc_autoneg_e610,
+};
+
+static const struct ixgbe_phy_operations phy_ops_e610 = {
+ .init = ixgbe_init_phy_ops_e610,
+ .identify = ixgbe_identify_phy_e610,
+ .identify_sfp = ixgbe_identify_module_e610,
+ .setup_link_speed = ixgbe_setup_phy_link_speed_generic,
+ .setup_link = ixgbe_setup_phy_link_e610,
+ .enter_lplu = ixgbe_enter_lplu_e610,
+};
+
+static const struct ixgbe_eeprom_operations eeprom_ops_e610 = {
+ .read = ixgbe_read_ee_aci_e610,
+ .read_buffer = ixgbe_read_ee_aci_buffer_e610,
+ .validate_checksum = ixgbe_validate_eeprom_checksum_e610,
+};
+
+const struct ixgbe_info ixgbe_e610_info = {
+ .mac = ixgbe_mac_e610,
+ .get_invariants = ixgbe_get_invariants_X540,
+ .mac_ops = &mac_ops_e610,
+ .eeprom_ops = &eeprom_ops_e610,
+ .phy_ops = &phy_ops_e610,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_x550em_a,
+};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
new file mode 100644
index 000000000000..ba8c06b73810
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_E610_H_
+#define _IXGBE_E610_H_
+
+#include "ixgbe_type.h"
+
+int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size);
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw);
+int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending);
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode);
+int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout);
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res);
+int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc);
+int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps);
+int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps);
+int ixgbe_get_caps(struct ixgbe_hw *hw);
+int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw);
+int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps);
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link);
+int ixgbe_update_link_info(struct ixgbe_hw *hw);
+int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up);
+int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link);
+int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
+int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw);
+int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait);
+int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode);
+int ixgbe_setup_fc_e610(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw);
+void ixgbe_disable_rx_e610(struct ixgbe_hw *hw);
+int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw);
+int ixgbe_identify_phy_e610(struct ixgbe_hw *hw);
+int ixgbe_identify_module_e610(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw);
+int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on);
+int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw);
+int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw);
+int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
+int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access);
+void ixgbe_release_nvm(struct ixgbe_hw *hw);
+int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram);
+int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram);
+int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data);
+int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val);
+int ixgbe_reset_hw_e610(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9482e0cca8b7..da91c582d439 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/* ethtool support for ixgbe */
@@ -690,6 +690,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
break;
@@ -1613,6 +1614,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
toggle = 0x7FFFF30F;
test = reg_test_82599;
break;
@@ -1874,6 +1876,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1935,6 +1938,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
reg_data |= IXGBE_MACC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 16fa621ce0ff..336d47ffb95a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe.h"
#include "ixgbe_sriov.h"
@@ -107,6 +107,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (num_tcs > 4) {
/*
* TCs : TC0/1 TC2/3 TC4-7
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 2e38e8f6fac1..7236f20c9a30 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -42,6 +42,7 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
+#include "ixgbe_e610.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_mbx.h"
#include "ixgbe_phy.h"
@@ -73,6 +74,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_x550em_x_fw] = &ixgbe_x550em_x_fw_info,
[board_x550em_a] = &ixgbe_x550em_a_info,
[board_x550em_a_fw] = &ixgbe_x550em_a_fw_info,
+ [board_e610] = &ixgbe_e610_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -131,6 +133,11 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_BACKPLANE), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SFP), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_10G_T), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_2_5G_T), board_e610},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_SGMII), board_e610},
/* required last entry */
{0, }
};
@@ -173,6 +180,8 @@ static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
+static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *);
+static void ixgbe_watchdog_update_link(struct ixgbe_adapter *);
static const struct net_device_ops ixgbe_netdev_ops;
@@ -236,8 +245,11 @@ static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
* bandwidth details should be gathered from the parent bus instead of from the
* device. Used to ensure that various locations all have the correct device ID
* checks.
+ *
+ * Return: true if information should be collected from the parent bus, false
+ * otherwise
*/
-static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
+static bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_SFP_SF_QP:
@@ -876,6 +888,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -915,6 +928,7 @@ void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
@@ -1025,7 +1039,7 @@ static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
return ((head <= tail) ? tail : tail + ring->count) - head;
}
-static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
u32 tx_done = ixgbe_get_tx_completed(tx_ring);
u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
@@ -1909,10 +1923,6 @@ bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
{
struct net_device *netdev = rx_ring->netdev;
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
/* Verify netdev is present, and that packet does not have any
* errors that would be unacceptable to the netdev.
*/
@@ -2220,9 +2230,9 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
return skb;
}
-static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring,
- struct xdp_buff *xdp)
+static int ixgbe_run_xdp(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
@@ -2272,7 +2282,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
@@ -2330,6 +2340,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
unsigned int offset = rx_ring->rx_offset;
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -2375,12 +2386,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
#endif
- skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = ixgbe_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- unsigned int xdp_res = -PTR_ERR(skb);
-
+ if (xdp_res) {
if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
xdp_xmit |= xdp_res;
ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
@@ -2400,7 +2409,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -2414,7 +2423,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
continue;
/* verify the packet layout is correct */
- if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
+ if (xdp_res || ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
continue;
/* probably a little skewed due to removing CRC */
@@ -2515,6 +2524,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
default:
@@ -2528,6 +2538,9 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
IXGBE_EIMS_MAILBOX |
IXGBE_EIMS_LSC);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ mask &= ~IXGBE_EIMS_FW_EVENT;
+
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
@@ -2744,6 +2757,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
@@ -2966,6 +2980,218 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
}
}
+/**
+ * ixgbe_check_phy_fw_load - check if PHY FW load failed
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check if external PHY FW load failed and print an error message if it did.
+ */
+static void ixgbe_check_phy_fw_load(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ if (!(link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
+ adapter->flags2 &= ~IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+ return;
+ }
+
+ if (adapter->flags2 & IXGBE_FLAG2_PHY_FW_LOAD_FAILED)
+ return;
+
+ if (link_cfg_err & IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
+ netdev_err(adapter->netdev, "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
+ adapter->flags2 |= IXGBE_FLAG2_PHY_FW_LOAD_FAILED;
+ }
+}
+
+/**
+ * ixgbe_check_module_power - check module power level
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check module power level returned by a previous call to aci_get_link_info
+ * and print error messages if module power level is not supported.
+ */
+static void ixgbe_check_module_power(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ /* If module power level is supported, clear the flag. */
+ if (!(link_cfg_err & (IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT |
+ IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED))) {
+ adapter->flags2 &= ~IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ return;
+ }
+
+ /* If IXGBE_FLAG2_MOD_POWER_UNSUPPORTED was previously set and the
+ * above block didn't clear this bit, there's nothing to do.
+ */
+ if (adapter->flags2 & IXGBE_FLAG2_MOD_POWER_UNSUPPORTED)
+ return;
+
+ if (link_cfg_err & IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT) {
+ netdev_err(adapter->netdev, "The installed module is incompatible with the device's NVM image. Cannot start link.\n");
+ adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ } else if (link_cfg_err & IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED) {
+ netdev_err(adapter->netdev, "The module's power requirements exceed the device's power supply. Cannot start link.\n");
+ adapter->flags2 |= IXGBE_FLAG2_MOD_POWER_UNSUPPORTED;
+ }
+}
+
+/**
+ * ixgbe_check_link_cfg_err - check if link configuration failed
+ * @adapter: pointer to adapter structure
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Print an error message if a link configuration failure is indicated by
+ * the link_cfg_err value from the link info structure.
+ */
+static void ixgbe_check_link_cfg_err(struct ixgbe_adapter *adapter,
+ u8 link_cfg_err)
+{
+ ixgbe_check_module_power(adapter, link_cfg_err);
+ ixgbe_check_phy_fw_load(adapter, link_cfg_err);
+}
+
+/**
+ * ixgbe_process_link_status_event - process the link event
+ * @adapter: pointer to adapter structure
+ * @link_up: true if the physical link is up and false if it is down
+ * @link_speed: current link speed received from the link event
+ *
+ * Return: 0 on success or negative value on failure.
+ */
+static int
+ixgbe_process_link_status_event(struct ixgbe_adapter *adapter, bool link_up,
+ u16 link_speed)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int status;
+
+ /* Update the link info structures and re-enable link events,
+ * don't bail on failure since other bookkeeping is still needed.
+ */
+ status = ixgbe_update_link_info(hw);
+ if (status)
+ e_dev_err("Failed to update link status, err %d aq_err %d\n",
+ status, hw->aci.last_status);
+
+ ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+ /* Check if the link state is up after updating link info, and treat
+ * this event as an UP event since the link is actually UP now.
+ */
+ if (hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)
+ link_up = true;
+
+ /* Turn off PHY if media was removed. */
+ if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA) &&
+ !(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ adapter->flags2 |= IXGBE_FLAG2_NO_MEDIA;
+
+ if (link_up == adapter->link_up &&
+ link_up == netif_carrier_ok(adapter->netdev) &&
+ link_speed == adapter->link_speed)
+ return 0;
+
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ ixgbe_watchdog_update_link(adapter);
+
+ if (link_up)
+ ixgbe_watchdog_link_is_up(adapter);
+ else
+ ixgbe_watchdog_link_is_down(adapter);
+
+ return 0;
+}
+
+/**
+ * ixgbe_handle_link_status_event - handle link status event via ACI
+ * @adapter: pointer to adapter structure
+ * @e: event structure containing link status info
+ */
+static void
+ixgbe_handle_link_status_event(struct ixgbe_adapter *adapter,
+ struct ixgbe_aci_event *e)
+{
+ struct ixgbe_aci_cmd_get_link_status_data *link_data;
+ u16 link_speed;
+ bool link_up;
+
+ link_data = (struct ixgbe_aci_cmd_get_link_status_data *)e->msg_buf;
+
+ link_up = !!(link_data->link_info & IXGBE_ACI_LINK_UP);
+ link_speed = le16_to_cpu(link_data->link_speed);
+
+ if (ixgbe_process_link_status_event(adapter, link_up, link_speed))
+ e_dev_warn("Could not process link status event\n");
+}
+
+/**
+ * ixgbe_schedule_fw_event - schedule Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * If the adapter is not in a down, removing, or resetting state,
+ * schedule a FW event for the service task to handle.
+ */
+static void ixgbe_schedule_fw_event(struct ixgbe_adapter *adapter)
+{
+ if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+ !test_bit(__IXGBE_REMOVING, &adapter->state) &&
+ !test_bit(__IXGBE_RESETTING, &adapter->state)) {
+ adapter->flags2 |= IXGBE_FLAG2_FW_ASYNC_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ }
+}
+
+/**
+ * ixgbe_aci_event_cleanup - release msg_buf memory
+ * @event: pointer to the event holding msg_buf to be released
+ *
+ * Free the memory allocated for the event's msg_buf. Used by __cleanup()
+ * to implement automatic (scope-based) memory cleanup.
+ */
+static void ixgbe_aci_event_cleanup(struct ixgbe_aci_event *event)
+{
+ kfree(event->msg_buf);
+}
+
+/**
+ * ixgbe_handle_fw_event - handle Firmware event
+ * @adapter: pointer to the adapter structure
+ *
+ * Obtain an event from the ACI and then process it according to the
+ * type of the event and the opcode.
+ */
+static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_aci_event event __cleanup(ixgbe_aci_event_cleanup);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool pending = false;
+ int err;
+
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT)
+ adapter->flags2 &= ~IXGBE_FLAG2_FW_ASYNC_EVENT;
+ event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return;
+
+ do {
+ err = ixgbe_aci_get_event(hw, &event, &pending);
+ if (err)
+ break;
+
+ switch (le16_to_cpu(event.desc.opcode)) {
+ case ixgbe_aci_opc_get_link_status:
+ ixgbe_handle_link_status_event(adapter, &event);
+ break;
+ default:
+ e_warn(hw, "unknown FW async event captured\n");
+ break;
+ }
+ } while (pending);
+}
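The __cleanup() annotation above is the kernel's scope-based cleanup
attribute: msg_buf is freed on every exit path, including the early error
breaks. A standalone sketch of the same idiom (the demo_* names are
illustrative, not from this patch):

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	struct demo_event {
		void *msg_buf;
	};

	/* Called automatically with &var when the variable goes out of
	 * scope; kfree(NULL) is a no-op, so partial init is safe.
	 */
	static void demo_event_cleanup(struct demo_event *e)
	{
		kfree(e->msg_buf);
	}

	static int demo(void)
	{
		struct demo_event ev __cleanup(demo_event_cleanup) = {};

		ev.msg_buf = kzalloc(64, GFP_KERNEL);
		if (!ev.msg_buf)
			return -ENOMEM;

		/* ... use ev.msg_buf; it is freed on this return, too */
		return 0;
	}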
+
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
@@ -2982,6 +3208,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -3035,6 +3262,9 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_e610:
+ mask |= IXGBE_EIMS_FW_EVENT;
+ fallthrough;
case ixgbe_mac_x550em_a:
if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
@@ -3091,12 +3321,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_msg_task(adapter);
+ if (eicr & IXGBE_EICR_FW_EVENT)
+ ixgbe_schedule_fw_event(adapter);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X540)) {
adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -3334,6 +3568,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
+ if (eicr & IXGBE_EICR_FW_EVENT)
+ ixgbe_schedule_fw_event(adapter);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
ixgbe_check_sfp_event(adapter, eicr);
@@ -3342,6 +3579,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
@@ -3442,6 +3680,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -4359,6 +4598,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
fallthrough;
@@ -4526,6 +4766,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4564,6 +4805,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -5148,6 +5390,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
dv_id = IXGBE_DV_X540(link, tc);
break;
default:
@@ -5208,6 +5451,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
dv_id = IXGBE_LOW_DV_X540(tc);
break;
default:
@@ -5510,6 +5754,48 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_enable_link_status_events - enable link status events
+ * @adapter: pointer to the adapter structure
+ * @mask: event mask to be set
+ *
+ * Enables link status events by invoking ixgbe_configure_lse()
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_enable_link_status_events(struct ixgbe_adapter *adapter,
+ u16 mask)
+{
+ int err;
+
+ err = ixgbe_configure_lse(&adapter->hw, true, mask);
+ if (err)
+ return err;
+
+ adapter->lse_mask = mask;
+ return 0;
+}
+
+/**
+ * ixgbe_disable_link_status_events - disable link status events
+ * @adapter: pointer to the adapter structure
+ *
+ * Disables link status events by invoking ixgbe_configure_lse()
+ *
+ * Return: the exit code of the operation.
+ */
+static int ixgbe_disable_link_status_events(struct ixgbe_adapter *adapter)
+{
+ int err;
+
+ err = ixgbe_configure_lse(&adapter->hw, false, adapter->lse_mask);
+ if (err)
+ return err;
+
+ adapter->lse_mask = 0;
+ return 0;
+}
+
+/**
* ixgbe_sfp_link_config - set up SFP+ link
* @adapter: pointer to private adapter struct
**/
@@ -5532,13 +5818,21 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
* ixgbe_non_sfp_link_config - set up non-SFP+ link
* @hw: pointer to private hardware struct
*
- * Returns 0 on success, negative on failure
+ * Configure non-SFP link.
+ *
+ * Return: 0 on success, negative on failure
**/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
- u32 speed;
+ struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
+ hw);
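+ /* Assumed convention of the set_event_mask ACI command: set bits
+ * suppress reporting, so clearing only these four bits asks FW to
+ * report exactly these link events.
+ */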
+ u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
+ IXGBE_ACI_LINK_EVENT_MEDIA_NA |
+ IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
+ IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
bool autoneg, link_up = false;
int ret = -EIO;
+ u32 speed;
if (hw->mac.ops.check_link)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -5561,13 +5855,53 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
if (ret)
return ret;
- if (hw->mac.ops.setup_link)
+ if (hw->mac.ops.setup_link) {
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ret = ixgbe_enable_link_status_events(adapter, mask);
+ if (ret)
+ return ret;
+ }
ret = hw->mac.ops.setup_link(hw, speed, link_up);
+ }
return ret;
}
/**
+ * ixgbe_check_media_subtask - check for media
+ * @adapter: pointer to adapter structure
+ *
+ * If media is available then initialize PHY user configuration. Configure the
+ * PHY if the interface is up.
+ */
+static void ixgbe_check_media_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* No need to check for media if it's already present */
+ if (!(adapter->flags2 & IXGBE_FLAG2_NO_MEDIA))
+ return;
+
+ /* Refresh link info and check if media is present */
+ if (ixgbe_update_link_info(hw))
+ return;
+
+ ixgbe_check_link_cfg_err(adapter, hw->link.link_info.link_cfg_err);
+
+ if (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
+ /* PHY settings are reset on media insertion, reconfigure
+ * PHY to preserve settings.
+ */
+ if (!ixgbe_non_sfp_link_config(&adapter->hw))
+ adapter->flags2 &= ~IXGBE_FLAG2_NO_MEDIA;
+
+ /* A Link Status Event will be generated; the event handler
+ * will complete bringing the interface up
+ */
+ }
+}
+
+/**
* ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
* @adapter: board private structure
*
@@ -5630,6 +5964,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5980,6 +6315,7 @@ dma_engine_disable:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
@@ -6224,6 +6560,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_clean_all_tx_rings(adapter);
ixgbe_clean_all_rx_rings(adapter);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_disable_link_status_events(adapter);
}
/**
@@ -6279,6 +6617,7 @@ static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
break;
@@ -6342,6 +6681,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
+ hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME;
+
/* get_invariants needs the device IDs */
ii->get_invariants(hw);
@@ -6909,6 +7250,19 @@ int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
udp_tunnel_nic_reset_ntf(netdev);
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ int err = ixgbe_update_link_info(&adapter->hw);
+
+ if (err)
+ e_dev_err("Failed to update link info, err %d.\n", err);
+
+ ixgbe_check_link_cfg_err(adapter,
+ adapter->hw.link.link_info.link_cfg_err);
+
+ err = ixgbe_non_sfp_link_config(&adapter->hw);
+ if (err)
+ e_dev_err("Link setup failed, err %d.\n", err);
+ }
return 0;
@@ -7062,6 +7416,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
pci_wake_from_d3(pdev, !!wufc);
break;
default:
@@ -7209,6 +7564,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
break;
@@ -7221,11 +7577,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
for (i = 0; i < 16; i++) {
hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540) ||
- (hw->mac.type == ixgbe_mac_X550) ||
- (hw->mac.type == ixgbe_mac_X550EM_x) ||
- (hw->mac.type == ixgbe_mac_x550em_a)) {
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_x550em_a ||
+ hw->mac.type == ixgbe_mac_e610) {
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -7251,6 +7608,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* OS2BMC stats are X540 and later */
hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -7551,6 +7909,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
case ixgbe_mac_82599EB: {
u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -8052,6 +8411,11 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ if (adapter->flags2 & IXGBE_FLAG2_FW_ASYNC_EVENT)
+ ixgbe_handle_fw_event(adapter);
+ ixgbe_check_media_subtask(adapter);
+ }
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
@@ -10771,6 +11135,24 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
}
/**
+ * ixgbe_set_fw_version_e610 - Set FW version specifically on E610 adapters
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to determine the FW version
+ * to display. The FW version is taken from the EEPROM/NVM.
+ */
+static void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_orom_info *orom = &adapter->hw.flash.orom;
+ struct ixgbe_nvm_info *nvm = &adapter->hw.flash.nvm;
+
+ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
+ nvm->eetrack, orom->major, orom->build, orom->patch);
+}
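For illustration only, with made-up values nvm 1.02, eetrack 0x80017d5e and
orom 1.5.7, the format string above produces "1.02 0x80017d5e 1.5.7".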
+
+/**
* ixgbe_set_fw_version - Set FW version
* @adapter: the adapter private structure
*
@@ -10782,6 +11164,11 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_nvm_version nvm_ver;
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ixgbe_set_fw_version_e610(adapter);
+ return;
+ }
+
ixgbe_get_oem_prod_version(hw, &nvm_ver);
if (nvm_ver.oem_valid) {
snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
@@ -10868,6 +11255,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#else
indices = IXGBE_MAX_RSS_INDICES;
#endif
+ } else if (ii->mac == ixgbe_mac_e610) {
+ indices = IXGBE_MAX_RSS_INDICES_X550;
}
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
@@ -10933,12 +11322,19 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ err = ixgbe_get_caps(&adapter->hw);
+ if (err)
+ dev_err(&pdev->dev, "ixgbe_get_caps failed %d\n", err);
+ }
+
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_e610:
netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
break;
case ixgbe_mac_x550em_a:
@@ -10959,6 +11355,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
break;
default:
@@ -11130,6 +11527,8 @@ skip_sriov:
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ixgbe_mac_set_default_filter(adapter);
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_init(&hw->aci.lock);
timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
if (ixgbe_removed(hw->hw_addr)) {
@@ -11275,6 +11674,8 @@ err_netdev:
err_register:
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_destroy(&adapter->hw.aci.lock);
err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
@@ -11321,6 +11722,11 @@ static void ixgbe_remove(struct pci_dev *pdev)
set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
+ if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ ixgbe_disable_link_status_events(adapter);
+ mutex_destroy(&adapter->hw.aci.lock);
+ }
+
if (adapter->mii_bus)
mdiobus_unregister(adapter->mii_bus);
@@ -11452,6 +11858,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
case ixgbe_mac_x550em_a:
device_id = IXGBE_DEV_ID_X550EM_A_VF;
break;
+ case ixgbe_mac_e610:
+ device_id = IXGBE_DEV_ID_E610_VF;
+ break;
default:
device_id = 0;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index d67d77e5dacc..788b5af07c70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -283,6 +283,7 @@ static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
@@ -407,6 +408,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_x550em_a &&
+ hw->mac.type != ixgbe_mac_e610 &&
hw->mac.type != ixgbe_mac_X540)
return;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 07eaa3c3f4d3..0a03a8bb5f88 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -1117,7 +1117,7 @@ int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
MDIO_MMD_AN, &autoneg_reg);
- if (hw->mac.type == ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_e610) {
/* Set or unset auto-negotiation 5G advertisement */
autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
@@ -1233,6 +1233,7 @@ static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
switch (hw->mac.type) {
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 9baccacd02a1..5fdf32d79d82 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBE_TYPE_H_
#define _IXGBE_TYPE_H_
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/mdio.h>
#include <linux/netdevice.h>
+#include "ixgbe_type_e610.h"
/* Device IDs */
#define IXGBE_DEV_ID_82598 0x10B6
@@ -71,12 +72,19 @@
#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4
#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5
+#define IXGBE_DEV_ID_E610_BACKPLANE 0x57AE
+#define IXGBE_DEV_ID_E610_SFP 0x57AF
+#define IXGBE_DEV_ID_E610_10G_T 0x57B0
+#define IXGBE_DEV_ID_E610_2_5G_T 0x57B1
+#define IXGBE_DEV_ID_E610_SGMII 0x57B2
+
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
+#define IXGBE_DEV_ID_E610_VF 0x57AD
#define IXGBE_CAT(r, m) IXGBE_##r##_##m
@@ -1600,7 +1608,7 @@ enum {
#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */
#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
-#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_FW_EVENT 0x00200000 /* Async FW event */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
@@ -1636,6 +1644,7 @@ enum {
#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
@@ -1654,6 +1663,7 @@ enum {
#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
@@ -1673,6 +1683,7 @@ enum {
#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
@@ -2068,6 +2079,7 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PTR 0x28
#define IXGBE_DEVICE_CAPS 0x2C
#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_PCIE_MSIX_E610_CAPS 0xB2
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -2168,6 +2180,7 @@ enum {
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_LINK_STATUS_E610 0x82
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
#define IXGBE_PCI_LINK_WIDTH_1 0x10
@@ -2288,6 +2301,7 @@ enum {
#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00
#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_TPE 0x00000080 /* Tag Promiscuous Ena*/
#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
@@ -2351,6 +2365,7 @@ enum {
/* Multiple Transmit Queue Command Register */
#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */
#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_NUM_TC_OR_Q 0xC /* Number of TCs or TxQs per pool */
#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */
#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
@@ -2970,6 +2985,29 @@ typedef u32 ixgbe_link_speed;
IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
+/* Physical layer type */
+typedef u64 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
+#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_T 0x20000
+#define IXGBE_PHYSICAL_LAYER_5000BASE_T 0x40000
+
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
*/
@@ -3145,6 +3183,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550,
ixgbe_mac_X550EM_x,
ixgbe_mac_x550em_a,
+ ixgbe_mac_e610,
+ ixgbe_mac_e610_vf,
ixgbe_num_macs
};
@@ -3224,7 +3264,9 @@ enum ixgbe_media_type {
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
- ixgbe_media_type_virtual
+ ixgbe_media_type_virtual,
+ ixgbe_media_type_da,
+ ixgbe_media_type_aui,
};
/* Flow Control Settings */
@@ -3233,7 +3275,8 @@ enum ixgbe_fc_mode {
ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause,
ixgbe_fc_full,
- ixgbe_fc_default
+ ixgbe_fc_default,
+ ixgbe_fc_pfc,
};
/* Smart Speed Settings */
@@ -3533,6 +3576,9 @@ struct ixgbe_link_operations {
struct ixgbe_link_info {
struct ixgbe_link_operations ops;
u8 addr;
+ struct ixgbe_link_status link_info;
+ struct ixgbe_link_status link_info_old;
+ u8 get_link_info;
};
struct ixgbe_eeprom_info {
@@ -3575,6 +3621,7 @@ struct ixgbe_mac_info {
u8 san_mac_rar_index;
struct ixgbe_thermal_sensor_data thermal_sensor_data;
bool set_lben;
+ u32 max_link_up_time;
u8 led_link_act;
};
@@ -3599,6 +3646,10 @@ struct ixgbe_phy_info {
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
u32 nw_mng_if_sel;
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u16 curr_user_speed_req;
+ struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg;
};
struct ixgbe_mbx_stats {
@@ -3643,6 +3694,19 @@ struct ixgbe_hw {
bool allow_unsupported_sfp;
bool wol_enabled;
bool need_crosstalk_fix;
+ u8 api_branch;
+ u8 api_maj_ver;
+ u8 api_min_ver;
+ u8 api_patch;
+ u8 fw_branch;
+ u8 fw_maj_ver;
+ u8 fw_min_ver;
+ u8 fw_patch;
+ u32 fw_build;
+ struct ixgbe_aci_info aci;
+ struct ixgbe_flash_info flash;
+ struct ixgbe_hw_dev_caps dev_caps;
+ struct ixgbe_hw_func_caps func_caps;
};
struct ixgbe_info {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
new file mode 100644
index 000000000000..8d06ade3c7cd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -0,0 +1,1074 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_TYPE_E610_H_
+#define _IXGBE_TYPE_E610_H_
+
+#define BYTES_PER_DWORD 4
+
+/* General E610 defines */
+#define IXGBE_MAX_VSI 768
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_SW_CHECKSUM_WORD 0x3F
+
+/* Shadow RAM related */
+#define IXGBE_SR_WORDS_IN_1KB 512
+
+/* Firmware Status Register (GL_FWSTS) */
+#define GL_FWSTS 0x00083048 /* Reset Source: POR */
+#define GL_FWSTS_EP_PF0 BIT(24)
+#define GL_FWSTS_EP_PF1 BIT(25)
+
+/* Global NVM General Status Register */
+#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
+#define GLNVM_GENS_SR_SIZE_M GENMASK(7, 5)
+
+/* Flash Access Register */
+#define IXGBE_GLNVM_FLA 0x000B6108 /* Reset Source: POR */
+#define IXGBE_GLNVM_FLA_LOCKED_S 6
+#define IXGBE_GLNVM_FLA_LOCKED_M BIT(6)
+
+/* Admin Command Interface (ACI) registers */
+#define IXGBE_PF_HIDA(_i) (0x00085000 + ((_i) * 4))
+#define IXGBE_PF_HIDA_2(_i) (0x00085020 + ((_i) * 4))
+#define IXGBE_PF_HIBA(_i) (0x00084000 + ((_i) * 4))
+#define IXGBE_PF_HICR 0x00082048
+
+#define IXGBE_PF_HICR_EN BIT(0)
+#define IXGBE_PF_HICR_C BIT(1)
+#define IXGBE_PF_HICR_SV BIT(2)
+#define IXGBE_PF_HICR_EV BIT(3)
+
+#define IXGBE_ACI_DESC_SIZE 32
+#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
+
+#define IXGBE_ACI_MAX_BUFFER_SIZE 4096 /* Size in bytes */
+#define IXGBE_ACI_SEND_DELAY_TIME_MS 10
+#define IXGBE_ACI_SEND_MAX_EXECUTE 3
+#define IXGBE_ACI_SEND_TIMEOUT_MS \
+ (IXGBE_ACI_SEND_MAX_EXECUTE * IXGBE_ACI_SEND_DELAY_TIME_MS)
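+/* With the defaults above: 3 attempts * 10 ms = 30 ms. */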
+/* [ms] timeout of waiting for sync response */
+#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT 100000
+/* [ms] timeout of waiting for async response */
+#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT 150000
+/* [ms] timeout of waiting for resource release */
+#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define IXGBE_ACI_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+#define IXGBE_ACI_FLAG_DD BIT(0) /* 0x1 */
+#define IXGBE_ACI_FLAG_CMP BIT(1) /* 0x2 */
+#define IXGBE_ACI_FLAG_ERR BIT(2) /* 0x4 */
+#define IXGBE_ACI_FLAG_VFE BIT(3) /* 0x8 */
+#define IXGBE_ACI_FLAG_LB BIT(9) /* 0x200 */
+#define IXGBE_ACI_FLAG_RD BIT(10) /* 0x400 */
+#define IXGBE_ACI_FLAG_VFC BIT(11) /* 0x800 */
+#define IXGBE_ACI_FLAG_BUF BIT(12) /* 0x1000 */
+#define IXGBE_ACI_FLAG_SI BIT(13) /* 0x2000 */
+#define IXGBE_ACI_FLAG_EI BIT(14) /* 0x4000 */
+#define IXGBE_ACI_FLAG_FE BIT(15) /* 0x8000 */
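A minimal sketch of testing a response descriptor against these bits; it
assumes only that the descriptor's flags are a little-endian 16-bit word
(the demo_* names are hypothetical, not from this patch):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct demo_aci_desc {
		__le16 flags;	/* stand-in for the real descriptor field */
	};

	/* FW sets ERR in the response flags when a command fails */
	static bool demo_aci_resp_has_error(const struct demo_aci_desc *desc)
	{
		return le16_to_cpu(desc->flags) & IXGBE_ACI_FLAG_ERR;
	}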
+
+/* Admin Command Interface (ACI) error codes */
+enum ixgbe_aci_err {
+ IXGBE_ACI_RC_OK = 0, /* Success */
+ IXGBE_ACI_RC_EPERM = 1, /* Operation not permitted */
+ IXGBE_ACI_RC_ENOENT = 2, /* No such element */
+ IXGBE_ACI_RC_ESRCH = 3, /* Bad opcode */
+ IXGBE_ACI_RC_EINTR = 4, /* Operation interrupted */
+ IXGBE_ACI_RC_EIO = 5, /* I/O error */
+ IXGBE_ACI_RC_ENXIO = 6, /* No such resource */
+ IXGBE_ACI_RC_E2BIG = 7, /* Arg too long */
+ IXGBE_ACI_RC_EAGAIN = 8, /* Try again */
+ IXGBE_ACI_RC_ENOMEM = 9, /* Out of memory */
+ IXGBE_ACI_RC_EACCES = 10, /* Permission denied */
+ IXGBE_ACI_RC_EFAULT = 11, /* Bad address */
+ IXGBE_ACI_RC_EBUSY = 12, /* Device or resource busy */
+ IXGBE_ACI_RC_EEXIST = 13, /* Object already exists */
+ IXGBE_ACI_RC_EINVAL = 14, /* Invalid argument */
+ IXGBE_ACI_RC_ENOTTY = 15, /* Not a typewriter */
+ IXGBE_ACI_RC_ENOSPC = 16, /* No space left or alloc failure */
+ IXGBE_ACI_RC_ENOSYS = 17, /* Function not implemented */
+ IXGBE_ACI_RC_ERANGE = 18, /* Parameter out of range */
+ IXGBE_ACI_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IXGBE_ACI_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IXGBE_ACI_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IXGBE_ACI_RC_EFBIG = 22, /* File too big */
+ IXGBE_ACI_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */
+ IXGBE_ACI_RC_ENOSEC = 24, /* Missing security manifest */
+ IXGBE_ACI_RC_EBADSIG = 25, /* Bad RSA signature */
+ IXGBE_ACI_RC_ESVN = 26, /* SVN number prohibits this package */
+ IXGBE_ACI_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ IXGBE_ACI_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+ IXGBE_ACI_RC_EACCES_BMCU = 29, /* BMC Update in progress */
+};
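As a hedged illustration of how these codes line up with standard errno
values (hypothetical helper; the driver's actual mapping may differ):

	#include <linux/errno.h>

	static int demo_aci_err_to_errno(enum ixgbe_aci_err rc)
	{
		switch (rc) {
		case IXGBE_ACI_RC_OK:
			return 0;
		case IXGBE_ACI_RC_EPERM:
			return -EPERM;
		case IXGBE_ACI_RC_ENOMEM:
			return -ENOMEM;
		case IXGBE_ACI_RC_EBUSY:
			return -EBUSY;
		case IXGBE_ACI_RC_EINVAL:
			return -EINVAL;
		case IXGBE_ACI_RC_EAGAIN:
			return -EAGAIN;
		default:
			return -EIO;
		}
	}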
+
+/* Admin Command Interface (ACI) opcodes */
+enum ixgbe_aci_opc {
+ ixgbe_aci_opc_get_ver = 0x0001,
+ ixgbe_aci_opc_driver_ver = 0x0002,
+ ixgbe_aci_opc_get_exp_err = 0x0005,
+
+ /* resource ownership */
+ ixgbe_aci_opc_req_res = 0x0008,
+ ixgbe_aci_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ixgbe_aci_opc_list_func_caps = 0x000A,
+ ixgbe_aci_opc_list_dev_caps = 0x000B,
+
+ /* safe disable of RXEN */
+ ixgbe_aci_opc_disable_rxen = 0x000C,
+
+ /* FW events */
+ ixgbe_aci_opc_get_fw_event = 0x0014,
+
+ /* PHY commands */
+ ixgbe_aci_opc_get_phy_caps = 0x0600,
+ ixgbe_aci_opc_set_phy_cfg = 0x0601,
+ ixgbe_aci_opc_restart_an = 0x0605,
+ ixgbe_aci_opc_get_link_status = 0x0607,
+ ixgbe_aci_opc_set_event_mask = 0x0613,
+ ixgbe_aci_opc_get_link_topo = 0x06E0,
+ ixgbe_aci_opc_get_link_topo_pin = 0x06E1,
+ ixgbe_aci_opc_read_i2c = 0x06E2,
+ ixgbe_aci_opc_write_i2c = 0x06E3,
+ ixgbe_aci_opc_read_mdio = 0x06E4,
+ ixgbe_aci_opc_write_mdio = 0x06E5,
+ ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
+ ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
+ ixgbe_aci_opc_set_gpio = 0x06EC,
+ ixgbe_aci_opc_get_gpio = 0x06ED,
+ ixgbe_aci_opc_sff_eeprom = 0x06EE,
+ ixgbe_aci_opc_prog_topo_dev_nvm = 0x06F2,
+ ixgbe_aci_opc_read_topo_dev_nvm = 0x06F3,
+
+ /* NVM commands */
+ ixgbe_aci_opc_nvm_read = 0x0701,
+ ixgbe_aci_opc_nvm_erase = 0x0702,
+ ixgbe_aci_opc_nvm_write = 0x0703,
+ ixgbe_aci_opc_nvm_cfg_read = 0x0704,
+ ixgbe_aci_opc_nvm_cfg_write = 0x0705,
+ ixgbe_aci_opc_nvm_checksum = 0x0706,
+ ixgbe_aci_opc_nvm_write_activate = 0x0707,
+ ixgbe_aci_opc_nvm_sr_dump = 0x0707,
+ ixgbe_aci_opc_nvm_save_factory_settings = 0x0708,
+ ixgbe_aci_opc_nvm_update_empr = 0x0709,
+ ixgbe_aci_opc_nvm_pkg_data = 0x070A,
+ ixgbe_aci_opc_nvm_pass_component_tbl = 0x070B,
+
+ /* Alternate Structure Commands */
+ ixgbe_aci_opc_write_alt_direct = 0x0900,
+ ixgbe_aci_opc_write_alt_indirect = 0x0901,
+ ixgbe_aci_opc_read_alt_direct = 0x0902,
+ ixgbe_aci_opc_read_alt_indirect = 0x0903,
+ ixgbe_aci_opc_done_alt_write = 0x0904,
+ ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+
+ /* debug commands */
+ ixgbe_aci_opc_debug_dump_internals = 0xFF08,
+
+ /* SystemDiagnostic commands */
+ ixgbe_aci_opc_set_health_status_config = 0xFF20,
+ ixgbe_aci_opc_get_supported_health_status_codes = 0xFF21,
+ ixgbe_aci_opc_get_health_status = 0xFF22,
+ ixgbe_aci_opc_clear_health_status = 0xFF23,
+};
+
+/* Get version (direct 0x0001) */
+struct ixgbe_aci_cmd_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+
+#define IXGBE_DRV_VER_STR_LEN_E610 32
+
+/* Send driver version (indirect 0x0002) */
+struct ixgbe_aci_cmd_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ixgbe_aci_cmd_get_exp_err {
+ __le32 reason;
+#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF
+ __le32 identifier;
+ u8 rsvd[8];
+};
+
+/* FW update timeout definitions are in milliseconds */
+#define IXGBE_NVM_TIMEOUT 180000
+
+enum ixgbe_aci_res_access_type {
+ IXGBE_RES_READ = 1,
+ IXGBE_RES_WRITE
+};
+
+enum ixgbe_aci_res_ids {
+ IXGBE_NVM_RES_ID = 1,
+ IXGBE_SPD_RES_ID,
+ IXGBE_CHANGE_LOCK_RES_ID,
+ IXGBE_GLOBAL_CFG_LOCK_RES_ID
+};
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct ixgbe_aci_cmd_req_res {
+ __le16 res_id;
+ __le16 access_type;
+
+ /* Upon successful completion, FW writes this value and driver is
+ * expected to release resource before timeout. This value is provided
+ * in milliseconds.
+ */
+ __le32 timeout;
+#define IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define IXGBE_ACI_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define IXGBE_ACI_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define IXGBE_ACI_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+ /* For SDP: pin ID of the SDP */
+ __le32 res_number;
+ __le16 status;
+#define IXGBE_ACI_RES_GLBL_SUCCESS 0
+#define IXGBE_ACI_RES_GLBL_IN_PROG 1
+#define IXGBE_ACI_RES_GLBL_DONE 2
+ u8 reserved[2];
+};
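Sketch of the ownership handshake these fields describe, using the helpers
declared in ixgbe_e610.h earlier in this patch (error handling trimmed; hw
is assumed to be initialized):

	int err;

	/* Request shared read ownership; on success FW reports the window
	 * (in ms) within which the resource must be released.
	 */
	err = ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, IXGBE_RES_READ,
				IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS);
	if (err)
		return err;

	/* ... access the NVM while ownership is held ... */

	ixgbe_release_res(hw, IXGBE_NVM_RES_ID);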
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ixgbe_aci_cmd_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ixgbe_aci_cmd_list_caps_elem {
+ __le16 cap;
+#define IXGBE_ACI_CAPS_VALID_FUNCTIONS 0x0005
+#define IXGBE_ACI_MAX_VALID_FUNCTIONS 0x8
+#define IXGBE_ACI_CAPS_SRIOV 0x0012
+#define IXGBE_ACI_CAPS_VF 0x0013
+#define IXGBE_ACI_CAPS_VMDQ 0x0014
+#define IXGBE_ACI_CAPS_VSI 0x0017
+#define IXGBE_ACI_CAPS_DCB 0x0018
+#define IXGBE_ACI_CAPS_RSS 0x0040
+#define IXGBE_ACI_CAPS_RXQS 0x0041
+#define IXGBE_ACI_CAPS_TXQS 0x0042
+#define IXGBE_ACI_CAPS_MSIX 0x0043
+#define IXGBE_ACI_CAPS_FD 0x0045
+#define IXGBE_ACI_CAPS_1588 0x0046
+#define IXGBE_ACI_CAPS_MAX_MTU 0x0047
+#define IXGBE_ACI_CAPS_NVM_VER 0x0048
+#define IXGBE_ACI_CAPS_PENDING_NVM_VER 0x0049
+#define IXGBE_ACI_CAPS_OROM_VER 0x004A
+#define IXGBE_ACI_CAPS_PENDING_OROM_VER 0x004B
+#define IXGBE_ACI_CAPS_PENDING_NET_VER 0x004D
+#define IXGBE_ACI_CAPS_INLINE_IPSEC 0x0070
+#define IXGBE_ACI_CAPS_NUM_ENABLED_PORTS 0x0072
+#define IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
+#define IXGBE_ACI_CAPS_NVM_MGMT 0x0080
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0 0x0081
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1 0x0082
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2 0x0083
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+ u8 major_ver;
+ u8 minor_ver;
+ /* Number of resources described by this capability */
+ __le32 number;
+ /* Only meaningful for some types of resources */
+ __le32 logical_id;
+ /* Only meaningful for some types of resources */
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
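+
+/* Usage sketch (illustrative; the helper name is hypothetical): scan the
+ * buffer returned by Get Device Capabilities (0x000B), which holds an
+ * array of these elements; 'count' comes from the command response.
+ */
+static inline u32 ixgbe_aci_caps_max_mtu(const struct ixgbe_aci_cmd_list_caps_elem *elems,
+					 u32 count)
+{
+	u32 i;
+
+	for (i = 0; i < count; i++)
+		if (le16_to_cpu(elems[i].cap) == IXGBE_ACI_CAPS_MAX_MTU)
+			return le32_to_cpu(elems[i].number);
+
+	return 0;
+}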
+
+/* Disable RXEN (direct 0x000C) */
+struct ixgbe_aci_cmd_disable_rxen {
+ u8 lport_num;
+ u8 reserved[15];
+};
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ixgbe_aci_cmd_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+	/* Byte 18, bit 0 - Report qualified modules */
+#define IXGBE_ACI_GET_PHY_RQM BIT(0)
+	/* Byte 18, bits 3:1 - Report mode
+ * 000b - Report topology capabilities, without media
+ * 001b - Report topology capabilities, with media
+ * 010b - Report Active configuration
+ * 011b - Report PHY Type and FEC mode capabilities
+ * 100b - Report Default capabilities
+ */
+#define IXGBE_ACI_REPORT_MODE_M GENMASK(3, 1)
+#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA 0
+#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA BIT(1)
+#define IXGBE_ACI_REPORT_ACTIVE_CFG BIT(2)
+#define IXGBE_ACI_REPORT_DFLT_CFG BIT(3)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* PHY type defines (extended):
+ * the first set of defines is for phy_type_low.
+ */
+#define IXGBE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define IXGBE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define IXGBE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26)
+#define IXGBE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28)
+#define IXGBE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29)
+#define IXGBE_PHY_TYPE_LOW_MAX_INDEX 29
+/* The second set of defines is for phy_type_high. */
+#define IXGBE_PHY_TYPE_HIGH_10BASE_T BIT_ULL(1)
+#define IXGBE_PHY_TYPE_HIGH_10M_SGMII BIT_ULL(2)
+#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII BIT_ULL(56)
+#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII BIT_ULL(57)
+#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII BIT_ULL(58)
+#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII BIT_ULL(59)
+#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII BIT_ULL(60)
+#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII BIT_ULL(61)
+#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX 61
+
+struct ixgbe_aci_cmd_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define IXGBE_ACI_PHY_LOW_POWER_MODE BIT(2)
+#define IXGBE_ACI_PHY_EN_LINK BIT(3)
+#define IXGBE_ACI_PHY_AN_MODE BIT(4)
+#define IXGBE_ACI_PHY_EN_MOD_QUAL BIT(5)
+#define IXGBE_ACI_PHY_EN_LESM BIT(6)
+#define IXGBE_ACI_PHY_EN_AUTO_FEC BIT(7)
+#define IXGBE_ACI_PHY_CAPS_MASK GENMASK(7, 0)
+ u8 low_power_ctrl_an;
+#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE28 BIT(1)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE73 BIT(2)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE37 BIT(3)
+ __le16 eee_cap;
+#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
+ u8 link_fec_options;
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define IXGBE_ACI_PHY_FEC_25G_KR_REQ BIT(3)
+#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define IXGBE_ACI_PHY_FEC_MASK 0xdf
+ u8 module_compliance_enforcement;
+#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE BIT(0)
+ u8 extended_compliance_code;
+#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define IXGBE_ACI_MOD_TYPE_IDENT 1
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
+#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd3;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd4;
+ } qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX];
+};
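+
+/* Usage sketch (illustrative; the helper name is hypothetical): classify
+ * the connected module from the module_type bytes of a Get PHY
+ * Capabilities response.
+ */
+static inline bool ixgbe_aci_module_is_sfp_plus(const struct ixgbe_aci_cmd_get_phy_caps_data *caps)
+{
+	return caps->module_type[0] == IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS;
+}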
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ixgbe_aci_cmd_set_phy_cfg {
+ u8 lport_num;
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set PHY config command data structure */
+struct ixgbe_aci_cmd_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_ENA_VALID_MASK 0xef
+#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define IXGBE_ACI_PHY_ENA_LOW_POWER BIT(2)
+#define IXGBE_ACI_PHY_ENA_LINK BIT(3)
+#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define IXGBE_ACI_PHY_ENA_LESM BIT(6)
+#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7)
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+ __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for response, with only the lport_num field present.
+ */
+struct ixgbe_aci_cmd_restart_an {
+ u8 lport_num;
+ u8 reserved;
+ u8 cmd_flags;
+#define IXGBE_ACI_RESTART_AN_LINK_RESTART BIT(1)
+#define IXGBE_ACI_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
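+
+/* Usage sketch (illustrative; the helper name is hypothetical): after a
+ * Set PHY config (0x0601), restart auto-negotiation with the link
+ * enabled, per the NOTE above.
+ */
+static inline void ixgbe_aci_fill_restart_an(struct ixgbe_aci_cmd_restart_an *cmd)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART |
+			 IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+}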
+
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status {
+ u8 lport_num;
+ u8 reserved;
+ __le16 cmd_flags;
+#define IXGBE_ACI_LSE_M GENMASK(1, 0)
+#define IXGBE_ACI_LSE_NOP 0x0
+#define IXGBE_ACI_LSE_DIS 0x2
+#define IXGBE_ACI_LSE_ENA 0x3
+ /* only response uses this flag */
+#define IXGBE_ACI_LSE_IS_ENABLED 0x1
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status_data {
+ u8 topo_media_conflict;
+#define IXGBE_ACI_LINK_TOPO_CONFLICT BIT(0)
+#define IXGBE_ACI_LINK_MEDIA_CONFLICT BIT(1)
+#define IXGBE_ACI_LINK_TOPO_CORRUPT BIT(2)
+#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT BIT(4)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA BIT(7)
+ u8 link_cfg_err;
+#define IXGBE_ACI_LINK_CFG_ERR BIT(0)
+#define IXGBE_ACI_LINK_CFG_COMPLETED BIT(1)
+#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL BIT(2)
+#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
+#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
+#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
+#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
+ u8 link_info;
+#define IXGBE_ACI_LINK_UP BIT(0) /* Link Status */
+#define IXGBE_ACI_LINK_FAULT BIT(1)
+#define IXGBE_ACI_LINK_FAULT_TX BIT(2)
+#define IXGBE_ACI_LINK_FAULT_RX BIT(3)
+#define IXGBE_ACI_LINK_FAULT_REMOTE BIT(4)
+#define IXGBE_ACI_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define IXGBE_ACI_MEDIA_AVAILABLE BIT(6)
+#define IXGBE_ACI_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define IXGBE_ACI_AN_COMPLETED BIT(0)
+#define IXGBE_ACI_LP_AN_ABILITY BIT(1)
+#define IXGBE_ACI_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define IXGBE_ACI_FEC_EN BIT(3)
+#define IXGBE_ACI_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define IXGBE_ACI_LINK_PAUSE_TX BIT(5)
+#define IXGBE_ACI_LINK_PAUSE_RX BIT(6)
+#define IXGBE_ACI_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define IXGBE_ACI_LINK_PHY_TEMP_ALARM BIT(0)
+#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port Tx Suspended */
+#define IXGBE_ACI_LINK_TX_ACTIVE 0
+#define IXGBE_ACI_LINK_TX_DRAINED 1
+#define IXGBE_ACI_LINK_TX_FLUSHED 3
+ u8 lb_status;
+#define IXGBE_ACI_LINK_LB_PHY_LCL BIT(0)
+#define IXGBE_ACI_LINK_LB_PHY_RMT BIT(1)
+#define IXGBE_ACI_LINK_LB_MAC_LCL BIT(2)
+ __le16 max_frame_size;
+ u8 cfg;
+#define IXGBE_ACI_LINK_25G_KR_FEC_EN BIT(0)
+#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN BIT(1)
+#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN BIT(2)
+#define IXGBE_ACI_FEC_MASK GENMASK(2, 0)
+ /* Pacing Config */
+#define IXGBE_ACI_CFG_PACING_M GENMASK(6, 3)
+#define IXGBE_ACI_CFG_PACING_TYPE_M BIT(7)
+#define IXGBE_ACI_CFG_PACING_TYPE_AVG 0
+#define IXGBE_ACI_CFG_PACING_TYPE_FIXED IXGBE_ACI_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define IXGBE_ACI_PWR_CLASS_M GENMASK(5, 0)
+#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH 0
+#define IXGBE_ACI_LINK_PWR_BASET_HIGH 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1 0
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3 2
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define IXGBE_ACI_LINK_SPEED_M GENMASK(10, 0)
+#define IXGBE_ACI_LINK_SPEED_10MB BIT(0)
+#define IXGBE_ACI_LINK_SPEED_100MB BIT(1)
+#define IXGBE_ACI_LINK_SPEED_1000MB BIT(2)
+#define IXGBE_ACI_LINK_SPEED_2500MB BIT(3)
+#define IXGBE_ACI_LINK_SPEED_5GB BIT(4)
+#define IXGBE_ACI_LINK_SPEED_10GB BIT(5)
+#define IXGBE_ACI_LINK_SPEED_20GB BIT(6)
+#define IXGBE_ACI_LINK_SPEED_25GB BIT(7)
+#define IXGBE_ACI_LINK_SPEED_40GB BIT(8)
+#define IXGBE_ACI_LINK_SPEED_50GB BIT(9)
+#define IXGBE_ACI_LINK_SPEED_100GB BIT(10)
+#define IXGBE_ACI_LINK_SPEED_200GB BIT(11)
+#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15)
+ __le16 reserved3;
+ u8 ext_fec_status;
+#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 reserved4;
+	__le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+	__le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+	/* Get link status version 2 link partner data */
+	__le64 lp_phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+	__le64 lp_phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 lp_fec_adv;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define IXGBE_ACI_LINK_LP_PAUSE_ADV BIT(0)
+#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
+} __packed;
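+
+/* Usage sketch (illustrative; the helper name is hypothetical): map the
+ * link_speed bitfield from a Get Link Status response to Mb/s for a few
+ * common rates.
+ */
+static inline u32 ixgbe_aci_link_speed_to_mbps(__le16 link_speed)
+{
+	u16 speed = le16_to_cpu(link_speed);
+
+	if (speed & IXGBE_ACI_LINK_SPEED_10GB)
+		return 10000;
+	if (speed & IXGBE_ACI_LINK_SPEED_5GB)
+		return 5000;
+	if (speed & IXGBE_ACI_LINK_SPEED_2500MB)
+		return 2500;
+	if (speed & IXGBE_ACI_LINK_SPEED_1000MB)
+		return 1000;
+	if (speed & IXGBE_ACI_LINK_SPEED_100MB)
+		return 100;
+	if (speed & IXGBE_ACI_LINK_SPEED_10MB)
+		return 10;
+
+	return 0; /* unknown or not reported */
+}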
+
+/* Set event mask command (direct 0x0613) */
+struct ixgbe_aci_cmd_set_event_mask {
+ u8 lport_num;
+ u8 reserved[7];
+ __le16 event_mask;
+#define IXGBE_ACI_LINK_EVENT_UPDOWN BIT(1)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_NA BIT(2)
+#define IXGBE_ACI_LINK_EVENT_LINK_FAULT BIT(3)
+#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED BIT(7)
+#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT BIT(10)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT BIT(11)
+#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
+ u8 reserved1[6];
+};
+
+struct ixgbe_aci_cmd_link_topo_params {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M GENMASK(3, 0)
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED 4
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE 6
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_CTRL 9
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CLK_MUX 10
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS 11
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M GENMASK(7, 4)
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD 1
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT 2
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE 3
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS 5
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS 6
+ u8 index;
+};
+
+struct ixgbe_aci_cmd_link_topo_addr {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le16 handle;
+/* Used to decode the handle field */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+};
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ixgbe_aci_cmd_get_link_topo {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 node_part_num;
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_SI5384 0x25
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_C827 0x31
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC 0x49
+ u8 rsvd[9];
+};
+
+/* Get Link Topology Pin (direct, 0x06E1) */
+struct ixgbe_aci_cmd_get_link_topo_pin {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 input_io_params;
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GPIO 0
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RESET_N 1
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_INT_N 2
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_PRESENT_N 3
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_DIS 4
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_MODSEL_N 5
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LPMODE 6
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_TX_FAULT 7
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RX_LOSS 8
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS0 9
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RS1 10
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_EEPROM_WP 11
+/* 12 repeats intentionally due to two different uses depending on context */
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_LED 12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_RED_LED 12
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_GREEN_LED 13
+#define IXGBE_ACI_LINK_TOPO_IO_FUNC_BLUE_LED 14
+#define IXGBE_ACI_LINK_TOPO_INPUT_IO_TYPE_GPIO 3
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_params;
+/* Use IXGBE_ACI_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_flags;
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_POLARITY BIT(5)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_VALUE BIT(6)
+#define IXGBE_ACI_LINK_TOPO_OUTPUT_DRIVEN BIT(7)
+ u8 rsvd[7];
+};
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ixgbe_aci_cmd_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_SFF_I2CBUS_7BIT_M GENMASK(6, 0)
+#define IXGBE_ACI_SFF_I2CBUS_10BIT_M GENMASK(9, 0)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_M BIT(10)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT 0
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT IXGBE_ACI_SFF_I2CBUS_TYPE_M
+#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE 0
+#define IXGBE_ACI_SFF_UPDATE_PAGE 1
+#define IXGBE_ACI_SFF_UPDATE_BANK 2
+#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK 3
+#define IXGBE_ACI_SFF_IS_WRITE BIT(15)
+ __le16 i2c_offset;
+ u8 module_bank;
+ u8 module_page;
+ __le32 addr_high;
+ __le32 addr_low;
+};
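+
+/* Usage sketch (illustrative; the helper name and the standard SFP A0h
+ * address, 0x50 in 7-bit form, are assumptions): prepare an SFF EEPROM
+ * read at a given byte offset on the local port.
+ */
+static inline void ixgbe_aci_fill_sff_read(struct ixgbe_aci_cmd_sff_eeprom *cmd,
+					   u16 offset)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->i2c_bus_addr = cpu_to_le16(0x50 & IXGBE_ACI_SFF_I2CBUS_7BIT_M);
+	cmd->i2c_offset = cpu_to_le16(offset);
+}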
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ixgbe_aci_cmd_nvm {
+#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF
+ __le16 offset_low;
+ u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_LAST_CMD BIT(0)
+#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
+#define IXGBE_ACI_NVM_PRESERVE_ALL BIT(1)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_OROM BIT(4)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define IXGBE_ACI_NVM_SPECIAL_UPDATE BIT(6)
+#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define IXGBE_ACI_NVM_FLASH_ONLY BIT(7)
+#define IXGBE_ACI_NVM_RESET_LVL_M GENMASK(1, 0) /* Write reply only */
+#define IXGBE_ACI_NVM_POR_FLAG 0
+#define IXGBE_ACI_NVM_PERST_FLAG 1
+#define IXGBE_ACI_NVM_EMPR_FLAG 2
+#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16 bit value so these flags are
+ * all offset by 8 bits
+ */
+#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
+ __le16 module_typeid;
+ __le16 length;
+#define IXGBE_ACI_NVM_ERASE_LEN 0xFFFF
+ __le32 addr_high;
+ __le32 addr_low;
+};
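+
+/* Usage sketch (illustrative; the helper name is hypothetical): split the
+ * driver-side 16-bit Write Activate flags value described above; the low
+ * byte lands in cmd_flags and the high byte (flags2, e.g.
+ * IXGBE_ACI_NVM_ACTIV_REQ_EMPR) in offset_high.
+ */
+static inline void ixgbe_aci_nvm_set_activate_flags(struct ixgbe_aci_cmd_nvm *cmd,
+						    u16 flags)
+{
+	cmd->cmd_flags = flags & 0xFF;
+	cmd->offset_high = flags >> 8;
+}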
+
+/* NVM Module_Type ID and the offset/read_len needed for
+ * struct ixgbe_aci_cmd_nvm.
+ */
+#define IXGBE_ACI_NVM_START_POINT 0
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ixgbe_aci_cmd_nvm_checksum {
+ u8 flags;
+#define IXGBE_ACI_NVM_CHECKSUM_VERIFY BIT(0)
+#define IXGBE_ACI_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define IXGBE_ACI_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
+/**
+ * struct ixgbe_aci_desc - Admin Command (AC) descriptor
+ * @flags: IXGBE_ACI_FLAG_* flags
+ * @opcode: Admin command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts via the
+ * Admin Command Interface (ACI).
+ * The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
+ * result of the command are written to the Admin Command Interface (ACI) using
+ * the same descriptor format. Descriptors are in little-endian notation with
+ * 32-bit words.
+ */
+struct ixgbe_aci_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct ixgbe_aci_cmd_get_ver get_ver;
+ struct ixgbe_aci_cmd_driver_ver driver_ver;
+ struct ixgbe_aci_cmd_get_exp_err exp_err;
+ struct ixgbe_aci_cmd_req_res res_owner;
+ struct ixgbe_aci_cmd_list_caps get_cap;
+ struct ixgbe_aci_cmd_disable_rxen disable_rxen;
+ struct ixgbe_aci_cmd_get_phy_caps get_phy;
+ struct ixgbe_aci_cmd_set_phy_cfg set_phy;
+ struct ixgbe_aci_cmd_restart_an restart_an;
+ struct ixgbe_aci_cmd_get_link_status get_link_status;
+ struct ixgbe_aci_cmd_set_event_mask set_event_mask;
+ struct ixgbe_aci_cmd_get_link_topo get_link_topo;
+ struct ixgbe_aci_cmd_get_link_topo_pin get_link_topo_pin;
+ struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;
+ struct ixgbe_aci_cmd_nvm nvm;
+ struct ixgbe_aci_cmd_nvm_checksum nvm_checksum;
+ } params;
+};
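+
+/* Usage sketch (illustrative; the helper name is hypothetical): prepare a
+ * Get Version (direct 0x0001) descriptor. FW fills params.get_ver on
+ * completion; flag handling and the send path are defined elsewhere.
+ */
+static inline void ixgbe_aci_fill_get_ver(struct ixgbe_aci_desc *desc)
+{
+	memset(desc, 0, sizeof(*desc));
+	desc->opcode = cpu_to_le16(0x0001); /* get version */
+}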
+
+/* E610-specific adapter context structures */
+
+struct ixgbe_link_status {
+	/* Refer to the IXGBE_PHY_TYPE_LOW_* and IXGBE_PHY_TYPE_HIGH_*
+	 * defines for the bit definitions
+	 */
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u16 max_frame_size;
+ u16 link_speed;
+ u16 req_speeds;
+ u8 topo_media_conflict;
+ u8 link_cfg_err;
+ u8 lse_ena; /* Link Status Event notification */
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 fec_info;
+ u8 pacing;
+	/* Refer to the module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE] defines
+	 * in struct ixgbe_aci_cmd_get_phy_caps_data
+	 */
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* Common HW capabilities for SW use */
+struct ixgbe_hw_caps {
+ /* Write CSR protection */
+ u64 wr_csr_prot;
+ u32 switching_mode;
+ /* switching mode supported - EVB switching (including cloud) */
+#define IXGBE_NVM_IMAGE_TYPE_EVB 0x0
+
+ /* Manageability mode & supported protocols over MCTP */
+ u32 mgmt_mode;
+#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M GENMASK(3, 0)
+#define IXGBE_MGMT_MODE_CTL_INTERFACE_M GENMASK(7, 4)
+#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M GENMASK(11, 8)
+
+ u32 mgmt_protocols_mctp;
+#define IXGBE_MGMT_MODE_PROTO_RSVD BIT(0)
+#define IXGBE_MGMT_MODE_PROTO_PLDM BIT(1)
+#define IXGBE_MGMT_MODE_PROTO_OEM BIT(2)
+#define IXGBE_MGMT_MODE_PROTO_NC_SI BIT(3)
+
+ u32 os2bmc;
+ u32 valid_functions;
+ /* DCB capabilities */
+ u32 active_tc_bitmap;
+ u32 maxtc;
+
+ /* RSS related capabilities */
+ u32 rss_table_size; /* 512 for PFs and 64 for VFs */
+ u32 rss_table_entry_width; /* RSS Entry width in bits */
+
+ /* Tx/Rx queues */
+ u32 num_rxq; /* Number/Total Rx queues */
+ u32 rxq_first_id; /* First queue ID for Rx queues */
+ u32 num_txq; /* Number/Total Tx queues */
+ u32 txq_first_id; /* First queue ID for Tx queues */
+
+ /* MSI-X vectors */
+ u32 num_msix_vectors;
+ u32 msix_vector_first_id;
+
+ /* Max MTU for function or device */
+ u32 max_mtu;
+
+ /* WOL related */
+ u32 num_wol_proxy_fltr;
+ u32 wol_proxy_vsi_seid;
+
+ /* LED/SDP pin count */
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+
+ /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
+#define IXGBE_MAX_SUPPORTED_GPIO_LED 12
+#define IXGBE_MAX_SUPPORTED_GPIO_SDP 8
+ u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED];
+ u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP];
+ /* SR-IOV virtualization */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+ /* VMDQ */
+ u8 vmdq; /* VMDQ supported */
+
+ /* EVB capabilities */
+ u8 evb_802_1_qbg; /* Edge Virtual Bridging */
+ u8 evb_802_1_qbh; /* Bridge Port Extension */
+
+ u8 dcb;
+ u8 iscsi;
+ u8 ieee_1588;
+ u8 mgmt_cem;
+
+ /* WoL and APM support */
+#define IXGBE_WOL_SUPPORT_M BIT(0)
+#define IXGBE_ACPI_PROG_MTHD_M BIT(1)
+#define IXGBE_PROXY_SUPPORT_M BIT(2)
+ u8 apm_wol_support;
+ u8 acpi_prog_mthd;
+ u8 proxy_support;
+ bool nvm_update_pending_nvm;
+ bool nvm_update_pending_orom;
+ bool nvm_update_pending_netlist;
+#define IXGBE_NVM_PENDING_NVM_IMAGE BIT(0)
+#define IXGBE_NVM_PENDING_OROM BIT(1)
+#define IXGBE_NVM_PENDING_NETLIST BIT(2)
+ bool sec_rev_disabled;
+ bool update_disabled;
+ bool nvm_unified_update;
+ bool netlist_auth;
+#define IXGBE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
+#define IXGBE_NVM_MGMT_UPDATE_DISABLED BIT(1)
+#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
+ bool no_drop_policy_support;
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance; /* false: not supported, true: supported */
+ /* Post update reset restriction */
+ bool reset_restrict_support; /* false: not supported, true: supported */
+
+ /* External topology device images within the NVM */
+#define IXGBE_EXT_TOPO_DEV_IMG_COUNT 4
+ u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M GENMASK(15, 8)
+ bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
+ bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
+} __packed;
+
+/* Function specific capabilities */
+struct ixgbe_hw_func_caps {
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
+ u32 guar_num_vsi;
+ struct ixgbe_hw_caps common_cap;
+ bool no_drop_policy_ena;
+};
+
+/* Device wide capabilities */
+struct ixgbe_hw_dev_caps {
+ struct ixgbe_hw_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
+ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
+ u32 num_funcs;
+};
+
+/* ACI event information */
+struct ixgbe_aci_event {
+ struct ixgbe_aci_desc desc;
+ u8 *msg_buf;
+ u16 msg_len;
+ u16 buf_len;
+};
+
+struct ixgbe_aci_info {
+ struct mutex lock; /* admin command interface lock */
+ enum ixgbe_aci_err last_status; /* last status of sent admin command */
+};
+
+/* Option ROM version information */
+struct ixgbe_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+ u32 srev; /* Security revision */
+};
+
+/* NVM version information */
+struct ixgbe_nvm_info {
+ u32 eetrack;
+ u32 srev;
+ u8 major;
+ u8 minor;
+} __packed;
+
+/* netlist version information */
+struct ixgbe_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+} __packed;
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ixgbe_flash_bank {
+ IXGBE_INVALID_FLASH_BANK,
+ IXGBE_1ST_FLASH_BANK,
+ IXGBE_2ND_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ixgbe_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Ptr to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ixgbe_flash_bank nvm_bank; /* Active NVM bank */
+ enum ixgbe_flash_bank orom_bank; /* Active OROM bank */
+ enum ixgbe_flash_bank netlist_bank; /* Active Netlist bank */
+};
+
+/* Flash Chip Information */
+struct ixgbe_flash_info {
+ struct ixgbe_orom_info orom; /* Option ROM version info */
+ u32 flash_size; /* Available flash size in bytes */
+ struct ixgbe_nvm_info nvm; /* NVM version information */
+ struct ixgbe_netlist_info netlist; /* Netlist version info */
+ struct ixgbe_bank_info banks; /* Flash Bank information */
+ u16 sr_words; /* Shadow RAM size in words */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+#endif /* _IXGBE_TYPE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 81e1df83f136..1fc821fb351a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include <linux/pci.h>
#include <linux/delay.h>
@@ -66,7 +66,9 @@ int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* Resets the hardware by resetting the transmit and receive units, masks
* and clears all interrupts, performs a PHY reset, and performs a link (MAC)
* reset.
- **/
+ *
+ * Return: 0 on success or negative value on failure
+ */
int ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -133,10 +135,14 @@ mac_reset_top:
hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
hw->mac.ops.init_rx_addrs(hw);
+ /* The following is not supported by E610. */
+ if (hw->mac.type == ixgbe_mac_e610)
+ return status;
+
/* Store the permanent SAN mac address */
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
- /* Add the SAN MAC address to the RAR only if it's a valid address */
+ /* Add the SAN MAC address to RAR if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index b69a680d3ab5..6ed360c5b605 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
+
+#ifndef _IXGBE_X540_H_
+#define _IXGBE_X540_H_
#include "ixgbe_type.h"
@@ -17,3 +20,5 @@ int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_X540_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index d9a8cf018d3b..277ceaf8a793 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "ixgbe_x540.h"
+#include "ixgbe_x550.h"
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_mbx.h"
@@ -2770,9 +2771,9 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* semaphore, -EIO when command fails or -EINVAL when incorrect
* params are passed.
**/
-static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 sub, u16 len,
- const char *driver_ver)
+int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver)
{
struct ixgbe_hic_drv_info2 fw_cmd;
int ret_val;
@@ -3505,14 +3506,14 @@ mac_reset_top:
return status;
}
-/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
+/** ixgbe_set_ethertype_anti_spoofing_x550 - Enable/Disable Ethertype
* anti-spoofing
* @hw: pointer to hardware structure
* @enable: enable or disable switch for Ethertype anti-spoofing
* @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
**/
-static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
- bool enable, int vf)
+void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
+ bool enable, int vf)
{
int vf_target_reg = vf >> 3;
int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
@@ -3527,14 +3528,14 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
-/** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning
+/** ixgbe_set_source_address_pruning_x550 - Enable/Disable src address pruning
* @hw: pointer to hardware structure
* @enable: enable or disable source address pruning
* @pool: Rx pool to set source address pruning for
**/
-static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
- bool enable,
- unsigned int pool)
+void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
+ bool enable,
+ unsigned int pool)
{
u64 pfflp;
@@ -3831,9 +3832,9 @@ static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
.set_source_address_pruning = \
- &ixgbe_set_source_address_pruning_X550, \
+ &ixgbe_set_source_address_pruning_x550, \
.set_ethertype_anti_spoofing = \
- &ixgbe_set_ethertype_anti_spoofing_X550, \
+ &ixgbe_set_ethertype_anti_spoofing_x550, \
.disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
@@ -4047,7 +4048,7 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_x)
};
-static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
+const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_a)
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
new file mode 100644
index 000000000000..3e4092f8da3e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IXGBE_X550_H_
+#define _IXGBE_X550_H_
+
+#include "ixgbe_type.h"
+
+extern const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT];
+
+int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver);
+void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
+ bool enable,
+ unsigned int pool);
+void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
+ bool enable, int vf);
+
+#endif /* _IXGBE_X550_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 5f08779c0e4e..a9bc96f6399d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_DEFINES_H_
#define _IXGBEVF_DEFINES_H_
@@ -16,6 +16,9 @@
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_E610_VF 0x57AD
+#define IXGBE_SUBDEV_ID_E610_VF_HV 0x00FF
+
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 130cb868774c..4384e892f967 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_
@@ -418,6 +418,8 @@ enum ixgbevf_boards {
board_X550EM_x_vf,
board_X550EM_x_vf_hv,
board_x550em_a_vf,
+ board_e610_vf,
+ board_e610_vf_hv,
};
enum ixgbevf_xcast_modes {
@@ -434,12 +436,13 @@ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
+extern const struct ixgbevf_info ixgbevf_e610_vf_info;
extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
-extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info;
/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 149911e3002a..6442f115a262 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
@@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
static char ixgbevf_copyright[] =
- "Copyright (c) 2009 - 2018 Intel Corporation.";
+ "Copyright (c) 2009 - 2024 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
[board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
[board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
+ [board_e610_vf] = &ixgbevf_e610_vf_info,
+ [board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -71,6 +73,9 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
+ {PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID,
+ IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf},
/* required last entry */
{0, }
};
@@ -732,10 +737,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- /* XDP packets use error pointer so abort at this point */
- if (IS_ERR(skb))
- return true;
-
/* verify that the packet does not have any known errors */
if (unlikely(ixgbevf_test_staterr(rx_desc,
IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
@@ -1044,9 +1045,9 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
return IXGBEVF_XDP_TX;
}
-static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring,
- struct xdp_buff *xdp)
+static int ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ struct xdp_buff *xdp)
{
int result = IXGBEVF_XDP_PASS;
struct ixgbevf_ring *xdp_ring;
@@ -1080,7 +1081,7 @@ out_failure:
break;
}
xdp_out:
- return ERR_PTR(-result);
+ return result;
}
static unsigned int ixgbevf_rx_frame_truesize(struct ixgbevf_ring *rx_ring,
@@ -1122,6 +1123,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb = rx_ring->skb;
bool xdp_xmit = false;
struct xdp_buff xdp;
+ int xdp_res = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
@@ -1165,11 +1167,11 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
#endif
- skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
+ xdp_res = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
}
- if (IS_ERR(skb)) {
- if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
+ if (xdp_res) {
+ if (xdp_res == IXGBEVF_XDP_TX) {
xdp_xmit = true;
ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
size);
@@ -1189,7 +1191,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
}
/* exit if we failed to retrieve a buffer */
- if (!skb) {
+ if (!xdp_res && !skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
@@ -1203,7 +1205,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
continue;
/* verify the packet layout is correct */
- if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
+ if (xdp_res || ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
@@ -4693,6 +4695,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X540_vf:
dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
break;
+ case ixgbe_mac_e610_vf:
+ dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n");
+ break;
case ixgbe_mac_82599_vf:
default:
dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index a55dd978f7ca..24d0237e7a99 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -505,15 +505,3 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = {
.check_for_ack = ixgbevf_check_for_ack_vf,
.check_for_rst = ixgbevf_check_for_rst_vf,
};
-
-/* Mailbox operations when running on Hyper-V.
- * On Hyper-V, PF/VF communication is not through the
- * hardware mailbox; this communication is through
- * a software mediated path.
- * Most mail box operations are noop while running on
- * Hyper-V.
- */
-const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
- .init_params = ixgbevf_init_mbx_params_vf,
- .check_for_rst = ixgbevf_check_for_rst_vf,
-};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 1641d00d8ed3..da7a72ecce7a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "vf.h"
#include "ixgbevf.h"
@@ -1076,3 +1076,13 @@ const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
.mac = ixgbe_mac_x550em_a_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_e610_vf_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
+const struct ixgbevf_info ixgbevf_e610_vf_hv_info = {
+ .mac = ixgbe_mac_e610_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index b4eef5b6c172..2d791bc26ae4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2018 Intel Corporation. */
+/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
@@ -54,6 +54,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_mac_x550em_a_vf,
+ ixgbe_mac_e610,
+ ixgbe_mac_e610_vf,
ixgbe_num_macs
};
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 660dff5426e7..83ce3bfefa5c 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -90,7 +90,6 @@ struct ltq_etop_priv {
struct net_device *netdev;
struct platform_device *pdev;
struct ltq_eth_data *pldata;
- struct resource *res;
struct mii_bus *mii_bus;
@@ -643,31 +642,14 @@ ltq_etop_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct ltq_etop_priv *priv;
- struct resource *res;
int err;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get etop resource\n");
- err = -ENOENT;
- goto err_out;
- }
-
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "failed to request etop resource\n");
- err = -EBUSY;
- goto err_out;
- }
-
- ltq_etop_membase = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!ltq_etop_membase) {
+ ltq_etop_membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ltq_etop_membase)) {
dev_err(&pdev->dev, "failed to remap etop engine %d\n",
pdev->id);
- err = -ENOMEM;
+ err = PTR_ERR(ltq_etop_membase);
goto err_out;
}
@@ -679,7 +661,6 @@ ltq_etop_probe(struct platform_device *pdev)
dev->netdev_ops = &ltq_eth_netdev_ops;
dev->ethtool_ops = &ltq_etop_ethtool_ops;
priv = netdev_priv(dev);
- priv->res = res;
priv->pdev = pdev;
priv->pldata = dev_get_platdata(&pdev->dev);
priv->netdev = dev;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 1fb285fa0bdb..aa049cee576d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3960,23 +3960,30 @@ static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct mvneta_port, phylink_pcs);
}
-static int mvneta_pcs_validate(struct phylink_pcs *pcs,
- unsigned long *supported,
- const struct phylink_link_state *state)
+static unsigned int mvneta_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
{
- /* We only support QSGMII, SGMII, 802.3z and RGMII modes.
- * When in 802.3z mode, we must have AN enabled:
+ /* When operating in an 802.3z mode, we must have AN enabled:
* "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1."
+ * Therefore, inband is "required".
*/
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- return -EINVAL;
+ if (phy_interface_mode_is_8023z(interface))
+ return LINK_INBAND_ENABLE;
- return 0;
+ /* QSGMII, SGMII and RGMII can be configured to use inband
+ * signalling of the AN result. Indicate these as "possible".
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_QSGMII ||
+ phy_interface_mode_is_rgmii(interface))
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ /* For any other modes, indicate that inband is not supported. */
+ return LINK_INBAND_DISABLE;
}
-static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
+static void mvneta_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
@@ -4071,7 +4078,7 @@ static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
- .pcs_validate = mvneta_pcs_validate,
+ .pcs_inband_caps = mvneta_pcs_inband_caps,
.pcs_get_state = mvneta_pcs_get_state,
.pcs_config = mvneta_pcs_config,
.pcs_an_restart = mvneta_pcs_an_restart,
@@ -4385,6 +4392,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
if (pp->neta_armada3700)
return 0;
+ netdev_lock(port->napi.dev);
spin_lock(&pp->lock);
/*
* Configuring the driver for a new CPU while the driver is
@@ -4411,7 +4419,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
/* Mask all ethernet port interrupts */
on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
- napi_enable(&port->napi);
+ napi_enable_locked(&port->napi);
/*
* Enable per-CPU interrupts on the CPU that is
@@ -4432,6 +4440,8 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
MVNETA_CAUSE_LINK_CHANGE);
netif_tx_start_all_queues(pp->dev);
spin_unlock(&pp->lock);
+ netdev_unlock(port->napi.dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 571631a30320..d294580f4832 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6188,6 +6188,7 @@ static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
}
static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
@@ -6224,22 +6225,30 @@ static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
.pcs_config = mvpp2_xlg_pcs_config,
};
-static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
- unsigned long *supported,
- const struct phylink_link_state *state)
+static unsigned int mvpp2_gmac_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
{
- /* When in 802.3z mode, we must have AN enabled:
+ /* When operating in an 802.3z mode, we must have AN enabled:
* Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ * Therefore, inband is "required".
*/
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- return -EINVAL;
+ if (phy_interface_mode_is_8023z(interface))
+ return LINK_INBAND_ENABLE;
- return 0;
+ /* SGMII and RGMII can be configured to use inband signalling of the
+ * AN result. Indicate these as "possible".
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_rgmii(interface))
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ /* For any other modes, indicate that inband is not supported. */
+ return LINK_INBAND_DISABLE;
}
static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
@@ -6343,7 +6352,7 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
- .pcs_validate = mvpp2_gmac_pcs_validate,
+ .pcs_inband_caps = mvpp2_gmac_pcs_inband_caps,
.pcs_get_state = mvpp2_gmac_pcs_get_state,
.pcs_config = mvpp2_gmac_pcs_config,
.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 549436efc204..3a9825883d79 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -1137,6 +1137,43 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features
return err;
}
+static int octep_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct octep_device *oct = netdev_priv(dev);
+
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr);
+ ivi->spoofchk = true;
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ ivi->trusted = false;
+
+ return 0;
+}
+
+static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct octep_device *oct = netdev_priv(dev);
+ int err;
+
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac);
+ return -EADDRNOTAVAIL;
+ }
+
+ dev_dbg(&oct->pdev->dev, "set vf-%d mac to %pM\n", vf, mac);
+ ether_addr_copy(oct->vf_info[vf].mac_addr, mac);
+ oct->vf_info[vf].flags |= OCTEON_PFVF_FLAG_MAC_SET_BY_PF;
+
+ err = octep_ctrl_net_set_mac_addr(oct, vf, mac, true);
+ if (err)
+ dev_err(&oct->pdev->dev,
+ "Set VF%d MAC address failed via host control Mbox\n",
+ vf);
+
+ return err;
+}
+
static const struct net_device_ops octep_netdev_ops = {
.ndo_open = octep_open,
.ndo_stop = octep_stop,
@@ -1146,6 +1183,8 @@ static const struct net_device_ops octep_netdev_ops = {
.ndo_set_mac_address = octep_set_mac,
.ndo_change_mtu = octep_change_mtu,
.ndo_set_features = octep_set_features,
+ .ndo_get_vf_config = octep_get_vf_config,
+ .ndo_set_vf_mac = octep_set_vf_mac
};
/**
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index fee59e0e0138..3b56916af468 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -220,6 +220,7 @@ struct octep_iface_link_info {
/* The Octeon VF device specific info data structure.*/
struct octep_pfvf_info {
u8 mac_addr[ETH_ALEN];
+ u32 flags;
u32 mbox_version;
};
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
index e6eb98d70f3c..ebecdd29f3bd 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -156,12 +156,23 @@ static void octep_pfvf_set_mac_addr(struct octep_device *oct, u32 vf_id,
{
int err;
+ if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) {
+ dev_err(&oct->pdev->dev,
+ "VF%d attempted to override administrative set MAC address\n",
+ vf_id);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+ return;
+ }
+
err = octep_ctrl_net_set_mac_addr(oct, vf_id, cmd.s_set_mac.mac_addr, true);
if (err) {
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
- dev_err(&oct->pdev->dev, "Set VF MAC address failed via host control Mbox\n");
+ dev_err(&oct->pdev->dev, "Set VF%d MAC address failed via host control Mbox\n",
+ vf_id);
return;
}
+
+ ether_addr_copy(oct->vf_info[vf_id].mac_addr, cmd.s_set_mac.mac_addr);
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
}
@@ -171,10 +182,18 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
{
int err;
+ if (oct->vf_info[vf_id].flags & OCTEON_PFVF_FLAG_MAC_SET_BY_PF) {
+ dev_dbg(&oct->pdev->dev, "VF%d MAC address set by PF\n", vf_id);
+ ether_addr_copy(rsp->s_set_mac.mac_addr,
+ oct->vf_info[vf_id].mac_addr);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ return;
+ }
err = octep_ctrl_net_get_mac_addr(oct, vf_id, rsp->s_set_mac.mac_addr);
if (err) {
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
- dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n");
+ dev_err(&oct->pdev->dev, "Get VF%d MAC address failed via host control Mbox\n",
+ vf_id);
return;
}
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
index 0dc6eead292a..386a095a99bc 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h
@@ -8,8 +8,6 @@
#ifndef _OCTEP_PFVF_MBOX_H_
#define _OCTEP_PFVF_MBOX_H_
-/* VF flags */
-#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT_ULL(0) /* PF has set VF MAC address */
#define OCTEON_SDP_16K_HW_FRS 16380UL
#define OCTEON_SDP_64K_HW_FRS 65531UL
@@ -23,6 +21,10 @@ enum octep_pfvf_mbox_version {
#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+/* VF flags */
+/* PF has set VF MAC address */
+#define OCTEON_PFVF_FLAG_MAC_SET_BY_PF BIT(0)
+
enum octep_pfvf_mbox_opcode {
OCTEP_PFVF_MBOX_CMD_VERSION,
OCTEP_PFVF_MBOX_CMD_SET_MTU,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 62c07407eb94..005ca8a056c0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -313,6 +313,10 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \
+M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \
+ nix_bp_cfg_rsp) \
+M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \
+ msg_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
msg_req, nix_inline_ipsec_cfg) \
M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a5d1e2bddd58..613655fcd34f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -569,9 +569,17 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
mutex_unlock(&rvu->rsrc_lock);
}
-int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct msg_rsp *rsp)
+static u16 nix_get_channel(u16 chan, bool cpt_link)
+{
+ /* The CPT channel for a given link channel is assumed to be
+ * the link channel with BIT(11) set.
+ */
+ return cpt_link ? chan | BIT(11) : chan;
+}
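
A quick illustration of the mapping (values hypothetical, not part of the patch): since BIT(11) is 0x800, a link channel 0x002 maps to CPT channel 0x802, and channels pass through unchanged when cpt_link is false:

	/* illustrative only */
	nix_get_channel(0x002, true);   /* 0x002 | 0x800 = 0x802 */
	nix_get_channel(0x002, false);  /* 0x002 */
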
+
+static int nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp, bool cpt_link)
{
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, pf, type, err;
@@ -579,6 +587,7 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
struct nix_bp *bp;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
@@ -589,6 +598,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
if (is_sdp_pfvf(pcifunc))
type = NIX_INTF_TYPE_SDP;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
if (err)
@@ -597,8 +609,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
bp = &nix_hw->bp;
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ chan_v = nix_get_channel(chan, cpt_link);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg & ~BIT_ULL(16));
if (type == NIX_INTF_TYPE_LBK) {
@@ -617,6 +630,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ return nix_bp_disable(rvu, req, rsp, true);
+}
+
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
@@ -696,15 +723,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
return bpid;
}
-int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
- struct nix_bp_cfg_req *req,
- struct nix_bp_cfg_rsp *rsp)
+static int nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp,
+ bool cpt_link)
{
int blkaddr, pf, type, chan_id = 0;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
u16 chan_base, chan;
s16 bpid, bpid_base;
+ u16 chan_v;
u64 cfg;
pf = rvu_get_pf(pcifunc);
@@ -717,6 +746,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
type != NIX_INTF_TYPE_SDP)
return 0;
+ if (cpt_link && !rvu->hw->cpt_links)
+ return 0;
+
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
@@ -730,9 +762,11 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return -EINVAL;
}
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
+ chan_v = nix_get_channel(chan, cpt_link);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v));
cfg &= ~GENMASK_ULL(8, 0);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v),
cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
chan_id++;
bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
@@ -750,6 +784,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, false);
+}
+
+int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu,
+ struct nix_bp_cfg_req *req,
+ struct nix_bp_cfg_rsp *rsp)
+{
+ return nix_bp_enable(rvu, req, rsp, true);
+}
+
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
u64 format, bool v4, u64 *fidx)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index dbc971266865..cb6513ab35e7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -15,5 +15,6 @@ rvu_rep-y := rep.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
+rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
new file mode 100644
index 000000000000..09a5b5268205
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#include <net/xfrm.h>
+#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <crypto/aead.h>
+#include <crypto/gcm.h>
+
+#include "otx2_common.h"
+#include "otx2_struct.h"
+#include "cn10k_ipsec.h"
+
+static bool is_dev_support_ipsec_offload(struct pci_dev *pdev)
+{
+ return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev);
+}
+
+static bool cn10k_cpt_device_set_inuse(struct otx2_nic *pf)
+{
+ enum cn10k_cpt_hw_state_e state;
+
+ while (true) {
+ state = atomic_cmpxchg(&pf->ipsec.cpt_state,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE);
+ if (state == CN10K_CPT_HW_AVAILABLE)
+ return true;
+ if (state == CN10K_CPT_HW_UNAVAILABLE)
+ return false;
+
+ mdelay(1);
+ }
+}
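
The three CN10K_CPT_HW_* states act as a small hand-rolled tri-state lock: a caller spins only while another context holds the hardware, and bails out once the LF is torn down. The typical pattern, as used by cn10k_outb_write_sa and cn10k_outb_cpt_clean later in this file:

	if (!cn10k_cpt_device_set_inuse(pf))
		return -ENODEV;	/* HW torn down or never brought up */

	/* ... build and flush CPT instructions, poll for completion ... */

	cn10k_cpt_device_set_available(pf);
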
+
+static void cn10k_cpt_device_set_available(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_AVAILABLE);
+}
+
+static void cn10k_cpt_device_set_unavailable(struct otx2_nic *pf)
+{
+ atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_UNAVAILABLE);
+}
+
+static int cn10k_outb_cptlf_attach(struct otx2_nic *pf)
+{
+ struct rsrc_attach *attach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ /* Get memory to put this msg */
+ attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox);
+ if (!attach)
+ goto unlock;
+
+ attach->cptlfs = true;
+ attach->modify = true;
+
+ /* Send attach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_detach(struct otx2_nic *pf)
+{
+ struct rsrc_detach *detach;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox);
+ if (!detach)
+ goto unlock;
+
+ detach->partial = true;
+ detach->cptlfs = true;
+
+ /* Send detach request to AF */
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf)
+{
+ struct cpt_lf_alloc_req_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ /* PF function */
+ req->nix_pf_func = pf->pcifunc;
+ /* Enable SE-IE Engine Group */
+ req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP;
+
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_free(struct otx2_nic *pf)
+{
+ mutex_lock(&pf->mbox.lock);
+ otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox);
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+}
+
+static int cn10k_outb_cptlf_config(struct otx2_nic *pf)
+{
+ struct cpt_inline_ipsec_cfg_msg *req;
+ int ret = -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox);
+ if (!req)
+ goto unlock;
+
+ req->dir = CPT_INLINE_OUTBOUND;
+ req->enable = 1;
+ req->nix_pf_func = pf->pcifunc;
+ ret = otx2_sync_mbox_msg(&pf->mbox);
+unlock:
+ mutex_unlock(&pf->mbox.lock);
+ return ret;
+}
+
+static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf)
+{
+ u64 reg_val;
+
+ /* Set Execution Enable of instruction queue */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ reg_val |= BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Set iqueue's enqueuing */
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL);
+ reg_val |= BIT_ULL(0);
+ otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val);
+}
+
+static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf)
+{
+ u32 inflight, grb_cnt, gwb_cnt;
+ u32 nq_ptr, dq_ptr;
+ int timeout = 20;
+ u64 reg_val;
+ int cnt;
+
+ /* Disable instructions enqueuing */
+ otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull);
+
+ /* Wait for the instruction queue to drain, i.e. until the
+ * CPT_LF_INPROG.INFLIGHT count reads zero.
+ */
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ if (!inflight)
+ break;
+
+ usleep_range(10000, 20000);
+ if (timeout-- < 0) {
+ netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n");
+ break;
+ }
+ } while (1);
+
+ /* Disable executions in the LF's queue,
+ * the queue should be empty at this point
+ */
+ reg_val &= ~BIT_ULL(16);
+ otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
+
+ /* Wait for instruction queue to become empty */
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ if (reg_val & BIT_ULL(31))
+ cnt = 0;
+ else
+ cnt++;
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR);
+ nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_NQ_PTR, reg_val);
+ dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
+ } while ((cnt < 10) && (nq_ptr != dq_ptr));
+
+ cnt = 0;
+ do {
+ reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
+ inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val);
+ grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val);
+ gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val);
+ if (inflight == 0 && gwb_cnt < 40 &&
+ (grb_cnt == 0 || grb_cnt == 40))
+ cnt++;
+ else
+ cnt = 0;
+ } while (cnt < 10);
+}
+
+/* Allocate memory for CPT outbound Instruction queue.
+ * Instruction queue memory format is:
+ * -----------------------------
+ * | Instruction Group memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 16 Bytes) |
+ * | |
+ * ----------------------------- <-- CPT_LF_Q_BASE[ADDR]
+ * | Flow Control (128 Bytes) |
+ * | |
+ * -----------------------------
+ * | Instruction Memory |
+ * | (CPT_LF_Q_SIZE[SIZE_DIV40] |
+ * | x 40 x 64 Bytes) |
+ * | |
+ * -----------------------------
+ */
+static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN +
+ CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN;
+
+ iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size,
+ &iq->real_dma_addr, GFP_KERNEL);
+ if (!iq->real_vaddr)
+ return -ENOMEM;
+
+ /* iq->vaddr/dma_addr points to Flow Control location */
+ iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+ iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES;
+
+ /* Align pointers */
+ iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN);
+ iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN);
+ return 0;
+}
+
+static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf)
+{
+ struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq;
+
+ if (iq->real_vaddr)
+ dma_free_coherent(pf->dev, iq->size, iq->real_vaddr,
+ iq->real_dma_addr);
+
+ iq->real_vaddr = NULL;
+ iq->vaddr = NULL;
+}
+
+static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf)
+{
+ u64 reg_val;
+ int ret;
+
+ /* Allocate Memory for CPT IQ */
+ ret = cn10k_outb_cptlf_iq_alloc(pf);
+ if (ret)
+ return ret;
+
+ /* Disable IQ */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr);
+
+ /* Set IQ size */
+ reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 +
+ CN10K_CPT_EXTRA_SIZE_DIV40);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val);
+
+ return 0;
+}
+
+static int cn10k_outb_cptlf_init(struct otx2_nic *pf)
+{
+ int ret;
+
+ /* Initialize CPTLF Instruction Queue (IQ) */
+ ret = cn10k_outb_cptlf_iq_init(pf);
+ if (ret)
+ return ret;
+
+ /* Configure CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_config(pf);
+ if (ret)
+ goto iq_clean;
+
+ /* Enable CPTLF IQ */
+ cn10k_outb_cptlf_iq_enable(pf);
+ return 0;
+iq_clean:
+ cn10k_outb_cptlf_iq_free(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int ret;
+
+ /* Attach a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_attach(pf);
+ if (ret)
+ return ret;
+
+ /* Allocate a CPT LF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_alloc(pf);
+ if (ret)
+ goto detach;
+
+ /* Initialize the CPTLF for outbound ipsec offload */
+ ret = cn10k_outb_cptlf_init(pf);
+ if (ret)
+ goto lf_free;
+
+ pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf,
+ CN10K_CPT_LF_NQX(0));
+
+ /* Set ipsec offload enabled for this device */
+ pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ cn10k_cpt_device_set_available(pf);
+ return 0;
+
+lf_free:
+ cn10k_outb_cptlf_free(pf);
+detach:
+ cn10k_outb_cptlf_detach(pf);
+ return ret;
+}
+
+static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
+{
+ int ret;
+
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ netdev_err(pf->netdev, "CPT LF device unavailable\n");
+ return -ENODEV;
+ }
+
+ /* Set ipsec offload disabled for this device */
+ pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED;
+
+ /* Disable CPTLF Instruction Queue (IQ) */
+ cn10k_outb_cptlf_iq_disable(pf);
+
+ /* Set IQ base address and size to 0 */
+ otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0);
+ otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0);
+
+ /* Free CPTLF IQ */
+ cn10k_outb_cptlf_iq_free(pf);
+
+ /* Free and detach CPT LF */
+ cn10k_outb_cptlf_free(pf);
+ ret = cn10k_outb_cptlf_detach(pf);
+ if (ret)
+ netdev_err(pf->netdev, "Failed to detach CPT LF\n");
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return ret;
+}
+
+static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst,
+ u64 size)
+{
+ struct otx2_lmt_info *lmt_info;
+ u64 val = 0, tar_addr = 0;
+
+ lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id());
+ /* FIXME: val[0:10] LMT_ID.
+ * [12:15] no of LMTST - 1 in the burst.
+ * [19:63] data size of each LMTST in the burst except first.
+ */
+ val = (lmt_info->lmt_id & 0x7FF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are present.
+ * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
+ */
+ tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4;
+ dma_wmb();
+ memcpy((u64 *)lmt_info->lmt_addr, inst, size);
+ cn10k_lmt_flush(val, tar_addr);
+}
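
For the single 64-byte CPT_INST_S flushed here, the encoding works out as follows (a sketch of the arithmetic, not additional patch code):

	/* size = 64 bytes of CPT_INST_S
	 * tar_addr[6:4] = (64 / 16) - 1 = 3   -> first LMTST is 4 x 128 bits
	 * val[0:10]     = lmt_id              -> burst fields stay zero for
	 *                                        a single LMTST
	 */
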
+
+static int cn10k_wait_for_cpt_response(struct otx2_nic *pf,
+ struct cpt_res_s *res)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u64 *completion_ptr = (u64 *)res;
+
+ do {
+ if (time_after(jiffies, timeout)) {
+ netdev_err(pf->netdev, "CPT response timeout\n");
+ return -EBUSY;
+ }
+ } while ((READ_ONCE(*completion_ptr) & CN10K_CPT_COMP_E_MASK) ==
+ CN10K_CPT_COMP_E_NOTDONE);
+
+ if (!(res->compcode == CN10K_CPT_COMP_E_GOOD ||
+ res->compcode == CN10K_CPT_COMP_E_WARN) || res->uc_compcode) {
+ netdev_err(pf->netdev, "compcode=%x doneint=%x\n",
+ res->compcode, res->doneint);
+ netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n",
+ res->uc_compcode, (u64)res->uc_info, res->esn);
+ }
+ return 0;
+}
+
+static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info)
+{
+ dma_addr_t res_iova, dptr_iova, sa_iova;
+ struct cn10k_tx_sa_s *sa_dptr;
+ struct cpt_inst_s inst = {};
+ struct cpt_res_s *res;
+ u32 sa_size, off;
+ u64 *sptr, *dptr;
+ u64 reg_val;
+ int ret;
+
+ sa_iova = sa_info->iova;
+ if (!sa_iova)
+ return -EINVAL;
+
+ res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s),
+ &res_iova, GFP_ATOMIC);
+ if (!res)
+ return -ENOMEM;
+
+ sa_size = sizeof(struct cn10k_tx_sa_s);
+ sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC);
+ if (!sa_dptr) {
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res,
+ res_iova);
+ return -ENOMEM;
+ }
+
+ sptr = (__force u64 *)sa_info->base;
+ dptr = (__force u64 *)sa_dptr;
+ for (off = 0; off < (sa_size / 8); off++)
+ *(dptr + off) = (__force u64)cpu_to_be64(*(sptr + off));
+
+ res->compcode = CN10K_CPT_COMP_E_NOTDONE;
+ inst.res_addr = res_iova;
+ inst.dptr = (u64)dptr_iova;
+ inst.param2 = sa_size >> 3;
+ inst.dlen = sa_size;
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_WRITE_SA;
+ inst.opcode_minor = CN10K_IPSEC_MINOR_OP_WRITE_SA;
+ inst.cptr = sa_iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* Check if CPT-LF available */
+ if (!cn10k_cpt_device_set_inuse(pf)) {
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ dma_wmb();
+ ret = cn10k_wait_for_cpt_response(pf, res);
+ if (ret)
+ goto set_available;
+
+ /* Trigger CTX flush to write dirty data back to DRAM */
+ reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7);
+ otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val);
+
+set_available:
+ cn10k_cpt_device_set_available(pf);
+free_mem:
+ dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova);
+ dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova);
+ return ret;
+}
+
+static int cn10k_ipsec_get_hw_ctx_offset(void)
+{
+ /* Offset of the hardware context, in 8-byte words */
+ return (offsetof(struct cn10k_tx_sa_s, hw_ctx) / sizeof(u64)) & 0x7F;
+}
+
+static int cn10k_ipsec_get_ctx_push_size(void)
+{
+ /* Context push size, rounded up to a multiple of 8 bytes */
+ return (roundup(offsetof(struct cn10k_tx_sa_s, hw_ctx), 8) / 8) & 0x7F;
+}
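
With the cn10k_tx_sa_s layout from cn10k_ipsec.h (hw_ctx occupies words W31-W36, i.e. byte offset 248), both helpers reduce to the same value; a worked check:

	/* offsetof(struct cn10k_tx_sa_s, hw_ctx)       = 248
	 * hw_ctx_off    = (248 / 8) & 0x7F             = 31 words
	 * ctx_push_size = (roundup(248, 8) / 8) & 0x7F = 31
	 */
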
+
+static int cn10k_ipsec_get_aes_key_len(int key_len)
+{
+ /* key_len is aes key length in bytes */
+ switch (key_len) {
+ case 16:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_128;
+ case 24:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_192;
+ default:
+ return CN10K_IPSEC_SA_AES_KEY_LEN_256;
+ }
+}
+
+static void cn10k_outb_prepare_sa(struct xfrm_state *x,
+ struct cn10k_tx_sa_s *sa_entry)
+{
+ int key_len = (x->aead->alg_key_len + 7) / 8;
+ struct net_device *netdev = x->xso.dev;
+ u8 *key = x->aead->alg_key;
+ struct otx2_nic *pf;
+ u32 *tmp_salt;
+ u64 *tmp_key;
+ int idx;
+
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+
+ /* Context size, in units of 128-byte (OTX2_ALIGN) chunks */
+ pf = netdev_priv(netdev);
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->hw_ctx_off = cn10k_ipsec_get_hw_ctx_offset();
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+
+ /* Tell the ucode to skip two words of CPT_CTX_HW_S */
+ sa_entry->ctx_hdr_size = 1;
+
+ /* Allow Atomic operation (AOP) */
+ sa_entry->aop_valid = 1;
+
+ /* Outbound ESP in transport/tunnel mode with AES-GCM; IV taken from packet */
+ sa_entry->sa_dir = CN10K_IPSEC_SA_DIR_OUTB;
+ sa_entry->ipsec_protocol = CN10K_IPSEC_SA_IPSEC_PROTO_ESP;
+ sa_entry->enc_type = CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM;
+ sa_entry->iv_src = CN10K_IPSEC_SA_IV_SRC_PACKET;
+ if (x->props.mode == XFRM_MODE_TUNNEL)
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL;
+ else
+ sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT;
+
+ /* Last 4 bytes are salt */
+ key_len -= 4;
+ sa_entry->aes_key_len = cn10k_ipsec_get_aes_key_len(key_len);
+ memcpy(sa_entry->cipher_key, key, key_len);
+ tmp_key = (u64 *)sa_entry->cipher_key;
+
+ for (idx = 0; idx < key_len / 8; idx++)
+ tmp_key[idx] = (__force u64)cpu_to_be64(tmp_key[idx]);
+
+ memcpy(&sa_entry->iv_gcm_salt, key + key_len, 4);
+ tmp_salt = (u32 *)&sa_entry->iv_gcm_salt;
+ *tmp_salt = (__force u32)cpu_to_be32(*tmp_salt);
+
+ /* Write SA context data to memory before enabling */
+ wmb();
+
+ /* Enable SA */
+ sa_entry->sa_valid = 1;
+}
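
A worked example of the key split above, assuming AES-128-GCM (rfc4106, 160-bit alg_key_len):

	/* alg_key_len = 160 bits -> 20 bytes
	 *   key_len = 20 - 4 = 16      -> CN10K_IPSEC_SA_AES_KEY_LEN_128
	 *   cipher_key[0..15]           = key bytes, each u64 cpu_to_be64()
	 *   iv_gcm_salt                 = last 4 key bytes, cpu_to_be32()
	 */
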
+
+static int cn10k_ipsec_validate_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload authenticated xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only AES-GCM-ICV16 xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload compressed xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET && x->props.family != AF_INET6) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only IPv4/v6 xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload other than crypto-mode");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only tunnel/transport xfrm states may be offloaded");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only ESP xfrm state may be offloaded");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Encapsulated xfrm state may not be offloaded");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without aead");
+ return -EINVAL;
+ }
+
+ if (x->aead->alg_icv_len != 128) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD ICV length other than 128bit");
+ return -EINVAL;
+ }
+ if (x->aead->alg_key_len != 128 + 32 &&
+ x->aead->alg_key_len != 192 + 32 &&
+ x->aead->alg_key_len != 256 + 32) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with AEAD key length other than 128/192/256bit");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with tfc padding");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states without geniv");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload xfrm states with geniv other than seqiv");
+ return -EINVAL;
+ }
+ return 0;
+}
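
Taken together, these checks admit exactly one shape of SA: ESP with rfc4106-style AES-GCM (seqiv, 128-bit ICV, a 128/192/256-bit key plus 4-byte salt), in transport or tunnel mode with crypto-mode offload, and no authentication or compression algorithm, no ESN, no UDP encapsulation, and no TFC padding.
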
+
+static int cn10k_ipsec_inb_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "xfrm inbound offload not supported");
+ return -EOPNOTSUPP;
+}
+
+static int cn10k_ipsec_outb_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ err = cn10k_ipsec_validate_state(x, extack);
+ if (err)
+ return err;
+
+ pf = netdev_priv(netdev);
+
+ err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN);
+ if (err)
+ return err;
+
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ cn10k_outb_prepare_sa(x, sa_entry);
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error writing outbound SA");
+ qmem_free(pf->dev, sa_info);
+ return err;
+ }
+
+ x->xso.offload_handle = (unsigned long)sa_info;
+ /* Enable the static branch when the first SA is set up */
+ if (!pf->ipsec.outb_sa_count)
+ static_branch_enable(&cn10k_ipsec_sa_enabled);
+ pf->ipsec.outb_sa_count++;
+ return 0;
+}
+
+static int cn10k_ipsec_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
+{
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return cn10k_ipsec_inb_add_state(x, extack);
+ else
+ return cn10k_ipsec_outb_add_state(x, extack);
+}
+
+static void cn10k_ipsec_del_state(struct xfrm_state *x)
+{
+ struct net_device *netdev = x->xso.dev;
+ struct cn10k_tx_sa_s *sa_entry;
+ struct qmem *sa_info;
+ struct otx2_nic *pf;
+ int err;
+
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
+ return;
+
+ pf = netdev_priv(netdev);
+
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ sa_entry = (struct cn10k_tx_sa_s *)sa_info->base;
+ memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s));
+ /* Disable SA in CPT h/w */
+ sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size();
+ sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF;
+ sa_entry->aop_valid = 1;
+
+ err = cn10k_outb_write_sa(pf, sa_info);
+ if (err)
+ netdev_err(netdev, "Error (%d) deleting SA\n", err);
+
+ x->xso.offload_handle = 0;
+ qmem_free(pf->dev, sa_info);
+
+ /* If no SAs remain, update netdev features for a potential change
+ * in NETIF_F_HW_ESP.
+ */
+ if (!--pf->ipsec.outb_sa_count)
+ queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work);
+}
+
+static bool cn10k_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ if (x->props.family == AF_INET) {
+ /* Offload with IPv4 options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+ } else {
+ /* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
+ return true;
+}
+
+static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = cn10k_ipsec_add_state,
+ .xdo_dev_state_delete = cn10k_ipsec_del_state,
+ .xdo_dev_offload_ok = cn10k_ipsec_offload_ok,
+};
+
+static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
+{
+ struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec,
+ sa_work);
+ struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec);
+
+ /* Disable the static branch when no SAs remain */
+ static_branch_disable(&cn10k_ipsec_sa_enabled);
+ rtnl_lock();
+ netdev_update_features(pf->netdev);
+ rtnl_unlock();
+}
+
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ /* IPsec offload is supported only on select cn10k parts */
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return -EOPNOTSUPP;
+
+ /* Initialize CPT for outbound ipsec offload */
+ if (enable)
+ return cn10k_outb_cpt_init(netdev);
+
+ /* Don't do CPT cleanup if SA installed */
+ if (pf->ipsec.outb_sa_count) {
+ netdev_err(pf->netdev, "SA installed on this device\n");
+ return -EBUSY;
+ }
+
+ return cn10k_outb_cpt_clean(pf);
+}
+
+int cn10k_ipsec_init(struct net_device *netdev)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ u32 sa_size;
+
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return 0;
+
+ /* Each SA entry is rounded up to a multiple of 128 bytes */
+ sa_size = ALIGN(sizeof(struct cn10k_tx_sa_s), OTX2_ALIGN);
+ pf->ipsec.sa_size = sa_size;
+
+ INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler);
+ pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0);
+ if (!pf->ipsec.sa_workq) {
+ netdev_err(pf->netdev, "SA alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ /* Set xfrm device ops */
+ netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops;
+ netdev->hw_features |= NETIF_F_HW_ESP;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+ cn10k_cpt_device_set_unavailable(pf);
+ return 0;
+}
+EXPORT_SYMBOL(cn10k_ipsec_init);
+
+void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+ if (!is_dev_support_ipsec_offload(pf->pdev))
+ return;
+
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ return;
+
+ if (pf->ipsec.sa_workq) {
+ destroy_workqueue(pf->ipsec.sa_workq);
+ pf->ipsec.sa_workq = NULL;
+ }
+
+ cn10k_outb_cpt_clean(pf);
+}
+EXPORT_SYMBOL(cn10k_ipsec_clean);
+
+static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x,
+ struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct iphdr *iph;
+ u8 *src;
+
+ src = (u8 *)skb->data + ETH_HLEN;
+
+ if (x->props.family == AF_INET) {
+ iph = (struct iphdr *)src;
+ return ntohs(iph->tot_len);
+ }
+
+ ipv6h = (struct ipv6hdr *)src;
+ return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr);
+}
+
+/* Prepare CPT and NIX SQE scatter/gather subdescriptor structure.
+ * The NIX and CPT SG structures are the same size.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ struct cpt_sg_s *cpt_sg = NULL;
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 dma_addr, *iova = NULL;
+ u64 *cpt_iova = NULL;
+ u16 *sg_lens = NULL;
+ int seg, len;
+
+ sq->sg[sq->head].num_segs = 0;
+ cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size);
+
+ for (seg = 0; seg < num_segs; seg++) {
+ if ((seg % MAX_SEGS_PER_SG) == 0) {
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 0;
+ sg_lens = (void *)sg;
+ iova = (void *)sg + sizeof(*sg);
+ /* The next subdc always starts at a 16-byte boundary,
+ * so whether sg->segs ends up 2 or 3, offset advances
+ * by a multiple of 16 bytes.
+ */
+ if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
+ *offset += sizeof(*sg) + (3 * sizeof(u64));
+ else
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ cpt_sg += (seg / MAX_SEGS_PER_SG) * 4;
+ cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg);
+ }
+ dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
+ if (dma_mapping_error(pfvf->dev, dma_addr))
+ return false;
+
+ sg_lens[seg % MAX_SEGS_PER_SG] = len;
+ sg->segs++;
+ *iova++ = dma_addr;
+ *cpt_iova++ = dma_addr;
+
+ /* Save DMA mapping info for later unmapping */
+ sq->sg[sq->head].dma_addr[seg] = dma_addr;
+ sq->sg[sq->head].size[seg] = len;
+ sq->sg[sq->head].num_segs++;
+
+ *cpt_sg = *(struct cpt_sg_s *)sg;
+ cpt_sg->rsvd_63_50 = 0;
+ }
+
+ sq->sg[sq->head].skb = (u64)skb;
+ return true;
+}
+
+static u16 cn10k_ipsec_get_param1(u8 iv_offset)
+{
+ u16 param1_val;
+
+ /* Set Crypto mode, disable L3/L4 checksum */
+ param1_val = CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM |
+ CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM;
+ param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT;
+ return param1_val;
+}
+
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ struct cpt_inst_s inst;
+ struct cpt_res_s *res;
+ struct xfrm_state *x;
+ struct qmem *sa_info;
+ dma_addr_t dptr_iova;
+ struct sec_path *sp;
+ u8 encap_offset;
+ u8 auth_offset;
+ u8 gthr_size;
+ u8 iv_offset;
+ u16 dlen;
+
+ /* Check for IPSEC offload enabled */
+ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED))
+ goto drop;
+
+ sp = skb_sec_path(skb);
+ if (unlikely(!sp->len))
+ goto drop;
+
+ x = xfrm_input_state(skb);
+ if (unlikely(!x))
+ goto drop;
+
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL)
+ goto drop;
+
+ dlen = cn10k_ipsec_get_ip_data_len(x, skb);
+ if (dlen == 0 && netif_msg_tx_err(pf)) {
+ netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n");
+ goto drop;
+ }
+
+ /* Check for valid SA context */
+ sa_info = (struct qmem *)x->xso.offload_handle;
+ if (!sa_info)
+ goto drop;
+
+ memset(&inst, 0, sizeof(struct cpt_inst_s));
+
+ /* Get authentication offset */
+ if (x->props.family == AF_INET)
+ auth_offset = sizeof(struct iphdr);
+ else
+ auth_offset = sizeof(struct ipv6hdr);
+
+ /* IV offset is after ESP header */
+ iv_offset = auth_offset + sizeof(struct ip_esp_hdr);
+ /* Encap will start after IV */
+ encap_offset = iv_offset + GCM_RFC4106_IV_SIZE;
+
+ /* CPT Instruction word-1 */
+ res = (struct cpt_res_s *)(sq->cpt_resp->base + (64 * sq->head));
+ res->compcode = 0;
+ inst.res_addr = sq->cpt_resp->iova + (64 * sq->head);
+
+ /* CPT Instruction word-2 */
+ inst.rvu_pf_func = pf->pcifunc;
+
+ /* CPT Instruction word-3:
+ * Set QORD to force CPT_RES_S write completion
+ */
+ inst.qord = 1;
+
+ /* CPT Instruction word-4 */
+ /* inst.dlen should not include ICV length */
+ inst.dlen = dlen + ETH_HLEN - (x->aead->alg_icv_len / 8);
+ inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC;
+ inst.param1 = cn10k_ipsec_get_param1(iv_offset);
+
+ inst.param2 = encap_offset <<
+ CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT;
+ inst.param2 |= (u16)auth_offset <<
+ CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT;
+
+ /* CPT Instruction word-5 */
+ gthr_size = DIV_ROUND_UP(num_segs, MAX_SEGS_PER_SG);
+
+ gthr_size &= 0xF;
+ dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2)));
+ inst.dptr = dptr_iova | ((u64)gthr_size << 60);
+
+ /* CPT Instruction word-6 */
+ inst.rptr = inst.dptr;
+
+ /* CPT Instruction word-7 */
+ inst.cptr = sa_info->iova;
+ inst.ctx_val = 1;
+ inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP;
+
+ /* CPT Instruction word-0 */
+ inst.nixtxl = (size / 16) - 1;
+ inst.dat_offset = ETH_HLEN;
+ inst.nixtx_offset = sq->sqe_size;
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* Finally Flush the CPT instruction */
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+ cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
+ return true;
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
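
A worked example of the instruction fields above (hypothetical packet; AES-GCM always carries a 128-bit, i.e. 16-byte, ICV):

	/* IPv4 ESP packet, tot_len = 1500, num_segs = 4:
	 *   dlen      = 1500 + 14 (ETH_HLEN) - 16 (ICV)   = 1498
	 *   gthr_size = DIV_ROUND_UP(4, 3)                 = 2 gather comps
	 *   dptr      = this entry's sqe_ring iova | (2ULL << 60)
	 */
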
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
new file mode 100644
index 000000000000..9965df0faa3e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell IPSEC offload driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#ifndef CN10K_IPSEC_H
+#define CN10K_IPSEC_H
+
+#include <linux/types.h>
+
+DECLARE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
+/* CPT instruction size in bytes */
+#define CN10K_CPT_INST_SIZE 64
+
+/* CPT instruction (CPT_INST_S) queue length */
+#define CN10K_CPT_INST_QLEN 8200
+
+/* CPT instruction queue size passed to HW is in units of
+ * 40*CPT_INST_S messages.
+ */
+#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40)
+
+/* CPT needs 320 free entries */
+#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE)
+#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40)
+
+/* CPT instruction queue length in bytes */
+#define CN10K_CPT_INST_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \
+ CN10K_CPT_INST_QLEN_EXTRA_BYTES)
+
+/* CPT instruction group queue length in bytes */
+#define CN10K_CPT_INST_GRP_QLEN_BYTES \
+ ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16)
+
+/* CPT FC length in bytes */
+#define CN10K_CPT_Q_FC_LEN 128
+
+/* Default CPT engine group for ipsec offload */
+#define CN10K_DEF_CPT_IPSEC_EGRP 1
+
+/* CN10K CPT LF registers */
+#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT)
+#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10)
+#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40)
+#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0)
+#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100)
+#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110)
+#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120)
+#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3)
+#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510)
+
+/* IPSEC Instruction opcodes */
+#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL
+#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL
+#define CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC 0x2AUL
+
+enum cn10k_cpt_comp_e {
+ CN10K_CPT_COMP_E_NOTDONE = 0x00,
+ CN10K_CPT_COMP_E_GOOD = 0x01,
+ CN10K_CPT_COMP_E_FAULT = 0x02,
+ CN10K_CPT_COMP_E_HWERR = 0x04,
+ CN10K_CPT_COMP_E_INSTERR = 0x05,
+ CN10K_CPT_COMP_E_WARN = 0x06,
+ CN10K_CPT_COMP_E_MASK = 0x3F
+};
+
+struct cn10k_cpt_inst_queue {
+ u8 *vaddr;
+ u8 *real_vaddr;
+ dma_addr_t dma_addr;
+ dma_addr_t real_dma_addr;
+ u32 size;
+};
+
+enum cn10k_cpt_hw_state_e {
+ CN10K_CPT_HW_UNAVAILABLE,
+ CN10K_CPT_HW_AVAILABLE,
+ CN10K_CPT_HW_IN_USE
+};
+
+struct cn10k_ipsec {
+ /* Outbound CPT */
+ u64 io_addr;
+ atomic_t cpt_state;
+ struct cn10k_cpt_inst_queue iq;
+
+ /* SA info */
+ u32 sa_size;
+ u32 outb_sa_count;
+ struct work_struct sa_work;
+ struct workqueue_struct *sa_workq;
+};
+
+/* CN10K IPSEC Security Association (SA) */
+/* SA direction */
+#define CN10K_IPSEC_SA_DIR_INB 0
+#define CN10K_IPSEC_SA_DIR_OUTB 1
+/* SA protocol */
+#define CN10K_IPSEC_SA_IPSEC_PROTO_AH 0
+#define CN10K_IPSEC_SA_IPSEC_PROTO_ESP 1
+/* SA Encryption Type */
+#define CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM 5
+/* SA IPSEC mode Transport/Tunnel */
+#define CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT 0
+#define CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL 1
+/* SA AES Key Length */
+#define CN10K_IPSEC_SA_AES_KEY_LEN_128 1
+#define CN10K_IPSEC_SA_AES_KEY_LEN_192 2
+#define CN10K_IPSEC_SA_AES_KEY_LEN_256 3
+/* IV Source */
+#define CN10K_IPSEC_SA_IV_SRC_COUNTER 0
+#define CN10K_IPSEC_SA_IV_SRC_PACKET 3
+
+struct cn10k_tx_sa_s {
+ u64 esn_en : 1; /* W0 */
+ u64 rsvd_w0_1_8 : 8;
+ u64 hw_ctx_off : 7;
+ u64 ctx_id : 16;
+ u64 rsvd_w0_32_47 : 16;
+ u64 ctx_push_size : 7;
+ u64 rsvd_w0_55 : 1;
+ u64 ctx_hdr_size : 2;
+ u64 aop_valid : 1;
+ u64 rsvd_w0_59 : 1;
+ u64 ctx_size : 4;
+ u64 w1; /* W1 */
+ u64 sa_valid : 1; /* W2 */
+ u64 sa_dir : 1;
+ u64 rsvd_w2_2_3 : 2;
+ u64 ipsec_mode : 1;
+ u64 ipsec_protocol : 1;
+ u64 aes_key_len : 2;
+ u64 enc_type : 3;
+ u64 rsvd_w2_11_19 : 9;
+ u64 iv_src : 2;
+ u64 rsvd_w2_22_31 : 10;
+ u64 rsvd_w2_32_63 : 32;
+ u64 w3; /* W3 */
+ u8 cipher_key[32]; /* W4 - W7 */
+ u32 rsvd_w8_0_31; /* W8 : IV */
+ u32 iv_gcm_salt;
+ u64 rsvd_w9_w30[22]; /* W9 - W30 */
+ u64 hw_ctx[6]; /* W31 - W36 */
+};
+
+/* CPT instruction parameter-1 */
+#define CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM 0x1
+#define CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM 0x2
+#define CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE 0x20
+#define CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT 8
+
+/* CPT instruction parameter-2 */
+#define CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT 0
+#define CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT 8
+
+/* CPT Instruction Structure */
+struct cpt_inst_s {
+ u64 nixtxl : 3; /* W0 */
+ u64 doneint : 1;
+ u64 rsvd_w0_4_15 : 12;
+ u64 dat_offset : 8;
+ u64 ext_param1 : 8;
+ u64 nixtx_offset : 20;
+ u64 rsvd_w0_52_63 : 12;
+ u64 res_addr; /* W1 */
+ u64 tag : 32; /* W2 */
+ u64 tt : 2;
+ u64 grp : 10;
+ u64 rsvd_w2_44_47 : 4;
+ u64 rvu_pf_func : 16;
+ u64 qord : 1; /* W3 */
+ u64 rsvd_w3_1_2 : 2;
+ u64 wqe_ptr : 61;
+ u64 dlen : 16; /* W4 */
+ u64 param2 : 16;
+ u64 param1 : 16;
+ u64 opcode_major : 8;
+ u64 opcode_minor : 8;
+ u64 dptr; /* W5 */
+ u64 rptr; /* W6 */
+ u64 cptr : 60; /* W7 */
+ u64 ctx_val : 1;
+ u64 egrp : 3;
+};
+
+/* CPT Instruction Result Structure */
+struct cpt_res_s {
+ u64 compcode : 7; /* W0 */
+ u64 doneint : 1;
+ u64 uc_compcode : 8;
+ u64 uc_info : 48;
+ u64 esn; /* W1 */
+};
+
+/* CPT SG structure */
+struct cpt_sg_s {
+ u64 seg1_size : 16;
+ u64 seg2_size : 16;
+ u64 seg3_size : 16;
+ u64 segs : 2;
+ u64 rsvd_63_50 : 14;
+};
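
A hypothetical compile-time check (not in the patch) would make the intended hardware word counts of these bitfield structs explicit:

	static_assert(sizeof(struct cpt_inst_s) == CN10K_CPT_INST_SIZE);
	static_assert(sizeof(struct cpt_res_s) == 2 * sizeof(u64));
	static_assert(sizeof(struct cpt_sg_s) == sizeof(u64));
	static_assert(sizeof(struct cn10k_tx_sa_s) == 37 * sizeof(u64)); /* W0-W36 */
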
+
+/* CPT LF_INPROG Register */
+#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0)
+#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32)
+#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40)
+
+/* CPT LF_Q_GRP_PTR Register */
+#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0)
+#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32)
+
+/* CPT LF_Q_BASE Register */
+#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7)
+
+/* CPT LF_Q_SIZE Register */
+#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
+
+/* CPT LF CTX Flush Register */
+#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0)
+
+#ifdef CONFIG_XFRM_OFFLOAD
+int cn10k_ipsec_init(struct net_device *netdev);
+void cn10k_ipsec_clean(struct otx2_nic *pf);
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable);
+bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset);
+bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size);
+#else
+static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev)
+{
+ return 0;
+}
+
+static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf)
+{
+}
+
+static inline __maybe_unused
+int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
+{
+ return 0;
+}
+
+static inline bool __maybe_unused
+otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ struct sk_buff *skb, int num_segs, int *offset)
+{
+ return true;
+}
+
+static inline bool __maybe_unused
+cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
+ struct otx2_snd_queue *sq, struct sk_buff *skb,
+ int num_segs, int size)
+{
+ return true;
+}
+#endif
+#endif // CN10K_IPSEC_H
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 6cc7a78968fc..f3b9daffaec3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -133,9 +133,7 @@ static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
return "SA";
default:
return "Unknown";
- };
-
- return "Unknown";
+ }
}
static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 523ecb798a7a..2b49bfec7869 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -10,12 +10,19 @@
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bitfield.h>
+#include <linux/dcbnl.h>
+#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"
+static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)
+{
+ return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en;
+}
+
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
@@ -964,6 +971,29 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
+ /* Allocate memory for the NIX SQE (which includes the NIX SG) and
+ * the CPT SG. NIX and CPT SG structures are the same size; the CPT
+ * SG region is sized like a NIX SQE to keep base addresses aligned.
+ * Layout of a NIX SQE and CPT SG entry:
+ * -----------------------------
+ * | CPT Scatter Gather |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ * | NIX SQE |
+ * | (SQE SIZE) |
+ * | |
+ * -----------------------------
+ */
+ err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt,
+ sq->sqe_size * 2);
+ if (err)
+ return err;
+
+ err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64);
+ if (err)
+ return err;
+
if (qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
TSO_HEADER_SIZE);
@@ -1722,18 +1752,43 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
-#ifdef CONFIG_DCB
- req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
- req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
-#else
- req->chan_cnt = 1;
- req->bpid_per_chan = 0;
-#endif
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+ }
return otx2_sync_mbox_msg(&pfvf->mbox);
}
EXPORT_SYMBOL(otx2_nix_config_bp);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable)
+{
+ struct nix_bp_cfg_req *req;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->chan_base = 0;
+ if (otx2_is_pfc_enabled(pfvf)) {
+ req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
+ req->bpid_per_chan = 1;
+ } else {
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+ }
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+EXPORT_SYMBOL(otx2_nix_cpt_config_bp);
+
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp)
@@ -1947,3 +2002,48 @@ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ const skb_frag_t *frag;
+ struct page *page;
+ int offset;
+
+ /* Crypto hardware needs write access to the buffers for ipsec crypto offload */
+ if (unlikely(xfrm_offload(skb))) {
+ dir = DMA_BIDIRECTIONAL;
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ }
+
+ /* First segment is always skb->data */
+ if (!seg) {
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ *len = skb_headlen(skb);
+ } else {
+ frag = &skb_shinfo(skb)->frags[seg - 1];
+ page = skb_frag_page(frag);
+ offset = skb_frag_off(frag);
+ *len = skb_frag_size(frag);
+ }
+ return otx2_dma_map_page(pfvf, page, offset, *len, dir);
+}
+
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
+{
+ enum dma_data_direction dir = DMA_TO_DEVICE;
+ struct sk_buff *skb = NULL;
+ int seg;
+
+ skb = (struct sk_buff *)sg->skb;
+ if (unlikely(xfrm_offload(skb)))
+ dir = DMA_BIDIRECTIONAL;
+
+ for (seg = 0; seg < sg->num_segs; seg++) {
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
+ sg->size[seg], dir);
+ }
+ sg->num_segs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 566848663fea..65814e3dc93f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -30,6 +30,7 @@
#include <rvu_trace.h>
#include "qos.h"
#include "rep.h"
+#include "cn10k_ipsec.h"
/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE 0x20
@@ -40,6 +41,7 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7
@@ -55,6 +57,9 @@
#define NIX_PF_PFC_PRIO_MAX 8
#endif
+/* Number of segments per SG structure */
+#define MAX_SEGS_PER_SG 3
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -448,6 +453,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
#define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18)
#define OTX2_FLAG_PORT_UP BIT_ULL(19)
+#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED BIT_ULL(20)
u64 flags;
u64 *cq_op_addr;
@@ -499,9 +505,9 @@ struct otx2_nic {
/* Devlink */
struct otx2_devlink *dl;
-#ifdef CONFIG_DCB
/* PFC */
u8 pfc_en;
+#ifdef CONFIG_DCB
u8 *queue_to_pfc_map;
u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
@@ -522,6 +528,9 @@ struct otx2_nic {
u16 rep_pf_map[RVU_MAX_REP];
u16 esw_mode;
#endif
+
+ /* Inline ipsec */
+ struct cn10k_ipsec ipsec;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -572,6 +581,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}
+static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
+{
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
+ (pdev->revision & 0xFF) == 0x54)
+ return true;
+
+ return false;
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -621,6 +639,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
case BLKTYPE_NPA:
blkaddr = BLKADDR_NPA;
break;
+ case BLKTYPE_CPT:
+ blkaddr = BLKADDR_CPT0;
+ break;
default:
blkaddr = BLKADDR_RVUM;
break;
@@ -985,6 +1006,7 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
@@ -1149,4 +1171,8 @@ static inline int mcam_entry_cmp(const void *a, const void *b)
{
return *(u16 *)a - *(u16 *)b;
}
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+ struct sk_buff *skb, int seg, int *len);
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 294fba58b670..f110dfa42360 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -435,6 +435,9 @@ process_pfc:
return err;
}
+ /* Disable backpressure on NIX-CPT by default */
+ otx2_nix_cpt_config_bp(pfvf, false);
+
/* Request Per channel Bpids */
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, true);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e310f99b1736..e1dde93e8af8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -26,6 +26,7 @@
#include "cn10k.h"
#include "qos.h"
#include <rvu_trace.h>
+#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicpf"
#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
@@ -1484,6 +1485,8 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
if (!sq->sqe)
continue;
qmem_free(pf->dev, sq->sqe);
+ qmem_free(pf->dev, sq->sqe_ring);
+ qmem_free(pf->dev, sq->cpt_resp);
qmem_free(pf->dev, sq->tso_hdrs);
kfree(sq->sg);
kfree(sq->sqb_ptrs);
@@ -1551,6 +1554,9 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
if (err)
goto err_free_npa_lf;
+ /* Disable backpressure on NIX-CPT by default */
+ otx2_nix_cpt_config_bp(pf, false);
+
/* Enable backpressure for CGX mapped PF/VFs */
if (!is_otx2_lbkvf(pf->pdev))
otx2_nix_config_bp(pf, true);
@@ -2273,6 +2279,10 @@ static int otx2_set_features(struct net_device *netdev,
return otx2_enable_rxvlan(pf,
features & NETIF_F_HW_VLAN_CTAG_RX);
+ if (changed & NETIF_F_HW_ESP)
+ return cn10k_ipsec_ethtool_init(netdev,
+ features & NETIF_F_HW_ESP);
+
return otx2_handle_ntuple_tc_features(netdev, features);
}
@@ -3162,10 +3172,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* reset CGX/RPM MAC stats */
otx2_reset_mac_stats(pf);
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_mcs_free;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_mcs_free;
+ goto err_ipsec_clean;
}
err = otx2_wq_init(pf);
@@ -3206,6 +3220,8 @@ err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(pf);
err_mcs_free:
cn10k_mcs_free(pf);
err_del_mcam_entries:
@@ -3403,6 +3419,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_unregister_dl(pf);
unregister_netdev(netdev);
+ cn10k_ipsec_clean(pf);
cn10k_mcs_free(pf);
otx2_sriov_disable(pf->pdev);
otx2_sriov_vfcfg_cleanup(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 04bc06a80e23..224cef938927 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -11,6 +11,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
+#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -26,12 +27,25 @@
*/
#define PTP_SYNC_SEC_OFFSET 34
+DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
bool *need_xdp_flush);
+static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq,
+ struct sk_buff *skb)
+{
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ sq->sqe_base = sq->sqe_ring->base + sq->sqe_size +
+ (sq->head * (sq->sqe_size * 2));
+ else
+ sq->sqe_base = sq->sqe->base;
+}
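
This relies on the double-width entries that otx2_sq_init now allocates in sq->sqe_ring; a sketch of the per-head layout, with names as in the patch:

	/* entry = sq->sqe_ring->base + head * (sqe_size * 2)
	 *
	 *   entry + 0        : CPT SG half (filled by otx2_sqe_add_sg_ipsec)
	 *   entry + sqe_size : NIX SQE half (sq->sqe_base for ipsec skbs)
	 */
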
+
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq)
{
@@ -80,38 +94,6 @@ static unsigned int frag_num(unsigned int i)
#endif
}
-static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
- struct sk_buff *skb, int seg, int *len)
-{
- const skb_frag_t *frag;
- struct page *page;
- int offset;
-
- /* First segment is always skb->data */
- if (!seg) {
- page = virt_to_page(skb->data);
- offset = offset_in_page(skb->data);
- *len = skb_headlen(skb);
- } else {
- frag = &skb_shinfo(skb)->frags[seg - 1];
- page = skb_frag_page(frag);
- offset = skb_frag_off(frag);
- *len = skb_frag_size(frag);
- }
- return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
-}
-
-static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
-{
- int seg;
-
- for (seg = 0; seg < sg->num_segs; seg++) {
- otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
- sg->size[seg], DMA_TO_DEVICE);
- }
- sg->num_segs = 0;
-}
-
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_snd_queue *sq,
struct nix_cqe_tx_s *cqe)
@@ -625,7 +607,6 @@ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
sq->head &= (sq->sqe_cnt - 1);
}
-#define MAX_SEGS_PER_SG 3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct sk_buff *skb, int num_segs, int *offset)
@@ -1161,6 +1142,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_nic *pfvf = dev;
+ bool ret;
/* Check if there is enough room between producer
* and consumer index.
@@ -1177,6 +1159,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
/* If SKB doesn't fit in a single SQE, linearize it.
* TODO: Consider adding JUMP descriptor instead.
*/
+
if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
if (__skb_linearize(skb)) {
dev_kfree_skb_any(skb);
@@ -1196,6 +1179,9 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
return true;
}
+ /* Set sqe base address */
+ otx2_sq_set_sqe_base(sq, skb);
+
/* Set SQE's SEND_HDR.
* Do not clear the first 64bit as it contains constant info.
*/
@@ -1208,7 +1194,13 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
otx2_sqe_add_ext(pfvf, sq, skb, &offset);
/* Add SG subdesc with data frags */
- if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset);
+ else
+ ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset);
+
+ if (!ret) {
otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
return false;
}
@@ -1217,11 +1209,15 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
sqe_hdr->sizem1 = (offset / 16) - 1;
+ if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
+ (xfrm_offload(skb)))
+ return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs,
+ offset);
+
netdev_tx_sent_queue(txq, skb->len);
/* Flush SQE to HW */
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
-
return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index e1db5f961877..d23810963fdb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -101,6 +101,9 @@ struct otx2_snd_queue {
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
+ /* SQE ring and CPT response queue for Inline IPSEC */
+ struct qmem *sqe_ring;
+ struct qmem *cpt_resp;
} ____cacheline_aligned_in_smp;
enum cq_type {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 839fc77c11b2..e926c6ce96cf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -14,6 +14,7 @@
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicvf"
#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
@@ -693,10 +694,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->bus->number, n);
}
+ err = cn10k_ipsec_init(netdev);
+ if (err)
+ goto err_ptp_destroy;
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_ptp_destroy;
+ goto err_ipsec_clean;
}
err = otx2_vf_wq_init(vf);
@@ -730,6 +735,8 @@ err_shutdown_tc:
otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ipsec_clean:
+ cn10k_ipsec_clean(vf);
err_ptp_destroy:
otx2_ptp_destroy(vf);
err_detach_rsrc:
@@ -782,6 +789,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
+ cn10k_ipsec_clean(vf);
otx2_ptp_destroy(vf);
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 22ca6ee9665e..440a4c42b405 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -280,6 +280,7 @@ prestera_mac_select_pcs(struct phylink_config *config,
}
static void prestera_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct prestera_port *port = container_of(pcs, struct prestera_port,
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 6c683a12d5aa..415d784de741 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -15,6 +15,7 @@
#include <linux/u64_stats_sync.h>
#include <net/dsa.h>
#include <net/page_pool/helpers.h>
+#include <net/pkt_cls.h>
#include <uapi/linux/ppp_defs.h>
#define AIROHA_MAX_NUM_GDM_PORTS 1
@@ -23,8 +24,12 @@
#define AIROHA_MAX_NUM_XSI_RSTS 5
#define AIROHA_MAX_MTU 2000
#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_QOS_CHANNELS 4
+#define AIROHA_NUM_QOS_QUEUES 8
#define AIROHA_NUM_TX_RING 32
#define AIROHA_NUM_RX_RING 32
+#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
+ AIROHA_NUM_QOS_CHANNELS)
#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
#define AIROHA_FE_MC_MAX_VLAN_PORT 16
#define AIROHA_NUM_TX_IRQ 2
@@ -40,6 +45,9 @@
#define PSE_RSV_PAGES 128
#define PSE_QUEUE_RSV_PAGES 64
+#define QDMA_METER_IDX(_n) ((_n) & 0xff)
+#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
+
/* FE */
#define PSE_BASE 0x0100
#define CSR_IFC_BASE 0x0200
@@ -541,9 +549,24 @@
#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
+#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
+
#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
+#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
+#define CNTR_EN_MASK BIT(31)
+#define CNTR_ALL_CHAN_EN_MASK BIT(30)
+#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
+#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
+#define CNTR_SRC_MASK GENMASK(27, 24)
+#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
+#define CNTR_CHAN_MASK GENMASK(7, 3)
+#define CNTR_QUEUE_MASK GENMASK(2, 0)
+
+#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
+
#define REG_LMGR_INIT_CFG 0x1000
#define LMGR_INIT_START BIT(31)
#define LMGR_SRAM_MODE_MASK BIT(30)
@@ -565,13 +588,34 @@
#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
+#define TRTCM_PARAM_RW_MASK BIT(31)
+#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
+#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
+#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
+#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
+#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
+
+#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
+#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
+#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
+
#define REG_TXWRR_MODE_CFG 0x1020
#define TWRR_WEIGHT_SCALE_MASK BIT(31)
#define TWRR_WEIGHT_BASE_MASK BIT(3)
+#define REG_TXWRR_WEIGHT_CFG 0x1024
+#define TWRR_RW_CMD_MASK BIT(31)
+#define TWRR_RW_CMD_DONE BIT(30)
+#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
+#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
+#define TWRR_VALUE_MASK GENMASK(15, 0)
+
#define REG_PSE_BUF_USAGE_CFG 0x1028
#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
+#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
+#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
+
#define REG_GLB_TRTCM_CFG 0x1080
#define GLB_TRTCM_EN_MASK BIT(31)
#define GLB_TRTCM_MODE_MASK BIT(30)
@@ -720,6 +764,40 @@ enum {
FE_PSE_PORT_DROP = 0xf,
};
+enum tx_sched_mode {
+ TC_SCH_WRR8,
+ TC_SCH_SP,
+ TC_SCH_WRR7,
+ TC_SCH_WRR6,
+ TC_SCH_WRR5,
+ TC_SCH_WRR4,
+ TC_SCH_WRR3,
+ TC_SCH_WRR2,
+};
+
+enum trtcm_param_type {
+ TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
+ TRTCM_TOKEN_RATE_MODE,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ TRTCM_BUCKET_COUNTER_MODE,
+};
+
+enum trtcm_mode_type {
+ TRTCM_COMMIT_MODE,
+ TRTCM_PEAK_MODE,
+};
+
+enum trtcm_param {
+ TRTCM_TICK_SEL = BIT(0),
+ TRTCM_PKT_MODE = BIT(1),
+ TRTCM_METER_MODE = BIT(2),
+};
+
+#define MIN_TOKEN_SIZE 4096
+#define MAX_TOKEN_SIZE_OFFSET 17
+#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
+#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
+
struct airoha_queue_entry {
union {
void *buf;
@@ -810,6 +888,12 @@ struct airoha_gdm_port {
int id;
struct airoha_hw_stats stats;
+
+ DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
+
+ /* qos stats counters */
+ u64 cpu_tx_packets;
+ u64 fwd_tx_packets;
};
struct airoha_eth {
@@ -1789,6 +1873,10 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
}
+ /* xmit ring drop default setting */
+ airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
+ TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
+
airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
@@ -1955,6 +2043,27 @@ static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
}
+static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
+ /* Tx-cpu transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
+ airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ /* Tx-fwd transferred count */
+ airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
+ airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1),
+ CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+ CNTR_ALL_DSCP_RING_EN_MASK |
+ FIELD_PREP(CNTR_SRC_MASK, 1) |
+ FIELD_PREP(CNTR_CHAN_MASK, i));
+ }
+}
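
airoha_qdma_init_qos_stats() pairs two hardware counters per QoS channel: the even counter index tracks CPU-originated frames and the odd one hardware-forwarded frames (which is why the Tx-fwd configuration must target index (i << 1) + 1). A runnable check of that index mapping:

#include <stdio.h>

#define NUM_QOS_CHANNELS 4	/* AIROHA_NUM_QOS_CHANNELS */

int main(void)
{
	for (int ch = 0; ch < NUM_QOS_CHANNELS; ch++)
		printf("channel %d: cpu cntr %d, fwd cntr %d\n",
		       ch, ch << 1, (ch << 1) + 1);
	return 0;
}
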
+
static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
int i;
@@ -2005,6 +2114,7 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
+ airoha_qdma_init_qos_stats(qdma);
return 0;
}
@@ -2138,17 +2248,14 @@ static void airoha_hw_cleanup(struct airoha_qdma *qdma)
if (!qdma->q_rx[i].ndesc)
continue;
- napi_disable(&qdma->q_rx[i].napi);
netif_napi_del(&qdma->q_rx[i].napi);
airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
if (qdma->q_rx[i].page_pool)
page_pool_destroy(qdma->q_rx[i].page_pool);
}
- for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
- napi_disable(&qdma->q_tx_irq[i].napi);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
netif_napi_del(&qdma->q_tx_irq[i].napi);
- }
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
if (!qdma->q_tx[i].ndesc)
@@ -2173,6 +2280,21 @@ static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
}
}
+static void airoha_qdma_stop_napi(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ napi_disable(&qdma->q_tx_irq[i].napi);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ napi_disable(&qdma->q_rx[i].napi);
+ }
+}
+
static void airoha_update_hw_stats(struct airoha_gdm_port *port)
{
struct airoha_eth *eth = port->qdma->eth;
@@ -2413,21 +2535,44 @@ static void airoha_dev_get_stats64(struct net_device *dev,
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
+static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ int queue, channel;
+
+ /* For DSA devices select the QoS channel according to the DSA user
+ * port index; otherwise fall back to the port id. Select the QoS
+ * queue based on the skb priority.
+ */
+ channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+ queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */
+ queue = channel * AIROHA_NUM_QOS_QUEUES + queue;
+
+ return queue < dev->num_tx_queues ? queue : 0;
+}
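
A runnable check of the selection arithmetic above: the global txq index is channel * 8 plus (priority - 1) mod 8, and because the subtraction is unsigned, skb->priority 0 wraps to the last queue of its channel:

#include <stdio.h>

#define NUM_QOS_CHANNELS 4
#define NUM_QOS_QUEUES   8

static unsigned int select_queue(unsigned int channel, unsigned int prio)
{
	unsigned int queue = (prio - 1) % NUM_QOS_QUEUES;

	channel %= NUM_QOS_CHANNELS;
	return channel * NUM_QOS_QUEUES + queue;
}

int main(void)
{
	printf("chan 1, prio 3 -> txq %u\n", select_queue(1, 3)); /* 10 */
	printf("chan 0, prio 0 -> txq %u\n", select_queue(0, 0)); /* 7 */
	return 0;
}
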
+
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct skb_shared_info *sinfo = skb_shinfo(skb);
struct airoha_gdm_port *port = netdev_priv(dev);
- u32 msg0 = 0, msg1, len = skb_headlen(skb);
- int i, qid = skb_get_queue_mapping(skb);
+ u32 msg0, msg1, len = skb_headlen(skb);
struct airoha_qdma *qdma = port->qdma;
u32 nr_frags = 1 + sinfo->nr_frags;
struct netdev_queue *txq;
struct airoha_queue *q;
void *data = skb->data;
+ int i, qid;
u16 index;
u8 fport;
+ qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
+ msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
+ qid / AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
+ qid % AIROHA_NUM_QOS_QUEUES);
if (skb->ip_summed == CHECKSUM_PARTIAL)
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
@@ -2597,13 +2742,399 @@ airoha_ethtool_get_rmon_stats(struct net_device *dev,
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
+static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
+ int channel, enum tx_sched_mode mode,
+ const u16 *weights, u8 n_weights)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_NUM_TX_RING; i++)
+ airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
+ TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
+
+ for (i = 0; i < n_weights; i++) {
+ u32 status;
+ int err;
+
+ airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
+ TWRR_RW_CMD_MASK |
+ FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
+ FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
+ FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
+ err = read_poll_timeout(airoha_qdma_rr, status,
+ status & TWRR_RW_CMD_DONE,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC,
+ true, port->qdma,
+ REG_TXWRR_WEIGHT_CFG);
+ if (err)
+ return err;
+ }
+
+ airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
+ CHAN_QOS_MODE_MASK(channel),
+ mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
+
+ return 0;
+}
+
+static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
+ int channel)
+{
+ static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
+ ARRAY_SIZE(w));
+}
+
+static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
+ struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
+ enum tx_sched_mode mode = TC_SCH_SP;
+ u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+ int i, nstrict = 0, nwrr, qidx;
+
+ if (p->bands > AIROHA_NUM_QOS_QUEUES)
+ return -EINVAL;
+
+ for (i = 0; i < p->bands; i++) {
+ if (!p->quanta[i])
+ nstrict++;
+ }
+
+ /* this configuration is not supported by the hw */
+ if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
+ return -EINVAL;
+
+ /* The EN7581 SoC supports a fixed QoS band priority scheme where
+ * WRR queues always have lower priority than SP ones,
+ * e.g. WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
+ */
+ nwrr = p->bands - nstrict;
+ qidx = nstrict && nwrr ? nstrict : 0;
+ for (i = 1; i <= p->bands; i++) {
+ if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx)
+ return -EINVAL;
+
+ qidx = i == nwrr ? 0 : qidx + 1;
+ }
+
+ for (i = 0; i < nwrr; i++)
+ w[i] = p->weights[nstrict + i];
+
+ if (!nstrict)
+ mode = TC_SCH_WRR8;
+ else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
+ mode = nstrict + 1;
+
+ return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
+ ARRAY_SIZE(w));
+}
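
A runnable re-implementation of the band validation above, assuming it mirrors the driver logic exactly: with eight bands, two strict bands (zero quanta) followed by six WRR bands, one layout that passes is priomap 1 2 3 4 5 6 7 0.

#include <stdio.h>

#define NUM_QOS_QUEUES 8

static int validate(int bands, const unsigned int *quanta,
		    const int *priomap)
{
	int i, nstrict = 0, nwrr, qidx;

	for (i = 0; i < bands; i++) {
		if (!quanta[i])
			nstrict++;
	}
	if (nstrict == NUM_QOS_QUEUES - 1)
		return -1;	/* not supported by the hw */

	nwrr = bands - nstrict;
	qidx = nstrict && nwrr ? nstrict : 0;
	for (i = 1; i <= bands; i++) {
		if (priomap[i % NUM_QOS_QUEUES] != qidx)
			return -1;
		qidx = i == nwrr ? 0 : qidx + 1;
	}
	return 0;
}

int main(void)
{
	unsigned int quanta[8] = { 0, 0, 4000, 3000, 2000, 1000, 800, 600 };
	int priomap[8] = { 1, 2, 3, 4, 5, 6, 7, 0 };

	printf("accepted: %s\n", validate(8, quanta, priomap) ? "no" : "yes");
	return 0;
}
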
+
+static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
+ int channel,
+ struct tc_ets_qopt_offload *opt)
+{
+ u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL(channel << 1));
+ u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
+ REG_CNTR_VAL((channel << 1) + 1));
+ u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
+ (fwd_tx_packets - port->fwd_tx_packets);
+ _bstats_update(opt->stats.bstats, 0, tx_packets);
+
+ port->cpu_tx_packets = cpu_tx_packets;
+ port->fwd_tx_packets = fwd_tx_packets;
+
+ return 0;
+}
+
+static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
+ struct tc_ets_qopt_offload *opt)
+{
+ int channel;
+
+ if (opt->parent == TC_H_ROOT)
+ return -EINVAL;
+
+ channel = TC_H_MAJ(opt->handle) >> 16;
+ channel = channel % AIROHA_NUM_QOS_CHANNELS;
+
+ switch (opt->command) {
+ case TC_ETS_REPLACE:
+ return airoha_qdma_set_tx_ets_sched(port, channel, opt);
+ case TC_ETS_DESTROY:
+ /* PRIO is default qdisc scheduler */
+ return airoha_qdma_set_tx_prio_sched(port, channel);
+ case TC_ETS_STATS:
+ return airoha_qdma_get_tx_ets_stats(port, channel, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode,
+ u32 *val_low, u32 *val_high)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 val, config = FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+ if (read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr)))
+ return -ETIMEDOUT;
+
+ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
+ if (val_high)
+ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
+
+ return 0;
+}
+
+static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode, u32 val)
+{
+ u32 idx = QDMA_METER_IDX(channel), group = QDMA_METER_GROUP(channel);
+ u32 config = TRTCM_PARAM_RW_MASK |
+ FIELD_PREP(TRTCM_PARAM_TYPE_MASK, param) |
+ FIELD_PREP(TRTCM_METER_GROUP_MASK, group) |
+ FIELD_PREP(TRTCM_PARAM_INDEX_MASK, idx) |
+ FIELD_PREP(TRTCM_PARAM_RATE_TYPE_MASK, mode);
+
+ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
+ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
+
+ return read_poll_timeout(airoha_qdma_rr, val,
+ val & TRTCM_PARAM_RW_DONE_MASK,
+ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
+ qdma, REG_TRTCM_CFG_PARAM(addr));
+}
+
+static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_mode_type mode,
+ bool enable, u32 enable_mask)
+{
+ u32 val;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &val, NULL))
+ return -EINVAL;
+
+ val = enable ? val | enable_mask : val & ~enable_mask;
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, val);
+}
+
+static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma,
+ int channel, u32 addr,
+ enum trtcm_mode_type mode,
+ u32 rate_val, u32 bucket_size)
+{
+ u32 val, config, tick, unit, rate, rate_frac;
+ int err;
+
+ if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE,
+ mode, &config, NULL))
+ return -EINVAL;
+
+ val = airoha_qdma_rr(qdma, addr);
+ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
+ if (config & TRTCM_TICK_SEL)
+ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
+ if (!tick)
+ return -EINVAL;
+
+ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
+ if (!unit)
+ return -EINVAL;
+
+ rate = rate_val / unit;
+ rate_frac = rate_val % unit;
+ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
+ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
+ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
+
+ err = airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_TOKEN_RATE_MODE, mode, rate);
+ if (err)
+ return err;
+
+ val = max_t(u32, bucket_size, MIN_TOKEN_SIZE);
+ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
+
+ return airoha_qdma_set_trtcm_param(qdma, channel, addr,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ mode, val);
+}
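
The rate programmed above is 18.6 fixed point: FIELD_PREP with TRTCM_TOKEN_RATE_MASK (GENMASK(23, 6)) is a shift left by six, so the division remainder becomes a fraction in 64ths. A runnable sketch of the split, with made-up unit and rate values:

#include <stdio.h>

int main(void)
{
	unsigned int rate_val = 1030;	/* requested rate, in units */
	unsigned int unit = 50;		/* tokens per tick, from hw cfg */
	unsigned int ipart = rate_val / unit;
	unsigned int frac = ((rate_val % unit) << 6) / unit;
	unsigned int reg = (ipart << 6) | frac;

	printf("int %u, frac %u/64, reg 0x%x\n", ipart, frac, reg);
	return 0;
}
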
+
+static int airoha_qdma_set_tx_rate_limit(struct airoha_gdm_port *port,
+ int channel, u32 rate,
+ u32 bucket_size)
+{
+ int i, err;
+
+ for (i = 0; i <= TRTCM_PEAK_MODE; i++) {
+ err = airoha_qdma_set_trtcm_config(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG, i,
+ !!rate, TRTCM_METER_MODE);
+ if (err)
+ return err;
+
+ err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel,
+ REG_EGRESS_TRTCM_CFG,
+ i, rate, bucket_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+ u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */
+ struct net_device *dev = port->dev;
+ int num_tx_queues = dev->real_num_tx_queues;
+ int err;
+
+ if (opt->parent_classid != TC_HTB_CLASSID_ROOT) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid");
+ return -EINVAL;
+ }
+
+ err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed configuring htb offload");
+ return err;
+ }
+
+ if (opt->command == TC_HTB_NODE_MODIFY)
+ return 0;
+
+ err = netif_set_real_num_tx_queues(dev, num_tx_queues + 1);
+ if (err) {
+ airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum);
+ NL_SET_ERR_MSG_MOD(opt->extack,
+ "failed setting real_num_tx_queues");
+ return err;
+ }
+
+ set_bit(channel, port->qos_sq_bmap);
+ opt->qid = AIROHA_NUM_TX_RING + channel;
+
+ return 0;
+}
+
+static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
+{
+ struct net_device *dev = port->dev;
+
+ netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1);
+ airoha_qdma_set_tx_rate_limit(port, queue + 1, 0, 0);
+ clear_bit(queue, port->qos_sq_bmap);
+}
+
+static int airoha_tc_htb_delete_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ airoha_tc_remove_htb_queue(port, channel);
+
+ return 0;
+}
+
+static int airoha_tc_htb_destroy(struct airoha_gdm_port *port)
+{
+ int q;
+
+ for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
+ airoha_tc_remove_htb_queue(port, q);
+
+ return 0;
+}
+
+static int airoha_tc_get_htb_get_leaf_queue(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS;
+
+ if (!test_bit(channel, port->qos_sq_bmap)) {
+ NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id");
+ return -EINVAL;
+ }
+
+ opt->qid = channel;
+
+ return 0;
+}
+
+static int airoha_tc_setup_qdisc_htb(struct airoha_gdm_port *port,
+ struct tc_htb_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_HTB_CREATE:
+ break;
+ case TC_HTB_DESTROY:
+ return airoha_tc_htb_destroy(port);
+ case TC_HTB_NODE_MODIFY:
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ return airoha_tc_htb_alloc_leaf_queue(port, opt);
+ case TC_HTB_LEAF_DEL:
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return airoha_tc_htb_delete_leaf_queue(port, opt);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ return airoha_tc_get_htb_get_leaf_queue(port, opt);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_ETS:
+ return airoha_tc_setup_qdisc_ets(port, type_data);
+ case TC_SETUP_QDISC_HTB:
+ return airoha_tc_setup_qdisc_htb(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
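
A hedged usage sketch of how these hooks are reached from tc (interface name and rates are illustrative): replacing the root qdisc with an offloaded HTB lands in TC_HTB_CREATE, and adding a leaf class lands in TC_HTB_LEAF_ALLOC_QUEUE above, which binds the class to one of the reserved hardware queues.

tc qdisc replace dev eth0 root handle 1: htb offload
tc class add dev eth0 parent 1: classid 1:1 htb rate 100mbit ceil 100mbit
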
+
static const struct net_device_ops airoha_netdev_ops = {
.ndo_init = airoha_dev_init,
.ndo_open = airoha_dev_open,
.ndo_stop = airoha_dev_stop,
+ .ndo_select_queue = airoha_dev_select_queue,
.ndo_start_xmit = airoha_dev_xmit,
.ndo_get_stats64 = airoha_dev_get_stats64,
.ndo_set_mac_address = airoha_dev_set_macaddr,
+ .ndo_setup_tc = airoha_dev_tc_setup,
};
static const struct ethtool_ops airoha_ethtool_ops = {
@@ -2640,7 +3171,8 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
}
dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
- AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING);
+ AIROHA_NUM_NETDEV_TX_RINGS,
+ AIROHA_NUM_RX_RING);
if (!dev) {
dev_err(eth->dev, "alloc_etherdev failed\n");
return -ENOMEM;
@@ -2653,12 +3185,18 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
dev->watchdog_timeo = 5 * HZ;
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
- NETIF_F_SG | NETIF_F_TSO;
+ NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_TC;
dev->features |= dev->hw_features;
dev->dev.of_node = np;
dev->irq = qdma->irq;
SET_NETDEV_DEV(dev, eth->dev);
+ /* reserve hw queues for HTB offloading */
+ err = netif_set_real_num_tx_queues(dev, AIROHA_NUM_TX_RING);
+ if (err)
+ return err;
+
err = of_get_ethdev_address(np, dev);
if (err) {
if (err == -EPROBE_DEFER)
@@ -2738,7 +3276,7 @@ static int airoha_probe(struct platform_device *pdev)
err = airoha_hw_init(pdev, eth);
if (err)
- goto error;
+ goto error_hw_cleanup;
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_qdma_start_napi(&eth->qdma[i]);
@@ -2753,13 +3291,16 @@ static int airoha_probe(struct platform_device *pdev)
err = airoha_alloc_gdm_port(eth, np);
if (err) {
of_node_put(np);
- goto error;
+ goto error_napi_stop;
}
}
return 0;
-error:
+error_napi_stop:
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ airoha_qdma_stop_napi(&eth->qdma[i]);
+error_hw_cleanup:
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_hw_cleanup(&eth->qdma[i]);
@@ -2780,8 +3321,10 @@ static void airoha_remove(struct platform_device *pdev)
struct airoha_eth *eth = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+ airoha_qdma_stop_napi(&eth->qdma[i]);
airoha_hw_cleanup(&eth->qdma[i]);
+ }
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index be3d0876c521..568bbe5f83f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
- fw_reset.o qos.o lib/tout.o lib/aso.o wc.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o
#
# Netdev basic
@@ -60,6 +60,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
ifneq ($(CONFIG_MLX5_TC_CT),)
mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += en/tc/ct_fs_smfs.o
+ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += en/tc/ct_fs_hmfs.o
endif
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
@@ -123,6 +124,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/sws/dr_domain.o \
steering/sws/dr_ste_v0.o \
steering/sws/dr_ste_v1.o \
steering/sws/dr_ste_v2.o \
+ steering/sws/dr_ste_v3.o \
steering/sws/dr_cmd.o \
steering/sws/dr_fw.o \
steering/sws/dr_action.o \
@@ -150,8 +152,9 @@ mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/cmd.o \
steering/hws/bwc.o \
steering/hws/debug.o \
steering/hws/vport.o \
- steering/hws/bwc_complex.o
-
+ steering/hws/bwc_complex.o \
+ steering/hws/fs_hws_pools.o \
+ steering/hws/fs_hws.o
#
# SF device
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 9aed29fa4900..d6e736c1fb24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -292,7 +292,7 @@ TRACE_EVENT(mlx5_fs_add_rule,
if (rule->dest_attr.type &
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
__entry->counter_id =
- rule->dest_attr.counter_id;
+ mlx5_fc_id(rule->dest_attr.counter);
),
TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n",
__entry->rule, __entry->fte, __entry->index,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
index 62b3f7ff5562..e5b30801314b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h
@@ -48,4 +48,14 @@ mlx5_ct_fs_smfs_ops_get(void)
}
#endif /* IS_ENABLED(CONFIG_MLX5_SW_STEERING) */
+#if IS_ENABLED(CONFIG_MLX5_HW_STEERING)
+struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void);
+#else
+static inline struct mlx5_ct_fs_ops *
+mlx5_ct_fs_hmfs_ops_get(void)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_MLX5_HW_STEERING) */
+
#endif /* __MLX5_EN_TC_CT_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
new file mode 100644
index 000000000000..a4263137fef5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "en_tc.h"
+#include "en/tc_ct.h"
+#include "en/tc_priv.h"
+#include "en/tc/ct_fs.h"
+#include "fs_core.h"
+#include "steering/hws/fs_hws_pools.h"
+#include "steering/hws/mlx5hws.h"
+#include "steering/hws/table.h"
+
+struct mlx5_ct_fs_hmfs_matcher {
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+ refcount_t ref;
+};
+
+/* We need {ipv4, ipv6} x {tcp, udp, gre} matchers. */
+#define NUM_MATCHERS (2 * 3)
+
+struct mlx5_ct_fs_hmfs {
+ struct mlx5hws_table *ct_tbl;
+ struct mlx5hws_table *ct_nat_tbl;
+ struct mlx5_flow_table *ct_nat;
+ struct mlx5hws_action *fwd_action;
+ struct mlx5hws_action *last_action;
+ struct mlx5hws_context *ctx;
+ struct mutex lock; /* Guards matchers */
+ struct mlx5_ct_fs_hmfs_matcher matchers[NUM_MATCHERS];
+ struct mlx5_ct_fs_hmfs_matcher matchers_nat[NUM_MATCHERS];
+};
+
+struct mlx5_ct_fs_hmfs_rule {
+ struct mlx5_ct_fs_rule fs_rule;
+ struct mlx5hws_bwc_rule *hws_bwc_rule;
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5_fc *counter;
+};
+
+static u32 get_matcher_idx(bool ipv4, bool tcp, bool gre)
+{
+ return ipv4 * 3 + tcp * 2 + gre;
+}
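
get_matcher_idx() packs the six {ipv4, ipv6} x {udp, tcp, gre} combinations into distinct slots below NUM_MATCHERS (tcp and gre are mutually exclusive in the caller). A runnable enumeration of the encoding:

#include <stdio.h>

static unsigned int get_matcher_idx(int ipv4, int tcp, int gre)
{
	return ipv4 * 3 + tcp * 2 + gre;
}

int main(void)
{
	const int proto[3][2] = { {0, 0}, {1, 0}, {0, 1} }; /* udp/tcp/gre */

	for (int v4 = 0; v4 <= 1; v4++) {
		for (int p = 0; p < 3; p++)
			printf("ipv4=%d tcp=%d gre=%d -> idx %u\n",
			       v4, proto[p][0], proto[p][1],
			       get_matcher_idx(v4, proto[p][0], proto[p][1]));
	}
	return 0;
}
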
+
+static int mlx5_ct_fs_hmfs_init(struct mlx5_ct_fs *fs, struct mlx5_flow_table *ct,
+ struct mlx5_flow_table *ct_nat, struct mlx5_flow_table *post_ct)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_table *ct_tbl, *ct_nat_tbl, *post_ct_tbl;
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ ct_tbl = ct->fs_hws_table.hws_table;
+ ct_nat_tbl = ct_nat->fs_hws_table.hws_table;
+ post_ct_tbl = post_ct->fs_hws_table.hws_table;
+ fs_hmfs->ct_nat = ct_nat;
+
+ if (!ct_tbl || !ct_nat_tbl || !post_ct_tbl) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to init, missing backing hws tables");
+ return -EOPNOTSUPP;
+ }
+
+ netdev_dbg(fs->netdev, "using hmfs steering");
+
+ fs_hmfs->ct_tbl = ct_tbl;
+ fs_hmfs->ct_nat_tbl = ct_nat_tbl;
+ fs_hmfs->ctx = ct_tbl->ctx;
+ mutex_init(&fs_hmfs->lock);
+
+ fs_hmfs->fwd_action = mlx5hws_action_create_dest_table(ct_tbl->ctx, post_ct_tbl, flags);
+ if (!fs_hmfs->fwd_action) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create fwd action\n");
+ return -EINVAL;
+ }
+ fs_hmfs->last_action = mlx5hws_action_create_last(ct_tbl->ctx, flags);
+ if (!fs_hmfs->last_action) {
+ netdev_warn(fs->netdev, "ct_fs_hmfs: failed to create last action\n");
+ mlx5hws_action_destroy(fs_hmfs->fwd_action);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mlx5_ct_fs_hmfs_destroy(struct mlx5_ct_fs *fs)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ mlx5hws_action_destroy(fs_hmfs->last_action);
+ mlx5hws_action_destroy(fs_hmfs->fwd_action);
+}
+
+static struct mlx5hws_bwc_matcher *
+mlx5_ct_fs_hmfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5hws_table *tbl,
+ struct mlx5_flow_spec *spec, bool ipv4, bool tcp, bool gre)
+{
+ u8 match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
+ struct mlx5hws_match_parameters mask = {
+ .match_buf = spec->match_criteria,
+ .match_sz = sizeof(spec->match_criteria),
+ };
+ u32 priority = get_matcher_idx(ipv4, tcp, gre); /* Static priority based on params. */
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+
+ hws_bwc_matcher = mlx5hws_bwc_matcher_create(tbl, priority, match_criteria_enable, &mask);
+ if (!hws_bwc_matcher)
+ return ERR_PTR(-EINVAL);
+
+ return hws_bwc_matcher;
+}
+
+static struct mlx5_ct_fs_hmfs_matcher *
+mlx5_ct_fs_hmfs_matcher_get(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ bool nat, bool ipv4, bool tcp, bool gre)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ u32 matcher_idx = get_matcher_idx(ipv4, tcp, gre);
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5hws_bwc_matcher *hws_bwc_matcher;
+ struct mlx5hws_table *tbl;
+
+ hmfs_matcher = nat ?
+ (fs_hmfs->matchers_nat + matcher_idx) :
+ (fs_hmfs->matchers + matcher_idx);
+
+ if (refcount_inc_not_zero(&hmfs_matcher->ref))
+ return hmfs_matcher;
+
+ mutex_lock(&fs_hmfs->lock);
+
+ /* Retry under the lock, as the matcher might already have been created by another CPU. */
+ if (refcount_inc_not_zero(&hmfs_matcher->ref))
+ goto out_unlock;
+
+ tbl = nat ? fs_hmfs->ct_nat_tbl : fs_hmfs->ct_tbl;
+
+ hws_bwc_matcher = mlx5_ct_fs_hmfs_matcher_create(fs, tbl, spec, ipv4, tcp, gre);
+ if (IS_ERR(hws_bwc_matcher)) {
+ netdev_warn(fs->netdev,
+ "ct_fs_hmfs: failed to create bwc matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
+ nat, ipv4, tcp, gre, PTR_ERR(hws_bwc_matcher));
+
+ hmfs_matcher = ERR_CAST(hws_bwc_matcher);
+ goto out_unlock;
+ }
+
+ hmfs_matcher->hws_bwc_matcher = hws_bwc_matcher;
+ refcount_set(&hmfs_matcher->ref, 1);
+
+out_unlock:
+ mutex_unlock(&fs_hmfs->lock);
+ return hmfs_matcher;
+}
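
mlx5_ct_fs_hmfs_matcher_get() is a double-checked get-or-create: a lock-free refcount_inc_not_zero() fast path, then the mutex plus a re-check to resolve races before creating once. A generic sketch of that shape, with a plain int and a pthread mutex standing in for the kernel's atomic refcount_t and mutex:

#include <pthread.h>
#include <stdio.h>

struct matcher {
	int ref;
	int created;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int ref_inc_not_zero(int *ref)
{
	if (!*ref)
		return 0;
	(*ref)++;
	return 1;
}

static struct matcher *matcher_get(struct matcher *m)
{
	if (ref_inc_not_zero(&m->ref))	/* lock-free fast path */
		return m;

	pthread_mutex_lock(&lock);
	if (ref_inc_not_zero(&m->ref))	/* another CPU won the race */
		goto out_unlock;

	m->created = 1;			/* create exactly once */
	m->ref = 1;
out_unlock:
	pthread_mutex_unlock(&lock);
	return m;
}

int main(void)
{
	struct matcher m = { 0, 0 };

	matcher_get(&m);
	matcher_get(&m);
	printf("ref %d, created %d\n", m.ref, m.created); /* 2, 1 */
	return 0;
}
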
+
+static void
+mlx5_ct_fs_hmfs_matcher_put(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher)
+{
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+
+ if (!refcount_dec_and_mutex_lock(&hmfs_matcher->ref, &fs_hmfs->lock))
+ return;
+
+ mlx5hws_bwc_matcher_destroy(hmfs_matcher->hws_bwc_matcher);
+ mutex_unlock(&fs_hmfs->lock);
+}
+
+#define NUM_CT_HMFS_RULES 4
+
+static void mlx5_ct_fs_hmfs_fill_rule_actions(struct mlx5_ct_fs_hmfs *fs_hmfs,
+ struct mlx5_flow_attr *attr,
+ struct mlx5hws_rule_action *rule_actions)
+{
+ struct mlx5_fs_hws_action *mh_action = &attr->modify_hdr->fs_hws_action;
+
+ memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions));
+ rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter);
+ /* Modify header is special, it may require extra arguments outside the action itself. */
+ if (mh_action->mh_data) {
+ rule_actions[1].modify_header.offset = mh_action->mh_data->offset;
+ rule_actions[1].modify_header.data = mh_action->mh_data->data;
+ }
+ rule_actions[1].action = mh_action->hws_action;
+ rule_actions[2].action = fs_hmfs->fwd_action;
+ rule_actions[3].action = fs_hmfs->last_action;
+}
+
+static struct mlx5_ct_fs_rule *
+mlx5_ct_fs_hmfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
+{
+ struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ struct mlx5hws_match_parameters match_params = {
+ .match_buf = spec->match_value,
+ .match_sz = ARRAY_SIZE(spec->match_value),
+ };
+ struct mlx5_ct_fs_hmfs_matcher *hmfs_matcher;
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule;
+ bool nat, tcp, ipv4, gre;
+ int err;
+
+ if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ hmfs_rule = kzalloc(sizeof(*hmfs_rule), GFP_KERNEL);
+ if (!hmfs_rule)
+ return ERR_PTR(-ENOMEM);
+
+ nat = (attr->ft == fs_hmfs->ct_nat);
+ ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
+ tcp = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_TCP;
+ gre = MLX5_GET(fte_match_param, spec->match_value,
+ outer_headers.ip_protocol) == IPPROTO_GRE;
+
+ hmfs_matcher = mlx5_ct_fs_hmfs_matcher_get(fs, spec, nat, ipv4, tcp, gre);
+ if (IS_ERR(hmfs_matcher)) {
+ err = PTR_ERR(hmfs_matcher);
+ goto err_free_rule;
+ }
+ hmfs_rule->hmfs_matcher = hmfs_matcher;
+
+ mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);
+ hmfs_rule->counter = attr->counter;
+
+ hmfs_rule->hws_bwc_rule =
+ mlx5hws_bwc_rule_create(hmfs_matcher->hws_bwc_matcher, &match_params,
+ spec->flow_context.flow_source, rule_actions);
+ if (!hmfs_rule->hws_bwc_rule) {
+ err = -EINVAL;
+ goto err_put_matcher;
+ }
+
+ return &hmfs_rule->fs_rule;
+
+err_put_matcher:
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_matcher);
+err_free_rule:
+ kfree(hmfs_rule);
+ return ERR_PTR(err);
+}
+
+static void mlx5_ct_fs_hmfs_ct_rule_del(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule)
+{
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_hmfs_rule,
+ fs_rule);
+ mlx5hws_bwc_rule_destroy(hmfs_rule->hws_bwc_rule);
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ mlx5_ct_fs_hmfs_matcher_put(fs, hmfs_rule->hmfs_matcher);
+ kfree(hmfs_rule);
+}
+
+static int mlx5_ct_fs_hmfs_ct_rule_update(struct mlx5_ct_fs *fs, struct mlx5_ct_fs_rule *fs_rule,
+ struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_ct_fs_hmfs_rule *hmfs_rule = container_of(fs_rule,
+ struct mlx5_ct_fs_hmfs_rule,
+ fs_rule);
+ struct mlx5hws_rule_action rule_actions[NUM_CT_HMFS_RULES];
+ struct mlx5_ct_fs_hmfs *fs_hmfs = mlx5_ct_fs_priv(fs);
+ int err;
+
+ mlx5_ct_fs_hmfs_fill_rule_actions(fs_hmfs, attr, rule_actions);
+
+ err = mlx5hws_bwc_rule_action_update(hmfs_rule->hws_bwc_rule, rule_actions);
+ if (err) {
+ mlx5_fc_put_hws_action(attr->counter);
+ return err;
+ }
+
+ mlx5_fc_put_hws_action(hmfs_rule->counter);
+ hmfs_rule->counter = attr->counter;
+
+ return 0;
+}
+
+static struct mlx5_ct_fs_ops hmfs_ops = {
+ .ct_rule_add = mlx5_ct_fs_hmfs_ct_rule_add,
+ .ct_rule_del = mlx5_ct_fs_hmfs_ct_rule_del,
+ .ct_rule_update = mlx5_ct_fs_hmfs_ct_rule_update,
+
+ .init = mlx5_ct_fs_hmfs_init,
+ .destroy = mlx5_ct_fs_hmfs_destroy,
+
+ .priv_size = sizeof(struct mlx5_ct_fs_hmfs),
+};
+
+struct mlx5_ct_fs_ops *mlx5_ct_fs_hmfs_ops_get(void)
+{
+ return &hmfs_ops;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 45737d039252..0c97c5899904 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -13,7 +13,6 @@
#define INIT_ERR_PREFIX "ct_fs_smfs init failed"
#define ct_dbg(fmt, args...)\
netdev_dbg(fs->netdev, "ct_fs_smfs debug: " fmt "\n", ##args)
-#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
struct mlx5_ct_fs_smfs_matcher {
struct mlx5dr_matcher *dr_matcher;
@@ -220,78 +219,6 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
mlx5_smfs_action_destroy(fs_smfs->fwd_action);
}
-static inline bool
-mlx5_tc_ct_valid_used_dissector_keys(const u64 used_keys)
-{
-#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
- const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
- DISS_BIT(META);
- const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
- DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
- DISS_BIT(PORTS) | DISS_BIT(TCP);
- const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
- DISS_BIT(PORTS);
- const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
- DISS_BIT(PORTS);
- const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
- const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
-
- return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
- used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
-}
-
-static bool
-mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *flow_rule)
-{
- struct flow_match_ipv4_addrs ipv4_addrs;
- struct flow_match_ipv6_addrs ipv6_addrs;
- struct flow_match_control control;
- struct flow_match_basic basic;
- struct flow_match_ports ports;
- struct flow_match_tcp tcp;
-
- if (!mlx5_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
- ct_dbg("rule uses unexpected dissectors (0x%016llx)",
- flow_rule->match.dissector->used_keys);
- return false;
- }
-
- flow_rule_match_basic(flow_rule, &basic);
- flow_rule_match_control(flow_rule, &control);
- flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
- flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
- if (basic.key->ip_proto != IPPROTO_GRE)
- flow_rule_match_ports(flow_rule, &ports);
- if (basic.key->ip_proto == IPPROTO_TCP)
- flow_rule_match_tcp(flow_rule, &tcp);
-
- if (basic.mask->n_proto != htons(0xFFFF) ||
- (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
- basic.mask->ip_proto != 0xFF ||
- (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
- basic.key->ip_proto != IPPROTO_GRE)) {
- ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
- ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
- basic.key->ip_proto, basic.mask->ip_proto);
- return false;
- }
-
- if (basic.key->ip_proto != IPPROTO_GRE &&
- (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
- ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
- ports.mask->src, ports.mask->dst);
- return false;
- }
-
- if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
- ct_dbg("rule uses unexpected tcp match (flags 0x%02x)", tcp.mask->flags);
- return false;
- }
-
- return true;
-}
-
static struct mlx5_ct_fs_rule *
mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr, struct flow_rule *flow_rule)
@@ -304,7 +231,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
int num_actions = 0, err;
bool nat, tcp, ipv4, gre;
- if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
+ if (!mlx5e_tc_ct_is_valid_flow_rule(fs->netdev, flow_rule))
return ERR_PTR(-EOPNOTSUPP);
smfs_rule = kzalloc(sizeof(*smfs_rule), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index a84ebac2f011..a065e8fafb1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -2065,10 +2065,19 @@ mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv)
struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get();
int err;
- if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
- ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
- ct_dbg("Using SMFS ct flow steering provider");
- fs_ops = mlx5_ct_fs_smfs_ops_get();
+ if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_HMFS) {
+ ct_dbg("Using HMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_hmfs_ops_get();
+ } else if (ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
+ ct_dbg("Using SMFS ct flow steering provider");
+ fs_ops = mlx5_ct_fs_smfs_ops_get();
+ }
+
+ if (!fs_ops) {
+ ct_dbg("Requested flow steering mode is not enabled.");
+ return -EOPNOTSUPP;
+ }
}
ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL);
@@ -2421,3 +2430,74 @@ out_inc_drop:
atomic_inc(&ct_priv->debugfs.stats.rx_dropped);
return false;
}
+
+static bool mlx5e_tc_ct_valid_used_dissector_keys(const u64 used_keys)
+{
+#define DISS_BIT(name) BIT_ULL(FLOW_DISSECTOR_KEY_ ## name)
+ const u64 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) |
+ DISS_BIT(META);
+ const u64 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS) | DISS_BIT(TCP);
+ const u64 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) |
+ DISS_BIT(PORTS);
+ const u64 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
+ const u64 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
+
+ return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
+ used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
+}
+
+bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule)
+{
+ struct flow_match_ipv4_addrs ipv4_addrs;
+ struct flow_match_ipv6_addrs ipv6_addrs;
+ struct flow_match_control control;
+ struct flow_match_basic basic;
+ struct flow_match_ports ports;
+ struct flow_match_tcp tcp;
+
+ if (!mlx5e_tc_ct_valid_used_dissector_keys(flow_rule->match.dissector->used_keys)) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected dissectors (0x%016llx)",
+ flow_rule->match.dissector->used_keys);
+ return false;
+ }
+
+ flow_rule_match_basic(flow_rule, &basic);
+ flow_rule_match_control(flow_rule, &control);
+ flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
+ flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
+ if (basic.key->ip_proto != IPPROTO_GRE)
+ flow_rule_match_ports(flow_rule, &ports);
+ if (basic.key->ip_proto == IPPROTO_TCP)
+ flow_rule_match_tcp(flow_rule, &tcp);
+
+ if (basic.mask->n_proto != htons(0xFFFF) ||
+ (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
+ basic.mask->ip_proto != 0xFF ||
+ (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
+ basic.key->ip_proto != IPPROTO_GRE)) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
+ ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
+ basic.key->ip_proto, basic.mask->ip_proto);
+ return false;
+ }
+
+ if (basic.key->ip_proto != IPPROTO_GRE &&
+ (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
+ netdev_dbg(dev, "ct_debug: rule uses ports match (src 0x%04x, dst 0x%04x)",
+ ports.mask->src, ports.mask->dst);
+ return false;
+ }
+
+ if (basic.key->ip_proto == IPPROTO_TCP && tcp.mask->flags != MLX5_CT_TCP_FLAGS_MASK) {
+ netdev_dbg(dev, "ct_debug: rule uses unexpected tcp match (flags 0x%02x)",
+ tcp.mask->flags);
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index b66c5f98067f..5e9dbdd4a5e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -128,6 +128,9 @@ bool
mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id);
+#define MLX5_CT_TCP_FLAGS_MASK cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16)
+bool mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev, struct flow_rule *flow_rule);
+
#else /* CONFIG_MLX5_TC_CT */
static inline struct mlx5_tc_ct_priv *
@@ -202,5 +205,12 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
return false;
}
+static inline bool
+mlx5e_tc_ct_is_valid_flow_rule(const struct net_device *dev,
+ struct flow_rule *flow_rule)
+{
+ return false;
+}
+
#endif /* !IS_ENABLED(CONFIG_MLX5_TC_CT) */
#endif /* __MLX5_EN_TC_CT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 1baf8933a07c..501709ac310f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -94,25 +94,14 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
u32 esn, esn_msb;
u8 overlap;
- switch (x->xso.type) {
- case XFRM_DEV_OFFLOAD_PACKET:
- switch (x->xso.dir) {
- case XFRM_DEV_OFFLOAD_IN:
- esn = x->replay_esn->seq;
- esn_msb = x->replay_esn->seq_hi;
- break;
- case XFRM_DEV_OFFLOAD_OUT:
- esn = x->replay_esn->oseq;
- esn_msb = x->replay_esn->oseq_hi;
- break;
- default:
- WARN_ON(true);
- return false;
- }
- break;
- case XFRM_DEV_OFFLOAD_CRYPTO:
- /* Already parsed by XFRM core */
+ switch (x->xso.dir) {
+ case XFRM_DEV_OFFLOAD_IN:
esn = x->replay_esn->seq;
+ esn_msb = x->replay_esn->seq_hi;
+ break;
+ case XFRM_DEV_OFFLOAD_OUT:
+ esn = x->replay_esn->oseq;
+ esn_msb = x->replay_esn->oseq_hi;
break;
default:
WARN_ON(true);
@@ -121,11 +110,15 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
overlap = sa_entry->esn_state.overlap;
- if (esn >= x->replay_esn->replay_window)
- seq_bottom = esn - x->replay_esn->replay_window + 1;
+ if (!x->replay_esn->replay_window) {
+ seq_bottom = esn;
+ } else {
+ if (esn >= x->replay_esn->replay_window)
+ seq_bottom = esn - x->replay_esn->replay_window + 1;
- if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
- esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+ if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
+ esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));
+ }
if (sa_entry->esn_state.esn_msb)
sa_entry->esn_state.esn = esn;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 57861d34d46f..e7b64679f121 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -194,7 +194,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
@@ -223,7 +223,7 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
}
sa_entry->ipsec_rule.trailer.fc = flow_counter;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -275,7 +275,7 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags = FLOW_ACT_NO_APPEND;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
@@ -348,7 +348,7 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(flow_counter);
+ dest.counter = flow_counter;
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
@@ -686,7 +686,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
rx->ft.status = ft;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
+ dest[1].counter = rx->fc->cnt;
err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
goto err_add;
@@ -873,7 +873,7 @@ static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx->fc->cnt);
+ dest.counter = tx->fc->cnt;
fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
if (IS_ERR(fte)) {
err = PTR_ERR(fte);
@@ -1649,7 +1649,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[0].ft = rx->ft.status;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1760,7 +1760,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
dest[0].ft = tx->ft.status;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(counter);
+ dest[1].counter = counter;
rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1833,7 +1833,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
+ dest[dstn].counter = tx->fc->drop;
dstn++;
break;
default:
@@ -1911,7 +1911,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
case XFRM_POLICY_BLOCK:
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
+ dest[dstn].counter = rx->fc->drop;
dstn++;
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 0ec17c276bdd..bd41b75d246e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3946,6 +3946,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
}
stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
+ stats->rx_dropped = PPORT_2863_GET(pstats, if_in_discards);
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 6b3b1afe8312..9ba99609999f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1282,7 +1282,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
+ dest[dest_ix].counter = attr->counter;
dest_ix++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 6b4c9ffad95b..7dd1dc3f77c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -135,7 +135,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
if (drop_counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter);
+ drop_ctr_dst.counter = drop_counter;
dst = &drop_ctr_dst;
dest_num++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index 093ed86a0acd..1c37098e09ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -260,7 +260,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
if (counter) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- drop_ctr_dst.counter_id = mlx5_fc_id(counter);
+ drop_ctr_dst.counter = counter;
dst = &drop_ctr_dst;
dest_num++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index c5ea1d1d2b03..5f647358a05c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -570,7 +570,8 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge,
struct mlx5_eswitch *esw)
{
@@ -628,7 +629,7 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dests[0].ft = bridge->egress_ft;
dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dests[1].counter_id = counter_id;
+ dests[1].counter = counter;
handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
ARRAY_SIZE(dests));
@@ -639,17 +640,19 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
- return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, bridge->br_offloads->esw);
}
static struct mlx5_flow_handle *
mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
const unsigned char *addr,
- struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_fc *counter,
struct mlx5_esw_bridge *bridge)
{
struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
@@ -671,7 +674,7 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
goto out;
}
- handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
+ handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter,
bridge, peer_esw);
out:
@@ -1385,10 +1388,9 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
handle = peer ?
mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
- addr, vlan, mlx5_fc_id(counter),
- bridge) :
+ addr, vlan, counter, bridge) :
mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
- mlx5_fc_id(counter), bridge);
+ counter, bridge);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 06076dd9ec64..20cc01ceee8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -721,7 +721,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[i].counter_id = mlx5_fc_id(attr->counter);
+ dest[i].counter = attr->counter;
i++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 676005854dad..ae20c061e0fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -217,7 +217,8 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
int err;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
- underlay_qpn == 0)
+ underlay_qpn == 0 &&
+ (ft->type != FS_FT_RDMA_RX && ft->type != FS_FT_RDMA_TX))
return 0;
if (ft->type == FS_FT_FDB &&
@@ -718,7 +719,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
continue;
MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
- dst->dest_attr.counter_id);
+ mlx5_fc_id(dst->dest_attr.counter));
in_dests += dst_cnt_size;
list_size++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0ce999706d41..22dc23d991d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -658,6 +658,7 @@ static void del_sw_hw_rule(struct fs_node *node)
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ mlx5_fc_local_destroy(rule->dest_attr.counter);
goto out;
}
@@ -820,11 +821,17 @@ static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
return index;
fte->index = index + fg->start_index;
+retry_insert:
ret = rhashtable_insert_fast(&fg->ftes_hash,
&fte->hash,
rhash_fte);
- if (ret)
+ if (ret) {
+ if (ret == -EBUSY) {
+ cond_resched();
+ goto retry_insert;
+ }
goto err_ida_remove;
+ }
tree_add_node(&fte->node, &fg->node);
list_add_tail(&fte->node.list, &fg->node.children);
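
The retry added to insert_fte() treats -EBUSY from rhashtable_insert_fast() as transient (a background rehash in flight) and loops with a resched point rather than failing the insert. A minimal sketch of that shape, with a stubbed insert that reports busy twice and yield_cpu() standing in for cond_resched():

#include <errno.h>
#include <stdio.h>

static int attempts;

static int fake_insert(void)
{
	return ++attempts < 3 ? -EBUSY : 0;	/* busy twice, then ok */
}

static void yield_cpu(void)
{
}

int main(void)
{
	int ret;

retry_insert:
	ret = fake_insert();
	if (ret == -EBUSY) {
		yield_cpu();
		goto retry_insert;
	}
	printf("inserted after %d attempts (ret %d)\n", attempts, ret);
	return ret;
}
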
@@ -3529,35 +3536,42 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
char *value = val.vstr;
- int err = 0;
+ u8 eswitch_mode;
- if (!strcmp(value, "dmfs")) {
+ if (!strcmp(value, "dmfs"))
return 0;
- } else if (!strcmp(value, "smfs")) {
- u8 eswitch_mode;
- bool smfs_cap;
- eswitch_mode = mlx5_eswitch_mode(dev);
- smfs_cap = mlx5_fs_dr_is_supported(dev);
+ if (!strcmp(value, "smfs")) {
+ bool smfs_cap = mlx5_fs_dr_is_supported(dev);
if (!smfs_cap) {
- err = -EOPNOTSUPP;
NL_SET_ERR_MSG_MOD(extack,
"Software managed steering is not supported by current device");
+ return -EOPNOTSUPP;
}
+ } else if (!strcmp(value, "hmfs")) {
+ bool hmfs_cap = mlx5_fs_hws_is_supported(dev);
- else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ if (!hmfs_cap) {
NL_SET_ERR_MSG_MOD(extack,
- "Software managed steering is not supported when eswitch offloads enabled.");
- err = -EOPNOTSUPP;
+ "Hardware steering is not supported by current device");
+ return -EOPNOTSUPP;
}
} else {
NL_SET_ERR_MSG_MOD(extack,
- "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
- err = -EINVAL;
+ "Bad parameter: supported values are [\"dmfs\", \"smfs\", \"hmfs\"]");
+ return -EINVAL;
}
- return err;
+ eswitch_mode = mlx5_eswitch_mode(dev);
+ if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Moving to %s is not supported when eswitch offloads enabled.",
+ value);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
@@ -3569,6 +3583,8 @@ static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
if (!strcmp(ctx->val.vstr, "smfs"))
mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else if (!strcmp(ctx->val.vstr, "hmfs"))
+ mode = MLX5_FLOW_STEERING_MODE_HMFS;
else
mode = MLX5_FLOW_STEERING_MODE_DMFS;
dev->priv.steering->mode = mode;
@@ -3581,10 +3597,17 @@ static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
+ switch (dev->priv.steering->mode) {
+ case MLX5_FLOW_STEERING_MODE_SMFS:
strscpy(ctx->val.vstr, "smfs", sizeof(ctx->val.vstr));
- else
+ break;
+ case MLX5_FLOW_STEERING_MODE_HMFS:
+ strscpy(ctx->val.vstr, "hmfs", sizeof(ctx->val.vstr));
+ break;
+ default:
strscpy(ctx->val.vstr, "dmfs", sizeof(ctx->val.vstr));
+ }
+
return 0;
}
@@ -3659,8 +3682,7 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
- MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
+ if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support)) {
err = init_rdma_rx_root_ns(steering);
if (err)
goto err;
@@ -4004,6 +4026,8 @@ int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
cmds = mlx5_fs_cmd_get_dr_cmds();
+ else if (mode == MLX5_FLOW_STEERING_MODE_HMFS)
+ cmds = mlx5_fs_cmd_get_hws_cmds();
else
cmds = mlx5_fs_cmd_get_fw_cmds();
if (!cmds)
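
With the fs_core.c hunks above, the flow_steering_mode devlink parameter accepts a third value, "hmfs" (hardware managed flow steering), next to "dmfs" and "smfs", and any mode change is uniformly rejected while eswitch offloads are enabled. Switching would be done with something like "devlink dev param set pci/0000:08:00.0 name flow_steering_mode value hmfs cmode runtime". A minimal sketch of the string-to-mode mapping the set/get pair implements (the helper name is illustrative, not part of the patch):

	/* Illustrative helper only: map the devlink parameter string to the
	 * steering mode enum, defaulting to dmfs as mlx5_fs_mode_set() does. */
	static enum mlx5_flow_steering_mode fs_mode_from_str(const char *s)
	{
		if (!strcmp(s, "smfs"))
			return MLX5_FLOW_STEERING_MODE_SMFS;
		if (!strcmp(s, "hmfs"))
			return MLX5_FLOW_STEERING_MODE_HMFS;
		return MLX5_FLOW_STEERING_MODE_DMFS;
	}
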
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index bad2df0715ec..20837e526679 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -38,6 +38,7 @@
#include <linux/rhashtable.h>
#include <linux/llist.h>
#include <steering/sws/fs_dr.h>
+#include <steering/hws/fs_hws.h>
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
@@ -64,6 +65,7 @@ struct mlx5_modify_hdr {
enum mlx5_flow_resource_owner owner;
union {
struct mlx5_fs_dr_action fs_dr_action;
+ struct mlx5_fs_hws_action fs_hws_action;
u32 id;
};
};
@@ -74,6 +76,7 @@ struct mlx5_pkt_reformat {
enum mlx5_flow_resource_owner owner;
union {
struct mlx5_fs_dr_action fs_dr_action;
+ struct mlx5_fs_hws_action fs_hws_action;
u32 id;
};
};
@@ -126,7 +129,8 @@ enum fs_fte_status {
enum mlx5_flow_steering_mode {
MLX5_FLOW_STEERING_MODE_DMFS,
- MLX5_FLOW_STEERING_MODE_SMFS
+ MLX5_FLOW_STEERING_MODE_SMFS,
+ MLX5_FLOW_STEERING_MODE_HMFS,
};
enum mlx5_flow_steering_capabilty {
@@ -190,7 +194,10 @@ struct mlx5_flow_handle {
/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
struct fs_node node;
- struct mlx5_fs_dr_table fs_dr_table;
+ union {
+ struct mlx5_fs_dr_table fs_dr_table;
+ struct mlx5_fs_hws_table fs_hws_table;
+ };
u32 id;
u16 vport;
unsigned int max_fte;
@@ -247,7 +254,10 @@ struct fs_fte_dup {
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
- struct mlx5_fs_dr_rule fs_dr_rule;
+ union {
+ struct mlx5_fs_dr_rule fs_dr_rule;
+ struct mlx5_fs_hws_rule fs_hws_rule;
+ };
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
struct fs_fte_action act_dests;
struct fs_fte_dup *dup;
@@ -280,7 +290,10 @@ struct mlx5_flow_group_mask {
/* Type of children is fs_fte */
struct mlx5_flow_group {
struct fs_node node;
- struct mlx5_fs_dr_matcher fs_dr_matcher;
+ union {
+ struct mlx5_fs_dr_matcher fs_dr_matcher;
+ struct mlx5_fs_hws_matcher fs_hws_matcher;
+ };
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
@@ -293,7 +306,10 @@ struct mlx5_flow_group {
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
enum mlx5_flow_steering_mode mode;
- struct mlx5_fs_dr_domain fs_dr_domain;
+ union {
+ struct mlx5_fs_dr_domain fs_dr_domain;
+ struct mlx5_fs_hws_context fs_hws_context;
+ };
enum fs_flow_table_type table_type;
struct mlx5_core_dev *dev;
struct mlx5_flow_table *root_ft;
@@ -303,6 +319,42 @@ struct mlx5_flow_root_namespace {
const struct mlx5_flow_cmds *cmds;
};
+enum mlx5_fc_type {
+ MLX5_FC_TYPE_ACQUIRED = 0,
+ MLX5_FC_TYPE_LOCAL,
+};
+
+struct mlx5_fc_cache {
+ u64 packets;
+ u64 bytes;
+ u64 lastuse;
+};
+
+struct mlx5_fc {
+ u32 id;
+ bool aging;
+ enum mlx5_fc_type type;
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc_cache cache;
+ /* last{packets,bytes} are used for calculating deltas since last reading. */
+ u64 lastpackets;
+ u64 lastbytes;
+};
+
+struct mlx5_fc_bulk_hws_data {
+ struct mlx5hws_action *hws_action;
+ struct mutex lock; /* protects hws_action */
+ refcount_t hws_action_refcount;
+};
+
+struct mlx5_fc_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ u32 base_id;
+ struct mlx5_fc_bulk_hws_data hws_data;
+ struct mlx5_fc fcs[];
+};
+
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 62d0c689796b..492775d3d193 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "fs_core.h"
+#include "fs_pool.h"
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
@@ -43,33 +44,6 @@
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
-struct mlx5_fc_cache {
- u64 packets;
- u64 bytes;
- u64 lastuse;
-};
-
-struct mlx5_fc {
- u32 id;
- bool aging;
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc_cache cache;
- /* last{packets,bytes} are used for calculating deltas since last reading. */
- u64 lastpackets;
- u64 lastbytes;
-};
-
-struct mlx5_fc_pool {
- struct mlx5_core_dev *dev;
- struct mutex pool_lock; /* protects pool lists */
- struct list_head fully_used;
- struct list_head partially_used;
- struct list_head unused;
- int available_fcs;
- int used_fcs;
- int threshold;
-};
-
struct mlx5_fc_stats {
struct xarray counters;
@@ -80,13 +54,13 @@ struct mlx5_fc_stats {
int bulk_query_len;
bool bulk_query_alloc_failed;
unsigned long next_bulk_query_alloc;
- struct mlx5_fc_pool fc_pool;
+ struct mlx5_fs_pool fc_pool;
};
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
-static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
-static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
+static void mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev);
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool);
+static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool);
+static void mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc);
static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
@@ -186,6 +160,9 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
struct mlx5_fc_stats *fc_stats = dev->priv.fc_stats;
+ if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL))
+ return;
+
if (counter->bulk)
mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
else
@@ -435,15 +412,7 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
fc_stats->sampling_interval);
}
-/* Flow counter bluks */
-
-struct mlx5_fc_bulk {
- struct list_head pool_list;
- u32 base_id;
- int bulk_len;
- unsigned long *bitmask;
- struct mlx5_fc fcs[] __counted_by(bulk_len);
-};
+/* Flow counter bulks */
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
u32 id)
@@ -452,16 +421,16 @@ static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
counter->id = id;
}
-static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
+u32 mlx5_fc_get_base_id(struct mlx5_fc *counter)
{
- return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+ return counter->bulk->base_id;
}
-static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
+static struct mlx5_fs_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev,
+ void *pool_ctx)
{
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
- struct mlx5_fc_bulk *bulk;
- int err = -ENOMEM;
+ struct mlx5_fc_bulk *fc_bulk;
int bulk_len;
u32 base_id;
int i;
@@ -469,207 +438,141 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
- if (!bulk)
- goto err_alloc_bulk;
-
- bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
- GFP_KERNEL);
- if (!bulk->bitmask)
- goto err_alloc_bitmask;
+ fc_bulk = kvzalloc(struct_size(fc_bulk, fcs, bulk_len), GFP_KERNEL);
+ if (!fc_bulk)
+ return NULL;
- err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
- if (err)
- goto err_mlx5_cmd_bulk_alloc;
+ if (mlx5_fs_bulk_init(dev, &fc_bulk->fs_bulk, bulk_len))
+ goto fc_bulk_free;
- bulk->base_id = base_id;
- bulk->bulk_len = bulk_len;
- for (i = 0; i < bulk_len; i++) {
- mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
- set_bit(i, bulk->bitmask);
- }
+ if (mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id))
+ goto fs_bulk_cleanup;
+ fc_bulk->base_id = base_id;
+ for (i = 0; i < bulk_len; i++)
+ mlx5_fc_init(&fc_bulk->fcs[i], fc_bulk, base_id + i);
- return bulk;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
+ return &fc_bulk->fs_bulk;
-err_mlx5_cmd_bulk_alloc:
- kvfree(bulk->bitmask);
-err_alloc_bitmask:
- kvfree(bulk);
-err_alloc_bulk:
- return ERR_PTR(err);
+fs_bulk_cleanup:
+ mlx5_fs_bulk_cleanup(&fc_bulk->fs_bulk);
+fc_bulk_free:
+ kvfree(fc_bulk);
+ return NULL;
}
static int
-mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
+mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
{
- if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
+ struct mlx5_fc_bulk *fc_bulk = container_of(fs_bulk,
+ struct mlx5_fc_bulk,
+ fs_bulk);
+
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
return -EBUSY;
}
- mlx5_cmd_fc_free(dev, bulk->base_id);
- kvfree(bulk->bitmask);
- kvfree(bulk);
+ mlx5_cmd_fc_free(dev, fc_bulk->base_id);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(fc_bulk);
return 0;
}
-static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
+static void mlx5_fc_pool_update_threshold(struct mlx5_fs_pool *fc_pool)
{
- int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
-
- if (free_fc_index >= bulk->bulk_len)
- return ERR_PTR(-ENOSPC);
-
- clear_bit(free_fc_index, bulk->bitmask);
- return &bulk->fcs[free_fc_index];
-}
-
-static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
-{
- int fc_index = fc->id - bulk->base_id;
-
- if (test_bit(fc_index, bulk->bitmask))
- return -EINVAL;
-
- set_bit(fc_index, bulk->bitmask);
- return 0;
+ fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
+ fc_pool->used_units / MLX5_FC_POOL_USED_BUFF_RATIO);
}
/* Flow counters pool API */
-static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
-{
- fc_pool->dev = dev;
- mutex_init(&fc_pool->pool_lock);
- INIT_LIST_HEAD(&fc_pool->fully_used);
- INIT_LIST_HEAD(&fc_pool->partially_used);
- INIT_LIST_HEAD(&fc_pool->unused);
- fc_pool->available_fcs = 0;
- fc_pool->used_fcs = 0;
- fc_pool->threshold = 0;
-}
+static const struct mlx5_fs_pool_ops mlx5_fc_pool_ops = {
+ .bulk_destroy = mlx5_fc_bulk_destroy,
+ .bulk_create = mlx5_fc_bulk_create,
+ .update_threshold = mlx5_fc_pool_update_threshold,
+};
-static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
+static void
+mlx5_fc_pool_init(struct mlx5_fs_pool *fc_pool, struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc_bulk *tmp;
-
- list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
- list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
- mlx5_fc_bulk_destroy(dev, bulk);
+ mlx5_fs_pool_init(fc_pool, dev, &mlx5_fc_pool_ops, NULL);
}
-static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
+static void mlx5_fc_pool_cleanup(struct mlx5_fs_pool *fc_pool)
{
- fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
- fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
+ mlx5_fs_pool_cleanup(fc_pool);
}
-static struct mlx5_fc_bulk *
-mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_counter(struct mlx5_fs_pool *fc_pool)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *new_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fc_bulk *fc_bulk;
+ int err;
- new_bulk = mlx5_fc_bulk_create(dev);
- if (!IS_ERR(new_bulk))
- fc_pool->available_fcs += new_bulk->bulk_len;
- mlx5_fc_pool_update_threshold(fc_pool);
- return new_bulk;
+ err = mlx5_fs_pool_acquire_index(fc_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ fc_bulk = container_of(pool_index.fs_bulk, struct mlx5_fc_bulk, fs_bulk);
+ return &fc_bulk->fcs[pool_index.index];
}
static void
-mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
+mlx5_fc_pool_release_counter(struct mlx5_fs_pool *fc_pool, struct mlx5_fc *fc)
{
+ struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
struct mlx5_core_dev *dev = fc_pool->dev;
- fc_pool->available_fcs -= bulk->bulk_len;
- mlx5_fc_bulk_destroy(dev, bulk);
- mlx5_fc_pool_update_threshold(fc_pool);
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = fc->id - fc->bulk->base_id;
+ if (mlx5_fs_pool_release_index(fc_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
}
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
- struct list_head *next_list,
- bool move_non_full_bulk)
+/**
+ * mlx5_fc_local_create - Allocate a mlx5_fc struct for a counter that
+ * was already acquired, using its counter id and bulk data.
+ *
+ * @counter_id: id of the acquired counter
+ * @offset: counter offset from bulk base
+ * @bulk_size: size of the bulk the counter was allocated from
+ *
+ * Return: Pointer to mlx5_fc on success, ERR_PTR otherwise.
+ */
+struct mlx5_fc *
+mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
{
- struct mlx5_fc_bulk *bulk;
- struct mlx5_fc *fc;
-
- if (list_empty(src_list))
- return ERR_PTR(-ENODATA);
-
- bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
- fc = mlx5_fc_bulk_acquire_fc(bulk);
- if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
- list_move(&bulk->pool_list, next_list);
- return fc;
-}
+ struct mlx5_fc_bulk *fc_bulk;
+ struct mlx5_fc *counter;
-static struct mlx5_fc *
-mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
-{
- struct mlx5_fc_bulk *new_bulk;
- struct mlx5_fc *fc;
-
- mutex_lock(&fc_pool->pool_lock);
-
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
- &fc_pool->fully_used, false);
- if (IS_ERR(fc))
- fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
- &fc_pool->partially_used,
- true);
- if (IS_ERR(fc)) {
- new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
- if (IS_ERR(new_bulk)) {
- fc = ERR_CAST(new_bulk);
- goto out;
- }
- fc = mlx5_fc_bulk_acquire_fc(new_bulk);
- list_add(&new_bulk->pool_list, &fc_pool->partially_used);
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+ fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
+ if (!fc_bulk) {
+ kfree(counter);
+ return ERR_PTR(-ENOMEM);
}
- fc_pool->available_fcs--;
- fc_pool->used_fcs++;
-out:
- mutex_unlock(&fc_pool->pool_lock);
- return fc;
+ counter->type = MLX5_FC_TYPE_LOCAL;
+ counter->id = counter_id;
+ fc_bulk->base_id = counter_id - offset;
+ fc_bulk->fs_bulk.bulk_len = bulk_size;
+ counter->bulk = fc_bulk;
+ return counter;
}
+EXPORT_SYMBOL(mlx5_fc_local_create);
-static void
-mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
+void mlx5_fc_local_destroy(struct mlx5_fc *counter)
{
- struct mlx5_core_dev *dev = fc_pool->dev;
- struct mlx5_fc_bulk *bulk = fc->bulk;
- int bulk_free_fcs_amount;
-
- mutex_lock(&fc_pool->pool_lock);
-
- if (mlx5_fc_bulk_release_fc(bulk, fc)) {
- mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
- goto unlock;
- }
-
- fc_pool->available_fcs++;
- fc_pool->used_fcs--;
-
- bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
- if (bulk_free_fcs_amount == 1)
- list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
- if (bulk_free_fcs_amount == bulk->bulk_len) {
- list_del(&bulk->pool_list);
- if (fc_pool->available_fcs > fc_pool->threshold)
- mlx5_fc_pool_free_bulk(fc_pool, bulk);
- else
- list_add(&bulk->pool_list, &fc_pool->unused);
- }
+ if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
+ return;
-unlock:
- mutex_unlock(&fc_pool->pool_lock);
+ kfree(counter->bulk);
+ kfree(counter);
}
+EXPORT_SYMBOL(mlx5_fc_local_destroy);
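
mlx5_fc_local_create() wraps a counter that was allocated elsewhere (e.g. from a hardware-steering bulk) in a struct mlx5_fc without touching the shared pool, and del_sw_hw_rule() above pairs it with mlx5_fc_local_destroy(). A minimal usage sketch, assuming counter_id, offset and bulk_size come from such an external bulk allocation:

	/* Sketch: wrap an externally allocated counter so it can be used
	 * as a flow destination; inputs are assumed to come from an HWS
	 * bulk allocation. */
	struct mlx5_fc *fc;

	fc = mlx5_fc_local_create(counter_id, offset, bulk_size);
	if (IS_ERR(fc))
		return PTR_ERR(fc);
	/* ... use fc as dest.counter in a flow rule ... */
	mlx5_fc_local_destroy(fc);
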
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
new file mode 100644
index 000000000000..f6c226664602
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include <mlx5_core.h>
+#include "fs_pool.h"
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len)
+{
+ int i;
+
+ fs_bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!fs_bulk->bitmask)
+ return -ENOMEM;
+
+ fs_bulk->bulk_len = bulk_len;
+ for (i = 0; i < bulk_len; i++)
+ set_bit(i, fs_bulk->bitmask);
+
+ return 0;
+}
+
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk)
+{
+ kvfree(fs_bulk->bitmask);
+}
+
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk)
+{
+ return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+}
+
+static int mlx5_fs_bulk_acquire_index(struct mlx5_fs_bulk *fs_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ int free_index;
+
+ WARN_ON_ONCE(!pool_index || !fs_bulk);
+ free_index = find_first_bit(fs_bulk->bitmask, fs_bulk->bulk_len);
+ if (free_index >= fs_bulk->bulk_len)
+ return -ENOSPC;
+
+ clear_bit(free_index, fs_bulk->bitmask);
+ pool_index->fs_bulk = fs_bulk;
+ pool_index->index = free_index;
+ return 0;
+}
+
+static int mlx5_fs_bulk_release_index(struct mlx5_fs_bulk *fs_bulk, int index)
+{
+ if (test_bit(index, fs_bulk->bitmask))
+ return -EINVAL;
+
+ set_bit(index, fs_bulk->bitmask);
+ return 0;
+}
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops, void *pool_ctx)
+{
+ WARN_ON_ONCE(!ops || !ops->bulk_destroy || !ops->bulk_create ||
+ !ops->update_threshold);
+ pool->dev = dev;
+ pool->pool_ctx = pool_ctx;
+ mutex_init(&pool->pool_lock);
+ INIT_LIST_HEAD(&pool->fully_used);
+ INIT_LIST_HEAD(&pool->partially_used);
+ INIT_LIST_HEAD(&pool->unused);
+ pool->available_units = 0;
+ pool->used_units = 0;
+ pool->threshold = 0;
+ pool->ops = ops;
+}
+
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool)
+{
+ struct mlx5_core_dev *dev = pool->dev;
+ struct mlx5_fs_bulk *bulk;
+ struct mlx5_fs_bulk *tmp;
+
+ list_for_each_entry_safe(bulk, tmp, &pool->fully_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->partially_used, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &pool->unused, pool_list)
+ pool->ops->bulk_destroy(dev, bulk);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_pool_alloc_new_bulk(struct mlx5_fs_pool *fs_pool)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+ struct mlx5_fs_bulk *new_bulk;
+
+ new_bulk = fs_pool->ops->bulk_create(dev, fs_pool->pool_ctx);
+ if (new_bulk)
+ fs_pool->available_units += new_bulk->bulk_len;
+ fs_pool->ops->update_threshold(fs_pool);
+ return new_bulk;
+}
+
+static void
+mlx5_fs_pool_free_bulk(struct mlx5_fs_pool *fs_pool, struct mlx5_fs_bulk *bulk)
+{
+ struct mlx5_core_dev *dev = fs_pool->dev;
+
+ fs_pool->available_units -= bulk->bulk_len;
+ fs_pool->ops->bulk_destroy(dev, bulk);
+ fs_pool->ops->update_threshold(fs_pool);
+}
+
+static int
+mlx5_fs_pool_acquire_from_list(struct list_head *src_list,
+ struct list_head *next_list,
+ bool move_non_full_bulk,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *fs_bulk;
+ int err;
+
+ if (list_empty(src_list))
+ return -ENODATA;
+
+ fs_bulk = list_first_entry(src_list, struct mlx5_fs_bulk, pool_list);
+ err = mlx5_fs_bulk_acquire_index(fs_bulk, pool_index);
+ if (move_non_full_bulk || mlx5_fs_bulk_get_free_amount(fs_bulk) == 0)
+ list_move(&fs_bulk->pool_list, next_list);
+ return err;
+}
+
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *new_bulk;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->partially_used,
+ &fs_pool->fully_used, false,
+ pool_index);
+ if (err)
+ err = mlx5_fs_pool_acquire_from_list(&fs_pool->unused,
+ &fs_pool->partially_used,
+ true, pool_index);
+ if (err) {
+ new_bulk = mlx5_fs_pool_alloc_new_bulk(fs_pool);
+ if (!new_bulk) {
+ err = -ENOENT;
+ goto out;
+ }
+ err = mlx5_fs_bulk_acquire_index(new_bulk, pool_index);
+ WARN_ON_ONCE(err);
+ list_add(&new_bulk->pool_list, &fs_pool->partially_used);
+ }
+ fs_pool->available_units--;
+ fs_pool->used_units++;
+
+out:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
+
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index)
+{
+ struct mlx5_fs_bulk *bulk = pool_index->fs_bulk;
+ int bulk_free_amount;
+ int err;
+
+ mutex_lock(&fs_pool->pool_lock);
+
+ /* TBD: would rather return void; an error is returned only so callers can keep the warn from the original code */
+ err = mlx5_fs_bulk_release_index(bulk, pool_index->index);
+ if (err)
+ goto unlock;
+
+ fs_pool->available_units++;
+ fs_pool->used_units--;
+
+ bulk_free_amount = mlx5_fs_bulk_get_free_amount(bulk);
+ if (bulk_free_amount == 1)
+ list_move_tail(&bulk->pool_list, &fs_pool->partially_used);
+ if (bulk_free_amount == bulk->bulk_len) {
+ list_del(&bulk->pool_list);
+ if (fs_pool->available_units > fs_pool->threshold)
+ mlx5_fs_pool_free_bulk(fs_pool, bulk);
+ else
+ list_add(&bulk->pool_list, &fs_pool->unused);
+ }
+
+unlock:
+ mutex_unlock(&fs_pool->pool_lock);
+ return err;
+}
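
The pool keeps bulks on three lists (fully_used, partially_used, unused) and allocates from partially used bulks first, so fully free bulks can be destroyed once available_units exceeds the threshold. A minimal acquire/release round trip through the generic API, assuming a pool already initialized with suitable ops:

	/* Sketch: one unit in, one unit out. pool_index identifies the
	 * unit by (bulk, index); container_of() on idx.fs_bulk recovers
	 * the client's wrapper type. */
	struct mlx5_fs_pool_index idx = {};
	int err;

	err = mlx5_fs_pool_acquire_index(&pool, &idx);
	if (err)
		return err;
	/* ... use idx.fs_bulk + idx.index ... */
	mlx5_fs_pool_release_index(&pool, &idx);
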
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
new file mode 100644
index 000000000000..f04ec3107498
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef __MLX5_FS_POOL_H__
+#define __MLX5_FS_POOL_H__
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_fs_bulk {
+ struct list_head pool_list;
+ int bulk_len;
+ unsigned long *bitmask;
+};
+
+struct mlx5_fs_pool_index {
+ struct mlx5_fs_bulk *fs_bulk;
+ int index;
+};
+
+struct mlx5_fs_pool;
+
+struct mlx5_fs_pool_ops {
+ int (*bulk_destroy)(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *bulk);
+ struct mlx5_fs_bulk * (*bulk_create)(struct mlx5_core_dev *dev,
+ void *pool_ctx);
+ void (*update_threshold)(struct mlx5_fs_pool *pool);
+};
+
+struct mlx5_fs_pool {
+ struct mlx5_core_dev *dev;
+ void *pool_ctx;
+ const struct mlx5_fs_pool_ops *ops;
+ struct mutex pool_lock; /* protects pool lists */
+ struct list_head fully_used;
+ struct list_head partially_used;
+ struct list_head unused;
+ int available_units;
+ int used_units;
+ int threshold;
+};
+
+int mlx5_fs_bulk_init(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk,
+ int bulk_len);
+void mlx5_fs_bulk_cleanup(struct mlx5_fs_bulk *fs_bulk);
+int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk);
+
+void mlx5_fs_pool_init(struct mlx5_fs_pool *pool, struct mlx5_core_dev *dev,
+ const struct mlx5_fs_pool_ops *ops, void *pool_ctx);
+void mlx5_fs_pool_cleanup(struct mlx5_fs_pool *pool);
+int mlx5_fs_pool_acquire_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+int mlx5_fs_pool_release_index(struct mlx5_fs_pool *fs_pool,
+ struct mlx5_fs_pool_index *pool_index);
+
+#endif /* __MLX5_FS_POOL_H__ */
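
A client embeds struct mlx5_fs_bulk in its own bulk type and supplies the three ops, as the counter pool in fs_counters.c does. A stripped-down sketch for a hypothetical resource (all "foo" names and the bulk size are illustrative):

	struct foo_bulk {
		struct mlx5_fs_bulk fs_bulk;
		u32 base_id;
	};

	static struct mlx5_fs_bulk *foo_bulk_create(struct mlx5_core_dev *dev,
						    void *pool_ctx)
	{
		struct foo_bulk *bulk = kvzalloc(sizeof(*bulk), GFP_KERNEL);

		if (!bulk)
			return NULL;
		if (mlx5_fs_bulk_init(dev, &bulk->fs_bulk, 64)) { /* 64: arbitrary */
			kvfree(bulk);
			return NULL;
		}
		/* ... allocate 64 HW resources, record bulk->base_id ... */
		return &bulk->fs_bulk;
	}

	static int foo_bulk_destroy(struct mlx5_core_dev *dev,
				    struct mlx5_fs_bulk *fs_bulk)
	{
		struct foo_bulk *bulk = container_of(fs_bulk, struct foo_bulk,
						     fs_bulk);

		/* ... free the HW resources ... */
		mlx5_fs_bulk_cleanup(fs_bulk);
		kvfree(bulk);
		return 0;
	}

	static void foo_update_threshold(struct mlx5_fs_pool *pool)
	{
		pool->threshold = pool->used_units / 10; /* keep ~10% spare */
	}

	static const struct mlx5_fs_pool_ops foo_pool_ops = {
		.bulk_create = foo_bulk_create,
		.bulk_destroy = foo_bulk_destroy,
		.update_threshold = foo_update_threshold,
	};
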
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 76ad46bf477d..b253d1673398 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -281,6 +281,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, shampo)) {
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_SHAMPO, HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
index f4b777d4e108..62b6faa4276a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -105,20 +105,20 @@ static int mapping_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
bool hash = false;
bool lag_active;
+ int i, idx = 0;
int num_ports;
- int i;
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active) {
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, ports,
&num_ports);
hash = true;
} else {
- for (i = 0; i < ldev->ports; i++)
- ports[i] = ldev->v2p_map[i];
+ mlx5_ldev_for_each(i, 0, ldev)
+ ports[idx++] = ldev->v2p_map[i];
num_ports = ldev->ports;
}
}
@@ -144,11 +144,8 @@ static int members_show(struct seq_file *file, void *priv)
ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
+ mlx5_ldev_for_each(i, 0, ldev)
seq_printf(file, "%s\n", dev_name(ldev->pf[i].dev->device));
- }
mutex_unlock(&ldev->lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 7f68468c2e75..cea5aa314f6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -43,10 +43,6 @@
#include "mp.h"
#include "mpesw.h"
-enum {
- MLX5_LAG_EGRESS_PORT_1 = 1,
- MLX5_LAG_EGRESS_PORT_2,
-};
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@@ -72,7 +68,7 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
int num_enabled;
int idx;
- mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev, enabled_ports,
&num_enabled);
for (idx = 0; idx < num_enabled; idx++)
active_port |= BIT_MASK(enabled_ports[idx]);
@@ -80,23 +76,30 @@ static u8 lag_active_port_bits(struct mlx5_lag *ldev)
return active_port;
}
-static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
- unsigned long flags)
+static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
+ int mode, unsigned long flags)
{
bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
&flags);
int port_sel_mode = get_port_sel_mode(mode, flags);
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
+ u8 *ports = ldev->v2p_map;
+ int idx0, idx1;
void *lag_ctx;
lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
switch (port_sel_mode) {
case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY:
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
break;
case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT:
if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass))
@@ -113,17 +116,23 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
return mlx5_cmd_exec_in(dev, create_lag, in);
}
-static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports,
+static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, struct mlx5_lag *ldev,
u8 *ports)
{
u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+ int idx0, idx1;
+
+ idx0 = mlx5_lag_get_dev_index_by_seq(ldev, 0);
+ idx1 = mlx5_lag_get_dev_index_by_seq(ldev, 1);
+ if (idx0 < 0 || idx1 < 0)
+ return -EINVAL;
MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
MLX5_SET(modify_lag_in, in, field_select, 0x1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[idx0]);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[idx1]);
return mlx5_cmd_exec_in(dev, modify_lag, in);
}
@@ -148,33 +157,31 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
-static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
+static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_disabled)
{
int i;
*num_disabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (!tracker->netdev_state[i].tx_enabled ||
!tracker->netdev_state[i].link_up)
ports[(*num_disabled)++] = i;
- }
}
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled)
{
int i;
*num_enabled = 0;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev)
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
ports[(*num_enabled)++] = i;
- }
if (*num_enabled == 0)
- mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled);
+ mlx5_infer_tx_disabled(tracker, ldev, ports, num_enabled);
}
static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
@@ -192,7 +199,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
int j;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
- mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports,
+ mlx5_infer_tx_enabled(tracker, ldev, enabled_ports,
&num_enabled);
for (i = 0; i < num_enabled; i++) {
err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1);
@@ -203,7 +210,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
buf[written - 2] = 0;
mlx5_core_info(dev, "lag map active ports: %s\n", buf);
} else {
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
err = scnprintf(buf + written, 10,
@@ -286,13 +293,55 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].netdev == ndev)
return i;
return -ENOENT;
}
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return -ENOENT;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (num == seq)
+ return i;
+ num++;
+ }
+ return -ENOENT;
+}
+
+int mlx5_lag_num_devs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev) {
+ (void)i;
+ num++;
+ }
+ return num;
+}
+
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev)
+{
+ int i, num = 0;
+
+ if (!ldev)
+ return 0;
+
+ mlx5_ldev_for_each(i, 0, ldev)
+ if (ldev->pf[i].netdev)
+ num++;
+ return num;
+}
+
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
return ldev->mode == MLX5_LAG_MODE_ROCE;
@@ -310,7 +359,7 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
* with mapping that points to active ports.
*/
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
- u8 num_ports,
+ struct mlx5_lag *ldev,
u8 buckets,
u8 *ports)
{
@@ -323,7 +372,7 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
int i;
int j;
- for (i = 0; i < num_ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
enabled[enabled_ports_num++] = i;
@@ -334,15 +383,16 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
/* Use native mapping by default where each port's buckets
* point the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc
*/
- for (i = 0; i < num_ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < buckets; j++) {
idx = i * buckets + j;
- ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i;
+ ports[idx] = i + 1;
}
+ }
/* If all ports are disabled/enabled keep native mapping */
- if (enabled_ports_num == num_ports ||
- disabled_ports_num == num_ports)
+ if (enabled_ports_num == ldev->ports ||
+ disabled_ports_num == ldev->ports)
return;
/* Go over the disabled ports and for each assign a random active port */
@@ -358,7 +408,7 @@ static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->pf[i].has_drop)
return true;
return false;
@@ -368,7 +418,7 @@ static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (!ldev->pf[i].has_drop)
continue;
@@ -396,7 +446,7 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
if (!ldev->tracker.has_inactive)
return;
- mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);
+ mlx5_infer_tx_disabled(tracker, ldev, disabled_ports, &num_disabled);
for (i = 0; i < num_disabled; i++) {
disabled_index = disabled_ports[i];
@@ -428,10 +478,15 @@ static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
u8 active_ports;
int ret;
+ if (idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[idx].dev;
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
ret = mlx5_lag_port_sel_modify(ldev, ports);
if (ret ||
@@ -442,7 +497,7 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
return mlx5_cmd_modify_active_port(dev0, active_ports);
}
- return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
+ return mlx5_cmd_modify_lag(dev0, ldev, ports);
}
static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev *dev)
@@ -450,7 +505,7 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
struct net_device *ndev = NULL;
struct mlx5_lag *ldev;
unsigned long flags;
- int i;
+ int i, last_idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
@@ -458,11 +513,15 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
if (!ldev)
goto unlock;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (ldev->tracker.netdev_state[i].tx_enabled)
ndev = ldev->pf[i].netdev;
- if (!ndev)
- ndev = ldev->pf[ldev->ports - 1].netdev;
+ if (!ndev) {
+ last_idx = mlx5_lag_get_dev_index_by_seq(ldev, ldev->ports - 1);
+ if (last_idx < 0)
+ goto unlock;
+ ndev = ldev->pf[last_idx].netdev;
+ }
if (ndev)
dev_hold(ndev);
@@ -476,16 +535,21 @@ unlock:
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {};
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
int idx;
int err;
int i;
int j;
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports);
+ if (first_idx < 0)
+ return;
+
+ dev0 = ldev->pf[first_idx].dev;
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ports);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ports[idx] == ldev->v2p_map[idx])
@@ -523,8 +587,13 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
unsigned long *flags)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
+
+ if (first_idx < 0)
+ return -EINVAL;
+ dev0 = ldev->pf[first_idx].dev;
if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
if (ldev->ports > 2)
return -EINVAL;
@@ -544,11 +613,13 @@ static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
enum mlx5_lag_mode mode,
unsigned long *flags)
{
- struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct lag_func *dev0;
- if (mode == MLX5_LAG_MODE_MPESW)
+ if (first_idx < 0 || mode == MLX5_LAG_MODE_MPESW)
return;
+ dev0 = &ldev->pf[first_idx];
if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
if (ldev->ports > 2)
@@ -593,12 +664,18 @@ char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
+ int i, j;
int err;
- int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
+ mlx5_ldev_for_each(i, first_idx + 1, ldev) {
struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;
err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
@@ -608,9 +685,9 @@ static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
}
return 0;
err:
- for (; i > MLX5_LAG_P1; i--)
+ mlx5_ldev_for_each_reverse(j, i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
- ldev->pf[i].dev->priv.eswitch);
+ ldev->pf[j].dev->priv.eswitch);
return err;
}
@@ -620,16 +697,21 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
unsigned long flags)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
+ struct mlx5_core_dev *dev0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
if (tracker)
mlx5_lag_print_mapping(dev0, ldev, tracker, flags);
mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));
- err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);
+ err = mlx5_cmd_create_lag(dev0, ldev, mode, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -661,17 +743,22 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
enum mlx5_lag_mode mode,
bool shared_fdb)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev0;
unsigned long flags = 0;
int err;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags);
if (err)
return err;
if (mode != MLX5_LAG_MODE_MPESW) {
- mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map);
+ mlx5_infer_tx_affinity_mapping(tracker, ldev, ldev->buckets, ldev->v2p_map);
if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
ldev->v2p_map);
@@ -709,20 +796,26 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
unsigned long flags = ldev->mode_flags;
+ struct mlx5_eswitch *master_esw;
+ struct mlx5_core_dev *dev0;
int err;
int i;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev0 = ldev->pf[first_idx].dev;
+ master_esw = dev0->priv.eswitch;
ldev->mode = MLX5_LAG_MODE_NONE;
ldev->mode_flags = 0;
mlx5_lag_mp_reset(ldev);
if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
ldev->pf[i].dev->priv.eswitch);
clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
@@ -754,6 +847,7 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
#ifdef CONFIG_MLX5_ESWITCH
struct mlx5_core_dev *dev;
u8 mode;
@@ -761,30 +855,29 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
bool roce_support;
int i;
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].dev)
- return false;
+ if (first_idx < 0 || mlx5_lag_num_devs(ldev) != ldev->ports)
+ return false;
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev))
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[first_idx].dev;
mode = mlx5_eswitch_mode(dev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
#else
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
return false;
#endif
- roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
- for (i = 1; i < ldev->ports; i++)
+ roce_support = mlx5_get_roce_state(ldev->pf[first_idx].dev);
+ mlx5_ldev_for_each(i, first_idx + 1, ldev)
if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
return false;
@@ -795,10 +888,7 @@ void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -812,10 +902,7 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
int i;
- for (i = 0; i < ldev->ports; i++) {
- if (!ldev->pf[i].dev)
- continue;
-
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].dev->priv.flags &
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
continue;
@@ -828,11 +915,16 @@ void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
void mlx5_disable_lag(struct mlx5_lag *ldev)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
bool roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
roce_lag = __mlx5_lag_is_roce(ldev);
if (shared_fdb) {
@@ -842,7 +934,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
}
- for (i = 1; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, idx + 1, ldev)
mlx5_nic_vport_disable_roce(ldev->pf[i].dev);
}
@@ -854,17 +946,21 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
mlx5_lag_add_devices(ldev);
if (shared_fdb)
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev;
int i;
- for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ if (idx < 0)
+ return false;
+
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
dev = ldev->pf[i].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
@@ -876,7 +972,7 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
return false;
}
- dev = ldev->pf[MLX5_LAG_P1].dev;
+ dev = ldev->pf[idx].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
mlx5_esw_offloads_devcom_is_ready(dev->priv.eswitch) &&
@@ -892,11 +988,11 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
bool roce_lag = true;
int i;
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev);
#ifdef CONFIG_MLX5_ESWITCH
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
#endif
@@ -917,13 +1013,18 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct lag_tracker tracker = { };
+ struct mlx5_core_dev *dev0;
struct net_device *ndev;
bool do_bond, roce_lag;
int err;
int i;
+ if (idx < 0)
+ return;
+
+ dev0 = ldev->pf[idx].dev;
if (!mlx5_lag_is_ready(ldev)) {
do_bond = false;
} else {
@@ -956,7 +1057,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
} else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 1; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, idx + 1, ldev) {
if (mlx5_get_roce_state(ldev->pf[i].dev))
mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
}
@@ -966,7 +1067,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
break;
@@ -977,7 +1078,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_rescan_drivers_locked(dev0);
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
@@ -1010,12 +1111,9 @@ struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev)
int i;
mutex_lock(&ldev->lock);
- for (i = 0; i < ldev->ports; i++) {
- if (ldev->pf[i].dev) {
- devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
- break;
- }
- }
+ i = mlx5_get_next_ldev_func(ldev, 0);
+ if (i < MLX5_MAX_PORTS)
+ devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
mutex_unlock(&ldev->lock);
return devcom;
}
@@ -1068,7 +1166,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
u8 bond_status = 0;
int num_slaves = 0;
int changed = 0;
- int idx;
+ int i, idx = -1;
if (!netif_is_lag_master(upper))
return 0;
@@ -1083,8 +1181,13 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
*/
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
- idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx >= 0) {
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == ndev_tmp) {
+ idx++;
+ break;
+ }
+ }
+ if (i < MLX5_MAX_PORTS) {
slave = bond_slave_get_rcu(ndev_tmp);
if (slave)
has_inactive |= bond_is_slave_inactive(slave);
@@ -1234,15 +1337,12 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
}
static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev,
- struct net_device *netdev)
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
unsigned int fn = mlx5_get_dev_index(dev);
unsigned long flags;
- if (fn >= ldev->ports)
- return;
-
spin_lock_irqsave(&lag_lock, flags);
ldev->pf[fn].netdev = netdev;
ldev->tracker.netdev_state[fn].link_up = 0;
@@ -1257,7 +1357,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
int i;
spin_lock_irqsave(&lag_lock, flags);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL;
break;
@@ -1267,13 +1367,10 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev)
+ struct mlx5_core_dev *dev)
{
unsigned int fn = mlx5_get_dev_index(dev);
- if (fn >= ldev->ports)
- return;
-
ldev->pf[fn].dev = dev;
dev->priv.lag = ldev;
}
@@ -1281,16 +1378,13 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev)
{
- int i;
-
- for (i = 0; i < ldev->ports; i++)
- if (ldev->pf[i].dev == dev)
- break;
+ int fn;
- if (i == ldev->ports)
+ fn = mlx5_get_dev_index(dev);
+ if (ldev->pf[fn].dev != dev)
return;
- ldev->pf[i].dev = NULL;
+ ldev->pf[fn].dev = NULL;
dev->priv.lag = NULL;
}
@@ -1398,7 +1492,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
struct net_device *netdev)
{
struct mlx5_lag *ldev;
- int i;
+ int num = 0;
ldev = mlx5_lag_dev(dev);
if (!ldev)
@@ -1406,17 +1500,33 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
mutex_lock(&ldev->lock);
mlx5_ldev_add_netdev(ldev, dev, netdev);
-
- for (i = 0; i < ldev->ports; i++)
- if (!ldev->pf[i].netdev)
- break;
-
- if (i >= ldev->ports)
+ num = mlx5_lag_num_netdevs(ldev);
+ if (num >= ldev->ports)
set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
mutex_unlock(&ldev->lock);
mlx5_queue_bond_work(ldev, 0);
}
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx)
+{
+ int i;
+
+ for (i = start_idx; i >= end_idx; i--)
+ if (ldev->pf[i].dev)
+ return i;
+ return -1;
+}
+
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx)
+{
+ int i;
+
+ for (i = start_idx; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ return i;
+ return MLX5_MAX_PORTS;
+}
+
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
@@ -1467,12 +1577,13 @@ bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
unsigned long flags;
- bool res;
+ bool res = false;
+ int idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_active(ldev) &&
- dev == ldev->pf[MLX5_LAG_P1].dev;
+ idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ res = ldev && __mlx5_lag_is_active(ldev) && idx >= 0 && dev == ldev->pf[idx].dev;
spin_unlock_irqrestore(&lag_lock, flags);
return res;
@@ -1555,7 +1666,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
if (ldev->pf[i].netdev == slave) {
port = i;
break;
@@ -1594,13 +1705,13 @@ struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int
if (!ldev)
goto unlock;
- if (*i == ldev->ports)
+ if (*i == MLX5_MAX_PORTS)
goto unlock;
- for (idx = *i; idx < ldev->ports; idx++)
+ mlx5_ldev_for_each(idx, *i, ldev)
if (ldev->pf[idx].dev != dev)
break;
- if (idx == ldev->ports) {
+ if (idx == MLX5_MAX_PORTS) {
*i = idx;
goto unlock;
}
@@ -1621,10 +1732,10 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev **mdev;
+ int ret = 0, i, j, idx = 0;
struct mlx5_lag *ldev;
unsigned long flags;
int num_ports;
- int ret, i, j;
void *out;
out = kvzalloc(outlen, GFP_KERNEL);
@@ -1643,8 +1754,8 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = ldev->ports;
- for (i = 0; i < ldev->ports; i++)
- mdev[i] = ldev->pf[i].dev;
+ mlx5_ldev_for_each(i, 0, ldev)
+ mdev[idx++] = ldev->pf[i].dev;
} else {
num_ports = 1;
mdev[MLX5_LAG_P1] = dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 50fcb1eee574..01cf72366947 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -103,7 +103,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
-void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
+void mlx5_infer_tx_enabled(struct lag_tracker *tracker, struct mlx5_lag *ldev,
u8 *ports, int *num_enabled);
void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
@@ -119,9 +119,24 @@ static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+ mlx5_get_dev_index(dev) >= MLX5_MAX_PORTS ||
MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
return false;
return true;
}
+#define mlx5_ldev_for_each(i, start_index, ldev) \
+ for (int tmp = start_index; tmp = mlx5_get_next_ldev_func(ldev, tmp), \
+ i = tmp, tmp < MLX5_MAX_PORTS; tmp++)
+
+#define mlx5_ldev_for_each_reverse(i, start_index, end_index, ldev) \
+ for (int tmp = start_index, tmp1 = end_index; \
+ tmp = mlx5_get_pre_ldev_func(ldev, tmp, tmp1), \
+ i = tmp, tmp >= tmp1; tmp--)
+
+int mlx5_get_pre_ldev_func(struct mlx5_lag *ldev, int start_idx, int end_idx);
+int mlx5_get_next_ldev_func(struct mlx5_lag *ldev, int start_idx);
+int mlx5_lag_get_dev_index_by_seq(struct mlx5_lag *ldev, int seq);
+int mlx5_lag_num_devs(struct mlx5_lag *ldev);
+int mlx5_lag_num_netdevs(struct mlx5_lag *ldev);
#endif /* __MLX5_LAG_H__ */
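
mlx5_ldev_for_each() visits only populated pf[] slots, in ascending index order below MLX5_MAX_PORTS, which is why the lag.c callers above can drop the "for (i = 0; i < ldev->ports; i++)" loops and their NULL checks. A minimal sketch of iterating the members:

	/* Sketch: i takes each index with a populated ldev->pf[i].dev;
	 * empty slots are skipped by mlx5_get_next_ldev_func(). */
	int i;

	mlx5_ldev_for_each(i, 0, ldev)
		mlx5_core_info(ldev->pf[i].dev, "LAG member at index %d\n", i);
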
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index b1aa494c76ba..aee17fcf3b36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -17,7 +17,10 @@ static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
#define MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS 2
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!mlx5_lag_is_ready(ldev))
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
+
+ if (idx0 < 0 || idx1 < 0 || !mlx5_lag_is_ready(ldev))
return false;
if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
@@ -26,8 +29,8 @@ static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
if (ldev->ports > MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS)
return false;
- return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
- ldev->pf[MLX5_LAG_P2].dev);
+ return mlx5_esw_multipath_prereq(ldev->pf[idx0].dev,
+ ldev->pf[idx1].dev);
}
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
@@ -50,43 +53,45 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
enum mlx5_lag_port_affinity port)
{
+ int idx0 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ int idx1 = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P2);
struct lag_tracker tracker = {};
- if (!__mlx5_lag_is_multipath(ldev))
+ if (idx0 < 0 || idx1 < 0 || !__mlx5_lag_is_multipath(ldev))
return;
switch (port) {
case MLX5_LAG_NORMAL_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
case MLX5_LAG_P1_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P1].link_up = true;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P2].link_up = false;
+ tracker.netdev_state[idx0].tx_enabled = true;
+ tracker.netdev_state[idx0].link_up = true;
+ tracker.netdev_state[idx1].tx_enabled = false;
+ tracker.netdev_state[idx1].link_up = false;
break;
case MLX5_LAG_P2_AFFINITY:
- tracker.netdev_state[MLX5_LAG_P1].tx_enabled = false;
- tracker.netdev_state[MLX5_LAG_P1].link_up = false;
- tracker.netdev_state[MLX5_LAG_P2].tx_enabled = true;
- tracker.netdev_state[MLX5_LAG_P2].link_up = true;
+ tracker.netdev_state[idx0].tx_enabled = false;
+ tracker.netdev_state[idx0].link_up = false;
+ tracker.netdev_state[idx1].tx_enabled = true;
+ tracker.netdev_state[idx1].link_up = true;
break;
default:
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx0].dev,
"Invalid affinity port %d", port);
return;
}
- if (tracker.netdev_state[MLX5_LAG_P1].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P1].dev->priv.events,
+ if (tracker.netdev_state[idx0].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx0].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
- if (tracker.netdev_state[MLX5_LAG_P2].tx_enabled)
- mlx5_notifier_call_chain(ldev->pf[MLX5_LAG_P2].dev->priv.events,
+ if (tracker.netdev_state[idx1].tx_enabled)
+ mlx5_notifier_call_chain(ldev->pf[idx1].dev->priv.events,
MLX5_DEV_EVENT_PORT_AFFINITY,
(void *)0);
@@ -150,9 +155,14 @@ mlx5_lag_get_next_fib_dev(struct mlx5_lag *ldev,
static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
struct fib_entry_notifier_info *fen_info)
{
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct net_device *nh_dev0, *nh_dev1;
struct fib_info *fi = fen_info->fi;
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
+
+ if (idx < 0)
+ return;
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
@@ -179,17 +189,19 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
}
if (nh_dev0 == nh_dev1) {
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ mlx5_core_warn(ldev->pf[idx].dev,
"Multipath offload doesn't support routes with multiple nexthops of the same device");
return;
}
if (!nh_dev1) {
if (__mlx5_lag_is_active(ldev)) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev0);
-
- i++;
- mlx5_lag_set_port_affinity(ldev, i);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ dev_idx++;
+ if (ldev->pf[i].netdev == nh_dev0)
+ break;
+ }
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len);
}
@@ -214,6 +226,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
struct fib_info *fi)
{
struct lag_mp *mp = &ldev->lag_mp;
+ int i, dev_idx = 0;
/* Check the nh event is related to the route */
if (!mp->fib.mfi || mp->fib.mfi != fi)
@@ -221,11 +234,15 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
/* nh added/removed */
if (event == FIB_EVENT_NH_DEL) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->fib_nh_dev);
+ mlx5_ldev_for_each(i, 0, ldev) {
+ if (ldev->pf[i].netdev == fib_nh->fib_nh_dev)
+ break;
+ dev_idx++;
+ }
- if (i >= 0) {
- i = (i + 1) % 2 + 1; /* peer port */
- mlx5_lag_set_port_affinity(ldev, i);
+ if (dev_idx >= 0) {
+ dev_idx = (dev_idx + 1) % 2 + 1; /* peer port */
+ mlx5_lag_set_port_affinity(ldev, dev_idx);
}
} else if (event == FIB_EVENT_NH_ADD &&
fib_info_num_path(fi) == 2) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 571ea26edd0c..ffac0bd6c895 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -15,7 +15,7 @@ static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
u32 pf_metadata;
int i;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = ldev->lag_mpesw.pf_metadata[i];
@@ -36,7 +36,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
u32 pf_metadata;
int i, err;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
esw = dev->priv.eswitch;
pf_metadata = mlx5_esw_match_metadata_alloc(esw);
@@ -52,7 +52,7 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
goto err_metadata;
}
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
dev = ldev->pf[i].dev;
mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
(void *)0);
@@ -68,13 +68,15 @@ err_metadata:
#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev0;
int err;
int i;
- if (ldev->mode != MLX5_LAG_MODE_NONE)
+ if (idx < 0 || ldev->mode != MLX5_LAG_MODE_NONE)
return -EINVAL;
+ dev0 = ldev->pf[idx].dev;
if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
return -EOPNOTSUPP;
@@ -98,7 +100,7 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
goto err_rescan_drivers;
@@ -112,7 +114,7 @@ err_rescan_drivers:
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
- for (i = 0; i < ldev->ports; i++)
+ mlx5_ldev_for_each(i, 0, ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index 39e80704b1c4..bde79cac33a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -39,15 +39,18 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer,
u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_namespace *ns;
- int err, i;
- int idx;
- int j;
+ struct mlx5_core_dev *dev;
+ int err, i, j, k, idx;
+
+ if (first_idx < 0)
+ return -EINVAL;
+ dev = ldev->pf[first_idx].dev;
ft_attr.max_fte = ldev->ports * ldev->buckets;
ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
@@ -74,7 +77,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.flags |= FLOW_ACT_NO_APPEND;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
u8 affinity;
@@ -88,13 +91,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
&dest, 1);
if (IS_ERR(lag_definer->rules[idx])) {
err = PTR_ERR(lag_definer->rules[idx]);
- do {
+ mlx5_ldev_for_each_reverse(k, i, 0, ldev) {
while (j--) {
- idx = i * ldev->buckets + j;
+ idx = k * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
}
j = ldev->buckets;
- } while (i--);
+ };
goto destroy_fg;
}
}
@@ -295,11 +298,16 @@ static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
enum mlx5_traffic_types tt, bool tunnel, u8 *ports)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_definer *lag_definer;
+ struct mlx5_core_dev *dev;
u32 *match_definer_mask;
int format_id, err;
+ if (first_idx < 0)
+ return ERR_PTR(-EINVAL);
+
+ dev = ldev->pf[first_idx].dev;
lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
if (!lag_definer)
return ERR_PTR(-ENOMEM);
@@ -341,12 +349,15 @@ free_lag_definer:
static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
- int idx;
- int i;
- int j;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
+ struct mlx5_core_dev *dev;
+ int idx, i, j;
+
+ if (first_idx < 0)
+ return;
- for (i = 0; i < ldev->ports; i++) {
+ dev = ldev->pf[first_idx].dev;
+ mlx5_ldev_for_each(i, first_idx, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
@@ -501,10 +512,15 @@ static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+ if (first_idx < 0)
+ return -EINVAL;
+
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->outer.ttc);
@@ -512,10 +528,15 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct ttc_params ttc_params = {};
+ struct mlx5_core_dev *dev;
+
+ if (first_idx < 0)
+ return -EINVAL;
+ dev = ldev->pf[first_idx].dev;
mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
return PTR_ERR_OR_ZERO(port_sel->inner.ttc);
@@ -567,7 +588,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- for (i = 0; i < ldev->ports; i++) {
+ mlx5_ldev_for_each(i, 0, ldev) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
if (ldev->v2p_map[idx] == ports[idx])
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 4822d01123b4..d61a1a9297c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -322,17 +322,16 @@ static void mlx5_pps_out(struct work_struct *work)
}
}
-static void mlx5_timestamp_overflow(struct work_struct *work)
+static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info)
{
- struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_core_dev *mdev;
struct mlx5_timer *timer;
struct mlx5_clock *clock;
unsigned long flags;
- timer = container_of(dwork, struct mlx5_timer, overflow_work);
- clock = container_of(timer, struct mlx5_clock, timer);
+ clock = container_of(ptp_info, struct mlx5_clock, ptp_info);
mdev = container_of(clock, struct mlx5_core_dev, clock);
+ timer = &clock->timer;
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto out;
@@ -343,7 +342,7 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
write_sequnlock_irqrestore(&clock->lock, flags);
out:
- schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
+ return timer->overflow_period;
}
static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
@@ -517,6 +516,7 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
timer->cycles.mult = mult;
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
+ ptp_schedule_worker(clock->ptp, timer->overflow_period);
return 0;
}
@@ -852,6 +852,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.settime64 = mlx5_ptp_settime,
.enable = NULL,
.verify = NULL,
+ .do_aux_work = mlx5_timestamp_overflow,
};
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
@@ -1052,12 +1053,11 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock)
do_div(ns, NSEC_PER_SEC / HZ);
timer->overflow_period = ns;
- INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
- if (timer->overflow_period)
- schedule_delayed_work(&timer->overflow_work, 0);
- else
+ if (!timer->overflow_period) {
+ timer->overflow_period = HZ;
mlx5_core_warn(mdev,
- "invalid overflow period, overflow_work is not scheduled\n");
+ "invalid overflow period, overflow_work is scheduled once per second\n");
+ }
if (clock_info)
clock_info->overflow_period = timer->overflow_period;
@@ -1172,6 +1172,9 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
mlx5_eq_notifier_register(mdev, &clock->pps_nb);
+
+ if (clock->ptp)
+ ptp_schedule_worker(clock->ptp, 0);
}
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
@@ -1188,7 +1191,6 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
}
cancel_work_sync(&clock->pps_info.out_work);
- cancel_delayed_work_sync(&clock->timer.overflow_work);
if (mdev->clock_info) {
free_page((unsigned long)mdev->clock_info);
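
For context, a minimal sketch of the PTP aux-work pattern this hunk adopts (names are illustrative): the .do_aux_work callback returns the delay in jiffies until the PTP core's kthread should invoke it again, which replaces the driver-private delayed work entirely.

    static long my_overflow_check(struct ptp_clock_info *info)
    {
            /* fold the free-running counter into the timecounter here */
            return HZ;      /* ask the PTP core to run us again in ~1s */
    }

    static const struct ptp_clock_info my_ptp_info = {
            .do_aux_work = my_overflow_check,
            /* ... */
    };

    /* kick the worker once after clock registration: */
    ptp_schedule_worker(ptp_clock, 0);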
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
index 4a078113e292..762d55ba9e51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
@@ -497,7 +497,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&dest, 0, sizeof(struct mlx5_flow_destination));
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
+ dest.counter = tx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
@@ -519,7 +519,7 @@ static int macsec_fs_tx_create(struct mlx5_macsec_fs *macsec_fs)
flow_act.flags = FLOW_ACT_NO_APPEND;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
+ dest.counter = tx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1200,7 +1200,7 @@ static int macsec_fs_rx_create_check_decap_rule(struct mlx5_macsec_fs *macsec_fs
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
roce_dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- roce_dest[dstn].counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
+ roce_dest[dstn].counter = rx_tables->check_rule_counter;
rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, roce_dest, dstn + 1);
if (IS_ERR(rule)) {
@@ -1592,7 +1592,7 @@ static int macsec_fs_rx_create(struct mlx5_macsec_fs *macsec_fs)
memset(&flow_act, 0, sizeof(flow_act));
dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
+ dest.counter = rx_tables->check_miss_rule_counter;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
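
The counter-destination change follows the mlx5 flow-steering API update in which destinations carry the counter object itself rather than its id. A hedged sketch of the caller-side difference (fc is an assumed counter from mlx5_fc_create()):

    struct mlx5_flow_destination dest = {};

    dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
    /* old: dest.counter_id = mlx5_fc_id(fc); */
    dest.counter = fc;      /* struct mlx5_fc *, resolved by the core */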
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 220a9ac75c8b..ec956c4bcebd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -368,6 +368,10 @@ int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_ty
u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
int err;
+ if (WARN_ON(!dev->caps.hca[cap_type]))
+ /* this cap_type must be added to mlx5_hca_caps_alloc() */
+ return -EINVAL;
+
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
@@ -664,6 +668,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
ilog2(max_uc_list));
+ /* enable absolute native port num */
+ if (MLX5_CAP_GEN_MAX(dev, abs_native_port_num))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, abs_native_port_num, 1);
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -941,9 +949,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
mlx5_pci_vsc_init(dev);
- err = pci_enable_ptm(pdev, NULL);
- if (err)
- mlx5_core_info(dev, "PTM is not supported by PCIe\n");
+ pci_enable_ptm(pdev, NULL);
return 0;
@@ -1788,6 +1794,7 @@ static const int types[] = {
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
+ MLX5_CAP_SHAMPO,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index e393391966e0..39a209b9b684 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -56,6 +56,8 @@ bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierar
return cap & TSAR_TYPE_CAP_MASK_ROUND_ROBIN;
case TSAR_ELEMENT_TSAR_TYPE_ETS:
return cap & TSAR_TYPE_CAP_MASK_ETS;
+ case TSAR_ELEMENT_TSAR_TYPE_TC_ARB:
+ return cap & TSAR_TYPE_CAP_MASK_TC_ARB;
}
return false;
@@ -87,6 +89,8 @@ bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hie
return cap & ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
case SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP:
return cap & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT:
+ return cap & ELEMENT_TYPE_CAP_MASK_RATE_LIMIT;
}
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index a897cdc60fdb..b5332c54d4fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -11,31 +11,29 @@
/* This is the longest supported action sequence for FDB table:
* DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term.
*/
-static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = {
- [MLX5HWS_TABLE_TYPE_FDB] = {
- BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
- BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
- BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
- BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
- BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
- BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
- BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
- BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
- BIT(MLX5HWS_ACTION_TYP_CTR),
- BIT(MLX5HWS_ACTION_TYP_TAG),
- BIT(MLX5HWS_ACTION_TYP_ASO_METER),
- BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
- BIT(MLX5HWS_ACTION_TYP_TBL) |
- BIT(MLX5HWS_ACTION_TYP_VPORT) |
- BIT(MLX5HWS_ACTION_TYP_DROP) |
- BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
- BIT(MLX5HWS_ACTION_TYP_RANGE) |
- BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
- BIT(MLX5HWS_ACTION_TYP_LAST),
- },
+static const u32 action_order_arr[MLX5HWS_ACTION_TYP_MAX] = {
+ BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
+ BIT(MLX5HWS_ACTION_TYP_CTR),
+ BIT(MLX5HWS_ACTION_TYP_TAG),
+ BIT(MLX5HWS_ACTION_TYP_ASO_METER),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_TBL) |
+ BIT(MLX5HWS_ACTION_TYP_VPORT) |
+ BIT(MLX5HWS_ACTION_TYP_DROP) |
+ BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
+ BIT(MLX5HWS_ACTION_TYP_RANGE) |
+ BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
+ BIT(MLX5HWS_ACTION_TYP_LAST),
};
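
The order array collapses from a per-table-type matrix to a single FDB list, since FDB is the only table type HWS supports; the validation itself is unchanged. A simplified sketch, condensed under assumption from mlx5hws_action_check_combo(), of how a user action sequence is matched against the slots left to right:

    static bool hws_order_ok(const u32 *order,
                             const enum mlx5hws_action_type *user, int n)
    {
            int oi = 0, ui = 0;

            while (ui < n && oi < MLX5HWS_ACTION_TYP_MAX) {
                    if (BIT(user[ui]) & order[oi])
                            ui++;   /* this action fits the current slot */
                    oi++;           /* otherwise try the next slot */
            }
            return ui == n;         /* every action placed, in order */
    }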
static const char * const mlx5hws_action_type_str[] = {
@@ -83,8 +81,8 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
int ret;
mutex_lock(&ctx->ctrl_lock);
- if (ctx->common_res[tbl_type].shared_stc[stc_type]) {
- ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++;
+ if (ctx->common_res.shared_stc[stc_type]) {
+ ctx->common_res.shared_stc[stc_type]->refcount++;
mutex_unlock(&ctx->ctrl_lock);
return 0;
}
@@ -124,8 +122,8 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
goto free_shared_stc;
}
- ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;
- ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1;
+ ctx->common_res.shared_stc[stc_type] = shared_stc;
+ ctx->common_res.shared_stc[stc_type]->refcount = 1;
mutex_unlock(&ctx->ctrl_lock);
@@ -178,16 +176,16 @@ static void hws_action_put_shared_stc(struct mlx5hws_action *action,
}
mutex_lock(&ctx->ctrl_lock);
- if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) {
+ if (--ctx->common_res.shared_stc[stc_type]->refcount) {
mutex_unlock(&ctx->ctrl_lock);
return;
}
- shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];
+ shared_stc = ctx->common_res.shared_stc[stc_type];
mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk);
kfree(shared_stc);
- ctx->common_res[tbl_type].shared_stc[stc_type] = NULL;
+ ctx->common_res.shared_stc[stc_type] = NULL;
mutex_unlock(&ctx->ctrl_lock);
}
@@ -206,10 +204,10 @@ bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
enum mlx5hws_action_type *user_actions,
enum mlx5hws_table_type table_type)
{
- const u32 *order_arr = action_order_arr[table_type];
+ const u32 *order_arr = action_order_arr;
+ bool valid_combo;
u8 order_idx = 0;
u8 user_idx = 0;
- bool valid_combo;
if (table_type >= MLX5HWS_TABLE_TYPE_MAX) {
mlx5hws_err(ctx, "Invalid table_type %d", table_type);
@@ -321,8 +319,8 @@ int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
__must_hold(&ctx->ctrl_lock)
{
struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0};
- struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
bool use_fixup;
u32 obj_0_id;
int ret;
@@ -387,8 +385,8 @@ void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
struct mlx5hws_pool_chunk *stc)
__must_hold(&ctx->ctrl_lock)
{
- struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
u32 obj_id;
/* Modify the STC not to point to an object */
@@ -473,6 +471,7 @@ static void hws_action_fill_stc_attr(struct mlx5hws_action *action,
break;
case MLX5HWS_ACTION_TYP_TBL:
case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_SAMPLER:
attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
attr->dest_table_id = obj_id;
@@ -561,7 +560,7 @@ hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id)
if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) {
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
if (ret)
goto out_err;
}
@@ -585,7 +584,7 @@ hws_action_destroy_stcs(struct mlx5hws_action *action)
if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB)
mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
mutex_unlock(&ctx->ctrl_lock);
}
@@ -1639,8 +1638,8 @@ hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
/* STC is a single resource (obj_id), use any STC for the ID */
- stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB];
- default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc;
+ stc_pool = ctx->stc_pool;
+ default_stc = ctx->common_res.default_stc;
obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
rtc_attr.stc_base = obj_id;
@@ -1731,7 +1730,7 @@ hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
ste_attr.used_id_rtc_0 = &used_rtc_0_id;
ste_attr.used_id_rtc_1 = &used_rtc_1_id;
- common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB];
+ common_res = &ctx->common_res;
/* init an empty match STE which will always hit */
ste_attr.wqe_ctrl = &wqe_ctrl;
@@ -1750,7 +1749,7 @@ hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29);
wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
- htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset);
+ htonl(hit_ft_action->stc.offset);
wqe_data_arr = (__force __be32 *)&range_wqe_data;
@@ -1843,7 +1842,7 @@ mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB,
- &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ &action->stc);
if (ret)
goto error_unlock;
@@ -1875,7 +1874,50 @@ struct mlx5hws_action *
mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
u32 sampler_id, u32 flags)
{
- mlx5hws_err(ctx, "Flow sampler action - unsupported\n");
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ struct mlx5hws_cmd_set_fte_dest dest;
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (flags != (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) {
+ mlx5hws_err(ctx, "Unsupported flags for flow sampler\n");
+ return NULL;
+ }
+
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ dest.destination_id = sampler_id;
+
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
+ fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = 1;
+
+ fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!fw_island)
+ return NULL;
+
+ action = hws_action_create_generic(ctx, flags,
+ MLX5HWS_ACTION_TYP_SAMPLER);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = hws_action_create_stcs(action, fw_island->ft_id);
+ if (ret)
+ goto free_action;
+
+ action->flow_sampler.fw_island = fw_island;
+
+ return action;
+
+free_action:
+ kfree(action);
+destroy_fw_island:
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island);
return NULL;
}
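
With this, a sampler action can be created where the call previously failed unconditionally. A hypothetical caller (the flags must be exactly FDB | SHARED per the check above; sampler_obj_id is an assumed pre-created sampler object):

    struct mlx5hws_action *sampler;

    sampler = mlx5hws_action_create_flow_sampler(ctx, sampler_obj_id,
                                                 MLX5HWS_ACTION_FLAG_HWS_FDB |
                                                 MLX5HWS_ACTION_FLAG_SHARED);
    if (!sampler)
            return -EINVAL;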
@@ -1914,6 +1956,11 @@ static void hws_action_destroy_hws(struct mlx5hws_action *action)
}
kfree(action->dest_array.dest_list);
break;
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ hws_action_destroy_stcs(action);
+ mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev,
+ action->flow_sampler.fw_island);
+ break;
case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5HWS_ACTION_TYP_MODIFY_HDR:
shared_arg = false;
@@ -1970,8 +2017,8 @@ __must_hold(&ctx->ctrl_lock)
struct mlx5hws_action_default_stc *default_stc;
int ret;
- if (ctx->common_res[tbl_type].default_stc) {
- ctx->common_res[tbl_type].default_stc->refcount++;
+ if (ctx->common_res.default_stc) {
+ ctx->common_res.default_stc->refcount++;
return 0;
}
@@ -2023,8 +2070,8 @@ __must_hold(&ctx->ctrl_lock)
goto free_nop_dw7;
}
- ctx->common_res[tbl_type].default_stc = default_stc;
- ctx->common_res[tbl_type].default_stc->refcount++;
+ ctx->common_res.default_stc = default_stc;
+ ctx->common_res.default_stc->refcount++;
return 0;
@@ -2046,9 +2093,7 @@ __must_hold(&ctx->ctrl_lock)
{
struct mlx5hws_action_default_stc *default_stc;
- default_stc = ctx->common_res[tbl_type].default_stc;
-
- default_stc = ctx->common_res[tbl_type].default_stc;
+ default_stc = ctx->common_res.default_stc;
if (--default_stc->refcount)
return;
@@ -2058,7 +2103,7 @@ __must_hold(&ctx->ctrl_lock)
mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
kfree(default_stc);
- ctx->common_res[tbl_type].default_stc = NULL;
+ ctx->common_res.default_stc = NULL;
}
static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
@@ -2150,8 +2195,7 @@ hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply,
{
struct mlx5hws_action *action = apply->rule_action[action_idx].action;
- apply->wqe_ctrl->stc_ix[stc_idx] =
- htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[stc_idx] = htonl(action->stc.offset);
}
static void
@@ -2181,7 +2225,7 @@ hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
rule_action = &apply->rule_action[setter->idx_double];
action = rule_action->action;
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2240,7 +2284,7 @@ hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2272,7 +2316,7 @@ hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply,
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
- stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ stc_idx = htonl(action->stc.offset);
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
@@ -2434,6 +2478,7 @@ int mlx5hws_action_template_process(struct mlx5hws_action_template *at)
case MLX5HWS_ACTION_TYP_DROP:
case MLX5HWS_ACTION_TYP_TBL:
case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_SAMPLER:
case MLX5HWS_ACTION_TYP_VPORT:
case MLX5HWS_ACTION_TYP_MISS:
/* Hit action */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
index e8f562c31826..64b76075f7f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h
@@ -70,12 +70,12 @@ struct mlx5hws_action_default_stc {
struct mlx5hws_pool_chunk nop_dw6;
struct mlx5hws_pool_chunk nop_dw7;
struct mlx5hws_pool_chunk default_hit;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_action_shared_stc {
struct mlx5hws_pool_chunk stc_chunk;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_actions_apply_data {
@@ -124,7 +124,7 @@ struct mlx5hws_action {
struct mlx5hws_context *ctx;
union {
struct {
- struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pool_chunk stc;
union {
struct {
u32 pat_id;
@@ -166,6 +166,9 @@ struct mlx5hws_action {
struct mlx5hws_cmd_set_fte_dest *dest_list;
} dest_array;
struct {
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ } flow_sampler;
+ struct {
u8 type;
u8 start_anchor;
u8 end_anchor;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index baacf662c0ab..3dbd4efa21a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -152,6 +152,8 @@ mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
if (!bwc_matcher)
return NULL;
+ atomic_set(&bwc_matcher->num_of_rules, 0);
+
/* Check if the required match params can be all matched
* in single STE, otherwise complex matcher is needed.
*/
@@ -199,10 +201,12 @@ int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- if (bwc_matcher->num_of_rules)
+ u32 num_of_rules = atomic_read(&bwc_matcher->num_of_rules);
+
+ if (num_of_rules)
mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
"BWC matcher destroy: matcher still has %d rules\n",
- bwc_matcher->num_of_rules);
+ num_of_rules);
mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
@@ -215,6 +219,8 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
u32 *pending_rules,
bool drain)
{
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT * MSEC_PER_SEC);
struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
bool got_comp = *pending_rules >= burst_th;
@@ -250,6 +256,11 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
}
got_comp = !!ret;
+
+ if (unlikely(!got_comp && time_after(jiffies, timeout))) {
+ mlx5hws_err(ctx, "BWC poll error: polling queue %d - TIMEOUT\n", queue_id);
+ return -ETIMEDOUT;
+ }
}
return err;
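
The new exit path is the standard jiffies-timeout idiom. A minimal sketch of the pattern with illustrative names (try_poll_once() is hypothetical):

    unsigned long timeout = jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC);
    int done = 0;

    while (!done) {
            done = try_poll_once();
            if (!done && time_after(jiffies, timeout))
                    return -ETIMEDOUT;  /* bail out instead of spinning forever */
    }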
@@ -309,7 +320,7 @@ static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
{
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- bwc_matcher->num_of_rules++;
+ atomic_inc(&bwc_matcher->num_of_rules);
bwc_rule->bwc_queue_idx = idx;
list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
}
@@ -318,7 +329,7 @@ static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
{
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- bwc_matcher->num_of_rules--;
+ atomic_dec(&bwc_matcher->num_of_rules);
list_del_init(&bwc_rule->list_node);
}
@@ -334,22 +345,21 @@ hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_rule_attr *rule_attr)
{
struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
- struct mlx5hws_flow_op_result completion;
+ u32 expected_completions = 1;
int ret;
ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
if (unlikely(ret))
return ret;
- do {
- ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
- } while (ret != 1);
-
- if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS ||
- (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
- bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) {
- mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n",
- completion.status, bwc_rule->rule->status);
+ ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
+ bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING)) {
+ mlx5hws_err(ctx, "Failed destroying BWC rule: rule status %d\n",
+ bwc_rule->rule->status);
return -EINVAL;
}
@@ -458,8 +468,22 @@ hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
{
struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
- return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >=
- caps->ste_alloc_log_max - 1;
+ /* check the match RTC size */
+ if ((bwc_matcher->size_log +
+ MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH +
+ MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP) >
+ (caps->ste_alloc_log_max - 1))
+ return true;
+
+ /* check the action RTC size */
+ if ((bwc_matcher->size_log +
+ MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP +
+ ilog2(roundup_pow_of_two(bwc_matcher->matcher->action_ste.max_stes)) +
+ MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT) >
+ (caps->ste_alloc_log_max - 1))
+ return true;
+
+ return false;
}
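
Worked numbers for the two bounds, with assumed capability values (illustrative, not from the patch):

    /* Assume size_log = 20, assured main table depth = 2,
     * size-log step = 1 and ste_alloc_log_max = 23:
     *   match RTC:  20 + 2 + 1 = 23 > 22  -> maxed out
     * The action-RTC check applies the same bound to
     *   size_log + step + ilog2(roundup_pow_of_two(max_stes)) + update mult.
     */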
static bool
@@ -615,8 +639,12 @@ static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_match
ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
&pending_rules[i], false);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule failed during rehash (%d)\n",
+ ret);
goto free_bwc_rules;
+ }
}
}
} while (!all_done);
@@ -629,8 +657,11 @@ static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_match
mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
ret = hws_bwc_queue_poll(ctx, queue_id,
&pending_rules[i], true);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule failed during rehash (%d)\n", ret);
goto free_bwc_rules;
+ }
}
}
@@ -704,7 +735,8 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
* Need to check again if we really need rehash.
* If the reason for rehash was size, but not any more - skip rehash.
*/
- if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules))
+ if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher,
+ atomic_read(&bwc_matcher->num_of_rules)))
return 0;
/* Now we're done all the checking - do the rehash:
@@ -797,7 +829,7 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
}
/* check if number of rules require rehash */
- num_of_rules = bwc_matcher->num_of_rules;
+ num_of_rules = atomic_read(&bwc_matcher->num_of_rules);
if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
mutex_unlock(queue_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index 0b745968e21e..f9f569131dde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -8,10 +8,18 @@
#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
-#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255
+
+/* Max number of AT attach operations for the same matcher.
+ * When the limit is reached, next attempt to attach new AT
+ * will result in creation of a new matcher and moving all
+ * the rules to this matcher.
+ */
+#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 8
#define MLX5HWS_BWC_MAX_ACTS 16
+#define MLX5HWS_BWC_POLLING_TIMEOUT 60
+
struct mlx5hws_bwc_matcher {
struct mlx5hws_matcher *matcher;
struct mlx5hws_match_template *mt;
@@ -19,7 +27,7 @@ struct mlx5hws_bwc_matcher {
u8 num_of_at;
u16 priority;
u8 size_log;
- u32 num_of_rules; /* atomically accessed */
+ atomic_t num_of_rules;
struct list_head *rules;
};
@@ -60,9 +68,11 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
{
/* Besides the control queue, half of the queues are
- * reguler HWS queues, and the other half are BWC queues.
+ * regular HWS queues, and the other half are BWC queues.
*/
- return (ctx->queues - 1) / 2;
+ if (mlx5hws_context_bwc_supported(ctx))
+ return (ctx->queues - 1) / 2;
+ return 0;
}
static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index c00c138c3366..487e75476b0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -257,6 +257,12 @@ int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
dest->ext_reformat_id);
}
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ MLX5_SET(dest_format, in_dests,
+ destination_type, ifc_dest_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ break;
default:
ret = -EOPNOTSUPP;
goto out;
@@ -359,7 +365,7 @@ void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
ft_attr->type = fw_ft_type;
ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
- default_miss_tbl = ctx->common_res[type].default_miss->ft_id;
+ default_miss_tbl = ctx->common_res.default_miss->ft_id;
if (!default_miss_tbl) {
pr_warn("HWS: no flow table ID for default miss\n");
return;
@@ -622,12 +628,12 @@ int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
u32 pd,
u32 *arg_id)
{
+ u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {0};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
void *attr;
int ret;
- attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
MLX5_SET(general_obj_in_cmd_hdr,
attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr,
@@ -635,8 +641,8 @@ int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
MLX5_SET(general_obj_in_cmd_hdr,
attr, op_param.create.log_obj_range, log_obj_range);
- attr = MLX5_ADDR_OF(create_arg_in, in, arg);
- MLX5_SET(arg, attr, access_pd, pd);
+ attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
+ MLX5_SET(modify_header_arg, attr, access_pd, pd);
ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (ret) {
@@ -812,7 +818,7 @@ int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_packet_reformat_create_attr *attr,
u32 *reformat_id)
{
- u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {0};
size_t insz, cmd_data_sz, cmd_total_sz;
void *prctx;
void *pdata;
@@ -845,7 +851,7 @@ int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
goto out;
}
- *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+ *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
out:
kfree(in);
return ret;
@@ -854,13 +860,13 @@ out:
int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
u32 reformat_id)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {0};
int ret;
- MLX5_SET(dealloc_packet_reformat_in, in, opcode,
+ MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
- MLX5_SET(dealloc_packet_reformat_in, in,
+ MLX5_SET(dealloc_packet_reformat_context_in, in,
packet_reformat_id, reformat_id);
ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
@@ -889,73 +895,6 @@ int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn)
return ret;
}
-int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_allow_other_vhca_access_attr *attr)
-{
- u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
- void *key;
- int ret;
-
- MLX5_SET(allow_other_vhca_access_in,
- in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
- MLX5_SET(allow_other_vhca_access_in,
- in, object_type_to_be_accessed, attr->obj_type);
- MLX5_SET(allow_other_vhca_access_in,
- in, object_id_to_be_accessed, attr->obj_id);
-
- key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
- memcpy(key, attr->access_key, sizeof(attr->access_key));
-
- ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (ret)
- mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n");
-
- return ret;
-}
-
-int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
- u32 *obj_id)
-{
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
- u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
- void *attr;
- void *key;
- int ret;
-
- attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
- MLX5_SET(general_obj_in_cmd_hdr,
- attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr,
- attr, obj_type, alias_attr->obj_type);
- MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1);
-
- attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
- MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
- MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
-
- key = MLX5_ADDR_OF(alias_context, attr, access_key);
- memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
-
- ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (ret) {
- mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n");
- goto out;
- }
-
- *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-out:
- return ret;
-}
-
-int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
- u16 obj_type,
- u32 obj_id)
-{
- return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id);
-}
-
int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_generate_wqe_attr *attr,
struct mlx5_cqe64 *ret_cqe)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
index 434f62b0904e..610c63d81ad9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -63,7 +63,7 @@ struct mlx5hws_cmd_forward_tbl {
u8 type;
u32 ft_id;
u32 fg_id;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
};
struct mlx5hws_cmd_rtc_create_attr {
@@ -334,14 +334,6 @@ mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_forward_tbl *tbl);
-int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
- u32 *obj_id);
-
-int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
- u16 obj_type,
- u32 obj_id);
-
int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn);
int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
@@ -352,9 +344,6 @@ void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
enum mlx5hws_table_type type,
struct mlx5hws_cmd_ft_modify_attr *ft_attr);
-int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
- struct mlx5hws_cmd_allow_other_vhca_access_attr *attr);
-
int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
u16 vport_number, u16 *gvmi);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
index fd48b05e91e0..9cda2774fd64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c
@@ -23,7 +23,6 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx)
struct mlx5hws_pool_attr pool_attr = {0};
u8 max_log_sz;
int ret;
- int i;
ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
if (ret)
@@ -39,23 +38,17 @@ static int hws_context_pools_init(struct mlx5hws_context *ctx)
max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- pool_attr.table_type = i;
- ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr);
- if (!ctx->stc_pool[i]) {
- mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i);
- ret = -ENOMEM;
- goto free_stc_pools;
- }
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ ctx->stc_pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!ctx->stc_pool) {
+ mlx5hws_err(ctx, "Failed to allocate STC pool\n");
+ ret = -ENOMEM;
+ goto uninit_cache;
}
return 0;
-free_stc_pools:
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++)
- if (ctx->stc_pool[i])
- mlx5hws_pool_destroy(ctx->stc_pool[i]);
-
+uninit_cache:
mlx5hws_definer_uninit_cache(ctx->definer_cache);
uninit_pat_cache:
mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
@@ -64,12 +57,8 @@ uninit_pat_cache:
static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
{
- int i;
-
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- if (ctx->stc_pool[i])
- mlx5hws_pool_destroy(ctx->stc_pool[i]);
- }
+ if (ctx->stc_pool)
+ mlx5hws_pool_destroy(ctx->stc_pool);
mlx5hws_definer_uninit_cache(ctx->definer_cache);
mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
@@ -161,8 +150,10 @@ static int hws_context_init_hws(struct mlx5hws_context *ctx,
if (ret)
goto uninit_pd;
- if (attr->bwc)
- ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+ /* Context has support for backward compatible API,
+ * and does not have support for native HWS API.
+ */
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
if (ret)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
index 47f5cc8de73f..38c3647444ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h
@@ -8,6 +8,7 @@ enum mlx5hws_context_flags {
MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,
MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2,
+ MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT = 1 << 3,
};
enum mlx5hws_context_shared_stc_type {
@@ -37,8 +38,8 @@ struct mlx5hws_context {
struct mlx5_core_dev *mdev;
struct mlx5hws_cmd_query_caps *caps;
u32 pd_num;
- struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX];
- struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pool *stc_pool;
+ struct mlx5hws_context_common_res common_res;
struct mlx5hws_pattern_cache *pattern_cache;
struct mlx5hws_definer_cache *definer_cache;
struct mutex ctrl_lock; /* control lock to protect the whole context */
@@ -58,6 +59,11 @@ static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
}
+static inline bool mlx5hws_context_native_supported(struct mlx5hws_context *ctx)
+{
+ return ctx->flags & MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT;
+}
+
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
index 5b200b4bc1a8..696275fd0ce2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c
@@ -148,8 +148,8 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
matcher->match_ste.rtc_1_id,
(int)ste_1_id);
- ste = &matcher->action_ste[0].ste;
- ste_pool = matcher->action_ste[0].pool;
+ ste = &matcher->action_ste.ste;
+ ste_pool = matcher->action_ste.pool;
if (ste_pool) {
ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
@@ -171,10 +171,8 @@ static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *ma
return ret;
seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n",
- matcher->action_ste[0].rtc_0_id,
- (int)ste_0_id,
- matcher->action_ste[0].rtc_1_id,
- (int)ste_1_id,
+ matcher->action_ste.rtc_0_id, (int)ste_0_id,
+ matcher->action_ste.rtc_1_id, (int)ste_1_id,
0,
mlx5hws_debug_icm_to_idx(icm_addr_0),
mlx5hws_debug_icm_to_idx(icm_addr_1));
@@ -368,9 +366,10 @@ static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_contex
static int hws_debug_dump_context_stc_resource(struct seq_file *f,
struct mlx5hws_context *ctx,
- u32 tbl_type,
struct mlx5hws_pool_resource *resource)
{
+ u32 tbl_type = MLX5HWS_TABLE_TYPE_BASE + MLX5HWS_TABLE_TYPE_FDB;
+
seq_printf(f, "%d,0x%llx,%u,%u\n",
MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC,
HWS_PTR_TO_ID(ctx),
@@ -382,31 +381,22 @@ static int hws_debug_dump_context_stc_resource(struct seq_file *f,
static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx)
{
- struct mlx5hws_pool *stc_pool;
- u32 table_type;
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool;
int ret;
- int i;
- for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
- stc_pool = ctx->stc_pool[i];
- table_type = MLX5HWS_TABLE_TYPE_BASE + i;
-
- if (!stc_pool)
- continue;
+ if (!stc_pool)
+ return 0;
- if (stc_pool->resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
- stc_pool->resource[0]);
- if (ret)
- return ret;
- }
+ if (stc_pool->resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->resource[0]);
+ if (ret)
+ return ret;
+ }
- if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) {
- ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
- stc_pool->mirror_resource[0]);
- if (ret)
- return ret;
- }
+ if (stc_pool->mirror_resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, stc_pool->mirror_resource[0]);
+ if (ret)
+ return ret;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index 8fe96eb76baf..10ece7df1cfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -70,7 +70,7 @@
u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
_HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
_HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
- (bit_off) % BITS_IN_DW, second_dw_mask); \
+ (bit_off + BITS_IN_DW) % BITS_IN_DW, second_dw_mask); \
} else { \
_HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
} \
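
This small fix matters because C's % operator keeps the dividend's sign; for a field that crosses a dword boundary, bit_off is negative in this branch. A worked illustration:

    /* With bit_off = -2 and BITS_IN_DW = 32:
     *   (-2) % 32        == -2   (bogus bit offset)
     *   (-2 + 32) % 32   == 30   (correct offset in the next dword)
     */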
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
index 9432d5084def..5c1a2086efba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
@@ -785,7 +785,7 @@ struct mlx5hws_definer_cache {
struct mlx5hws_definer_cache_item {
struct mlx5hws_definer definer;
- u32 refcount;
+ u32 refcount; /* protected by context ctrl lock */
struct list_head list_node;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
new file mode 100644
index 000000000000..05329afeb9ea
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -0,0 +1,1377 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include <linux/mlx5/vport.h>
+#include <mlx5_core.h>
+#include <fs_core.h>
+#include <fs_cmd.h>
+#include "fs_hws_pools.h"
+#include "mlx5hws.h"
+
+#define MLX5HWS_CTX_MAX_NUM_OF_QUEUES 16
+#define MLX5HWS_CTX_QUEUE_SIZE 256
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx);
+static void
+mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
+ unsigned long index);
+static void
+mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+ unsigned long index);
+
+static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
+ struct mlx5_fs_hws_context *fs_ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ enum mlx5hws_action_type action_type;
+ int err = -ENOSPC;
+
+ hws_pool->tag_action = mlx5hws_action_create_tag(ctx, flags);
+ if (!hws_pool->tag_action)
+ return err;
+ hws_pool->pop_vlan_action = mlx5hws_action_create_pop_vlan(ctx, flags);
+ if (!hws_pool->pop_vlan_action)
+ goto destroy_tag;
+ hws_pool->push_vlan_action = mlx5hws_action_create_push_vlan(ctx, flags);
+ if (!hws_pool->push_vlan_action)
+ goto destroy_pop_vlan;
+ hws_pool->drop_action = mlx5hws_action_create_dest_drop(ctx, flags);
+ if (!hws_pool->drop_action)
+ goto destroy_push_vlan;
+ action_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
+ hws_pool->decapl2_action =
+ mlx5hws_action_create_reformat(ctx, action_type, 1,
+ &reformat_hdr, 0, flags);
+ if (!hws_pool->decapl2_action)
+ goto destroy_drop;
+ hws_pool->remove_hdr_vlan_action =
+ mlx5_fs_create_action_remove_header_vlan(ctx);
+ if (!hws_pool->remove_hdr_vlan_action)
+ goto destroy_decapl2;
+ err = mlx5_fs_hws_pr_pool_init(&hws_pool->insert_hdr_pool, dev, 0,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER);
+ if (err)
+ goto destroy_remove_hdr;
+ err = mlx5_fs_hws_pr_pool_init(&hws_pool->dl3tnltol2_pool, dev, 0,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2);
+ if (err)
+ goto cleanup_insert_hdr;
+ xa_init(&hws_pool->el2tol3tnl_pools);
+ xa_init(&hws_pool->el2tol2tnl_pools);
+ xa_init(&hws_pool->mh_pools);
+ xa_init(&hws_pool->table_dests);
+ xa_init(&hws_pool->vport_dests);
+ xa_init(&hws_pool->vport_vhca_dests);
+ return 0;
+
+cleanup_insert_hdr:
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
+destroy_remove_hdr:
+ mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
+destroy_decapl2:
+ mlx5hws_action_destroy(hws_pool->decapl2_action);
+destroy_drop:
+ mlx5hws_action_destroy(hws_pool->drop_action);
+destroy_push_vlan:
+ mlx5hws_action_destroy(hws_pool->push_vlan_action);
+destroy_pop_vlan:
+ mlx5hws_action_destroy(hws_pool->pop_vlan_action);
+destroy_tag:
+ mlx5hws_action_destroy(hws_pool->tag_action);
+ return err;
+}
+
+static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5hws_action *action;
+ struct mlx5_fs_pool *pool;
+ unsigned long i;
+
+ xa_for_each(&hws_pool->vport_vhca_dests, i, action)
+ mlx5hws_action_destroy(action);
+ xa_destroy(&hws_pool->vport_vhca_dests);
+ xa_for_each(&hws_pool->vport_dests, i, action)
+ mlx5hws_action_destroy(action);
+ xa_destroy(&hws_pool->vport_dests);
+ xa_destroy(&hws_pool->table_dests);
+ xa_for_each(&hws_pool->mh_pools, i, pool)
+ mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, i);
+ xa_destroy(&hws_pool->mh_pools);
+ xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
+ mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
+ xa_destroy(&hws_pool->el2tol2tnl_pools);
+ xa_for_each(&hws_pool->el2tol3tnl_pools, i, pool)
+ mlx5_fs_destroy_pr_pool(pool, &hws_pool->el2tol3tnl_pools, i);
+ xa_destroy(&hws_pool->el2tol3tnl_pools);
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->dl3tnltol2_pool);
+ mlx5_fs_hws_pr_pool_cleanup(&hws_pool->insert_hdr_pool);
+ mlx5hws_action_destroy(hws_pool->remove_hdr_vlan_action);
+ mlx5hws_action_destroy(hws_pool->decapl2_action);
+ mlx5hws_action_destroy(hws_pool->drop_action);
+ mlx5hws_action_destroy(hws_pool->push_vlan_action);
+ mlx5hws_action_destroy(hws_pool->pop_vlan_action);
+ mlx5hws_action_destroy(hws_pool->tag_action);
+}
+
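+/* Open a HWS context for this root namespace. The number of rule queues
+ * is bounded by the number of online CPUs, capped at
+ * MLX5HWS_CTX_MAX_NUM_OF_QUEUES.
+ */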
+static int mlx5_cmd_hws_create_ns(struct mlx5_flow_root_namespace *ns)
+{
+ struct mlx5hws_context_attr hws_ctx_attr = {};
+ int err;
+
+ hws_ctx_attr.queues = min_t(int, num_online_cpus(),
+ MLX5HWS_CTX_MAX_NUM_OF_QUEUES);
+ hws_ctx_attr.queue_size = MLX5HWS_CTX_QUEUE_SIZE;
+
+ ns->fs_hws_context.hws_ctx =
+ mlx5hws_context_open(ns->dev, &hws_ctx_attr);
+ if (!ns->fs_hws_context.hws_ctx) {
+ mlx5_core_err(ns->dev, "Failed to create hws flow namespace\n");
+ return -EINVAL;
+ }
+ err = mlx5_fs_init_hws_actions_pool(ns->dev, &ns->fs_hws_context);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed to init hws actions pool\n");
+ mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
+ return err;
+ }
+ return 0;
+}
+
+static int mlx5_cmd_hws_destroy_ns(struct mlx5_flow_root_namespace *ns)
+{
+ mlx5_fs_cleanup_hws_actions_pool(&ns->fs_hws_context);
+ return mlx5hws_context_close(ns->fs_hws_context.hws_ctx);
+}
+
+static int mlx5_cmd_hws_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns,
+ u16 peer_vhca_id)
+{
+ struct mlx5hws_context *peer_ctx = NULL;
+
+ if (peer_ns)
+ peer_ctx = peer_ns->fs_hws_context.hws_ctx;
+ mlx5hws_context_set_peer(ns->fs_hws_context.hws_ctx, peer_ctx,
+ peer_vhca_id);
+ return 0;
+}
+
+static int mlx5_fs_set_ft_default_miss(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5hws_table *next_tbl;
+ int err;
+
+ if (!ns->fs_hws_context.hws_ctx)
+ return -EINVAL;
+
+ /* if no change required, return */
+ if (!next_ft && !ft->fs_hws_table.miss_ft_set)
+ return 0;
+
+ next_tbl = next_ft ? next_ft->fs_hws_table.hws_table : NULL;
+ err = mlx5hws_table_set_default_miss(ft->fs_hws_table.hws_table, next_tbl);
+ if (err) {
+ mlx5_core_err(ns->dev, "Failed setting FT default miss (%d)\n", err);
+ return err;
+ }
+ ft->fs_hws_table.miss_ft_set = !!next_tbl;
+ return 0;
+}
+
+static int mlx5_fs_add_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action *dest_ft_action;
+ struct xarray *dests_xa;
+ int err;
+
+ dest_ft_action = mlx5hws_action_create_dest_table_num(fs_ctx->hws_ctx,
+ ft->id, flags);
+ if (!dest_ft_action) {
+ mlx5_core_err(ns->dev, "Failed creating dest table action\n");
+ return -ENOMEM;
+ }
+
+ dests_xa = &fs_ctx->hws_pool.table_dests;
+ err = xa_insert(dests_xa, ft->id, dest_ft_action, GFP_KERNEL);
+ if (err)
+ mlx5hws_action_destroy(dest_ft_action);
+ return err;
+}
+
+static int mlx5_fs_del_flow_table_dest_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action *dest_ft_action;
+ struct xarray *dests_xa;
+ int err;
+
+ dests_xa = &fs_ctx->hws_pool.table_dests;
+ dest_ft_action = xa_erase(dests_xa, ft->id);
+ if (!dest_ft_action) {
+ mlx5_core_err(ns->dev, "Failed to erase dest ft action\n");
+ return -ENOENT;
+ }
+
+ err = mlx5hws_action_destroy(dest_ft_action);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy dest ft action\n");
+ return err;
+}
+
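+/* FW-terminating tables are still created through the FW command set;
+ * all other tables must be FDB tables and are created as HWS tables.
+ * In both cases a cached destination-table action is registered for
+ * the new table.
+ */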
+static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table_attr *ft_attr,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5hws_context *ctx = ns->fs_hws_context.hws_ctx;
+ struct mlx5hws_table_attr tbl_attr = {};
+ struct mlx5hws_table *tbl;
+ int err;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft)) {
+ err = mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, ft_attr,
+ next_ft);
+ if (err)
+ return err;
+ err = mlx5_fs_add_flow_table_dest_action(ns, ft);
+ if (err)
+ mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+ return err;
+ }
+
+ if (ns->table_type != FS_FT_FDB) {
+ mlx5_core_err(ns->dev, "Table type %d not supported for HWS\n",
+ ns->table_type);
+ return -EOPNOTSUPP;
+ }
+
+ tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
+ tbl_attr.level = ft_attr->level;
+ tbl = mlx5hws_table_create(ctx, &tbl_attr);
+ if (!tbl) {
+ mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
+ return -EINVAL;
+ }
+
+ ft->fs_hws_table.hws_table = tbl;
+ ft->id = mlx5hws_table_get_id(tbl);
+
+ if (next_ft) {
+ err = mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
+ if (err)
+ goto destroy_table;
+ }
+
+ ft->max_fte = INT_MAX;
+
+ err = mlx5_fs_add_flow_table_dest_action(ns, ft);
+ if (err)
+ goto clear_ft_miss;
+ return 0;
+
+clear_ft_miss:
+ mlx5_fs_set_ft_default_miss(ns, ft, NULL);
+destroy_table:
+ mlx5hws_table_destroy(tbl);
+ ft->fs_hws_table.hws_table = NULL;
+ return err;
+}
+
+static int mlx5_cmd_hws_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ int err;
+
+ err = mlx5_fs_del_flow_table_dest_action(ns, ft);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to remove dest action (%d)\n", err);
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+
+ err = mlx5_fs_set_ft_default_miss(ns, ft, NULL);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to disconnect next table (%d)\n", err);
+
+ err = mlx5hws_table_destroy(ft->fs_hws_table.hws_table);
+ if (err)
+ mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n", err);
+
+ return err;
+}
+
+static int mlx5_cmd_hws_modify_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
+
+ return mlx5_fs_set_ft_default_miss(ns, ft, next_ft);
+}
+
+static int mlx5_cmd_hws_update_root_ft(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 underlay_qpn,
+ bool disconnect)
+{
+ return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
+ disconnect);
+}
+
+static int mlx5_cmd_hws_create_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft, u32 *in,
+ struct mlx5_flow_group *fg)
+{
+ struct mlx5hws_match_parameters mask;
+ struct mlx5hws_bwc_matcher *matcher;
+ u8 match_criteria_enable;
+ u32 priority;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in, fg);
+
+ mask.match_buf = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ mask.match_sz = sizeof(fg->mask.match_criteria);
+
+ match_criteria_enable = MLX5_GET(create_flow_group_in, in,
+ match_criteria_enable);
+ priority = MLX5_GET(create_flow_group_in, in, start_flow_index);
+ matcher = mlx5hws_bwc_matcher_create(ft->fs_hws_table.hws_table,
+ priority, match_criteria_enable,
+ &mask);
+ if (!matcher) {
+ mlx5_core_err(ns->dev, "Failed creating matcher\n");
+ return -EINVAL;
+ }
+
+ fg->fs_hws_matcher.matcher = matcher;
+ return 0;
+}
+
+static int mlx5_cmd_hws_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
+
+ return mlx5hws_bwc_matcher_destroy(fg->fs_hws_matcher.matcher);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_ft(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ return xa_load(&fs_ctx->hws_pool.table_dests, dst->dest_attr.ft->id);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 table_num = dst->dest_attr.ft_num;
+
+ return xa_load(&fs_ctx->hws_pool.table_dests, table_num);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_dest_action_table_num(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ u32 table_num = dst->dest_attr.ft_num;
+
+ return mlx5hws_action_create_dest_table_num(ctx, table_num, flags);
+}
+
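+/* Vport destination actions are cached in an xarray: keyed by
+ * (vhca_id << 16 | vport) when the VHCA ID is valid, by vport number
+ * otherwise. A racing insert (-EBUSY) means another thread stored the
+ * action first, so the lookup is simply retried.
+ */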
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_vport(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst,
+ bool is_dest_type_uplink)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5hws_action *dest;
+ struct xarray *dests_xa;
+ bool vhca_id_valid;
+ unsigned long idx;
+ u16 vport_num;
+ int err;
+
+ vhca_id_valid = is_dest_type_uplink ||
+ (dest_attr->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID);
+ vport_num = is_dest_type_uplink ? MLX5_VPORT_UPLINK : dest_attr->vport.num;
+ if (vhca_id_valid) {
+ dests_xa = &fs_ctx->hws_pool.vport_vhca_dests;
+ idx = dest_attr->vport.vhca_id << 16 | vport_num;
+ } else {
+ dests_xa = &fs_ctx->hws_pool.vport_dests;
+ idx = vport_num;
+ }
+dest_load:
+ dest = xa_load(dests_xa, idx);
+ if (dest)
+ return dest;
+
+ dest = mlx5hws_action_create_dest_vport(ctx, vport_num, vhca_id_valid,
+ dest_attr->vport.vhca_id, flags);
+
+ err = xa_insert(dests_xa, idx, dest, GFP_KERNEL);
+ if (err) {
+ mlx5hws_action_destroy(dest);
+ dest = NULL;
+
+ if (err == -EBUSY)
+ /* xarray entry was already stored by another thread */
+ goto dest_load;
+ }
+
+ return dest;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
+ struct mlx5_flow_rule *dst)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+
+ return mlx5hws_action_create_dest_match_range(ctx,
+ dest_attr->range.field,
+ dest_attr->range.hit_ft,
+ dest_attr->range.miss_ft,
+ dest_attr->range.min,
+ dest_attr->range.max,
+ flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_dest_attr *dests,
+ u32 num_of_dests, bool ignore_flow_level,
+ u32 flow_source)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
+ ignore_flow_level,
+ flow_source, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_push_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.push_vlan_action;
+}
+
+static u32 mlx5_fs_calc_vlan_hdr(struct mlx5_fs_vlan *vlan)
+{
+ u16 n_ethtype = vlan->ethtype;
+ u8 prio = vlan->prio;
+ u16 vid = vlan->vid;
+
+ return (u32)n_ethtype << 16 | (u32)(prio) << 12 | (u32)vid;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_pop_vlan(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.pop_vlan_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_decap_tnl_l2_to_l2(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.decapl2_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_drop(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.drop_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_tag(struct mlx5_fs_hws_context *fs_ctx)
+{
+ return fs_ctx->hws_pool.tag_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ return mlx5hws_action_create_last(ctx, flags);
+}
+
+static void mlx5_fs_destroy_fs_action(struct mlx5_fs_hws_rule_action *fs_action)
+{
+ switch (mlx5hws_action_get_type(fs_action->action)) {
+ case MLX5HWS_ACTION_TYP_CTR:
+ mlx5_fc_put_hws_action(fs_action->counter);
+ break;
+ default:
+ mlx5hws_action_destroy(fs_action->action);
+ }
+}
+
+static void
+mlx5_fs_destroy_fs_actions(struct mlx5_fs_hws_rule_action **fs_actions,
+ int *num_fs_actions)
+{
+ int i;
+
+ /* Free in reverse order to handle action dependencies */
+ for (i = *num_fs_actions - 1; i >= 0; i--)
+ mlx5_fs_destroy_fs_action(*fs_actions + i);
+ *num_fs_actions = 0;
+ kfree(*fs_actions);
+ *fs_actions = NULL;
+}
+
+/* Splits FTE's actions into cached, rule and destination actions.
+ * The cached and destination actions are saved on the fte hws rule.
+ * The rule actions are returned as a parameter, together with their count.
+ * We want to support a rule with 32 destinations, which means we need to
+ * account for 32 destinations plus usually a counter plus one more action
+ * for a multi-destination flow table.
+ * 32 is the SW limitation for the array size; the HWS limitation is 16M STEs per matcher.
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 34
+static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte,
+ struct mlx5hws_rule_action **ractions)
+{
+ struct mlx5_flow_act *fte_action = &fte->act_dests.action;
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5hws_action_dest_attr *dest_actions;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_rule_action *fs_actions;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5hws_action *dest_action;
+ struct mlx5hws_action *tmp_action;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_hws_mh *mh_data;
+ bool delay_encap_set = false;
+ struct mlx5_flow_rule *dst;
+ int num_dest_actions = 0;
+ int num_fs_actions = 0;
+ int num_actions = 0;
+ int err;
+
+ *ractions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(**ractions),
+ GFP_KERNEL);
+ if (!*ractions) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ fs_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*fs_actions), GFP_KERNEL);
+ if (!fs_actions) {
+ err = -ENOMEM;
+ goto free_actions_alloc;
+ }
+
+ dest_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+ sizeof(*dest_actions), GFP_KERNEL);
+ if (!dest_actions) {
+ err = -ENOMEM;
+ goto free_fs_actions_alloc;
+ }
+
+ /* The order of the actions must be kept; only the following
+ * order is supported by HW steering:
+ * HWS: decap -> remove_hdr -> pop_vlan -> modify header -> push_vlan
+ * -> reformat (insert_hdr/encap) -> ctr -> tag -> aso
+ * -> drop -> FWD:tbl/vport/sampler/tbl_num/range -> dest_array -> last
+ */
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ tmp_action = mlx5_fs_get_action_decap_tnl_l2_to_l2(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_dest_actions_alloc;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ int reformat_type = fte_action->pkt_reformat->reformat_type;
+
+ if (fte_action->pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+ mlx5_core_err(dev, "FW-owned reformat can't be used in HWS rule\n");
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ if (reformat_type == MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2) {
+ pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+ (*ractions)[num_actions].reformat.offset = pr_data->offset;
+ (*ractions)[num_actions].reformat.hdr_idx = pr_data->hdr_idx;
+ (*ractions)[num_actions].reformat.data = pr_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ } else if (reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR) {
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ } else {
+ delay_encap_set = true;
+ }
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+ tmp_action = mlx5_fs_get_action_pop_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ mh_data = fte_action->modify_hdr->fs_hws_action.mh_data;
+ (*ractions)[num_actions].modify_header.offset = mh_data->offset;
+ (*ractions)[num_actions].modify_header.data = mh_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->modify_hdr->fs_hws_action.hws_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].push_vlan.vlan_hdr =
+ htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[0]));
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ tmp_action = mlx5_fs_get_action_push_vlan(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].push_vlan.vlan_hdr =
+ htonl(mlx5_fs_calc_vlan_hdr(&fte_action->vlan[1]));
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (delay_encap_set) {
+ pr_data = fte_action->pkt_reformat->fs_hws_action.pr_data;
+ (*ractions)[num_actions].reformat.offset = pr_data->offset;
+ (*ractions)[num_actions].reformat.data = pr_data->data;
+ (*ractions)[num_actions++].action =
+ fte_action->pkt_reformat->fs_hws_action.hws_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ struct mlx5_fc *counter;
+
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ counter = dst->dest_attr.counter;
+ tmp_action = mlx5_fc_get_hws_action(ctx, counter);
+ if (!tmp_action) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
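+ /* The HWS counter action is shared by the counter's whole
+ * bulk; the rule is programmed with this counter's offset
+ * within it.
+ */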
+ (*ractions)[num_actions].counter.offset =
+ mlx5_fc_id(counter) - mlx5_fc_get_base_id(counter);
+ (*ractions)[num_actions++].action = tmp_action;
+ fs_actions[num_fs_actions].action = tmp_action;
+ fs_actions[num_fs_actions++].counter = counter;
+ }
+ }
+
+ if (fte->act_dests.flow_context.flow_tag) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ tmp_action = mlx5_fs_get_action_tag(fs_ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].tag.value = fte->act_dests.flow_context.flow_tag;
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ dest_action = mlx5_fs_get_dest_action_drop(fs_ctx);
+ if (!dest_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ dest_actions[num_dest_actions++].dest = dest_action;
+ }
+
+ if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ struct mlx5_flow_destination *attr = &dst->dest_attr;
+ bool type_uplink =
+ attr->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+
+ if (num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_dest_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ if (attr->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ switch (attr->type) {
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
+ dst);
+ if (dest_action)
+ break;
+ dest_action = mlx5_fs_create_dest_action_table_num(fs_ctx,
+ dst);
+ if (dest_action)
+ fs_actions[num_fs_actions++].action = dest_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ dest_action = mlx5_fs_create_dest_action_range(ctx, dst);
+ if (dest_action)
+ fs_actions[num_fs_actions++].action = dest_action;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
+ type_uplink);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ if (!dest_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ dest_actions[num_dest_actions++].dest = dest_action;
+ }
+ }
+
+ if (num_dest_actions == 1) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ (*ractions)[num_actions++].action = dest_actions->dest;
+ } else if (num_dest_actions > 1) {
+ u32 flow_source = fte->act_dests.flow_context.flow_source;
+ bool ignore_flow_level;
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ ignore_flow_level =
+ !!(fte_action->flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ tmp_action = mlx5_fs_create_action_dest_array(ctx, dest_actions,
+ num_dest_actions,
+ ignore_flow_level,
+ flow_source);
+ if (!tmp_action) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+ fs_actions[num_fs_actions++].action = tmp_action;
+ (*ractions)[num_actions++].action = tmp_action;
+ }
+
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action = mlx5_fs_create_action_last(ctx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_actions[num_fs_actions++].action = tmp_action;
+ (*ractions)[num_actions++].action = tmp_action;
+
+ kfree(dest_actions);
+
+ /* Actions created specifically for this rule will be destroyed
+ * once the rule is deleted.
+ */
+ fte->fs_hws_rule.num_fs_actions = num_fs_actions;
+ fte->fs_hws_rule.hws_fs_actions = fs_actions;
+
+ return 0;
+
+free_actions:
+ mlx5_fs_destroy_fs_actions(&fs_actions, &num_fs_actions);
+free_dest_actions_alloc:
+ kfree(dest_actions);
+free_fs_actions_alloc:
+ kfree(fs_actions);
+free_actions_alloc:
+ kfree(*ractions);
+ *ractions = NULL;
+out_err:
+ return err;
+}
+
+static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte)
+{
+ struct mlx5hws_match_parameters params;
+ struct mlx5hws_rule_action *ractions;
+ struct mlx5hws_bwc_rule *rule;
+ int err = 0;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft)) {
+ /* Packet reformat on termination table is not supported yet */
+ if (fte->act_dests.action.action &
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
+ return -EOPNOTSUPP;
+ return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
+ }
+
+ err = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
+ if (err)
+ goto out_err;
+
+ params.match_sz = sizeof(fte->val);
+ params.match_buf = fte->val;
+
+ rule = mlx5hws_bwc_rule_create(group->fs_hws_matcher.matcher, &params,
+ fte->act_dests.flow_context.flow_source,
+ ractions);
+ kfree(ractions);
+ if (!rule) {
+ err = -EINVAL;
+ goto free_actions;
+ }
+
+ fte->fs_hws_rule.bwc_rule = rule;
+ return 0;
+
+free_actions:
+ mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+ &fte->fs_hws_rule.num_fs_actions);
+out_err:
+ mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
+ return err;
+}
+
+static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+{
+ struct mlx5_fs_hws_rule *rule = &fte->fs_hws_rule;
+ int err;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
+
+ err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
+ rule->bwc_rule = NULL;
+
+ mlx5_fs_destroy_fs_actions(&rule->hws_fs_actions, &rule->num_fs_actions);
+
+ return err;
+}
+
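+/* Update an FTE by building a fresh set of rule actions and updating
+ * the BWC rule in place. The previous actions are kept aside and are
+ * only destroyed once the update succeeds; on failure they are
+ * restored.
+ */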
+static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ int allowed_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ struct mlx5_fs_hws_rule_action *saved_hws_fs_actions;
+ struct mlx5hws_rule_action *ractions;
+ int saved_num_fs_actions;
+ int ret;
+
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
+ return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group,
+ modify_mask, fte);
+
+ if ((modify_mask & ~allowed_mask) != 0)
+ return -EINVAL;
+
+ saved_hws_fs_actions = fte->fs_hws_rule.hws_fs_actions;
+ saved_num_fs_actions = fte->fs_hws_rule.num_fs_actions;
+
+ ret = mlx5_fs_fte_get_hws_actions(ns, ft, group, fte, &ractions);
+ if (ret)
+ return ret;
+
+ ret = mlx5hws_bwc_rule_action_update(fte->fs_hws_rule.bwc_rule, ractions);
+ kfree(ractions);
+ if (ret)
+ goto restore_actions;
+
+ mlx5_fs_destroy_fs_actions(&saved_hws_fs_actions, &saved_num_fs_actions);
+ return ret;
+
+restore_actions:
+ mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+ &fte->fs_hws_rule.num_fs_actions);
+ fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
+ fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
+ return ret;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_create_action_remove_header_vlan(struct mlx5hws_context *ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5hws_action_remove_header_attr remove_hdr_vlan = {};
+
+ /* MAC anchor not supported in HWS reformat, use VLAN anchor */
+ remove_hdr_vlan.anchor = MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START;
+ remove_hdr_vlan.offset = 0;
+ remove_hdr_vlan.size = sizeof(struct vlan_hdr);
+ return mlx5hws_action_create_remove_header(ctx, &remove_hdr_vlan, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_remove_header_vlan(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if (!params ||
+ params->param_0 != MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START ||
+ params->param_1 != offsetof(struct vlan_ethhdr, h_vlan_proto) ||
+ params->size != sizeof(struct vlan_hdr))
+ return NULL;
+
+ return fs_ctx->hws_pool.remove_hdr_vlan_action;
+}
+
+static int
+mlx5_fs_verify_insert_header_params(struct mlx5_core_dev *mdev,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if ((!params->data && params->size) || (params->data && !params->size) ||
+ MLX5_CAP_GEN_2(mdev, max_reformat_insert_size) < params->size ||
+ MLX5_CAP_GEN_2(mdev, max_reformat_insert_offset) < params->param_1) {
+ mlx5_core_err(mdev, "Invalid reformat params for INSERT_HDR\n");
+ return -EINVAL;
+ }
+ if (params->param_0 != MLX5_FS_INSERT_HDR_VLAN_ANCHOR ||
+ params->param_1 != MLX5_FS_INSERT_HDR_VLAN_OFFSET ||
+ params->size != MLX5_FS_INSERT_HDR_VLAN_SIZE) {
+ mlx5_core_err(mdev, "Only vlan insert header supported\n");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int
+mlx5_fs_verify_encap_decap_params(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat_params *params)
+{
+ if (params->param_0 || params->param_1) {
+ mlx5_core_err(dev, "Invalid reformat params\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
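+/* Encap packet-reformat pools are created lazily and cached in an
+ * xarray keyed by the encap data size, since the bulk action is
+ * size-specific.
+ */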
+static struct mlx5_fs_pool *
+mlx5_fs_get_pr_encap_pool(struct mlx5_core_dev *dev, struct xarray *pr_pools,
+ enum mlx5hws_action_type reformat_type, size_t size)
+{
+ struct mlx5_fs_pool *pr_pool;
+ unsigned long index = size;
+ int err;
+
+ pr_pool = xa_load(pr_pools, index);
+ if (pr_pool)
+ return pr_pool;
+
+ pr_pool = kzalloc(sizeof(*pr_pool), GFP_KERNEL);
+ if (!pr_pool)
+ return ERR_PTR(-ENOMEM);
+ err = mlx5_fs_hws_pr_pool_init(pr_pool, dev, size, reformat_type);
+ if (err)
+ goto free_pr_pool;
+ err = xa_insert(pr_pools, index, pr_pool, GFP_KERNEL);
+ if (err)
+ goto cleanup_pr_pool;
+ return pr_pool;
+
+cleanup_pr_pool:
+ mlx5_fs_hws_pr_pool_cleanup(pr_pool);
+free_pr_pool:
+ kfree(pr_pool);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_fs_destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
+ unsigned long index)
+{
+ xa_erase(pr_pools, index);
+ mlx5_fs_hws_pr_pool_cleanup(pool);
+ kfree(pool);
+}
+
+static int
+mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat_params *params,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+ struct mlx5_fs_hws_actions_pool *hws_pool;
+ struct mlx5hws_action *hws_action = NULL;
+ struct mlx5_fs_hws_pr *pr_data = NULL;
+ struct mlx5_fs_pool *pr_pool = NULL;
+ struct mlx5_core_dev *dev = ns->dev;
+ u8 hdr_idx = 0;
+ int err;
+
+ if (!params)
+ return -EINVAL;
+
+ hws_pool = &fs_ctx->hws_pool;
+
+ switch (params->type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ params->size);
+ if (IS_ERR(pr_pool))
+ return PTR_ERR(pr_pool);
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ params->size);
+ if (IS_ERR(pr_pool))
+ return PTR_ERR(pr_pool);
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ if (mlx5_fs_verify_encap_decap_params(dev, params))
+ return -EINVAL;
+ pr_pool = &hws_pool->dl3tnltol2_pool;
+ hdr_idx = params->size == ETH_HLEN ?
+ MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX :
+ MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX;
+ break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ err = mlx5_fs_verify_insert_header_params(dev, params);
+ if (err)
+ return err;
+ pr_pool = &hws_pool->insert_hdr_pool;
+ break;
+ case MLX5_REFORMAT_TYPE_REMOVE_HDR:
+ hws_action = mlx5_fs_get_action_remove_header_vlan(fs_ctx, params);
+ if (!hws_action)
+ mlx5_core_err(dev, "Only vlan remove header supported\n");
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
+ params->type);
+ return -EOPNOTSUPP;
+ }
+
+ if (pr_pool) {
+ pr_data = mlx5_fs_hws_pr_pool_acquire_pr(pr_pool);
+ if (IS_ERR_OR_NULL(pr_data))
+ return !pr_data ? -EINVAL : PTR_ERR(pr_data);
+ hws_action = pr_data->bulk->hws_action;
+ if (!hws_action) {
+ mlx5_core_err(dev,
+ "Failed allocating packet-reformat action\n");
+ err = -EINVAL;
+ goto release_pr;
+ }
+ pr_data->data = kmemdup(params->data, params->size, GFP_KERNEL);
+ if (!pr_data->data) {
+ err = -ENOMEM;
+ goto release_pr;
+ }
+ pr_data->hdr_idx = hdr_idx;
+ pr_data->data_size = params->size;
+ pkt_reformat->fs_hws_action.pr_data = pr_data;
+ }
+
+ pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+ pkt_reformat->fs_hws_action.hws_action = hws_action;
+ return 0;
+
+release_pr:
+ if (pr_pool && pr_data)
+ mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
+ return err;
+}
+
+static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
+ struct mlx5_core_dev *dev = ns->dev;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_pool *pr_pool;
+
+ if (pkt_reformat->reformat_type == MLX5_REFORMAT_TYPE_REMOVE_HDR)
+ return;
+
+ if (!pkt_reformat->fs_hws_action.pr_data) {
+ mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
+ return;
+ }
+ pr_data = pkt_reformat->fs_hws_action.pr_data;
+
+ switch (pkt_reformat->reformat_type) {
+ case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+ case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+ case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol2tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ pr_data->data_size);
+ break;
+ case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ pr_pool = mlx5_fs_get_pr_encap_pool(dev, &hws_pool->el2tol3tnl_pools,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ pr_data->data_size);
+ break;
+ case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ pr_pool = &hws_pool->dl3tnltol2_pool;
+ break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ pr_pool = &hws_pool->insert_hdr_pool;
+ break;
+ default:
+ mlx5_core_err(ns->dev, "Unknown packet-reformat type\n");
+ return;
+ }
+ if (!pkt_reformat->fs_hws_action.pr_data || IS_ERR(pr_pool)) {
+ mlx5_core_err(ns->dev, "Failed release packet-reformat\n");
+ return;
+ }
+ kfree(pr_data->data);
+ mlx5_fs_hws_pr_pool_release_pr(pr_pool, pr_data);
+ pkt_reformat->fs_hws_action.pr_data = NULL;
+}
+
+static struct mlx5_fs_pool *
+mlx5_fs_create_mh_pool(struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern,
+ struct xarray *mh_pools, unsigned long index)
+{
+ struct mlx5_fs_pool *pool;
+ int err;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+ err = mlx5_fs_hws_mh_pool_init(pool, dev, pattern);
+ if (err)
+ goto free_pool;
+ err = xa_insert(mh_pools, index, pool, GFP_KERNEL);
+ if (err)
+ goto cleanup_pool;
+ return pool;
+
+cleanup_pool:
+ mlx5_fs_hws_mh_pool_cleanup(pool);
+free_pool:
+ kfree(pool);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_fs_destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
+ unsigned long index)
+{
+ xa_erase(mh_pools, index);
+ mlx5_fs_hws_mh_pool_cleanup(pool);
+ kfree(pool);
+}
+
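+/* Modify-header actions are pooled per pattern: reuse the pool whose
+ * pattern matches, otherwise create a new pool for this pattern.
+ */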
+static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
+ struct mlx5hws_action_mh_pattern pattern = {};
+ struct mlx5_fs_hws_mh *mh_data = NULL;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_pool *pool;
+ unsigned long i, cnt = 0;
+ bool known_pattern;
+ int err;
+
+ pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
+ pattern.data = modify_actions;
+
+ known_pattern = false;
+ xa_for_each(&hws_pool->mh_pools, i, pool) {
+ if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
+ known_pattern = true;
+ break;
+ }
+ cnt++;
+ }
+
+ if (!known_pattern) {
+ pool = mlx5_fs_create_mh_pool(ns->dev, &pattern,
+ &hws_pool->mh_pools, cnt);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ }
+ mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
+ if (IS_ERR(mh_data)) {
+ err = PTR_ERR(mh_data);
+ goto destroy_pool;
+ }
+ hws_action = mh_data->bulk->hws_action;
+ mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
+ if (!mh_data->data) {
+ err = -ENOMEM;
+ goto release_mh;
+ }
+ modify_hdr->fs_hws_action.mh_data = mh_data;
+ modify_hdr->fs_hws_action.fs_pool = pool;
+ modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+ modify_hdr->fs_hws_action.hws_action = hws_action;
+
+ return 0;
+
+release_mh:
+ mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+destroy_pool:
+ if (!known_pattern)
+ mlx5_fs_destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
+ return err;
+}
+
+static void mlx5_cmd_hws_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_fs_hws_mh *mh_data;
+ struct mlx5_fs_pool *pool;
+
+ if (!modify_hdr->fs_hws_action.fs_pool || !modify_hdr->fs_hws_action.mh_data) {
+ mlx5_core_err(ns->dev, "Failed release modify-header\n");
+ return;
+ }
+
+ mh_data = modify_hdr->fs_hws_action.mh_data;
+ kfree(mh_data->data);
+ pool = modify_hdr->fs_hws_action.fs_pool;
+ mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
+ modify_hdr->fs_hws_action.mh_data = NULL;
+}
+
+static int mlx5_cmd_hws_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_hws_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static u32 mlx5_cmd_hws_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ if (ft_type != FS_FT_FDB)
+ return 0;
+
+ return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX |
+ MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX |
+ MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
+}
+
+bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
+{
+ return mlx5hws_is_supported(dev);
+}
+
+static const struct mlx5_flow_cmds mlx5_flow_cmds_hws = {
+ .create_flow_table = mlx5_cmd_hws_create_flow_table,
+ .destroy_flow_table = mlx5_cmd_hws_destroy_flow_table,
+ .modify_flow_table = mlx5_cmd_hws_modify_flow_table,
+ .update_root_ft = mlx5_cmd_hws_update_root_ft,
+ .create_flow_group = mlx5_cmd_hws_create_flow_group,
+ .destroy_flow_group = mlx5_cmd_hws_destroy_flow_group,
+ .create_fte = mlx5_cmd_hws_create_fte,
+ .delete_fte = mlx5_cmd_hws_delete_fte,
+ .update_fte = mlx5_cmd_hws_update_fte,
+ .packet_reformat_alloc = mlx5_cmd_hws_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_hws_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_hws_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_hws_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_hws_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_hws_destroy_match_definer,
+ .create_ns = mlx5_cmd_hws_create_ns,
+ .destroy_ns = mlx5_cmd_hws_destroy_ns,
+ .set_peer = mlx5_cmd_hws_set_peer,
+ .get_capabilities = mlx5_cmd_hws_get_capabilities,
+};
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
+{
+ return &mlx5_flow_cmds_hws;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
new file mode 100644
index 000000000000..cbddb72d4362
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef _MLX5_FS_HWS_
+#define _MLX5_FS_HWS_
+
+#include "mlx5hws.h"
+#include "fs_hws_pools.h"
+
+struct mlx5_fs_hws_actions_pool {
+ struct mlx5hws_action *tag_action;
+ struct mlx5hws_action *pop_vlan_action;
+ struct mlx5hws_action *push_vlan_action;
+ struct mlx5hws_action *drop_action;
+ struct mlx5hws_action *decapl2_action;
+ struct mlx5hws_action *remove_hdr_vlan_action;
+ struct mlx5_fs_pool insert_hdr_pool;
+ struct mlx5_fs_pool dl3tnltol2_pool;
+ struct xarray el2tol3tnl_pools;
+ struct xarray el2tol2tnl_pools;
+ struct xarray mh_pools;
+ struct xarray table_dests;
+ struct xarray vport_vhca_dests;
+ struct xarray vport_dests;
+};
+
+struct mlx5_fs_hws_context {
+ struct mlx5hws_context *hws_ctx;
+ struct mlx5_fs_hws_actions_pool hws_pool;
+};
+
+struct mlx5_fs_hws_table {
+ struct mlx5hws_table *hws_table;
+ bool miss_ft_set;
+};
+
+struct mlx5_fs_hws_action {
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_pool *fs_pool;
+ struct mlx5_fs_hws_pr *pr_data;
+ struct mlx5_fs_hws_mh *mh_data;
+};
+
+struct mlx5_fs_hws_matcher {
+ struct mlx5hws_bwc_matcher *matcher;
+};
+
+struct mlx5_fs_hws_rule_action {
+ struct mlx5hws_action *action;
+ union {
+ struct mlx5_fc *counter;
+ };
+};
+
+struct mlx5_fs_hws_rule {
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct mlx5_fs_hws_rule_action *hws_fs_actions;
+ int num_fs_actions;
+};
+
+#ifdef CONFIG_MLX5_HW_STEERING
+
+bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev);
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void);
+
+#else
+
+static inline bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_hws_cmds(void)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_MLX5_HW_STEERING */
+#endif /* _MLX5_FS_HWS_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
new file mode 100644
index 000000000000..2ae4ac62b0e2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#include <mlx5_core.h>
+#include "fs_hws_pools.h"
+
+#define MLX5_FS_HWS_DEFAULT_BULK_LEN 65536
+#define MLX5_FS_HWS_POOL_MAX_THRESHOLD BIT(18)
+#define MLX5_FS_HWS_POOL_USED_BUFF_RATIO 10
+
+static struct mlx5hws_action *
+mlx5_fs_dl3tnltol2_bulk_action_create(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr[2] = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
+ reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX].sz = ETH_HLEN;
+ reformat_hdr[MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX].sz = ETH_HLEN + VLAN_HLEN;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 2,
+ reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_el2tol3tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
+ reformat_hdr.sz = data_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
+ &reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_el2tol2tnl_bulk_action_create(struct mlx5hws_context *ctx, size_t data_size)
+{
+ struct mlx5hws_action_reformat_header reformat_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ enum mlx5hws_action_type reformat_type;
+ u32 log_bulk_size;
+
+ reformat_type = MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
+ reformat_hdr.sz = data_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_reformat(ctx, reformat_type, 1,
+ &reformat_hdr, log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_insert_hdr_bulk_action_create(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_action_insert_header insert_hdr = {};
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ u32 log_bulk_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ insert_hdr.hdr.sz = MLX5_FS_INSERT_HDR_VLAN_SIZE;
+ insert_hdr.anchor = MLX5_FS_INSERT_HDR_VLAN_ANCHOR;
+ insert_hdr.offset = MLX5_FS_INSERT_HDR_VLAN_OFFSET;
+
+ return mlx5hws_action_create_insert_header(ctx, 1, &insert_hdr,
+ log_bulk_size, flags);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_pr_bulk_action_create(struct mlx5_core_dev *dev,
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5hws_context *ctx;
+ size_t encap_data_size;
+
+ root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
+ return NULL;
+
+ ctx = root_ns->fs_hws_context.hws_ctx;
+ if (!ctx)
+ return NULL;
+
+ encap_data_size = pr_pool_ctx->encap_data_size;
+ switch (pr_pool_ctx->reformat_type) {
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ return mlx5_fs_dl3tnltol2_bulk_action_create(ctx);
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ return mlx5_fs_el2tol3tnl_bulk_action_create(ctx, encap_data_size);
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ return mlx5_fs_el2tol2tnl_bulk_action_create(ctx, encap_data_size);
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ return mlx5_fs_insert_hdr_bulk_action_create(ctx);
+ default:
+ return NULL;
+ }
+}
+
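+/* Allocate a bulk of MLX5_FS_HWS_DEFAULT_BULK_LEN packet-reformat
+ * entries backed by a single bulk HWS action; each entry records its
+ * owning bulk and its offset within it.
+ */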
+static struct mlx5_fs_bulk *
+mlx5_fs_hws_pr_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+ int bulk_len;
+ int i;
+
+ if (!pool_ctx)
+ return NULL;
+ pr_pool_ctx = pool_ctx;
+ bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
+ pr_bulk = kvzalloc(struct_size(pr_bulk, prs_data, bulk_len), GFP_KERNEL);
+ if (!pr_bulk)
+ return NULL;
+
+ if (mlx5_fs_bulk_init(dev, &pr_bulk->fs_bulk, bulk_len))
+ goto free_pr_bulk;
+
+ for (i = 0; i < bulk_len; i++) {
+ pr_bulk->prs_data[i].bulk = pr_bulk;
+ pr_bulk->prs_data[i].offset = i;
+ }
+
+ pr_bulk->hws_action = mlx5_fs_pr_bulk_action_create(dev, pr_pool_ctx);
+ if (!pr_bulk->hws_action)
+ goto cleanup_fs_bulk;
+
+ return &pr_bulk->fs_bulk;
+
+cleanup_fs_bulk:
+ mlx5_fs_bulk_cleanup(&pr_bulk->fs_bulk);
+free_pr_bulk:
+ kvfree(pr_bulk);
+ return NULL;
+}
+
+static int
+mlx5_fs_hws_pr_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *fs_bulk)
+{
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+
+ pr_bulk = container_of(fs_bulk, struct mlx5_fs_hws_pr_bulk, fs_bulk);
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
+ mlx5_core_err(dev, "Freeing bulk before all reformats were released\n");
+ return -EBUSY;
+ }
+
+ mlx5hws_action_destroy(pr_bulk->hws_action);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(pr_bulk);
+
+ return 0;
+}
+
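+/* The pool threshold scales with usage: a tenth of the used units,
+ * capped at MLX5_FS_HWS_POOL_MAX_THRESHOLD.
+ */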
+static void mlx5_hws_pool_update_threshold(struct mlx5_fs_pool *hws_pool)
+{
+ hws_pool->threshold = min_t(int, MLX5_FS_HWS_POOL_MAX_THRESHOLD,
+ hws_pool->used_units / MLX5_FS_HWS_POOL_USED_BUFF_RATIO);
+}
+
+static const struct mlx5_fs_pool_ops mlx5_fs_hws_pr_pool_ops = {
+ .bulk_create = mlx5_fs_hws_pr_bulk_create,
+ .bulk_destroy = mlx5_fs_hws_pr_bulk_destroy,
+ .update_threshold = mlx5_hws_pool_update_threshold,
+};
+
+int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_core_dev *dev, size_t encap_data_size,
+ enum mlx5hws_action_type reformat_type)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+
+ if (reformat_type != MLX5HWS_ACTION_TYP_INSERT_HEADER &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 &&
+ reformat_type != MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2)
+ return -EOPNOTSUPP;
+
+ pr_pool_ctx = kzalloc(sizeof(*pr_pool_ctx), GFP_KERNEL);
+ if (!pr_pool_ctx)
+ return -ENOMEM;
+ pr_pool_ctx->reformat_type = reformat_type;
+ pr_pool_ctx->encap_data_size = encap_data_size;
+ mlx5_fs_pool_init(pr_pool, dev, &mlx5_fs_hws_pr_pool_ops, pr_pool_ctx);
+ return 0;
+}
+
+void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool)
+{
+ struct mlx5_fs_hws_pr_pool_ctx *pr_pool_ctx;
+
+ mlx5_fs_pool_cleanup(pr_pool);
+ pr_pool_ctx = pr_pool->pool_ctx;
+ if (!pr_pool_ctx)
+ return;
+ kfree(pr_pool_ctx);
+}
+
+struct mlx5_fs_hws_pr *
+mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool)
+{
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fs_hws_pr_bulk *pr_bulk;
+ int err;
+
+ err = mlx5_fs_pool_acquire_index(pr_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ pr_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_pr_bulk,
+ fs_bulk);
+ return &pr_bulk->prs_data[pool_index.index];
+}
+
+void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_fs_hws_pr *pr_data)
+{
+ struct mlx5_fs_bulk *fs_bulk = &pr_data->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_core_dev *dev = pr_pool->dev;
+
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = pr_data->offset;
+ if (mlx5_fs_pool_release_index(pr_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release packet reformat which is not acquired\n");
+}
+
+struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data)
+{
+ return pr_data->bulk->hws_action;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_mh_bulk_action_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB;
+ u32 log_bulk_size;
+
+ log_bulk_size = ilog2(MLX5_FS_HWS_DEFAULT_BULK_LEN);
+ return mlx5hws_action_create_modify_header(ctx, 1, pattern,
+ log_bulk_size, flags);
+}
+
+static struct mlx5_fs_bulk *
+mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
+{
+ struct mlx5hws_action_mh_pattern *pattern;
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+ struct mlx5hws_context *ctx;
+ int bulk_len;
+
+ if (!pool_ctx)
+ return NULL;
+
+ root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
+ return NULL;
+
+ ctx = root_ns->fs_hws_context.hws_ctx;
+ if (!ctx)
+ return NULL;
+
+ pattern = pool_ctx;
+ bulk_len = MLX5_FS_HWS_DEFAULT_BULK_LEN;
+ mh_bulk = kvzalloc(struct_size(mh_bulk, mhs_data, bulk_len), GFP_KERNEL);
+ if (!mh_bulk)
+ return NULL;
+
+ if (mlx5_fs_bulk_init(dev, &mh_bulk->fs_bulk, bulk_len))
+ goto free_mh_bulk;
+
+ for (int i = 0; i < bulk_len; i++) {
+ mh_bulk->mhs_data[i].bulk = mh_bulk;
+ mh_bulk->mhs_data[i].offset = i;
+ }
+
+ mh_bulk->hws_action = mlx5_fs_mh_bulk_action_create(ctx, pattern);
+ if (!mh_bulk->hws_action)
+ goto cleanup_fs_bulk;
+
+ return &mh_bulk->fs_bulk;
+
+cleanup_fs_bulk:
+ mlx5_fs_bulk_cleanup(&mh_bulk->fs_bulk);
+free_mh_bulk:
+ kvfree(mh_bulk);
+ return NULL;
+}
+
+static int
+mlx5_fs_hws_mh_bulk_destroy(struct mlx5_core_dev *dev,
+ struct mlx5_fs_bulk *fs_bulk)
+{
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+
+ mh_bulk = container_of(fs_bulk, struct mlx5_fs_hws_mh_bulk, fs_bulk);
+ if (mlx5_fs_bulk_get_free_amount(fs_bulk) < fs_bulk->bulk_len) {
+ mlx5_core_err(dev, "Freeing bulk before all modify header were released\n");
+ return -EBUSY;
+ }
+
+ mlx5hws_action_destroy(mh_bulk->hws_action);
+ mlx5_fs_bulk_cleanup(fs_bulk);
+ kvfree(mh_bulk);
+
+ return 0;
+}
+
+static const struct mlx5_fs_pool_ops mlx5_fs_hws_mh_pool_ops = {
+ .bulk_create = mlx5_fs_hws_mh_bulk_create,
+ .bulk_destroy = mlx5_fs_hws_mh_bulk_destroy,
+ .update_threshold = mlx5_hws_pool_update_threshold,
+};
+
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+ struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+
+ pool_pattern = kzalloc(sizeof(*pool_pattern), GFP_KERNEL);
+ if (!pool_pattern)
+ return -ENOMEM;
+ pool_pattern->data = kmemdup(pattern->data, pattern->sz, GFP_KERNEL);
+ if (!pool_pattern->data) {
+ kfree(pool_pattern);
+ return -ENOMEM;
+ }
+ pool_pattern->sz = pattern->sz;
+ mlx5_fs_pool_init(fs_hws_mh_pool, dev, &mlx5_fs_hws_mh_pool_ops,
+ pool_pattern);
+ return 0;
+}
+
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+
+ mlx5_fs_pool_cleanup(fs_hws_mh_pool);
+ pool_pattern = fs_hws_mh_pool->pool_ctx;
+ if (!pool_pattern)
+ return;
+ kfree(pool_pattern->data);
+ kfree(pool_pattern);
+}
+
+struct mlx5_fs_hws_mh *
+mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool)
+{
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_fs_hws_mh_bulk *mh_bulk;
+ int err;
+
+ err = mlx5_fs_pool_acquire_index(mh_pool, &pool_index);
+ if (err)
+ return ERR_PTR(err);
+ mh_bulk = container_of(pool_index.fs_bulk, struct mlx5_fs_hws_mh_bulk,
+ fs_bulk);
+ return &mh_bulk->mhs_data[pool_index.index];
+}
+
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+ struct mlx5_fs_hws_mh *mh_data)
+{
+ struct mlx5_fs_bulk *fs_bulk = &mh_data->bulk->fs_bulk;
+ struct mlx5_fs_pool_index pool_index = {};
+ struct mlx5_core_dev *dev = mh_pool->dev;
+
+ pool_index.fs_bulk = fs_bulk;
+ pool_index.index = mh_data->offset;
+ if (mlx5_fs_pool_release_index(mh_pool, &pool_index))
+ mlx5_core_warn(dev, "Attempted to release modify header which is not acquired\n");
+}
+
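+/* Patterns match when their sizes are equal and every modify-header
+ * action dword compares equal.
+ */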
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+ struct mlx5hws_action_mh_pattern *pattern)
+{
+ struct mlx5hws_action_mh_pattern *pool_pattern;
+ int num_actions, i;
+
+ pool_pattern = mh_pool->pool_ctx;
+ if (WARN_ON_ONCE(!pool_pattern))
+ return false;
+
+ if (pattern->sz != pool_pattern->sz)
+ return false;
+ num_actions = pattern->sz / MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
+ for (i = 0; i < num_actions; i++) {
+ if ((__force __be32)pattern->data[i] !=
+ (__force __be32)pool_pattern->data[i])
+ return false;
+ }
+ return true;
+}
+
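+/* Get the shared counter action of the counter's bulk, creating it on
+ * first use. A refcount with double-checked locking keeps the common
+ * path lock-free.
+ */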
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fc_bulk *fc_bulk = counter->bulk;
+ struct mlx5_fc_bulk_hws_data *fc_bulk_hws;
+
+ fc_bulk_hws = &fc_bulk->hws_data;
+ /* try to avoid taking the lock when not necessary */
+ if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount))
+ return fc_bulk_hws->hws_action;
+
+ mutex_lock(&fc_bulk_hws->lock);
+ if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount)) {
+ mutex_unlock(&fc_bulk_hws->lock);
+ return fc_bulk_hws->hws_action;
+ }
+ fc_bulk_hws->hws_action =
+ mlx5hws_action_create_counter(ctx, fc_bulk->base_id, flags);
+ if (!fc_bulk_hws->hws_action) {
+ mutex_unlock(&fc_bulk_hws->lock);
+ return NULL;
+ }
+ refcount_set(&fc_bulk_hws->hws_action_refcount, 1);
+ mutex_unlock(&fc_bulk_hws->lock);
+
+ return fc_bulk_hws->hws_action;
+}
+
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
+{
+ struct mlx5_fc_bulk_hws_data *fc_bulk_hws = &counter->bulk->hws_data;
+
+ /* try to avoid taking the lock when not necessary */
+ if (refcount_dec_not_one(&fc_bulk_hws->hws_action_refcount))
+ return;
+
+ mutex_lock(&fc_bulk_hws->lock);
+ if (!refcount_dec_and_test(&fc_bulk_hws->hws_action_refcount)) {
+ mutex_unlock(&fc_bulk_hws->lock);
+ return;
+ }
+ mlx5hws_action_destroy(fc_bulk_hws->hws_action);
+ fc_bulk_hws->hws_action = NULL;
+ mutex_unlock(&fc_bulk_hws->lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
new file mode 100644
index 000000000000..34072551dd21
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2025 NVIDIA Corporation & Affiliates */
+
+#ifndef __MLX5_FS_HWS_POOLS_H__
+#define __MLX5_FS_HWS_POOLS_H__
+
+#include <linux/if_vlan.h>
+#include "fs_pool.h"
+#include "fs_core.h"
+
+#define MLX5_FS_INSERT_HDR_VLAN_ANCHOR MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START
+#define MLX5_FS_INSERT_HDR_VLAN_OFFSET offsetof(struct vlan_ethhdr, h_vlan_proto)
+#define MLX5_FS_INSERT_HDR_VLAN_SIZE sizeof(struct vlan_hdr)
+
+enum {
+ MLX5_FS_DL3TNLTOL2_MAC_HDR_IDX = 0,
+ MLX5_FS_DL3TNLTOL2_MAC_VLAN_HDR_IDX,
+};
+
+struct mlx5_fs_hws_pr {
+ struct mlx5_fs_hws_pr_bulk *bulk;
+ u32 offset;
+ u8 hdr_idx;
+ u8 *data;
+ size_t data_size;
+};
+
+struct mlx5_fs_hws_pr_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_hws_pr prs_data[];
+};
+
+struct mlx5_fs_hws_pr_pool_ctx {
+ enum mlx5hws_action_type reformat_type;
+ size_t encap_data_size;
+};
+
+struct mlx5_fs_hws_mh {
+ struct mlx5_fs_hws_mh_bulk *bulk;
+ u32 offset;
+ u8 *data;
+};
+
+struct mlx5_fs_hws_mh_bulk {
+ struct mlx5_fs_bulk fs_bulk;
+ struct mlx5_fs_pool *mh_pool;
+ struct mlx5hws_action *hws_action;
+ struct mlx5_fs_hws_mh mhs_data[];
+};
+
+int mlx5_fs_hws_pr_pool_init(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_core_dev *dev, size_t encap_data_size,
+ enum mlx5hws_action_type reformat_type);
+void mlx5_fs_hws_pr_pool_cleanup(struct mlx5_fs_pool *pr_pool);
+
+struct mlx5_fs_hws_pr *mlx5_fs_hws_pr_pool_acquire_pr(struct mlx5_fs_pool *pr_pool);
+void mlx5_fs_hws_pr_pool_release_pr(struct mlx5_fs_pool *pr_pool,
+ struct mlx5_fs_hws_pr *pr_data);
+struct mlx5hws_action *mlx5_fs_hws_pr_get_action(struct mlx5_fs_hws_pr *pr_data);
+int mlx5_fs_hws_mh_pool_init(struct mlx5_fs_pool *fs_hws_mh_pool,
+ struct mlx5_core_dev *dev,
+ struct mlx5hws_action_mh_pattern *pattern);
+void mlx5_fs_hws_mh_pool_cleanup(struct mlx5_fs_pool *fs_hws_mh_pool);
+struct mlx5_fs_hws_mh *mlx5_fs_hws_mh_pool_acquire_mh(struct mlx5_fs_pool *mh_pool);
+void mlx5_fs_hws_mh_pool_release_mh(struct mlx5_fs_pool *mh_pool,
+ struct mlx5_fs_hws_mh *mh_data);
+bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
+ struct mlx5hws_action_mh_pattern *pattern);
+struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
+ struct mlx5_fc *counter);
+void mlx5_fc_put_hws_action(struct mlx5_fc *counter);
+#endif /* __MLX5_FS_HWS_POOLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
index 3c8635f286ce..30ccd635b505 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h
@@ -39,7 +39,6 @@
#define mlx5hws_dbg(ctx, arg...) mlx5_core_dbg((ctx)->mdev, ##arg)
#define MLX5HWS_TABLE_TYPE_BASE 2
-#define MLX5HWS_ACTION_STE_IDX_ANY 0
static inline bool is_mem_zero(const u8 *mem, size_t size)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
index 1bb3a6f8c3cd..b61864b32053 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
@@ -165,14 +165,14 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
next->match_ste.rtc_0_id,
next->match_ste.rtc_1_id);
if (ret) {
- mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect matcher\n");
+ return ret;
}
} else {
ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
if (ret) {
- mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to disconnect last matcher\n");
+ return ret;
}
}
@@ -180,27 +180,19 @@ static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
if (prev_ft_id == tbl->ft_id) {
ret = mlx5hws_table_update_connected_miss_tables(tbl);
if (ret) {
- mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n");
- goto matcher_reconnect;
+ mlx5hws_err(tbl->ctx,
+ "Fatal error, failed to update connected miss table\n");
+ return ret;
}
}
ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
if (ret) {
mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
- goto matcher_reconnect;
+ return ret;
}
return 0;
-
-matcher_reconnect:
- if (list_empty(&tbl->matchers_list) || !prev)
- list_add(&matcher->list_node, &tbl->matchers_list);
- else
- /* insert after prev matcher */
- list_add(&matcher->list_node, &prev->list_node);
-
- return ret;
}
static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
@@ -208,7 +200,7 @@ static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
enum mlx5hws_matcher_rtc_type rtc_type,
bool is_mirror)
{
- struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste;
+ struct mlx5hws_pool_chunk *ste = &matcher->action_ste.ste;
enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH;
@@ -225,8 +217,7 @@ static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
}
static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type,
- u8 action_ste_selector)
+ enum mlx5hws_matcher_rtc_type rtc_type)
{
struct mlx5hws_matcher_attr *attr = &matcher->attr;
struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
@@ -286,14 +277,20 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
break;
case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
rtc_0_id = &action_ste->rtc_0_id;
rtc_1_id = &action_ste->rtc_1_id;
ste_pool = action_ste->pool;
ste = &action_ste->ste;
+ /* Action RTC size calculation:
+ * log((max number of rules in matcher) *
+ * (max number of action STEs per rule) *
+ * (2 to support writing new STEs for update rule))
+ */
ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- attr->table.sz_row_log;
+ attr->table.sz_row_log +
+ MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT;
rtc_attr.log_size = ste->order;
rtc_attr.log_depth = 0;
rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
@@ -318,8 +315,8 @@ static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false);
/* STC is a single resource (obj_id), use any STC for the ID */
- stc_pool = ctx->stc_pool[tbl->type];
- default_stc = ctx->common_res[tbl->type].default_stc;
+ stc_pool = ctx->stc_pool;
+ default_stc = ctx->common_res.default_stc;
obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
rtc_attr.stc_base = obj_id;
@@ -358,8 +355,7 @@ free_ste:
}
static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
- enum mlx5hws_matcher_rtc_type rtc_type,
- u8 action_ste_selector)
+ enum mlx5hws_matcher_rtc_type rtc_type)
{
struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_table *tbl = matcher->tbl;
@@ -375,7 +371,7 @@ static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
ste = &matcher->match_ste.ste;
break;
case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
rtc_0_id = action_ste->rtc_0_id;
rtc_1_id = action_ste->rtc_1_id;
ste_pool = action_ste->pool;
@@ -466,20 +462,13 @@ static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher)
if (!resize_data)
return -ENOMEM;
- resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
-
- resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc;
- resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id;
- resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id;
- resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ?
- src_matcher->action_ste[0].pool :
- NULL;
- resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc;
- resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id;
- resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id;
- resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ?
- src_matcher->action_ste[1].pool :
- NULL;
+ resize_data->max_stes = src_matcher->action_ste.max_stes;
+
+ resize_data->stc = src_matcher->action_ste.stc;
+ resize_data->rtc_0_id = src_matcher->action_ste.rtc_0_id;
+ resize_data->rtc_1_id = src_matcher->action_ste.rtc_1_id;
+ resize_data->pool = src_matcher->action_ste.max_stes ?
+ src_matcher->action_ste.pool : NULL;
/* Place the new resized matcher on the dst matcher's list */
list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
@@ -512,49 +501,69 @@ static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher)
if (resize_data->max_stes) {
mlx5hws_action_free_single_stc(matcher->tbl->ctx,
matcher->tbl->type,
- &resize_data->action_ste[1].stc);
- mlx5hws_action_free_single_stc(matcher->tbl->ctx,
- matcher->tbl->type,
- &resize_data->action_ste[0].stc);
+ &resize_data->stc);
- if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB)
mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[1].rtc_1_id);
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[0].rtc_1_id);
- }
- mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[1].rtc_0_id);
+ resize_data->rtc_1_id);
+
mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
- resize_data->action_ste[0].rtc_0_id);
- if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) {
- mlx5hws_pool_destroy(resize_data->action_ste[1].pool);
- mlx5hws_pool_destroy(resize_data->action_ste[0].pool);
- }
+ resize_data->rtc_0_id);
+
+ if (resize_data->pool)
+ mlx5hws_pool_destroy(resize_data->pool);
}
kfree(resize_data);
}
}
-static int
-hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_table *tbl = matcher->tbl;
struct mlx5hws_pool_attr pool_attr = {0};
struct mlx5hws_context *ctx = tbl->ctx;
- int ret;
+ u32 required_stes;
+ u8 max_stes = 0;
+ int i, ret;
+
+ if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
+ return 0;
+
+ for (i = 0; i < matcher->num_of_at; i++) {
+ struct mlx5hws_action_template *at = &matcher->at[i];
+
+ ret = hws_matcher_check_and_process_at(matcher, at);
+ if (ret) {
+ mlx5hws_err(ctx, "Invalid at %d", i);
+ return ret;
+ }
+
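+ /* One action STE less is needed when the match STE is not jumbo
+ * or when the action template only terminates the flow.
+ */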
+ required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+ max_stes = max(max_stes, required_stes);
+
+ /* Future: Optimize reparse */
+ }
+
+ /* No additional action STEs are required for this matcher */
+ if (!max_stes)
+ return 0;
+
+ matcher->action_ste.max_stes = max_stes;
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
/* Allocate action STE mempool */
pool_attr.table_type = tbl->type;
pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
+ /* Pool size is the same as the action RTC size */
pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
- matcher->attr.table.sz_row_log;
+ matcher->attr.table.sz_row_log +
+ MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT;
hws_matcher_set_pool_attr(&pool_attr, matcher);
action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
if (!action_ste->pool) {
@@ -563,7 +572,7 @@ hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
}
/* Allocate action RTC */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
if (ret) {
mlx5hws_err(ctx, "Failed to create action RTC\n");
goto free_ste_pool;
@@ -587,18 +596,18 @@ hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
return 0;
free_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
free_ste_pool:
mlx5hws_pool_destroy(action_ste->pool);
return ret;
}
-static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_table *tbl = matcher->tbl;
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
if (!action_ste->max_stes ||
matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION ||
@@ -606,65 +615,10 @@ static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action
return;
mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY);
mlx5hws_pool_destroy(action_ste->pool);
}
-static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
-{
- bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
- struct mlx5hws_table *tbl = matcher->tbl;
- struct mlx5hws_context *ctx = tbl->ctx;
- u32 required_stes;
- u8 max_stes = 0;
- int i, ret;
-
- if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
- return 0;
-
- for (i = 0; i < matcher->num_of_at; i++) {
- struct mlx5hws_action_template *at = &matcher->at[i];
-
- ret = hws_matcher_check_and_process_at(matcher, at);
- if (ret) {
- mlx5hws_err(ctx, "Invalid at %d", i);
- return ret;
- }
-
- required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
- max_stes = max(max_stes, required_stes);
-
- /* Future: Optimize reparse */
- }
-
- /* There are no additional STEs required for matcher */
- if (!max_stes)
- return 0;
-
- matcher->action_ste[0].max_stes = max_stes;
- matcher->action_ste[1].max_stes = max_stes;
-
- ret = hws_matcher_bind_at_idx(matcher, 0);
- if (ret)
- return ret;
-
- ret = hws_matcher_bind_at_idx(matcher, 1);
- if (ret)
- goto free_at_0;
-
- return 0;
-
-free_at_0:
- hws_matcher_unbind_at_idx(matcher, 0);
- return ret;
-}
-
-static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
-{
- hws_matcher_unbind_at_idx(matcher, 1);
- hws_matcher_unbind_at_idx(matcher, 0);
-}
-
static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
{
struct mlx5hws_context *ctx = matcher->tbl->ctx;
@@ -810,7 +764,7 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
goto unbind_at;
/* Allocate the RTC for the new matcher */
- ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
if (ret)
goto destroy_end_ft;
@@ -822,7 +776,7 @@ static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
return 0;
destroy_rtc:
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
destroy_end_ft:
hws_matcher_destroy_end_ft(matcher);
unbind_at:
@@ -836,7 +790,7 @@ static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
{
hws_matcher_resize_uninit(matcher);
hws_matcher_disconnect(matcher);
- hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH);
hws_matcher_destroy_end_ft(matcher);
hws_matcher_unbind_at(matcher);
hws_matcher_unbind_mt(matcher);
@@ -970,10 +924,9 @@ int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
return ret;
required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
- if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
+ if (matcher->action_ste.max_stes < required_stes) {
mlx5hws_dbg(ctx, "Required STEs [%d] exceeds initial action template STE [%d]\n",
- required_stes,
- matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes);
+ required_stes, matcher->action_ste.max_stes);
return -ENOMEM;
}
@@ -1007,9 +960,9 @@ hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
if (!matcher->mt)
return -ENOMEM;
- matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
- sizeof(*matcher->at),
- GFP_KERNEL);
+ matcher->at = kvcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
+ sizeof(*matcher->at),
+ GFP_KERNEL);
if (!matcher->at) {
mlx5hws_err(ctx, "Failed to allocate action template array\n");
ret = -ENOMEM;
@@ -1035,7 +988,7 @@ free_mt:
static void
hws_matcher_unset_templates(struct mlx5hws_matcher *matcher)
{
- kfree(matcher->at);
+ kvfree(matcher->at);
kfree(matcher->mt);
}
@@ -1157,8 +1110,7 @@ static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
return -EINVAL;
}
- if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes >
- dst_matcher->action_ste[0].max_stes) {
+ if (src_matcher->action_ste.max_stes > dst_matcher->action_ste.max_stes) {
mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
index 81ff487f57be..020de70270c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h
@@ -18,6 +18,11 @@
/* Required depth of the main large table */
#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
+/* Action RTC size multiplier that is required in order
+ * to support rule update for rules with action STEs.
+ */
+#define MLX5HWS_MATCHER_ACTION_RTC_UPDATE_MULT 1
+
enum mlx5hws_matcher_offset {
MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
@@ -52,15 +57,11 @@ struct mlx5hws_matcher_action_ste {
u8 max_stes;
};
-struct mlx5hws_matcher_resize_data_node {
+struct mlx5hws_matcher_resize_data {
struct mlx5hws_pool_chunk stc;
u32 rtc_0_id;
u32 rtc_1_id;
struct mlx5hws_pool *pool;
-};
-
-struct mlx5hws_matcher_resize_data {
- struct mlx5hws_matcher_resize_data_node action_ste[2];
u8 max_stes;
struct list_head list_node;
};
@@ -78,7 +79,7 @@ struct mlx5hws_matcher {
struct mlx5hws_matcher *col_matcher;
struct mlx5hws_matcher *resize_dst;
struct mlx5hws_matcher_match_ste match_ste;
- struct mlx5hws_matcher_action_ste action_ste[2];
+ struct mlx5hws_matcher_action_ste action_ste;
struct list_head list_node;
struct list_head resize_data;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index f39d636ff39a..5121951f2778 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -70,7 +70,6 @@ enum mlx5hws_send_queue_actions {
struct mlx5hws_context_attr {
u16 queues;
u16 queue_size;
- bool bwc; /* add support for backward compatible API*/
};
struct mlx5hws_table_attr {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index 06db5e4726ae..d9dc4f2d0dc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -344,7 +344,7 @@ void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
memset(wqe_ctrl, 0, wqe_len);
mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
- memcpy(wqe_arg, arg_data, wqe_len);
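+ /* Copy exactly the argument data size, not the posted WQE length,
+ * to avoid over-reading arg_data.
+ */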
+ memcpy(wqe_arg, arg_data, MLX5HWS_ARG_DATA_SIZE);
send_attr.id = arg_idx++;
mlx5hws_send_engine_post_end(&ctrl, &send_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
index 27ca93385b08..8ddb51980044 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h
@@ -31,7 +31,7 @@ struct mlx5hws_pattern_cache_item {
u8 *data;
u16 num_of_actions;
} mh_data;
- u32 refcount;
+ u32 refcount; /* protected by pattern_cache lock */
struct list_head ptrn_list_node;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
index fed2d913f3b8..50a81d360bb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
@@ -183,7 +183,7 @@ static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
*seg = -1;
/* Find the next free place from the buddy array */
- while (*seg == -1) {
+ while (*seg < 0) {
for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
buddy = hws_pool_buddy_get_next_buddy(pool, i,
order,
@@ -194,7 +194,7 @@ static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
}
*seg = mlx5hws_buddy_alloc_mem(buddy, order);
- if (*seg != -1)
+ if (*seg >= 0)
goto found;
if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
index de92cecbeb92..271490a51b96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h
@@ -390,11 +390,6 @@ struct mlx5_ifc_definer_bits {
u8 match_mask[0x160];
};
-struct mlx5_ifc_arg_bits {
- u8 rsvd0[0x88];
- u8 access_pd[0x18];
-};
-
struct mlx5_ifc_header_modify_pattern_in_bits {
u8 modify_field_select[0x40];
@@ -428,11 +423,6 @@ struct mlx5_ifc_create_definer_in_bits {
struct mlx5_ifc_definer_bits definer;
};
-struct mlx5_ifc_create_arg_in_bits {
- struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
- struct mlx5_ifc_arg_bits arg;
-};
-
struct mlx5_ifc_create_header_modify_pattern_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_header_modify_pattern_in_bits pattern;
@@ -479,36 +469,4 @@ enum {
MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1,
};
-struct mlx5_ifc_alloc_packet_reformat_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 packet_reformat_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_packet_reformat_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
-
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
-
- u8 packet_reformat_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_packet_reformat_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 reserved_at_40[0x40];
-};
-
#endif /* MLX5_PRM_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
index e20c67a04203..a27a2d5ffc7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c
@@ -129,27 +129,18 @@ static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
static void
hws_rule_save_resize_info(struct mlx5hws_rule *rule,
- struct mlx5hws_send_ste_attr *ste_attr,
- bool is_update)
+ struct mlx5hws_send_ste_attr *ste_attr)
{
if (!mlx5hws_matcher_is_resizable(rule->matcher))
return;
- if (likely(!is_update)) {
+ /* resize_info might already exist (if we're in update flow) */
+ if (likely(!rule->resize_info)) {
rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
if (unlikely(!rule->resize_info)) {
pr_warn("HWS: resize info isn't allocated for rule\n");
return;
}
-
- rule->resize_info->max_stes =
- rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
- rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ?
- rule->matcher->action_ste[0].pool :
- NULL;
- rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ?
- rule->matcher->action_ste[1].pool :
- NULL;
}
memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
@@ -204,15 +195,14 @@ hws_rule_load_delete_info(struct mlx5hws_rule *rule,
}
}
-static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
- u8 action_ste_selector)
+static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule)
{
struct mlx5hws_matcher *matcher = rule->matcher;
struct mlx5hws_matcher_action_ste *action_ste;
struct mlx5hws_pool_chunk ste = {0};
int ret;
- action_ste = &matcher->action_ste[action_ste_selector];
+ action_ste = &matcher->action_ste;
ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
if (unlikely(ret)) {
@@ -220,68 +210,29 @@ static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
"Failed to allocate STE for rule actions");
return ret;
}
- rule->action_ste_idx = ste.offset;
+
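+ /* Save the pool and STE count on the rule itself so the chunk can
+ * later be freed without consulting the (possibly resized) matcher.
+ */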
+ rule->action_ste.pool = matcher->action_ste.pool;
+ rule->action_ste.num_stes = matcher->action_ste.max_stes;
+ rule->action_ste.index = ste.offset;
return 0;
}
-static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule,
- u8 action_ste_selector)
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste)
{
- struct mlx5hws_matcher *matcher = rule->matcher;
struct mlx5hws_pool_chunk ste = {0};
- struct mlx5hws_pool *pool;
- u8 max_stes;
-
- if (mlx5hws_matcher_is_resizable(matcher)) {
- /* Free the original action pool if rule was resized */
- max_stes = rule->resize_info->max_stes;
- pool = rule->resize_info->action_ste_pool[action_ste_selector];
- } else {
- max_stes = matcher->action_ste[action_ste_selector].max_stes;
- pool = matcher->action_ste[action_ste_selector].pool;
- }
-
- /* This release is safe only when the rule match part was deleted */
- ste.order = ilog2(roundup_pow_of_two(max_stes));
- ste.offset = rule->action_ste_idx;
-
- mlx5hws_pool_chunk_free(pool, &ste);
-}
-static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
- struct mlx5hws_rule_attr *attr)
-{
- int action_ste_idx;
- int ret;
-
- ret = hws_rule_alloc_action_ste_idx(rule, 0);
- if (unlikely(ret))
- return ret;
-
- action_ste_idx = rule->action_ste_idx;
-
- ret = hws_rule_alloc_action_ste_idx(rule, 1);
- if (unlikely(ret)) {
- hws_rule_free_action_ste_idx(rule, 0);
- return ret;
- }
-
- /* Both pools have to return the same index */
- if (unlikely(rule->action_ste_idx != action_ste_idx)) {
- pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n");
- return -EINVAL;
- }
+ if (!action_ste->num_stes)
+ return;
- return 0;
-}
+ ste.order = ilog2(roundup_pow_of_two(action_ste->num_stes));
+ ste.offset = action_ste->index;
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule)
-{
- if (rule->action_ste_idx > -1) {
- hws_rule_free_action_ste_idx(rule, 1);
- hws_rule_free_action_ste_idx(rule, 0);
- }
+ /* This release is safe only when the rule match STE was deleted
+ * (when the rule is being deleted) or replaced with a new STE that
+ * isn't pointing to the old action STEs (when the rule is being updated).
+ */
+ mlx5hws_pool_chunk_free(action_ste->pool, &ste);
}
static void hws_rule_create_init(struct mlx5hws_rule *rule,
@@ -298,14 +249,24 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
/* In update we use these rtc's */
rule->rtc_0 = 0;
rule->rtc_1 = 0;
- rule->action_ste_selector = 0;
+
+ rule->action_ste.pool = NULL;
+ rule->action_ste.num_stes = 0;
+ rule->action_ste.index = -1;
+
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
} else {
- rule->action_ste_selector = !rule->action_ste_selector;
+ rule->status = MLX5HWS_RULE_STATUS_UPDATING;
}
+ /* Initialize the old action STE info - shallow-copy action_ste.
+ * In create flow this will set old_action_ste fields to initial values.
+ * In update flow this will save the existing action STE info,
+ * so that we will later use it to free old STEs.
+ */
+ rule->old_action_ste = rule->action_ste;
+
rule->pending_wqes = 0;
- rule->action_ste_idx = -1;
- rule->status = MLX5HWS_RULE_STATUS_CREATING;
/* Init default send STE attributes */
ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
@@ -315,8 +276,8 @@ static void hws_rule_create_init(struct mlx5hws_rule *rule,
/* Init default action apply */
apply->tbl_type = tbl->type;
- apply->common_res = &ctx->common_res[tbl->type];
- apply->jump_to_action_stc = matcher->action_ste[0].stc.offset;
+ apply->common_res = &ctx->common_res;
+ apply->jump_to_action_stc = matcher->action_ste.stc.offset;
apply->require_dep = 0;
}
@@ -332,8 +293,6 @@ static void hws_rule_move_init(struct mlx5hws_rule *rule,
rule->rtc_1 = 0;
rule->pending_wqes = 0;
- rule->action_ste_idx = -1;
- rule->action_ste_selector = 0;
rule->status = MLX5HWS_RULE_STATUS_CREATING;
rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
}
@@ -394,21 +353,17 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule,
if (action_stes) {
/* Allocate action STEs for rules that need more than match STE */
- if (!is_update) {
- ret = hws_rule_alloc_action_ste(rule, attr);
- if (ret) {
- mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
- mlx5hws_send_abort_new_dep_wqe(queue);
- return ret;
- }
+ ret = hws_rule_alloc_action_ste(rule);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ return ret;
}
/* Skip RX/TX based on the dep_wqe init */
- ste_attr.rtc_0 = dep_wqe->rtc_0 ?
- matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0;
- ste_attr.rtc_1 = dep_wqe->rtc_1 ?
- matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0;
+ ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0_id : 0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1_id : 0;
/* Action STEs are written to a specific index last to first */
- ste_attr.direct_index = rule->action_ste_idx + action_stes;
+ ste_attr.direct_index = rule->action_ste.index + action_stes;
apply.next_direct_idx = ste_attr.direct_index;
} else {
apply.next_direct_idx = 0;
@@ -459,7 +414,7 @@ static int hws_rule_create_hws(struct mlx5hws_rule *rule,
if (!is_update)
hws_rule_save_delete_info(rule, &ste_attr);
- hws_rule_save_resize_info(rule, &ste_attr, is_update);
+ hws_rule_save_resize_info(rule, &ste_attr);
mlx5hws_send_engine_inc_rule(queue);
if (!attr->burst)
@@ -480,7 +435,10 @@ static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
/* Rule failed now we can safely release action STEs */
- mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_free_action_ste(&rule->action_ste);
+
+ /* The rule may have failed while updating - release the old action STEs as well */
+ mlx5hws_rule_free_action_ste(&rule->old_action_ste);
/* Clear complex tag */
hws_rule_clear_delete_info(rule);
@@ -517,7 +475,8 @@ static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
}
/* Rule is not completed yet */
- if (rule->status == MLX5HWS_RULE_STATUS_CREATING)
+ if (rule->status == MLX5HWS_RULE_STATUS_CREATING ||
+ rule->status == MLX5HWS_RULE_STATUS_UPDATING)
return -EBUSY;
/* Rule failed and doesn't require cleanup */
@@ -534,7 +493,7 @@ static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
hws_rule_gen_comp(queue, rule, false,
attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
- mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_free_action_ste(&rule->action_ste);
mlx5hws_rule_clear_resize_info(rule);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
index 495cdd17e9f3..b5ee94ac449b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h
@@ -15,6 +15,8 @@ enum mlx5hws_rule_status {
MLX5HWS_RULE_STATUS_UNKNOWN,
MLX5HWS_RULE_STATUS_CREATING,
MLX5HWS_RULE_STATUS_CREATED,
+ MLX5HWS_RULE_STATUS_UPDATING,
+ MLX5HWS_RULE_STATUS_UPDATED,
MLX5HWS_RULE_STATUS_DELETING,
MLX5HWS_RULE_STATUS_DELETED,
MLX5HWS_RULE_STATUS_FAILING,
@@ -41,13 +43,17 @@ struct mlx5hws_rule_match_tag {
};
};
+struct mlx5hws_rule_action_ste_info {
+ struct mlx5hws_pool *pool;
+ int index; /* STE array index */
+ u8 num_stes;
+};
+
struct mlx5hws_rule_resize_info {
- struct mlx5hws_pool *action_ste_pool[2];
u32 rtc_0;
u32 rtc_1;
u32 rule_idx;
u8 state;
- u8 max_stes;
u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */
u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */
};
@@ -58,18 +64,18 @@ struct mlx5hws_rule {
struct mlx5hws_rule_match_tag tag;
struct mlx5hws_rule_resize_info *resize_info;
};
+ struct mlx5hws_rule_action_ste_info action_ste;
+ struct mlx5hws_rule_action_ste_info old_action_ste;
u32 rtc_0; /* The RTC into which the STE was inserted */
u32 rtc_1; /* The RTC into which the STE was inserted */
- int action_ste_idx; /* STE array index */
u8 status; /* enum mlx5hws_rule_status */
- u8 action_ste_selector; /* For rule update - which action STE is in use */
u8 pending_wqes;
bool skip_delete; /* For complex rules - another rule with same tag
* still exists, so don't actually delete this rule.
*/
};
-void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule);
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule_action_ste_info *action_ste);
int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
void *queue, void *user_data);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index 883b4ed30892..cb6abc4ab7df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -377,17 +377,25 @@ static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
*status = MLX5HWS_FLOW_OP_ERROR;
} else {
- /* Increase the status, this only works on good flow as the enum
- * is arrange it away creating -> created -> deleting -> deleted
+ /* Increase the status; this only works on the good path, as
+ * the enum is arranged this way:
+ * - creating -> created
+ * - updating -> updated
+ * - deleting -> deleted
*/
priv->rule->status++;
*status = MLX5HWS_FLOW_OP_SUCCESS;
- /* Rule was deleted now we can safely release action STEs
- * and clear resize info
- */
if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) {
- mlx5hws_rule_free_action_ste(priv->rule);
+ /* Rule was deleted, now we can safely release
+ * action STEs and clear resize info
+ */
+ mlx5hws_rule_free_action_ste(&priv->rule->action_ste);
mlx5hws_rule_clear_resize_info(priv->rule);
+ } else if (priv->rule->status == MLX5HWS_RULE_STATUS_UPDATED) {
+ /* Rule was updated, free the old action STEs */
+ mlx5hws_rule_free_action_ste(&priv->rule->old_action_ste);
+ /* Update completed - move the rule back to "created" */
+ priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
}
}
}
@@ -633,6 +641,7 @@ static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
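+ /* non_wire: the queue carries steering WQEs only and is not
+ * expected to transmit packets on the wire.
+ */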
+ MLX5_SET(sqc, sqc, non_wire, 1);
ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME :
MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
@@ -896,15 +905,18 @@ close_cq:
return err;
}
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
+static void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
{
+ if (!queue->num_entries)
+ return; /* this queue wasn't initialized */
+
hws_send_ring_close(queue);
kfree(queue->completed.entries);
}
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
- struct mlx5hws_send_engine *queue,
- u16 queue_size)
+static int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size)
{
int err;
@@ -1005,7 +1017,7 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
u16 queue_size)
{
int err = 0;
- u32 i;
+ int i = 0;
/* Open one extra queue for control path */
ctx->queues = queues + 1;
@@ -1021,7 +1033,13 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
goto free_bwc_locks;
}
- for (i = 0; i < ctx->queues; i++) {
+ /* If native API isn't supported, skip the unused native queues:
+ * initialize BWC queues and control queue only.
+ */
+ if (!mlx5hws_context_native_supported(ctx))
+ i = mlx5hws_bwc_get_queue_id(ctx, 0);
+
+ for (; i < ctx->queues; i++) {
err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
if (err)
goto close_send_queues;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
index b50825d6dc53..f833092235c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h
@@ -189,12 +189,6 @@ void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
-void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);
-
-int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
- struct mlx5hws_send_engine *queue,
- u16 queue_size);
-
void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
index 9576e02d00c3..ab1297531232 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
@@ -37,6 +37,7 @@ static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
}
static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+__must_hold(&tbl->ctx->ctrl_lock)
{
struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
@@ -48,8 +49,8 @@ static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
return 0;
- if (ctx->common_res[tbl_type].default_miss) {
- ctx->common_res[tbl_type].default_miss->refcount++;
+ if (ctx->common_res.default_miss) {
+ ctx->common_res.default_miss->refcount++;
return 0;
}
@@ -70,29 +71,28 @@ static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
return -EINVAL;
}
- /* ctx->ctrl_lock must be held here */
- ctx->common_res[tbl_type].default_miss = default_miss;
- ctx->common_res[tbl_type].default_miss->refcount++;
+ ctx->common_res.default_miss = default_miss;
+ ctx->common_res.default_miss->refcount++;
return 0;
}
/* Called under ctx->ctrl_lock */
static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+__must_hold(&tbl->ctx->ctrl_lock)
{
struct mlx5hws_cmd_forward_tbl *default_miss;
struct mlx5hws_context *ctx = tbl->ctx;
- u8 tbl_type = tbl->type;
if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
return;
- default_miss = ctx->common_res[tbl_type].default_miss;
+ default_miss = ctx->common_res.default_miss;
if (--default_miss->refcount)
return;
mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss);
- ctx->common_res[tbl_type].default_miss = NULL;
+ ctx->common_res.default_miss = NULL;
}
static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id)
@@ -478,15 +478,9 @@ int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
if (old_miss_tbl)
list_del_init(&tbl->default_miss.next);
- old_miss_tbl = tbl->default_miss.miss_tbl;
- if (old_miss_tbl)
- list_del_init(&old_miss_tbl->default_miss.head);
-
if (miss_tbl)
list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head);
- mutex_unlock(&ctx->ctrl_lock);
- return 0;
out:
mutex_unlock(&ctx->ctrl_lock);
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
index 49f22cad92bf..60cb4527588a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
@@ -8,7 +8,7 @@
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
((dmn)->info.caps.dmn_type##_sw_owner || \
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
- (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
+ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_8))
bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
index e94fbb015efa..c8b8ff80c7c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
@@ -555,7 +555,7 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_tx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -566,7 +566,7 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes)
{
- ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
+ ste_ctx->set_actions_rx(ste_ctx, dmn, action_type_set, ste_ctx->actions_caps,
hw_ste_arr, attr, added_stes);
}
@@ -1458,6 +1458,8 @@ struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
return mlx5dr_ste_get_ctx_v1();
else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
return mlx5dr_ste_get_ctx_v2();
+ else if (version == MLX5_STEERING_FORMAT_CONNECTX_8)
+ return mlx5dr_ste_get_ctx_v3();
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
index 54a6619c3ecb..5f409dc30aca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
@@ -160,13 +160,15 @@ struct mlx5dr_ste_ctx {
/* Actions */
u32 actions_caps;
- void (*set_actions_rx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_rx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
struct mlx5dr_ste_actions_attr *attr,
u32 *added_stes);
- void (*set_actions_tx)(struct mlx5dr_domain *dmn,
+ void (*set_actions_tx)(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *hw_ste_arr,
@@ -197,7 +199,17 @@ struct mlx5dr_ste_ctx {
u16 *used_hw_action_num);
int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action);
void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action);
-
+ /* Actions bit set */
+ void (*set_encap)(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size);
+ void (*set_push_vlan)(u8 *ste, u8 *d_action,
+ u32 vlan_hdr);
+ void (*set_pop_vlan)(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num);
+ void (*set_rx_decap)(u8 *hw_ste_p, u8 *s_action);
+ void (*set_encap_l3)(u8 *hw_ste_p, u8 *frst_s_action,
+ u8 *scnd_d_action, u32 reformat_id,
+ int size);
/* Send */
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
@@ -205,5 +217,6 @@ struct mlx5dr_ste_ctx {
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void);
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void);
#endif /* _DR_STE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
index e9f6c7ed7a7b..42536bee55e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
@@ -406,7 +406,8 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste,
}
static void
-dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -476,7 +477,8 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
}
static void
-dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+dr_ste_v0_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
index 1d49704b9542..7f83d77c43ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
@@ -5,136 +5,6 @@
#include "mlx5_ifc_dr_ste_v1.h"
#include "dr_ste_v1.h"
-#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
- ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
- DR_STE_V1_LU_TYPE_##lookup_type##_O)
-
-enum dr_ste_v1_entry_format {
- DR_STE_V1_TYPE_BWC_BYTE = 0x0,
- DR_STE_V1_TYPE_BWC_DW = 0x1,
- DR_STE_V1_TYPE_MATCH = 0x2,
- DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
-};
-
-/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
-enum {
- DR_STE_V1_LU_TYPE_NOP = 0x0000,
- DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
- DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
- DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
- DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
- DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
- DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
- DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
- DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
- DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
- DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
- DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
- DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
- DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
- DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
- DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
- DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
- DR_STE_V1_LU_TYPE_GRE = 0x010d,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
- DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
- DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
- DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
- DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
- DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
- DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
- DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
-};
-
-enum dr_ste_v1_header_anchors {
- DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
- DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
- DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
- DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
- DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
-};
-
-enum dr_ste_v1_action_size {
- DR_STE_ACTION_SINGLE_SZ = 4,
- DR_STE_ACTION_DOUBLE_SZ = 8,
- DR_STE_ACTION_TRIPLE_SZ = 12,
-};
-
-enum dr_ste_v1_action_insert_ptr_attr {
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
- DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
-};
-
-enum dr_ste_v1_action_id {
- DR_STE_V1_ACTION_ID_NOP = 0x00,
- DR_STE_V1_ACTION_ID_COPY = 0x05,
- DR_STE_V1_ACTION_ID_SET = 0x06,
- DR_STE_V1_ACTION_ID_ADD = 0x07,
- DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
- DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
- DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
- DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
- DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
- DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
- DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
- DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
- DR_STE_V1_ACTION_ID_ASO = 0x12,
- DR_STE_V1_ACTION_ID_TRAILER = 0x13,
- DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
- DR_STE_V1_ACTION_ID_MAX = 0x21,
- /* use for special cases */
- DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
-};
-
-enum {
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
- DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
- DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
- DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
- DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
- DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
- DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
- DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
- DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
- DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
- DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
- DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
-};
-
-enum dr_ste_v1_aso_ctx_type {
- DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
-};
-
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
@@ -379,13 +249,12 @@ static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
-static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
+void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
-static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id, int size)
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -432,8 +301,7 @@ static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
- u32 vlan_hdr)
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr)
{
MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
@@ -446,7 +314,7 @@ static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -459,11 +327,8 @@ static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
- u8 *frst_s_action,
- u8 *scnd_d_action,
- u32 reformat_id,
- int size)
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size)
{
/* Remove L2 headers */
MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
@@ -483,7 +348,7 @@ static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
@@ -620,7 +485,8 @@ static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
}
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -640,7 +506,7 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
@@ -677,8 +543,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -691,9 +557,9 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
@@ -706,10 +572,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
}
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
action += DR_STE_ACTION_TRIPLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -776,7 +642,8 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
u8 *last_ste,
@@ -799,7 +666,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
allow_modify_hdr = false;
allow_ctr = false;
} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
- dr_ste_v1_set_rx_decap(last_ste, action);
+ ste_ctx->set_rx_decap(last_ste, action);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_modify_hdr = false;
@@ -827,7 +694,7 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+ ste_ctx->set_pop_vlan(last_ste, action, attr->vlans.count);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
allow_ctr = false;
@@ -868,8 +735,8 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_push_vlan(last_ste, action,
- attr->vlans.headers[i]);
+ ste_ctx->set_push_vlan(last_ste, action,
+ attr->vlans.headers[i]);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
@@ -895,9 +762,9 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_encap(last_ste, action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -912,10 +779,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_encap_l3(last_ste,
- action, d_action,
- attr->reformat.id,
- attr->reformat.size);
+ ste_ctx->set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
allow_modify_hdr = false;
} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
@@ -1027,9 +894,6 @@ void dr_ste_v1_set_action_copy(u8 *d_action,
MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
-#define DR_STE_DECAP_L3_ACTION_NUM 8
-#define DR_STE_L2_HDR_MAX_SZ 20
-
int dr_ste_v1_set_action_decap_l3_list(void *data,
u32 data_sz,
u8 *hw_action,
@@ -2330,7 +2194,12 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
.alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
.dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
index e2fc69867088..a8d9e308d339 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
@@ -7,6 +7,138 @@
#include "dr_types.h"
#include "dr_ste.h"
+#define DR_STE_DECAP_L3_ACTION_NUM 8
+#define DR_STE_L2_HDR_MAX_SZ 20
+#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
+ ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
+ DR_STE_V1_LU_TYPE_##lookup_type##_O)
+
+enum dr_ste_v1_entry_format {
+ DR_STE_V1_TYPE_BWC_BYTE = 0x0,
+ DR_STE_V1_TYPE_BWC_DW = 0x1,
+ DR_STE_V1_TYPE_MATCH = 0x2,
+ DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
+};
+
+/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
+enum {
+ DR_STE_V1_LU_TYPE_NOP = 0x0000,
+ DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
+ DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
+ DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
+ DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
+ DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
+ DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
+ DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
+ DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
+ DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
+ DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
+ DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
+ DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
+ DR_STE_V1_LU_TYPE_GRE = 0x010d,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
+ DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
+ DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
+ DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
+
+enum dr_ste_v1_header_anchors {
+ DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
+ DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+};
+
+enum dr_ste_v1_action_size {
+ DR_STE_ACTION_SINGLE_SZ = 4,
+ DR_STE_ACTION_DOUBLE_SZ = 8,
+ DR_STE_ACTION_TRIPLE_SZ = 12,
+};
+
+enum dr_ste_v1_action_insert_ptr_attr {
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
+};
+
+enum dr_ste_v1_action_id {
+ DR_STE_V1_ACTION_ID_NOP = 0x00,
+ DR_STE_V1_ACTION_ID_COPY = 0x05,
+ DR_STE_V1_ACTION_ID_SET = 0x06,
+ DR_STE_V1_ACTION_ID_ADD = 0x07,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
+ DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
+ DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
+ DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
+ DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_ASO = 0x12,
+ DR_STE_V1_ACTION_ID_TRAILER = 0x13,
+ DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
+ DR_STE_V1_ACTION_ID_MAX = 0x21,
+ /* use for special cases */
+ DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
+};
+
+enum {
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
+};
+
+enum dr_ste_v1_aso_ctx_type {
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
+};
+
bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
@@ -17,11 +149,18 @@ u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
+void dr_ste_v1_set_reparse(u8 *hw_ste_p);
+void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size);
+void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, u32 vlan_hdr);
+void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num);
+void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
+ u32 reformat_id, int size);
+void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action);
+void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
- u32 actions_caps, u8 *last_ste,
+void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
+ u8 *action_type_set, u32 actions_caps, u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
u8 length, u32 data);
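
Since the comment above notes that every lookup type packs two bytes, [ definer mode ][ definer index ], a quick standalone check of that split (value copied from the enum):

#include <stdio.h>

#define DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O 0x0105

int main(void)
{
        unsigned int lu = DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O;

        printf("definer mode  = 0x%02x\n", (lu >> 8) & 0xff); /* 0x01 */
        printf("definer index = 0x%02x\n", lu & 0xff);        /* 0x05 */
        return 0;
}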
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
index 808b013cf48c..0882dba0f64b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
@@ -2,167 +2,7 @@
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "dr_ste_v1.h"
-
-enum {
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
- DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
- DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
- DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
- DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
- DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
- DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
- DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
- DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
- DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
- DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
- DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
- DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
- DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
- DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
- DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94,
- DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95,
-};
-
-static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
- .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
- .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
- .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
- },
-};
+#include "dr_ste_v2.h"
static struct mlx5dr_ste_ctx ste_ctx_v2 = {
/* Builders */
@@ -223,7 +63,12 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = {
.set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
.alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
.dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
+ /* Actions bit set */
+ .set_encap = &dr_ste_v1_set_encap,
+ .set_push_vlan = &dr_ste_v1_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v1_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v1_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v1_set_encap_l3,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
new file mode 100644
index 000000000000..d853fde49cfc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef _DR_STE_V2_
+#define _DR_STE_V2_
+
+enum {
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
+ DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94,
+ DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
+ .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
+ },
+};
+
+#endif /* _DR_STE_V2_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
new file mode 100644
index 000000000000..cc60ce1d274e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+#include "dr_ste_v2.h"
+
+static void dr_ste_v3_set_encap(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_push_vlan(u8 *ste, u8 *d_action,
+ u32 vlan_hdr)
+{
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects the offset to the vlan header here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, start_offset,
+ HDR_LEN_L2_MACS >> 1);
+ MLX5_SET(ste_double_action_insert_with_inline_v3, d_action, inline_data, vlan_hdr);
+ dr_ste_v1_set_reparse(ste);
+}
+
+static void dr_ste_v3_set_pop_vlan(u8 *hw_ste_p, u8 *s_action,
+ u8 vlans_num)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_encap_l3(u8 *hw_ste_p,
+ u8 *frst_s_action,
+ u8 *scnd_d_action,
+ u32 reformat_id,
+ int size)
+{
+ /* Remove L2 headers */
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, frst_s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4);
+
+ /* Encapsulate with given reformat ID */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, scnd_d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+{
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_MAC);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static int
+dr_ste_v3_set_action_decap_l3_list(void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ uint16_t *used_hw_action_num)
+{
+ u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
+ void *data_ptr = padded_data;
+ u16 used_actions = 0;
+ u32 inline_data_sz;
+ u32 i;
+
+ if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
+ return -EINVAL;
+
+ inline_data_sz =
+ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v3, inline_data);
+
+ /* Add alignment padding */
+ memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
+
+ /* Remove L2L3 outer headers */
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v3, hw_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++; /* Remove and NOP are a single double action */
+
+ /* Point to the last dword of the header */
+ data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
+
+ /* Add the new header using the inline action, 4 bytes at a time. The
+ * header is added in reverse order to the beginning of the packet to
+ * avoid incorrect parsing by the HW. Since the header is 14B or 18B,
+ * an extra two bytes are padded and later removed.
+ */
+ for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
+ void *addr_inline;
+
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+ /* The hardware expects the offset here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v3, hw_action, start_offset, 0);
+
+ /* Copy bytes one by one to avoid endianness problems */
+ addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v3,
+ hw_action, inline_data);
+ memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++;
+ }
+
+ /* Remove first 2 extra bytes */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, start_offset, 0);
+ /* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, hw_action, remove_size, 1);
+ used_actions++;
+
+ *used_hw_action_num = used_actions;
+
+ return 0;
+}
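
The reverse-order insert in dr_ste_v3_set_action_decap_l3_list() is easiest to see in isolation. Below is a user-space simulation of the same trick under the same assumptions (4-byte inline chunks, a 14B header, front padding of data_sz % 4 bytes stripped at the end); it is a sketch, not driver code:

#include <stdio.h>
#include <string.h>

#define INLINE_SZ 4

int main(void)
{
        unsigned char hdr[14] = "ABCDEFGHIJKLM";      /* 13 chars + NUL = 14B */
        unsigned char padded[16] = {0};
        unsigned char pkt[32] = {0};
        size_t data_sz = 14, pkt_len = 0, i;
        const unsigned char *last = padded + (data_sz / INLINE_SZ) * INLINE_SZ;

        /* front-pad so the buffer splits into whole 4-byte chunks */
        memcpy(padded + data_sz % INLINE_SZ, hdr, data_sz);

        /* each chunk is inserted at offset 0, so writing them last-to-first
         * leaves the original byte order intact
         */
        for (i = 0; i < data_sz / INLINE_SZ + 1; i++) {
                memmove(pkt + INLINE_SZ, pkt, pkt_len);
                memcpy(pkt, last - i * INLINE_SZ, INLINE_SZ);
                pkt_len += INLINE_SZ;
        }

        /* the final remove-by-size action strips the 2 pad bytes */
        printf("%s\n", pkt + data_sz % INLINE_SZ);    /* ABCDEFGHIJKLM */
        return 0;
}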
+
+static struct mlx5dr_ste_ctx ste_ctx_v3 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+ .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init =
+ &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
+ .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
+ .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
+ .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+ .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+
+ /* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP |
+ DR_STE_CTX_ACTION_CAP_RX_PUSH |
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v2_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v3_set_action_decap_l3_list,
+ .alloc_modify_hdr_chunk = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+ .dealloc_modify_hdr_chunk = &dr_ste_v1_free_modify_hdr_ptrn_arg,
+ /* Actions bit set */
+ .set_encap = &dr_ste_v3_set_encap,
+ .set_push_vlan = &dr_ste_v3_set_push_vlan,
+ .set_pop_vlan = &dr_ste_v3_set_pop_vlan,
+ .set_rx_decap = &dr_ste_v3_set_rx_decap,
+ .set_encap_l3 = &dr_ste_v3_set_encap_l3,
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v3(void)
+{
+ return &ste_ctx_v3;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
index 4b349d4005e4..8007d3f523c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
@@ -521,7 +521,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
goto free_actions;
}
- id = dst->dest_attr.counter_id;
+ id = mlx5_fc_id(dst->dest_attr.counter);
tmp_action =
mlx5dr_action_create_flow_counter(id);
if (!tmp_action) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
index fb078fa0f0cc..898c3618ff26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
@@ -600,4 +600,44 @@ struct mlx5_ifc_ste_double_action_aso_v1_bits {
};
};
+struct mlx5_ifc_ste_single_action_remove_header_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 end_anchor[0x7];
+ u8 reserved_at_16[0x1];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x4];
+ u8 decap[0x1];
+ u8 vni_to_cqe[0x1];
+ u8 qos_profile[0x2];
+};
+
+struct mlx5_ifc_ste_single_action_remove_header_size_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_18[0x2];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_inline_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 reserved_at_17[0x9];
+
+ u8 inline_data[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_ptr_v3_bits {
+ u8 action_id[0x8];
+ u8 start_anchor[0x7];
+ u8 start_offset[0x8];
+ u8 size[0x6];
+ u8 attributes[0x3];
+
+ u8 pointer[0x20];
+};
+
#endif /* MLX5_IFC_DR_H */
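
For orientation, the new *_v3_bits structs are mlx5_ifc-style bit-layout descriptors: each member's array size is its field width in bits, packed MSB-first into big-endian dwords, which is the layout MLX5_SET() fills in. A standalone sketch of that packing for the insert-with-pointer action (bit offsets hand-derived from the struct; the helper is illustrative, not the mlx5 macro):

#include <stdio.h>
#include <stdint.h>

/* set 'width' bits (width < 32) at big-endian bit offset 'off' of a dword */
static uint32_t be_set(uint32_t dw, unsigned int off, unsigned int width,
                       uint32_t val)
{
        unsigned int shift = 32 - off - width;
        uint32_t mask = ((1u << width) - 1u) << shift;

        return (dw & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t dw0 = 0;

        dw0 = be_set(dw0, 0, 8, 0x0b);   /* action_id = INSERT_POINTER    */
        dw0 = be_set(dw0, 23, 6, 25);    /* size: 50B of encap = 25 words */
        dw0 = be_set(dw0, 29, 3, 1);     /* attributes = ENCAP            */
        printf("dword0 = 0x%08x\n", dw0);
        return 0;
}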
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
index 3ac7dc67509f..0bb3724c10c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
@@ -160,7 +160,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
(MLX5_CAP_GEN(dev, steering_format_version) <=
- MLX5_STEERING_FORMAT_CONNECTX_7)));
+ MLX5_STEERING_FORMAT_CONNECTX_8)));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6d11225594dd..24c3ff6fcf71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -73,6 +73,7 @@ struct mlxsw_tx_info {
};
struct mlxsw_rx_md_info {
+ struct napi_struct *napi;
u32 cookie_index;
u32 latency;
u32 tx_congestion;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index d6f37456fb31..0863dca2fc0b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -737,6 +737,7 @@ static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
}
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+ struct napi_struct *napi,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
enum mlxsw_pci_cqe_v cqe_v, char *cqe)
@@ -807,6 +808,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
}
mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+ mlxsw_skb_cb(skb)->rx_md_info.napi = napi;
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
@@ -869,7 +871,7 @@ static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget)
continue;
}
- mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+ mlxsw_pci_cqe_rdq_handle(mlxsw_pci, napi, rdq,
wqe_counter, q->u.cq.v, cqe);
if (++work_done == budget)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3f5e5d99251b..aa71993daf28 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2449,7 +2449,7 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
u64_stats_update_end(&pcpu_stats->syncp);
skb->protocol = eth_type_trans(skb, skb->dev);
- netif_receive_skb(skb);
+ napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
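
The mlxsw changes above stash the poll loop's napi pointer in the per-skb metadata so the trap/listener layer, which never sees the receive queue, can feed GRO instead of netif_receive_skb(). A tiny user-space analogue of carrying that context in per-packet metadata (names illustrative):

#include <stdio.h>

struct napi_ctx { const char *name; };

struct pkt {
        struct napi_ctx *napi;  /* recorded at RX time, like rx_md_info.napi */
        const char *data;
};

static void gro_receive(struct napi_ctx *napi, struct pkt *p)
{
        printf("%s: GRO path got %s\n", napi->name, p->data);
}

static void listener(struct pkt *p)
{
        gro_receive(p->napi, p); /* no queue in scope; use the stashed pointer */
}

int main(void)
{
        struct napi_ctx napi = { .name = "rdq0" };
        struct pkt p = { .napi = &napi, .data = "frame" };

        listener(&p);
        return 0;
}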
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index f07955b5439f..6a4a81c63451 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -192,6 +192,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (sample_act_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Mirror action after sample action is not supported");
+ return -EOPNOTSUPP;
+ }
+
err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
block, out_dev,
extack);
@@ -265,6 +270,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return -EOPNOTSUPP;
}
+ if (mirror_act_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action after mirror action is not supported");
+ return -EOPNOTSUPP;
+ }
+
err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
block,
act->sample.psample_group,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 69cd689dbc83..5afe6b155ef0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -1003,10 +1003,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
&bytes);
- if (mr_route->mfc->mfc_un.res.pkt != packets)
- mr_route->mfc->mfc_un.res.lastuse = jiffies;
- mr_route->mfc->mfc_un.res.pkt = packets;
- mr_route->mfc->mfc_un.res.bytes = bytes;
+ if (atomic_long_read(&mr_route->mfc->mfc_un.res.pkt) != packets)
+ WRITE_ONCE(mr_route->mfc->mfc_un.res.lastuse, jiffies);
+ atomic_long_set(&mr_route->mfc->mfc_un.res.pkt, packets);
+ atomic_long_set(&mr_route->mfc->mfc_un.res.bytes, bytes);
}
static void mlxsw_sp_mr_stats_update(struct work_struct *work)
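
The counter hunk above exists because the cached mfc counters are read and written from different contexts; atomic accessors (and WRITE_ONCE for lastuse) keep those accesses from being torn or re-fetched. A C11 analogue of the update step, under that assumption (not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long pkt_cached;  /* shared with concurrent readers */
static long lastuse;

static void stats_update(long packets, long now)
{
        if (atomic_load(&pkt_cached) != packets)
                lastuse = now;          /* route saw traffic since last poll */
        atomic_store(&pkt_cached, packets);
}

int main(void)
{
        stats_update(42, 1000);
        printf("pkt=%ld lastuse=%ld\n", atomic_load(&pkt_cached), lastuse);
        return 0;
}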
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 899c954e0e5f..1f9c1c86839f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -173,7 +173,7 @@ static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
if (err)
return;
- netif_receive_skb(skb);
+ napi_gro_receive(mlxsw_skb_cb(skb)->rx_md_info.napi, skb);
}
static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port,
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index ea6214ca48e7..239b2258ec65 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -13,6 +13,7 @@ fbnic-y := fbnic_csr.o \
fbnic_ethtool.o \
fbnic_fw.o \
fbnic_hw_stats.o \
+ fbnic_hwmon.o \
fbnic_irq.o \
fbnic_mac.o \
fbnic_netdev.o \
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index 744eb0d95449..14751f16e125 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -16,10 +16,15 @@
#include "fbnic_mac.h"
#include "fbnic_rpc.h"
+struct fbnic_napi_vector;
+
+#define FBNIC_MAX_NAPI_VECTORS 128u
+
struct fbnic_dev {
struct device *dev;
struct net_device *netdev;
struct dentry *dbg_fbd;
+ struct device *hwmon;
u32 __iomem *uc_addr0;
u32 __iomem *uc_addr4;
@@ -28,10 +33,16 @@ struct fbnic_dev {
unsigned int pcs_msix_vector;
unsigned short num_irqs;
+ struct {
+ u8 users;
+ char name[IFNAMSIZ + 9];
+ } napi_irq[FBNIC_MAX_NAPI_VECTORS];
+
struct delayed_work service_task;
struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
struct fbnic_fw_cap fw_cap;
+ struct fbnic_fw_completion *cmpl_data;
/* Lock protecting Tx Mailbox queue to prevent possible races */
spinlock_t fw_tx_lock;
@@ -140,9 +151,18 @@ void fbnic_devlink_unregister(struct fbnic_dev *fbd);
int fbnic_fw_enable_mbx(struct fbnic_dev *fbd);
void fbnic_fw_disable_mbx(struct fbnic_dev *fbd);
+void fbnic_hwmon_register(struct fbnic_dev *fbd);
+void fbnic_hwmon_unregister(struct fbnic_dev *fbd);
+
int fbnic_pcs_irq_enable(struct fbnic_dev *fbd);
void fbnic_pcs_irq_disable(struct fbnic_dev *fbd);
+void fbnic_napi_name_irqs(struct fbnic_dev *fbd);
+int fbnic_napi_request_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv);
+void fbnic_napi_free_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv);
+void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr);
int fbnic_request_irq(struct fbnic_dev *dev, int nr, irq_handler_t handler,
unsigned long flags, const char *name, void *data);
void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index cc8ca94529ca..20cd9f5f89e2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -40,49 +40,99 @@ static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
#define FBNIC_HW_STATS_LEN FBNIC_HW_FIXED_STATS_LEN
-static int
-fbnic_get_ts_info(struct net_device *netdev,
- struct kernel_ethtool_ts_info *tsinfo)
+static void
+fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
- tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
+ fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
+ sizeof(drvinfo->fw_version));
+}
- tsinfo->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+static int fbnic_get_regs_len(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
- tsinfo->tx_types =
- BIT(HWTSTAMP_TX_OFF) |
- BIT(HWTSTAMP_TX_ON);
+ return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
+}
- tsinfo->rx_filters =
- BIT(HWTSTAMP_FILTER_NONE) |
- BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
- BIT(HWTSTAMP_FILTER_ALL);
+static void fbnic_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *data)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
- return 0;
+ fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
}
-static void
-fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig)
{
- struct fbnic_net *fbn = netdev_priv(netdev);
- struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_net *clone;
- fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
- sizeof(drvinfo->fw_version));
+ clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
+ if (!clone)
+ return NULL;
+
+ memset(clone->tx, 0, sizeof(clone->tx));
+ memset(clone->rx, 0, sizeof(clone->rx));
+ memset(clone->napi, 0, sizeof(clone->napi));
+ return clone;
}
-static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
+static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
+ struct fbnic_net *clone)
{
- if (counter->reported)
- *stat = counter->value;
+ swap(clone->rcq_size, orig->rcq_size);
+ swap(clone->hpq_size, orig->hpq_size);
+ swap(clone->ppq_size, orig->ppq_size);
+ swap(clone->txq_size, orig->txq_size);
+ swap(clone->num_rx_queues, orig->num_rx_queues);
+ swap(clone->num_tx_queues, orig->num_tx_queues);
+ swap(clone->num_napi, orig->num_napi);
+}
+
+static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ for (i = 0; i < nv->txt_count; i++) {
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
+ }
+
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub0);
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
+ }
+}
+
+static void fbnic_clone_swap(struct fbnic_net *orig,
+ struct fbnic_net *clone)
+{
+ struct fbnic_dev *fbd = orig->fbd;
+ unsigned int i;
+
+ for (i = 0; i < max(clone->num_napi, orig->num_napi); i++)
+ fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i);
+ for (i = 0; i < orig->num_napi; i++)
+ fbnic_aggregate_vector_counters(orig, orig->napi[i]);
+
+ fbnic_clone_swap_cfg(orig, clone);
+
+ for (i = 0; i < ARRAY_SIZE(orig->napi); i++)
+ swap(clone->napi[i], orig->napi[i]);
+ for (i = 0; i < ARRAY_SIZE(orig->tx); i++)
+ swap(clone->tx[i], orig->tx[i]);
+ for (i = 0; i < ARRAY_SIZE(orig->rx); i++)
+ swap(clone->rx[i], orig->rx[i]);
+}
+
+static void fbnic_clone_free(struct fbnic_net *clone)
+{
+ kfree(clone);
}
static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
@@ -97,6 +147,21 @@ static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
}
}
+static void fbnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ const struct fbnic_stat *stat;
+ int i;
+
+ fbnic_get_hw_stats(fbn->fbd);
+
+ for (i = 0; i < FBNIC_HW_STATS_LEN; i++) {
+ stat = &fbnic_gstrings_hw_stats[i];
+ data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset);
+ }
+}
+
static int fbnic_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
@@ -107,19 +172,375 @@ static int fbnic_get_sset_count(struct net_device *dev, int sset)
}
}
-static void fbnic_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+static int fbnic_get_rss_hash_idx(u32 flow_type)
{
- struct fbnic_net *fbn = netdev_priv(dev);
- const struct fbnic_stat *stat;
- int i;
+ switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ return FBNIC_TCP4_HASH_OPT;
+ case TCP_V6_FLOW:
+ return FBNIC_TCP6_HASH_OPT;
+ case UDP_V4_FLOW:
+ return FBNIC_UDP4_HASH_OPT;
+ case UDP_V6_FLOW:
+ return FBNIC_UDP6_HASH_OPT;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ case IPV4_USER_FLOW:
+ return FBNIC_IPV4_HASH_OPT;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ case IPV6_USER_FLOW:
+ return FBNIC_IPV6_HASH_OPT;
+ case ETHER_FLOW:
+ return FBNIC_ETHER_HASH_OPT;
+ }
- fbnic_get_hw_stats(fbn->fbd);
+ return -1;
+}
- for (i = 0; i < FBNIC_HW_STATS_LEN; i++) {
- stat = &fbnic_gstrings_hw_stats[i];
- data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset);
+static int
+fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
+{
+ int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Report options from the rss_flow_hash table in fbn */
+ cmd->data = fbn->rss_flow_hash[hash_opt_idx];
+
+ return 0;
+}
+
+static int fbnic_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = fbn->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = fbnic_get_rss_hash_opts(fbn, cmd);
+ break;
+ }
+
+ return ret;
+}
+
+#define FBNIC_L2_HASH_OPTIONS \
+ (RXH_L2DA | RXH_DISCARD)
+#define FBNIC_L3_HASH_OPTIONS \
+ (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
+#define FBNIC_L4_HASH_OPTIONS \
+ (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+static int
+fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd)
+{
+ int hash_opt_idx;
+
+ /* Verify the type requested is correct */
+ hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Verify the fields asked for can actually be assigned based on type */
+ if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
+ (hash_opt_idx > FBNIC_L4_HASH_OPT &&
+ cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
+ (hash_opt_idx > FBNIC_IP_HASH_OPT &&
+ cmd->data & ~FBNIC_L2_HASH_OPTIONS))
+ return -EINVAL;
+
+ fbn->rss_flow_hash[hash_opt_idx] = cmd->data;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_rss_reinit(fbn->fbd, fbn);
+ fbnic_write_rules(fbn->fbd);
+ }
+
+ return 0;
+}
+
+static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = fbnic_set_rss_hash_opts(fbn, cmd);
+ break;
+ }
+
+ return ret;
+}
+
+static u32 fbnic_get_rxfh_key_size(struct net_device *netdev)
+{
+ return FBNIC_RPC_RSS_KEY_BYTE_LEN;
+}
+
+static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return FBNIC_RPC_RSS_TBL_SIZE;
+}
+
+static int
+fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int i;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->key) {
+ for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) {
+ u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8);
+
+ rxfh->key[i] = rss_key >> 24;
+ }
+ }
+
+ if (rxfh->indir) {
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
+ rxfh->indir[i] = fbn->indir_tbl[0][i];
+ }
+
+ return 0;
+}
+
+static unsigned int
+fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir)
+{
+ unsigned int i, changes = 0;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ if (fbn->indir_tbl[idx][i] == indir[i])
+ continue;
+
+ fbn->indir_tbl[idx][i] = indir[i];
+ changes++;
+ }
+
+ return changes;
+}
+
+static int
+fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int i, changes = 0;
+
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EINVAL;
+
+ if (rxfh->key) {
+ u32 rss_key = 0;
+
+ for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) {
+ rss_key >>= 8;
+ rss_key |= (u32)(rxfh->key[i]) << 24;
+
+ if (i % 4)
+ continue;
+
+ if (fbn->rss_key[i / 4] == rss_key)
+ continue;
+
+ fbn->rss_key[i / 4] = rss_key;
+ changes++;
+ }
}
+
+ if (rxfh->indir)
+ changes += fbnic_set_indir(fbn, 0, rxfh->indir);
+
+ if (changes && netif_running(netdev))
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ return 0;
+}
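
The byte shuffling in the rxfh get/set paths above treats the RSS key as the big-endian bytes of a u32 array; a standalone round-trip of that packing (key shortened to two words for illustration):

#include <stdio.h>
#include <stdint.h>

#define KEY_LEN 8

int main(void)
{
        uint32_t words[KEY_LEN / 4] = { 0x11223344, 0xaabbccdd };
        uint8_t bytes[KEY_LEN];
        uint32_t back[KEY_LEN / 4] = {0};
        int i;

        for (i = 0; i < KEY_LEN; i++)   /* get: most significant byte first */
                bytes[i] = (words[i / 4] << ((i % 4) * 8)) >> 24;

        for (i = KEY_LEN; i--;) {       /* set: rebuild words from the end */
                back[i / 4] >>= 8;
                back[i / 4] |= (uint32_t)bytes[i] << 24;
        }

        printf("0x%08x 0x%08x\n", back[0], back[1]); /* matches words[] */
        return 0;
}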
+
+static void fbnic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ ch->max_rx = fbd->max_num_queues;
+ ch->max_tx = fbd->max_num_queues;
+ ch->max_combined = min(ch->max_rx, ch->max_tx);
+ ch->max_other = FBNIC_NON_NAPI_VECTORS;
+
+ if (fbn->num_rx_queues > fbn->num_napi ||
+ fbn->num_tx_queues > fbn->num_napi)
+ ch->combined_count = min(fbn->num_rx_queues,
+ fbn->num_tx_queues);
+ else
+ ch->combined_count =
+ fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi;
+ ch->rx_count = fbn->num_rx_queues - ch->combined_count;
+ ch->tx_count = fbn->num_tx_queues - ch->combined_count;
+ ch->other_count = FBNIC_NON_NAPI_VECTORS;
+}
+
+static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch,
+ unsigned int max_napis)
+{
+ fbn->num_rx_queues = ch->rx_count + ch->combined_count;
+ fbn->num_tx_queues = ch->tx_count + ch->combined_count;
+ fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count,
+ max_napis);
+}
+
+static int fbnic_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ unsigned int max_napis, standalone;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_net *clone;
+ int err;
+
+ max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
+ standalone = ch->rx_count + ch->tx_count;
+
+ /* Limits for standalone queues:
+ * - each queue has its own NAPI (num_napi >= rx + tx + combined)
+ * - when combining queues (combined != 0), rx or tx must be 0
+ */
+ if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
+ (standalone && standalone + ch->combined_count > max_napis) ||
+ ch->rx_count + ch->combined_count > fbd->max_num_queues ||
+ ch->tx_count + ch->combined_count > fbd->max_num_queues ||
+ ch->other_count != FBNIC_NON_NAPI_VECTORS)
+ return -EINVAL;
+
+ if (!netif_running(netdev)) {
+ fbnic_set_queues(fbn, ch, max_napis);
+ fbnic_reset_indir_tbl(fbn);
+ return 0;
+ }
+
+ clone = fbnic_clone_create(fbn);
+ if (!clone)
+ return -ENOMEM;
+
+ fbnic_set_queues(clone, ch, max_napis);
+
+ err = fbnic_alloc_napi_vectors(clone);
+ if (err)
+ goto err_free_clone;
+
+ err = fbnic_alloc_resources(clone);
+ if (err)
+ goto err_free_napis;
+
+ fbnic_down_noidle(fbn);
+ err = fbnic_wait_all_queues_idle(fbn->fbd, true);
+ if (err)
+ goto err_start_stack;
+
+ err = fbnic_set_netif_queues(clone);
+ if (err)
+ goto err_start_stack;
+
+ /* Nothing can fail past this point */
+ fbnic_flush(fbn);
+
+ fbnic_clone_swap(fbn, clone);
+
+ /* Reset RSS indirection table */
+ fbnic_reset_indir_tbl(fbn);
+
+ fbnic_up(fbn);
+
+ fbnic_free_resources(clone);
+ fbnic_free_napi_vectors(clone);
+ fbnic_clone_free(clone);
+
+ return 0;
+
+err_start_stack:
+ fbnic_flush(fbn);
+ fbnic_up(fbn);
+ fbnic_free_resources(clone);
+err_free_napis:
+ fbnic_free_napi_vectors(clone);
+err_free_clone:
+ fbnic_clone_free(clone);
+ return err;
+}
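
fbnic_set_channels() follows a clone-and-swap shape: every new resource is allocated on a throwaway copy of the private data first, the live rings are torn down only after all allocations succeeded, and the swap itself cannot fail, so any error rolls back to the old configuration. A minimal sketch of that pattern (illustrative, far simpler than the driver's version):

#include <stdlib.h>
#include <stdio.h>

struct cfg { int queues; int *rings; };

static int reconfigure(struct cfg *live, int queues)
{
        struct cfg clone = { .queues = queues };

        clone.rings = calloc(queues, sizeof(*clone.rings));
        if (!clone.rings)
                return -1;      /* failure: live config never touched */

        /* nothing can fail past this point: commit by swapping */
        free(live->rings);
        *live = clone;
        return 0;
}

int main(void)
{
        struct cfg live = { .queues = 1, .rings = calloc(1, sizeof(int)) };

        if (!reconfigure(&live, 8))
                printf("now %d queues\n", live.queues);
        free(live.rings);
        return 0;
}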
+
+static int
+fbnic_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *tsinfo)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
+
+ tsinfo->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ tsinfo->tx_types =
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ tsinfo->rx_filters =
+ BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static void fbnic_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ u64 ts_packets, ts_lost;
+ struct fbnic_ring *ring;
+ unsigned int start;
+ int i;
+
+ ts_stats->pkts = fbn->tx_stats.ts_packets;
+ ts_stats->lost = fbn->tx_stats.ts_lost;
+ for (i = 0; i < fbn->num_tx_queues; i++) {
+ ring = fbn->tx[i];
+ do {
+ start = u64_stats_fetch_begin(&ring->stats.syncp);
+ ts_packets = ring->stats.ts_packets;
+ ts_lost = ring->stats.ts_lost;
+ } while (u64_stats_fetch_retry(&ring->stats.syncp, start));
+ ts_stats->pkts += ts_packets;
+ ts_stats->lost += ts_lost;
+ }
+}
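
The do/while above is the u64_stats seqcount read pattern: snapshot the sequence, copy the counters, and retry if a writer raced in between, which keeps 64-bit stats consistent on 32-bit CPUs without locking the TX path. A single-threaded sketch of the reader shape (illustrative):

#include <stdio.h>

static unsigned int seq;        /* even = stable, odd = write in progress */
static unsigned long long ts_packets;

static unsigned long long read_stat(void)
{
        unsigned int start;
        unsigned long long v;

        do {
                start = seq;                    /* u64_stats_fetch_begin() */
                v = ts_packets;
        } while ((start & 1) || seq != start);  /* u64_stats_fetch_retry() */

        return v;
}

int main(void)
{
        ts_packets = 7;
        printf("ts_packets=%llu\n", read_stat());
        return 0;
}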
+
+static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
+{
+ if (counter->reported)
+ *stat = counter->value;
}
static void
@@ -164,44 +585,6 @@ fbnic_get_eth_mac_stats(struct net_device *netdev,
&mac_stats->eth_mac.FrameTooLongErrors);
}
-static void fbnic_get_ts_stats(struct net_device *netdev,
- struct ethtool_ts_stats *ts_stats)
-{
- struct fbnic_net *fbn = netdev_priv(netdev);
- u64 ts_packets, ts_lost;
- struct fbnic_ring *ring;
- unsigned int start;
- int i;
-
- ts_stats->pkts = fbn->tx_stats.ts_packets;
- ts_stats->lost = fbn->tx_stats.ts_lost;
- for (i = 0; i < fbn->num_tx_queues; i++) {
- ring = fbn->tx[i];
- do {
- start = u64_stats_fetch_begin(&ring->stats.syncp);
- ts_packets = ring->stats.ts_packets;
- ts_lost = ring->stats.ts_lost;
- } while (u64_stats_fetch_retry(&ring->stats.syncp, start));
- ts_stats->pkts += ts_packets;
- ts_stats->lost += ts_lost;
- }
-}
-
-static void fbnic_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *data)
-{
- struct fbnic_net *fbn = netdev_priv(netdev);
-
- fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
-}
-
-static int fbnic_get_regs_len(struct net_device *netdev)
-{
- struct fbnic_net *fbn = netdev_priv(netdev);
-
- return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
-}
-
static const struct ethtool_ops fbnic_ethtool_ops = {
.get_drvinfo = fbnic_get_drvinfo,
.get_regs_len = fbnic_get_regs_len,
@@ -209,6 +592,14 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.get_strings = fbnic_get_strings,
.get_ethtool_stats = fbnic_get_ethtool_stats,
.get_sset_count = fbnic_get_sset_count,
+ .get_rxnfc = fbnic_get_rxnfc,
+ .set_rxnfc = fbnic_set_rxnfc,
+ .get_rxfh_key_size = fbnic_get_rxfh_key_size,
+ .get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
+ .get_rxfh = fbnic_get_rxfh,
+ .set_rxfh = fbnic_set_rxfh,
+ .get_channels = fbnic_get_channels,
+ .set_channels = fbnic_set_channels,
.get_ts_info = fbnic_get_ts_info,
.get_ts_stats = fbnic_get_ts_stats,
.get_eth_mac_stats = fbnic_get_eth_mac_stats,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 8f7a2a19ddf8..bbc7c1c0c37e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -228,6 +228,63 @@ static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
tx_mbx->head = head;
}
+static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_tlv_msg *msg,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+
+ /* If we are already waiting on a completion then abort */
+ if (cmpl_data && fbd->cmpl_data) {
+ err = -EBUSY;
+ goto unlock_mbx;
+ }
+
+ /* Record completion location and submit request */
+ if (cmpl_data)
+ fbd->cmpl_data = cmpl_data;
+
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
+ le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
+
+ /* If the msg failed, clear the completion data for the next caller */
+ if (err && cmpl_data)
+ fbd->cmpl_data = NULL;
+
+unlock_mbx:
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
+static void fbnic_fw_release_cmpl_data(struct kref *kref)
+{
+ struct fbnic_fw_completion *cmpl_data;
+
+ cmpl_data = container_of(kref, struct fbnic_fw_completion,
+ ref_count);
+ kfree(cmpl_data);
+}
+
+static struct fbnic_fw_completion *
+fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
+{
+ struct fbnic_fw_completion *cmpl_data = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ if (fbd->cmpl_data && fbd->cmpl_data->msg_type == msg_type) {
+ cmpl_data = fbd->cmpl_data;
+ kref_get(&fbd->cmpl_data->ref_count);
+ }
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return cmpl_data;
+}
+
/**
* fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
* @fbd: FBNIC device structure
@@ -651,6 +708,84 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}
+/**
+ * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Completion data structure to store sensor response
+ *
+ * Asks the firmware to provide an update with the latest sensor data.
+ * The response will contain temperature and voltage readings.
+ *
+ * Return: 0 on success, negative error value on failure
+ */
+int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data)
+{
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
+ FBNIC_TLV_ATTR_S32(FBNIC_TSENE_THERM),
+ FBNIC_TLV_ATTR_S32(FBNIC_TSENE_VOLT),
+ FBNIC_TLV_ATTR_S32(FBNIC_TSENE_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_tsene_read_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ int err = 0;
+
+ /* Verify we have a completion pointer to provide with data */
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
+ FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
+ if (!cmpl_data)
+ return -EINVAL;
+
+ if (results[FBNIC_TSENE_ERROR]) {
+ err = fbnic_tlv_attr_get_unsigned(results[FBNIC_TSENE_ERROR]);
+ if (err)
+ goto exit_complete;
+ }
+
+ if (!results[FBNIC_TSENE_THERM] || !results[FBNIC_TSENE_VOLT]) {
+ err = -EINVAL;
+ goto exit_complete;
+ }
+
+ cmpl_data->u.tsene.millidegrees =
+ fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_THERM]);
+ cmpl_data->u.tsene.millivolts =
+ fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_VOLT]);
+
+exit_complete:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
fbnic_fw_parse_cap_resp),
@@ -658,6 +793,9 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
fbnic_fw_parse_ownership_resp),
FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
fbnic_fw_parse_heartbeat_resp),
+ FBNIC_TLV_PARSER(TSENE_READ_RESP,
+ fbnic_tsene_read_resp_index,
+ fbnic_fw_parse_tsene_read_resp),
FBNIC_TLV_MSG_ERROR
};
@@ -802,3 +940,25 @@ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
fw_version, str_sz);
}
+
+void fbnic_fw_init_cmpl(struct fbnic_fw_completion *fw_cmpl,
+ u32 msg_type)
+{
+ fw_cmpl->msg_type = msg_type;
+ init_completion(&fw_cmpl->done);
+ kref_init(&fw_cmpl->ref_count);
+}
+
+void fbnic_fw_clear_compl(struct fbnic_dev *fbd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ fbd->cmpl_data = NULL;
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+}
+
+void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
+{
+ kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
+}
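Taken together, the helpers above define a refcounted request/response handshake: the caller owns one reference from kref_init(), the response parser takes a second via fbnic_fw_get_cmpl_by_type() and drops it after completing, and fbnic_fw_clear_compl() detaches the slot for the next request. A condensed sketch of the expected caller flow (the full version with timeout retries appears in fbnic_mac.c further down); example_tsene_read is hypothetical:

    static int example_tsene_read(struct fbnic_dev *fbd, s32 *millidegrees)
    {
            struct fbnic_fw_completion *cmpl;
            int err;

            cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
            if (!cmpl)
                    return -ENOMEM;

            fbnic_fw_init_cmpl(cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP);

            err = fbnic_fw_xmit_tsene_read_msg(fbd, cmpl);
            if (err)
                    goto free;

            if (!wait_for_completion_timeout(&cmpl->done, 2 * HZ))
                    err = -ETIMEDOUT;
            else
                    err = cmpl->result;

            if (!err)
                    *millidegrees = cmpl->u.tsene.millidegrees;

            /* Release the single in-flight completion slot */
            fbnic_fw_clear_compl(fbd);
    free:
            fbnic_fw_put_cmpl(cmpl);
            return err;
    }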
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index 221faf8c6756..fe68333d51b1 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -44,6 +44,19 @@ struct fbnic_fw_cap {
u8 link_fec;
};
+struct fbnic_fw_completion {
+ u32 msg_type;
+ struct completion done;
+ struct kref ref_count;
+ int result;
+ union {
+ struct {
+ s32 millivolts;
+ s32 millidegrees;
+ } tsene;
+ } u;
+};
+
void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
void fbnic_mbx_poll(struct fbnic_dev *fbd);
@@ -52,6 +65,12 @@ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
+void fbnic_fw_init_cmpl(struct fbnic_fw_completion *cmpl_data,
+ u32 msg_type);
+void fbnic_fw_clear_compl(struct fbnic_dev *fbd);
+void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
do { \
@@ -76,6 +95,8 @@ enum {
FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13,
FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14,
FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15,
+ FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C,
+ FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D,
};
#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24)
@@ -118,6 +139,13 @@ enum {
};
enum {
+ FBNIC_TSENE_THERM = 0x0,
+ FBNIC_TSENE_VOLT = 0x1,
+ FBNIC_TSENE_ERROR = 0x2,
+ FBNIC_TSENE_MSG_MAX
+};
+
+enum {
FBNIC_FW_OWNERSHIP_FLAG = 0x0,
FBNIC_FW_OWNERSHIP_MSG_MAX
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
new file mode 100644
index 000000000000..def8598aceec
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/hwmon.h>
+
+#include "fbnic.h"
+#include "fbnic_mac.h"
+
+static int fbnic_hwmon_sensor_id(enum hwmon_sensor_types type)
+{
+ if (type == hwmon_temp)
+ return FBNIC_SENSOR_TEMP;
+ if (type == hwmon_in)
+ return FBNIC_SENSOR_VOLTAGE;
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t fbnic_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type == hwmon_temp && attr == hwmon_temp_input)
+ return 0444;
+ if (type == hwmon_in && attr == hwmon_in_input)
+ return 0444;
+
+ return 0;
+}
+
+static int fbnic_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ const struct fbnic_mac *mac = fbd->mac;
+ int id;
+
+ id = fbnic_hwmon_sensor_id(type);
+ return id < 0 ? id : mac->get_sensor(fbd, id, val);
+}
+
+static const struct hwmon_ops fbnic_hwmon_ops = {
+ .is_visible = fbnic_hwmon_is_visible,
+ .read = fbnic_hwmon_read,
+};
+
+static const struct hwmon_channel_info *fbnic_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info fbnic_chip_info = {
+ .ops = &fbnic_hwmon_ops,
+ .info = fbnic_hwmon_info,
+};
+
+void fbnic_hwmon_register(struct fbnic_dev *fbd)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return;
+
+ fbd->hwmon = hwmon_device_register_with_info(fbd->dev, "fbnic",
+ fbd, &fbnic_chip_info,
+ NULL);
+ if (IS_ERR(fbd->hwmon)) {
+ dev_notice(fbd->dev,
+ "Failed to register hwmon device %pe\n",
+ fbd->hwmon);
+ fbd->hwmon = NULL;
+ }
+}
+
+void fbnic_hwmon_unregister(struct fbnic_dev *fbd)
+{
+ if (!IS_REACHABLE(CONFIG_HWMON) || !fbd->hwmon)
+ return;
+
+ hwmon_device_unregister(fbd->hwmon);
+ fbd->hwmon = NULL;
+}
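With the channel info above, the hwmon core exposes read-only temp1_input (millidegrees Celsius) and in0_input (millivolts) attributes under the device's hwmon directory. A userspace sketch; the hwmon0 index is an assumption for illustration, since hwmonN numbering is assigned at runtime and should be resolved by matching the name attribute ("fbnic"):

    #include <stdio.h>

    static long read_sensor(const char *path)
    {
            long val = -1;
            FILE *f = fopen(path, "r");     /* path is hypothetical */

            if (f) {
                    if (fscanf(f, "%ld", &val) != 1)
                            val = -1;
                    fclose(f);
            }
            return val;
    }

    int main(void)
    {
            long t = read_sensor("/sys/class/hwmon/hwmon0/temp1_input");
            long v = read_sensor("/sys/class/hwmon/hwmon0/in0_input");

            printf("temp=%.3f C volt=%.3f V\n", t / 1000.0, v / 1000.0);
            return 0;
    }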
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
index 914362195920..1bbc0e56f3a0 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
@@ -146,6 +146,17 @@ void fbnic_pcs_irq_disable(struct fbnic_dev *fbd)
free_irq(fbd->pcs_msix_vector, fbd);
}
+void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return;
+
+ synchronize_irq(irq);
+}
+
int fbnic_request_irq(struct fbnic_dev *fbd, int nr, irq_handler_t handler,
unsigned long flags, const char *name, void *data)
{
@@ -169,6 +180,48 @@ void fbnic_free_irq(struct fbnic_dev *fbd, int nr, void *data)
free_irq(irq, data);
}
+void fbnic_napi_name_irqs(struct fbnic_dev *fbd)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fbd->napi_irq); i++)
+ snprintf(fbd->napi_irq[i].name,
+ sizeof(fbd->napi_irq[i].name),
+ "%s-TxRx-%u", fbd->netdev->name, i);
+}
+
+int fbnic_napi_request_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ int i = fbnic_napi_idx(nv);
+ int err;
+
+ if (!fbd->napi_irq[i].users) {
+ err = fbnic_request_irq(fbd, nv->v_idx,
+ fbnic_msix_clean_rings, 0,
+ fbd->napi_irq[i].name,
+ &fbn->napi[i]);
+ if (err)
+ return err;
+ }
+
+ fbd->napi_irq[i].users++;
+ return 0;
+}
+
+void fbnic_napi_free_irq(struct fbnic_dev *fbd,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ int i = fbnic_napi_idx(nv);
+
+ if (--fbd->napi_irq[i].users)
+ return;
+
+ fbnic_free_irq(fbd, nv->v_idx, &fbn->napi[i]);
+}
+
void fbnic_free_irqs(struct fbnic_dev *fbd)
{
struct pci_dev *pdev = to_pci_dev(fbd->dev);
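fbnic_napi_request_irq()/fbnic_napi_free_irq() above turn the raw request_irq()/free_irq() pair into a per-vector refcount: the IRQ is requested only when its users count goes from zero to one and released only when the last user drops it, so repeated channel reconfiguration does not thrash the IRQ core. A minimal sketch of the intended pairing; example_irq_cycle is hypothetical:

    static int example_irq_cycle(struct fbnic_dev *fbd,
                                 struct fbnic_napi_vector *nv)
    {
            int err;

            err = fbnic_napi_request_irq(fbd, nv);  /* users 0 -> 1: request_irq() */
            if (err)
                    return err;

            fbnic_napi_free_irq(fbd, nv);           /* users 1 -> 0: free_irq() */
            return 0;
    }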
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 7b654d0a6dac..14291401f463 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -686,6 +686,77 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
MAC_STAT_TX_BROADCAST);
}
+static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
+ long *val)
+{
+ struct fbnic_fw_completion *fw_cmpl;
+ int err = 0, retries = 5;
+ s32 *sensor;
+
+ fw_cmpl = kzalloc(sizeof(*fw_cmpl), GFP_KERNEL);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ /* Initialize completion and queue it for FW to process */
+ fbnic_fw_init_cmpl(fw_cmpl, FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
+
+ switch (id) {
+ case FBNIC_SENSOR_TEMP:
+ sensor = &fw_cmpl->u.tsene.millidegrees;
+ break;
+ case FBNIC_SENSOR_VOLTAGE:
+ sensor = &fw_cmpl->u.tsene.millivolts;
+ break;
+ default:
+ err = -EINVAL;
+ goto exit_free;
+ }
+
+ err = fbnic_fw_xmit_tsene_read_msg(fbd, fw_cmpl);
+ if (err) {
+ dev_err(fbd->dev,
+ "Failed to transmit TSENE read msg, err %d\n",
+ err);
+ goto exit_free;
+ }
+
+ /* Allow 2 seconds for each reply; resend and retry up to 5 times */
+ while (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ retries--;
+
+ if (retries == 0) {
+ dev_err(fbd->dev,
+ "Timed out waiting for TSENE read\n");
+ err = -ETIMEDOUT;
+ goto exit_cleanup;
+ }
+
+ err = fbnic_fw_xmit_tsene_read_msg(fbd, NULL);
+ if (err) {
+ dev_err(fbd->dev,
+ "Failed to transmit TSENE read msg, err %d\n",
+ err);
+ goto exit_cleanup;
+ }
+ }
+
+ /* Handle error returned by firmware */
+ if (fw_cmpl->result) {
+ err = fw_cmpl->result;
+ dev_err(fbd->dev, "%s: Firmware returned error %d\n",
+ __func__, err);
+ goto exit_cleanup;
+ }
+
+ *val = *sensor;
+exit_cleanup:
+ fbnic_fw_clear_compl(fbd);
+exit_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ return err;
+}
+
static const struct fbnic_mac fbnic_mac_asic = {
.init_regs = fbnic_mac_init_regs,
.pcs_enable = fbnic_pcs_enable_asic,
@@ -695,6 +766,7 @@ static const struct fbnic_mac fbnic_mac_asic = {
.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
.link_down = fbnic_mac_link_down_asic,
.link_up = fbnic_mac_link_up_asic,
+ .get_sensor = fbnic_mac_get_sensor_asic,
};
/**
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index 476239a9d381..05a591653e09 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -47,6 +47,11 @@ enum {
#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1)
#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1)
+enum fbnic_sensor_id {
+ FBNIC_SENSOR_TEMP, /* Temp in millidegrees Celsius */
+ FBNIC_SENSOR_VOLTAGE, /* Voltage in millivolts */
+};
+
/* This structure defines the interface hooks for the MAC. The MAC hooks
* will be configured as a const struct provided with a set of function
* pointers.
@@ -83,6 +88,8 @@ struct fbnic_mac {
void (*link_down)(struct fbnic_dev *fbd);
void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
+
+ int (*get_sensor)(struct fbnic_dev *fbd, int id, long *val);
};
int fbnic_mac_init(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index fc7d80db5fa6..7a96b6ee773f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -23,13 +23,7 @@ int __fbnic_open(struct fbnic_net *fbn)
if (err)
goto free_napi_vectors;
- err = netif_set_real_num_tx_queues(fbn->netdev,
- fbn->num_tx_queues);
- if (err)
- goto free_resources;
-
- err = netif_set_real_num_rx_queues(fbn->netdev,
- fbn->num_rx_queues);
+ err = fbnic_set_netif_queues(fbn);
if (err)
goto free_resources;
@@ -74,6 +68,8 @@ static int fbnic_open(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
int err;
+ fbnic_napi_name_irqs(fbn->fbd);
+
err = __fbnic_open(fbn);
if (!err)
fbnic_up(fbn);
@@ -91,6 +87,7 @@ static int fbnic_stop(struct net_device *netdev)
fbnic_time_stop(fbn);
fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+ fbnic_reset_netif_queues(fbn);
fbnic_free_resources(fbn);
fbnic_free_napi_vectors(fbn);
@@ -615,7 +612,6 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbn->netdev = netdev;
fbn->fbd = fbd;
- INIT_LIST_HEAD(&fbn->napis);
fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index b8417b300778..a392ac1cc4f2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -11,10 +11,14 @@
#include "fbnic_rpc.h"
#include "fbnic_txrx.h"
+#define FBNIC_MAX_NAPI_VECTORS 128u
+
struct fbnic_net {
struct fbnic_ring *tx[FBNIC_MAX_TXQS];
struct fbnic_ring *rx[FBNIC_MAX_RXQS];
+ struct fbnic_napi_vector *napi[FBNIC_MAX_NAPI_VECTORS];
+
struct net_device *netdev;
struct fbnic_dev *fbd;
@@ -56,13 +60,12 @@ struct fbnic_net {
/* Time stamping filter config */
struct kernel_hwtstamp_config hwtstamp_config;
-
- struct list_head napis;
};
int __fbnic_open(struct fbnic_net *fbn);
void fbnic_up(struct fbnic_net *fbn);
void fbnic_down(struct fbnic_net *fbn);
+void fbnic_down_noidle(struct fbnic_net *fbn);
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd);
void fbnic_netdev_free(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 7ccf192f13d5..6cbbc2ee3e1f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -145,7 +145,7 @@ void fbnic_up(struct fbnic_net *fbn)
fbnic_service_task_start(fbn);
}
-static void fbnic_down_noidle(struct fbnic_net *fbn)
+void fbnic_down_noidle(struct fbnic_net *fbn)
{
fbnic_service_task_stop(fbn);
@@ -296,6 +296,8 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Capture snapshot of hardware stats so netdev can calculate delta */
fbnic_reset_hw_stats(fbd);
+ fbnic_hwmon_register(fbd);
+
if (!fbd->dsn) {
dev_warn(&pdev->dev, "Reading serial number failed\n");
goto init_failure_mode;
@@ -358,6 +360,7 @@ static void fbnic_remove(struct pci_dev *pdev)
fbnic_netdev_free(fbd);
}
+ fbnic_hwmon_unregister(fbd);
fbnic_dbg_fbd_exit(fbd);
fbnic_devlink_unregister(fbd);
fbnic_fw_disable_mbx(fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
index 1a5e1e719b30..bb11fc83367d 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -15,7 +15,7 @@ fbnic_pcs_to_net(struct phylink_pcs *pcs)
}
static void
-fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs,
+fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
index 908c098cd59e..c25bd300b902 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -13,10 +13,11 @@ void fbnic_reset_indir_tbl(struct fbnic_net *fbn)
unsigned int num_rx = fbn->num_rx_queues;
unsigned int i;
- for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ if (netif_is_rxfh_configured(fbn->netdev))
+ return;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx);
- fbn->indir_tbl[1][i] = ethtool_rxfh_indir_default(i, num_rx);
- }
}
void fbnic_rss_key_fill(u32 *buffer)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index b5050fabe8fe..d4d7027df9a0 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -1033,20 +1033,20 @@ static int fbnic_poll(struct napi_struct *napi, int budget)
if (likely(napi_complete_done(napi, work_done)))
fbnic_nv_irq_rearm(nv);
- return 0;
+ return work_done;
}
-static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
+irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
{
- struct fbnic_napi_vector *nv = data;
+ struct fbnic_napi_vector *nv = *(void **)data;
napi_schedule_irqoff(&nv->napi);
return IRQ_HANDLED;
}
-static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
- struct fbnic_ring *rxr)
+void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
{
struct fbnic_queue_stats *stats = &rxr->stats;
@@ -1056,8 +1056,8 @@ static void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
fbn->rx_stats.dropped += stats->dropped;
}
-static void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
- struct fbnic_ring *txr)
+void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
{
struct fbnic_queue_stats *stats = &txr->stats;
@@ -1099,7 +1099,6 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
struct fbnic_napi_vector *nv)
{
struct fbnic_dev *fbd = nv->fbd;
- u32 v_idx = nv->v_idx;
int i, j;
for (i = 0; i < nv->txt_count; i++) {
@@ -1113,31 +1112,20 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
}
- fbnic_free_irq(fbd, v_idx, nv);
+ fbnic_napi_free_irq(fbd, nv);
page_pool_destroy(nv->page_pool);
netif_napi_del(&nv->napi);
- list_del(&nv->napis);
+ fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
}
void fbnic_free_napi_vectors(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv, *temp;
-
- list_for_each_entry_safe(nv, temp, &fbn->napis, napis)
- fbnic_free_napi_vector(fbn, nv);
-}
-
-static void fbnic_name_napi_vector(struct fbnic_napi_vector *nv)
-{
- unsigned char *dev_name = nv->napi.dev->name;
+ int i;
- if (!nv->rxt_count)
- snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name,
- nv->v_idx - FBNIC_NON_NAPI_VECTORS);
- else
- snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name,
- nv->v_idx - FBNIC_NON_NAPI_VECTORS);
+ for (i = 0; i < fbn->num_napi; i++)
+ if (fbn->napi[i])
+ fbnic_free_napi_vector(fbn, fbn->napi[i]);
}
#define FBNIC_PAGE_POOL_FLAGS \
@@ -1222,7 +1210,7 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
nv->v_idx = v_idx;
/* Tie napi to netdev */
- list_add(&nv->napis, &fbn->napis);
+ fbn->napi[fbnic_napi_idx(nv)] = nv;
netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
/* Record IRQ to NAPI struct */
@@ -1239,12 +1227,8 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
goto napi_del;
}
- /* Initialize vector name */
- fbnic_name_napi_vector(nv);
-
/* Request the IRQ for napi vector */
- err = fbnic_request_irq(fbd, v_idx, &fbnic_msix_clean_rings,
- IRQF_SHARED, nv->name, nv);
+ err = fbnic_napi_request_irq(fbd, nv);
if (err)
goto pp_destroy;
@@ -1307,7 +1291,7 @@ pp_destroy:
page_pool_destroy(nv->page_pool);
napi_del:
netif_napi_del(&nv->napi);
- list_del(&nv->napis);
+ fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
return err;
}
@@ -1612,19 +1596,18 @@ free_resources:
void fbnic_free_resources(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis)
- fbnic_free_nv_resources(fbn, nv);
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_free_nv_resources(fbn, fbn->napi[i]);
}
int fbnic_alloc_resources(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
- int err = -ENODEV;
+ int i, err = -ENODEV;
- list_for_each_entry(nv, &fbn->napis, napis) {
- err = fbnic_alloc_nv_resources(fbn, nv);
+ for (i = 0; i < fbn->num_napi; i++) {
+ err = fbnic_alloc_nv_resources(fbn, fbn->napi[i]);
if (err)
goto free_resources;
}
@@ -1632,12 +1615,77 @@ int fbnic_alloc_resources(struct fbnic_net *fbn)
return 0;
free_resources:
- list_for_each_entry_continue_reverse(nv, &fbn->napis, napis)
- fbnic_free_nv_resources(fbn, nv);
+ while (i--)
+ fbnic_free_nv_resources(fbn, fbn->napi[i]);
return err;
}
+static void fbnic_set_netif_napi(struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Associate Tx queue with NAPI */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, &nv->napi);
+ }
+
+ /* Associate Rx queue with NAPI */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, &nv->napi);
+ }
+}
+
+static void fbnic_reset_netif_napi(struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Disassociate Tx queue from NAPI */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
+
+ /* Disassociate Rx queue from NAPI */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ }
+}
+
+int fbnic_set_netif_queues(struct fbnic_net *fbn)
+{
+ int i, err;
+
+ err = netif_set_real_num_queues(fbn->netdev, fbn->num_tx_queues,
+ fbn->num_rx_queues);
+ if (err)
+ return err;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_set_netif_napi(fbn->napi[i]);
+
+ return 0;
+}
+
+void fbnic_reset_netif_queues(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_reset_netif_napi(fbn->napi[i]);
+}
+
static void fbnic_disable_twq0(struct fbnic_ring *txr)
{
u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL);
@@ -1670,33 +1718,34 @@ static void fbnic_disable_rcq(struct fbnic_ring *rxr)
void fbnic_napi_disable(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
- napi_disable(&nv->napi);
+ for (i = 0; i < fbn->num_napi; i++) {
+ napi_disable(&fbn->napi[i]->napi);
- fbnic_nv_irq_disable(nv);
+ fbnic_nv_irq_disable(fbn->napi[i]);
}
}
void fbnic_disable(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i, j, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
- list_for_each_entry(nv, &fbn->napis, napis) {
/* Disable Tx queue triads */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_disable_twq0(&qt->sub0);
fbnic_disable_tcq(&qt->cmpl);
}
/* Disable Rx queue triads */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_disable_bdq(&qt->sub0, &qt->sub1);
fbnic_disable_rcq(&qt->cmpl);
@@ -1792,14 +1841,15 @@ int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
void fbnic_flush(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
+ int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
- int i, j;
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+ int j, t;
/* Flush any processed Tx Queue Triads and drop the rest */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
struct netdev_queue *tx_queue;
/* Clean the work queues of unprocessed work */
@@ -1816,15 +1866,11 @@ void fbnic_flush(struct fbnic_net *fbn)
tx_queue = netdev_get_tx_queue(nv->napi.dev,
qt->sub0.q_idx);
netdev_tx_reset_queue(tx_queue);
-
- /* Disassociate Tx queue from NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
- NETDEV_QUEUE_TYPE_TX, NULL);
}
/* Flush any processed Rx Queue Triads and drop the rest */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
/* Clean the work queues of unprocessed work */
fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail);
@@ -1835,43 +1881,23 @@ void fbnic_flush(struct fbnic_net *fbn)
fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0);
qt->cmpl.pkt->buff.data_hard_start = NULL;
-
- /* Disassociate Rx queue from NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
- NETDEV_QUEUE_TYPE_RX, NULL);
}
}
}
void fbnic_fill(struct fbnic_net *fbn)
{
- struct fbnic_napi_vector *nv;
-
- list_for_each_entry(nv, &fbn->napis, napis) {
- int i, j;
-
- /* Configure NAPI mapping for Tx */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
-
- /* Nothing to do if Tx queue is disabled */
- if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
- continue;
+ int i;
- /* Associate Tx queue with NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
- NETDEV_QUEUE_TYPE_TX, &nv->napi);
- }
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+ int j, t;
/* Configure NAPI mapping and populate pages
* in the BDQ rings to use for Rx
*/
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
-
- /* Associate Rx queue with NAPI */
- netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
- NETDEV_QUEUE_TYPE_RX, &nv->napi);
+ for (j = 0, t = nv->txt_count; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
/* Populate the header and payload BDQs */
fbnic_fill_bdq(nv, &qt->sub0);
@@ -2025,21 +2051,23 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
void fbnic_enable(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+ int j, t;
- list_for_each_entry(nv, &fbn->napis, napis) {
/* Setup Tx Queue Triads */
- for (i = 0; i < nv->txt_count; i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_enable_twq0(&qt->sub0);
fbnic_enable_tcq(nv, &qt->cmpl);
}
/* Setup Rx Queue Triads */
- for (j = 0; j < nv->rxt_count; j++, i++) {
- struct fbnic_q_triad *qt = &nv->qt[i];
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
fbnic_enable_bdq(&qt->sub0, &qt->sub1);
fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
@@ -2064,10 +2092,11 @@ void fbnic_napi_enable(struct fbnic_net *fbn)
{
u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
int i;
- list_for_each_entry(nv, &fbn->napis, napis) {
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
napi_enable(&nv->napi);
fbnic_nv_irq_enable(nv);
@@ -2096,17 +2125,18 @@ void fbnic_napi_depletion_check(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
struct fbnic_dev *fbd = fbn->fbd;
- struct fbnic_napi_vector *nv;
- int i, j;
+ int i, j, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
- list_for_each_entry(nv, &fbn->napis, napis) {
/* Find RQs which are completely out of pages */
- for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) {
+ for (t = nv->txt_count, j = 0; j < nv->rxt_count; j++, t++) {
/* Assume 4 pages is always enough to fit a packet
* and therefore generate a completion and an IRQ.
*/
- if (fbnic_desc_used(&nv->qt[i].sub0) < 4 ||
- fbnic_desc_used(&nv->qt[i].sub1) < 4)
+ if (fbnic_desc_used(&nv->qt[t].sub0) < 4 ||
+ fbnic_desc_used(&nv->qt[t].sub1) < 4)
irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
}
}
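The one-line change in fbnic_poll() above (returning work_done instead of 0) restores the NAPI budget contract: the poll handler reports how much work it actually did so the core can account it, and only a return below budget lets the instance complete and re-enable interrupts. A generic sketch of that contract; example_poll is hypothetical:

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int work_done = 0;

            /* ... clean Tx, then process up to 'budget' Rx frames,
             * bumping work_done for each completed frame ...
             */

            if (work_done < budget && napi_complete_done(napi, work_done)) {
                    /* ... re-arm the device interrupt here ... */
            }

            return work_done;       /* never more than budget */
    }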
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 8d626287c3f4..c2a94f31f71b 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -104,14 +104,11 @@ struct fbnic_napi_vector {
struct device *dev; /* Device for DMA unmapping */
struct page_pool *page_pool;
struct fbnic_dev *fbd;
- char name[IFNAMSIZ + 9];
u16 v_idx;
u8 txt_count;
u8 rxt_count;
- struct list_head napis;
-
struct fbnic_q_triad qt[];
};
@@ -123,10 +120,18 @@ netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features);
+void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr);
+void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *txr);
+
int fbnic_alloc_napi_vectors(struct fbnic_net *fbn);
void fbnic_free_napi_vectors(struct fbnic_net *fbn);
int fbnic_alloc_resources(struct fbnic_net *fbn);
void fbnic_free_resources(struct fbnic_net *fbn);
+int fbnic_set_netif_queues(struct fbnic_net *fbn);
+void fbnic_reset_netif_queues(struct fbnic_net *fbn);
+irqreturn_t fbnic_msix_clean_rings(int irq, void *data);
void fbnic_napi_enable(struct fbnic_net *fbn);
void fbnic_napi_disable(struct fbnic_net *fbn);
void fbnic_enable(struct fbnic_net *fbn);
@@ -137,4 +142,9 @@ void fbnic_fill(struct fbnic_net *fbn);
void fbnic_napi_depletion_check(struct net_device *netdev);
int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail);
+static inline int fbnic_napi_idx(const struct fbnic_napi_vector *nv)
+{
+ return nv->v_idx - FBNIC_NON_NAPI_VECTORS;
+}
+
#endif /* _FBNIC_TXRX_H_ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 25cb2f61986f..1efa584e7107 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -443,7 +443,7 @@ int lan966x_stats_init(struct lan966x *lan966x);
void lan966x_port_config_down(struct lan966x_port *port);
void lan966x_port_config_up(struct lan966x_port *port);
-void lan966x_port_status_get(struct lan966x_port *port,
+void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode,
struct phylink_link_state *state);
int lan966x_port_pcs_set(struct lan966x_port *port,
struct lan966x_port_config *config);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
index 1d63903f9006..75188b99e4e7 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -88,11 +88,12 @@ static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs)
}
static void lan966x_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lan966x_port *port = lan966x_pcs_to_port(pcs);
- lan966x_port_status_get(port, state);
+ lan966x_port_status_get(port, neg_mode, state);
}
static int lan966x_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index fdfa4040d9ee..cf7de0267c32 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -284,7 +284,7 @@ void lan966x_port_config_up(struct lan966x_port *port)
lan966x_port_link_up(port);
}
-void lan966x_port_status_get(struct lan966x_port *port,
+void lan966x_port_status_get(struct lan966x_port *port, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lan966x *lan966x = port->lan966x;
@@ -314,7 +314,7 @@ void lan966x_port_status_get(struct lan966x_port *port,
bmsr |= BMSR_ANEGCOMPLETE;
lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val);
- phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lp_adv);
} else {
if (!state->link)
return;
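The lan966x hunks above adapt to the phylink PCS API change that threads the negotiation mode into the state readout, so decode helpers can distinguish in-band autoneg from fixed configurations. The callback shape after the change, in a minimal sketch (example_pcs_get_state is hypothetical; the decode call matches the one used above):

    static void example_pcs_get_state(struct phylink_pcs *pcs,
                                      unsigned int neg_mode,
                                      struct phylink_link_state *state)
    {
            u16 bmsr = 0, lp_adv = 0;

            /* ... read BMSR/LPA equivalents from the hardware ... */

            phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lp_adv);
    }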
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index 35b057c9d0cb..35e1c0cf345e 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -28,5 +28,6 @@ config SPARX5_DCB
config LAN969X_SWITCH
bool "Lan969x switch driver"
depends on SPARX5_SWITCH
+ select PAGE_POOL
help
This driver supports the lan969x family of network switch devices.
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index 4bf2a885a9da..d447f9e84d92 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -20,7 +20,9 @@ sparx5-switch-$(CONFIG_LAN969X_SWITCH) += lan969x/lan969x_regs.o \
lan969x/lan969x.o \
lan969x/lan969x_calendar.o \
lan969x/lan969x_vcap_ag_api.o \
- lan969x/lan969x_vcap_impl.o
+ lan969x/lan969x_vcap_impl.o \
+ lan969x/lan969x_rgmii.o \
+ lan969x/lan969x_fdma.o
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
index c2afa2176b08..f3a9c71bea36 100644
--- a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c
@@ -90,9 +90,12 @@ static const struct sparx5_main_io_resource lan969x_main_iomap[] = {
{ TARGET_DEV2G5 + 27, 0x30d8000, 1 }, /* 0xe30d8000 */
{ TARGET_DEV10G + 9, 0x30dc000, 1 }, /* 0xe30dc000 */
{ TARGET_PCS10G_BR + 9, 0x30e0000, 1 }, /* 0xe30e0000 */
+ { TARGET_DEVRGMII, 0x30e4000, 1 }, /* 0xe30e4000 */
+ { TARGET_DEVRGMII + 1, 0x30e8000, 1 }, /* 0xe30e8000 */
{ TARGET_DSM, 0x30ec000, 1 }, /* 0xe30ec000 */
{ TARGET_PORT_CONF, 0x30f0000, 1 }, /* 0xe30f0000 */
{ TARGET_ASM, 0x3200000, 1 }, /* 0xe3200000 */
+ { TARGET_HSIO_WRAP, 0x3408000, 1 }, /* 0xe3408000 */
};
static struct sparx5_sdlb_group lan969x_sdlb_groups[LAN969X_SDLB_GRP_CNT] = {
@@ -329,6 +332,7 @@ static const struct sparx5_ops lan969x_ops = {
.is_port_5g = &lan969x_port_is_5g,
.is_port_10g = &lan969x_port_is_10g,
.is_port_25g = &lan969x_port_is_25g,
+ .is_port_rgmii = &lan969x_port_is_rgmii,
.get_port_dev_index = &lan969x_port_dev_mapping,
.get_port_dev_bit = &lan969x_get_dev_mode_bit,
.get_hsch_max_group_rate = &lan969x_get_hsch_max_group_rate,
@@ -336,6 +340,11 @@ static const struct sparx5_ops lan969x_ops = {
.set_port_mux = &lan969x_port_mux_set,
.ptp_irq_handler = &lan969x_ptp_irq_handler,
.dsm_calendar_calc = &lan969x_dsm_calendar_calc,
+ .port_config_rgmii = &lan969x_port_config_rgmii,
+ .fdma_init = &lan969x_fdma_init,
+ .fdma_deinit = &lan969x_fdma_deinit,
+ .fdma_poll = &lan969x_fdma_napi_poll,
+ .fdma_xmit = &lan969x_fdma_xmit,
};
const struct sparx5_match_data lan969x_desc = {
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
index 2489d0d32dfd..529fde3d4deb 100644
--- a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h
@@ -59,7 +59,24 @@ static inline bool lan969x_port_is_25g(int portno)
return false;
}
+static inline bool lan969x_port_is_rgmii(int portno)
+{
+ return portno == 28 || portno == 29;
+}
+
/* lan969x_calendar.c */
int lan969x_dsm_calendar_calc(struct sparx5 *sparx5, u32 taxi,
struct sparx5_calendar_data *data);
+
+/* lan969x_rgmii.c */
+int lan969x_port_config_rgmii(struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+
+/* lan969x_fdma.c */
+int lan969x_fdma_init(struct sparx5 *sparx5);
+int lan969x_fdma_deinit(struct sparx5 *sparx5);
+int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight);
+int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
+
#endif
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
new file mode 100644
index 000000000000..1282f5c3ee6d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2025 Microchip Technology Inc. and its subsidiaries.
+ */
+#include <net/page_pool/helpers.h>
+
+#include "../sparx5_main.h"
+#include "../sparx5_main_regs.h"
+#include "../sparx5_port.h"
+
+#include "fdma_api.h"
+#include "lan969x.h"
+
+#define FDMA_PRIV(fdma) ((struct sparx5 *)((fdma)->priv))
+
+static int lan969x_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ *dataptr = FDMA_PRIV(fdma)->tx.dbs[dcb].dma_addr;
+
+ return 0;
+}
+
+static int lan969x_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
+ u64 *dataptr)
+{
+ struct sparx5_rx *rx = &FDMA_PRIV(fdma)->rx;
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ rx->page[dcb][db] = page;
+
+ *dataptr = page_pool_get_dma_addr(page);
+
+ return 0;
+}
+
+static int lan969x_fdma_get_next_dcb(struct sparx5_tx *tx)
+{
+ struct fdma *fdma = &tx->fdma;
+
+ for (int i = 0; i < fdma->n_dcbs; ++i)
+ if (!tx->dbs[i].used && !fdma_is_last(fdma, &fdma->dcbs[i]))
+ return i;
+
+ return -ENOSPC;
+}
+
+static void lan969x_fdma_tx_clear_buf(struct sparx5 *sparx5, int weight)
+{
+ struct fdma *fdma = &sparx5->tx.fdma;
+ struct sparx5_tx_buf *db;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&sparx5->tx_lock, flags);
+
+ for (i = 0; i < fdma->n_dcbs; ++i) {
+ db = &sparx5->tx.dbs[i];
+
+ if (!db->used)
+ continue;
+
+ if (!fdma_db_is_done(fdma_db_get(fdma, i, 0)))
+ continue;
+
+ db->dev->stats.tx_bytes += db->skb->len;
+ db->dev->stats.tx_packets++;
+ sparx5->tx.packets++;
+
+ dma_unmap_single(sparx5->dev,
+ db->dma_addr,
+ db->skb->len,
+ DMA_TO_DEVICE);
+
+ if (!db->ptp)
+ napi_consume_skb(db->skb, weight);
+
+ db->used = false;
+ }
+
+ spin_unlock_irqrestore(&sparx5->tx_lock, flags);
+}
+
+static void lan969x_fdma_free_pages(struct sparx5_rx *rx)
+{
+ struct fdma *fdma = &rx->fdma;
+
+ for (int i = 0; i < fdma->n_dcbs; ++i) {
+ for (int j = 0; j < fdma->n_dbs; ++j)
+ page_pool_put_full_page(rx->page_pool,
+ rx->page[i][j], false);
+ }
+}
+
+static struct sk_buff *lan969x_fdma_rx_get_frame(struct sparx5 *sparx5,
+ struct sparx5_rx *rx)
+{
+ const struct sparx5_consts *consts = sparx5->data->consts;
+ struct fdma *fdma = &rx->fdma;
+ struct sparx5_port *port;
+ struct frame_info fi;
+ struct sk_buff *skb;
+ struct fdma_db *db;
+ struct page *page;
+
+ db = &fdma->dcbs[fdma->dcb_index].db[fdma->db_index];
+ page = rx->page[fdma->dcb_index][fdma->db_index];
+
+ sparx5_ifh_parse(sparx5, page_address(page), &fi);
+ port = fi.src_port < consts->n_ports ? sparx5->ports[fi.src_port] :
+ NULL;
+ if (WARN_ON(!port))
+ goto free_page;
+
+ skb = build_skb(page_address(page), fdma->db_size);
+ if (unlikely(!skb))
+ goto free_page;
+
+ skb_mark_for_recycle(skb);
+ skb_put(skb, fdma_db_len_get(db));
+ skb_pull(skb, IFH_LEN * sizeof(u32));
+
+ skb->dev = port->ndev;
+
+ if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
+ skb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (test_bit(port->portno, sparx5->bridge_mask))
+ skb->offload_fwd_mark = 1;
+
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ return skb;
+
+free_page:
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ return NULL;
+}
+
+static int lan969x_fdma_rx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct fdma *fdma = &rx->fdma;
+ int err;
+
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = fdma->n_dcbs * fdma->n_dbs,
+ .nid = NUMA_NO_NODE,
+ .dev = sparx5->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = fdma->db_size -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ };
+
+ rx->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx->page_pool))
+ return PTR_ERR(rx->page_pool);
+
+ err = fdma_alloc_coherent(sparx5->dev, fdma);
+ if (err)
+ return err;
+
+ fdma_dcbs_init(fdma,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
+ return 0;
+}
+
+static int lan969x_fdma_tx_alloc(struct sparx5 *sparx5)
+{
+ struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
+ int err;
+
+ tx->dbs = kcalloc(fdma->n_dcbs,
+ sizeof(struct sparx5_tx_buf),
+ GFP_KERNEL);
+ if (!tx->dbs)
+ return -ENOMEM;
+
+ err = fdma_alloc_coherent(sparx5->dev, fdma);
+ if (err) {
+ kfree(tx->dbs);
+ return err;
+ }
+
+ fdma_dcbs_init(fdma,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_DONE);
+
+ return 0;
+}
+
+static void lan969x_fdma_rx_init(struct sparx5 *sparx5)
+{
+ struct fdma *fdma = &sparx5->rx.fdma;
+
+ fdma->channel_id = FDMA_XTR_CHANNEL;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = 1;
+ fdma->priv = sparx5;
+ fdma->size = fdma_get_size(fdma);
+ fdma->db_size = PAGE_SIZE;
+ fdma->ops.dataptr_cb = &lan969x_fdma_rx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
+
+ /* Fetch a netdev for SKB and NAPI use, any will do */
+ for (int idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
+ struct sparx5_port *port = sparx5->ports[idx];
+
+ if (port && port->ndev) {
+ sparx5->rx.ndev = port->ndev;
+ break;
+ }
+ }
+}
+
+static void lan969x_fdma_tx_init(struct sparx5 *sparx5)
+{
+ struct fdma *fdma = &sparx5->tx.fdma;
+
+ fdma->channel_id = FDMA_INJ_CHANNEL;
+ fdma->n_dcbs = FDMA_DCB_MAX;
+ fdma->n_dbs = 1;
+ fdma->priv = sparx5;
+ fdma->size = fdma_get_size(fdma);
+ fdma->db_size = PAGE_SIZE;
+ fdma->ops.dataptr_cb = &lan969x_fdma_tx_dataptr_cb;
+ fdma->ops.nextptr_cb = &fdma_nextptr_cb;
+}
+
+int lan969x_fdma_napi_poll(struct napi_struct *napi, int weight)
+{
+ struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
+ struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
+ int old_dcb, dcb_reload, counter = 0;
+ struct fdma *fdma = &rx->fdma;
+ struct sk_buff *skb;
+
+ dcb_reload = fdma->dcb_index;
+
+ lan969x_fdma_tx_clear_buf(sparx5, weight);
+
+ /* Process RX data */
+ while (counter < weight) {
+ if (!fdma_has_frames(fdma))
+ break;
+
+ skb = lan969x_fdma_rx_get_frame(sparx5, rx);
+ if (!skb)
+ break;
+
+ napi_gro_receive(&rx->napi, skb);
+
+ fdma_db_advance(fdma);
+ counter++;
+ /* Check if the DCB can be reused */
+ if (fdma_dcb_is_reusable(fdma))
+ continue;
+
+ fdma_db_reset(fdma);
+ fdma_dcb_advance(fdma);
+ }
+
+ /* Allocate new pages and map them */
+ while (dcb_reload != fdma->dcb_index) {
+ old_dcb = dcb_reload;
+ dcb_reload++;
+ /* n_dcbs must be a power of 2 */
+ dcb_reload &= fdma->n_dcbs - 1;
+
+ fdma_dcb_add(fdma,
+ old_dcb,
+ FDMA_DCB_INFO_DATAL(fdma->db_size),
+ FDMA_DCB_STATUS_INTR);
+
+ sparx5_fdma_reload(sparx5, fdma);
+ }
+
+ if (counter < weight && napi_complete_done(napi, counter))
+ spx5_wr(0xff, sparx5, FDMA_INTR_DB_ENA);
+
+ return counter;
+}
+
+int lan969x_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int next_dcb, needed_headroom, needed_tailroom, err;
+ struct sparx5_tx *tx = &sparx5->tx;
+ struct fdma *fdma = &tx->fdma;
+ struct sparx5_tx_buf *db_buf;
+ u64 status;
+
+ next_dcb = lan969x_fdma_get_next_dcb(tx);
+ if (next_dcb < 0)
+ return -EBUSY;
+
+ needed_headroom = max_t(int, IFH_LEN * 4 - skb_headroom(skb), 0);
+ needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+ }
+
+ skb_push(skb, IFH_LEN * 4);
+ memcpy(skb->data, ifh, IFH_LEN * 4);
+ skb_put(skb, ETH_FCS_LEN);
+
+ db_buf = &tx->dbs[next_dcb];
+ db_buf->dma_addr = dma_map_single(sparx5->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(sparx5->dev, db_buf->dma_addr))
+ return -ENOMEM;
+
+ db_buf->dev = dev;
+ db_buf->skb = skb;
+ db_buf->ptp = false;
+ db_buf->used = true;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ db_buf->ptp = true;
+
+ status = FDMA_DCB_STATUS_SOF |
+ FDMA_DCB_STATUS_EOF |
+ FDMA_DCB_STATUS_BLOCKO(0) |
+ FDMA_DCB_STATUS_BLOCKL(skb->len) |
+ FDMA_DCB_STATUS_INTR;
+
+ fdma_dcb_advance(fdma);
+ fdma_dcb_add(fdma, next_dcb, 0, status);
+
+ sparx5_fdma_reload(sparx5, fdma);
+
+ return NETDEV_TX_OK;
+}
+
+int lan969x_fdma_init(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ int err;
+
+ lan969x_fdma_rx_init(sparx5);
+ lan969x_fdma_tx_init(sparx5);
+ sparx5_fdma_injection_mode(sparx5);
+
+ err = dma_set_mask_and_coherent(sparx5->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(sparx5->dev, "Failed to set 64-bit FDMA mask\n");
+ return err;
+ }
+
+ err = lan969x_fdma_rx_alloc(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Failed to allocate RX buffers: %d\n",
+ err);
+ return err;
+ }
+
+ err = lan969x_fdma_tx_alloc(sparx5);
+ if (err) {
+ fdma_free_coherent(sparx5->dev, &rx->fdma);
+ dev_err(sparx5->dev, "Failed to allocate TX buffers: %d\n",
+ err);
+ return err;
+ }
+
+ /* Reset FDMA state */
+ spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
+ spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);
+
+ return err;
+}
+
+int lan969x_fdma_deinit(struct sparx5 *sparx5)
+{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
+
+ sparx5_fdma_stop(sparx5);
+ fdma_free_coherent(sparx5->dev, &tx->fdma);
+ fdma_free_coherent(sparx5->dev, &rx->fdma);
+ lan969x_fdma_free_pages(rx);
+ page_pool_destroy(rx->page_pool);
+
+ return 0;
+}
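Before injection, lan969x_fdma_xmit() above must guarantee room for the injection header (IFH) in front of the frame and the FCS behind it; it reallocates the skb only when headroom or tailroom is short, or when the header is shared with a clone. That check in isolation, as a sketch (example_make_room and its parameters are hypothetical):

    static int example_make_room(struct sk_buff *skb, int hdr_len, int fcs_len)
    {
            int need_head = max_t(int, hdr_len - skb_headroom(skb), 0);
            int need_tail = max_t(int, fcs_len - skb_tailroom(skb), 0);

            /* Reallocate only if the skb cannot take the push/put as-is */
            if (need_head || need_tail || skb_header_cloned(skb))
                    return pskb_expand_head(skb, need_head, need_tail,
                                            GFP_ATOMIC);
            return 0;
    }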
diff --git a/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c
new file mode 100644
index 000000000000..4e422ca50828
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip lan969x Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "lan969x.h"
+
+/* Tx clock selectors */
+#define LAN969X_RGMII_TX_CLK_SEL_125MHZ 1 /* 1000Mbps */
+#define LAN969X_RGMII_TX_CLK_SEL_25MHZ 2 /* 100Mbps */
+#define LAN969X_RGMII_TX_CLK_SEL_2M5MHZ 3 /* 10Mbps */
+
+/* Port speed selectors */
+#define LAN969X_RGMII_SPEED_SEL_10 0 /* Select 10Mbps speed */
+#define LAN969X_RGMII_SPEED_SEL_100 1 /* Select 100Mbps speed */
+#define LAN969X_RGMII_SPEED_SEL_1000 2 /* Select 1000Mbps speed */
+
+/* Clock delay selectors */
+#define LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS 2 /* Phase shift 45deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS 3 /* Phase shift 77deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS 4 /* Phase shift 90deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS 5 /* Phase shift 112deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS 6 /* Phase shift 135deg */
+#define LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS 7 /* Phase shift 147deg */
+
+#define LAN969X_RGMII_PORT_START_IDX 28 /* Index of the first RGMII port */
+#define LAN969X_RGMII_IFG_TX 4 /* TX Inter Frame Gap value */
+#define LAN969X_RGMII_IFG_RX1 5 /* RX1 Inter Frame Gap value */
+#define LAN969X_RGMII_IFG_RX2 1 /* RX2 Inter Frame Gap value */
+
+#define RGMII_PORT_IDX(port) ((port)->portno - LAN969X_RGMII_PORT_START_IDX)
+
+/* Get the tx clock selector based on the port speed. */
+static int lan969x_rgmii_get_clk_sel(int speed)
+{
+ return (speed == SPEED_10 ? LAN969X_RGMII_TX_CLK_SEL_2M5MHZ :
+ speed == SPEED_100 ? LAN969X_RGMII_TX_CLK_SEL_25MHZ :
+ LAN969X_RGMII_TX_CLK_SEL_125MHZ);
+}
+
+/* Get the port speed selector based on the port speed. */
+static int lan969x_rgmii_get_speed_sel(int speed)
+{
+ return (speed == SPEED_10 ? LAN969X_RGMII_SPEED_SEL_10 :
+ speed == SPEED_100 ? LAN969X_RGMII_SPEED_SEL_100 :
+ LAN969X_RGMII_SPEED_SEL_1000);
+}
+
+/* Get the clock delay selector based on the clock delay in picoseconds. */
+static int lan969x_rgmii_get_clk_delay_sel(struct sparx5_port *port,
+ u32 delay_ps, u32 *clk_delay_sel)
+{
+ switch (delay_ps) {
+ case 0:
+ /* Hardware default selector. */
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS;
+ break;
+ case 1000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_0_NS;
+ break;
+ case 1700:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_1_7_NS;
+ break;
+ case 2000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_0_NS;
+ break;
+ case 2500:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_2_5_NS;
+ break;
+ case 3000:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_0_NS;
+ break;
+ case 3300:
+ *clk_delay_sel = LAN969X_RGMII_CLK_DELAY_SEL_3_3_NS;
+ break;
+ default:
+ dev_err(port->sparx5->dev, "Invalid RGMII delay: %u\n", delay_ps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Configure the RGMII tx clock frequency. */
+static void lan969x_rgmii_tx_clk_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 clk_sel = lan969x_rgmii_get_clk_sel(conf->speed);
+ u32 idx = RGMII_PORT_IDX(port);
+
+ /* Take the RGMII clock domain out of reset and set tx clock
+ * frequency.
+ */
+ spx5_rmw(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(clk_sel) |
+ HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(0) |
+ HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(0),
+ HSIO_WRAP_RGMII_CFG_TX_CLK_CFG |
+ HSIO_WRAP_RGMII_CFG_RGMII_TX_RST |
+ HSIO_WRAP_RGMII_CFG_RGMII_RX_RST,
+ port->sparx5, HSIO_WRAP_RGMII_CFG(idx));
+}
+
+/* Configure the RGMII port device. */
+static void lan969x_rgmii_port_device_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 dtag, dotag, etype, speed_sel, idx = RGMII_PORT_IDX(port);
+
+ speed_sel = lan969x_rgmii_get_speed_sel(conf->speed);
+
+ etype = (port->vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
+ port->custom_etype :
+ port->vlan_type == SPX5_VLAN_PORT_TYPE_C ?
+ ETH_P_8021Q : ETH_P_8021AD);
+
+ dtag = port->max_vlan_tags == SPX5_PORT_MAX_TAGS_TWO;
+ dotag = port->max_vlan_tags != SPX5_PORT_MAX_TAGS_NONE;
+
+ /* Enable the MAC. */
+ spx5_wr(DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(1) |
+ DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(1),
+ port->sparx5, DEVRGMII_MAC_ENA_CFG(idx));
+
+ /* Configure the Inter Frame Gap. */
+ spx5_wr(DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(LAN969X_RGMII_IFG_TX) |
+ DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(LAN969X_RGMII_IFG_RX1) |
+ DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(LAN969X_RGMII_IFG_RX2),
+ port->sparx5, DEVRGMII_MAC_IFG_CFG(idx));
+
+ /* Configure port data rate. */
+ spx5_wr(DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(speed_sel),
+ port->sparx5, DEVRGMII_DEV_RST_CTRL(idx));
+
+ /* Configure VLAN awareness. */
+ spx5_wr(DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(etype) |
+ DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
+ DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
+ DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
+ port->sparx5,
+ DEVRGMII_MAC_TAGS_CFG(idx));
+}
+
+/* Configure the RGMII delay lines in the MAC.
+ *
+ * We use the "rx-internal-delay-ps" and "tx-internal-delay-ps" properties to
+ * configure the rx and tx delays for the MAC. If these properties are missing
+ * or set to zero, the MAC will not apply any delay.
+ *
+ * The PHY-side delays are determined by the PHY mode
+ * (e.g. PHY_INTERFACE_MODE_RGMII_{ID, RXID, TXID}) and are ignored
+ * entirely on the MAC side.
+ */
+static int lan969x_rgmii_delay_config(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ u32 tx_clk_sel, rx_clk_sel, tx_delay_ps = 0, rx_delay_ps = 0;
+ u32 idx = RGMII_PORT_IDX(port);
+ int err;
+
+ of_property_read_u32(port->of_node, "rx-internal-delay-ps",
+ &rx_delay_ps);
+
+ of_property_read_u32(port->of_node, "tx-internal-delay-ps",
+ &tx_delay_ps);
+
+ err = lan969x_rgmii_get_clk_delay_sel(port, rx_delay_ps, &rx_clk_sel);
+ if (err)
+ return err;
+
+ err = lan969x_rgmii_get_clk_delay_sel(port, tx_delay_ps, &tx_clk_sel);
+ if (err)
+ return err;
+
+ /* Configure rx delay. */
+ spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!rx_delay_ps) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(rx_clk_sel),
+ HSIO_WRAP_DLL_CFG_DLL_RST |
+ HSIO_WRAP_DLL_CFG_DLL_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL,
+ port->sparx5, HSIO_WRAP_DLL_CFG(idx, 0));
+
+ /* Configure tx delay. */
+ spx5_rmw(HSIO_WRAP_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(!!tx_delay_ps) |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(tx_clk_sel),
+ HSIO_WRAP_DLL_CFG_DLL_RST |
+ HSIO_WRAP_DLL_CFG_DLL_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_ENA |
+ HSIO_WRAP_DLL_CFG_DLL_CLK_SEL,
+ port->sparx5, HSIO_WRAP_DLL_CFG(idx, 1));
+
+ return 0;
+}
+
+/* Configure GPIOs to be used as an RGMII interface. */
+static void lan969x_rgmii_gpio_config(struct sparx5_port *port)
+{
+ u32 idx = RGMII_PORT_IDX(port);
+
+ /* Enable the RGMII on the GPIOs. */
+ spx5_wr(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(1), port->sparx5,
+ HSIO_WRAP_XMII_CFG(!idx));
+}
+
+int lan969x_port_config_rgmii(struct sparx5_port *port,
+ struct sparx5_port_config *conf)
+{
+ int err;
+
+ err = lan969x_rgmii_delay_config(port, conf);
+ if (err)
+ return err;
+
+ lan969x_rgmii_tx_clk_config(port, conf);
+ lan969x_rgmii_gpio_config(port);
+ lan969x_rgmii_port_device_config(port, conf);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 0027144a2af2..dbe86f937b21 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -18,9 +18,6 @@
#include "sparx5_main.h"
#include "sparx5_port.h"
-#define FDMA_XTR_CHANNEL 6
-#define FDMA_INJ_CHANNEL 0
-
#define FDMA_XTR_BUFFER_SIZE 2048
#define FDMA_WEIGHT 4
@@ -133,7 +130,7 @@ static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *t
sparx5, FDMA_CH_ACTIVATE);
}
-static void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
+void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
{
/* Reload the RX channel */
spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_RELOAD);
@@ -183,7 +180,7 @@ static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx
return true;
}
-static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
+int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
@@ -213,11 +210,11 @@ static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
return counter;
}
-int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev)
{
struct sparx5_tx *tx = &sparx5->tx;
struct fdma *fdma = &tx->fdma;
- static bool first_time = true;
void *virt_addr;
fdma_dcb_advance(fdma);
@@ -238,12 +235,8 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
FDMA_DCB_STATUS_BLOCKO(0) |
FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4));
- if (first_time) {
- sparx5_fdma_tx_activate(sparx5, tx);
- first_time = false;
- } else {
- sparx5_fdma_reload(sparx5, fdma);
- }
+ sparx5_fdma_reload(sparx5, fdma);
+
return NETDEV_TX_OK;
}
@@ -260,10 +253,6 @@ static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
FDMA_DCB_STATUS_INTR);
- netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
- FDMA_WEIGHT);
- napi_enable(&rx->napi);
- sparx5_fdma_rx_activate(sparx5, rx);
return 0;
}
@@ -348,7 +337,7 @@ irqreturn_t sparx5_fdma_handler(int irq, void *args)
return IRQ_HANDLED;
}
-static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
+void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
{
const int byte_swap = 1;
int portno;
@@ -410,7 +399,7 @@ static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
}
}
-int sparx5_fdma_start(struct sparx5 *sparx5)
+int sparx5_fdma_init(struct sparx5 *sparx5)
{
int err;
@@ -443,24 +432,55 @@ int sparx5_fdma_start(struct sparx5 *sparx5)
return err;
}
+int sparx5_fdma_deinit(struct sparx5 *sparx5)
+{
+ sparx5_fdma_stop(sparx5);
+ fdma_free_phys(&sparx5->rx.fdma);
+ fdma_free_phys(&sparx5->tx.fdma);
+
+ return 0;
+}
+
static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5)
{
return spx5_rd(sparx5, FDMA_PORT_CTRL(0));
}
+int sparx5_fdma_start(struct sparx5 *sparx5)
+{
+ const struct sparx5_ops *ops = sparx5->data->ops;
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
+
+ netif_napi_add_weight(rx->ndev,
+ &rx->napi,
+ ops->fdma_poll,
+ FDMA_WEIGHT);
+
+ napi_enable(&rx->napi);
+
+ sparx5_fdma_rx_activate(sparx5, rx);
+ sparx5_fdma_tx_activate(sparx5, tx);
+
+ return 0;
+}
+
int sparx5_fdma_stop(struct sparx5 *sparx5)
{
+ struct sparx5_rx *rx = &sparx5->rx;
+ struct sparx5_tx *tx = &sparx5->tx;
u32 val;
- napi_disable(&sparx5->rx.napi);
+ napi_disable(&rx->napi);
+
/* Stop the fdma and channel interrupts */
- sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx);
- sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx);
+ sparx5_fdma_rx_deactivate(sparx5, rx);
+ sparx5_fdma_tx_deactivate(sparx5, tx);
+
/* Wait for the RX channel to stop */
read_poll_timeout(sparx5_fdma_port_ctrl, val,
FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
500, 10000, 0, sparx5);
- fdma_free_phys(&sparx5->rx.fdma);
- fdma_free_phys(&sparx5->tx.fdma);
+
return 0;
}
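
With this split, buffer allocation and datapath activation become separate steps: init/deinit own the DCB memory, while start/stop own NAPI registration and channel activation. A minimal sketch of the resulting lifecycle, as this patch wires it up for sparx5:

	/* probe: allocate rings, then bring the datapath up */
	err = ops->fdma_init(sparx5);		/* rx/tx alloc + inj mode */
	if (!err)
		sparx5_fdma_start(sparx5);	/* add NAPI, activate RX/TX */

	/* remove: quiesce first, then free the rings */
	ops->fdma_deinit(sparx5);		/* fdma_stop() + free phys */
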
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index f61aa15beab7..6a0e5b83ecd0 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -313,10 +313,13 @@ static int sparx5_create_port(struct sparx5 *sparx5,
struct initial_port_config *config)
{
struct sparx5_port *spx5_port;
+ const struct sparx5_ops *ops;
struct net_device *ndev;
struct phylink *phylink;
int err;
+ ops = sparx5->data->ops;
+
ndev = sparx5_create_netdev(sparx5, config->portno);
if (IS_ERR(ndev)) {
dev_err(sparx5->dev, "Could not create net device: %02u\n",
@@ -357,6 +360,9 @@ static int sparx5_create_port(struct sparx5 *sparx5,
MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD |
MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD;
+ if (ops->is_port_rgmii(spx5_port->portno))
+ phy_interface_set_rgmii(spx5_port->phylink_config.supported_interfaces);
+
__set_bit(PHY_INTERFACE_MODE_SGMII,
spx5_port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
@@ -778,15 +784,19 @@ static int sparx5_start(struct sparx5 *sparx5)
/* Start Frame DMA with fallback to register based INJ/XTR */
err = -ENXIO;
- if (sparx5->fdma_irq >= 0 && is_sparx5(sparx5)) {
- if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0)
+ if (sparx5->fdma_irq >= 0) {
+ if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0 ||
+ !is_sparx5(sparx5))
err = devm_request_irq(sparx5->dev,
sparx5->fdma_irq,
sparx5_fdma_handler,
0,
"sparx5-fdma", sparx5);
- if (!err)
- err = sparx5_fdma_start(sparx5);
+ if (!err) {
+ err = ops->fdma_init(sparx5);
+ if (!err)
+ sparx5_fdma_start(sparx5);
+ }
if (err)
sparx5->fdma_irq = -ENXIO;
} else {
@@ -830,6 +840,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
struct initial_port_config *configs, *config;
struct device_node *np = pdev->dev.of_node;
struct device_node *ports, *portnp;
+ const struct sparx5_ops *ops;
struct reset_control *reset;
struct sparx5 *sparx5;
int idx = 0, err = 0;
@@ -851,6 +862,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
return -EINVAL;
regs = sparx5->data->regs;
+ ops = sparx5->data->ops;
/* Do switch core reset if available */
reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
@@ -880,7 +892,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
for_each_available_child_of_node(ports, portnp) {
struct sparx5_port_config *conf;
- struct phy *serdes;
+ struct phy *serdes = NULL;
u32 portno;
err = of_property_read_u32(portnp, "reg", &portno);
@@ -910,13 +922,17 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
conf->sd_sgpio = ~0;
else
sparx5->sd_sgpio_remapping = true;
- serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
- if (IS_ERR(serdes)) {
- err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
- "port %u: missing serdes\n",
- portno);
- of_node_put(portnp);
- goto cleanup_config;
+ /* There is no SerDes node for RGMII ports. */
+ if (!ops->is_port_rgmii(portno)) {
+ serdes = devm_of_phy_get(sparx5->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = dev_err_probe(sparx5->dev,
+ PTR_ERR(serdes),
+ "port %u: missing serdes\n",
+ portno);
+ of_node_put(portnp);
+ goto cleanup_config;
+ }
}
config->portno = portno;
config->node = portnp;
@@ -1014,6 +1030,7 @@ cleanup_pnode:
static void mchp_sparx5_remove(struct platform_device *pdev)
{
struct sparx5 *sparx5 = platform_get_drvdata(pdev);
+ const struct sparx5_ops *ops = sparx5->data->ops;
debugfs_remove_recursive(sparx5->debugfs_root);
if (sparx5->xtr_irq) {
@@ -1025,7 +1042,7 @@ static void mchp_sparx5_remove(struct platform_device *pdev)
sparx5->fdma_irq = -ENXIO;
}
sparx5_ptp_deinit(sparx5);
- sparx5_fdma_stop(sparx5);
+ ops->fdma_deinit(sparx5);
sparx5_cleanup_ports(sparx5);
sparx5_vcap_destroy(sparx5);
/* Unregister netdevs */
@@ -1072,6 +1089,7 @@ static const struct sparx5_ops sparx5_ops = {
.is_port_5g = &sparx5_port_is_5g,
.is_port_10g = &sparx5_port_is_10g,
.is_port_25g = &sparx5_port_is_25g,
+ .is_port_rgmii = &sparx5_port_is_rgmii,
.get_port_dev_index = &sparx5_port_dev_mapping,
.get_port_dev_bit = &sparx5_port_dev_mapping,
.get_hsch_max_group_rate = &sparx5_get_hsch_max_group_rate,
@@ -1079,6 +1097,10 @@ static const struct sparx5_ops sparx5_ops = {
.set_port_mux = &sparx5_port_mux_set,
.ptp_irq_handler = &sparx5_ptp_irq_handler,
.dsm_calendar_calc = &sparx5_dsm_calendar_calc,
+ .fdma_init = &sparx5_fdma_init,
+ .fdma_deinit = &sparx5_fdma_deinit,
+ .fdma_poll = &sparx5_fdma_napi_callback,
+ .fdma_xmit = &sparx5_fdma_xmit,
};
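
The ops table keeps the common code chip-agnostic: sparx5 and lan969x each install their own FDMA and RGMII callbacks, and the RGMII ones are only reached behind the is_port_rgmii() predicate (always false on sparx5, so its table can leave port_config_rgmii unset). The dispatch pattern, as used elsewhere in this patch:

	const struct sparx5_ops *ops = sparx5->data->ops;

	if (ops->is_port_rgmii(port->portno))		/* chip predicate */
		err = ops->port_config_rgmii(port, conf); /* lan969x only */
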
static const struct sparx5_match_data sparx5_desc = {
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index d5dd953b0a71..fe7d8bcc0cd9 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -112,6 +112,8 @@ enum sparx5_feature {
#define XTR_QUEUE 0
#define INJ_QUEUE 0
+#define FDMA_XTR_CHANNEL 6
+#define FDMA_INJ_CHANNEL 0
#define FDMA_DCB_MAX 64
#define FDMA_RX_DCB_MAX_DBS 15
#define FDMA_TX_DCB_MAX_DBS 1
@@ -157,11 +159,25 @@ struct sparx5_calendar_data {
*/
struct sparx5_rx {
struct fdma fdma;
- struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ struct page_pool *page_pool;
+ union {
+ struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS];
+ };
dma_addr_t dma;
struct napi_struct napi;
struct net_device *ndev;
u64 packets;
+ u8 page_order;
+};
+
+/* Used to store information about TX buffers. */
+struct sparx5_tx_buf {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ bool used;
+ bool ptp;
};
/* Frame DMA transmit state:
@@ -169,6 +185,7 @@ struct sparx5_rx {
*/
struct sparx5_tx {
struct fdma fdma;
+ struct sparx5_tx_buf *dbs;
u64 packets;
u64 dropped;
};
@@ -313,6 +330,7 @@ struct sparx5_ops {
bool (*is_port_5g)(int portno);
bool (*is_port_10g)(int portno);
bool (*is_port_25g)(int portno);
+ bool (*is_port_rgmii)(int portno);
u32 (*get_port_dev_index)(struct sparx5 *sparx5, int port);
u32 (*get_port_dev_bit)(struct sparx5 *sparx5, int port);
u32 (*get_hsch_max_group_rate)(int grp);
@@ -323,6 +341,13 @@ struct sparx5_ops {
irqreturn_t (*ptp_irq_handler)(int irq, void *args);
int (*dsm_calendar_calc)(struct sparx5 *sparx5, u32 taxi,
struct sparx5_calendar_data *data);
+ int (*port_config_rgmii)(struct sparx5_port *port,
+ struct sparx5_port_config *conf);
+ int (*fdma_init)(struct sparx5 *sparx5);
+ int (*fdma_deinit)(struct sparx5 *sparx5);
+ int (*fdma_poll)(struct napi_struct *napi, int weight);
+ int (*fdma_xmit)(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
};
struct sparx5_main_io_resource {
@@ -433,10 +458,16 @@ int sparx5_manual_injection_mode(struct sparx5 *sparx5);
void sparx5_port_inj_timer_setup(struct sparx5_port *port);
/* sparx5_fdma.c */
+int sparx5_fdma_init(struct sparx5 *sparx5);
+int sparx5_fdma_deinit(struct sparx5 *sparx5);
int sparx5_fdma_start(struct sparx5 *sparx5);
int sparx5_fdma_stop(struct sparx5 *sparx5);
-int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb);
+int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight);
+int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb,
+ struct net_device *dev);
irqreturn_t sparx5_fdma_handler(int irq, void *args);
+void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma);
+void sparx5_fdma_injection_mode(struct sparx5 *sparx5);
/* sparx5_mactable.c */
void sparx5_mact_pull_work(struct work_struct *work);
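
The RX bookkeeping union reflects two buffer models: sparx5 keeps preallocated skbs per DCB/DB, while lan969x is expected to use page_pool-backed pages (hence the page_pool pointer and page_order). A hypothetical sketch of how a poll path could pick the model; the lan969x loop itself is not in this hunk:

	/* Hypothetical helper: return the RX buffer for one DB slot. */
	static void *rx_buf_sketch(struct sparx5 *sparx5,
				   struct sparx5_rx *rx, int dcb, int db)
	{
		if (is_sparx5(sparx5))
			return rx->skb[dcb][db];	/* skb-based RX */
		return rx->page[dcb][db];		/* page_pool RX */
	}
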
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index 561344f19062..d9ef4ef137b8 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -37,6 +37,7 @@ enum sparx5_target {
TARGET_FDMA = 117,
TARGET_GCB = 118,
TARGET_HSCH = 119,
+ TARGET_HSIO_WRAP = 120,
TARGET_LRN = 122,
TARGET_PCEP = 129,
TARGET_PCS10G_BR = 132,
@@ -54,6 +55,7 @@ enum sparx5_target {
TARGET_VCAP_SUPER = 326,
TARGET_VOP = 327,
TARGET_XQS = 331,
+ TARGET_DEVRGMII = 392,
NUM_TARGETS = 517
};
@@ -5367,6 +5369,69 @@ extern const struct sparx5_regs *regs;
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_GET(x)\
FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:XMII_CFG */
+#define HSIO_WRAP_XMII_CFG(g) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 0, 0, 1, 4)
+
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG GENMASK(2, 1)
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_SET(x)\
+ FIELD_PREP(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x)
+#define HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG_GET(x)\
+ FIELD_GET(HSIO_WRAP_XMII_CFG_GPIO_XMII_CFG, x)
+
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:RGMII_CFG */
+#define HSIO_WRAP_RGMII_CFG(g) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 4, 0, 1, 4)
+
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG GENMASK(4, 2)
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x)
+#define HSIO_WRAP_RGMII_CFG_TX_CLK_CFG_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_TX_CLK_CFG, x)
+
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST BIT(1)
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x)
+#define HSIO_WRAP_RGMII_CFG_RGMII_TX_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_TX_RST, x)
+
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST BIT(0)
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x)
+#define HSIO_WRAP_RGMII_CFG_RGMII_RX_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_RGMII_CFG_RGMII_RX_RST, x)
+
+/* LAN969X ONLY */
+/* HSIOWRAP:XMII_CFG:DLL_CFG */
+#define HSIO_WRAP_DLL_CFG(g, r) \
+ __REG(TARGET_HSIO_WRAP, 0, 1, 116, g, 2, 20, 12, r, 2, 4)
+
+#define HSIO_WRAP_DLL_CFG_DLL_ENA BIT(19)
+#define HSIO_WRAP_DLL_CFG_DLL_ENA_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_ENA, x)
+#define HSIO_WRAP_DLL_CFG_DLL_ENA_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_ENA, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA BIT(18)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_ENA_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_ENA, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL GENMASK(17, 15)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x)
+#define HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_CLK_SEL, x)
+
+#define HSIO_WRAP_DLL_CFG_DLL_RST BIT(0)
+#define HSIO_WRAP_DLL_CFG_DLL_RST_SET(x)\
+ FIELD_PREP(HSIO_WRAP_DLL_CFG_DLL_RST, x)
+#define HSIO_WRAP_DLL_CFG_DLL_RST_GET(x)\
+ FIELD_GET(HSIO_WRAP_DLL_CFG_DLL_RST, x)
+
/* LRN:COMMON:COMMON_ACCESS_CTRL */
#define LRN_COMMON_ACCESS_CTRL \
__REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
@@ -8110,4 +8175,84 @@ extern const struct sparx5_regs *regs;
#define XQS_CNT(g) \
__REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+/* LAN969X ONLY */
+/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
+#define DEVRGMII_DEV_RST_CTRL(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 0, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL GENMASK(22, 20)
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_SET(x)\
+ FIELD_PREP(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x)
+#define DEVRGMII_DEV_RST_CTRL_SPEED_SEL_GET(x)\
+ FIELD_GET(DEVRGMII_DEV_RST_CTRL_SPEED_SEL, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEVRGMII_MAC_ENA_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 0, 0, 1, 4)
+
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_ENA_CFG_RX_ENA, x)
+#define DEVRGMII_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_ENA_CFG_TX_ENA, x)
+#define DEVRGMII_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_ENA_CFG_TX_ENA, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
+#define DEVRGMII_MAC_TAGS_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 12, 0, 1, 4)
+
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x)
+#define DEVRGMII_MAC_TAGS_CFG_TAG_ID_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_TAG_ID, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(3)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA GENMASK(2, 1)
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_PB_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_PB_ENA, x)
+
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+#define DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
+
+/* LAN969X ONLY */
+/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEVRGMII_MAC_IFG_CFG(t) \
+ __REG(TARGET_DEVRGMII, t, 2, 36, 0, 1, 36, 24, 0, 1, 4)
+
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_TX_IFG, x)
+#define DEVRGMII_MAC_IFG_CFG_TX_IFG_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG2_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_SET(x)\
+ FIELD_PREP(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x)
+#define DEVRGMII_MAC_IFG_CFG_RX_IFG1_GET(x)\
+ FIELD_GET(DEVRGMII_MAC_IFG_CFG_RX_IFG1, x)
+
#endif /* _SPARX5_MAIN_REGS_H_ */
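
All of these accessors are thin FIELD_PREP()/FIELD_GET() wrappers from <linux/bitfield.h>, so packing and unpacking a field is symmetric. A short usage sketch with the DLL clock-select field defined above:

	#include <linux/bitfield.h>

	u32 cfg = HSIO_WRAP_DLL_CFG_DLL_ENA_SET(1) |
		  HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_SET(3);	/* pack bits 17:15 */
	u32 sel = HSIO_WRAP_DLL_CFG_DLL_CLK_SEL_GET(cfg);	/* sel == 3 */
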
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index b6f635d85820..138ac58fae51 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -232,9 +232,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
+ const struct sparx5_ops *ops;
u32 ifh[IFH_LEN];
netdev_tx_t ret;
+ ops = sparx5->data->ops;
+
memset(ifh, 0, IFH_LEN * 4);
sparx5_set_port_ifh(sparx5, ifh, port->portno);
@@ -254,7 +257,7 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
spin_lock(&sparx5->tx_lock);
if (sparx5->fdma_irq > 0)
- ret = sparx5_fdma_xmit(sparx5, ifh, skb);
+ ret = ops->fdma_xmit(sparx5, ifh, skb, dev);
else
ret = sparx5_inject(sparx5, ifh, skb, dev);
spin_unlock(&sparx5->tx_lock);
@@ -264,6 +267,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
if (ret < 0)
goto drop;
+	if (!is_sparx5(sparx5))
+		/* On lan969x, stats and SKB consumption are handled in the
+		 * TX completion loop when TX_OK, so don't go any further.
+		 */
+		return NETDEV_TX_OK;
+
stats->tx_bytes += skb->len;
stats->tx_packets++;
sparx5->tx.packets++;
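
On lan969x the FDMA TX path is completion-based: the skb is consumed and stats are updated when the DCB is reported done, not at xmit time, which is why the early return above skips the stats block. A hypothetical sketch of such a completion step, using the sparx5_tx_buf fields added by this patch; the actual lan969x handler is not part of this hunk:

	/* Hypothetical lan969x TX-completion step, run from the poll loop. */
	static void tx_complete_one_sketch(struct sparx5_tx_buf *db)
	{
		db->dev->stats.tx_packets++;		 /* accounted here... */
		db->dev->stats.tx_bytes += db->skb->len; /* ...not at xmit */

		napi_consume_skb(db->skb, 1);
		db->used = false;
	}
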
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index f8562c1a894d..cfb4b2e17ace 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -32,7 +32,19 @@ sparx5_phylink_mac_select_pcs(struct phylink_config *config,
{
struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
- return &port->phylink_pcs;
+ /* Return the PCS for all the modes that require it. */
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ return &port->phylink_pcs;
+ default:
+ return NULL;
+ }
}
static void sparx5_phylink_mac_config(struct phylink_config *config,
@@ -77,7 +89,7 @@ static struct sparx5_port *sparx5_pcs_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct sparx5_port, phylink_pcs);
}
-static void sparx5_pcs_get_state(struct phylink_pcs *pcs,
+static void sparx5_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct sparx5_port *port = sparx5_pcs_to_port(pcs);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index f9d1a6bb9bff..04bc8fffaf96 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -257,6 +257,15 @@ static int sparx5_port_verify_speed(struct sparx5 *sparx5,
conf->speed != SPEED_25000))
return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ if (conf->speed != SPEED_1000 &&
+ conf->speed != SPEED_100 &&
+ conf->speed != SPEED_10)
+ return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
+ break;
default:
return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
}
@@ -994,6 +1003,7 @@ int sparx5_port_config(struct sparx5 *sparx5,
struct sparx5_port *port,
struct sparx5_port_config *conf)
{
+ bool rgmii = phy_interface_mode_is_rgmii(conf->phy_mode);
bool high_speed_dev = sparx5_is_baser(conf->portmode);
const struct sparx5_ops *ops = sparx5->data->ops;
int err, urgency, stop_wm;
@@ -1002,8 +1012,14 @@ int sparx5_port_config(struct sparx5 *sparx5,
if (err)
return err;
+ if (rgmii) {
+ err = ops->port_config_rgmii(port, conf);
+ if (err)
+ return err;
+ }
+
/* high speed device is already configured */
- if (!high_speed_dev)
+ if (!rgmii && !high_speed_dev)
sparx5_port_config_low_set(sparx5, port, conf);
/* Configure flow control */
@@ -1067,24 +1083,6 @@ int sparx5_port_init(struct sparx5 *sparx5,
if (err)
return err;
- /* Configure MAC vlan awareness */
- err = sparx5_port_max_tags_set(sparx5, port);
- if (err)
- return err;
-
- /* Set Max Length */
- spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
- DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
- sparx5,
- DEV2G5_MAC_MAXLEN_CFG(port->portno));
-
- /* 1G/2G5: Signal Detect configuration */
- spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
- DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
- DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
- sparx5,
- DEV2G5_PCS1G_SD_CFG(port->portno));
-
/* Set Pause WM hysteresis */
spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
@@ -1108,6 +1106,27 @@ int sparx5_port_init(struct sparx5 *sparx5,
ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
sparx5, ANA_CL_FILTER_CTRL(port->portno));
+ if (ops->is_port_rgmii(port->portno))
+ return 0; /* RGMII device - nothing more to configure */
+
+ /* Configure MAC vlan awareness */
+ err = sparx5_port_max_tags_set(sparx5, port);
+ if (err)
+ return err;
+
+ /* Set Max Length */
+ spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
+ DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
+ sparx5,
+ DEV2G5_MAC_MAXLEN_CFG(port->portno));
+
+ /* 1G/2G5: Signal Detect configuration */
+ spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
+ DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
+ DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
+ sparx5,
+ DEV2G5_PCS1G_SD_CFG(port->portno));
+
if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
conf->portmode == PHY_INTERFACE_MODE_SGMII) {
err = sparx5_serdes_set(sparx5, port, conf);
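
The speed check lists all four RGMII variants because phy_interface_mode_is_rgmii() treats them as one family, so sparx5_port_config() only needs the single helper call. The helper is a simple range test over the consecutive RGMII modes in phy.h, sketched here rather than quoted:

	/* Sketch of the family test (assumes the RGMII modes are
	 * consecutive in enum phy_interface_t, as in mainline phy.h).
	 */
	static bool is_rgmii_family_sketch(phy_interface_t mode)
	{
		return mode >= PHY_INTERFACE_MODE_RGMII &&
		       mode <= PHY_INTERFACE_MODE_RGMII_TXID;
	}
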
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
index 9b9bcc6834bc..c8a37468a3d1 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
@@ -40,6 +40,11 @@ static inline bool sparx5_port_is_25g(int portno)
return portno >= 56 && portno <= 63;
}
+static inline bool sparx5_port_is_rgmii(int portno)
+{
+ return false;
+}
+
static inline u32 sparx5_to_high_dev(struct sparx5 *sparx5, int port)
{
const struct sparx5_ops *ops = sparx5->data->ops;
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 558e03301aa8..8d48468cddd7 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -758,12 +758,13 @@ static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data)
{
struct ocelot_dump_ctx *dump = data;
+ struct ndo_fdb_dump_context *ctx = (void *)dump->cb->ctx;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- if (dump->idx < dump->cb->args[2])
+ if (dump->idx < ctx->fdb_idx)
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 98e098c09c03..abba165738a3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2779,7 +2779,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
break;
}
- netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
+ netdev->watchdog_timeo = secs_to_jiffies(5);
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
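
secs_to_jiffies() is the newer readability helper for whole-second timeouts; the conversion is value-preserving:

	netdev->watchdog_timeo = secs_to_jiffies(5);
	/* same jiffies value as msecs_to_jiffies(5 * 1000) */
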
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 1c61390677f7..04f00ea94230 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -18,8 +18,6 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
-#define IONIC_ASIC_TYPE_ELBA 2
-
#define DEVCMD_TIMEOUT 5
#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
@@ -59,7 +57,6 @@ struct ionic {
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
cpumask_var_t *affinity_masks;
struct delayed_work doorbell_check_dwork;
- struct work_struct nb_work;
struct notifier_block nb;
struct rw_semaphore vf_op_lock; /* lock for VF operations */
struct ionic_vf *vfs;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 9b7f78b6cdb1..a2d4336d2766 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -158,6 +158,20 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
25000baseCR_Full);
copper_seen++;
break;
+ case IONIC_XCVR_PID_QSFP_50G_CR2_FC:
+ case IONIC_XCVR_PID_QSFP_50G_CR2:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 50000baseCR2_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, 200000baseCR4_Full);
+ copper_seen++;
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_CR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported, 400000baseCR4_Full);
+ copper_seen++;
+ break;
case IONIC_XCVR_PID_SFP_10GBASE_AOC:
case IONIC_XCVR_PID_SFP_10GBASE_CU:
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -196,6 +210,31 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseSR_Full);
break;
+ case IONIC_XCVR_PID_QSFP_200G_AOC:
+ case IONIC_XCVR_PID_QSFP_200G_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseSR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_FR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseLR4_ER4_FR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_200G_DR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 200000baseDR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_FR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseLR4_ER4_FR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_DR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseDR4_Full);
+ break;
+ case IONIC_XCVR_PID_QSFP_400G_SR4:
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 400000baseSR4_Full);
+ break;
case IONIC_XCVR_PID_SFP_10GBASE_SR:
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
@@ -929,6 +968,7 @@ static int ionic_get_module_info(struct net_device *netdev,
break;
case SFF8024_ID_QSFP_8436_8636:
case SFF8024_ID_QSFP28_8636:
+ case SFF8024_ID_QSFP_PLUS_CMIS:
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
break;
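
ethtool_link_ksettings_add_link_mode() just sets the matching bit in the ksettings bitmap, so each new transceiver PID maps to one supported-mode bit. Roughly what one of the new cases expands to:

	/* ethtool_link_ksettings_add_link_mode(ks, supported,
	 *				       200000baseCR4_Full)
	 * roughly expands to:
	 */
	__set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
		  ks->link_modes.supported);
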
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 9c85c0706c6e..830c8adbfbee 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -1277,7 +1277,10 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_25GBASE_CR_S = 3,
IONIC_XCVR_PID_SFP_25GBASE_CR_L = 4,
IONIC_XCVR_PID_SFP_25GBASE_CR_N = 5,
-
+ IONIC_XCVR_PID_QSFP_50G_CR2_FC = 6,
+ IONIC_XCVR_PID_QSFP_50G_CR2 = 7,
+ IONIC_XCVR_PID_QSFP_200G_CR4 = 8,
+ IONIC_XCVR_PID_QSFP_400G_CR4 = 9,
/* Fiber */
IONIC_XCVR_PID_QSFP_100G_AOC = 50,
IONIC_XCVR_PID_QSFP_100G_ACC = 51,
@@ -1303,6 +1306,15 @@ enum ionic_xcvr_pid {
IONIC_XCVR_PID_SFP_25GBASE_ACC = 71,
IONIC_XCVR_PID_SFP_10GBASE_T = 72,
IONIC_XCVR_PID_SFP_1000BASE_T = 73,
+ IONIC_XCVR_PID_QSFP_200G_AOC = 74,
+ IONIC_XCVR_PID_QSFP_200G_FR4 = 75,
+ IONIC_XCVR_PID_QSFP_200G_DR4 = 76,
+ IONIC_XCVR_PID_QSFP_200G_SR4 = 77,
+ IONIC_XCVR_PID_QSFP_200G_ACC = 78,
+ IONIC_XCVR_PID_QSFP_400G_FR4 = 79,
+ IONIC_XCVR_PID_QSFP_400G_DR4 = 80,
+ IONIC_XCVR_PID_QSFP_400G_SR4 = 81,
+ IONIC_XCVR_PID_QSFP_400G_VR4 = 82,
};
/**
@@ -1404,6 +1416,8 @@ struct ionic_xcvr_status {
*/
union ionic_port_config {
struct {
+#define IONIC_SPEED_400G 400000 /* 400G in Mbps */
+#define IONIC_SPEED_200G 200000 /* 200G in Mbps */
#define IONIC_SPEED_100G 100000 /* 100G in Mbps */
#define IONIC_SPEED_50G 50000 /* 50G in Mbps */
#define IONIC_SPEED_40G 40000 /* 40G in Mbps */
@@ -3209,7 +3223,11 @@ union ionic_adminq_comp {
#define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000
#define IONIC_DEV_CMD_DONE 0x00000001
-#define IONIC_ASIC_TYPE_CAPRI 0
+#define IONIC_ASIC_TYPE_NONE 0
+#define IONIC_ASIC_TYPE_CAPRI 1
+#define IONIC_ASIC_TYPE_ELBA 2
+#define IONIC_ASIC_TYPE_GIGLIO 3
+#define IONIC_ASIC_TYPE_SALINA 4
/**
* struct ionic_doorbell - Doorbell register layout
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 3d3f936779f7..7707a9e53c43 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3265,7 +3265,7 @@ int ionic_lif_alloc(struct ionic *ionic)
lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
le32_to_cpu(lif->identity->eth.min_frame_size));
lif->netdev->max_mtu =
- le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
+ le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
lif->neqs = ionic->neqs_per_lif;
lif->nxqs = ionic->ntxqs_per_lif;
@@ -3804,10 +3804,6 @@ err_out_adminq_deinit:
return err;
}
-static void ionic_lif_notify_work(struct work_struct *ws)
-{
-}
-
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
struct ionic_admin_ctx ctx = {
@@ -3858,8 +3854,6 @@ int ionic_lif_register(struct ionic_lif *lif)
ionic_lif_register_phc(lif);
- INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
-
lif->ionic->nb.notifier_call = ionic_lif_notify;
err = register_netdevice_notifier(&lif->ionic->nb);
@@ -3885,7 +3879,6 @@ void ionic_lif_unregister(struct ionic_lif *lif)
{
if (lif->ionic->nb.notifier_call) {
unregister_netdevice_notifier(&lif->ionic->nb);
- cancel_work_sync(&lif->ionic->nb_work);
lif->ionic->nb.notifier_call = NULL;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 0f817c3f92d8..daf1e82cb76b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -81,8 +81,9 @@ static int ionic_error_to_errno(enum ionic_status_code code)
case IONIC_RC_EQTYPE:
case IONIC_RC_EQID:
case IONIC_RC_EINVAL:
- case IONIC_RC_ENOSUPP:
return -EINVAL;
+ case IONIC_RC_ENOSUPP:
+ return -EOPNOTSUPP;
case IONIC_RC_EPERM:
return -EPERM;
case IONIC_RC_ENOENT:
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 9cff0a8ffb2c..3383ee1dad14 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2832,7 +2832,7 @@ netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
static ssize_t
netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2860,7 +2860,7 @@ netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
static ssize_t
netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2901,7 +2901,7 @@ netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
static ssize_t
netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2922,7 +2922,7 @@ netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
}
static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2946,20 +2946,20 @@ static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
static const struct bin_attribute bin_attr_crb = {
.attr = { .name = "crb", .mode = 0644 },
.size = 0,
- .read = netxen_sysfs_read_crb,
- .write = netxen_sysfs_write_crb,
+ .read_new = netxen_sysfs_read_crb,
+ .write_new = netxen_sysfs_write_crb,
};
static const struct bin_attribute bin_attr_mem = {
.attr = { .name = "mem", .mode = 0644 },
.size = 0,
- .read = netxen_sysfs_read_mem,
- .write = netxen_sysfs_write_mem,
+ .read_new = netxen_sysfs_read_mem,
+ .write_new = netxen_sysfs_write_mem,
};
static ssize_t
netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3082,7 +3082,7 @@ out:
static const struct bin_attribute bin_attr_dimm = {
.attr = { .name = "dimm", .mode = 0644 },
.size = sizeof(struct netxen_dimm_cfg),
- .read = netxen_sysfs_read_dimm,
+ .read_new = netxen_sysfs_read_dimm,
};
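
The .read_new/.write_new members take callbacks with a const struct bin_attribute * argument; they exist so individual drivers can be converted while sysfs still carries the legacy non-const .read/.write. The converted shape, as a minimal sketch:

	static ssize_t foo_read(struct file *filp, struct kobject *kobj,
				const struct bin_attribute *attr,
				char *buf, loff_t off, size_t count)
	{
		return 0;	/* sketch only: no payload */
	}

	static const struct bin_attribute bin_attr_foo = {
		.attr = { .name = "foo", .mode = 0444 },
		.read_new = foo_read,	/* const-correct callback */
	};
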
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 74125188beb8..c0f20464fd1e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -264,7 +264,7 @@ static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
}
static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -281,7 +281,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
}
static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -310,7 +310,7 @@ static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
}
static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -332,7 +332,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
}
static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = kobj_to_dev(kobj);
@@ -396,7 +396,7 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -446,7 +446,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -539,7 +539,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -623,7 +623,7 @@ out:
static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -675,7 +675,7 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -722,7 +722,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -769,7 +769,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -804,7 +804,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -839,7 +839,7 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -868,7 +868,7 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -898,7 +898,7 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -938,7 +938,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -1115,7 +1115,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
@@ -1195,64 +1195,63 @@ static const struct device_attribute dev_attr_beacon = {
static const struct bin_attribute bin_attr_crb = {
.attr = { .name = "crb", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_crb,
- .write = qlcnic_sysfs_write_crb,
+ .read_new = qlcnic_sysfs_read_crb,
+ .write_new = qlcnic_sysfs_write_crb,
};
static const struct bin_attribute bin_attr_mem = {
.attr = { .name = "mem", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_mem,
- .write = qlcnic_sysfs_write_mem,
+ .read_new = qlcnic_sysfs_read_mem,
+ .write_new = qlcnic_sysfs_write_mem,
};
static const struct bin_attribute bin_attr_npar_config = {
.attr = { .name = "npar_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_npar_config,
- .write = qlcnic_sysfs_write_npar_config,
+ .read_new = qlcnic_sysfs_read_npar_config,
+ .write_new = qlcnic_sysfs_write_npar_config,
};
static const struct bin_attribute bin_attr_pci_config = {
.attr = { .name = "pci_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_pci_config,
- .write = NULL,
+ .read_new = qlcnic_sysfs_read_pci_config,
};
static const struct bin_attribute bin_attr_port_stats = {
.attr = { .name = "port_stats", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_get_port_stats,
- .write = qlcnic_sysfs_clear_port_stats,
+ .read_new = qlcnic_sysfs_get_port_stats,
+ .write_new = qlcnic_sysfs_clear_port_stats,
};
static const struct bin_attribute bin_attr_esw_stats = {
.attr = { .name = "esw_stats", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_get_esw_stats,
- .write = qlcnic_sysfs_clear_esw_stats,
+ .read_new = qlcnic_sysfs_get_esw_stats,
+ .write_new = qlcnic_sysfs_clear_esw_stats,
};
static const struct bin_attribute bin_attr_esw_config = {
.attr = { .name = "esw_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_esw_config,
- .write = qlcnic_sysfs_write_esw_config,
+ .read_new = qlcnic_sysfs_read_esw_config,
+ .write_new = qlcnic_sysfs_write_esw_config,
};
static const struct bin_attribute bin_attr_pm_config = {
.attr = { .name = "pm_config", .mode = 0644 },
.size = 0,
- .read = qlcnic_sysfs_read_pm_config,
- .write = qlcnic_sysfs_write_pm_config,
+ .read_new = qlcnic_sysfs_read_pm_config,
+ .write_new = qlcnic_sysfs_write_pm_config,
};
static const struct bin_attribute bin_attr_flash = {
.attr = { .name = "flash", .mode = 0644 },
.size = 0,
- .read = qlcnic_83xx_sysfs_flash_read_handler,
- .write = qlcnic_83xx_sysfs_flash_write_handler,
+ .read_new = qlcnic_83xx_sysfs_flash_read_handler,
+ .write_new = qlcnic_83xx_sysfs_flash_write_handler,
};
#ifdef CONFIG_QLCNIC_HWMON
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index be4c9622618d..7a194a8ab989 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -23,7 +23,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_08,
RTL_GIGA_MAC_VER_09,
RTL_GIGA_MAC_VER_10,
- RTL_GIGA_MAC_VER_11,
+ /* support for RTL_GIGA_MAC_VER_11 has been removed */
/* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */
/* RTL_GIGA_MAC_VER_13 was merged with VER_10 */
RTL_GIGA_MAC_VER_14,
@@ -71,6 +71,8 @@ enum mac_version {
RTL_GIGA_MAC_VER_64,
RTL_GIGA_MAC_VER_65,
RTL_GIGA_MAC_VER_66,
+ RTL_GIGA_MAC_VER_70,
+ RTL_GIGA_MAC_VER_71,
RTL_GIGA_MAC_NONE
};
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 8a3959bb2360..5a5eba49c651 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -56,6 +56,8 @@
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
#define FIRMWARE_8125D_1 "rtl_nic/rtl8125d-1.fw"
+#define FIRMWARE_8125D_2 "rtl_nic/rtl8125d-2.fw"
+#define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw"
#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw"
@@ -103,7 +105,6 @@ static const struct {
[RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
[RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" },
[RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" },
- [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
[RTL_GIGA_MAC_VER_14] = {"RTL8401" },
[RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
[RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
@@ -140,8 +141,10 @@ static const struct {
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
[RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
[RTL_GIGA_MAC_VER_64] = {"RTL8125D", FIRMWARE_8125D_1},
- [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2},
- [RTL_GIGA_MAC_VER_66] = {"RTL8126A", FIRMWARE_8126A_3},
+ [RTL_GIGA_MAC_VER_65] = {"RTL8125D", FIRMWARE_8125D_2},
+ [RTL_GIGA_MAC_VER_66] = {"RTL8125BP", FIRMWARE_8125BP_2},
+ [RTL_GIGA_MAC_VER_70] = {"RTL8126A", FIRMWARE_8126A_2},
+ [RTL_GIGA_MAC_VER_71] = {"RTL8126A", FIRMWARE_8126A_3},
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -622,7 +625,6 @@ struct rtl8169_tc_offsets {
enum rtl_flag {
RTL_FLAG_TASK_RESET_PENDING,
- RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
RTL_FLAG_TASK_TX_TIMEOUT,
RTL_FLAG_MAX
};
@@ -631,6 +633,7 @@ enum rtl_dash_type {
RTL_DASH_NONE,
RTL_DASH_DP,
RTL_DASH_EP,
+ RTL_DASH_25_BP,
};
struct rtl8169_private {
@@ -707,6 +710,8 @@ MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
MODULE_FIRMWARE(FIRMWARE_8125D_1);
+MODULE_FIRMWARE(FIRMWARE_8125D_2);
+MODULE_FIRMWARE(FIRMWARE_8125BP_2);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
MODULE_FIRMWARE(FIRMWARE_8126A_3);
@@ -1229,7 +1234,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1244,7 +1249,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1359,10 +1364,19 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
}
+static void rtl8125bp_driver_start(struct rtl8169_private *tp)
+{
+ r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_START);
+ r8168ep_ocp_write(tp, 0x01, 0x18, 0x00);
+ r8168ep_ocp_write(tp, 0x01, 0x10, 0x01);
+}
+
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_start(tp);
+ else if (tp->dash_type == RTL_DASH_25_BP)
+ rtl8125bp_driver_start(tp);
else
rtl8168ep_driver_start(tp);
}
@@ -1383,10 +1397,19 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
}
+static void rtl8125bp_driver_stop(struct rtl8169_private *tp)
+{
+ r8168ep_ocp_write(tp, 0x01, 0x14, OOB_CMD_DRIVER_STOP);
+ r8168ep_ocp_write(tp, 0x01, 0x18, 0x00);
+ r8168ep_ocp_write(tp, 0x01, 0x10, 0x01);
+}
+
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_stop(tp);
+ else if (tp->dash_type == RTL_DASH_25_BP)
+ rtl8125bp_driver_stop(tp);
else
rtl8168ep_driver_stop(tp);
}
@@ -1409,6 +1432,7 @@ static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
case RTL_DASH_DP:
return r8168dp_check_dash(tp);
case RTL_DASH_EP:
+ case RTL_DASH_25_BP:
return r8168ep_check_dash(tp);
default:
return false;
@@ -1423,6 +1447,8 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
return RTL_DASH_DP;
case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
return RTL_DASH_EP;
+ case RTL_GIGA_MAC_VER_66:
+ return RTL_DASH_25_BP;
default:
return RTL_DASH_NONE;
}
@@ -1575,7 +1601,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_71:
r8169_mod_reg8_cond(tp, Config2, PME_SIGNAL, wolopts);
break;
default:
@@ -2048,7 +2074,7 @@ static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
tp->tx_lpi_timer = timer_val;
r8168_mac_ocp_write(tp, 0xe048, timer_val);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
tp->tx_lpi_timer = timer_val;
RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
break;
@@ -2256,10 +2282,14 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
enum mac_version ver;
} mac_info[] = {
/* 8126A family. */
- { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_66 },
- { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 },
+ { 0x7cf, 0x64a, RTL_GIGA_MAC_VER_71 },
+ { 0x7cf, 0x649, RTL_GIGA_MAC_VER_70 },
+
+ /* 8125BP family. */
+ { 0x7cf, 0x681, RTL_GIGA_MAC_VER_66 },
/* 8125D family. */
+ { 0x7cf, 0x689, RTL_GIGA_MAC_VER_65 },
{ 0x7cf, 0x688, RTL_GIGA_MAC_VER_64 },
/* 8125B family. */
@@ -2335,7 +2365,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168B family. */
{ 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
- /* This one is very old and rare, let's see if anybody complains.
+ /* This one is very old and rare, support has been removed.
* { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
*/
@@ -2527,7 +2557,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2659,7 +2689,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_71:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2902,7 +2932,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2916,7 +2946,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -2942,8 +2972,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
rtl_mod_config5(tp, 0, ASPM_en);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_71:
val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -2954,7 +2984,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -2966,7 +2996,7 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
@@ -2974,8 +3004,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_71:
val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
RTL_W8(tp, INT_CFG0_8125, val8);
break;
@@ -3695,12 +3725,12 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
/* disable new tx descriptor format */
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_71)
RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_71)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
@@ -3718,8 +3748,8 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
- if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
- tp->mac_version == RTL_GIGA_MAC_VER_66)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_71)
r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
else
r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
@@ -3803,7 +3833,6 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401,
[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
@@ -3839,8 +3868,10 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
[RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d,
- [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
- [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8125d,
+ [RTL_GIGA_MAC_VER_66] = rtl_hw_start_8125d,
+ [RTL_GIGA_MAC_VER_70] = rtl_hw_start_8126a,
+ [RTL_GIGA_MAC_VER_71] = rtl_hw_start_8126a,
};
if (hw_configs[tp->mac_version])
@@ -3857,12 +3888,14 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_64:
+ case RTL_GIGA_MAC_VER_65:
+ case RTL_GIGA_MAC_VER_66:
for (i = 0xa00; i < 0xb00; i += 4)
RTL_W32(tp, i, 0);
break;
case RTL_GIGA_MAC_VER_63:
- case RTL_GIGA_MAC_VER_65:
- case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_70:
+ case RTL_GIGA_MAC_VER_71:
for (i = 0xa00; i < 0xa80; i += 4)
RTL_W32(tp, i, 0);
RTL_W16(tp, INT_CFG1_8125, 0x0000);
@@ -4094,7 +4127,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_71:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4251,7 +4284,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
@@ -4679,12 +4712,6 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (status & LinkChg)
phy_mac_interrupt(tp->phydev);
- if (unlikely(status & RxFIFOOver &&
- tp->mac_version == RTL_GIGA_MAC_VER_11)) {
- netif_stop_queue(tp->dev);
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
- }
-
rtl_irq_disable(tp);
napi_schedule(&tp->napi);
out:
@@ -4722,8 +4749,6 @@ static void rtl_task(struct work_struct *work)
reset:
rtl_reset_work(tp);
netif_wake_queue(tp->dev);
- } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
- rtl_reset_work(tp);
}
}
@@ -5102,9 +5127,6 @@ static void rtl_set_irq_mask(struct rtl8169_private *tp)
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
tp->irq_mask |= SYSErr | RxFIFOOver;
- else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
- /* special workaround needed */
- tp->irq_mask |= RxFIFOOver;
}
static int rtl_alloc_irq(struct rtl8169_private *tp)
@@ -5280,7 +5302,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
rtl_hw_init_8125(tp);
break;
default:
@@ -5299,7 +5321,6 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
return JUMBO_7K;
/* RTL8168b */
- case RTL_GIGA_MAC_VER_11:
case RTL_GIGA_MAC_VER_17:
return JUMBO_4K;
/* RTL8168c */
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 5307c6ff4e25..cf95e579c65d 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -276,15 +276,6 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp,
rtl_writephy_batch(phydev, phy_reg_init);
}
-static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- phy_write(phydev, 0x1f, 0x0001);
- phy_set_bits(phydev, 0x16, BIT(0));
- phy_write(phydev, 0x10, 0xf41b);
- phy_write(phydev, 0x1f, 0x0000);
-}
-
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1111,6 +1102,28 @@ static void rtl8125d_hw_phy_config(struct rtl8169_private *tp,
rtl8125_config_eee_phy(phydev);
}
+static void rtl8125bp_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+ rtl8168g_enable_gphy_10m(phydev);
+
+ r8168g_phy_param(phydev, 0x8010, 0x0800, 0x0000);
+
+ phy_write(phydev, 0x1f, 0x0b87);
+ phy_write(phydev, 0x16, 0x8088);
+ phy_modify(phydev, 0x17, 0xff00, 0x9000);
+ phy_write(phydev, 0x16, 0x808f);
+ phy_modify(phydev, 0x17, 0xff00, 0x9000);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ r8168g_phy_param(phydev, 0x8174, 0x2000, 0x1800);
+
+ rtl8125_legacy_force_mode(phydev);
+ rtl8168g_disable_aldps(phydev);
+ rtl8125_config_eee_phy(phydev);
+}
+
static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1136,7 +1149,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_10] = NULL,
- [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
[RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config,
[RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
[RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
@@ -1172,8 +1184,10 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
[RTL_GIGA_MAC_VER_64] = rtl8125d_hw_phy_config,
- [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
- [RTL_GIGA_MAC_VER_66] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_65] = rtl8125d_hw_phy_config,
+ [RTL_GIGA_MAC_VER_66] = rtl8125bp_hw_phy_config,
+ [RTL_GIGA_MAC_VER_70] = rtl8126a_hw_phy_config,
+ [RTL_GIGA_MAC_VER_71] = rtl8126a_hw_phy_config,
};
if (phy_configs[ver])
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
index dbc3f92eebc4..2bbfcad613ab 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase.h
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -13,6 +13,7 @@
#define RTASE_HW_VER_906X_7XA 0x00800000
#define RTASE_HW_VER_906X_7XC 0x04000000
#define RTASE_HW_VER_907XD_V1 0x04800000
+#define RTASE_HW_VER_907XD_VA 0x08000000
#define RTASE_RX_DMA_BURST_256 4
#define RTASE_TX_DMA_BURST_UNLIMITED 7
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index c42c0516656b..3bd11cb56294 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -1725,6 +1725,7 @@ static int rtase_get_settings(struct net_device *dev,
cmd->base.speed = SPEED_5000;
break;
case RTASE_HW_VER_907XD_V1:
+ case RTASE_HW_VER_907XD_VA:
cmd->base.speed = SPEED_10000;
break;
}
@@ -1993,6 +1994,7 @@ static int rtase_check_mac_version_valid(struct rtase_private *tp)
case RTASE_HW_VER_906X_7XA:
case RTASE_HW_VER_906X_7XC:
case RTASE_HW_VER_907XD_V1:
+ case RTASE_HW_VER_907XD_VA:
ret = 0;
break;
}
@@ -2016,7 +2018,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
SET_NETDEV_DEV(dev, &pdev->dev);
ret = pci_enable_device(pdev);
- if (ret < 0)
+ if (ret)
goto err_out_free_dev;
/* make sure PCI base addr 1 is MMIO */
@@ -2032,7 +2034,7 @@ static int rtase_init_board(struct pci_dev *pdev, struct net_device **dev_out,
}
ret = pci_request_regions(pdev, KBUILD_MODNAME);
- if (ret < 0)
+ if (ret)
goto err_out_disable;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -2108,7 +2110,7 @@ static int rtase_init_one(struct pci_dev *pdev,
dev_dbg(&pdev->dev, "Automotive Switch Ethernet driver loaded\n");
ret = rtase_init_board(pdev, &dev, &ioaddr);
- if (ret != 0)
+ if (ret)
return ret;
tp = netdev_priv(dev);
@@ -2118,7 +2120,7 @@ static int rtase_init_one(struct pci_dev *pdev,
/* identify chip attached to board */
ret = rtase_check_mac_version_valid(tp);
- if (ret != 0) {
+ if (ret) {
dev_err(&pdev->dev,
"unknown chip version: 0x%08x, contact rtase maintainers (see MAINTAINERS file)\n",
tp->hw_ver);
@@ -2129,7 +2131,7 @@ static int rtase_init_one(struct pci_dev *pdev,
rtase_init_hardware(tp);
ret = rtase_alloc_interrupt(pdev, tp);
- if (ret < 0) {
+ if (ret) {
dev_err(&pdev->dev, "unable to alloc MSIX/MSI\n");
goto err_out_del_napi;
}
@@ -2174,7 +2176,7 @@ static int rtase_init_one(struct pci_dev *pdev,
netif_carrier_off(dev);
ret = register_netdev(dev);
- if (ret != 0)
+ if (ret)
goto err_out_free_dma;
netdev_dbg(dev, "%pM, IRQ %d\n", dev->dev_addr, dev->irq);
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 9ac6e2aad18f..84d09a8973b7 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -111,25 +111,35 @@ static void rswitch_top_init(struct rswitch_private *priv)
/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
+ u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
unsigned int i;
- /* For ETHA */
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
+ /* Start with empty configuration */
+ for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
+ /* Disable all port features */
+ iowrite32(0, priv->addr + FWPC0(i));
+ /* Disallow L3 forwarding and direct descriptor forwarding */
+ iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask),
+ priv->addr + FWPC1(i));
+ /* Disallow L2 forwarding */
+ iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask),
+ priv->addr + FWPC2(i));
+ /* Disallow port-based forwarding */
iowrite32(0, priv->addr + FWPBFC(i));
}
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ /* For enabled ETHA ports, set up port-based forwarding */
+ rswitch_for_each_enabled_port(priv, i) {
+ /* Port-based forwarding from port i to GWCA port */
+ rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV,
+ FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index)));
+ /* Within GWCA port, forward to Rx queue for port i */
iowrite32(priv->rdev[i]->rx_queue->index,
priv->addr + FWPBFCSDC(GWCA_INDEX, i));
- iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
}
- /* For GWCA */
- iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
- iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
- iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
- iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
+ /* For GWCA port, allow direct descriptor forwarding */
+ rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
}
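Throughout this series, open-coded shift/mask pairs give way to GENMASK()/FIELD_PREP(). As a minimal sketch of what the packing above evaluates to (constants are named after, but distinct from, the ones in this hunk, and the values are illustrative only):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EX_NUM_AGENTS	5
/* field occupies bits 20:16, one bit per forwarding agent */
#define EX_LTHFW	GENMASK(16 + (EX_NUM_AGENTS - 1), 16)

static u32 ex_block_l3_forwarding(void)
{
	u32 all_ports = GENMASK(EX_NUM_AGENTS - 1, 0);	/* 0x1f */

	/* FIELD_PREP() shifts the value into the field: 0x001f0000 */
	return FIELD_PREP(EX_LTHFW, all_ports);
}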
/* Gateway CPU agent block (GWCA) */
@@ -1159,9 +1169,9 @@ static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
- rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
- MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
- rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
+ rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT,
+ FIELD_PREP(MPIC_PSMCS, etha->psmcs) |
+ FIELD_PREP(MPIC_PSMHT, 0x06));
}
static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
@@ -1190,42 +1200,29 @@ static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}
-static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
- int phyad, int devad, int regad, int data)
+static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read,
+ unsigned int mmf, unsigned int pda,
+ unsigned int pra, unsigned int pop,
+ unsigned int prd)
{
- int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
u32 val;
int ret;
- if (devad == 0xffffffff)
- return -ENODEV;
-
- writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);
+ val = MPSM_PSME |
+ FIELD_PREP(MPSM_MFF, mmf) |
+ FIELD_PREP(MPSM_PDA, pda) |
+ FIELD_PREP(MPSM_PRA, pra) |
+ FIELD_PREP(MPSM_POP, pop) |
+ FIELD_PREP(MPSM_PRD, prd);
+ iowrite32(val, etha->addr + MPSM);
- val = MPSM_PSME | MPSM_MFF_C45;
- iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
+ ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0);
if (ret)
return ret;
- rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
-
if (read) {
- writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- if (ret)
- return ret;
-
- ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;
-
- rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
- } else {
- iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
- etha->addr + MPSM);
-
- ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
+ val = ioread32(etha->addr + MPSM);
+ ret = FIELD_GET(MPSM_PRD, val);
}
return ret;
@@ -1235,16 +1232,47 @@ static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
int regad)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
- return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
+
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_READ_C45, 0);
}
static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
int regad, u16 val)
{
struct rswitch_etha *etha = bus->priv;
+ int ret;
- return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
+ ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_ADDRESS, regad);
+ if (ret)
+ return ret;
+
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
+ MPSM_POP_WRITE, val);
+}
+
+static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad)
+{
+ struct rswitch_etha *etha = bus->priv;
+
+ return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_READ_C22, 0);
+}
+
+static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad,
+ int regad, u16 val)
+{
+ struct rswitch_etha *etha = bus->priv;
+
+ return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad,
+ MPSM_POP_WRITE, val);
}
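The split into rswitch_etha_mpsm_op() mirrors the MDIO Clause 45 protocol itself: an ADDRESS frame first latches the target register inside the PHY, then a separate READ or WRITE frame moves the data, while Clause 22 needs only the single data frame (hence the one-call c22 helpers above). A sketch of a C45 read spelled out step by step, using the helper and constants exactly as this hunk introduces them:

static int ex_c45_read(struct rswitch_etha *etha, int phyad, int devad,
		       int regad)
{
	int ret;

	/* frame 1: ADDRESS cycle, latches regad in the PHY's devad space */
	ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, phyad,
				   devad, MPSM_POP_ADDRESS, regad);
	if (ret)
		return ret;

	/* frame 2: READ cycle, returns the word at the latched address */
	return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, phyad,
				    devad, MPSM_POP_READ_C45, 0);
}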
/* Call of_node_put(port) after done */
@@ -1329,6 +1357,8 @@ static int rswitch_mii_register(struct rswitch_device *rdev)
mii_bus->priv = rdev->etha;
mii_bus->read_c45 = rswitch_etha_mii_read_c45;
mii_bus->write_c45 = rswitch_etha_mii_write_c45;
+ mii_bus->read = rswitch_etha_mii_read_c22;
+ mii_bus->write = rswitch_etha_mii_write_c22;
mii_bus->parent = &rdev->priv->pdev->dev;
mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
@@ -1549,7 +1579,7 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
unsigned int i;
- for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+ rswitch_for_each_enabled_port(priv, i) {
phy_exit(priv->rdev[i]->serdes);
rswitch_ether_port_deinit_one(priv->rdev[i]);
}
@@ -1924,9 +1954,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
if (err < 0)
goto out_get_params;
- if (rdev->priv->gwca.speed < rdev->etha->speed)
- rdev->priv->gwca.speed = rdev->etha->speed;
-
err = rswitch_rxdmac_alloc(ndev);
if (err < 0)
goto out_rxdmac;
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index d8d4ed7d7f8b..532192cbca4b 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -12,6 +12,7 @@
#define RSWITCH_MAX_NUM_QUEUES 128
+#define RSWITCH_NUM_AGENTS 5
#define RSWITCH_NUM_PORTS 3
#define rswitch_for_each_enabled_port(priv, i) \
for (i = 0; i < RSWITCH_NUM_PORTS; i++) \
@@ -731,28 +732,21 @@ enum rswitch_etha_mode {
#define MPIC_LSC_100M 1
#define MPIC_LSC_1G 2
#define MPIC_LSC_2_5G 3
-
-#define MDIO_READ_C45 0x03
-#define MDIO_WRITE_C45 0x01
+#define MPIC_PSMCS GENMASK(22, 16)
+#define MPIC_PSMHT GENMASK(26, 24)
#define MPSM_PSME BIT(0)
-#define MPSM_MFF_C45 BIT(2)
-#define MPSM_PRD_SHIFT 16
-#define MPSM_PRD_MASK GENMASK(31, MPSM_PRD_SHIFT)
-
-/* Completion flags */
-#define MMIS1_PAACS BIT(2) /* Address */
-#define MMIS1_PWACS BIT(1) /* Write */
-#define MMIS1_PRACS BIT(0) /* Read */
-#define MMIS1_CLEAR_FLAGS 0xf
-
-#define MPIC_PSMCS_SHIFT 16
-#define MPIC_PSMCS_MASK GENMASK(22, MPIC_PSMCS_SHIFT)
-#define MPIC_PSMCS(val) ((val) << MPIC_PSMCS_SHIFT)
-
-#define MPIC_PSMHT_SHIFT 24
-#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT)
-#define MPIC_PSMHT(val) ((val) << MPIC_PSMHT_SHIFT)
+#define MPSM_MFF BIT(2)
+#define MPSM_MMF_C22 0
+#define MPSM_MMF_C45 1
+#define MPSM_PDA GENMASK(7, 3)
+#define MPSM_PRA GENMASK(12, 8)
+#define MPSM_POP GENMASK(14, 13)
+#define MPSM_POP_ADDRESS 0
+#define MPSM_POP_WRITE 1
+#define MPSM_POP_READ_C22 2
+#define MPSM_POP_READ_C45 3
+#define MPSM_PRD GENMASK(31, 16)
#define MLVC_PLV BIT(16)
@@ -806,6 +800,7 @@ enum rswitch_gwca_mode {
#define CABPPFLC_INIT_VALUE 0x00800080
/* MFWD */
+#define FWPC0(i) (FWPC00 + (i) * 0x10)
#define FWPC0_LTHTA BIT(0)
#define FWPC0_IP4UE BIT(3)
#define FWPC0_IP4TE BIT(4)
@@ -819,15 +814,15 @@ enum rswitch_gwca_mode {
#define FWPC0_MACHMA BIT(27)
#define FWPC0_VLANSA BIT(28)
-#define FWPC0(i) (FWPC00 + (i) * 0x10)
-#define FWPC0_DEFAULT (FWPC0_LTHTA | FWPC0_IP4UE | FWPC0_IP4TE | \
- FWPC0_IP4OE | FWPC0_L2SE | FWPC0_IP4EA | \
- FWPC0_IPDSA | FWPC0_IPHLA | FWPC0_MACSDA | \
- FWPC0_MACHLA | FWPC0_MACHMA | FWPC0_VLANSA)
#define FWPC1(i) (FWPC10 + (i) * 0x10)
+#define FWCP1_LTHFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
#define FWPC1_DDE BIT(0)
-#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPC2(i) (FWPC20 + (i) * 0x10)
+#define FWCP2_LTWFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
+
+#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
+#define FWPBFC_PBDV GENMASK(RSWITCH_NUM_AGENTS - 1, 0)
#define FWPBFCSDC(j, i) (FWPBFCSDC00 + (i) * 0x10 + (j) * 0x04)
@@ -984,7 +979,6 @@ struct rswitch_gwca {
DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
- int speed;
};
#define NUM_QUEUES_PER_NDEV 2
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 4cc7b501135f..ef374a8e05c3 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -217,28 +217,4 @@ _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
(reg) != 0xa1c), \
page)
-/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
- * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
- * collector register.
- */
-static inline void _efx_writed_page_locked(struct efx_nic *efx,
- const efx_dword_t *value,
- unsigned int reg,
- unsigned int page)
-{
- unsigned long flags __attribute__ ((unused));
-
- if (page == 0) {
- spin_lock_irqsave(&efx->biu_lock, flags);
- efx_writed(efx, value, efx_paged_reg(efx, page, reg));
- spin_unlock_irqrestore(&efx->biu_lock, flags);
- } else {
- efx_writed(efx, value, efx_paged_reg(efx, page, reg));
- }
-}
-#define efx_writed_page_locked(efx, value, reg, page) \
- _efx_writed_page_locked(efx, value, \
- reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
- page)
-
#endif /* EFX_IO_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 620ba6ef3514..f70a7b7d6345 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -831,6 +831,7 @@ struct efx_arfs_rule {
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
+ * @net_dev_tracker: reference tracker entry for @net_dev
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
@@ -838,6 +839,7 @@ struct efx_arfs_rule {
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
+ netdevice_tracker net_dev_tracker;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index ab358fe13e1d..4cc83203e188 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -897,7 +897,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
/* Release references */
clear_bit(slot_idx, &efx->rps_slot_map);
- dev_put(req->net_dev);
+ netdev_put(req->net_dev, &req->net_dev_tracker);
}
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -989,7 +989,8 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
}
/* Queue the request */
- dev_hold(req->net_dev = net_dev);
+ req->net_dev = net_dev;
+ netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
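The netdev_hold()/netdev_put() pair above threads a netdevice_tracker through the request, so CONFIG_NET_DEV_REFCNT_TRACKER can attribute a leaked reference to this exact call site instead of an anonymous dev_hold(). A minimal sketch of the pattern (struct and field names follow this hunk):

#include <linux/netdevice.h>

struct ex_rfs_request {
	struct net_device *net_dev;
	netdevice_tracker net_dev_tracker;	/* one tracker per held ref */
};

static void ex_take_ref(struct ex_rfs_request *req, struct net_device *dev)
{
	req->net_dev = dev;
	/* GFP_ATOMIC as in the hunk: the RFS path runs in atomic context */
	netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
}

static void ex_drop_ref(struct ex_rfs_request *req)
{
	/* must pass the same tracker that was registered by the hold */
	netdev_put(req->net_dev, &req->net_dev_tracker);
}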
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
index 9785eff10607..2be3bad3c993 100644
--- a/drivers/net/ethernet/sfc/siena/net_driver.h
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -753,6 +753,7 @@ struct efx_arfs_rule {
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
+ * @net_dev_tracker: reference tracker entry for @net_dev
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
@@ -760,6 +761,7 @@ struct efx_arfs_rule {
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
+ netdevice_tracker net_dev_tracker;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 082e35c6caaa..2839d0e0a9c1 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -888,7 +888,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
/* Release references */
clear_bit(slot_idx, &efx->rps_slot_map);
- dev_put(req->net_dev);
+ netdev_put(req->net_dev, &req->net_dev_tracker);
}
int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@@ -980,7 +980,8 @@ int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
}
/* Queue the request */
- dev_hold(req->net_dev = net_dev);
+ req->net_dev = net_dev;
+ netdev_hold(req->net_dev, &req->net_dev_tracker, GFP_ATOMIC);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 6658536a4e17..4cc85a36a1ab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -154,6 +154,18 @@ config DWMAC_RZN1
the stmmac device driver. This support can make use of a custom MII
converter PCS device.
+config DWMAC_S32
+ tristate "NXP S32G/S32R GMAC support"
+ default ARCH_S32
+ depends on OF && (ARCH_S32 || COMPILE_TEST)
+ help
+ Support for the Ethernet controller on NXP S32CC SoCs.
+
+ This selects the NXP SoC glue layer support for the stmmac
+ device driver. It is used for the GMAC Ethernet controller
+ on S32CC series SoCs, i.e. S32G2xx, S32G3xx and
+ S32R45.
+
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_INTEL_SOCFPGA
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 2389fd261344..b26f0e79c2b3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o
+obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 1367fa5c9b8e..e25db747a81a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -257,6 +257,8 @@ struct stmmac_safety_stats {
#define CSR_F_150M 150000000
#define CSR_F_250M 250000000
#define CSR_F_300M 300000000
+#define CSR_F_500M 500000000
+#define CSR_F_800M 800000000
#define MAC_CSR_H_FRQ_MASK 0x20
@@ -543,18 +545,8 @@ struct dma_features {
#define STMMAC_VLAN_INSERT 0x2
#define STMMAC_VLAN_REPLACE 0x3
-extern const struct stmmac_desc_ops enh_desc_ops;
-extern const struct stmmac_desc_ops ndesc_ops;
-
struct mac_device_info;
-extern const struct stmmac_hwtimestamp stmmac_ptp;
-extern const struct stmmac_hwtimestamp dwmac1000_ptp;
-extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
-
-extern const struct ptp_clock_info stmmac_ptp_clock_ops;
-extern const struct ptp_clock_info dwmac1000_ptp_clock_ops;
-
struct mac_link {
u32 caps;
u32 speed_mask;
@@ -641,8 +633,4 @@ void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
-extern const struct stmmac_mode_ops ring_mode_ops;
-extern const struct stmmac_mode_ops chain_mode_ops;
-extern const struct stmmac_desc_ops dwmac4_desc_ops;
-
#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 83290e707df5..bd4eb187f8c6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -181,24 +181,19 @@ static void dwc_qos_remove(struct platform_device *pdev)
static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct tegra_eqos *eqos = priv;
- unsigned long rate = 125000000;
bool needs_calibration = false;
+ long rate = 125000000;
u32 value;
int err;
switch (speed) {
case SPEED_1000:
- needs_calibration = true;
- rate = 125000000;
- break;
-
case SPEED_100:
needs_calibration = true;
- rate = 25000000;
- break;
+ fallthrough;
case SPEED_10:
- rate = 2500000;
+ rate = rgmii_clock(speed);
break;
default:
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 641f3cd019a3..20d3a202bb8d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -36,6 +36,8 @@
#define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1)
#define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1)
#define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0)
+#define MX93_GPR_ENET_QOS_CLK_SEL_MASK BIT_MASK(0)
+#define MX93_GPR_CLK_SEL_OFFSET (4)
#define DMA_BUS_MODE 0x00001000
#define DMA_BUS_MODE_SFT_RESET (0x1 << 0)
@@ -108,13 +110,21 @@ imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
{
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
- int val;
+ int val, ret;
switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = MX93_GPR_ENET_QOS_INTF_SEL_MII;
break;
case PHY_INTERFACE_MODE_RMII:
+ if (dwmac->rmii_refclk_ext) {
+ ret = regmap_clear_bits(dwmac->intf_regmap,
+ dwmac->intf_reg_off +
+ MX93_GPR_CLK_SEL_OFFSET,
+ MX93_GPR_ENET_QOS_CLK_SEL_MASK);
+ if (ret)
+ return ret;
+ }
val = MX93_GPR_ENET_QOS_INTF_SEL_RMII;
break;
case PHY_INTERFACE_MODE_RGMII:
@@ -186,7 +196,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
{
struct plat_stmmacenet_data *plat_dat;
struct imx_priv_data *dwmac = priv;
- unsigned long rate;
+ long rate;
int err;
plat_dat = dwmac->plat_dat;
@@ -196,17 +206,8 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
(plat_dat->mac_interface == PHY_INTERFACE_MODE_MII))
return;
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_10:
- rate = 2500000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dwmac->dev, "invalid speed %u\n", speed);
return;
}
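Several glue drivers in this series replace the same speed-to-rate switch statement with rgmii_clock(). The in-tree helper is declared in linux/phy.h; the sketch below reflects the mapping these call sites depend on (a 125 MHz RGMII reference clock at gigabit, scaled down for the lower speeds), not necessarily the exact upstream body:

#include <linux/errno.h>
#include <linux/ethtool.h>	/* SPEED_10/100/1000 */

static long ex_rgmii_clock(int speed)
{
	switch (speed) {
	case SPEED_10:
		return 2500000;		/* 2.5 MHz */
	case SPEED_100:
		return 25000000;	/* 25 MHz */
	case SPEED_1000:
		return 125000000;	/* 125 MHz */
	default:
		return -EINVAL;		/* callers test for rate < 0 */
	}
}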
@@ -301,15 +302,11 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
* is required by i.MX8MP, i.MX93.
* is optional for i.MX8DXL.
*/
- dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
+ dwmac->intf_regmap =
+ syscon_regmap_lookup_by_phandle_args(np, "intf_mode", 1,
+ &dwmac->intf_reg_off);
if (IS_ERR(dwmac->intf_regmap))
return PTR_ERR(dwmac->intf_regmap);
-
- err = of_property_read_u32_index(np, "intf_mode", 1, &dwmac->intf_reg_off);
- if (err) {
- dev_err(dev, "Can't get intf mode reg offset (%d)\n", err);
- return err;
- }
}
return err;
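syscon_regmap_lookup_by_phandle_args() folds the regmap lookup and the phandle-argument read into one call, which is why the of_property_read_u32_index() tail disappears here and in the sti/stm32 hunks below. A sketch of the consolidated pattern, with the property name taken from this hunk:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int ex_lookup_intf_regmap(struct device_node *np,
				 struct regmap **map, u32 *reg_off)
{
	/* one cell after the phandle: the register offset */
	*map = syscon_regmap_lookup_by_phandle_args(np, "intf_mode",
						    1, reg_off);
	if (IS_ERR(*map))
		return PTR_ERR(*map);	/* lookup and cell read both checked */

	return 0;
}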
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index d94f0a150e93..ddee6154d40b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -31,27 +31,13 @@ struct intel_dwmac_data {
static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct intel_dwmac *dwmac = priv;
- unsigned long rate;
+ long rate;
int ret;
- rate = clk_get_rate(dwmac->tx_clk);
-
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
-
- case SPEED_100:
- rate = 25000000;
- break;
-
- case SPEED_10:
- rate = 2500000;
- break;
-
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dwmac->dev, "Invalid speed\n");
- break;
+ return;
}
ret = clk_set_rate(dwmac->tx_clk, rate);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 901a3c1959fa..2a5b38723635 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -777,7 +777,7 @@ static void ethqos_ptp_clk_freq_config(struct stmmac_priv *priv)
netdev_err(priv->dev, "Failed to max out clk_ptp_ref: %d\n", err);
plat_dat->clk_ptp_rate = clk_get_rate(plat_dat->clk_ptp_ref);
- netdev_dbg(priv->dev, "PTP rate %d\n", plat_dat->clk_ptp_rate);
+ netdev_dbg(priv->dev, "PTP rate %lu\n", plat_dat->clk_ptp_rate);
}
static int qcom_ethqos_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 8cb374668b74..a4dc89e23a68 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1079,20 +1079,11 @@ static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
+ long rate;
int ret;
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- case 1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
return;
}
@@ -1540,20 +1531,11 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
+ long rate;
int ret;
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- case 1000:
- rate = 125000000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
return;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
new file mode 100644
index 000000000000..9cc0e5817416
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NXP S32G/R GMAC glue layer
+ *
+ * Copyright 2019-2024 NXP
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define GMAC_INTF_RATE_125M 125000000 /* 125MHz */
+
+/* SoC PHY interface control register */
+#define PHY_INTF_SEL_MII 0x00
+#define PHY_INTF_SEL_SGMII 0x01
+#define PHY_INTF_SEL_RGMII 0x02
+#define PHY_INTF_SEL_RMII 0x08
+
+struct s32_priv_data {
+ void __iomem *ioaddr;
+ void __iomem *ctrl_sts;
+ struct device *dev;
+ phy_interface_t *intf_mode;
+ struct clk *tx_clk;
+ struct clk *rx_clk;
+};
+
+static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac)
+{
+ writel(PHY_INTF_SEL_RGMII, gmac->ctrl_sts);
+
+ dev_dbg(gmac->dev, "PHY mode set to %s\n", phy_modes(*gmac->intf_mode));
+
+ return 0;
+}
+
+static int s32_gmac_init(struct platform_device *pdev, void *priv)
+{
+ struct s32_priv_data *gmac = priv;
+ int ret;
+
+ /* Set initial TX interface clock */
+ ret = clk_prepare_enable(gmac->tx_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable tx clock\n");
+ return ret;
+ }
+ ret = clk_set_rate(gmac->tx_clk, GMAC_INTF_RATE_125M);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set tx clock\n");
+ goto err_tx_disable;
+ }
+
+ /* Set initial RX interface clock */
+ ret = clk_prepare_enable(gmac->rx_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable rx clock\n");
+ goto err_tx_disable;
+ }
+ ret = clk_set_rate(gmac->rx_clk, GMAC_INTF_RATE_125M);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set rx clock\n");
+ goto err_txrx_disable;
+ }
+
+ /* Set interface mode */
+ ret = s32_gmac_write_phy_intf_select(gmac);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't set PHY interface mode\n");
+ goto err_txrx_disable;
+ }
+
+ return 0;
+
+err_txrx_disable:
+ clk_disable_unprepare(gmac->rx_clk);
+err_tx_disable:
+ clk_disable_unprepare(gmac->tx_clk);
+ return ret;
+}
+
+static void s32_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct s32_priv_data *gmac = priv;
+
+ clk_disable_unprepare(gmac->tx_clk);
+ clk_disable_unprepare(gmac->rx_clk);
+}
+
+static void s32_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+{
+ struct s32_priv_data *gmac = priv;
+ long tx_clk_rate;
+ int ret;
+
+ tx_clk_rate = rgmii_clock(speed);
+ if (tx_clk_rate < 0) {
+ dev_err(gmac->dev, "Unsupported/Invalid speed: %d\n", speed);
+ return;
+ }
+
+ dev_dbg(gmac->dev, "Set tx clock to %ld Hz\n", tx_clk_rate);
+ ret = clk_set_rate(gmac->tx_clk, tx_clk_rate);
+ if (ret)
+ dev_err(gmac->dev, "Can't set tx clock\n");
+}
+
+static int s32_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat;
+ struct device *dev = &pdev->dev;
+ struct stmmac_resources res;
+ struct s32_priv_data *gmac;
+ int ret;
+
+ gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return -ENOMEM;
+
+ gmac->dev = &pdev->dev;
+
+ ret = stmmac_get_platform_resources(pdev, &res);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get platform resources\n");
+
+ plat = devm_stmmac_probe_config_dt(pdev, res.mac);
+ if (IS_ERR(plat))
+ return dev_err_probe(dev, PTR_ERR(plat),
+ "dt configuration failed\n");
+
+ /* PHY interface mode control reg */
+ gmac->ctrl_sts = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ if (IS_ERR(gmac->ctrl_sts))
+ return dev_err_probe(dev, PTR_ERR(gmac->ctrl_sts),
+ "S32CC config region is missing\n");
+
+ /* tx clock */
+ gmac->tx_clk = devm_clk_get(&pdev->dev, "tx");
+ if (IS_ERR(gmac->tx_clk))
+ return dev_err_probe(dev, PTR_ERR(gmac->tx_clk),
+ "tx clock not found\n");
+
+ /* rx clock */
+ gmac->rx_clk = devm_clk_get(&pdev->dev, "rx");
+ if (IS_ERR(gmac->rx_clk))
+ return dev_err_probe(dev, PTR_ERR(gmac->rx_clk),
+ "rx clock not found\n");
+
+ gmac->intf_mode = &plat->phy_interface;
+ gmac->ioaddr = res.addr;
+
+ /* S32CC core feature set */
+ plat->has_gmac4 = true;
+ plat->pmt = 1;
+ plat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat->rx_fifo_size = 20480;
+ plat->tx_fifo_size = 20480;
+
+ plat->init = s32_gmac_init;
+ plat->exit = s32_gmac_exit;
+ plat->fix_mac_speed = s32_fix_mac_speed;
+
+ plat->bsp_priv = gmac;
+
+ return stmmac_pltfr_probe(pdev, plat, &res);
+}
+
+static const struct of_device_id s32_dwmac_match[] = {
+ { .compatible = "nxp,s32g2-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s32_dwmac_match);
+
+static struct platform_driver s32_dwmac_driver = {
+ .probe = s32_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "s32-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = s32_dwmac_match,
+ },
+};
+module_platform_driver(s32_dwmac_driver);
+
+MODULE_AUTHOR("Jan Petrous (OSS) <jan.petrous@oss.nxp.com>");
+MODULE_DESCRIPTION("NXP S32G/R common chassis GMAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 421666279dd3..0a0a363d3730 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -34,24 +34,13 @@ struct starfive_dwmac {
static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
{
struct starfive_dwmac *dwmac = priv;
- unsigned long rate;
+ long rate;
int err;
- rate = clk_get_rate(dwmac->clk_tx);
-
- switch (speed) {
- case SPEED_1000:
- rate = 125000000;
- break;
- case SPEED_100:
- rate = 25000000;
- break;
- case SPEED_10:
- rate = 2500000;
- break;
- default:
+ rate = rgmii_clock(speed);
+ if (rate < 0) {
dev_err(dwmac->dev, "invalid speed %u\n", speed);
- break;
+ return;
}
err = clk_set_rate(dwmac->clk_tx, rate);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index a6ff02d905a9..f25461c292fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -21,10 +21,7 @@
#include "stmmac_platform.h"
-#define DWMAC_125MHZ 125000000
#define DWMAC_50MHZ 50000000
-#define DWMAC_25MHZ 25000000
-#define DWMAC_2_5MHZ 2500000
#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
iface == PHY_INTERFACE_MODE_RGMII_ID || \
@@ -140,7 +137,7 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
struct sti_dwmac *dwmac = priv;
u32 src = dwmac->tx_retime_src;
u32 reg = dwmac->ctrl_reg;
- u32 freq = 0;
+ long freq = 0;
if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
src = TX_RETIME_SRC_TXCLK;
@@ -153,19 +150,14 @@ static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
}
} else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
/* On GiGa clk source can be either ext or from clkgen */
- if (spd == SPEED_1000) {
- freq = DWMAC_125MHZ;
- } else {
+ freq = rgmii_clock(spd);
+
+ if (spd != SPEED_1000 && freq > 0)
/* Switch to clkgen for these speeds */
src = TX_RETIME_SRC_CLKGEN;
- if (spd == SPEED_100)
- freq = DWMAC_25MHZ;
- else if (spd == SPEED_10)
- freq = DWMAC_2_5MHZ;
- }
}
- if (src == TX_RETIME_SRC_CLKGEN && freq)
+ if (src == TX_RETIME_SRC_CLKGEN && freq > 0)
clk_set_rate(dwmac->clk, freq);
regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK,
@@ -207,16 +199,11 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (res)
dwmac->clk_sel_reg = res->start;
- regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon",
+ 1, &dwmac->ctrl_reg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
- if (err) {
- dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
- return err;
- }
-
err = of_get_phy_mode(np, &dwmac->interface);
if (err && err != -ENODEV) {
dev_err(dev, "Can't get phy-mode\n");
@@ -321,7 +308,6 @@ static void sti_dwmac_remove(struct platform_device *pdev)
clk_disable_unprepare(dwmac->clk);
}
-#ifdef CONFIG_PM_SLEEP
static int sti_dwmac_suspend(struct device *dev)
{
struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
@@ -341,10 +327,9 @@ static int sti_dwmac_resume(struct device *dev)
return stmmac_resume(dev);
}
-#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
- sti_dwmac_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
+ sti_dwmac_resume);
static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
.fix_retime_src = stih4xx_fix_retime_src,
@@ -361,7 +346,7 @@ static struct platform_driver sti_dwmac_driver = {
.remove = sti_dwmac_remove,
.driver = {
.name = "sti-dwmac",
- .pm = &sti_dwmac_pm_ops,
+ .pm = pm_sleep_ptr(&sti_dwmac_pm_ops),
.of_match_table = sti_dwmac_match,
},
};
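The PM rework above is the now-standard idiom: DEFINE_SIMPLE_DEV_PM_OPS() keeps the ops table and callbacks defined unconditionally, and pm_sleep_ptr() compiles the reference away when CONFIG_PM_SLEEP is off, so the #ifdef guards can go. A minimal sketch of the pairing, with hypothetical "ex" names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int ex_suspend(struct device *dev) { return 0; }
static int ex_resume(struct device *dev)  { return 0; }

/* defined unconditionally: no #ifdef CONFIG_PM_SLEEP around the callbacks */
static DEFINE_SIMPLE_DEV_PM_OPS(ex_pm_ops, ex_suspend, ex_resume);

static struct platform_driver ex_driver = {
	.driver = {
		.name = "ex-dwmac",
		/* resolves to NULL when CONFIG_PM_SLEEP is disabled */
		.pm = pm_sleep_ptr(&ex_pm_ops),
	},
};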
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 1e8bac665cc9..1fcb74e9e3ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -419,16 +419,11 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
}
/* Get mode register */
- dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ dwmac->regmap = syscon_regmap_lookup_by_phandle_args(np, "st,syscon",
+ 1, &dwmac->mode_reg);
if (IS_ERR(dwmac->regmap))
return PTR_ERR(dwmac->regmap);
- err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg);
- if (err) {
- dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err);
- return err;
- }
-
if (dwmac->ops->is_mp2)
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c25781874aa7..9ed8620580a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -27,7 +27,7 @@ static void dwmac4_core_init(struct mac_device_info *hw,
struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
- u32 clk_rate;
+ unsigned long clk_rate;
value |= GMAC_CORE_INIT;
@@ -420,10 +420,10 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}
-static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
+static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
- int value = et & STMMAC_ET_MAX;
+ u32 value = et & STMMAC_ET_MAX;
int regval;
/* Program LPI entry timer value into register */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 1ce6f43d545a..806555976496 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -144,4 +144,7 @@
/* TDS3 use for both format (read and write back) */
#define RDES3_OWN BIT(31)
+extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
+extern const struct stmmac_desc_ops dwmac4_desc_ops;
+
#endif /* __DWMAC4_DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index a04a79003692..20027d3c25a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -493,4 +493,9 @@
#define XGMAC_RDES3_TSD BIT(6)
#define XGMAC_RDES3_TSA BIT(4)
+extern const struct stmmac_ops dwxgmac210_ops;
+extern const struct stmmac_ops dwxlgmac2_ops;
+extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
+extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
+
#endif /* __STMMAC_DWXGMAC2_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index a72d336a8350..31bdbab9a46c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -9,6 +9,8 @@
#include "stmmac_fpe.h"
#include "stmmac_ptp.h"
#include "stmmac_est.h"
+#include "dwmac4_descs.h"
+#include "dwxgmac2.h"
static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
{
@@ -265,7 +267,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
- .tc = &dwxgmac_tc_ops,
+ .tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxgmac2_setup,
@@ -288,7 +290,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.ptp = &stmmac_ptp_clock_ops,
.mode = NULL,
- .tc = &dwxgmac_tc_ops,
+ .tc = &dwmac510_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxlgmac2_setup,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 64f8ed67dcc4..0f200b72c225 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -363,7 +363,7 @@ struct stmmac_ops {
void (*set_eee_mode)(struct mac_device_info *hw,
bool en_tx_lpi_clockgating);
void (*reset_eee_mode)(struct mac_device_info *hw);
- void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et);
+ void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, u32 et);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
void (*debug)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -665,6 +665,15 @@ struct stmmac_regs_off {
u32 est_off;
};
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
+
+extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern const struct stmmac_hwtimestamp dwmac1000_ptp;
+
+extern const struct stmmac_mode_ops ring_mode_ops;
+extern const struct stmmac_mode_ops chain_mode_ops;
+
extern const struct stmmac_ops dwmac100_ops;
extern const struct stmmac_dma_ops dwmac100_dma_ops;
extern const struct stmmac_ops dwmac1000_ops;
@@ -676,14 +685,6 @@ extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
extern const struct stmmac_tc_ops dwmac4_tc_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
-extern const struct stmmac_tc_ops dwxgmac_tc_ops;
-extern const struct stmmac_ops dwxgmac210_ops;
-extern const struct stmmac_ops dwxlgmac2_ops;
-extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
-extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
-extern const struct stmmac_mmc_ops dwmac_mmc_ops;
-extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
-extern const struct stmmac_est_ops dwmac510_est_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 5d1ea3e07459..1cba39fb2c44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -139,4 +139,7 @@ struct stmmac_counters {
unsigned int mmc_rx_fpe_fragment_cntr;
};
+extern const struct stmmac_mmc_ops dwmac_mmc_ops;
+extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
+
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 1d86439b8a14..f05cae103d83 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -126,7 +126,7 @@ struct stmmac_rx_queue {
unsigned int cur_rx;
unsigned int dirty_rx;
unsigned int buf_alloc_num;
- u32 rx_zeroc_thresh;
+ unsigned int napi_skb_frag_size;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
unsigned int state_saved;
@@ -266,7 +266,6 @@ struct stmmac_priv {
int sph_cap;
u32 sarc_type;
- unsigned int rx_copybreak;
u32 rx_riwt[MTL_MAX_TX_QUEUES];
int hwts_rx_en;
@@ -307,11 +306,9 @@ struct stmmac_priv {
int clk_csr;
struct timer_list eee_ctrl_timer;
int lpi_irq;
- int eee_enabled;
- int eee_active;
- int tx_lpi_timer;
- int tx_lpi_enabled;
- int eee_tw_timer;
+ u32 tx_lpi_timer;
+ bool eee_enabled;
+ bool eee_active;
bool eee_sw_timer_en;
unsigned int mode;
unsigned int chain_mode;
@@ -407,8 +404,6 @@ void stmmac_dvr_remove(struct device *dev);
int stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
-void stmmac_disable_eee_mode(struct stmmac_priv *priv);
-bool stmmac_eee_init(struct stmmac_priv *priv);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
@@ -418,14 +413,6 @@ static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
return !!priv->xdp_prog;
}
-static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
-{
- if (stmmac_xdp_is_enabled(priv))
- return XDP_PACKET_HEADROOM;
-
- return 0;
-}
-
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
index 7a858c566e7e..d247fa383a6e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
@@ -62,3 +62,5 @@
#define EST_SRWO BIT(0)
#define EST_GCL_DATA 0x00000034
+
+extern const struct stmmac_est_ops dwmac510_est_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1d77389ce953..918a32f8fda8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -654,7 +654,7 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
(*(u32 *)p);
}
}
- if (priv->eee_enabled) {
+ if (priv->dma_cap.eee) {
int val = phylink_get_eee_err(priv->phylink);
if (val)
priv->xstats.phy_eee_wakeup_error_n = val;
@@ -898,9 +898,6 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
- edata->tx_lpi_timer = priv->tx_lpi_timer;
- edata->tx_lpi_enabled = priv->tx_lpi_enabled;
-
return phylink_ethtool_get_eee(priv->phylink, edata);
}
@@ -908,29 +905,11 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int ret;
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
- if (priv->tx_lpi_enabled != edata->tx_lpi_enabled)
- netdev_warn(priv->dev,
- "Setting EEE tx-lpi is not supported\n");
-
- if (!edata->eee_enabled)
- stmmac_disable_eee_mode(priv);
-
- ret = phylink_ethtool_set_eee(priv->phylink, edata);
- if (ret)
- return ret;
-
- if (edata->eee_enabled &&
- priv->tx_lpi_timer != edata->tx_lpi_timer) {
- priv->tx_lpi_timer = edata->tx_lpi_timer;
- stmmac_eee_init(priv);
- }
-
- return 0;
+ return phylink_ethtool_set_eee(priv->phylink, edata);
}
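The branches deleted above duplicated policy that phylib now owns: tx_lpi_enabled and tx_lpi_timer live in phydev->eee_cfg, which the stmmac_main.c hunks further down seed at connect time and read back on link-up. What remains in the MAC driver is a capability gate plus delegation; condensed:

static int ex_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->dma_cap.eee)		/* core lacks the EEE feature */
		return -EOPNOTSUPP;

	/* phylink/phylib validate and store the LPI configuration */
	return phylink_ethtool_set_eee(priv->phylink, edata);
}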
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
@@ -1227,43 +1206,6 @@ static int stmmac_get_ts_info(struct net_device *dev,
return ethtool_op_get_ts_info(dev, info);
}
-static int stmmac_get_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna, void *data)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = priv->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int stmmac_set_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- priv->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static int stmmac_get_mm(struct net_device *ndev,
struct ethtool_mm_state *state)
{
@@ -1390,8 +1332,6 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.set_per_queue_coalesce = stmmac_set_per_queue_coalesce,
.get_channels = stmmac_get_channels,
.set_channels = stmmac_set_channels,
- .get_tunable = stmmac_get_tunable,
- .set_tunable = stmmac_set_tunable,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
.get_mm = stmmac_get_mm,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c81ea8cdfe6e..49b41148d594 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -77,7 +77,6 @@ module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
-#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX 256
@@ -107,15 +106,13 @@ static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
-#define STMMAC_RX_COPYBREAK 256
-
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
#define STMMAC_DEFAULT_LPI_TIMER 1000
-static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
-module_param(eee_timer, int, 0644);
+static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, uint, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
@@ -197,8 +194,6 @@ static void stmmac_verify_args(void)
flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
- if (eee_timer < 0)
- eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
@@ -301,7 +296,7 @@ static void stmmac_global_err(struct stmmac_priv *priv)
*/
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
- u32 clk_rate;
+ unsigned long clk_rate;
clk_rate = clk_get_rate(priv->plat->stmmac_clk);
@@ -325,6 +320,10 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
priv->clk_csr = STMMAC_CSR_150_250M;
else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
priv->clk_csr = STMMAC_CSR_250_300M;
+ else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
+ priv->clk_csr = STMMAC_CSR_300_500M;
+ else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
+ priv->clk_csr = STMMAC_CSR_500_800M;
}
if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
@@ -391,23 +390,17 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
return dirty;
}
-static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
+static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv)
{
- int tx_lpi_timer;
+ stmmac_set_eee_lpi_timer(priv, priv->hw, 0);
+}
- /* Clear/set the SW EEE timer flag based on LPI ET enablement */
- priv->eee_sw_timer_en = en ? 0 : 1;
- tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
- stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
+static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv)
+{
+ stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer);
}
-/**
- * stmmac_enable_eee_mode - check and enter in LPI mode
- * @priv: driver private structure
- * Description: this function is to verify and enter in LPI mode in case of
- * EEE.
- */
-static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
+static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
@@ -417,29 +410,43 @@ static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
- return -EBUSY; /* still unfinished work */
+ return true; /* still unfinished work */
+ }
+
+ return false;
+}
+
+static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
+{
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+}
+
+/**
+ * stmmac_try_to_start_sw_lpi - check and enter LPI mode
+ * @priv: driver private structure
+ * Description: check whether the TX path is idle and, if so, enter LPI
+ * mode when EEE is active.
+ */
+static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
+{
+ if (stmmac_eee_tx_busy(priv)) {
+ stmmac_restart_sw_lpi_timer(priv);
+ return;
}
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
stmmac_set_eee_mode(priv, priv->hw,
priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
- return 0;
}
/**
- * stmmac_disable_eee_mode - disable and exit from LPI mode
+ * stmmac_stop_sw_lpi - stop transmitting LPI
* @priv: driver private structure
- * Description: this function is to exit and disable EEE in case of
- * LPI state is true. This is called by the xmit.
+ * Description: When using software-controlled LPI, stop transmitting LPI state.
*/
-void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
{
- if (!priv->eee_sw_timer_en) {
- stmmac_lpi_entry_timer_config(priv, 0);
- return;
- }
-
stmmac_reset_eee_mode(priv, priv->hw);
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
@@ -456,25 +463,27 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ stmmac_try_to_start_sw_lpi(priv);
}
/**
* stmmac_eee_init - init EEE
* @priv: driver private structure
+ * @active: indicates whether EEE should be enabled.
* Description:
* if the GMAC supports the EEE (from the HW cap reg) and the phy device
* can also manage EEE, this function enables the LPI state and starts the
* timer.
*/
-bool stmmac_eee_init(struct stmmac_priv *priv)
+static void stmmac_eee_init(struct stmmac_priv *priv, bool active)
{
- int eee_tw_timer = priv->eee_tw_timer;
+ priv->eee_active = active;
/* Check if MAC core supports the EEE feature. */
- if (!priv->dma_cap.eee)
- return false;
+ if (!priv->dma_cap.eee) {
+ priv->eee_enabled = false;
+ return;
+ }
mutex_lock(&priv->lock);
@@ -482,22 +491,24 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
if (!priv->eee_active) {
if (priv->eee_enabled) {
netdev_dbg(priv->dev, "disable EEE\n");
- stmmac_lpi_entry_timer_config(priv, 0);
+ priv->eee_sw_timer_en = false;
+ stmmac_disable_hw_lpi_timer(priv);
del_timer_sync(&priv->eee_ctrl_timer);
- stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
+ stmmac_set_eee_timer(priv, priv->hw, 0,
+ STMMAC_DEFAULT_TWT_LS);
if (priv->hw->xpcs)
xpcs_config_eee(priv->hw->xpcs,
priv->plat->mult_fact_100ns,
false);
}
+ priv->eee_enabled = false;
mutex_unlock(&priv->lock);
- return false;
+ return;
}
if (priv->eee_active && !priv->eee_enabled) {
- timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
- eee_tw_timer);
+ STMMAC_DEFAULT_TWT_LS);
if (priv->hw->xpcs)
xpcs_config_eee(priv->hw->xpcs,
priv->plat->mult_fact_100ns,
@@ -505,18 +516,22 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
}
if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
+ /* Use hardware LPI mode */
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
- stmmac_lpi_entry_timer_config(priv, 1);
+ priv->eee_sw_timer_en = false;
+ stmmac_enable_hw_lpi_timer(priv);
} else {
- stmmac_lpi_entry_timer_config(priv, 0);
- mod_timer(&priv->eee_ctrl_timer,
- STMMAC_LPI_T(priv->tx_lpi_timer));
+ /* Use software LPI mode */
+ priv->eee_sw_timer_en = true;
+ stmmac_disable_hw_lpi_timer(priv);
+ stmmac_restart_sw_lpi_timer(priv);
}
+ priv->eee_enabled = true;
+
mutex_unlock(&priv->lock);
netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
- return true;
}
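The tail of the rework separates the two LPI mechanisms explicitly: on GMAC4 cores whose tx_lpi_timer fits the hardware LPI-entry field (STMMAC_ET_MAX), the MAC times LPI entry itself; otherwise a kernel timer drives it. The decision, condensed from the hunk above into a standalone sketch:

static void ex_choose_lpi_timer(struct stmmac_priv *priv)
{
	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		/* MAC hardware counts the TX idle time itself */
		priv->eee_sw_timer_en = false;
		stmmac_enable_hw_lpi_timer(priv);
	} else {
		/* fall back to a mod_timer()-driven software timer */
		priv->eee_sw_timer_en = true;
		stmmac_disable_hw_lpi_timer(priv);
		stmmac_restart_sw_lpi_timer(priv);
	}
}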
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
@@ -973,9 +988,7 @@ static void stmmac_mac_link_down(struct phylink_config *config,
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
stmmac_mac_set(priv, priv->ioaddr, false);
- priv->eee_active = false;
- priv->tx_lpi_enabled = false;
- priv->eee_enabled = stmmac_eee_init(priv);
+ stmmac_eee_init(priv, false);
stmmac_set_eee_pls(priv, priv->hw, false);
if (stmmac_fpe_supported(priv))
@@ -1084,11 +1097,10 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
- priv->eee_active =
- phy_init_eee(phy, !(priv->plat->flags &
- STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
- priv->eee_enabled = stmmac_eee_init(priv);
- priv->tx_lpi_enabled = priv->eee_enabled;
+ phy_eee_rx_clock_stop(phy, !(priv->plat->flags &
+ STMMAC_FLAG_RX_CLK_RUNS_IN_LPI));
+ priv->tx_lpi_timer = phy->eee_cfg.tx_lpi_timer;
+ stmmac_eee_init(priv, phy->enable_tx_lpi);
stmmac_set_eee_pls(priv, priv->hw, true);
}
@@ -1186,6 +1198,16 @@ static int stmmac_init_phy(struct net_device *dev)
ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
}
+ if (ret == 0) {
+ struct ethtool_keee eee;
+
+ /* Configure phylib's copy of the LPI timer */
+ if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
+ eee.tx_lpi_timer = priv->tx_lpi_timer;
+ phylink_ethtool_set_eee(priv->phylink, &eee);
+ }
+ }
+
if (!priv->plat->pmt) {
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
@@ -1202,6 +1224,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
struct stmmac_mdio_bus_data *mdio_bus_data;
int mode = priv->plat->phy_interface;
struct fwnode_handle *fwnode;
+ struct phylink_pcs *pcs;
struct phylink *phylink;
priv->phylink_config.dev = &priv->dev->dev;
@@ -1223,8 +1246,14 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
/* If we have an xpcs, it defines which PHY interfaces are supported. */
if (priv->hw->xpcs)
- xpcs_get_interfaces(priv->hw->xpcs,
- priv->phylink_config.supported_interfaces);
+ pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
+ else
+ pcs = priv->hw->phylink_pcs;
+
+ if (pcs)
+ phy_interface_or(priv->phylink_config.supported_interfaces,
+ priv->phylink_config.supported_interfaces,
+ pcs->supported_interfaces);
fwnode = priv->plat->port_node;
if (!fwnode)
@@ -1307,6 +1336,14 @@ static void stmmac_display_rings(struct stmmac_priv *priv,
stmmac_display_tx_rings(priv, dma_conf);
}
+static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
+{
+ if (stmmac_xdp_is_enabled(priv))
+ return XDP_PACKET_HEADROOM;
+
+ return NET_SKB_PAD;
+}
+
static int stmmac_set_bfsize(int mtu, int bufsize)
{
int ret = bufsize;
@@ -2003,22 +2040,26 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
- unsigned int num_pages;
+ unsigned int dma_buf_sz_pad, num_pages;
unsigned int napi_id;
int ret;
+ dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
+
rx_q->queue_index = queue;
rx_q->priv_data = priv;
+ rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = dma_conf->dma_rx_size;
- num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
- pp_params.order = ilog2(num_pages);
+ pp_params.order = order_base_2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
pp_params.offset = stmmac_rx_offset(priv);
- pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
+ pp_params.max_len = dma_conf->dma_buf_sz;
rx_q->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx_q->page_pool)) {
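
Aside: the buffer sizing above is easy to sanity-check with concrete numbers. The sketch below redoes the arithmetic in plain C for a 1536-byte RX buffer on 4 KiB pages; the NET_SKB_PAD and shared-info sizes are typical 64-bit values assumed for the example, not taken from the diff:

#include <stdio.h>

#define PAGE_SIZE   4096
#define NET_SKB_PAD 32   /* typical default; arch-dependent in the kernel */
#define SHINFO_PAD  320  /* rough SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int dma_buf_sz = 1536; /* example buffer size */
	/* Headroom + payload + shared-info tail, as in the hunk above */
	unsigned int padded = NET_SKB_PAD + dma_buf_sz + SHINFO_PAD;
	unsigned int num_pages = DIV_ROUND_UP(padded, PAGE_SIZE);

	/* napi_build_skb() later needs the full backing size, so the
	 * driver records num_pages * PAGE_SIZE, not just dma_buf_sz.
	 */
	printf("padded=%u pages=%u frag_size=%u\n",
	       padded, num_pages, num_pages * PAGE_SIZE);
	return 0; /* prints: padded=1888 pages=1 frag_size=4096 */
}
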
@@ -2757,11 +2798,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
xmits = budget;
}
- if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
- priv->eee_sw_timer_en) {
- if (stmmac_enable_eee_mode(priv))
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
- }
+ if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
+ stmmac_restart_sw_lpi_timer(priv);
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
@@ -3436,12 +3474,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
else if (ptp_register)
stmmac_ptp_register(priv);
- priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
-
- /* Convert the timer from msec to usec */
- if (!priv->tx_lpi_timer)
- priv->tx_lpi_timer = eee_timer * 1000;
-
if (priv->use_riwt) {
u32 queue;
@@ -3908,6 +3940,10 @@ static int __stmmac_open(struct net_device *dev,
u32 chan;
int ret;
+ /* Initialise the tx lpi timer, converting from msec to usec */
+ if (!priv->tx_lpi_timer)
+ priv->tx_lpi_timer = eee_timer * 1000;
+
ret = pm_runtime_resume_and_get(priv->device);
if (ret < 0)
return ret;
@@ -3923,8 +3959,6 @@ static int __stmmac_open(struct net_device *dev,
}
}
- priv->rx_copybreak = STMMAC_RX_COPYBREAK;
-
buf_sz = dma_conf->dma_buf_sz;
for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
@@ -4024,11 +4058,6 @@ static int stmmac_release(struct net_device *dev)
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
- if (priv->eee_enabled) {
- priv->tx_path_in_lpi_mode = false;
- del_timer_sync(&priv->eee_ctrl_timer);
- }
-
/* Stop TX/RX DMA and clear the descriptors */
stmmac_stop_all_dma(priv);
@@ -4117,11 +4146,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
desc = &tx_q->dma_tx[tx_q->cur_tx];
curr_addr = des + (total_len - tmp_len);
- if (priv->dma_cap.addr64 <= 32)
- desc->des0 = cpu_to_le32(curr_addr);
- else
- stmmac_set_desc_addr(priv, desc, curr_addr);
-
+ stmmac_set_desc_addr(priv, desc, curr_addr);
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len;
@@ -4167,17 +4192,27 @@ static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
* First Descriptor
* --------
* | DES0 |---> buffer1 = L2/L3/L4 header
- * | DES1 |---> TCP Payload (can continue on next descr...)
- * | DES2 |---> buffer 1 and 2 len
+ * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
+ * | | width is 32-bit, but we never use it.
+ * | | Also can be used as the most-significant 8-bits or 16-bits of
+ * | | buffer1 address pointer if the DMA AXI address width is 40-bit
+ * | | or 48-bit, and we always use it.
+ * | DES2 |---> buffer1 len
* | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
* --------
+ * --------
+ * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
+ * | DES3 |
+ * --------
* |
* ...
* |
* --------
- * | DES0 | --| Split TCP Payload on Buffers 1 and 2
- * | DES1 | --|
- * | DES2 | --> buffer 1 and 2 len
+ * | DES0 |---> buffer1 = Split TCP Payload
+ * | DES1 |---> same as the First Descriptor
+ * | DES2 |---> buffer1 len
* | DES3 |
* --------
*
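
Aside: the allocator that consumes this layout walks the payload in fixed-size chunks, one descriptor per chunk, with DES1 carrying the high address bits on every descriptor as described above. A standalone model of the loop, where the print stands in for programming a DMA descriptor and TSO_MAX_BUFF_SIZE is assumed to be SZ_16K - 1 as in the driver:

#include <stdio.h>

#define TSO_MAX_BUFF_SIZE 16383 /* per-descriptor payload limit */

/* Model of stmmac_tso_allocator(): split [des, des + total_len) into
 * chunks of at most TSO_MAX_BUFF_SIZE bytes, one descriptor each.
 */
static void tso_chunks(unsigned long long des, int total_len)
{
	int tmp_len = total_len;

	while (tmp_len > 0) {
		unsigned long long curr_addr = des + (total_len - tmp_len);
		int buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
				TSO_MAX_BUFF_SIZE : tmp_len;

		printf("desc addr=0x%llx len=%d\n", curr_addr, buff_size);
		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

int main(void)
{
	tso_chunks(0x1000, 40000); /* 3 descriptors: 16383 + 16383 + 7234 */
	return 0;
}
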
@@ -4187,15 +4222,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
- int tmp_pay_len = 0, first_tx, nfrags;
unsigned int first_entry, tx_packets;
struct stmmac_txq_stats *txq_stats;
struct stmmac_tx_queue *tx_q;
u32 pay_len, mss, queue;
- dma_addr_t tso_des, des;
+ int i, first_tx, nfrags;
u8 proto_hdr_len, hdr;
+ dma_addr_t des;
bool set_ic;
- int i;
/* Always insert VLAN tag to SKB payload for TSO frames.
*
@@ -4280,24 +4314,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- if (priv->dma_cap.addr64 <= 32) {
- first->des0 = cpu_to_le32(des);
-
- /* Fill start of payload in buff2 of first descriptor */
- if (pay_len)
- first->des1 = cpu_to_le32(des + proto_hdr_len);
-
- /* If needed take extra descriptors to fill the remaining payload */
- tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
- tso_des = des;
- } else {
- stmmac_set_desc_addr(priv, first, des);
- tmp_pay_len = pay_len;
- tso_des = des + proto_hdr_len;
- pay_len = 0;
- }
-
- stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
+ stmmac_set_desc_addr(priv, first, des);
+ stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
+ (nfrags == 0), queue);
/* In case two or more DMA transmit descriptors are allocated for this
* non-paged SKB data, the DMA buffer address should be saved to
@@ -4401,11 +4420,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Complete the first descriptor before granting the DMA */
- stmmac_prepare_tso_tx_desc(priv, first, 1,
- proto_hdr_len,
- pay_len,
- 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
- hdr / 4, (skb->len - proto_hdr_len));
+ stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
+ tx_q->tx_skbuff_dma[first_entry].last_segment,
+ hdr / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
if (mss_desc) {
@@ -4492,7 +4509,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
- stmmac_disable_eee_mode(priv);
+ stmmac_stop_sw_lpi(priv);
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
@@ -5473,7 +5490,7 @@ read_again:
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
buf->page = NULL;
error = 1;
if (!priv->hwts_rx_en)
@@ -5491,10 +5508,6 @@ read_again:
/* Buffer is good. Go on. */
- prefetch(page_address(buf->page) + buf->page_offset);
- if (buf->sec_page)
- prefetch(page_address(buf->sec_page));
-
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
@@ -5516,6 +5529,8 @@ read_again:
dma_sync_single_for_cpu(priv->device, buf->addr,
buf1_len, dma_dir);
+ net_prefetch(page_address(buf->page) +
+ buf->page_offset);
xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
@@ -5569,22 +5584,26 @@ read_again:
}
if (!skb) {
+ unsigned int head_pad_len;
+
/* XDP program may expand or reduce tail */
buf1_len = ctx.xdp.data_end - ctx.xdp.data;
- skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
+ skb = napi_build_skb(page_address(buf->page),
+ rx_q->napi_skb_frag_size);
if (!skb) {
+ page_pool_recycle_direct(rx_q->page_pool,
+ buf->page);
rx_dropped++;
count++;
goto drain_data;
}
/* XDP program may adjust header */
- skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
+ head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
+ skb_reserve(skb, head_pad_len);
skb_put(skb, buf1_len);
-
- /* Data payload copied into SKB, page ready for recycle */
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ skb_mark_for_recycle(skb);
buf->page = NULL;
} else if (buf1_len) {
dma_sync_single_for_cpu(priv->device, buf->addr,
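
Aside: with this change the skb is built directly over the page-pool buffer, so the per-packet work reduces to pointer bookkeeping. A runnable model of the skb_reserve()/skb_put() arithmetic, using illustrative offsets rather than values from the diff:

#include <stdio.h>

int main(void)
{
	unsigned int frag_size = 4096;      /* rx_q->napi_skb_frag_size (example) */
	unsigned int hard_start = 0;        /* ctx.xdp.data_hard_start offset */
	unsigned int data = 256;            /* ctx.xdp.data after XDP ran (example) */
	unsigned int data_end = 256 + 1514; /* ctx.xdp.data_end (example) */

	unsigned int reserve = data - hard_start; /* headroom kept by skb_reserve() */
	unsigned int len = data_end - data;       /* payload framed by skb_put() */

	printf("reserve=%u len=%u tailroom=%u\n",
	       reserve, len, frag_size - reserve - len);
	return 0; /* prints: reserve=256 len=1514 tailroom=2326 */
}
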
@@ -7407,6 +7426,8 @@ int stmmac_dvr_probe(struct device *device,
INIT_WORK(&priv->service_task, stmmac_service_task);
+ timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
+
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances
*/
@@ -7714,7 +7735,7 @@ int stmmac_suspend(struct device *dev)
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- if (priv->eee_enabled) {
+ if (priv->eee_sw_timer_en) {
priv->tx_path_in_lpi_mode = false;
del_timer_sync(&priv->eee_ctrl_timer);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index dc9884130b91..d0e61aa1a495 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -626,7 +626,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dev_info(&pdev->dev, "PTP uses main clock\n");
} else {
plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
- dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
+ dev_dbg(&pdev->dev, "PTP rate %lu\n", plat->clk_ptp_rate);
}
plat->stmmac_rst = devm_reset_control_get_optional(&pdev->dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 4cc70480ce0f..3fe0e3a80e80 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -104,4 +104,7 @@ int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time);
void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv);
+extern const struct ptp_clock_info stmmac_ptp_clock_ops;
+extern const struct ptp_clock_info dwmac1000_ptp_clock_ops;
+
#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 6a79e6a111ed..694d6ee14381 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1284,14 +1284,3 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
.query_caps = tc_query_caps,
.setup_mqprio = tc_setup_dwmac510_mqprio,
};
-
-const struct stmmac_tc_ops dwxgmac_tc_ops = {
- .init = tc_init,
- .setup_cls_u32 = tc_setup_cls_u32,
- .setup_cbs = tc_setup_cbs,
- .setup_cls = tc_setup_cls,
- .setup_taprio = tc_setup_taprio,
- .setup_etf = tc_setup_etf,
- .query_caps = tc_query_caps,
- .setup_mqprio = tc_setup_dwmac510_mqprio,
-};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
index 896dc987d4ef..77ce8cfbe976 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
@@ -4,7 +4,6 @@
#ifndef _STMMAC_XDP_H_
#define _STMMAC_XDP_H_
-#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM)
#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index df6d35d41b97..d7459866d24c 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3303,7 +3303,7 @@ static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
addr &= PAGE_MASK;
pp = &rp->rxhash[h];
for (; (p = *pp) != NULL; pp = &niu_next_page(p)) {
- if (p->index == addr) {
+ if (p->private == addr) {
*link = pp;
goto found;
}
@@ -3318,7 +3318,7 @@ static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
unsigned int h = niu_hash_rxaddr(rp, base);
- page->index = base;
+ page->private = base;
niu_next_page(page) = rp->rxhash[h];
rp->rxhash[h] = page;
}
@@ -3400,11 +3400,11 @@ static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
RCR_ENTRY_PKTBUFSZ_SHIFT];
- if ((page->index + PAGE_SIZE) - rcr_size == addr) {
+ if ((page->private + PAGE_SIZE) - rcr_size == addr) {
*link = niu_next_page(page);
- np->ops->unmap_page(np->device, page->index,
+ np->ops->unmap_page(np->device, page->private,
PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
__free_page(page);
rp->rbr_refill_pending++;
@@ -3469,11 +3469,11 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
append_size = append_size - skb->len;
niu_rx_skb_append(skb, page, off, append_size, rcr_size);
- if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
+ if ((page->private + rp->rbr_block_size) - rcr_size == addr) {
*link = niu_next_page(page);
- np->ops->unmap_page(np->device, page->index,
+ np->ops->unmap_page(np->device, page->private,
PAGE_SIZE, DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
rp->rbr_refill_pending++;
} else
@@ -3538,11 +3538,11 @@ static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
page = rp->rxhash[i];
while (page) {
struct page *next = niu_next_page(page);
- u64 base = page->index;
+ u64 base = page->private;
np->ops->unmap_page(np->device, base, PAGE_SIZE,
DMA_FROM_DEVICE);
- page->index = 0;
+ page->private = 0;
niu_next_page(page) = NULL;
__free_page(page);
@@ -6460,7 +6460,7 @@ static void niu_reset_buffers(struct niu *np)
page = rp->rxhash[j];
while (page) {
struct page *next = niu_next_page(page);
- u64 base = page->index;
+ u64 base = page->private;
base = base >> RBR_DESCR_ADDR_SHIFT;
rp->rbr[k++] = cpu_to_le32(base);
page = next;
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 721d8ed3f302..5e0e4c9ecbb0 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -199,7 +199,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = spl2sw_ethernet_start_xmit,
.ndo_set_rx_mode = spl2sw_ethernet_set_rx_mode,
.ndo_set_mac_address = spl2sw_ethernet_set_mac_address,
- .ndo_do_ioctl = phy_do_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = spl2sw_ethernet_tx_timeout,
};
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 5465bf872734..3fd8fc3121dc 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -32,6 +32,7 @@
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
#include <net/page_pool/helpers.h>
+#include <net/dsa.h>
#include <net/switchdev.h>
#include "cpsw_ale.h"
@@ -762,7 +763,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
ALE_DEFAULT_THREAD_ID, 0);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_DEFAULT_THREAD_ENABLE, 1);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
@@ -1014,6 +1015,15 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
common->usage_count++;
+ /* VLAN aware CPSW mode is incompatible with some DSA tagging schemes.
+ * Therefore disable VLAN_AWARE mode if any of the ports is a DSA Port.
+ */
+ if (netdev_uses_dsa(ndev)) {
+ reg = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+ reg &= ~AM65_CPSW_CTL_VLAN_AWARE;
+ writel(reg, common->cpsw_base + AM65_CPSW_REG_CTL);
+ }
+
am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
am65_cpsw_port_enable_dscp_map(port);
@@ -2559,20 +2569,15 @@ static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
{
u32 mac_lo, mac_hi, offset;
struct regmap *syscon;
- int ret;
- syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
+ syscon = syscon_regmap_lookup_by_phandle_args(of_node, "ti,syscon-efuse",
+ 1, &offset);
if (IS_ERR(syscon)) {
if (PTR_ERR(syscon) == -ENODEV)
return 0;
return PTR_ERR(syscon);
}
- ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
- &offset);
- if (ret)
- return ret;
-
regmap_read(syscon, offset, &mac_lo);
regmap_read(syscon, offset + 4, &mac_hi);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 4ef8cf6ea135..0cb6fa6e5b7d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -635,6 +635,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = phy;
+ phy_disable_eee(slave->phy);
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
@@ -684,7 +686,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
soft_reset("cpsw", &cpsw->regs->soft_reset);
cpsw_ale_start(cpsw->ale);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&cpsw->regs->control);
@@ -1225,7 +1227,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_link_ksettings = cpsw_get_link_ksettings,
.set_link_ksettings = cpsw_set_link_ksettings,
.get_eee = cpsw_get_eee,
- .set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset,
.get_ringparam = cpsw_get_ringparam,
.set_ringparam = cpsw_set_ringparam,
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 21d55a180ef6..bdc4db0d169c 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -434,18 +434,6 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
return -EOPNOTSUPP;
}
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
- else
- return -EOPNOTSUPP;
-}
-
int cpsw_nway_reset(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index a98bcc5eb566..cec0a90659d9 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -554,7 +554,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
soft_reset("cpsw", &cpsw->regs->soft_reset);
cpsw_ale_start(cpsw->ale);
- /* switch to vlan unaware mode */
+ /* switch to vlan aware mode */
cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&cpsw->regs->control);
@@ -778,6 +778,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = phy;
+ phy_disable_eee(slave->phy);
+
phy_attached_info(slave->phy);
phy_start(slave->phy);
@@ -1209,7 +1211,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.get_link_ksettings = cpsw_get_link_ksettings,
.set_link_ksettings = cpsw_set_link_ksettings,
.get_eee = cpsw_get_eee,
- .set_eee = cpsw_set_eee,
.nway_reset = cpsw_nway_reset,
.get_ringparam = cpsw_get_ringparam,
.set_ringparam = cpsw_set_ringparam,
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 1f448290b9f4..f2fc55d9295d 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -497,7 +497,6 @@ int cpsw_get_link_ksettings(struct net_device *ndev,
int cpsw_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *ecmd);
int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata);
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata);
int cpsw_nway_reset(struct net_device *ndev);
void cpsw_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ering,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index d76fe6d05e10..00ed97860547 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -561,61 +561,134 @@ const struct icss_iep_clockops prueth_iep_clockops = {
static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- int port_mask = BIT(emac->port_id);
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ int port_mask;
+ u8 vlan_id;
- port_mask |= icssg_fdb_lookup(emac, addr, 0);
- icssg_fdb_add_del(emac, addr, 0, port_mask, true);
- icssg_vtbl_modify(emac, 0, port_mask, port_mask, true);
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ emac = netdev_priv(real_dev);
+
+ port_mask = BIT(emac->port_id) | icssg_fdb_lookup(emac, addr, vlan_id);
+ icssg_fdb_add_del(emac, addr, vlan_id, port_mask, true);
+ icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, true);
return 0;
}
static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- int port_mask = BIT(emac->port_id);
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
int other_port_mask;
+ int port_mask;
+ u8 vlan_id;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_MAC;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ emac = netdev_priv(real_dev);
- other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0);
+ port_mask = BIT(emac->port_id);
+ other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, vlan_id);
- icssg_fdb_add_del(emac, addr, 0, port_mask, false);
- icssg_vtbl_modify(emac, 0, port_mask, port_mask, false);
+ icssg_fdb_add_del(emac, addr, vlan_id, port_mask, false);
+ icssg_vtbl_modify(emac, vlan_id, port_mask, port_mask, false);
if (other_port_mask) {
- icssg_fdb_add_del(emac, addr, 0, other_port_mask, true);
- icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true);
+ icssg_fdb_add_del(emac, addr, vlan_id, other_port_mask, true);
+ icssg_vtbl_modify(emac, vlan_id, other_port_mask,
+ other_port_mask, true);
}
return 0;
}
-static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
+ const u8 *addr, u8 vid, bool add)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct prueth *prueth = emac->prueth;
-
- icssg_fdb_add_del(emac, addr, prueth->default_vlan,
+ icssg_fdb_add_del(emac, addr, vid,
ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
- ICSSG_FDB_ENTRY_BLOCK, true);
+ ICSSG_FDB_ENTRY_BLOCK, add);
+
+ if (add)
+ icssg_vtbl_modify(emac, vid, BIT(emac->port_id),
+ BIT(emac->port_id), add);
+}
+
+static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+ emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
+ if (!emac)
+ return -EINVAL;
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ true);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, true);
+ }
- icssg_vtbl_modify(emac, emac->port_vlan, BIT(emac->port_id),
- BIT(emac->port_id), true);
return 0;
}
static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct prueth *prueth = emac->prueth;
+ struct net_device *real_dev;
+ struct prueth_emac *emac;
+ u8 vlan_id, i;
+
+ vlan_id = is_vlan_dev(ndev) ? vlan_dev_vlan_id(ndev) : PRUETH_DFLT_VLAN_HSR;
+ real_dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+
+ if (is_hsr_master(real_dev)) {
+ for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
+ emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
+ if (!emac)
+ return -EINVAL;
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
+ false);
+ }
+ } else {
+ emac = netdev_priv(real_dev);
+ icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id, false);
+ }
- icssg_fdb_add_del(emac, addr, prueth->default_vlan,
- ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
- ICSSG_FDB_ENTRY_BLOCK, false);
+ return 0;
+}
+
+static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
+ void *args)
+{
+ struct prueth_emac *emac = args;
+
+ if (!vdev || !vid)
+ return 0;
+
+ netif_addr_lock_bh(vdev);
+ __hw_addr_sync_multiple(&emac->vlan_mcast_list[vid], &vdev->mc,
+ vdev->addr_len);
+ netif_addr_unlock_bh(vdev);
+
+ if (emac->prueth->is_hsr_offload_mode)
+ __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
+ icssg_prueth_hsr_add_mcast,
+ icssg_prueth_hsr_del_mcast);
+ else
+ __hw_addr_sync_dev(&emac->vlan_mcast_list[vid], vdev,
+ icssg_prueth_add_mcast,
+ icssg_prueth_del_mcast);
return 0;
}
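
Aside: per VID, the helper above keeps a shadow list so __hw_addr_sync_dev() only emits the add/del delta against the hardware FDB. A toy, runnable model of that diffing; fixed-size string arrays replace struct netdev_hw_addr lists, and vid=5 is an arbitrary example:

#include <stdio.h>
#include <string.h>

#define N 4
typedef char addr_t[18];

static int in(const addr_t *l, int n, const char *a)
{
	for (int i = 0; i < n; i++)
		if (!strcmp(l[i], a))
			return 1;
	return 0;
}

int main(void)
{
	addr_t shadow[N] = { "01:00:5e:00:00:01" };
	addr_t want[N]   = { "01:00:5e:00:00:01", "01:00:5e:00:00:fb" };
	int n_shadow = 1, n_want = 2;

	/* New entries trigger the add callback (icssg_prueth_add_mcast) */
	for (int i = 0; i < n_want; i++)
		if (!in(shadow, n_shadow, want[i]))
			printf("fdb add vid=5 %s\n", want[i]);
	/* Vanished entries trigger the del callback (icssg_prueth_del_mcast) */
	for (int i = 0; i < n_shadow; i++)
		if (!in(want, n_want, shadow[i]))
			printf("fdb del vid=5 %s\n", shadow[i]);
	return 0;
}
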
@@ -857,12 +930,22 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work)
return;
}
- if (emac->prueth->is_hsr_offload_mode)
+ if (emac->prueth->is_hsr_offload_mode) {
__dev_mc_sync(ndev, icssg_prueth_hsr_add_mcast,
icssg_prueth_hsr_del_mcast);
- else
+ if (rtnl_trylock()) {
+ vlan_for_each(emac->prueth->hsr_dev,
+ icssg_update_vlan_mcast, emac);
+ rtnl_unlock();
+ }
+ } else {
__dev_mc_sync(ndev, icssg_prueth_add_mcast,
icssg_prueth_del_mcast);
+ if (rtnl_trylock()) {
+ vlan_for_each(ndev, icssg_update_vlan_mcast, emac);
+ rtnl_unlock();
+ }
+ }
}
/**
@@ -907,19 +990,19 @@ static int emac_ndo_vlan_rx_add_vid(struct net_device *ndev,
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ int port_mask = BIT(emac->port_id);
int untag_mask = 0;
- int port_mask;
- if (prueth->is_hsr_offload_mode) {
- port_mask = BIT(PRUETH_PORT_HOST) | BIT(emac->port_id);
- untag_mask = 0;
+ if (prueth->is_hsr_offload_mode)
+ port_mask |= BIT(PRUETH_PORT_HOST);
- netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
- vid, port_mask, untag_mask);
+ __hw_addr_init(&emac->vlan_mcast_list[vid]);
+ netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
+ icssg_set_pvid(emac->prueth, vid, emac->port_id);
- icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
- icssg_set_pvid(emac->prueth, vid, emac->port_id);
- }
return 0;
}
@@ -928,18 +1011,16 @@ static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ int port_mask = BIT(emac->port_id);
int untag_mask = 0;
- int port_mask;
- if (prueth->is_hsr_offload_mode) {
+ if (prueth->is_hsr_offload_mode)
port_mask = BIT(PRUETH_PORT_HOST);
- untag_mask = 0;
- netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
- vid, port_mask, untag_mask);
+ netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X untag_mask %X\n",
+ vid, port_mask, untag_mask);
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
- icssg_vtbl_modify(emac, vid, port_mask, untag_mask, false);
- }
return 0;
}
@@ -1254,7 +1335,7 @@ static int prueth_netdevice_port_link(struct net_device *ndev,
if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
prueth->br_members & BIT(PRUETH_PORT_MII1)) {
prueth->is_switch_mode = true;
- prueth->default_vlan = 1;
+ prueth->default_vlan = PRUETH_DFLT_VLAN_SW;
emac->port_vlan = prueth->default_vlan;
icssg_change_mode(prueth);
}
@@ -1312,7 +1393,7 @@ static int prueth_hsr_port_link(struct net_device *ndev)
NETIF_PRUETH_HSR_OFFLOAD_FEATURES))
return -EOPNOTSUPP;
prueth->is_hsr_offload_mode = true;
- prueth->default_vlan = 1;
+ prueth->default_vlan = PRUETH_DFLT_VLAN_HSR;
emac0->port_vlan = prueth->default_vlan;
emac1->port_vlan = prueth->default_vlan;
icssg_change_mode(prueth);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 5473315ea204..329b46e9ee53 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -83,6 +83,12 @@
#define ICSS_CMD_ADD_FILTER 0x7
#define ICSS_CMD_ADD_MAC 0x8
+/* VLAN Filtering Related MACROs */
+#define PRUETH_DFLT_VLAN_HSR 1
+#define PRUETH_DFLT_VLAN_SW 1
+#define PRUETH_DFLT_VLAN_MAC 0
+#define MAX_VLAN_ID 256
+
/* In switch mode there are 3 real ports i.e. 3 mac addrs.
* however Linux sees only the host side port. The other 2 ports
* are the switch ports.
@@ -200,6 +206,8 @@ struct prueth_emac {
/* RX IRQ Coalescing Related */
struct hrtimer rx_hrtimer;
unsigned long rx_pace_timeout_ns;
+
+ struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
};
/**
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 3dc86397c367..64a19ff39562 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -1031,8 +1031,6 @@ static int prueth_probe(struct platform_device *pdev)
(unsigned long)prueth->msmcram.va);
prueth->msmcram.size = msmc_ram_size;
memset_io(prueth->msmcram.va, 0, msmc_ram_size);
- dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
- prueth->msmcram.va, prueth->msmcram.size);
prueth->iep0 = icss_iep_get_idx(np, 0);
if (IS_ERR(prueth->iep0)) {
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index dd4a07c97eee..5aa93144a4f5 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2320,7 +2320,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
if (ret < 0)
goto out_free_tmp_vptr_1;
- napi_disable(&vptr->napi);
+ netdev_lock(dev);
+ napi_disable_locked(&vptr->napi);
spin_lock_irqsave(&vptr->lock, flags);
@@ -2342,12 +2343,13 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
velocity_give_many_rx_descs(vptr);
- napi_enable(&vptr->napi);
+ napi_enable_locked(&vptr->napi);
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
spin_unlock_irqrestore(&vptr->lock, flags);
+ netdev_unlock(dev);
velocity_free_rings(tmp_vptr);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index ae743991117c..07850ad331dd 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2337,11 +2337,12 @@ static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
}
static void axienet_pcs_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
- phylink_mii_c22_pcs_get_state(pcs_phy, state);
+ phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
}
static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index bc658bc60885..642155cb8315 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -235,7 +235,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
vni_to_tunnel_id(gnvh->vni),
gnvh->opt_len * 4);
if (!tun_dst) {
- DEV_STATS_INC(geneve->dev, rx_dropped);
+ dev_dstats_rx_dropped(geneve->dev);
goto drop;
}
/* Update tunnel dst according to Geneve options. */
@@ -322,7 +322,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
len = skb->len;
err = gro_cells_receive(&geneve->gro_cells, skb);
if (likely(err == NET_RX_SUCCESS))
- dev_sw_netstats_rx_add(geneve->dev, len);
+ dev_dstats_rx_add(geneve->dev, len);
return;
drop:
@@ -387,14 +387,14 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely((!geneve->cfg.inner_proto_inherit &&
inner_proto != htons(ETH_P_TEB)))) {
- DEV_STATS_INC(geneve->dev, rx_dropped);
+ dev_dstats_rx_dropped(geneve->dev);
goto drop;
}
opts_len = geneveh->opt_len * 4;
if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto,
!net_eq(geneve->net, dev_net(geneve->dev)))) {
- DEV_STATS_INC(geneve->dev, rx_dropped);
+ dev_dstats_rx_dropped(geneve->dev);
goto drop;
}
@@ -1023,7 +1023,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
dev_kfree_skb(skb);
- DEV_STATS_INC(dev, tx_dropped);
+ dev_dstats_tx_dropped(dev);
return NETDEV_TX_OK;
}
} else {
@@ -1202,7 +1202,7 @@ static void geneve_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
- dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
/* MTU range: 68 - (something less than 65535) */
dev->min_mtu = ETH_MIN_MTU;
/* The max_mtu calculation does not take account of GENEVE
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index ee2c3cf4df36..da3a97a65507 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -799,6 +799,12 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlying device to change its type. */
return NOTIFY_BAD;
+
+ case NETDEV_NOTIFY_PEERS:
+ case NETDEV_BONDING_FAILOVER:
+ case NETDEV_RESEND_IGMP:
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode)
+ call_netdevice_notifiers(event, ipvlan->dev);
}
return NOTIFY_DONE;
}
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 1993b90b1a5f..c8840c3b9a1b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -264,13 +264,12 @@ static int __init blackhole_netdev_init(void)
if (!blackhole_netdev)
return -ENOMEM;
- rtnl_lock();
+ rtnl_net_lock(&init_net);
dev_init_scheduler(blackhole_netdev);
dev_activate(blackhole_netdev);
- rtnl_unlock();
+ rtnl_net_unlock(&init_net);
blackhole_netdev->flags |= IFF_UP | IFF_RUNNING;
- dev_net_set(blackhole_netdev, &init_net);
return 0;
}
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index d2b3f5a59141..e3dcdeacc12c 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -177,8 +177,7 @@ static struct mctp_i2c_client *mctp_i2c_new_client(struct i2c_client *client)
return mcli;
err:
if (mcli) {
- if (mcli->client)
- i2c_unregister_device(mcli->client);
+ i2c_unregister_device(mcli->client);
kfree(mcli);
}
return ERR_PTR(rc);
diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index 2beb83154d39..cb53dccbde1a 100644
--- a/drivers/net/mdio/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
@@ -17,37 +17,20 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
{
struct cavium_mdiobus *bus;
struct mii_bus *mii_bus;
- struct resource *res_mem;
- resource_size_t mdio_phys;
- resource_size_t regsize;
union cvmx_smix_en smi_en;
- int err = -ENOENT;
+ int err;
mii_bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*bus));
if (!mii_bus)
return -ENOMEM;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res_mem == NULL) {
- dev_err(&pdev->dev, "found no memory resource\n");
- return -ENXIO;
- }
-
bus = mii_bus->priv;
bus->mii_bus = mii_bus;
- mdio_phys = res_mem->start;
- regsize = resource_size(res_mem);
- if (!devm_request_mem_region(&pdev->dev, mdio_phys, regsize,
- res_mem->name)) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- return -ENXIO;
- }
-
- bus->register_base = devm_ioremap(&pdev->dev, mdio_phys, regsize);
- if (!bus->register_base) {
+ bus->register_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bus->register_base)) {
dev_err(&pdev->dev, "dev_ioremap failed\n");
- return -ENOMEM;
+ return PTR_ERR(bus->register_base);
}
smi_en.u64 = 0;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 4ea44a2f48f7..86ab4a42769a 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -36,6 +36,7 @@
#include <linux/inet.h>
#include <linux/configfs.h>
#include <linux/etherdevice.h>
+#include <linux/u64_stats_sync.h>
#include <linux/utsname.h>
#include <linux/rtnetlink.h>
@@ -90,6 +91,12 @@ static DEFINE_MUTEX(target_cleanup_list_lock);
*/
static struct console netconsole_ext;
+struct netconsole_target_stats {
+ u64_stats_t xmit_drop_count;
+ u64_stats_t enomem_count;
+ struct u64_stats_sync syncp;
+};
+
/**
* struct netconsole_target - Represents a configured netconsole target.
* @list: Links this target into the target_list.
@@ -97,6 +104,7 @@ static struct console netconsole_ext;
* @userdata_group: Links to the userdata configfs hierarchy
* @userdata_complete: Cached, formatted string of append
* @userdata_length: String length of userdata_complete
+ * @stats: Packet send stats for the target. Used for debugging.
* @enabled: On / off knob to enable / disable target.
* Visible from userspace (read-write).
* We maintain a strict 1:1 correspondence between this and
@@ -124,6 +132,7 @@ struct netconsole_target {
char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS];
size_t userdata_length;
#endif
+ struct netconsole_target_stats stats;
bool enabled;
bool extended;
bool release;
@@ -262,6 +271,7 @@ static void netconsole_process_cleanups_core(void)
* | remote_ip
* | local_mac
* | remote_mac
+ * | transmit_errors
* | userdata/
* | <key>/
* | value
@@ -371,6 +381,21 @@ static ssize_t remote_mac_show(struct config_item *item, char *buf)
return sysfs_emit(buf, "%pM\n", to_target(item)->np.remote_mac);
}
+static ssize_t transmit_errors_show(struct config_item *item, char *buf)
+{
+ struct netconsole_target *nt = to_target(item);
+ u64 xmit_drop_count, enomem_count;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&nt->stats.syncp);
+ xmit_drop_count = u64_stats_read(&nt->stats.xmit_drop_count);
+ enomem_count = u64_stats_read(&nt->stats.enomem_count);
+ } while (u64_stats_fetch_retry(&nt->stats.syncp, start));
+
+ return sysfs_emit(buf, "%llu\n", xmit_drop_count + enomem_count);
+}
+
/*
* This one is special -- targets created through the configfs interface
* are not enabled (and the corresponding netpoll activated) by default.
@@ -705,7 +730,7 @@ static void update_userdata(struct netconsole_target *nt)
struct userdatum *udm_item;
struct config_item *item;
- if (child_count >= MAX_USERDATA_ITEMS)
+ if (WARN_ON_ONCE(child_count >= MAX_USERDATA_ITEMS))
break;
child_count++;
@@ -842,6 +867,7 @@ CONFIGFS_ATTR(, remote_ip);
CONFIGFS_ATTR_RO(, local_mac);
CONFIGFS_ATTR(, remote_mac);
CONFIGFS_ATTR(, release);
+CONFIGFS_ATTR_RO(, transmit_errors);
static struct configfs_attribute *netconsole_target_attrs[] = {
&attr_enabled,
@@ -854,6 +880,7 @@ static struct configfs_attribute *netconsole_target_attrs[] = {
&attr_remote_ip,
&attr_local_mac,
&attr_remote_mac,
+ &attr_transmit_errors,
NULL,
};
@@ -1058,6 +1085,33 @@ static struct notifier_block netconsole_netdev_notifier = {
.notifier_call = netconsole_netdev_event,
};
+/**
+ * send_udp - Wrapper for netpoll_send_udp that counts errors
+ * @nt: target to send message to
+ * @msg: message to send
+ * @len: length of message
+ *
+ * Calls netpoll_send_udp and classifies the return value. If an error
+ * occurred it increments statistics in nt->stats accordingly.
+ * Errors are only counted when CONFIG_NETCONSOLE_DYNAMIC is enabled.
+ */
+static void send_udp(struct netconsole_target *nt, const char *msg, int len)
+{
+ int result = netpoll_send_udp(&nt->np, msg, len);
+
+ if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC)) {
+ if (result == NET_XMIT_DROP) {
+ u64_stats_update_begin(&nt->stats.syncp);
+ u64_stats_inc(&nt->stats.xmit_drop_count);
+ u64_stats_update_end(&nt->stats.syncp);
+ } else if (result == -ENOMEM) {
+ u64_stats_update_begin(&nt->stats.syncp);
+ u64_stats_inc(&nt->stats.enomem_count);
+ u64_stats_update_end(&nt->stats.syncp);
+ }
+ }
+}
+
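
Aside: the counters above follow the standard u64_stats pattern: writers bracket updates with u64_stats_update_begin()/end(), and readers loop with u64_stats_fetch_begin()/retry() until they observe a consistent snapshot, which matters on 32-bit kernels where a 64-bit counter update is not atomic. A runnable model of the protocol with a plain sequence counter (memory barriers elided for brevity):

#include <stdio.h>

/* The writer bumps the sequence to odd before touching the counters and
 * back to even after; a reader retries if it saw an odd value or the
 * sequence changed across its reads.
 */
struct stats {
	unsigned int seq;
	unsigned long long xmit_drop_count;
	unsigned long long enomem_count;
};

static void writer_inc_drop(struct stats *s)
{
	s->seq++;             /* u64_stats_update_begin() */
	s->xmit_drop_count++;
	s->seq++;             /* u64_stats_update_end() */
}

static unsigned long long reader_total(const struct stats *s)
{
	unsigned int start;
	unsigned long long drops, enomem;

	do {
		start = s->seq;                 /* u64_stats_fetch_begin() */
		drops = s->xmit_drop_count;
		enomem = s->enomem_count;
	} while (start & 1 || start != s->seq); /* u64_stats_fetch_retry() */

	return drops + enomem;
}

int main(void)
{
	struct stats s = { 0 };

	writer_inc_drop(&s);
	printf("transmit_errors=%llu\n", reader_total(&s)); /* prints 1 */
	return 0;
}
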
static void send_msg_no_fragmentation(struct netconsole_target *nt,
const char *msg,
int msg_len,
@@ -1085,7 +1139,7 @@ static void send_msg_no_fragmentation(struct netconsole_target *nt,
MAX_PRINT_CHUNK - msg_len,
"%s", userdata);
- netpoll_send_udp(&nt->np, buf, msg_len);
+ send_udp(nt, buf, msg_len);
}
static void append_release(char *buf)
@@ -1178,7 +1232,7 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf,
this_offset += this_chunk;
}
- netpoll_send_udp(&nt->np, buf, this_header + this_offset);
+ send_udp(nt, buf, this_header + this_offset);
offset += this_offset;
}
}
@@ -1288,7 +1342,7 @@ static void write_msg(struct console *con, const char *msg, unsigned int len)
tmp = msg;
for (left = len; left;) {
frag = min(left, MAX_PRINT_CHUNK);
- netpoll_send_udp(&nt->np, tmp, frag);
+ send_udp(nt, tmp, frag);
tmp += frag;
left -= frag;
}
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 5fe1eaef99b5..12163635b759 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -2,7 +2,6 @@
// Copyright (c) 2020 Facebook
#include <linux/debugfs.h>
-#include <linux/ethtool.h>
#include <linux/random.h>
#include "netdevsim.h"
@@ -72,6 +71,12 @@ static void nsim_get_ringparam(struct net_device *dev,
struct netdevsim *ns = netdev_priv(dev);
memcpy(ring, &ns->ethtool.ring, sizeof(ns->ethtool.ring));
+ kernel_ring->tcp_data_split = dev->ethtool->hds_config;
+ kernel_ring->hds_thresh = dev->ethtool->hds_thresh;
+ kernel_ring->hds_thresh_max = NSIM_HDS_THRESHOLD_MAX;
+
+ if (kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ kernel_ring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
}
static int nsim_set_ringparam(struct net_device *dev,
@@ -103,10 +108,10 @@ nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
struct netdevsim *ns = netdev_priv(dev);
int err;
- mutex_lock(&dev->lock);
+ netdev_lock(dev);
err = netif_set_real_num_queues(dev, ch->combined_count,
ch->combined_count);
- mutex_unlock(&dev->lock);
+ netdev_unlock(dev);
if (err)
return err;
@@ -161,6 +166,8 @@ static int nsim_get_ts_info(struct net_device *dev,
static const struct ethtool_ops nsim_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_ALL_PARAMS,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.get_pause_stats = nsim_get_pause_stats,
.get_pauseparam = nsim_get_pauseparam,
.set_pauseparam = nsim_set_pauseparam,
@@ -182,6 +189,9 @@ static void nsim_ethtool_ring_init(struct netdevsim *ns)
ns->ethtool.ring.rx_jumbo_max_pending = 4096;
ns->ethtool.ring.rx_mini_max_pending = 4096;
ns->ethtool.ring.tx_max_pending = 4096;
+
+ ns->netdev->ethtool->hds_config = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+ ns->netdev->ethtool->hds_thresh = 0;
}
void nsim_ethtool_init(struct netdevsim *ns)
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index e068a9761c09..f92b05ccdca9 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -15,11 +15,13 @@
#include <linux/debugfs.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/netlink.h>
#include <net/net_shaper.h>
@@ -29,6 +31,8 @@
#include "netdevsim.h"
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
+
#define NSIM_RING_SIZE 256
static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb)
@@ -51,6 +55,7 @@ static int nsim_forward_skb(struct net_device *dev, struct sk_buff *skb,
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct ethtool_netdev_state *ethtool;
struct net_device *peer_dev;
unsigned int len = skb->len;
struct netdevsim *peer_ns;
@@ -69,7 +74,14 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
rxq = skb_get_queue_mapping(skb);
if (rxq >= peer_dev->num_rx_queues)
rxq = rxq % peer_dev->num_rx_queues;
- rq = &peer_ns->rq[rxq];
+ rq = peer_ns->rq[rxq];
+
+ ethtool = peer_dev->ethtool;
+ if (skb_is_nonlinear(skb) &&
+ (ethtool->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED ||
+ (ethtool->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ ethtool->hds_thresh > len)))
+ skb_linearize(skb);
skb_tx_timestamp(skb);
if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
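
Aside: the predicate above decides when the simulator must flatten a nonlinear skb: always when header/data split is off, and otherwise only when the split threshold exceeds the packet length, i.e. the packet would not actually have been split. A standalone restatement with a worked example (threshold and lengths are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum { HDS_UNKNOWN, HDS_ENABLED, HDS_DISABLED }; /* models ETHTOOL_TCP_DATA_SPLIT_* */

/* Mirrors the nsim_start_xmit() condition: linearize unless HDS is
 * enabled AND the packet is long enough to actually be split.
 */
static bool must_linearize(bool nonlinear, int hds_config,
			   unsigned int hds_thresh, unsigned int len)
{
	return nonlinear &&
	       (hds_config != HDS_ENABLED ||
		(hds_config == HDS_ENABLED && hds_thresh > len));
}

int main(void)
{
	/* 512-byte packet below a 1024-byte threshold: no split, so flatten */
	printf("%d\n", must_linearize(true, HDS_ENABLED, 1024, 512));  /* 1 */
	/* 2048-byte packet above the threshold: leave fragments intact */
	printf("%d\n", must_linearize(true, HDS_ENABLED, 1024, 2048)); /* 0 */
	return 0;
}
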
@@ -359,25 +371,24 @@ static int nsim_poll(struct napi_struct *napi, int budget)
return done;
}
-static int nsim_create_page_pool(struct nsim_rq *rq)
+static int nsim_create_page_pool(struct page_pool **p, struct napi_struct *napi)
{
- struct page_pool_params p = {
+ struct page_pool_params params = {
.order = 0,
.pool_size = NSIM_RING_SIZE,
.nid = NUMA_NO_NODE,
- .dev = &rq->napi.dev->dev,
- .napi = &rq->napi,
+ .dev = &napi->dev->dev,
+ .napi = napi,
.dma_dir = DMA_BIDIRECTIONAL,
- .netdev = rq->napi.dev,
+ .netdev = napi->dev,
};
+ struct page_pool *pool;
- rq->page_pool = page_pool_create(&p);
- if (IS_ERR(rq->page_pool)) {
- int err = PTR_ERR(rq->page_pool);
+ pool = page_pool_create(&params);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
- rq->page_pool = NULL;
- return err;
- }
+ *p = pool;
return 0;
}
@@ -388,15 +399,15 @@ static int nsim_init_napi(struct netdevsim *ns)
int err, i;
for (i = 0; i < dev->num_rx_queues; i++) {
- rq = &ns->rq[i];
+ rq = ns->rq[i];
- netif_napi_add(dev, &rq->napi, nsim_poll);
+ netif_napi_add_config(dev, &rq->napi, nsim_poll, i);
}
for (i = 0; i < dev->num_rx_queues; i++) {
- rq = &ns->rq[i];
+ rq = ns->rq[i];
- err = nsim_create_page_pool(rq);
+ err = nsim_create_page_pool(&rq->page_pool, &rq->napi);
if (err)
goto err_pp_destroy;
}
@@ -405,12 +416,12 @@ static int nsim_init_napi(struct netdevsim *ns)
err_pp_destroy:
while (i--) {
- page_pool_destroy(ns->rq[i].page_pool);
- ns->rq[i].page_pool = NULL;
+ page_pool_destroy(ns->rq[i]->page_pool);
+ ns->rq[i]->page_pool = NULL;
}
for (i = 0; i < dev->num_rx_queues; i++)
- __netif_napi_del(&ns->rq[i].napi);
+ __netif_napi_del(&ns->rq[i]->napi);
return err;
}
@@ -421,7 +432,7 @@ static void nsim_enable_napi(struct netdevsim *ns)
int i;
for (i = 0; i < dev->num_rx_queues; i++) {
- struct nsim_rq *rq = &ns->rq[i];
+ struct nsim_rq *rq = ns->rq[i];
netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi);
napi_enable(&rq->napi);
@@ -448,7 +459,7 @@ static void nsim_del_napi(struct netdevsim *ns)
int i;
for (i = 0; i < dev->num_rx_queues; i++) {
- struct nsim_rq *rq = &ns->rq[i];
+ struct nsim_rq *rq = ns->rq[i];
napi_disable(&rq->napi);
__netif_napi_del(&rq->napi);
@@ -456,8 +467,8 @@ static void nsim_del_napi(struct netdevsim *ns)
synchronize_net();
for (i = 0; i < dev->num_rx_queues; i++) {
- page_pool_destroy(ns->rq[i].page_pool);
- ns->rq[i].page_pool = NULL;
+ page_pool_destroy(ns->rq[i]->page_pool);
+ ns->rq[i]->page_pool = NULL;
}
}
@@ -595,6 +606,182 @@ static const struct netdev_stat_ops nsim_stat_ops = {
.get_base_stats = nsim_get_base_stats,
};
+static struct nsim_rq *nsim_queue_alloc(void)
+{
+ struct nsim_rq *rq;
+
+ rq = kzalloc(sizeof(*rq), GFP_KERNEL_ACCOUNT);
+ if (!rq)
+ return NULL;
+
+ skb_queue_head_init(&rq->skb_queue);
+ return rq;
+}
+
+static void nsim_queue_free(struct nsim_rq *rq)
+{
+ skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
+ kfree(rq);
+}
+
+/* Queue reset mode is controlled by ns->rq_reset_mode.
+ * - normal - new NAPI new pool (old NAPI enabled when new added)
+ * - mode 1 - allocate new pool (NAPI is only disabled / enabled)
+ * - mode 2 - new NAPI new pool (old NAPI removed before new added)
+ * - mode 3 - new NAPI new pool (old NAPI disabled when new added)
+ */
+struct nsim_queue_mem {
+ struct nsim_rq *rq;
+ struct page_pool *pp;
+};
+
+static int
+nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+ int err;
+
+ if (ns->rq_reset_mode > 3)
+ return -EINVAL;
+
+ if (ns->rq_reset_mode == 1)
+ return nsim_create_page_pool(&qmem->pp, &ns->rq[idx]->napi);
+
+ qmem->rq = nsim_queue_alloc();
+ if (!qmem->rq)
+ return -ENOMEM;
+
+ err = nsim_create_page_pool(&qmem->rq->page_pool, &qmem->rq->napi);
+ if (err)
+ goto err_free;
+
+ if (!ns->rq_reset_mode)
+ netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+
+ return 0;
+
+err_free:
+ nsim_queue_free(qmem->rq);
+ return err;
+}
+
+static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+
+ page_pool_destroy(qmem->pp);
+ if (qmem->rq) {
+ if (!ns->rq_reset_mode)
+ netif_napi_del(&qmem->rq->napi);
+ page_pool_destroy(qmem->rq->page_pool);
+ nsim_queue_free(qmem->rq);
+ }
+}
+
+static int
+nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (ns->rq_reset_mode == 1) {
+ ns->rq[idx]->page_pool = qmem->pp;
+ napi_enable(&ns->rq[idx]->napi);
+ return 0;
+ }
+
+ /* netif_napi_add()/_del() should normally be called from alloc/free,
+ * here we want to test various call orders.
+ */
+ if (ns->rq_reset_mode == 2) {
+ netif_napi_del(&ns->rq[idx]->napi);
+ netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+ } else if (ns->rq_reset_mode == 3) {
+ netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+ netif_napi_del(&ns->rq[idx]->napi);
+ }
+
+ ns->rq[idx] = qmem->rq;
+ napi_enable(&ns->rq[idx]->napi);
+
+ return 0;
+}
+
+static int nsim_queue_stop(struct net_device *dev, void *per_queue_mem, int idx)
+{
+ struct nsim_queue_mem *qmem = per_queue_mem;
+ struct netdevsim *ns = netdev_priv(dev);
+
+ napi_disable(&ns->rq[idx]->napi);
+
+ if (ns->rq_reset_mode == 1) {
+ qmem->pp = ns->rq[idx]->page_pool;
+ page_pool_disable_direct_recycling(qmem->pp);
+ } else {
+ qmem->rq = ns->rq[idx];
+ }
+
+ return 0;
+}
+
+static const struct netdev_queue_mgmt_ops nsim_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct nsim_queue_mem),
+ .ndo_queue_mem_alloc = nsim_queue_mem_alloc,
+ .ndo_queue_mem_free = nsim_queue_mem_free,
+ .ndo_queue_start = nsim_queue_start,
+ .ndo_queue_stop = nsim_queue_stop,
+};
+
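
Aside: these ops are driven by the core helper behind netdev_rx_queue_restart(). The sketch below is an assumption-flagged model of that sequence (the real core error handling is richer, and the ops struct is a simplified local copy without ndo_queue_mem_size); it shows why nsim_queue_stop() merely stashes the old pool or nsim_rq in the per-queue blob instead of freeing it:

struct net_device;

struct queue_mgmt_ops_model {
	int  (*ndo_queue_mem_alloc)(struct net_device *dev, void *mem, int idx);
	void (*ndo_queue_mem_free)(struct net_device *dev, void *mem);
	int  (*ndo_queue_start)(struct net_device *dev, void *mem, int idx);
	int  (*ndo_queue_stop)(struct net_device *dev, void *mem, int idx);
};

static int queue_restart_model(struct net_device *dev, int idx,
			       const struct queue_mgmt_ops_model *ops,
			       void *new_mem, void *old_mem)
{
	int err;

	err = ops->ndo_queue_mem_alloc(dev, new_mem, idx); /* build replacement */
	if (err)
		return err;        /* old queue untouched, still running */

	err = ops->ndo_queue_stop(dev, old_mem, idx);      /* quiesce old */
	if (err)
		goto err_free_new;

	err = ops->ndo_queue_start(dev, new_mem, idx);     /* install new */
	if (err)
		goto err_restart_old;

	ops->ndo_queue_mem_free(dev, old_mem);             /* release old last */
	return 0;

err_restart_old:
	ops->ndo_queue_start(dev, old_mem, idx);           /* fall back to old */
err_free_new:
	ops->ndo_queue_mem_free(dev, new_mem);
	return err;
}
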
+static ssize_t
+nsim_qreset_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct netdevsim *ns = file->private_data;
+ unsigned int queue, mode;
+ char buf[32];
+ ssize_t ret;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+ if (copy_from_user(buf, data, count))
+ return -EFAULT;
+ buf[count] = '\0';
+
+ ret = sscanf(buf, "%u %u", &queue, &mode);
+ if (ret != 2)
+ return -EINVAL;
+
+ rtnl_lock();
+ if (!netif_running(ns->netdev)) {
+ ret = -ENETDOWN;
+ goto exit_unlock;
+ }
+
+ if (queue >= ns->netdev->real_num_rx_queues) {
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+ ns->rq_reset_mode = mode;
+ ret = netdev_rx_queue_restart(ns->netdev, queue);
+ ns->rq_reset_mode = 0;
+ if (ret)
+ goto exit_unlock;
+
+ ret = count;
+exit_unlock:
+ rtnl_unlock();
+ return ret;
+}
+
+static const struct file_operations nsim_qreset_fops = {
+ .open = simple_open,
+ .write = nsim_qreset_write,
+ .owner = THIS_MODULE,
+};
+
static ssize_t
nsim_pp_hold_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
@@ -628,7 +815,7 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
if (!netif_running(ns->netdev) && val) {
ret = -ENETDOWN;
} else if (val) {
- ns->page = page_pool_dev_alloc_pages(ns->rq[0].page_pool);
+ ns->page = page_pool_dev_alloc_pages(ns->rq[0]->page_pool);
if (!ns->page)
ret = -ENOMEM;
} else {
@@ -677,27 +864,35 @@ static int nsim_queue_init(struct netdevsim *ns)
struct net_device *dev = ns->netdev;
int i;
- ns->rq = kvcalloc(dev->num_rx_queues, sizeof(*ns->rq),
- GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ ns->rq = kcalloc(dev->num_rx_queues, sizeof(*ns->rq),
+ GFP_KERNEL_ACCOUNT);
if (!ns->rq)
return -ENOMEM;
- for (i = 0; i < dev->num_rx_queues; i++)
- skb_queue_head_init(&ns->rq[i].skb_queue);
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ ns->rq[i] = nsim_queue_alloc();
+ if (!ns->rq[i])
+ goto err_free_prev;
+ }
return 0;
+
+err_free_prev:
+ while (i--)
+ kfree(ns->rq[i]);
+ kfree(ns->rq);
+ return -ENOMEM;
}
-static void nsim_queue_free(struct netdevsim *ns)
+static void nsim_queue_uninit(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
int i;
for (i = 0; i < dev->num_rx_queues; i++)
- skb_queue_purge_reason(&ns->rq[i].skb_queue,
- SKB_DROP_REASON_QUEUE_PURGE);
+ nsim_queue_free(ns->rq[i]);
- kvfree(ns->rq);
+ kfree(ns->rq);
ns->rq = NULL;
}
@@ -713,6 +908,7 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
ns->phc = phc;
ns->netdev->netdev_ops = &nsim_netdev_ops;
ns->netdev->stat_ops = &nsim_stat_ops;
+ ns->netdev->queue_mgmt_ops = &nsim_queue_mgmt_ops;
err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev);
if (err)
@@ -741,7 +937,7 @@ err_ipsec_teardown:
nsim_macsec_teardown(ns);
nsim_bpf_uninit(ns);
err_rq_destroy:
- nsim_queue_free(ns);
+ nsim_queue_uninit(ns);
err_utn_destroy:
rtnl_unlock();
nsim_udp_tunnels_info_destroy(ns->netdev);
@@ -798,6 +994,9 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
ns->pp_dfs = debugfs_create_file("pp_hold", 0600, nsim_dev_port->ddir,
ns, &nsim_pp_hold_fops);
+ ns->qr_dfs = debugfs_create_file("queue_reset", 0200,
+ nsim_dev_port->ddir, ns,
+ &nsim_qreset_fops);
return ns;
@@ -811,6 +1010,7 @@ void nsim_destroy(struct netdevsim *ns)
struct net_device *dev = ns->netdev;
struct netdevsim *peer;
+ debugfs_remove(ns->qr_dfs);
debugfs_remove(ns->pp_dfs);
rtnl_lock();
@@ -823,7 +1023,7 @@ void nsim_destroy(struct netdevsim *ns)
nsim_macsec_teardown(ns);
nsim_ipsec_teardown(ns);
nsim_bpf_uninit(ns);
- nsim_queue_free(ns);
+ nsim_queue_uninit(ns);
}
rtnl_unlock();
if (nsim_dev_port_is_pf(ns->nsim_dev_port))
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index bf02efa10956..dcf073bc4802 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -16,6 +16,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
@@ -36,6 +37,8 @@
#define NSIM_IPSEC_VALID BIT(31)
#define NSIM_UDP_TUNNEL_N_PORTS 4
+#define NSIM_HDS_THRESHOLD_MAX 1024
+
struct nsim_sa {
struct xfrm_state *xs;
__be32 ipaddr[4];
@@ -101,7 +104,9 @@ struct netdevsim {
struct nsim_dev *nsim_dev;
struct nsim_dev_port *nsim_dev_port;
struct mock_phc *phc;
- struct nsim_rq *rq;
+ struct nsim_rq **rq;
+
+ int rq_reset_mode;
u64 tx_packets;
u64 tx_bytes;
@@ -134,6 +139,7 @@ struct netdevsim {
struct page *page;
struct dentry *pp_dfs;
+ struct dentry *qr_dfs;
struct nsim_ethtool ethtool;
struct netdevsim __rcu *peer;
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index c1d881dc6409..1e1b00756be7 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -338,6 +338,7 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT;
enum netkit_mode mode = NETKIT_L3;
unsigned char ifname_assign_type;
+ u16 headroom = 0, tailroom = 0;
struct ifinfomsg *ifmp = NULL;
struct net_device *peer;
char ifname[IFNAMSIZ];
@@ -371,6 +372,10 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
if (err < 0)
return err;
}
+ if (data[IFLA_NETKIT_HEADROOM])
+ headroom = nla_get_u16(data[IFLA_NETKIT_HEADROOM]);
+ if (data[IFLA_NETKIT_TAILROOM])
+ tailroom = nla_get_u16(data[IFLA_NETKIT_TAILROOM]);
}
if (ifmp && tbp[IFLA_IFNAME]) {
@@ -390,6 +395,14 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
return PTR_ERR(peer);
netif_inherit_tso_max(peer, dev);
+ if (headroom) {
+ peer->needed_headroom = headroom;
+ dev->needed_headroom = headroom;
+ }
+ if (tailroom) {
+ peer->needed_tailroom = tailroom;
+ dev->needed_tailroom = tailroom;
+ }
if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
eth_hw_addr_random(peer);
@@ -401,6 +414,7 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
nk->policy = policy_peer;
nk->scrub = scrub_peer;
nk->mode = mode;
+ nk->headroom = headroom;
bpf_mprog_bundle_init(&nk->bundle);
err = register_netdevice(peer);
@@ -426,6 +440,7 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
nk->policy = policy_prim;
nk->scrub = scrub_prim;
nk->mode = mode;
+ nk->headroom = headroom;
bpf_mprog_bundle_init(&nk->bundle);
err = register_netdevice(dev);
@@ -850,7 +865,18 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
struct net_device *peer = rtnl_dereference(nk->peer);
enum netkit_action policy;
struct nlattr *attr;
- int err;
+ int err, i;
+ static const struct {
+ u32 attr;
+ char *name;
+ } fixed_params[] = {
+ { IFLA_NETKIT_MODE, "operating mode" },
+ { IFLA_NETKIT_SCRUB, "scrubbing" },
+ { IFLA_NETKIT_PEER_SCRUB, "peer scrubbing" },
+ { IFLA_NETKIT_PEER_INFO, "peer info" },
+ { IFLA_NETKIT_HEADROOM, "headroom" },
+ { IFLA_NETKIT_TAILROOM, "tailroom" },
+ };
if (!nk->primary) {
NL_SET_ERR_MSG(extack,
@@ -858,28 +884,14 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
return -EACCES;
}
- if (data[IFLA_NETKIT_MODE]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_MODE],
- "netkit link operating mode cannot be changed after device creation");
- return -EACCES;
- }
-
- if (data[IFLA_NETKIT_SCRUB]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_SCRUB],
- "netkit scrubbing cannot be changed after device creation");
- return -EACCES;
- }
-
- if (data[IFLA_NETKIT_PEER_SCRUB]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_SCRUB],
- "netkit scrubbing cannot be changed after device creation");
- return -EACCES;
- }
-
- if (data[IFLA_NETKIT_PEER_INFO]) {
- NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO],
- "netkit peer info cannot be changed after device creation");
- return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(fixed_params); i++) {
+ attr = data[fixed_params[i].attr];
+ if (attr) {
+ NL_SET_ERR_MSG_ATTR_FMT(extack, attr,
+ "netkit link %s cannot be changed after device creation",
+ fixed_params[i].name);
+ return -EACCES;
+ }
}
if (data[IFLA_NETKIT_POLICY]) {
@@ -914,6 +926,8 @@ static size_t netkit_get_size(const struct net_device *dev)
nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_SCRUB */
nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_MODE */
nla_total_size(sizeof(u8)) + /* IFLA_NETKIT_PRIMARY */
+ nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_HEADROOM */
+ nla_total_size(sizeof(u16)) + /* IFLA_NETKIT_TAILROOM */
0;
}
@@ -930,6 +944,10 @@ static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev)
return -EMSGSIZE;
if (nla_put_u32(skb, IFLA_NETKIT_SCRUB, nk->scrub))
return -EMSGSIZE;
+ if (nla_put_u16(skb, IFLA_NETKIT_HEADROOM, dev->needed_headroom))
+ return -EMSGSIZE;
+ if (nla_put_u16(skb, IFLA_NETKIT_TAILROOM, dev->needed_tailroom))
+ return -EMSGSIZE;
if (peer) {
nk = netkit_priv(peer);
@@ -947,6 +965,8 @@ static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = {
[IFLA_NETKIT_MODE] = NLA_POLICY_MAX(NLA_U32, NETKIT_L3),
[IFLA_NETKIT_POLICY] = { .type = NLA_U32 },
[IFLA_NETKIT_PEER_POLICY] = { .type = NLA_U32 },
+ [IFLA_NETKIT_HEADROOM] = { .type = NLA_U16 },
+ [IFLA_NETKIT_TAILROOM] = { .type = NLA_U16 },
[IFLA_NETKIT_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
[IFLA_NETKIT_PEER_SCRUB] = NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
[IFLA_NETKIT_PRIMARY] = { .type = NLA_REJECT,
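The netkit change collapses four near-identical rejection branches into one table-driven loop; note that IFLA_NETKIT_PEER_INFO consequently now returns -EACCES where it previously returned -EINVAL. A standalone sketch of the same table-driven validation (attribute IDs and names here are hypothetical):

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum { ATTR_MODE = 1, ATTR_SCRUB, ATTR_HEADROOM, ATTR_MAX };

static const struct {
	int attr;
	const char *name;
} fixed_params[] = {
	{ ATTR_MODE,     "operating mode" },
	{ ATTR_SCRUB,    "scrubbing" },
	{ ATTR_HEADROOM, "headroom" },
};

/* Reject any immutable attribute present in a change request. */
static int check_fixed(const void *data[ATTR_MAX])
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(fixed_params); i++) {
		if (data[fixed_params[i].attr]) {
			fprintf(stderr, "%s cannot be changed after device creation\n",
				fixed_params[i].name);
			return -1;	/* stands in for -EACCES */
		}
	}
	return 0;
}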
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index b79aedad855b..e46f588cae7d 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -35,6 +35,27 @@ enum sgmii_speed {
#define phylink_pcs_to_lynx(pl_pcs) container_of((pl_pcs), struct lynx_pcs, pcs)
#define lynx_to_phylink_pcs(lynx) (&(lynx)->pcs)
+static unsigned int lynx_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return LINK_INBAND_DISABLE;
+
+ case PHY_INTERFACE_MODE_USXGMII:
+ return LINK_INBAND_ENABLE;
+
+ default:
+ return 0;
+ }
+}
+
static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
struct phylink_link_state *state)
{
@@ -79,7 +100,7 @@ static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs,
state->duplex = DUPLEX_FULL;
}
-static void lynx_pcs_get_state(struct phylink_pcs *pcs,
+static void lynx_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
@@ -88,7 +109,7 @@ static void lynx_pcs_get_state(struct phylink_pcs *pcs,
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
- phylink_mii_c22_pcs_get_state(lynx->mdio, state);
+ phylink_mii_c22_pcs_get_state(lynx->mdio, neg_mode, state);
break;
case PHY_INTERFACE_MODE_2500BASEX:
lynx_pcs_get_state_2500basex(lynx->mdio, state);
@@ -306,15 +327,26 @@ static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static const struct phylink_pcs_ops lynx_pcs_phylink_ops = {
+ .pcs_inband_caps = lynx_pcs_inband_caps,
.pcs_get_state = lynx_pcs_get_state,
.pcs_config = lynx_pcs_config,
.pcs_an_restart = lynx_pcs_an_restart,
.pcs_link_up = lynx_pcs_link_up,
};
+static const phy_interface_t lynx_interfaces[] = {
+ PHY_INTERFACE_MODE_SGMII,
+ PHY_INTERFACE_MODE_QSGMII,
+ PHY_INTERFACE_MODE_1000BASEX,
+ PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_10GBASER,
+ PHY_INTERFACE_MODE_USXGMII,
+};
+
static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
{
struct lynx_pcs *lynx;
+ int i;
lynx = kzalloc(sizeof(*lynx), GFP_KERNEL);
if (!lynx)
@@ -326,6 +358,9 @@ static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
lynx->pcs.neg_mode = true;
lynx->pcs.poll = true;
+ for (i = 0; i < ARRAY_SIZE(lynx_interfaces); i++)
+ __set_bit(lynx_interfaces[i], lynx->pcs.supported_interfaces);
+
return lynx_to_phylink_pcs(lynx);
}
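lynx_pcs_inband_caps() reports, per interface mode, whether the PCS can operate with inband signalling disabled, enabled, or either, as a bitmask; phylink can then intersect this with the capabilities of the other link components to pick a workable mode. A simplified standalone illustration of such an intersection (the flag values and the resolution policy are assumptions, not phylink's actual logic):

#include <stdio.h>

#define INBAND_DISABLE (1u << 0)
#define INBAND_ENABLE  (1u << 1)

/* Pick an inband mode both ends support, preferring inband. */
static const char *resolve_inband(unsigned int pcs_caps, unsigned int phy_caps)
{
	unsigned int common = pcs_caps & phy_caps;

	if (common & INBAND_ENABLE)
		return "inband";
	if (common & INBAND_DISABLE)
		return "outband";
	return "incompatible";
}

int main(void)
{
	/* SGMII PCS (either mode) against a PHY that cannot do inband */
	printf("%s\n", resolve_inband(INBAND_DISABLE | INBAND_ENABLE,
				      INBAND_DISABLE));
	return 0;
}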
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 4f63abe638c4..7d6261dee534 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -88,7 +88,24 @@ static struct mtk_pcs_lynxi *pcs_to_mtk_pcs_lynxi(struct phylink_pcs *pcs)
return container_of(pcs, struct mtk_pcs_lynxi, pcs);
}
+static unsigned int mtk_pcs_lynxi_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return LINK_INBAND_DISABLE;
+
+ default:
+ return 0;
+ }
+}
+
static void mtk_pcs_lynxi_get_state(struct phylink_pcs *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
struct mtk_pcs_lynxi *mpcs = pcs_to_mtk_pcs_lynxi(pcs);
@@ -98,7 +115,8 @@ static void mtk_pcs_lynxi_get_state(struct phylink_pcs *pcs,
regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &bm);
regmap_read(mpcs->regmap, SGMSYS_PCS_ADVERTISE, &adv);
- phylink_mii_c22_pcs_decode_state(state, FIELD_GET(SGMII_BMSR, bm),
+ phylink_mii_c22_pcs_decode_state(state, neg_mode,
+ FIELD_GET(SGMII_BMSR, bm),
FIELD_GET(SGMII_LPA, adv));
}
@@ -241,6 +259,7 @@ static void mtk_pcs_lynxi_disable(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mtk_pcs_lynxi_ops = {
+ .pcs_inband_caps = mtk_pcs_lynxi_inband_caps,
.pcs_get_state = mtk_pcs_lynxi_get_state,
.pcs_config = mtk_pcs_lynxi_config,
.pcs_an_restart = mtk_pcs_lynxi_restart_an,
@@ -290,6 +309,10 @@ struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
mpcs->pcs.poll = true;
mpcs->interface = PHY_INTERFACE_MODE_NA;
+ __set_bit(PHY_INTERFACE_MODE_SGMII, mpcs->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, mpcs->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, mpcs->pcs.supported_interfaces);
+
return &mpcs->pcs;
}
EXPORT_SYMBOL(mtk_pcs_lynxi_create);
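Threading neg_mode through pcs_get_state() lets the decode helpers know whether an inband control word is actually being exchanged; when it is not, the link-partner word must not be trusted. A simplified sketch of that branch (the register layout is illustrative, loosely modelled on the SGMII control word):

#include <stdbool.h>

struct link_state { bool link; int speed; };

/* Only decode the inband word when negotiation is actually enabled. */
static void decode_state(struct link_state *st, bool inband_enabled,
			 unsigned int bmsr, unsigned int lpa)
{
	st->link = bmsr & 0x0004;	/* BMSR link-status bit */
	if (!inband_enabled) {
		st->speed = -1;		/* resolved out of band by the MAC */
		return;
	}
	st->speed = (lpa >> 10) & 0x3;	/* SGMII-style 2-bit speed field */
}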
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 3059435af596..1faa37f0e7b9 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -567,14 +567,40 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
return 0;
}
-void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
+static unsigned int xpcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+ const struct dw_xpcs_compat *compat;
+
+ compat = xpcs_find_compat(xpcs, interface);
+ if (!compat)
+ return 0;
+
+ switch (compat->an_mode) {
+ case DW_AN_C73:
+ return LINK_INBAND_ENABLE;
+
+ case DW_AN_C37_SGMII:
+ case DW_AN_C37_1000BASEX:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+
+ case DW_10GBASER:
+ case DW_2500BASEX:
+ return LINK_INBAND_DISABLE;
+
+ default:
+ return 0;
+ }
+}
+
+static void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
{
const struct dw_xpcs_compat *compat;
for (compat = xpcs->desc->compat; compat->supported; compat++)
__set_bit(compat->interface, interfaces);
}
-EXPORT_SYMBOL_GPL(xpcs_get_interfaces);
int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
{
@@ -1006,6 +1032,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
}
static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
int lpa, bmsr;
@@ -1034,7 +1061,7 @@ static int xpcs_get_state_c37_1000basex(struct dw_xpcs *xpcs,
}
}
- phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lpa);
}
return 0;
@@ -1062,7 +1089,7 @@ static int xpcs_get_state_2500basex(struct dw_xpcs *xpcs,
return 0;
}
-static void xpcs_get_state(struct phylink_pcs *pcs,
+static void xpcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state)
{
struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
@@ -1090,7 +1117,7 @@ static void xpcs_get_state(struct phylink_pcs *pcs,
"xpcs_get_state_c37_sgmii", ERR_PTR(ret));
break;
case DW_AN_C37_1000BASEX:
- ret = xpcs_get_state_c37_1000basex(xpcs, state);
+ ret = xpcs_get_state_c37_1000basex(xpcs, neg_mode, state);
if (ret)
dev_err(&xpcs->mdiodev->dev, "%s returned %pe\n",
"xpcs_get_state_c37_1000basex", ERR_PTR(ret));
@@ -1308,6 +1335,7 @@ static const struct dw_xpcs_desc xpcs_desc_list[] = {
static const struct phylink_pcs_ops xpcs_phylink_ops = {
.pcs_validate = xpcs_validate,
+ .pcs_inband_caps = xpcs_inband_caps,
.pcs_pre_config = xpcs_pre_config,
.pcs_config = xpcs_config,
.pcs_get_state = xpcs_get_state,
@@ -1420,6 +1448,8 @@ static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev)
if (ret)
goto out_clear_clks;
+ xpcs_get_interfaces(xpcs, xpcs->pcs.supported_interfaces);
+
if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID)
xpcs->pcs.poll = false;
else
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 15828f4710a9..41c15a2c2037 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -287,8 +287,15 @@ config MICROCHIP_PHY
config MICROCHIP_T1_PHY
tristate "Microchip T1 PHYs"
+ select MICROCHIP_PHY_RDS_PTP if NETWORK_PHY_TIMESTAMPING
+ depends on PTP_1588_CLOCK_OPTIONAL
help
- Supports the LAN87XX PHYs.
+ Supports the LAN8XXX PHYs.
+
+config MICROCHIP_PHY_RDS_PTP
+ tristate
+ help
+ Currently supports LAN887X T1 PHY
config MICROSEMI_PHY
tristate "Microsemi PHYs"
@@ -343,10 +350,7 @@ config QSEMI_PHY
help
Currently supports the qs6612
-config REALTEK_PHY
- tristate "Realtek PHYs"
- help
- Supports the Realtek 821x PHY.
+source "drivers/net/phy/realtek/Kconfig"
config RENESAS_PHY
tristate "Renesas PHYs"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index e6145153e837..c8dac6e92278 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -79,6 +79,7 @@ obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
+obj-$(CONFIG_MICROCHIP_PHY_RDS_PTP) += microchip_rds_ptp.o
obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
obj-$(CONFIG_MICROCHIP_T1S_PHY) += microchip_t1s.o
obj-$(CONFIG_MICROSEMI_PHY) += mscc/
@@ -94,7 +95,7 @@ obj-$(CONFIG_NXP_CBTX_PHY) += nxp-cbtx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-y += qcom/
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
-obj-$(CONFIG_REALTEK_PHY) += realtek.o
+obj-$(CONFIG_REALTEK_PHY) += realtek/
obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
obj-$(CONFIG_ROCKCHIP_PHY) += rockchip.o
obj-$(CONFIG_SMSC_PHY) += smsc.o
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index a2a862bae2ed..7fa713ca8d45 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -1038,7 +1038,7 @@ static struct phy_driver adin_driver[] = {
module_phy_driver(adin_driver);
-static struct mdio_device_id __maybe_unused adin_tbl[] = {
+static const struct mdio_device_id __maybe_unused adin_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200) },
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300) },
{ }
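The repeated one-line change across the PHY drivers below is mechanical: the MDIO ID tables are only consumed by MODULE_DEVICE_TABLE() for module autoloading, so they can be const and live in rodata. A minimal kernel-style sketch with a fabricated PHY ID:

#include <linux/mdio.h>
#include <linux/module.h>

/* 0x00112200 / 0xfffffff0 is a made-up OUI/model pair for illustration. */
static const struct mdio_device_id __maybe_unused example_tbl[] = {
	{ 0x00112200, 0xfffffff0 },
	{ }	/* zeroed sentinel terminates the table */
};
MODULE_DEVICE_TABLE(mdio, example_tbl);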
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
index 85f910e2d4fb..6bb469429b9d 100644
--- a/drivers/net/phy/adin1100.c
+++ b/drivers/net/phy/adin1100.c
@@ -340,7 +340,7 @@ static struct phy_driver adin_driver[] = {
module_phy_driver(adin_driver);
-static struct mdio_device_id __maybe_unused adin_tbl[] = {
+static const struct mdio_device_id __maybe_unused adin_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100) },
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1110) },
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN2111) },
diff --git a/drivers/net/phy/air_en8811h.c b/drivers/net/phy/air_en8811h.c
index 8d076b9609fd..e9fd24cb7270 100644
--- a/drivers/net/phy/air_en8811h.c
+++ b/drivers/net/phy/air_en8811h.c
@@ -1075,7 +1075,7 @@ static struct phy_driver en8811h_driver[] = {
module_phy_driver(en8811h_driver);
-static struct mdio_device_id __maybe_unused en8811h_tbl[] = {
+static const struct mdio_device_id __maybe_unused en8811h_tbl[] = {
{ PHY_ID_MATCH_MODEL(EN8811H_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 930b15fa6ce9..75b5fe65500a 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -111,7 +111,7 @@ static struct phy_driver am79c_drivers[] = {
module_phy_driver(am79c_drivers);
-static struct mdio_device_id __maybe_unused amd_tbl[] = {
+static const struct mdio_device_id __maybe_unused amd_tbl[] = {
{ PHY_ID_AC101L, 0xfffffff0 },
{ PHY_ID_AM79C874, 0xfffffff0 },
{ }
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index bb56a66d2a48..e42ace4e682a 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -1200,7 +1200,7 @@ static struct phy_driver aqr_driver[] = {
module_phy_driver(aqr_driver);
-static struct mdio_device_id __maybe_unused aqr_tbl[] = {
+static const struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQ1202) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQ2104) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR105) },
diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c
index eb74a8cf8df1..694df1401aa2 100644
--- a/drivers/net/phy/ax88796b.c
+++ b/drivers/net/phy/ax88796b.c
@@ -121,7 +121,7 @@ static struct phy_driver asix_driver[] = {
module_phy_driver(asix_driver);
-static struct mdio_device_id __maybe_unused asix_tbl[] = {
+static const struct mdio_device_id __maybe_unused asix_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772A) },
{ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772C) },
{ PHY_ID_ASIX_AX88796B, 0xfffffff0 },
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index da8f7cb41b44..15cbef8202bc 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -278,7 +278,7 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
}
};
-static struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = {
{ PHY_ID_BCM_CYGNUS, 0xfffffff0, },
{ PHY_ID_BCM_OMEGA, 0xfffffff0, },
{ }
diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c
index 2eea3d09b1e6..7969345f6b35 100644
--- a/drivers/net/phy/bcm54140.c
+++ b/drivers/net/phy/bcm54140.c
@@ -883,7 +883,7 @@ static struct phy_driver bcm54140_drivers[] = {
};
module_phy_driver(bcm54140_drivers);
-static struct mdio_device_id __maybe_unused bcm54140_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm54140_tbl[] = {
{ PHY_ID_BCM54140, BCM54140_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 0eb33be824f1..b46a736a3130 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -93,7 +93,7 @@ static struct phy_driver bcm63xx_driver[] = {
module_phy_driver(bcm63xx_driver);
-static struct mdio_device_id __maybe_unused bcm63xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm63xx_tbl[] = {
{ 0x00406000, 0xfffffc00 },
{ 0x002bdc00, 0xfffffc00 },
{ }
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 97638ba7ae85..00e8fa14aa77 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -929,7 +929,7 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_16NM_EPHY(PHY_ID_BCM7712, "Broadcom BCM7712"),
};
-static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM72113, 0xfffffff0 },
{ PHY_ID_BCM72116, 0xfffffff0, },
{ PHY_ID_BCM72165, 0xfffffff0, },
diff --git a/drivers/net/phy/bcm84881.c b/drivers/net/phy/bcm84881.c
index 97da3aee4942..d7f7cc44c532 100644
--- a/drivers/net/phy/bcm84881.c
+++ b/drivers/net/phy/bcm84881.c
@@ -235,11 +235,21 @@ static int bcm84881_read_status(struct phy_device *phydev)
return genphy_c45_read_mdix(phydev);
}
+/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide an SGMII
+ * or 802.3z control word, so inband will not work.
+ */
+static unsigned int bcm84881_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ return LINK_INBAND_DISABLE;
+}
+
static struct phy_driver bcm84881_drivers[] = {
{
.phy_id = 0xae025150,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM84881",
+ .inband_caps = bcm84881_inband_caps,
.config_init = bcm84881_config_init,
.probe = bcm84881_probe,
.get_features = bcm84881_get_features,
@@ -252,7 +262,7 @@ static struct phy_driver bcm84881_drivers[] = {
module_phy_driver(bcm84881_drivers);
/* FIXME: module auto-loading for Clause 45 PHYs seems non-functional */
-static struct mdio_device_id __maybe_unused bcm84881_tbl[] = {
+static const struct mdio_device_id __maybe_unused bcm84881_tbl[] = {
{ 0xae025150, 0xfffffff0 },
{ },
};
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index ddded162c44c..22edb7e4c1a1 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -1717,7 +1717,7 @@ static struct phy_driver broadcom_drivers[] = {
module_phy_driver(broadcom_drivers);
-static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
+static const struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5411, 0xfffffff0 },
{ PHY_ID_BCM5421, 0xfffffff0 },
{ PHY_ID_BCM54210E, 0xfffffff0 },
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index ef5f412e101f..d87cf8b94cf8 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -145,7 +145,7 @@ static struct phy_driver cis820x_driver[] = {
module_phy_driver(cis820x_driver);
-static struct mdio_device_id __maybe_unused cicada_tbl[] = {
+static const struct mdio_device_id __maybe_unused cicada_tbl[] = {
{ 0x000fc410, 0x000ffff0 },
{ 0x000fc440, 0x000fffc0 },
{ }
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 40514a94e6ff..3b65f37f1c57 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -87,7 +87,7 @@ static struct phy_driver cortina_driver[] = {
module_phy_driver(cortina_driver);
-static struct mdio_device_id __maybe_unused cortina_tbl[] = {
+static const struct mdio_device_id __maybe_unused cortina_tbl[] = {
{ PHY_ID_CS4340, 0xffffffff},
{},
};
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 4ac4bce1bf32..fa3692508f16 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -209,7 +209,7 @@ static struct phy_driver dm91xx_driver[] = {
module_phy_driver(dm91xx_driver);
-static struct mdio_device_id __maybe_unused davicom_tbl[] = {
+static const struct mdio_device_id __maybe_unused davicom_tbl[] = {
{ 0x0181b880, 0x0ffffff0 },
{ 0x0181b8b0, 0x0ffffff0 },
{ 0x0181b8a0, 0x0ffffff0 },
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 075d2beea716..85e231451093 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1548,7 +1548,7 @@ MODULE_LICENSE("GPL");
module_init(dp83640_init);
module_exit(dp83640_exit);
-static struct mdio_device_id __maybe_unused dp83640_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83640_tbl[] = {
{ DP83640_PHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index cf8b6d0bfaa9..6599feca1967 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -22,8 +22,6 @@
#define DP83826C_PHY_ID 0x2000a130
#define DP83826NC_PHY_ID 0x2000a110
-#define DP83822_DEVADDR 0x1f
-
#define MII_DP83822_CTRL_2 0x0a
#define MII_DP83822_PHYSTS 0x10
#define MII_DP83822_PHYSCR 0x11
@@ -32,6 +30,10 @@
#define MII_DP83822_FCSCR 0x14
#define MII_DP83822_RCSR 0x17
#define MII_DP83822_RESET_CTRL 0x1f
+#define MII_DP83822_MLEDCR 0x25
+#define MII_DP83822_LEDCFG1 0x460
+#define MII_DP83822_IOCTRL1 0x462
+#define MII_DP83822_IOCTRL2 0x463
#define MII_DP83822_GENCFG 0x465
#define MII_DP83822_SOR1 0x467
@@ -106,6 +108,50 @@
#define DP83822_RX_CLK_SHIFT BIT(12)
#define DP83822_TX_CLK_SHIFT BIT(11)
+/* MLEDCR bits */
+#define DP83822_MLEDCR_CFG GENMASK(6, 3)
+#define DP83822_MLEDCR_ROUTE GENMASK(1, 0)
+#define DP83822_MLEDCR_ROUTE_LED_0 DP83822_MLEDCR_ROUTE
+
+/* LEDCFG1 bits */
+#define DP83822_LEDCFG1_LED1_CTRL GENMASK(11, 8)
+#define DP83822_LEDCFG1_LED3_CTRL GENMASK(7, 4)
+
+/* IOCTRL1 bits */
+#define DP83822_IOCTRL1_GPIO3_CTRL GENMASK(10, 8)
+#define DP83822_IOCTRL1_GPIO3_CTRL_LED3 BIT(0)
+#define DP83822_IOCTRL1_GPIO1_CTRL GENMASK(2, 0)
+#define DP83822_IOCTRL1_GPIO1_CTRL_LED_1 BIT(0)
+
+/* IOCTRL2 bits */
+#define DP83822_IOCTRL2_GPIO2_CLK_SRC GENMASK(6, 4)
+#define DP83822_IOCTRL2_GPIO2_CTRL GENMASK(2, 0)
+#define DP83822_IOCTRL2_GPIO2_CTRL_CLK_REF GENMASK(1, 0)
+#define DP83822_IOCTRL2_GPIO2_CTRL_MLED BIT(0)
+
+#define DP83822_CLK_SRC_MAC_IF 0x0
+#define DP83822_CLK_SRC_XI 0x1
+#define DP83822_CLK_SRC_INT_REF 0x2
+#define DP83822_CLK_SRC_RMII_MASTER_MODE_REF 0x4
+#define DP83822_CLK_SRC_FREE_RUNNING 0x6
+#define DP83822_CLK_SRC_RECOVERED 0x7
+
+#define DP83822_LED_FN_LINK 0x0 /* Link established */
+#define DP83822_LED_FN_RX_TX 0x1 /* Receive or Transmit activity */
+#define DP83822_LED_FN_TX 0x2 /* Transmit activity */
+#define DP83822_LED_FN_RX 0x3 /* Receive activity */
+#define DP83822_LED_FN_COLLISION 0x4 /* Collision detected */
+#define DP83822_LED_FN_LINK_100_BTX 0x5 /* 100 BTX link established */
+#define DP83822_LED_FN_LINK_10_BT 0x6 /* 10BT link established */
+#define DP83822_LED_FN_FULL_DUPLEX 0x7 /* Full duplex */
+#define DP83822_LED_FN_LINK_RX_TX 0x8 /* Link established, blink for rx or tx activity */
+#define DP83822_LED_FN_ACTIVE_STRETCH 0x9 /* Active Stretch Signal */
+#define DP83822_LED_FN_MII_LINK 0xa /* MII LINK (100BT+FD) */
+#define DP83822_LED_FN_LPI_MODE 0xb /* LPI Mode (EEE) */
+#define DP83822_LED_FN_RX_TX_ERR 0xc /* TX/RX MII Error */
+#define DP83822_LED_FN_LINK_LOST 0xd /* Link Lost */
+#define DP83822_LED_FN_PRBS_ERR 0xe /* Blink for PRBS error */
+
/* SOR1 mode */
#define DP83822_STRAP_MODE1 0
#define DP83822_STRAP_MODE2 BIT(0)
@@ -134,6 +180,13 @@
ADVERTISED_FIBRE | \
ADVERTISED_Pause | ADVERTISED_Asym_Pause)
+#define DP83822_MAX_LED_PINS 4
+
+#define DP83822_LED_INDEX_LED_0 0
+#define DP83822_LED_INDEX_LED_1_GPIO1 1
+#define DP83822_LED_INDEX_COL_GPIO2 2
+#define DP83822_LED_INDEX_RX_D3_GPIO3 3
+
struct dp83822_private {
bool fx_signal_det_low;
int fx_enabled;
@@ -141,6 +194,9 @@ struct dp83822_private {
u8 cfg_dac_minus;
u8 cfg_dac_plus;
struct ethtool_wolinfo wol;
+ bool set_gpio2_clk_out;
+ u32 gpio2_clk_out;
+ bool led_pin_enable[DP83822_MAX_LED_PINS];
};
static int dp83822_config_wol(struct phy_device *phydev,
@@ -159,14 +215,14 @@ static int dp83822_config_wol(struct phy_device *phydev,
/* MAC addresses start with byte 5, but stored in mac[0].
* 822 PHYs store bytes 4|5, 2|3, 0|1
*/
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA1,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA1,
(mac[1] << 8) | mac[0]);
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA2,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA2,
(mac[3] << 8) | mac[2]);
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA3,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_DA3,
(mac[5] << 8) | mac[4]);
- value = phy_read_mmd(phydev, DP83822_DEVADDR,
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_WOL_CFG);
if (wol->wolopts & WAKE_MAGIC)
value |= DP83822_WOL_MAGIC_EN;
@@ -174,13 +230,13 @@ static int dp83822_config_wol(struct phy_device *phydev,
value &= ~DP83822_WOL_MAGIC_EN;
if (wol->wolopts & WAKE_MAGICSECURE) {
- phy_write_mmd(phydev, DP83822_DEVADDR,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP1,
(wol->sopass[1] << 8) | wol->sopass[0]);
- phy_write_mmd(phydev, DP83822_DEVADDR,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP2,
(wol->sopass[3] << 8) | wol->sopass[2]);
- phy_write_mmd(phydev, DP83822_DEVADDR,
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP3,
(wol->sopass[5] << 8) | wol->sopass[4]);
value |= DP83822_WOL_SECURE_ON;
@@ -194,10 +250,10 @@ static int dp83822_config_wol(struct phy_device *phydev,
value |= DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
DP83822_WOL_CLR_INDICATION;
- return phy_write_mmd(phydev, DP83822_DEVADDR,
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_WOL_CFG, value);
} else {
- return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_WOL_CFG,
DP83822_WOL_EN |
DP83822_WOL_MAGIC_EN |
@@ -226,23 +282,23 @@ static void dp83822_get_wol(struct phy_device *phydev,
wol->supported = (WAKE_MAGIC | WAKE_MAGICSECURE);
wol->wolopts = 0;
- value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG);
if (value & DP83822_WOL_MAGIC_EN)
wol->wolopts |= WAKE_MAGIC;
if (value & DP83822_WOL_SECURE_ON) {
- sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP1);
wol->sopass[0] = (sopass_val & 0xff);
wol->sopass[1] = (sopass_val >> 8);
- sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP2);
wol->sopass[2] = (sopass_val & 0xff);
wol->sopass[3] = (sopass_val >> 8);
- sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ sopass_val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RXSOP3);
wol->sopass[4] = (sopass_val & 0xff);
wol->sopass[5] = (sopass_val >> 8);
@@ -405,6 +461,48 @@ static int dp83822_read_status(struct phy_device *phydev)
return 0;
}
+static int dp83822_config_init_leds(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ int ret;
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_LED_0]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_MLEDCR,
+ DP83822_MLEDCR_ROUTE,
+ FIELD_PREP(DP83822_MLEDCR_ROUTE,
+ DP83822_MLEDCR_ROUTE_LED_0));
+ if (ret)
+ return ret;
+ } else if (dp83822->led_pin_enable[DP83822_LED_INDEX_COL_GPIO2]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL2,
+ DP83822_IOCTRL2_GPIO2_CTRL,
+ FIELD_PREP(DP83822_IOCTRL2_GPIO2_CTRL,
+ DP83822_IOCTRL2_GPIO2_CTRL_MLED));
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_LED_1_GPIO1]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL1,
+ DP83822_IOCTRL1_GPIO1_CTRL,
+ FIELD_PREP(DP83822_IOCTRL1_GPIO1_CTRL,
+ DP83822_IOCTRL1_GPIO1_CTRL_LED_1));
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_RX_D3_GPIO3]) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL1,
+ DP83822_IOCTRL1_GPIO3_CTRL,
+ FIELD_PREP(DP83822_IOCTRL1_GPIO3_CTRL,
+ DP83822_IOCTRL1_GPIO3_CTRL_LED3));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int dp83822_config_init(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
@@ -415,6 +513,19 @@ static int dp83822_config_init(struct phy_device *phydev)
int err = 0;
int bmcr;
+ if (dp83822->set_gpio2_clk_out)
+ phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_IOCTRL2,
+ DP83822_IOCTRL2_GPIO2_CTRL |
+ DP83822_IOCTRL2_GPIO2_CLK_SRC,
+ FIELD_PREP(DP83822_IOCTRL2_GPIO2_CTRL,
+ DP83822_IOCTRL2_GPIO2_CTRL_CLK_REF) |
+ FIELD_PREP(DP83822_IOCTRL2_GPIO2_CLK_SRC,
+ dp83822->gpio2_clk_out));
+
+ err = dp83822_config_init_leds(phydev);
+ if (err)
+ return err;
+
if (phy_interface_is_rgmii(phydev)) {
rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
true);
@@ -430,18 +541,18 @@ static int dp83822_config_init(struct phy_device *phydev)
if (tx_int_delay <= 0)
rgmii_delay |= DP83822_TX_CLK_SHIFT;
- err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ err = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay);
if (err)
return err;
- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
if (err)
return err;
} else {
- err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
if (err)
@@ -496,7 +607,7 @@ static int dp83822_config_init(struct phy_device *phydev)
return err;
if (dp83822->fx_signal_det_low) {
- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
MII_DP83822_GENCFG,
DP83822_SIG_DET_LOW);
if (err)
@@ -514,10 +625,10 @@ static int dp8382x_config_rmii_mode(struct phy_device *phydev)
if (!device_property_read_string(dev, "ti,rmii-mode", &of_val)) {
if (strcmp(of_val, "master") == 0) {
- ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_SEL);
} else if (strcmp(of_val, "slave") == 0) {
- ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_SEL);
} else {
phydev_err(phydev, "Invalid value for ti,rmii-mode property (%s)\n",
@@ -539,7 +650,7 @@ static int dp83826_config_init(struct phy_device *phydev)
int ret;
if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
- ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_EN);
if (ret)
return ret;
@@ -548,7 +659,7 @@ static int dp83826_config_init(struct phy_device *phydev)
if (ret)
return ret;
} else {
- ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_RCSR,
DP83822_RMII_MODE_EN);
if (ret)
return ret;
@@ -560,7 +671,7 @@ static int dp83826_config_init(struct phy_device *phydev)
FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_5_TO_4,
dp83822->cfg_dac_minus));
mask = DP83826_VOD_CFG1_MINUS_MDIX_MASK | DP83826_VOD_CFG1_MINUS_MDI_MASK;
- ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG1, mask, val);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG1, mask, val);
if (ret)
return ret;
@@ -568,7 +679,7 @@ static int dp83826_config_init(struct phy_device *phydev)
FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_3_TO_0,
dp83822->cfg_dac_minus));
mask = DP83826_VOD_CFG2_MINUS_MDIX_MASK;
- ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG2, mask, val);
if (ret)
return ret;
}
@@ -577,7 +688,7 @@ static int dp83826_config_init(struct phy_device *phydev)
val = FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDIX_MASK, dp83822->cfg_dac_plus) |
FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDI_MASK, dp83822->cfg_dac_plus);
mask = DP83826_VOD_CFG2_PLUS_MDIX_MASK | DP83826_VOD_CFG2_PLUS_MDI_MASK;
- ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83826_VOD_CFG2, mask, val);
if (ret)
return ret;
}
@@ -609,10 +720,66 @@ static int dp83822_phy_reset(struct phy_device *phydev)
}
#ifdef CONFIG_OF_MDIO
+static int dp83822_of_init_leds(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct dp83822_private *dp83822 = phydev->priv;
+ struct device_node *leds;
+ u32 index;
+ int err;
+
+ if (!node)
+ return 0;
+
+ leds = of_get_child_by_name(node, "leds");
+ if (!leds)
+ return 0;
+
+ for_each_available_child_of_node_scoped(leds, led) {
+ err = of_property_read_u32(led, "reg", &index);
+ if (err) {
+ of_node_put(leds);
+ return err;
+ }
+
+ if (index <= DP83822_LED_INDEX_RX_D3_GPIO3) {
+ dp83822->led_pin_enable[index] = true;
+ } else {
+ of_node_put(leds);
+ return -EINVAL;
+ }
+ }
+
+ of_node_put(leds);
+ /* LED_0 and COL(GPIO2) use the MLED function. MLED can be routed to
+ * only one of these two pins at a time.
+ */
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_LED_0] &&
+ dp83822->led_pin_enable[DP83822_LED_INDEX_COL_GPIO2]) {
+ phydev_err(phydev, "LED_0 and COL(GPIO2) cannot be used as LED output at the same time\n");
+ return -EINVAL;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_COL_GPIO2] &&
+ dp83822->set_gpio2_clk_out) {
+ phydev_err(phydev, "COL(GPIO2) cannot be used as LED output, already used as clock output\n");
+ return -EINVAL;
+ }
+
+ if (dp83822->led_pin_enable[DP83822_LED_INDEX_RX_D3_GPIO3] &&
+ phydev->interface != PHY_INTERFACE_MODE_RMII) {
+ phydev_err(phydev, "RX_D3 can only be used as LED output when in RMII mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dp83822_of_init(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
+ const char *of_val;
/* Signal detection for the PHY is only enabled if the FX_EN and the
* SD_EN pins are strapped. Signal detection can only enabled if FX_EN
@@ -625,7 +792,30 @@ static int dp83822_of_init(struct phy_device *phydev)
dp83822->fx_enabled = device_property_present(dev,
"ti,fiber-mode");
- return 0;
+ if (!device_property_read_string(dev, "ti,gpio2-clk-out", &of_val)) {
+ if (strcmp(of_val, "mac-if") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_MAC_IF;
+ } else if (strcmp(of_val, "xi") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_XI;
+ } else if (strcmp(of_val, "int-ref") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_INT_REF;
+ } else if (strcmp(of_val, "rmii-master-mode-ref") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_RMII_MASTER_MODE_REF;
+ } else if (strcmp(of_val, "free-running") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_FREE_RUNNING;
+ } else if (strcmp(of_val, "recovered") == 0) {
+ dp83822->gpio2_clk_out = DP83822_CLK_SRC_RECOVERED;
+ } else {
+ phydev_err(phydev,
+ "Invalid value for ti,gpio2-clk-out property (%s)\n",
+ of_val);
+ return -EINVAL;
+ }
+
+ dp83822->set_gpio2_clk_out = true;
+ }
+
+ return dp83822_of_init_leds(phydev);
}
static int dp83826_to_dac_minus_one_regval(int percent)
@@ -673,7 +863,7 @@ static int dp83822_read_straps(struct phy_device *phydev)
int fx_enabled, fx_sd_enable;
int val;
- val = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_SOR1);
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_SOR1);
if (val < 0)
return val;
@@ -723,7 +913,9 @@ static int dp83822_probe(struct phy_device *phydev)
if (ret)
return ret;
- dp83822_of_init(phydev);
+ ret = dp83822_of_init(phydev);
+ if (ret)
+ return ret;
if (dp83822->fx_enabled)
phydev->port = PORT_FIBRE;
@@ -748,7 +940,7 @@ static int dp83822_suspend(struct phy_device *phydev)
{
int value;
- value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG);
if (!(value & DP83822_WOL_EN))
genphy_suspend(phydev);
@@ -762,14 +954,138 @@ static int dp83822_resume(struct phy_device *phydev)
genphy_resume(phydev);
- value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+ value = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG);
- phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, value |
+ phy_write_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_WOL_CFG, value |
DP83822_WOL_CLR_INDICATION);
return 0;
}
+static int dp83822_led_mode(u8 index, unsigned long rules)
+{
+ switch (rules) {
+ case BIT(TRIGGER_NETDEV_LINK):
+ return DP83822_LED_FN_LINK;
+ case BIT(TRIGGER_NETDEV_LINK_10):
+ return DP83822_LED_FN_LINK_10_BT;
+ case BIT(TRIGGER_NETDEV_LINK_100):
+ return DP83822_LED_FN_LINK_100_BTX;
+ case BIT(TRIGGER_NETDEV_FULL_DUPLEX):
+ return DP83822_LED_FN_FULL_DUPLEX;
+ case BIT(TRIGGER_NETDEV_TX):
+ return DP83822_LED_FN_TX;
+ case BIT(TRIGGER_NETDEV_RX):
+ return DP83822_LED_FN_RX;
+ case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return DP83822_LED_FN_RX_TX;
+ case BIT(TRIGGER_NETDEV_TX_ERR) | BIT(TRIGGER_NETDEV_RX_ERR):
+ return DP83822_LED_FN_RX_TX_ERR;
+ case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return DP83822_LED_FN_LINK_RX_TX;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dp83822_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = dp83822_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ return 0;
+}
+
+static int dp83822_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = dp83822_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ if (index == DP83822_LED_INDEX_LED_0 || index == DP83822_LED_INDEX_COL_GPIO2)
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MII_DP83822_MLEDCR, DP83822_MLEDCR_CFG,
+ FIELD_PREP(DP83822_MLEDCR_CFG, mode));
+ else if (index == DP83822_LED_INDEX_LED_1_GPIO1)
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MII_DP83822_LEDCFG1,
+ DP83822_LEDCFG1_LED1_CTRL,
+ FIELD_PREP(DP83822_LEDCFG1_LED1_CTRL,
+ mode));
+ else
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ MII_DP83822_LEDCFG1,
+ DP83822_LEDCFG1_LED3_CTRL,
+ FIELD_PREP(DP83822_LEDCFG1_LED3_CTRL,
+ mode));
+}
+
+static int dp83822_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ if (index == DP83822_LED_INDEX_LED_0 || index == DP83822_LED_INDEX_COL_GPIO2) {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_MLEDCR);
+ if (val < 0)
+ return val;
+
+ val = FIELD_GET(DP83822_MLEDCR_CFG, val);
+ } else {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_LEDCFG1);
+ if (val < 0)
+ return val;
+
+ if (index == DP83822_LED_INDEX_LED_1_GPIO1)
+ val = FIELD_GET(DP83822_LEDCFG1_LED1_CTRL, val);
+ else
+ val = FIELD_GET(DP83822_LEDCFG1_LED3_CTRL, val);
+ }
+
+ switch (val) {
+ case DP83822_LED_FN_LINK:
+ *rules = BIT(TRIGGER_NETDEV_LINK);
+ break;
+ case DP83822_LED_FN_LINK_10_BT:
+ *rules = BIT(TRIGGER_NETDEV_LINK_10);
+ break;
+ case DP83822_LED_FN_LINK_100_BTX:
+ *rules = BIT(TRIGGER_NETDEV_LINK_100);
+ break;
+ case DP83822_LED_FN_FULL_DUPLEX:
+ *rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX);
+ break;
+ case DP83822_LED_FN_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX);
+ break;
+ case DP83822_LED_FN_RX:
+ *rules = BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83822_LED_FN_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83822_LED_FN_RX_TX_ERR:
+ *rules = BIT(TRIGGER_NETDEV_TX_ERR) | BIT(TRIGGER_NETDEV_RX_ERR);
+ break;
+ case DP83822_LED_FN_LINK_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX);
+ break;
+ default:
+ *rules = 0;
+ break;
+ }
+
+ return 0;
+}
+
#define DP83822_PHY_DRIVER(_id, _name) \
{ \
PHY_ID_MATCH_MODEL(_id), \
@@ -785,6 +1101,9 @@ static int dp83822_resume(struct phy_device *phydev)
.handle_interrupt = dp83822_handle_interrupt, \
.suspend = dp83822_suspend, \
.resume = dp83822_resume, \
+ .led_hw_is_supported = dp83822_led_hw_is_supported, \
+ .led_hw_control_set = dp83822_led_hw_control_set, \
+ .led_hw_control_get = dp83822_led_hw_control_get, \
}
#define DP83825_PHY_DRIVER(_id, _name) \
@@ -830,7 +1149,7 @@ static struct phy_driver dp83822_driver[] = {
};
module_phy_driver(dp83822_driver);
-static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83822_tbl[] = {
{ DP83822_PHY_ID, 0xfffffff0 },
{ DP83825I_PHY_ID, 0xfffffff0 },
{ DP83826C_PHY_ID, 0xfffffff0 },
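The LED plumbing above leans on FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> to pack a function code into the per-pin control fields. A userspace approximation of those helpers, programming the LED_1 field the way dp83822_led_hw_control_set() does (the macro definitions are simplified stand-ins, not the kernel's):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel's GENMASK/FIELD_PREP/FIELD_GET. */
#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define LEDCFG1_LED1_CTRL	GENMASK(11, 8)
#define LED_FN_LINK_RX_TX	0x8	/* link up, blink on activity */

int main(void)
{
	uint16_t reg = 0;

	/* Route the "link + activity" function to LED_1. */
	reg = (reg & ~LEDCFG1_LED1_CTRL) |
	      FIELD_PREP(LEDCFG1_LED1_CTRL, LED_FN_LINK_RX_TX);
	printf("LEDCFG1 = 0x%04x, LED1 fn = 0x%x\n",
	       reg, FIELD_GET(LEDCFG1_LED1_CTRL, reg));
	return 0;
}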
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 351411f0aa6f..d88b1999d596 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -123,7 +123,7 @@ static int dp83848_config_init(struct phy_device *phydev)
return 0;
}
-static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
{ TI_DP83620_PHY_ID, 0xfffffff0 },
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 4120385c5a79..c1451df430ac 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -1210,7 +1210,7 @@ static struct phy_driver dp83867_driver[] = {
};
module_phy_driver(dp83867_driver);
-static struct mdio_device_id __maybe_unused dp83867_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83867_tbl[] = {
{ DP83867_PHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index b6b38caf9c0e..a62cd838a9ea 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -928,7 +928,7 @@ static struct phy_driver dp83869_driver[] = {
};
module_phy_driver(dp83869_driver);
-static struct mdio_device_id __maybe_unused dp83869_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83869_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83869_PHY_ID) },
{ PHY_ID_MATCH_MODEL(DP83561_PHY_ID) },
{ }
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 7ea32fb77190..e480c2a07450 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -403,7 +403,7 @@ static struct phy_driver dp83811_driver[] = {
};
module_phy_driver(dp83811_driver);
-static struct mdio_device_id __maybe_unused dp83811_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83811_tbl[] = {
{ DP83TC811_PHY_ID, 0xfffffff0 },
{ },
};
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
index 92aa3a2b9744..a42af9c168ec 100644
--- a/drivers/net/phy/dp83td510.c
+++ b/drivers/net/phy/dp83td510.c
@@ -34,6 +34,29 @@
#define DP83TD510E_CTRL_HW_RESET BIT(15)
#define DP83TD510E_CTRL_SW_RESET BIT(14)
+/*
+ * DP83TD510E_PKT_STAT_x registers correspond to similarly named registers
+ * in the datasheet (PKT_STAT_1 through PKT_STAT_6). These registers store
+ * 32-bit or 16-bit counters for TX and RX statistics and must be read in
+ * sequence to ensure the counters are cleared correctly.
+ *
+ * - DP83TD510E_PKT_STAT_1: Contains TX packet count bits [15:0].
+ * - DP83TD510E_PKT_STAT_2: Contains TX packet count bits [31:16].
+ * - DP83TD510E_PKT_STAT_3: Contains TX error packet count.
+ * - DP83TD510E_PKT_STAT_4: Contains RX packet count bits [15:0].
+ * - DP83TD510E_PKT_STAT_5: Contains RX packet count bits [31:16].
+ * - DP83TD510E_PKT_STAT_6: Contains RX error packet count.
+ *
+ * Keeping the register names as defined in the datasheet helps maintain
+ * clarity and alignment with the documentation.
+ */
+#define DP83TD510E_PKT_STAT_1 0x12b
+#define DP83TD510E_PKT_STAT_2 0x12c
+#define DP83TD510E_PKT_STAT_3 0x12d
+#define DP83TD510E_PKT_STAT_4 0x12e
+#define DP83TD510E_PKT_STAT_5 0x12f
+#define DP83TD510E_PKT_STAT_6 0x130
+
#define DP83TD510E_AN_STAT_1 0x60c
#define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15)
@@ -58,8 +81,16 @@ static const u16 dp83td510_mse_sqi_map[] = {
0x0000 /* 24dB =< SNR */
};
+struct dp83td510_stats {
+ u64 tx_pkt_cnt;
+ u64 tx_err_pkt_cnt;
+ u64 rx_pkt_cnt;
+ u64 rx_err_pkt_cnt;
+};
+
struct dp83td510_priv {
bool alcd_test_active;
+ struct dp83td510_stats stats;
};
/* Time Domain Reflectometry (TDR) Functionality of DP83TD510 PHY
@@ -177,6 +208,85 @@ struct dp83td510_priv {
#define DP83TD510E_ALCD_COMPLETE BIT(15)
#define DP83TD510E_ALCD_CABLE_LENGTH GENMASK(10, 0)
+/**
+ * dp83td510_update_stats - Update the PHY statistics for the DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * The function reads the PHY statistics registers and updates the statistics
+ * structure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83td510_update_stats(struct phy_device *phydev)
+{
+ struct dp83td510_priv *priv = phydev->priv;
+ u32 count;
+ int ret;
+
+ /* The DP83TD510E_PKT_STAT registers are divided into two groups:
+ * - Group 1 (TX stats): DP83TD510E_PKT_STAT_1 to DP83TD510E_PKT_STAT_3
+ * - Group 2 (RX stats): DP83TD510E_PKT_STAT_4 to DP83TD510E_PKT_STAT_6
+ *
+ * Registers in each group are cleared only after reading them in a
+ * plain sequence (e.g., 1, 2, 3 for Group 1 or 4, 5, 6 for Group 2).
+ * Any deviation from the sequence, such as reading 1, 2, 1, 2, 3, will
+ * prevent the group from being cleared. Additionally, the counters
+ * for a group are frozen as soon as the first register in that group
+ * is accessed.
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_1);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_2);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.tx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_3);
+ if (ret < 0)
+ return ret;
+ /* tx_err_pkt_cnt */
+ priv->stats.tx_err_pkt_cnt += ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_4);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_5);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.rx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PKT_STAT_6);
+ if (ret < 0)
+ return ret;
+ /* rx_err_pkt_cnt */
+ priv->stats.rx_err_pkt_cnt += ret;
+
+ return 0;
+}
+
+static void dp83td510_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct dp83td510_priv *priv = phydev->priv;
+
+ stats->tx_packets = priv->stats.tx_pkt_cnt;
+ stats->tx_errors = priv->stats.tx_err_pkt_cnt;
+ stats->rx_packets = priv->stats.rx_pkt_cnt;
+ stats->rx_errors = priv->stats.rx_err_pkt_cnt;
+}
+
static int dp83td510_config_intr(struct phy_device *phydev)
{
int ret;
@@ -599,13 +709,15 @@ static struct phy_driver dp83td510_driver[] = {
.get_sqi_max = dp83td510_get_sqi_max,
.cable_test_start = dp83td510_cable_test_start,
.cable_test_get_status = dp83td510_cable_test_get_status,
+ .get_phy_stats = dp83td510_get_phy_stats,
+ .update_stats = dp83td510_update_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
module_phy_driver(dp83td510_driver);
-static struct mdio_device_id __maybe_unused dp83td510_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83td510_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index 0ef4d7dba065..050f4537d140 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -51,6 +51,9 @@
/* Register 0x0405: Unknown Register */
#define DP83TG720S_UNKNOWN_0405 0x405
+#define DP83TG720S_LINK_QUAL_3 0x547
+#define DP83TG720S_LINK_LOSS_CNT_MASK GENMASK(15, 10)
+
/* Register 0x0576: TDR Master Link Down Control */
#define DP83TG720S_TDR_MASTER_LINK_DOWN 0x576
@@ -60,6 +63,29 @@
/* In RGMII mode, Enable or disable the internal delay for TXD */
#define DP83TG720S_RGMII_TX_CLK_SEL BIT(0)
+/*
+ * DP83TG720S_PKT_STAT_x registers correspond to similarly named registers
+ * in the datasheet (PKT_STAT_1 through PKT_STAT_6). These registers store
+ * 32-bit or 16-bit counters for TX and RX statistics and must be read in
+ * sequence to ensure the counters are cleared correctly.
+ *
+ * - DP83TG720S_PKT_STAT_1: Contains TX packet count bits [15:0].
+ * - DP83TG720S_PKT_STAT_2: Contains TX packet count bits [31:16].
+ * - DP83TG720S_PKT_STAT_3: Contains TX error packet count.
+ * - DP83TG720S_PKT_STAT_4: Contains RX packet count bits [15:0].
+ * - DP83TG720S_PKT_STAT_5: Contains RX packet count bits [31:16].
+ * - DP83TG720S_PKT_STAT_6: Contains RX error packet count.
+ *
+ * Keeping the register names as defined in the datasheet helps maintain
+ * clarity and alignment with the documentation.
+ */
+#define DP83TG720S_PKT_STAT_1 0x639
+#define DP83TG720S_PKT_STAT_2 0x63a
+#define DP83TG720S_PKT_STAT_3 0x63b
+#define DP83TG720S_PKT_STAT_4 0x63c
+#define DP83TG720S_PKT_STAT_5 0x63d
+#define DP83TG720S_PKT_STAT_6 0x63e
+
/* Register 0x083F: Unknown Register */
#define DP83TG720S_UNKNOWN_083F 0x83f
@@ -69,6 +95,113 @@
#define DP83TG720_SQI_MAX 7
+struct dp83tg720_stats {
+ u64 link_loss_cnt;
+ u64 tx_pkt_cnt;
+ u64 tx_err_pkt_cnt;
+ u64 rx_pkt_cnt;
+ u64 rx_err_pkt_cnt;
+};
+
+struct dp83tg720_priv {
+ struct dp83tg720_stats stats;
+};
+
+/**
+ * dp83tg720_update_stats - Update the PHY statistics for the DP83TG720 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * The function reads the PHY statistics registers and updates the statistics
+ * structure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83tg720_update_stats(struct phy_device *phydev)
+{
+ struct dp83tg720_priv *priv = phydev->priv;
+ u32 count;
+ int ret;
+
+ /* Read the link loss count */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_LINK_QUAL_3);
+ if (ret < 0)
+ return ret;
+ /* link_loss_cnt */
+ count = FIELD_GET(DP83TG720S_LINK_LOSS_CNT_MASK, ret);
+ priv->stats.link_loss_cnt += count;
+
+ /* The DP83TG720S_PKT_STAT registers are divided into two groups:
+ * - Group 1 (TX stats): DP83TG720S_PKT_STAT_1 to DP83TG720S_PKT_STAT_3
+ * - Group 2 (RX stats): DP83TG720S_PKT_STAT_4 to DP83TG720S_PKT_STAT_6
+ *
+ * Registers in each group are cleared only after reading them in a
+ * plain sequence (e.g., 1, 2, 3 for Group 1 or 4, 5, 6 for Group 2).
+ * Any deviation from the sequence, such as reading 1, 2, 1, 2, 3, will
+ * prevent the group from being cleared. Additionally, the counters
+ * for a group are frozen as soon as the first register in that group
+ * is accessed.
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_1);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_2);
+ if (ret < 0)
+ return ret;
+ /* tx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.tx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_3);
+ if (ret < 0)
+ return ret;
+ /* tx_err_pkt_cnt */
+ priv->stats.tx_err_pkt_cnt += ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_4);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_15_0 */
+ count = ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_5);
+ if (ret < 0)
+ return ret;
+ /* rx_pkt_cnt_31_16 */
+ count |= ret << 16;
+ priv->stats.rx_pkt_cnt += count;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_PKT_STAT_6);
+ if (ret < 0)
+ return ret;
+ /* rx_err_pkt_cnt */
+ priv->stats.rx_err_pkt_cnt += ret;
+
+ return 0;
+}
+
+static void dp83tg720_get_link_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+ struct dp83tg720_priv *priv = phydev->priv;
+
+ link_stats->link_down_events = priv->stats.link_loss_cnt;
+}
+
+static void dp83tg720_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct dp83tg720_priv *priv = phydev->priv;
+
+ stats->tx_packets = priv->stats.tx_pkt_cnt;
+ stats->tx_errors = priv->stats.tx_err_pkt_cnt;
+ stats->rx_packets = priv->stats.rx_pkt_cnt;
+ stats->rx_errors = priv->stats.rx_err_pkt_cnt;
+}
+
/**
* dp83tg720_cable_test_start - Start the cable test for the DP83TG720 PHY.
* @phydev: Pointer to the phy_device structure.
@@ -182,6 +315,11 @@ static int dp83tg720_cable_test_get_status(struct phy_device *phydev,
ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, stat);
+ /* save the current stats before resetting the PHY */
+ ret = dp83tg720_update_stats(phydev);
+ if (ret)
+ return ret;
+
return phy_init_hw(phydev);
}
@@ -217,6 +355,11 @@ static int dp83tg720_read_status(struct phy_device *phydev)
phy_sts = phy_read(phydev, DP83TG720S_MII_REG_10);
phydev->link = !!(phy_sts & DP83TG720S_LINK_STATUS);
if (!phydev->link) {
+ /* save the current stats before resetting the PHY */
+ ret = dp83tg720_update_stats(phydev);
+ if (ret)
+ return ret;
+
/* According to the "DP83TC81x, DP83TG72x Software
* Implementation Guide", the PHY needs to be reset after a
* link loss or if no link is created after at least 100ms.
@@ -341,12 +484,27 @@ static int dp83tg720_config_init(struct phy_device *phydev)
return genphy_c45_pma_baset1_read_master_slave(phydev);
}
+static int dp83tg720_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct dp83tg720_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static struct phy_driver dp83tg720_driver[] = {
{
PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID),
.name = "TI DP83TG720S",
.flags = PHY_POLL_CABLE_TEST,
+ .probe = dp83tg720_probe,
.config_aneg = dp83tg720_config_aneg,
.read_status = dp83tg720_read_status,
.get_features = genphy_c45_pma_read_ext_abilities,
@@ -355,13 +513,16 @@ static struct phy_driver dp83tg720_driver[] = {
.get_sqi_max = dp83tg720_get_sqi_max,
.cable_test_start = dp83tg720_cable_test_start,
.cable_test_get_status = dp83tg720_cable_test_get_status,
+ .get_link_stats = dp83tg720_get_link_stats,
+ .get_phy_stats = dp83tg720_get_phy_stats,
+ .update_stats = dp83tg720_update_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
module_phy_driver(dp83tg720_driver);
-static struct mdio_device_id __maybe_unused dp83tg720_tbl[] = {
+static const struct mdio_device_id __maybe_unused dp83tg720_tbl[] = {
{ PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index be1b71d7cab7..6cd8d77586fd 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -94,7 +94,7 @@ static struct phy_driver et1011c_driver[] = { {
module_phy_driver(et1011c_driver);
-static struct mdio_device_id __maybe_unused et1011c_tbl[] = {
+static const struct mdio_device_id __maybe_unused et1011c_tbl[] = {
{ 0x0282f014, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index ee438b71a0b4..bbcc7d2b54cd 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -623,7 +623,7 @@ static struct phy_driver icplus_driver[] = {
module_phy_driver(icplus_driver);
-static struct mdio_device_id __maybe_unused icplus_tbl[] = {
+static const struct mdio_device_id __maybe_unused icplus_tbl[] = {
{ PHY_ID_MATCH_MODEL(IP175C_PHY_ID) },
{ PHY_ID_MATCH_MODEL(IP1001_PHY_ID) },
{ PHY_ID_MATCH_EXACT(IP101A_PHY_ID) },
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index e6ed2413e514..a44771e8acdc 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -691,7 +691,7 @@ static struct phy_driver xway_gphy[] = {
};
module_phy_driver(xway_gphy);
-static struct mdio_device_id __maybe_unused xway_gphy_tbl[] = {
+static const struct mdio_device_id __maybe_unused xway_gphy_tbl[] = {
{ PHY_ID_PHY11G_1_3, 0xffffffff },
{ PHY_ID_PHY22F_1_3, 0xffffffff },
{ PHY_ID_PHY11G_1_4, 0xffffffff },
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index e3bf827b7959..5251a61c8b0f 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -348,7 +348,7 @@ static struct phy_driver lxt97x_driver[] = {
module_phy_driver(lxt97x_driver);
-static struct mdio_device_id __maybe_unused lxt_tbl[] = {
+static const struct mdio_device_id __maybe_unused lxt_tbl[] = {
{ 0x78100000, 0xfffffff0 },
{ 0x001378e0, 0xfffffff0 },
{ 0x00137a10, 0xfffffff0 },
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 5107f58338af..4494b3e39ce2 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -939,7 +939,7 @@ static struct phy_driver mv88q2xxx_driver[] = {
module_phy_driver(mv88q2xxx_driver);
-static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
+static const struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
{ MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88Q2220, MARVELL_PHY_ID_MASK },
{ /*sentinel*/ }
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index 0b777cdd7078..fad2f54c1eac 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -613,7 +613,7 @@ static struct phy_driver mv2222_drivers[] = {
};
module_phy_driver(mv2222_drivers);
-static struct mdio_device_id __maybe_unused mv2222_tbl[] = {
+static const struct mdio_device_id __maybe_unused mv2222_tbl[] = {
{ MARVELL_PHY_ID_88X2222, MARVELL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index cd50cd6a7f75..44e1927de499 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -717,6 +717,48 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
return genphy_check_and_restart_aneg(phydev, changed);
}
+static unsigned int m88e1111_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ /* In 1000base-X and SGMII modes, the inband mode can be changed
+ * through the Fibre page BMCR ANENABLE bit.
+ */
+ if (interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_SGMII)
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE |
+ LINK_INBAND_BYPASS;
+
+ return 0;
+}
+
+static int m88e1111_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ u16 extsr, bmcr;
+ int err;
+
+ if (phydev->interface != PHY_INTERFACE_MODE_1000BASEX &&
+ phydev->interface != PHY_INTERFACE_MODE_SGMII)
+ return -EINVAL;
+
+ if (modes == LINK_INBAND_BYPASS)
+ extsr = MII_M1111_HWCFG_SERIAL_AN_BYPASS;
+ else
+ extsr = 0;
+
+ if (modes == LINK_INBAND_DISABLE)
+ bmcr = 0;
+ else
+ bmcr = BMCR_ANENABLE;
+
+ err = phy_modify(phydev, MII_M1111_PHY_EXT_SR,
+ MII_M1111_HWCFG_SERIAL_AN_BYPASS, extsr);
+	if (err < 0)
+		return err;
+
+ return phy_modify_paged(phydev, MII_MARVELL_FIBER_PAGE, MII_BMCR,
+ BMCR_ANENABLE, bmcr);
+}
+
static int m88e1111_config_aneg(struct phy_device *phydev)
{
int extsr = phy_read(phydev, MII_M1111_PHY_EXT_SR);
@@ -1508,7 +1550,6 @@ static int m88e1540_get_fld(struct phy_device *phydev, u8 *msecs)
static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
{
- struct ethtool_keee eee;
int val, ret;
if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF)
@@ -1518,8 +1559,7 @@ static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
/* According to the Marvell data sheet EEE must be disabled for
* Fast Link Down detection to work properly
*/
- ret = genphy_c45_ethtool_get_eee(phydev, &eee);
- if (!ret && eee.eee_enabled) {
+ if (phydev->eee_cfg.eee_enabled) {
phydev_warn(phydev, "Fast Link Down detection requires EEE to be disabled!\n");
return -EBUSY;
}
@@ -3677,6 +3717,8 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1112",
/* PHY_GBIT_FEATURES */
.probe = marvell_probe,
+ .inband_caps = m88e1111_inband_caps,
+ .config_inband = m88e1111_config_inband,
.config_init = m88e1112_config_init,
.config_aneg = marvell_config_aneg,
.config_intr = marvell_config_intr,
@@ -3698,6 +3740,8 @@ static struct phy_driver marvell_drivers[] = {
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
.probe = marvell_probe,
+ .inband_caps = m88e1111_inband_caps,
+ .config_inband = m88e1111_config_inband,
.config_init = m88e1111gbe_config_init,
.config_aneg = m88e1111_config_aneg,
.read_status = marvell_read_status,
@@ -3721,6 +3765,8 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1111 (Finisar)",
/* PHY_GBIT_FEATURES */
.probe = marvell_probe,
+ .inband_caps = m88e1111_inband_caps,
+ .config_inband = m88e1111_config_inband,
.config_init = m88e1111gbe_config_init,
.config_aneg = m88e1111_config_aneg,
.read_status = marvell_read_status,
@@ -4143,7 +4189,7 @@ static struct phy_driver marvell_drivers[] = {
module_phy_driver(marvell_drivers);
-static struct mdio_device_id __maybe_unused marvell_tbl[] = {
+static const struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E3082, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1112, MARVELL_PHY_ID_MASK },
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 6642eb642d4b..623bdb8466b8 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -1484,7 +1484,7 @@ static struct phy_driver mv3310_drivers[] = {
module_phy_driver(mv3310_drivers);
-static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
+static const struct mdio_device_id __maybe_unused mv3310_tbl[] = {
{ MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK },
{ },
diff --git a/drivers/net/phy/mediatek/mtk-ge-soc.c b/drivers/net/phy/mediatek/mtk-ge-soc.c
index 38dc898eaf7b..bdf99b327029 100644
--- a/drivers/net/phy/mediatek/mtk-ge-soc.c
+++ b/drivers/net/phy/mediatek/mtk-ge-soc.c
@@ -1356,7 +1356,7 @@ static struct phy_driver mtk_socphy_driver[] = {
module_phy_driver(mtk_socphy_driver);
-static struct mdio_device_id __maybe_unused mtk_socphy_tbl[] = {
+static const struct mdio_device_id __maybe_unused mtk_socphy_tbl[] = {
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7981) },
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7988) },
{ }
diff --git a/drivers/net/phy/mediatek/mtk-ge.c b/drivers/net/phy/mediatek/mtk-ge.c
index ed2617bc20f4..b517ca8573e7 100644
--- a/drivers/net/phy/mediatek/mtk-ge.c
+++ b/drivers/net/phy/mediatek/mtk-ge.c
@@ -93,7 +93,7 @@ static struct phy_driver mtk_gephy_driver[] = {
module_phy_driver(mtk_gephy_driver);
-static struct mdio_device_id __maybe_unused mtk_gephy_tbl[] = {
+static const struct mdio_device_id __maybe_unused mtk_gephy_tbl[] = {
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7530) },
{ PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7531) },
{ }
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index bb9b33b6bce2..962ebbbc1348 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -221,7 +221,7 @@ static struct phy_driver meson_gxl_phy[] = {
},
};
-static struct mdio_device_id __maybe_unused meson_gxl_tbl[] = {
+static const struct mdio_device_id __maybe_unused meson_gxl_tbl[] = {
{ PHY_ID_MATCH_VENDOR(0x01814400) },
{ PHY_ID_MATCH_VENDOR(0x01803301) },
{ }
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index eeb33eb181ac..9c0b1c229af6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -2006,7 +2006,7 @@ static int ksz9477_config_init(struct phy_device *phydev)
* in this switch shall be regarded as broken.
*/
if (phydev->dev_flags & MICREL_NO_EEE)
- linkmode_fill(phydev->eee_broken_modes);
+ phy_disable_eee(phydev);
return kszphy_config_init(phydev);
}
@@ -5689,7 +5689,7 @@ MODULE_DESCRIPTION("Micrel PHY driver");
MODULE_AUTHOR("David J. Choi");
MODULE_LICENSE("GPL");
-static struct mdio_device_id __maybe_unused micrel_tbl[] = {
+static const struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
{ PHY_ID_KSZ9031, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ9131, MICREL_PHY_ID_MASK },
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 691969a4910f..0e17cc458efd 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -548,7 +548,7 @@ static struct phy_driver microchip_phy_driver[] = {
module_phy_driver(microchip_phy_driver);
-static struct mdio_device_id __maybe_unused microchip_tbl[] = {
+static const struct mdio_device_id __maybe_unused microchip_tbl[] = {
{ 0x0007c132, 0xfffffff2 },
{ PHY_ID_MATCH_MODEL(PHY_ID_LAN937X_TX) },
{ }
diff --git a/drivers/net/phy/microchip_rds_ptp.c b/drivers/net/phy/microchip_rds_ptp.c
new file mode 100644
index 000000000000..2936e46531cf
--- /dev/null
+++ b/drivers/net/phy/microchip_rds_ptp.c
@@ -0,0 +1,1039 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2024 Microchip Technology
+
+#include "microchip_rds_ptp.h"
+
+static int mchp_rds_phy_read_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_read_mmd(phydev, PTP_MMD(clock), addr);
+}
+
+static int mchp_rds_phy_write_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base,
+ u16 val)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_write_mmd(phydev, PTP_MMD(clock), addr, val);
+}
+
+static int mchp_rds_phy_modify_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base,
+ u16 mask, u16 val)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_modify_mmd(phydev, PTP_MMD(clock), addr, mask, val);
+}
+
+static int mchp_rds_phy_set_bits_mmd(struct mchp_rds_ptp_clock *clock,
+ u32 offset, enum mchp_rds_ptp_base base,
+ u16 val)
+{
+ struct phy_device *phydev = clock->phydev;
+ u32 addr;
+
+ addr = (offset + ((base == MCHP_RDS_PTP_PORT) ? BASE_PORT(clock) :
+ BASE_CLK(clock)));
+
+ return phy_set_bits_mmd(phydev, PTP_MMD(clock), addr, val);
+}
+
+static int mchp_rds_ptp_flush_fifo(struct mchp_rds_ptp_clock *clock,
+ enum mchp_rds_ptp_fifo_dir dir)
+{
+ int rc;
+
+ if (dir == MCHP_RDS_PTP_EGRESS_FIFO)
+ skb_queue_purge(&clock->tx_queue);
+ else
+ skb_queue_purge(&clock->rx_queue);
+
+ for (int i = 0; i < MCHP_RDS_PTP_FIFO_SIZE; ++i) {
+ rc = mchp_rds_phy_read_mmd(clock,
+ dir == MCHP_RDS_PTP_EGRESS_FIFO ?
+ MCHP_RDS_PTP_TX_MSG_HDR2 :
+ MCHP_RDS_PTP_RX_MSG_HDR2,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return rc;
+ }
+ return mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
+ MCHP_RDS_PTP_PORT);
+}
+
+static int mchp_rds_ptp_config_intr(struct mchp_rds_ptp_clock *clock,
+ bool enable)
+{
+ /* Enable or disable ptp interrupts */
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_INT_EN,
+ MCHP_RDS_PTP_PORT,
+ enable ? MCHP_RDS_PTP_INT_ALL_MSK : 0);
+}
+
+static void mchp_rds_ptp_txtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
+ struct mchp_rds_ptp_clock,
+ mii_ts);
+
+ switch (clock->hwts_tx_type) {
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (ptp_msg_is_sync(skb, type)) {
+ kfree_skb(skb);
+ return;
+ }
+ fallthrough;
+ case HWTSTAMP_TX_ON:
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_queue_tail(&clock->tx_queue, skb);
+ break;
+ case HWTSTAMP_TX_OFF:
+ default:
+ kfree_skb(skb);
+ break;
+ }
+}
+
+static bool mchp_rds_ptp_get_sig_rx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ int type;
+
+ skb_push(skb, ETH_HLEN);
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return false;
+
+ ptp_header = ptp_parse_header(skb, type);
+ if (!ptp_header)
+ return false;
+
+ skb_pull_inline(skb, ETH_HLEN);
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+
+ return true;
+}
+
+static bool mchp_rds_ptp_match_skb(struct mchp_rds_ptp_clock *clock,
+ struct mchp_rds_ptp_rx_ts *rx_ts)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ bool rc = false;
+ u16 skb_sig;
+
+ spin_lock_irqsave(&clock->rx_queue.lock, flags);
+ skb_queue_walk_safe(&clock->rx_queue, skb, skb_tmp) {
+ if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
+ continue;
+
+ if (skb_sig != rx_ts->seq_id)
+ continue;
+
+ __skb_unlink(skb, &clock->rx_queue);
+
+ rc = true;
+ break;
+ }
+ spin_unlock_irqrestore(&clock->rx_queue.lock, flags);
+
+ if (rc) {
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
+ netif_rx(skb);
+ }
+
+ return rc;
+}
+
+static void mchp_rds_ptp_match_rx_ts(struct mchp_rds_ptp_clock *clock,
+ struct mchp_rds_ptp_rx_ts *rx_ts)
+{
+ unsigned long flags;
+
+	/* If we failed to match the skb, queue the timestamp until the
+	 * matching frame arrives
+	 */
+ if (!mchp_rds_ptp_match_skb(clock, rx_ts)) {
+ spin_lock_irqsave(&clock->rx_ts_lock, flags);
+ list_add(&rx_ts->list, &clock->rx_ts_list);
+ spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
+ } else {
+ kfree(rx_ts);
+ }
+}
+
+static void mchp_rds_ptp_match_rx_skb(struct mchp_rds_ptp_clock *clock,
+ struct sk_buff *skb)
+{
+ struct mchp_rds_ptp_rx_ts *rx_ts, *tmp, *rx_ts_var = NULL;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ unsigned long flags;
+ u16 skb_sig;
+
+ if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
+ return;
+
+ /* Iterate over all RX timestamps and match it with the received skbs */
+ spin_lock_irqsave(&clock->rx_ts_lock, flags);
+ list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
+ /* Check if we found the signature we were looking for. */
+ if (skb_sig != rx_ts->seq_id)
+ continue;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
+ netif_rx(skb);
+
+ rx_ts_var = rx_ts;
+
+ break;
+ }
+ spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
+
+ if (rx_ts_var) {
+ list_del(&rx_ts_var->list);
+ kfree(rx_ts_var);
+ } else {
+ skb_queue_tail(&clock->rx_queue, skb);
+ }
+}
+
+static bool mchp_rds_ptp_rxtstamp(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
+ struct mchp_rds_ptp_clock,
+ mii_ts);
+
+ if (clock->rx_filter == HWTSTAMP_FILTER_NONE ||
+ type == PTP_CLASS_NONE)
+ return false;
+
+ if ((type & clock->version) == 0 || (type & clock->layer) == 0)
+ return false;
+
+	/* If a match occurs here, the skb is delivered to the application
+	 * immediately. Otherwise the skb is queued, and delivery is handled
+	 * later from the interrupt handler once its timestamp arrives.
+	 * Either way the skb reaches the application, so do not return
+	 * false when the skb does not match here.
+	 */
+ mchp_rds_ptp_match_rx_skb(clock, skb);
+
+ return true;
+}
+
+static int mchp_rds_ptp_hwtstamp(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct mchp_rds_ptp_clock *clock =
+ container_of(mii_ts, struct mchp_rds_ptp_clock,
+ mii_ts);
+ struct mchp_rds_ptp_rx_ts *rx_ts, *tmp;
+ int txcfg = 0, rxcfg = 0;
+ unsigned long flags;
+ int rc;
+
+ clock->hwts_tx_type = config->tx_type;
+ clock->rx_filter = config->rx_filter;
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ clock->layer = 0;
+ clock->version = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ clock->layer = PTP_CLASS_L4;
+ clock->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ clock->layer = PTP_CLASS_L2;
+ clock->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ clock->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
+ clock->version = PTP_CLASS_V2;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Setup parsing of the frames and enable the timestamping for ptp
+ * frames
+ */
+ if (clock->layer & PTP_CLASS_L2) {
+ rxcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
+ txcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
+ }
+ if (clock->layer & PTP_CLASS_L4) {
+ rxcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
+ MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
+ txcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
+ MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
+ }
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, rxcfg);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, txcfg);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_TIMESTAMP_EN,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_TIMESTAMP_EN,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
+ if (rc < 0)
+ return rc;
+
+ if (clock->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ /* Enable / disable of the TX timestamp in the SYNC frames */
+ rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
+ MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);
+ else
+ rc = mchp_rds_phy_modify_mmd(clock, MCHP_RDS_PTP_TX_MOD,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT,
+ (u16)~MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT);
+
+ if (rc < 0)
+ return rc;
+
+	/* In case of multiple starts and stops, these need to be cleared */
+ spin_lock_irqsave(&clock->rx_ts_lock, flags);
+ list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
+ list_del(&rx_ts->list);
+ kfree(rx_ts);
+ }
+ spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
+
+ rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_INGRESS_FIFO);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_EGRESS_FIFO);
+ if (rc < 0)
+ return rc;
+
+ /* Now enable the timestamping interrupts */
+ rc = mchp_rds_ptp_config_intr(clock,
+ config->rx_filter != HWTSTAMP_FILTER_NONE);
+
+ return rc < 0 ? rc : 0;
+}
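
For reference, the filter set accepted above maps onto the standard SIOCSHWTSTAMP UAPI. A minimal userspace sketch, assuming a hypothetical interface name "eth0" and an already-open AF_INET datagram socket passed in as fd:

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

/* Request the PTP timestamping modes that mchp_rds_ptp_hwtstamp() accepts */
static int request_ptp_timestamps(int fd)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* hypothetical netdev */
	ifr.ifr_data = (char *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}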
+
+static int mchp_rds_ptp_ts_info(struct mii_timestamper *mii_ts,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
+ struct mchp_rds_ptp_clock,
+ mii_ts);
+
+ info->phc_index = ptp_clock_index(clock->ptp_clock);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+static int mchp_rds_ptp_ltc_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ struct timespec64 ts;
+ bool add = true;
+ int rc = 0;
+ u32 nsec;
+ s32 sec;
+
+	/* The HW allows adjustments of up to 15 sec, but we limit them to
+	 * 10 sec here. The reason is that for an adjustment of, say, 14 sec
+	 * and 999999999 nsec, the 8 ns added below to compensate for the
+	 * actual increment can push the value above 15 sec. Limiting the
+	 * possible adjustments avoids these corner cases.
+	 */
+ if (delta > 10000000000LL || delta < -10000000000LL) {
+		/* The time adjustment is too big, so fall back to settime */
+ u64 now;
+
+ info->gettime64(info, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ info->settime64(info, &ts);
+ return 0;
+ }
+ sec = div_u64_rem(abs(delta), NSEC_PER_SEC, &nsec);
+ if (delta < 0 && nsec != 0) {
+		/* The nsec part cannot be adjusted downwards, so adjust the
+		 * seconds part by one more and add the nanosecond complement,
+		 * letting the combined value roll over in the intended
+		 * direction
+		 */
+ sec--;
+ nsec = NSEC_PER_SEC - nsec;
+ }
+
+ /* Calculate the adjustments and the direction */
+ if (delta < 0)
+ add = false;
+
+ if (nsec > 0) {
+ /* add 8 ns to cover the likely normal increment */
+ nsec += 8;
+
+ if (nsec >= NSEC_PER_SEC) {
+ /* carry into seconds */
+ sec++;
+ nsec -= NSEC_PER_SEC;
+ }
+ }
+
+ mutex_lock(&clock->ptp_lock);
+ if (sec) {
+ sec = abs(sec);
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
+ MCHP_RDS_PTP_CLOCK, sec);
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
+ MCHP_RDS_PTP_CLOCK,
+ ((add ?
+ MCHP_RDS_PTP_STEP_ADJ_HI_DIR :
+ 0) | ((sec >> 16) &
+ GENMASK(13, 0))));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC);
+ if (rc < 0)
+ goto out_unlock;
+ }
+
+ if (nsec) {
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_LO,
+ MCHP_RDS_PTP_CLOCK,
+ nsec & GENMASK(15, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_STEP_ADJ_HI,
+ MCHP_RDS_PTP_CLOCK,
+ (nsec >> 16) & GENMASK(13, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC);
+ }
+
+out_unlock:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
+
+static int mchp_rds_ptp_ltc_adjfine(struct ptp_clock_info *info,
+ long scaled_ppm)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ u16 rate_lo, rate_hi;
+ bool faster = true;
+ u32 rate;
+ int rc;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ faster = false;
+ }
+
+ rate = MCHP_RDS_PTP_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
+ rate += (MCHP_RDS_PTP_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
+
+ rate_lo = rate & GENMASK(15, 0);
+ rate_hi = (rate >> 16) & GENMASK(13, 0);
+
+ if (faster)
+ rate_hi |= MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR;
+
+ mutex_lock(&clock->ptp_lock);
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_HI,
+ MCHP_RDS_PTP_CLOCK, rate_hi);
+ if (rc < 0)
+ goto error;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_RATE_ADJ_LO,
+ MCHP_RDS_PTP_CLOCK, rate_lo);
+ if (rc > 0)
+ rc = 0;
+error:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
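
The two-halves multiply above keeps every intermediate product within 32 bits. A plain C reference model of the computation (editorial sketch; MCHP_RDS_PTP_1PPM_FORMAT = 17179 is hard-coded for self-containment):

#include <stdint.h>

/* Equivalent to (17179 * scaled_ppm) >> 16, since
 * 17179 * (hi * 2^16 + lo) >> 16 == 17179 * hi + ((17179 * lo) >> 16),
 * with no 64-bit arithmetic needed. For scaled_ppm = 65536 (exactly
 * 1 ppm) this yields 17179.
 */
static uint32_t rds_rate_from_scaled_ppm(uint32_t scaled_ppm)
{
	uint32_t hi = scaled_ppm >> 16;    /* upper_16_bits() */
	uint32_t lo = scaled_ppm & 0xffff; /* lower_16_bits() */

	return 17179 * hi + ((17179 * lo) >> 16);
}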
+
+static int mchp_rds_ptp_ltc_gettime64(struct ptp_clock_info *info,
+ struct timespec64 *ts)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ time64_t secs;
+ int rc = 0;
+ s64 nsecs;
+
+ mutex_lock(&clock->ptp_lock);
+ /* Set read bit to 1 to save current values of 1588 local time counter
+ * into PTP LTC seconds and nanoseconds registers.
+ */
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_CLOCK_READ);
+ if (rc < 0)
+ goto out_unlock;
+
+ /* Get LTC clock values */
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_HI,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ secs = rc << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_MID,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ secs |= rc;
+ secs <<= 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_SEC_LO,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ secs |= rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_HI,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ nsecs = (rc & GENMASK(13, 0));
+ nsecs <<= 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_LTC_READ_NS_LO,
+ MCHP_RDS_PTP_CLOCK);
+ if (rc < 0)
+ goto out_unlock;
+ nsecs |= rc;
+
+ set_normalized_timespec64(ts, secs, nsecs);
+
+ if (rc > 0)
+ rc = 0;
+out_unlock:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
+
+static int mchp_rds_ptp_ltc_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct mchp_rds_ptp_clock *clock = container_of(info,
+ struct mchp_rds_ptp_clock,
+ caps);
+ int rc;
+
+ mutex_lock(&clock->ptp_lock);
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(ts->tv_sec));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_MID,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(ts->tv_sec));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_SEC_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_32_bits(ts->tv_sec) & GENMASK(15, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_LO,
+ MCHP_RDS_PTP_CLOCK,
+ lower_16_bits(ts->tv_nsec));
+ if (rc < 0)
+ goto out_unlock;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LTC_NS_HI,
+ MCHP_RDS_PTP_CLOCK,
+ upper_16_bits(ts->tv_nsec) & GENMASK(13, 0));
+ if (rc < 0)
+ goto out_unlock;
+
+ /* Set load bit to 1 to write PTP LTC seconds and nanoseconds
+ * registers to 1588 local time counter.
+ */
+ rc = mchp_rds_phy_set_bits_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD);
+ if (rc > 0)
+ rc = 0;
+out_unlock:
+ mutex_unlock(&clock->ptp_lock);
+
+ return rc;
+}
+
+static bool mchp_rds_ptp_get_sig_tx(struct sk_buff *skb, u16 *sig)
+{
+ struct ptp_header *ptp_header;
+ int type;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return false;
+
+ ptp_header = ptp_parse_header(skb, type);
+ if (!ptp_header)
+ return false;
+
+ *sig = (__force u16)(ntohs(ptp_header->sequence_id));
+
+ return true;
+}
+
+static void mchp_rds_ptp_match_tx_skb(struct mchp_rds_ptp_clock *clock,
+ u32 seconds, u32 nsec, u16 seq_id)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb, *skb_tmp;
+ unsigned long flags;
+ bool rc = false;
+ u16 skb_sig;
+
+ spin_lock_irqsave(&clock->tx_queue.lock, flags);
+ skb_queue_walk_safe(&clock->tx_queue, skb, skb_tmp) {
+ if (!mchp_rds_ptp_get_sig_tx(skb, &skb_sig))
+ continue;
+
+ if (skb_sig != seq_id)
+ continue;
+
+ __skb_unlink(skb, &clock->tx_queue);
+ rc = true;
+ break;
+ }
+ spin_unlock_irqrestore(&clock->tx_queue.lock, flags);
+
+ if (rc) {
+ shhwtstamps.hwtstamp = ktime_set(seconds, nsec);
+ skb_complete_tx_timestamp(skb, &shhwtstamps);
+ }
+}
+
+static struct mchp_rds_ptp_rx_ts *
+mchp_rds_ptp_get_rx_ts(struct mchp_rds_ptp_clock *clock)
+{
+ struct phy_device *phydev = clock->phydev;
+ struct mchp_rds_ptp_rx_ts *rx_ts = NULL;
+ u32 sec, nsec;
+ int rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ if (!(rc & MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID)) {
+ phydev_err(phydev, "RX Timestamp is not valid!\n");
+ goto error;
+ }
+ nsec = (rc & GENMASK(13, 0)) << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_NS_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ nsec |= rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ sec = rc << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_INGRESS_SEC_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+ sec |= rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_RX_MSG_HDR2,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ goto error;
+
+ rx_ts = kmalloc(sizeof(*rx_ts), GFP_KERNEL);
+ if (!rx_ts)
+ return NULL;
+
+ rx_ts->seconds = sec;
+ rx_ts->nsec = nsec;
+ rx_ts->seq_id = rc;
+
+error:
+ return rx_ts;
+}
+
+static void mchp_rds_ptp_process_rx_ts(struct mchp_rds_ptp_clock *clock)
+{
+ int caps;
+
+ do {
+ struct mchp_rds_ptp_rx_ts *rx_ts;
+
+ rx_ts = mchp_rds_ptp_get_rx_ts(clock);
+ if (rx_ts)
+ mchp_rds_ptp_match_rx_ts(clock, rx_ts);
+
+ caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
+ MCHP_RDS_PTP_PORT);
+ if (caps < 0)
+ return;
+ } while (MCHP_RDS_PTP_RX_TS_CNT(caps) > 0);
+}
+
+static bool mchp_rds_ptp_get_tx_ts(struct mchp_rds_ptp_clock *clock,
+ u32 *sec, u32 *nsec, u16 *seq)
+{
+ int rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ if (!(rc & MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID))
+ return false;
+ *nsec = (rc & GENMASK(13, 0)) << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_NS_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ *nsec = *nsec | rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_HI,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ *sec = rc << 16;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_EGRESS_SEC_LO,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+ *sec = *sec | rc;
+
+ rc = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_TX_MSG_HDR2,
+ MCHP_RDS_PTP_PORT);
+ if (rc < 0)
+ return false;
+
+ *seq = rc;
+
+ return true;
+}
+
+static void mchp_rds_ptp_process_tx_ts(struct mchp_rds_ptp_clock *clock)
+{
+ int caps;
+
+ do {
+ u32 sec, nsec;
+ u16 seq;
+
+ if (mchp_rds_ptp_get_tx_ts(clock, &sec, &nsec, &seq))
+ mchp_rds_ptp_match_tx_skb(clock, sec, nsec, seq);
+
+ caps = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_CAP_INFO,
+ MCHP_RDS_PTP_PORT);
+ if (caps < 0)
+ return;
+ } while (MCHP_RDS_PTP_TX_TS_CNT(caps) > 0);
+}
+
+int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
+ u16 reg, u16 val, bool clear)
+{
+ if (clear)
+ return phy_clear_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
+ val);
+ else
+ return phy_set_bits_mmd(clock->phydev, PTP_MMD(clock), reg,
+ val);
+}
+EXPORT_SYMBOL_GPL(mchp_rds_ptp_top_config_intr);
+
+irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock)
+{
+ int irq_sts;
+
+ /* To handle rogue interrupt scenarios */
+ if (!clock)
+ return IRQ_NONE;
+
+ do {
+ irq_sts = mchp_rds_phy_read_mmd(clock, MCHP_RDS_PTP_INT_STS,
+ MCHP_RDS_PTP_PORT);
+ if (irq_sts < 0)
+ return IRQ_NONE;
+
+ if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_EN)
+ mchp_rds_ptp_process_rx_ts(clock);
+
+ if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_EN)
+ mchp_rds_ptp_process_tx_ts(clock);
+
+ if (irq_sts & MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN)
+ mchp_rds_ptp_flush_fifo(clock,
+ MCHP_RDS_PTP_EGRESS_FIFO);
+
+ if (irq_sts & MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN)
+ mchp_rds_ptp_flush_fifo(clock,
+ MCHP_RDS_PTP_INGRESS_FIFO);
+ } while (irq_sts & (MCHP_RDS_PTP_INT_RX_TS_EN |
+ MCHP_RDS_PTP_INT_TX_TS_EN |
+ MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN |
+ MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN));
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(mchp_rds_ptp_handle_interrupt);
+
+static int mchp_rds_ptp_init(struct mchp_rds_ptp_clock *clock)
+{
+ int rc;
+
+ /* Disable PTP */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_DIS);
+ if (rc < 0)
+ return rc;
+
+ /* Disable TSU */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ /* Clear PTP interrupt status registers */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_HARD_RESET,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TSU_HARDRESET);
+ if (rc < 0)
+ return rc;
+
+ /* Predictor enable */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_LATENCY_CORRECTION_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_LATENCY_SETTING);
+ if (rc < 0)
+ return rc;
+
+ /* Configure PTP operational mode */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_OP_MODE,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_OP_MODE_STANDALONE);
+ if (rc < 0)
+ return rc;
+
+ /* Reference clock configuration */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_REF_CLK_CFG,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_REF_CLK_CFG_SET);
+ if (rc < 0)
+ return rc;
+
+ /* Classifier configurations */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_CONFIG,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN,
+ MCHP_RDS_PTP_PORT, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_RX_VERSION,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_MAX_VERSION(0xff) |
+ MCHP_RDS_PTP_MIN_VERSION(0x0));
+ if (rc < 0)
+ return rc;
+
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TX_VERSION,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_MAX_VERSION(0xff) |
+ MCHP_RDS_PTP_MIN_VERSION(0x0));
+ if (rc < 0)
+ return rc;
+
+ /* Enable TSU */
+ rc = mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_TSU_GEN_CONFIG,
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN);
+ if (rc < 0)
+ return rc;
+
+ /* Enable PTP */
+ return mchp_rds_phy_write_mmd(clock, MCHP_RDS_PTP_CMD_CTL,
+ MCHP_RDS_PTP_CLOCK,
+ MCHP_RDS_PTP_CMD_CTL_EN);
+}
+
+struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
+ u16 clk_base_addr,
+ u16 port_base_addr)
+{
+ struct mchp_rds_ptp_clock *clock;
+ int rc;
+
+ clock = devm_kzalloc(&phydev->mdio.dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ clock->port_base_addr = port_base_addr;
+ clock->clk_base_addr = clk_base_addr;
+ clock->mmd = mmd;
+
+ mutex_init(&clock->ptp_lock);
+ /* Register PTP clock */
+ clock->caps.owner = THIS_MODULE;
+ snprintf(clock->caps.name, 30, "%s", phydev->drv->name);
+ clock->caps.max_adj = MCHP_RDS_PTP_MAX_ADJ;
+ clock->caps.n_ext_ts = 0;
+ clock->caps.pps = 0;
+ clock->caps.adjfine = mchp_rds_ptp_ltc_adjfine;
+ clock->caps.adjtime = mchp_rds_ptp_ltc_adjtime;
+ clock->caps.gettime64 = mchp_rds_ptp_ltc_gettime64;
+ clock->caps.settime64 = mchp_rds_ptp_ltc_settime64;
+ clock->ptp_clock = ptp_clock_register(&clock->caps,
+ &phydev->mdio.dev);
+ if (IS_ERR(clock->ptp_clock))
+ return ERR_PTR(-EINVAL);
+
+ /* Check if PHC support is missing at the configuration level */
+ if (!clock->ptp_clock)
+ return NULL;
+
+ /* Initialize the SW */
+ skb_queue_head_init(&clock->tx_queue);
+ skb_queue_head_init(&clock->rx_queue);
+ INIT_LIST_HEAD(&clock->rx_ts_list);
+ spin_lock_init(&clock->rx_ts_lock);
+
+ clock->mii_ts.rxtstamp = mchp_rds_ptp_rxtstamp;
+ clock->mii_ts.txtstamp = mchp_rds_ptp_txtstamp;
+ clock->mii_ts.hwtstamp = mchp_rds_ptp_hwtstamp;
+ clock->mii_ts.ts_info = mchp_rds_ptp_ts_info;
+
+ phydev->mii_ts = &clock->mii_ts;
+
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
+
+ clock->phydev = phydev;
+
+ rc = mchp_rds_ptp_init(clock);
+ if (rc < 0)
+ return ERR_PTR(rc);
+
+ return clock;
+}
+EXPORT_SYMBOL_GPL(mchp_rds_ptp_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MICROCHIP PHY RDS PTP driver");
+MODULE_AUTHOR("Divya Koppera");
diff --git a/drivers/net/phy/microchip_rds_ptp.h b/drivers/net/phy/microchip_rds_ptp.h
new file mode 100644
index 000000000000..e95c065728b5
--- /dev/null
+++ b/drivers/net/phy/microchip_rds_ptp.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2024 Microchip Technology
+ */
+
+#ifndef _MICROCHIP_RDS_PTP_H
+#define _MICROCHIP_RDS_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_clock.h>
+#include <linux/ptp_classify.h>
+#include <linux/net_tstamp.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#define MCHP_RDS_PTP_CMD_CTL 0x0
+#define MCHP_RDS_PTP_CMD_CTL_LTC_STEP_NSEC BIT(6)
+#define MCHP_RDS_PTP_CMD_CTL_LTC_STEP_SEC BIT(5)
+#define MCHP_RDS_PTP_CMD_CTL_CLOCK_LOAD BIT(4)
+#define MCHP_RDS_PTP_CMD_CTL_CLOCK_READ BIT(3)
+#define MCHP_RDS_PTP_CMD_CTL_EN BIT(1)
+#define MCHP_RDS_PTP_CMD_CTL_DIS BIT(0)
+
+#define MCHP_RDS_PTP_REF_CLK_CFG 0x2
+#define MCHP_RDS_PTP_REF_CLK_SRC_250MHZ 0x0
+#define MCHP_RDS_PTP_REF_CLK_PERIOD_OVERRIDE BIT(9)
+#define MCHP_RDS_PTP_REF_CLK_PERIOD 4
+#define MCHP_RDS_PTP_REF_CLK_CFG_SET (MCHP_RDS_PTP_REF_CLK_SRC_250MHZ |\
+ MCHP_RDS_PTP_REF_CLK_PERIOD_OVERRIDE |\
+ MCHP_RDS_PTP_REF_CLK_PERIOD)
+
+#define MCHP_RDS_PTP_LTC_SEC_HI 0x5
+#define MCHP_RDS_PTP_LTC_SEC_MID 0x6
+#define MCHP_RDS_PTP_LTC_SEC_LO 0x7
+#define MCHP_RDS_PTP_LTC_NS_HI 0x8
+#define MCHP_RDS_PTP_LTC_NS_LO 0x9
+#define MCHP_RDS_PTP_LTC_RATE_ADJ_HI 0xc
+#define MCHP_RDS_PTP_LTC_RATE_ADJ_HI_DIR BIT(15)
+#define MCHP_RDS_PTP_LTC_RATE_ADJ_LO 0xd
+#define MCHP_RDS_PTP_STEP_ADJ_HI 0x12
+#define MCHP_RDS_PTP_STEP_ADJ_HI_DIR BIT(15)
+#define MCHP_RDS_PTP_STEP_ADJ_LO 0x13
+#define MCHP_RDS_PTP_LTC_READ_SEC_HI 0x29
+#define MCHP_RDS_PTP_LTC_READ_SEC_MID 0x2a
+#define MCHP_RDS_PTP_LTC_READ_SEC_LO 0x2b
+#define MCHP_RDS_PTP_LTC_READ_NS_HI 0x2c
+#define MCHP_RDS_PTP_LTC_READ_NS_LO 0x2d
+#define MCHP_RDS_PTP_OP_MODE 0x41
+#define MCHP_RDS_PTP_OP_MODE_DIS 0
+#define MCHP_RDS_PTP_OP_MODE_STANDALONE 1
+#define MCHP_RDS_PTP_LATENCY_CORRECTION_CTL 0x44
+#define MCHP_RDS_PTP_PREDICTOR_EN BIT(6)
+#define MCHP_RDS_PTP_TX_PRED_DIS BIT(1)
+#define MCHP_RDS_PTP_RX_PRED_DIS BIT(0)
+#define MCHP_RDS_PTP_LATENCY_SETTING (MCHP_RDS_PTP_PREDICTOR_EN | \
+ MCHP_RDS_PTP_TX_PRED_DIS | \
+ MCHP_RDS_PTP_RX_PRED_DIS)
+
+#define MCHP_RDS_PTP_INT_EN 0x0
+#define MCHP_RDS_PTP_INT_STS 0x01
+#define MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN BIT(3)
+#define MCHP_RDS_PTP_INT_TX_TS_EN BIT(2)
+#define MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN BIT(1)
+#define MCHP_RDS_PTP_INT_RX_TS_EN BIT(0)
+#define MCHP_RDS_PTP_INT_ALL_MSK (MCHP_RDS_PTP_INT_TX_TS_OVRFL_EN | \
+ MCHP_RDS_PTP_INT_TX_TS_EN | \
+ MCHP_RDS_PTP_INT_RX_TS_OVRFL_EN |\
+ MCHP_RDS_PTP_INT_RX_TS_EN)
+
+#define MCHP_RDS_PTP_CAP_INFO 0x2e
+#define MCHP_RDS_PTP_TX_TS_CNT(v) (((v) & GENMASK(11, 8)) >> 8)
+#define MCHP_RDS_PTP_RX_TS_CNT(v) ((v) & GENMASK(3, 0))
+
+#define MCHP_RDS_PTP_RX_PARSE_CONFIG 0x42
+#define MCHP_RDS_PTP_RX_PARSE_L2_ADDR_EN 0x44
+#define MCHP_RDS_PTP_RX_PARSE_IPV4_ADDR_EN 0x45
+
+#define MCHP_RDS_PTP_RX_TIMESTAMP_CONFIG 0x4e
+#define MCHP_RDS_PTP_RX_TIMESTAMP_CONFIG_PTP_FCS_DIS BIT(0)
+
+#define MCHP_RDS_PTP_RX_VERSION 0x48
+#define MCHP_RDS_PTP_RX_TIMESTAMP_EN 0x4d
+
+#define MCHP_RDS_PTP_RX_INGRESS_NS_HI 0x54
+#define MCHP_RDS_PTP_RX_INGRESS_NS_HI_TS_VALID BIT(15)
+
+#define MCHP_RDS_PTP_RX_INGRESS_NS_LO 0x55
+#define MCHP_RDS_PTP_RX_INGRESS_SEC_HI 0x56
+#define MCHP_RDS_PTP_RX_INGRESS_SEC_LO 0x57
+#define MCHP_RDS_PTP_RX_MSG_HDR2 0x59
+
+#define MCHP_RDS_PTP_TX_PARSE_CONFIG 0x82
+#define MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN BIT(0)
+#define MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN BIT(1)
+#define MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN BIT(2)
+
+#define MCHP_RDS_PTP_TX_PARSE_L2_ADDR_EN 0x84
+#define MCHP_RDS_PTP_TX_PARSE_IPV4_ADDR_EN 0x85
+
+#define MCHP_RDS_PTP_TX_VERSION 0x88
+#define MCHP_RDS_PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
+#define MCHP_RDS_PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0))
+
+#define MCHP_RDS_PTP_TX_TIMESTAMP_EN 0x8d
+#define MCHP_RDS_PTP_TIMESTAMP_EN_SYNC BIT(0)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_DREQ BIT(1)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_PDREQ BIT(2)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_PDRES BIT(3)
+#define MCHP_RDS_PTP_TIMESTAMP_EN_ALL (MCHP_RDS_PTP_TIMESTAMP_EN_SYNC |\
+ MCHP_RDS_PTP_TIMESTAMP_EN_DREQ |\
+ MCHP_RDS_PTP_TIMESTAMP_EN_PDREQ |\
+ MCHP_RDS_PTP_TIMESTAMP_EN_PDRES)
+
+#define MCHP_RDS_PTP_TX_TIMESTAMP_CONFIG 0x8e
+#define MCHP_RDS_PTP_TX_TIMESTAMP_CONFIG_PTP_FCS_DIS BIT(0)
+
+#define MCHP_RDS_PTP_TX_MOD 0x8f
+#define MCHP_RDS_TX_MOD_PTP_SYNC_TS_INSERT BIT(12)
+
+#define MCHP_RDS_PTP_TX_EGRESS_NS_HI 0x94
+#define MCHP_RDS_PTP_TX_EGRESS_NS_HI_TS_VALID BIT(15)
+
+#define MCHP_RDS_PTP_TX_EGRESS_NS_LO 0x95
+#define MCHP_RDS_PTP_TX_EGRESS_SEC_HI 0x96
+#define MCHP_RDS_PTP_TX_EGRESS_SEC_LO 0x97
+#define MCHP_RDS_PTP_TX_MSG_HDR2 0x99
+
+#define MCHP_RDS_PTP_TSU_GEN_CONFIG 0xc0
+#define MCHP_RDS_PTP_TSU_GEN_CFG_TSU_EN BIT(0)
+
+#define MCHP_RDS_PTP_TSU_HARD_RESET 0xc1
+#define MCHP_RDS_PTP_TSU_HARDRESET BIT(0)
+
+/* Represents a 1 ppm rate adjustment in 2^32 format: at 250 MHz each
+ * clock cycle advances the counter by 4 ns, and the rate register LSB
+ * is 2^-32 ns per cycle.
+ * The value is calculated as: (1/1000000) / ((2^-32)/4)
+ */
+#define MCHP_RDS_PTP_1PPM_FORMAT 17179
+#define MCHP_RDS_PTP_FIFO_SIZE 8
+#define MCHP_RDS_PTP_MAX_ADJ 31249999
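
A quick arithmetic check of the constant, as a standalone sketch (assuming the 250 MHz reference clock described above, i.e. 4 ns per cycle and a rate LSB of 2^-32 ns per cycle):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 1 ppm = 1e-6 ns of drift per ns = 4e-6 ns per 4 ns cycle;
	 * in units of 2^-32 ns per cycle: (4 * 2^32) / 1000000 = 17179
	 * (truncated from 17179.87)
	 */
	uint64_t one_ppm = (4ULL << 32) / 1000000ULL;

	printf("%llu\n", (unsigned long long)one_ppm); /* prints 17179 */
	return 0;
}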
+
+#define BASE_CLK(p) ((p)->clk_base_addr)
+#define BASE_PORT(p) ((p)->port_base_addr)
+#define PTP_MMD(p) ((p)->mmd)
+
+enum mchp_rds_ptp_base {
+ MCHP_RDS_PTP_PORT,
+ MCHP_RDS_PTP_CLOCK
+};
+
+enum mchp_rds_ptp_fifo_dir {
+ MCHP_RDS_PTP_INGRESS_FIFO,
+ MCHP_RDS_PTP_EGRESS_FIFO
+};
+
+struct mchp_rds_ptp_clock {
+ struct mii_timestamper mii_ts;
+ struct phy_device *phydev;
+ struct ptp_clock *ptp_clock;
+
+ struct sk_buff_head tx_queue;
+ struct sk_buff_head rx_queue;
+ struct list_head rx_ts_list;
+
+ struct ptp_clock_info caps;
+
+ /* Lock for Rx ts fifo */
+ spinlock_t rx_ts_lock;
+ int hwts_tx_type;
+
+ enum hwtstamp_rx_filters rx_filter;
+ int layer;
+ int version;
+ u16 port_base_addr;
+ u16 clk_base_addr;
+
+ /* Lock for phc */
+ struct mutex ptp_lock;
+ u8 mmd;
+};
+
+struct mchp_rds_ptp_rx_ts {
+ struct list_head list;
+ u32 seconds;
+ u32 nsec;
+ u16 seq_id;
+};
+
+#if IS_ENABLED(CONFIG_MICROCHIP_PHY_RDS_PTP)
+
+struct mchp_rds_ptp_clock *mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd,
+ u16 clk_base, u16 port_base);
+
+int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
+				 u16 reg, u16 val, bool clear);
+
+irqreturn_t mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock);
+
+#else
+
+static inline struct mchp_rds_ptp_clock *
+mchp_rds_ptp_probe(struct phy_device *phydev, u8 mmd, u16 clk_base,
+		   u16 port_base)
+{
+	return NULL;
+}
+
+static inline int mchp_rds_ptp_top_config_intr(struct mchp_rds_ptp_clock *clock,
+					       u16 reg, u16 val, bool clear)
+{
+ return 0;
+}
+
+static inline irqreturn_t
+mchp_rds_ptp_handle_interrupt(struct mchp_rds_ptp_clock *clock)
+{
+	return IRQ_NONE;
+}
+
+#endif /* CONFIG_MICROCHIP_PHY_RDS_PTP */
+
+#endif /* _MICROCHIP_RDS_PTP_H */
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index b17bf6708003..76e5b01832f3 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -10,11 +10,15 @@
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/bitfield.h>
+#include "microchip_rds_ptp.h"
#define PHY_ID_LAN87XX 0x0007c150
#define PHY_ID_LAN937X 0x0007c180
#define PHY_ID_LAN887X 0x0007c1f0
+#define MCHP_RDS_PTP_LTC_BASE_ADDR 0xe000
+#define MCHP_RDS_PTP_PORT_BASE_ADDR (MCHP_RDS_PTP_LTC_BASE_ADDR + 0x800)
+
/* External Register Control Register */
#define LAN87XX_EXT_REG_CTL (0x14)
#define LAN87XX_EXT_REG_CTL_RD_CTL (0x1000)
@@ -229,6 +233,7 @@
#define LAN887X_INT_STS 0xf000
#define LAN887X_INT_MSK 0xf001
+#define LAN887X_INT_MSK_P1588_MOD_INT_MSK BIT(3)
#define LAN887X_INT_MSK_T1_PHY_INT_MSK BIT(2)
#define LAN887X_INT_MSK_LINK_UP_MSK BIT(1)
#define LAN887X_INT_MSK_LINK_DOWN_MSK BIT(0)
@@ -319,6 +324,8 @@ struct lan887x_regwr_map {
struct lan887x_priv {
u64 stats[ARRAY_SIZE(lan887x_hw_stats)];
+ struct mchp_rds_ptp_clock *clock;
+ bool init_done;
};
static int lan937x_dsp_workaround(struct phy_device *phydev, u16 ereg, u8 bank)
@@ -1269,8 +1276,19 @@ static int lan887x_get_features(struct phy_device *phydev)
static int lan887x_phy_init(struct phy_device *phydev)
{
+ struct lan887x_priv *priv = phydev->priv;
int ret;
+ if (!priv->init_done && phy_interrupt_is_valid(phydev)) {
+ priv->clock = mchp_rds_ptp_probe(phydev, MDIO_MMD_VEND1,
+ MCHP_RDS_PTP_LTC_BASE_ADDR,
+ MCHP_RDS_PTP_PORT_BASE_ADDR);
+ if (IS_ERR(priv->clock))
+ return PTR_ERR(priv->clock);
+
+ priv->init_done = true;
+ }
+
/* Clear loopback */
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
LAN887X_MIS_CFG_REG2,
@@ -1470,6 +1488,7 @@ static int lan887x_probe(struct phy_device *phydev)
if (!priv)
return -ENOMEM;
+ priv->init_done = false;
phydev->priv = priv;
return lan887x_phy_setup(phydev);
@@ -1518,6 +1537,7 @@ static void lan887x_get_strings(struct phy_device *phydev, u8 *data)
static int lan887x_config_intr(struct phy_device *phydev)
{
+ struct lan887x_priv *priv = phydev->priv;
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
@@ -1537,12 +1557,24 @@ static int lan887x_config_intr(struct phy_device *phydev)
rc = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_STS);
}
+ if (rc < 0)
+ return rc;
- return rc < 0 ? rc : 0;
+ if (phy_is_default_hwtstamp(phydev)) {
+ return mchp_rds_ptp_top_config_intr(priv->clock,
+ LAN887X_INT_MSK,
+ LAN887X_INT_MSK_P1588_MOD_INT_MSK,
+ (phydev->interrupts ==
+ PHY_INTERRUPT_ENABLED));
+ }
+
+ return 0;
}
static irqreturn_t lan887x_handle_interrupt(struct phy_device *phydev)
{
+ struct lan887x_priv *priv = phydev->priv;
+ int rc = IRQ_NONE;
int irq_status;
irq_status = phy_read_mmd(phydev, MDIO_MMD_VEND1, LAN887X_INT_STS);
@@ -1553,10 +1585,13 @@ static irqreturn_t lan887x_handle_interrupt(struct phy_device *phydev)
if (irq_status & LAN887X_MX_CHIP_TOP_LINK_MSK) {
phy_trigger_machine(phydev);
- return IRQ_HANDLED;
+ rc = IRQ_HANDLED;
}
- return IRQ_NONE;
+ if (irq_status & LAN887X_INT_MSK_P1588_MOD_INT_MSK)
+ rc = mchp_rds_ptp_handle_interrupt(priv->clock);
+
+ return rc;
}
static int lan887x_cd_reset(struct phy_device *phydev,
@@ -2119,7 +2154,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
module_phy_driver(microchip_t1_phy_driver);
-static struct mdio_device_id __maybe_unused microchip_t1_tbl[] = {
+static const struct mdio_device_id __maybe_unused microchip_t1_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX) },
{ PHY_ID_MATCH_MODEL(PHY_ID_LAN937X) },
{ PHY_ID_MATCH_MODEL(PHY_ID_LAN887X) },
diff --git a/drivers/net/phy/microchip_t1s.c b/drivers/net/phy/microchip_t1s.c
index 75d291154b4c..e50a0c102a86 100644
--- a/drivers/net/phy/microchip_t1s.c
+++ b/drivers/net/phy/microchip_t1s.c
@@ -497,7 +497,7 @@ static struct phy_driver microchip_t1s_driver[] = {
module_phy_driver(microchip_t1s_driver);
-static struct mdio_device_id __maybe_unused tbl[] = {
+static const struct mdio_device_id __maybe_unused tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVB1) },
{ PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC1) },
{ PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVC2) },
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index bee381200ab8..19cf12ee8990 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -2699,7 +2699,7 @@ static struct phy_driver vsc85xx_driver[] = {
module_phy_driver(vsc85xx_driver);
-static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
{ PHY_ID_MATCH_VENDOR(PHY_VENDOR_MSCC) },
{ }
};
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index a8ccf257c109..94d9cb727121 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -1274,7 +1274,7 @@ static struct phy_driver gpy_drivers[] = {
};
module_phy_driver(gpy_drivers);
-static struct mdio_device_id __maybe_unused gpy_tbl[] = {
+static const struct mdio_device_id __maybe_unused gpy_tbl[] = {
{PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx)},
{PHY_ID_GPY115B, PHY_ID_GPYx15B_MASK},
{PHY_ID_MATCH_MODEL(PHY_ID_GPY115C)},
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 9ae9cc6b23c2..7f3ff322892e 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -173,7 +173,7 @@ MODULE_DESCRIPTION("NatSemi PHY driver");
MODULE_AUTHOR("Stuart Menefy");
MODULE_LICENSE("GPL");
-static struct mdio_device_id __maybe_unused ns_tbl[] = {
+static const struct mdio_device_id __maybe_unused ns_tbl[] = {
{ DP83865_PHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/ncn26000.c b/drivers/net/phy/ncn26000.c
index 5680584f659e..cabdd83c614f 100644
--- a/drivers/net/phy/ncn26000.c
+++ b/drivers/net/phy/ncn26000.c
@@ -159,7 +159,7 @@ static struct phy_driver ncn26000_driver[] = {
module_phy_driver(ncn26000_driver);
-static struct mdio_device_id __maybe_unused ncn26000_tbl[] = {
+static const struct mdio_device_id __maybe_unused ncn26000_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_NCN26000) },
{ }
};
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index ade544bc007d..323717a4821f 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -2008,7 +2008,7 @@ static struct phy_driver nxp_c45_driver[] = {
module_phy_driver(nxp_c45_driver);
-static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
+static const struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
{ /*sentinel*/ },
diff --git a/drivers/net/phy/nxp-cbtx.c b/drivers/net/phy/nxp-cbtx.c
index 3d25491043a3..3286fcb4f47e 100644
--- a/drivers/net/phy/nxp-cbtx.c
+++ b/drivers/net/phy/nxp-cbtx.c
@@ -215,7 +215,7 @@ static struct phy_driver cbtx_driver[] = {
module_phy_driver(cbtx_driver);
-static struct mdio_device_id __maybe_unused cbtx_tbl[] = {
+static const struct mdio_device_id __maybe_unused cbtx_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_CBTX_SJA1110) },
{ },
};
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index 2c263ae44b4f..ed7fa26bac8e 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -888,7 +888,7 @@ static struct phy_driver tja11xx_driver[] = {
module_phy_driver(tja11xx_driver);
-static struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1100) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1101) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1102) },
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 944ae98ad110..0dac08e85304 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -1469,18 +1469,17 @@ EXPORT_SYMBOL_GPL(genphy_c45_plca_get_status);
* @phydev: target phy_device struct
* @adv: variable to store advertised linkmodes
* @lp: variable to store LP advertised linkmodes
- * @is_enabled: variable to store EEE enabled/disabled configuration value
*
 * Description: this function reads the local and link partner PHY
 * advertisements, compares them, and returns the current EEE state.
*/
int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
- unsigned long *lp, bool *is_enabled)
+ unsigned long *lp)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_adv) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_lp) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- bool eee_enabled, eee_active;
+ bool eee_active;
int ret;
ret = genphy_c45_read_eee_adv(phydev, tmp_adv);
@@ -1491,9 +1490,8 @@ int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
if (ret)
return ret;
- eee_enabled = !linkmode_empty(tmp_adv);
linkmode_and(common, tmp_adv, tmp_lp);
- if (eee_enabled && !linkmode_empty(common))
+ if (!linkmode_empty(tmp_adv) && !linkmode_empty(common))
eee_active = phy_check_valid(phydev->speed, phydev->duplex,
common);
else
@@ -1503,8 +1501,6 @@ int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
linkmode_copy(adv, tmp_adv);
if (lp)
linkmode_copy(lp, tmp_lp);
- if (is_enabled)
- *is_enabled = eee_enabled;
return eee_active;
}
@@ -1521,15 +1517,13 @@ EXPORT_SYMBOL(genphy_c45_eee_is_active);
int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
struct ethtool_keee *data)
{
- bool is_enabled;
int ret;
ret = genphy_c45_eee_is_active(phydev, data->advertised,
- data->lp_advertised, &is_enabled);
+ data->lp_advertised);
if (ret < 0)
return ret;
- data->eee_enabled = is_enabled;
data->eee_active = phydev->eee_active;
linkmode_copy(data->supported, phydev->supported_eee);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 0d20b534122b..c008fe050245 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -616,6 +616,49 @@ int phy_ethtool_get_stats(struct phy_device *phydev,
EXPORT_SYMBOL(phy_ethtool_get_stats);
/**
+ * __phy_ethtool_get_phy_stats - Retrieve standardized PHY statistics
+ * @phydev: Pointer to the PHY device
+ * @phy_stats: Pointer to ethtool_eth_phy_stats structure
+ * @phydev_stats: Pointer to ethtool_phy_stats structure
+ *
+ * Fetches PHY statistics using a kernel-defined interface for consistent
+ * diagnostics. Unlike phy_ethtool_get_stats(), which allows custom stats,
+ * this function enforces a standardized format for better interoperability.
+ */
+void __phy_ethtool_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats)
+{
+ if (!phydev->drv || !phydev->drv->get_phy_stats)
+ return;
+
+ mutex_lock(&phydev->lock);
+ phydev->drv->get_phy_stats(phydev, phy_stats, phydev_stats);
+ mutex_unlock(&phydev->lock);
+}
+
+/**
+ * __phy_ethtool_get_link_ext_stats - Retrieve extended link statistics for a PHY
+ * @phydev: Pointer to the PHY device
+ * @link_stats: Pointer to the structure to store extended link statistics
+ *
+ * Populates the ethtool_link_ext_stats structure with link down event counts
+ * and additional driver-specific link statistics, if available.
+ */
+void __phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+ link_stats->link_down_events = READ_ONCE(phydev->link_down_events);
+
+ if (!phydev->drv || !phydev->drv->get_link_stats)
+ return;
+
+ mutex_lock(&phydev->lock);
+ phydev->drv->get_link_stats(phydev, link_stats);
+ mutex_unlock(&phydev->lock);
+}
+
+/**
* phy_ethtool_get_plca_cfg - Get PLCA RS configuration
* @phydev: the phy_device struct
* @plca_cfg: where to store the retrieved configuration
@@ -988,8 +1031,7 @@ static int phy_check_link_status(struct phy_device *phydev)
if (phydev->link && phydev->state != PHY_RUNNING) {
phy_check_downshift(phydev);
phydev->state = PHY_RUNNING;
- err = genphy_c45_eee_is_active(phydev,
- NULL, NULL, NULL);
+ err = genphy_c45_eee_is_active(phydev, NULL, NULL);
phydev->eee_active = err > 0;
phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled &&
phydev->eee_active;
@@ -1006,6 +1048,59 @@ static int phy_check_link_status(struct phy_device *phydev)
}
/**
+ * phy_inband_caps - query which in-band signalling modes are supported
+ * @phydev: a pointer to a &struct phy_device
+ * @interface: the interface mode for the PHY
+ *
+ * Returns zero if it is unknown what in-band signalling is supported by the
+ * PHY (e.g. because the PHY driver doesn't implement the method.) Otherwise,
+ * returns a bit mask of the LINK_INBAND_* values from
+ * &enum link_inband_signalling to describe which inband modes are supported
+ * by the PHY for this interface mode.
+ */
+unsigned int phy_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ if (phydev->drv && phydev->drv->inband_caps)
+ return phydev->drv->inband_caps(phydev, interface);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phy_inband_caps);
+
+/**
+ * phy_config_inband - configure the desired PHY in-band mode
+ * @phydev: the phy_device struct
+ * @modes: in-band modes to configure
+ *
+ * Description: disables, enables or enables-with-bypass in-band signalling
+ * between the PHY and host system.
+ *
+ * Returns: zero on success, or negative errno value.
+ */
+int phy_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ int err;
+
+ if (!!(modes & LINK_INBAND_DISABLE) +
+ !!(modes & LINK_INBAND_ENABLE) +
+ !!(modes & LINK_INBAND_BYPASS) != 1)
+ return -EINVAL;
+
+ mutex_lock(&phydev->lock);
+ if (!phydev->drv)
+ err = -EIO;
+ else if (!phydev->drv->config_inband)
+ err = -EOPNOTSUPP;
+ else
+ err = phydev->drv->config_inband(phydev, modes);
+ mutex_unlock(&phydev->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(phy_config_inband);
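
A hypothetical consumer sketch (e.g. from the MAC or phylink side; the example_* names are illustrative, not part of this patch), querying the PHY's capabilities first and then applying one supported mode:

static int example_select_inband(struct phy_device *phydev)
{
	unsigned int caps = phy_inband_caps(phydev, PHY_INTERFACE_MODE_SGMII);

	/* Prefer in-band AN, fall back to bypass, else disable it */
	if (caps & LINK_INBAND_ENABLE)
		return phy_config_inband(phydev, LINK_INBAND_ENABLE);
	if (caps & LINK_INBAND_BYPASS)
		return phy_config_inband(phydev, LINK_INBAND_BYPASS);

	return phy_config_inband(phydev, LINK_INBAND_DISABLE);
}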
+
+/**
* _phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
*
@@ -1347,6 +1442,23 @@ static int phy_enable_interrupts(struct phy_device *phydev)
}
/**
+ * phy_update_stats - Update PHY device statistics if supported.
+ * @phydev: Pointer to the PHY device structure.
+ *
+ * If the PHY driver provides an update_stats callback, this function
+ * invokes it to update the PHY statistics. If not, it returns 0.
+ *
+ * Return: 0 on success, or a negative error code if the callback fails.
+ */
+static int phy_update_stats(struct phy_device *phydev)
+{
+ if (!phydev->drv->update_stats)
+ return 0;
+
+ return phydev->drv->update_stats(phydev);
+}
+
+/**
* phy_request_interrupt - request and enable interrupt for a PHY device
* @phydev: target phy_device struct
*
@@ -1415,6 +1527,9 @@ static enum phy_state_work _phy_state_machine(struct phy_device *phydev)
case PHY_RUNNING:
err = phy_check_link_status(phydev);
func = &phy_check_link_status;
+
+ if (!err)
+ err = phy_update_stats(phydev);
break;
case PHY_CABLETEST:
err = phydev->drv->cable_test_get_status(phydev, &finished);
@@ -1589,6 +1704,27 @@ void phy_mac_interrupt(struct phy_device *phydev)
EXPORT_SYMBOL(phy_mac_interrupt);
/**
+ * phy_eee_rx_clock_stop() - configure PHY receive clock in LPI
+ * @phydev: target phy_device struct
+ * @clk_stop_enable: flag to indicate whether the clock can be stopped
+ *
+ * Configure whether the PHY can disable its receive clock during LPI mode.
+ * See IEEE 802.3 sections 22.2.2.2, 35.2.2.10, and 45.2.3.1.4.
+ *
+ * Returns: 0 or negative error.
+ */
+int phy_eee_rx_clock_stop(struct phy_device *phydev, bool clk_stop_enable)
+{
+ /* Configure the PHY to stop receiving xMII
+ * clock while it is signaling LPI.
+ */
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
+ MDIO_PCS_CTRL1_CLKSTOP_EN,
+ clk_stop_enable ? MDIO_PCS_CTRL1_CLKSTOP_EN : 0);
+}
+EXPORT_SYMBOL_GPL(phy_eee_rx_clock_stop);
+
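A usage sketch from a hypothetical MAC driver whose receive path tolerates a stopped xMII clock during LPI:

	ret = phy_eee_rx_clock_stop(phydev, true);
	if (ret < 0)
		return ret;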
+/**
* phy_init_eee - init and check the EEE feature
* @phydev: target phy_device struct
* @clk_stop_enable: PHY may stop the clock during LPI
@@ -1605,18 +1741,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if (!phydev->drv)
return -EIO;
- ret = genphy_c45_eee_is_active(phydev, NULL, NULL, NULL);
+ ret = genphy_c45_eee_is_active(phydev, NULL, NULL);
if (ret < 0)
return ret;
if (!ret)
return -EPROTONOSUPPORT;
if (clk_stop_enable)
- /* Configure the PHY to stop receiving xMII
- * clock while it is signaling LPI.
- */
- ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
- MDIO_PCS_CTRL1_CLKSTOP_EN);
+ ret = phy_eee_rx_clock_stop(phydev, true);
return ret < 0 ? ret : 0;
}
@@ -1649,8 +1781,8 @@ EXPORT_SYMBOL(phy_get_eee_err);
* @phydev: target phy_device struct
* @data: ethtool_keee data
*
- * Description: reports the Supported/Advertisement/LP Advertisement
- * capabilities, etc.
+ * Description: get the current EEE settings, filling in all members of
+ * @data.
*/
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data)
{
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b26bb33cd1d4..5b34d39d1d52 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -32,6 +32,7 @@
#include <linux/phy_link_topology.h>
#include <linux/pse-pd/pse.h>
#include <linux/property.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/rtnetlink.h>
#include <linux/sfp.h>
#include <linux/skbuff.h>
@@ -1998,6 +1999,15 @@ void phy_detach(struct phy_device *phydev)
phy_suspend(phydev);
if (dev) {
+ struct hwtstamp_provider *hwprov;
+
+ hwprov = rtnl_dereference(dev->hwprov);
+ /* Disable timestamp if it is the one selected */
+ if (hwprov && hwprov->phydev == phydev) {
+ rcu_assign_pointer(dev->hwprov, NULL);
+ kfree_rcu(hwprov, rcu_head);
+ }
+
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
phy_link_topo_del_phy(dev, phydev);
@@ -2994,6 +3004,23 @@ void phy_support_eee(struct phy_device *phydev)
EXPORT_SYMBOL(phy_support_eee);
/**
+ * phy_disable_eee - Disable EEE for the PHY
+ * @phydev: Target phy_device struct
+ *
+ * This function is used by MAC drivers for MACs which don't support EEE.
+ * It disables EEE on the PHY layer.
+ */
+void phy_disable_eee(struct phy_device *phydev)
+{
+ linkmode_zero(phydev->advertising_eee);
+ phydev->eee_cfg.tx_lpi_enabled = false;
+ phydev->eee_cfg.eee_enabled = false;
+ /* don't let userspace re-enable EEE advertisement */
+ linkmode_fill(phydev->eee_broken_modes);
+}
+EXPORT_SYMBOL_GPL(phy_disable_eee);
+
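A usage sketch, assuming ndev holds an attached PHY and the (hypothetical) MAC cannot signal LPI:

	/* MAC has no EEE support: stop the PHY advertising it */
	phy_disable_eee(ndev->phydev);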
+/**
* phy_support_sym_pause - Enable support of symmetrical pause
* @phydev: target phy_device struct
*
@@ -3773,6 +3800,8 @@ static const struct ethtool_phy_ops phy_ethtool_phy_ops = {
static const struct phylib_stubs __phylib_stubs = {
.hwtstamp_get = __phy_hwtstamp_get,
.hwtstamp_set = __phy_hwtstamp_set,
+ .get_phy_stats = __phy_ethtool_get_phy_stats,
+ .get_link_ext_stats = __phy_ethtool_get_link_ext_stats,
};
static void phylib_register_stubs(void)
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 30a654e98352..ff0efb52189f 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -56,7 +56,8 @@ struct phylink {
struct phy_device *phydev;
phy_interface_t link_interface; /* PHY_INTERFACE_xxx */
u8 cfg_link_an_mode; /* MLO_AN_xxx */
- u8 cur_link_an_mode;
+ u8 req_link_an_mode; /* Requested MLO_AN_xxx mode */
+ u8 act_link_an_mode; /* Active MLO_AN_xxx mode */
u8 link_port; /* The current non-phy ethtool port */
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
@@ -74,6 +75,7 @@ struct phylink {
struct mutex state_mutex;
struct phylink_link_state phy_state;
+ unsigned int phy_ib_mode;
struct work_struct resolve;
unsigned int pcs_neg_mode;
unsigned int pcs_state;
@@ -174,6 +176,24 @@ static const char *phylink_an_mode_str(unsigned int mode)
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
}
+static const char *phylink_pcs_mode_str(unsigned int mode)
+{
+ if (!mode)
+ return "none";
+
+ if (mode & PHYLINK_PCS_NEG_OUTBAND)
+ return "outband";
+
+ if (mode & PHYLINK_PCS_NEG_INBAND) {
+ if (mode & PHYLINK_PCS_NEG_ENABLED)
+ return "inband,an-enabled";
+ else
+ return "inband,an-disabled";
+ }
+
+ return "unknown";
+}
+
static unsigned int phylink_interface_signal_rate(phy_interface_t interface)
{
switch (interface) {
@@ -671,6 +691,17 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
return -EINVAL;
}
+ /* Ensure that this PCS supports the interface which the MAC
+ * returned it for. It is an error for the MAC to return a PCS
+ * that does not support the interface mode.
+ */
+ if (!phy_interface_empty(pcs->supported_interfaces) &&
+ !test_bit(state->interface, pcs->supported_interfaces)) {
+ phylink_err(pl, "MAC returned PCS which does not support %s\n",
+ phy_modes(state->interface));
+ return -EINVAL;
+ }
+
/* Validate the link parameters with the PCS */
if (pcs->ops->pcs_validate) {
ret = pcs->ops->pcs_validate(pcs, supported, state);
@@ -971,6 +1002,15 @@ static void phylink_resolve_an_pause(struct phylink_link_state *state)
}
}
+static unsigned int phylink_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ if (pcs && pcs->ops->pcs_inband_caps)
+ return pcs->ops->pcs_inband_caps(pcs, interface);
+
+ return 0;
+}
+
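On the PCS side, the new pcs_inband_caps operation mirrors the PHY hook. A minimal sketch for a hypothetical PCS that is configurable for SGMII but requires the in-band control word for 1000base-x:

	static unsigned int foo_pcs_inband_caps(struct phylink_pcs *pcs,
						phy_interface_t interface)
	{
		switch (interface) {
		case PHY_INTERFACE_MODE_SGMII:
			return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
		case PHY_INTERFACE_MODE_1000BASEX:
			return LINK_INBAND_ENABLE;
		default:
			return 0;
		}
	}

	static const struct phylink_pcs_ops foo_pcs_ops = {
		.pcs_inband_caps = foo_pcs_inband_caps,
		/* remaining ops elided */
	};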
static void phylink_pcs_pre_config(struct phylink_pcs *pcs,
phy_interface_t interface)
{
@@ -1024,6 +1064,24 @@ static void phylink_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
pcs->ops->pcs_link_up(pcs, neg_mode, interface, speed, duplex);
}
+/* Query inband for a specific interface mode, asking the MAC for the
+ * PCS which will be used to handle the interface mode.
+ */
+static unsigned int phylink_inband_caps(struct phylink *pl,
+ phy_interface_t interface)
+{
+ struct phylink_pcs *pcs;
+
+ if (!pl->mac_ops->mac_select_pcs)
+ return 0;
+
+ pcs = pl->mac_ops->mac_select_pcs(pl->config, interface);
+ if (!pcs)
+ return 0;
+
+ return phylink_pcs_inband_caps(pcs, interface);
+}
+
static void phylink_pcs_poll_stop(struct phylink *pl)
{
if (pl->cfg_link_an_mode == MLO_AN_INBAND)
@@ -1065,13 +1123,13 @@ static void phylink_mac_config(struct phylink *pl,
phylink_dbg(pl,
"%s: mode=%s/%s/%s adv=%*pb pause=%02x\n",
- __func__, phylink_an_mode_str(pl->cur_link_an_mode),
+ __func__, phylink_an_mode_str(pl->act_link_an_mode),
phy_modes(st.interface),
phy_rate_matching_to_str(st.rate_matching),
__ETHTOOL_LINK_MODE_MASK_NBITS, st.advertising,
st.pause);
- pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, &st);
+ pl->mac_ops->mac_config(pl->config, pl->act_link_an_mode, &st);
}
static void phylink_pcs_an_restart(struct phylink *pl)
@@ -1079,13 +1137,14 @@ static void phylink_pcs_an_restart(struct phylink *pl)
if (pl->pcs && linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
pl->link_config.advertising) &&
phy_interface_mode_is_8023z(pl->link_config.interface) &&
- phylink_autoneg_inband(pl->cur_link_an_mode))
+ phylink_autoneg_inband(pl->act_link_an_mode))
pl->pcs->ops->pcs_an_restart(pl->pcs);
}
/**
* phylink_pcs_neg_mode() - helper to determine PCS inband mode
- * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @pcs: a pointer to &struct phylink_pcs
* @interface: interface mode to be used
* @advertising: advertisement ethtool link mode mask
*
@@ -1102,11 +1161,21 @@ static void phylink_pcs_an_restart(struct phylink *pl)
* Note: this is for cases where the PCS itself is involved in negotiation
* (e.g. Clause 37, SGMII and similar) not Clause 73.
*/
-static unsigned int phylink_pcs_neg_mode(unsigned int mode,
- phy_interface_t interface,
- const unsigned long *advertising)
+static void phylink_pcs_neg_mode(struct phylink *pl, struct phylink_pcs *pcs,
+ phy_interface_t interface,
+ const unsigned long *advertising)
{
- unsigned int neg_mode;
+ unsigned int pcs_ib_caps = 0;
+ unsigned int phy_ib_caps = 0;
+ unsigned int neg_mode, mode;
+ enum {
+ INBAND_CISCO_SGMII,
+ INBAND_BASEX,
+ } type;
+
+ mode = pl->req_link_an_mode;
+
+ pl->phy_ib_mode = 0;
switch (interface) {
case PHY_INTERFACE_MODE_SGMII:
@@ -1119,10 +1188,7 @@ static unsigned int phylink_pcs_neg_mode(unsigned int mode,
* inband communication. Note: there exist PHYs that run
* with SGMII but do not send the inband data.
*/
- if (!phylink_autoneg_inband(mode))
- neg_mode = PHYLINK_PCS_NEG_OUTBAND;
- else
- neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
+ type = INBAND_CISCO_SGMII;
break;
case PHY_INTERFACE_MODE_1000BASEX:
@@ -1133,21 +1199,143 @@ static unsigned int phylink_pcs_neg_mode(unsigned int mode,
* as well, but drivers may not support this, so may
* need to override this.
*/
- if (!phylink_autoneg_inband(mode))
+ type = INBAND_BASEX;
+ break;
+
+ default:
+ pl->pcs_neg_mode = PHYLINK_PCS_NEG_NONE;
+ pl->act_link_an_mode = mode;
+ return;
+ }
+
+ if (pcs)
+ pcs_ib_caps = phylink_pcs_inband_caps(pcs, interface);
+
+ if (pl->phydev)
+ phy_ib_caps = phy_inband_caps(pl->phydev, interface);
+
+ phylink_dbg(pl, "interface %s inband modes: pcs=%02x phy=%02x\n",
+ phy_modes(interface), pcs_ib_caps, phy_ib_caps);
+
+ if (!phylink_autoneg_inband(mode)) {
+ bool pcs_ib_only = false;
+ bool phy_ib_only = false;
+
+ if (pcs_ib_caps && pcs_ib_caps != LINK_INBAND_DISABLE) {
+ /* PCS supports reporting in-band capabilities, and
+ * supports more than disable mode.
+ */
+ if (pcs_ib_caps & LINK_INBAND_DISABLE)
+ neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ else if (pcs_ib_caps & LINK_INBAND_ENABLE)
+ pcs_ib_only = true;
+ }
+
+ if (phy_ib_caps && phy_ib_caps != LINK_INBAND_DISABLE) {
+ /* PHY supports reporting in-band capabilities, and supports
+ * more than disable mode.
+ */
+ if (phy_ib_caps & LINK_INBAND_DISABLE)
+ pl->phy_ib_mode = LINK_INBAND_DISABLE;
+ else if (phy_ib_caps & LINK_INBAND_BYPASS)
+ pl->phy_ib_mode = LINK_INBAND_BYPASS;
+ else if (phy_ib_caps & LINK_INBAND_ENABLE)
+ phy_ib_only = true;
+ }
+
+ /* If either the PCS or PHY requires inband to be enabled,
+ * this is an invalid configuration. Provide a diagnostic
+ * message for this case, but don't try to force the issue.
+ */
+ if (pcs_ib_only || phy_ib_only)
+ phylink_warn(pl,
+ "firmware wants %s mode, but %s%s%s requires inband\n",
+ phylink_an_mode_str(mode),
+ pcs_ib_only ? "PCS" : "",
+ pcs_ib_only && phy_ib_only ? " and " : "",
+ phy_ib_only ? "PHY" : "");
+
+ neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ } else if (type == INBAND_CISCO_SGMII || pl->phydev) {
+ /* For SGMII modes which are designed to be used with PHYs, or
+ * Base-X with a PHY, we try to use in-band mode wherever
+ * possible. However, there are some PHYs, e.g. the BCM84881, which
+ * do not support in-band.
+ */
+ const unsigned int inband_ok = LINK_INBAND_ENABLE |
+ LINK_INBAND_BYPASS;
+ const unsigned int outband_ok = LINK_INBAND_DISABLE |
+ LINK_INBAND_BYPASS;
+ /* PCS PHY
+ * D E D E
+ * 0 0 0 0 no information inband enabled
+ * 1 0 0 0 pcs doesn't support outband
+ * 0 1 0 0 pcs required inband enabled
+ * 1 1 0 0 pcs optional inband enabled
+ * 0 0 1 0 phy doesn't support outband
+ * 1 0 1 0 pcs+phy doesn't support outband
+ * 0 1 1 0 pcs required, phy doesn't support, invalid
+ * 1 1 1 0 pcs optional, phy doesn't support, outband
+ * 0 0 0 1 phy required inband enabled
+ * 1 0 0 1 pcs doesn't support, phy required, invalid
+ * 0 1 0 1 pcs+phy required inband enabled
+ * 1 1 0 1 pcs optional, phy required inband enabled
+ * 0 0 1 1 phy optional inband enabled
+ * 1 0 1 1 pcs doesn't support, phy optional, outband
+ * 0 1 1 1 pcs required, phy optional inband enabled
+ * 1 1 1 1 pcs+phy optional inband enabled
+ */
+ if ((!pcs_ib_caps || pcs_ib_caps & inband_ok) &&
+ (!phy_ib_caps || phy_ib_caps & inband_ok)) {
+ /* In-band supported or unknown at both ends. Enable
+ * in-band mode with or without bypass at the PHY.
+ */
+ if (phy_ib_caps & LINK_INBAND_ENABLE)
+ pl->phy_ib_mode = LINK_INBAND_ENABLE;
+ else if (phy_ib_caps & LINK_INBAND_BYPASS)
+ pl->phy_ib_mode = LINK_INBAND_BYPASS;
+
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
+ } else if ((!pcs_ib_caps || pcs_ib_caps & outband_ok) &&
+ (!phy_ib_caps || phy_ib_caps & outband_ok)) {
+ /* In-band is not supported at at least one end, but
+ * in-band bypass at the other end is possible.
+ */
+ if (phy_ib_caps & LINK_INBAND_DISABLE)
+ pl->phy_ib_mode = LINK_INBAND_DISABLE;
+ else if (phy_ib_caps & LINK_INBAND_BYPASS)
+ pl->phy_ib_mode = LINK_INBAND_BYPASS;
+
neg_mode = PHYLINK_PCS_NEG_OUTBAND;
+ if (pl->phydev)
+ mode = MLO_AN_PHY;
+ } else {
+ /* invalid */
+ phylink_warn(pl, "%s: incompatible in-band capabilities, trying in-band",
+ phy_modes(interface));
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
+ }
+ } else {
+ /* For Base-X without a PHY */
+ if (pcs_ib_caps == LINK_INBAND_DISABLE)
+ /* If the PCS doesn't support inband, then inband must
+ * be disabled.
+ */
+ neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
+ else if (pcs_ib_caps == LINK_INBAND_ENABLE)
+ /* If the PCS requires inband, then inband must always
+ * be enabled.
+ */
+ neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
advertising))
neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
else
neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
- break;
-
- default:
- neg_mode = PHYLINK_PCS_NEG_NONE;
- break;
}
- return neg_mode;
+ pl->pcs_neg_mode = neg_mode;
+ pl->act_link_an_mode = mode;
}
static void phylink_major_config(struct phylink *pl, bool restart,
@@ -1159,11 +1347,9 @@ static void phylink_major_config(struct phylink *pl, bool restart,
unsigned int neg_mode;
int err;
- phylink_dbg(pl, "major config %s\n", phy_modes(state->interface));
-
- pl->pcs_neg_mode = phylink_pcs_neg_mode(pl->cur_link_an_mode,
- state->interface,
- state->advertising);
+ phylink_dbg(pl, "major config, requested %s/%s\n",
+ phylink_an_mode_str(pl->req_link_an_mode),
+ phy_modes(state->interface));
if (pl->mac_ops->mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
@@ -1177,10 +1363,17 @@ static void phylink_major_config(struct phylink *pl, bool restart,
pcs_changed = pl->pcs != pcs;
}
+ phylink_pcs_neg_mode(pl, pcs, state->interface, state->advertising);
+
+ phylink_dbg(pl, "major config, active %s/%s/%s\n",
+ phylink_an_mode_str(pl->act_link_an_mode),
+ phylink_pcs_mode_str(pl->pcs_neg_mode),
+ phy_modes(state->interface));
+
phylink_pcs_poll_stop(pl);
if (pl->mac_ops->mac_prepare) {
- err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode,
+ err = pl->mac_ops->mac_prepare(pl->config, pl->act_link_an_mode,
state->interface);
if (err < 0) {
phylink_err(pl, "mac_prepare failed: %pe\n",
@@ -1214,7 +1407,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
if (pl->pcs_state == PCS_STATE_STARTING || pcs_changed)
phylink_pcs_enable(pl->pcs);
- neg_mode = pl->cur_link_an_mode;
+ neg_mode = pl->act_link_an_mode;
if (pl->pcs && pl->pcs->neg_mode)
neg_mode = pl->pcs_neg_mode;
@@ -1230,13 +1423,20 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_pcs_an_restart(pl);
if (pl->mac_ops->mac_finish) {
- err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode,
+ err = pl->mac_ops->mac_finish(pl->config, pl->act_link_an_mode,
state->interface);
if (err < 0)
phylink_err(pl, "mac_finish failed: %pe\n",
ERR_PTR(err));
}
+ if (pl->phydev && pl->phy_ib_mode) {
+ err = phy_config_inband(pl->phydev, pl->phy_ib_mode);
+ if (err < 0)
+ phylink_err(pl, "phy_config_inband: %pe\n",
+ ERR_PTR(err));
+ }
+
if (pl->sfp_bus) {
rate_kbd = phylink_interface_signal_rate(state->interface);
if (rate_kbd)
@@ -1261,17 +1461,16 @@ static int phylink_change_inband_advert(struct phylink *pl)
return 0;
phylink_dbg(pl, "%s: mode=%s/%s adv=%*pb pause=%02x\n", __func__,
- phylink_an_mode_str(pl->cur_link_an_mode),
+ phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(pl->link_config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->link_config.advertising,
pl->link_config.pause);
/* Recompute the PCS neg mode */
- pl->pcs_neg_mode = phylink_pcs_neg_mode(pl->cur_link_an_mode,
- pl->link_config.interface,
- pl->link_config.advertising);
+ phylink_pcs_neg_mode(pl, pl->pcs, pl->link_config.interface,
+ pl->link_config.advertising);
- neg_mode = pl->cur_link_an_mode;
+ neg_mode = pl->act_link_an_mode;
if (pl->pcs->neg_mode)
neg_mode = pl->pcs_neg_mode;
@@ -1293,12 +1492,24 @@ static int phylink_change_inband_advert(struct phylink *pl)
static void phylink_mac_pcs_get_state(struct phylink *pl,
struct phylink_link_state *state)
{
+ struct phylink_pcs *pcs;
+ bool autoneg;
+
linkmode_copy(state->advertising, pl->link_config.advertising);
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->rate_matching = pl->link_config.rate_matching;
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- state->advertising)) {
+ state->an_complete = 0;
+ state->link = 1;
+
+ pcs = pl->pcs;
+ if (!pcs || pcs->neg_mode)
+ autoneg = pl->pcs_neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED;
+ else
+ autoneg = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ state->advertising);
+
+ if (autoneg) {
state->speed = SPEED_UNKNOWN;
state->duplex = DUPLEX_UNKNOWN;
state->pause = MLO_PAUSE_NONE;
@@ -1307,11 +1518,9 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
state->duplex = pl->link_config.duplex;
state->pause = pl->link_config.pause;
}
- state->an_complete = 0;
- state->link = 1;
- if (pl->pcs)
- pl->pcs->ops->pcs_get_state(pl->pcs, state);
+ if (pcs)
+ pcs->ops->pcs_get_state(pcs, pl->pcs_neg_mode, state);
else
state->link = 0;
}
@@ -1336,7 +1545,7 @@ static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
{
struct phylink_link_state link_state;
- switch (pl->cur_link_an_mode) {
+ switch (pl->req_link_an_mode) {
case MLO_AN_PHY:
link_state = pl->phy_state;
break;
@@ -1410,14 +1619,14 @@ static void phylink_link_up(struct phylink *pl,
pl->cur_interface = link_state.interface;
- neg_mode = pl->cur_link_an_mode;
+ neg_mode = pl->act_link_an_mode;
if (pl->pcs && pl->pcs->neg_mode)
neg_mode = pl->pcs_neg_mode;
phylink_pcs_link_up(pl->pcs, neg_mode, pl->cur_interface, speed,
duplex);
- pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->cur_link_an_mode,
+ pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->act_link_an_mode,
pl->cur_interface, speed, duplex,
!!(link_state.pause & MLO_PAUSE_TX), rx_pause);
@@ -1437,7 +1646,7 @@ static void phylink_link_down(struct phylink *pl)
if (ndev)
netif_carrier_off(ndev);
- pl->mac_ops->mac_link_down(pl->config, pl->cur_link_an_mode,
+ pl->mac_ops->mac_link_down(pl->config, pl->act_link_an_mode,
pl->cur_interface);
phylink_info(pl, "Link is Down\n");
}
@@ -1463,10 +1672,10 @@ static void phylink_resolve(struct work_struct *w)
} else if (pl->link_failed) {
link_state.link = false;
retrigger = true;
- } else if (pl->cur_link_an_mode == MLO_AN_FIXED) {
+ } else if (pl->act_link_an_mode == MLO_AN_FIXED) {
phylink_get_fixed_state(pl, &link_state);
mac_config = link_state.link;
- } else if (pl->cur_link_an_mode == MLO_AN_PHY) {
+ } else if (pl->act_link_an_mode == MLO_AN_PHY) {
link_state = pl->phy_state;
mac_config = link_state.link;
} else {
@@ -1520,7 +1729,7 @@ static void phylink_resolve(struct work_struct *w)
}
}
- if (pl->cur_link_an_mode != MLO_AN_FIXED)
+ if (pl->act_link_an_mode != MLO_AN_FIXED)
phylink_apply_manual_flow(pl, &link_state);
if (mac_config) {
@@ -1644,7 +1853,7 @@ int phylink_set_fixed_link(struct phylink *pl,
pl->link_config.an_complete = 1;
pl->cfg_link_an_mode = MLO_AN_FIXED;
- pl->cur_link_an_mode = pl->cfg_link_an_mode;
+ pl->req_link_an_mode = pl->cfg_link_an_mode;
return 0;
}
@@ -1732,7 +1941,7 @@ struct phylink *phylink_create(struct phylink_config *config,
}
}
- pl->cur_link_an_mode = pl->cfg_link_an_mode;
+ pl->req_link_an_mode = pl->cfg_link_an_mode;
ret = phylink_register_sfp(pl, fwnode);
if (ret < 0) {
@@ -2189,7 +2398,7 @@ void phylink_start(struct phylink *pl)
ASSERT_RTNL();
phylink_info(pl, "configuring for %s/%s link mode\n",
- phylink_an_mode_str(pl->cur_link_an_mode),
+ phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(pl->link_config.interface));
/* Always set the carrier off */
@@ -2474,7 +2683,7 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
linkmode_copy(kset->link_modes.supported, pl->supported);
- switch (pl->cur_link_an_mode) {
+ switch (pl->act_link_an_mode) {
case MLO_AN_FIXED:
/* We are using fixed settings. Report these as the
* current link settings - and note that these also
@@ -2505,6 +2714,26 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
}
EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
+static bool phylink_validate_pcs_inband_autoneg(struct phylink *pl,
+ phy_interface_t interface,
+ unsigned long *adv)
+{
+ unsigned int inband = phylink_inband_caps(pl, interface);
+ unsigned int mask;
+
+ /* If the PCS doesn't implement inband support, be permissive. */
+ if (!inband)
+ return true;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv))
+ mask = LINK_INBAND_ENABLE;
+ else
+ mask = LINK_INBAND_DISABLE;
+
+ /* Check whether the PCS implements the required mode */
+ return !!(inband & mask);
+}
+
/**
* phylink_ethtool_ksettings_set() - set the link settings
* @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -2566,7 +2795,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
/* If we have a fixed link, refuse to change link parameters.
* If the link parameters match, accept them but do nothing.
*/
- if (pl->cur_link_an_mode == MLO_AN_FIXED) {
+ if (pl->req_link_an_mode == MLO_AN_FIXED) {
if (s->speed != pl->link_config.speed ||
s->duplex != pl->link_config.duplex)
return -EINVAL;
@@ -2582,7 +2811,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* is our default case) but do not allow the advertisement to
* be changed. If the advertisement matches, simply return.
*/
- if (pl->cur_link_an_mode == MLO_AN_FIXED) {
+ if (pl->req_link_an_mode == MLO_AN_FIXED) {
if (!linkmode_equal(config.advertising,
pl->link_config.advertising))
return -EINVAL;
@@ -2617,7 +2846,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
linkmode_copy(support, pl->supported);
if (phylink_validate(pl, support, &config)) {
phylink_err(pl, "validation of %s/%s with support %*pb failed\n",
- phylink_an_mode_str(pl->cur_link_an_mode),
+ phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(config.interface),
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
return -EINVAL;
@@ -2635,6 +2864,13 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
phylink_is_empty_linkmode(config.advertising))
return -EINVAL;
+ /* Validate the autonegotiation state. We don't have a PHY in this
+ * situation, so the PCS is the media-facing entity.
+ */
+ if (!phylink_validate_pcs_inband_autoneg(pl, config.interface,
+ config.advertising))
+ return -EINVAL;
+
mutex_lock(&pl->state_mutex);
pl->link_config.speed = config.speed;
pl->link_config.duplex = config.duplex;
@@ -2717,7 +2953,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
ASSERT_RTNL();
- if (pl->cur_link_an_mode == MLO_AN_FIXED)
+ if (pl->req_link_an_mode == MLO_AN_FIXED)
return -EOPNOTSUPP;
if (!phylink_test(pl->supported, Pause) &&
@@ -2981,7 +3217,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
struct phylink_link_state state;
int val = 0xffff;
- switch (pl->cur_link_an_mode) {
+ switch (pl->act_link_an_mode) {
case MLO_AN_FIXED:
if (phy_id == 0) {
phylink_get_fixed_state(pl, &state);
@@ -3006,7 +3242,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
static int phylink_mii_write(struct phylink *pl, unsigned int phy_id,
unsigned int reg, unsigned int val)
{
- switch (pl->cur_link_an_mode) {
+ switch (pl->act_link_an_mode) {
case MLO_AN_FIXED:
break;
@@ -3176,10 +3412,11 @@ static phy_interface_t phylink_choose_sfp_interface(struct phylink *pl,
return interface;
}
-static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
+static void phylink_sfp_set_config(struct phylink *pl,
unsigned long *supported,
struct phylink_link_state *state)
{
+ u8 mode = MLO_AN_INBAND;
bool changed = false;
phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
@@ -3196,9 +3433,9 @@ static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
changed = true;
}
- if (pl->cur_link_an_mode != mode ||
+ if (pl->req_link_an_mode != mode ||
pl->link_config.interface != state->interface) {
- pl->cur_link_an_mode = mode;
+ pl->req_link_an_mode = mode;
pl->link_config.interface = state->interface;
changed = true;
@@ -3213,8 +3450,7 @@ static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
phylink_mac_initial_config(pl, false);
}
-static int phylink_sfp_config_phy(struct phylink *pl, u8 mode,
- struct phy_device *phy)
+static int phylink_sfp_config_phy(struct phylink *pl, struct phy_device *phy)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct phylink_link_state config;
@@ -3258,7 +3494,7 @@ static int phylink_sfp_config_phy(struct phylink *pl, u8 mode,
pl->link_port = pl->sfp_port;
- phylink_sfp_set_config(pl, mode, support, &config);
+ phylink_sfp_set_config(pl, support, &config);
return 0;
}
@@ -3314,6 +3550,12 @@ static int phylink_sfp_config_optical(struct phylink *pl)
phylink_dbg(pl, "optical SFP: chosen %s interface\n",
phy_modes(interface));
+ if (!phylink_validate_pcs_inband_autoneg(pl, interface,
+ config.advertising)) {
+ phylink_err(pl, "autoneg setting not compatible with PCS");
+ return -EINVAL;
+ }
+
config.interface = interface;
/* Ignore errors if we're expecting a PHY to attach later */
@@ -3327,7 +3569,7 @@ static int phylink_sfp_config_optical(struct phylink *pl)
pl->link_port = pl->sfp_port;
- phylink_sfp_set_config(pl, MLO_AN_INBAND, pl->sfp_support, &config);
+ phylink_sfp_set_config(pl, pl->sfp_support, &config);
return 0;
}
@@ -3398,19 +3640,16 @@ static void phylink_sfp_link_up(void *upstream)
phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_LINK);
}
-/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII
- * or 802.3z control word, so inband will not work.
- */
-static bool phylink_phy_no_inband(struct phy_device *phy)
-{
- return phy->is_c45 && phy_id_compare(phy->c45_ids.device_ids[1],
- 0xae025150, 0xfffffff0);
-}
-
static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
{
struct phylink *pl = upstream;
- u8 mode;
+
+ if (!phy->drv) {
+ phylink_err(pl, "PHY %s (id 0x%.8lx) has no driver loaded\n",
+ phydev_name(phy), (unsigned long)phy->phy_id);
+ phylink_err(pl, "Drivers which handle known common cases: CONFIG_BCM84881_PHY, CONFIG_MARVELL_PHY\n");
+ return -EINVAL;
+ }
/*
* This is the new way of dealing with flow control for PHYs,
@@ -3421,17 +3660,12 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
*/
phy_support_asym_pause(phy);
- if (phylink_phy_no_inband(phy))
- mode = MLO_AN_PHY;
- else
- mode = MLO_AN_INBAND;
-
/* Set the PHY's host supported interfaces */
phy_interface_and(phy->host_interfaces, phylink_sfp_interfaces,
pl->config->supported_interfaces);
/* Do the initial configuration */
- return phylink_sfp_config_phy(pl, mode, phy);
+ return phylink_sfp_config_phy(pl, phy);
}
static void phylink_sfp_disconnect_phy(void *upstream,
@@ -3626,6 +3860,7 @@ static void phylink_decode_usgmii_word(struct phylink_link_state *state,
/**
* phylink_mii_c22_pcs_decode_state() - Decode MAC PCS state from MII registers
* @state: a pointer to a &struct phylink_link_state.
+ * @neg_mode: link negotiation mode (PHYLINK_PCS_NEG_xxx)
* @bmsr: The value of the %MII_BMSR register
* @lpa: The value of the %MII_LPA register
*
@@ -3638,32 +3873,45 @@ static void phylink_decode_usgmii_word(struct phylink_link_state *state,
* accessing @bmsr and @lpa cannot be done with MDIO directly.
*/
void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
- u16 bmsr, u16 lpa)
+ unsigned int neg_mode, u16 bmsr, u16 lpa)
{
state->link = !!(bmsr & BMSR_LSTATUS);
state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
- /* If there is no link or autonegotiation is disabled, the LP advertisement
- * data is not meaningful, so don't go any further.
- */
- if (!state->link || !linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- state->advertising))
+
+ /* If the link is down, the advertisement data is undefined. */
+ if (!state->link)
return;
switch (state->interface) {
case PHY_INTERFACE_MODE_1000BASEX:
- phylink_decode_c37_word(state, lpa, SPEED_1000);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
+ phylink_decode_c37_word(state, lpa, SPEED_1000);
+ } else {
+ state->speed = SPEED_1000;
+ state->duplex = DUPLEX_FULL;
+ state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX;
+ }
break;
case PHY_INTERFACE_MODE_2500BASEX:
- phylink_decode_c37_word(state, lpa, SPEED_2500);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
+ phylink_decode_c37_word(state, lpa, SPEED_2500);
+ } else {
+ state->speed = SPEED_2500;
+ state->duplex = DUPLEX_FULL;
+ state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX;
+ }
break;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
- phylink_decode_sgmii_word(state, lpa);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ phylink_decode_sgmii_word(state, lpa);
break;
+
case PHY_INTERFACE_MODE_QUSGMII:
- phylink_decode_usgmii_word(state, lpa);
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
+ phylink_decode_usgmii_word(state, lpa);
break;
default:
@@ -3676,6 +3924,7 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_decode_state);
/**
* phylink_mii_c22_pcs_get_state() - read the MAC PCS state
* @pcs: a pointer to a &struct mdio_device.
+ * @neg_mode: link negotiation mode (PHYLINK_PCS_NEG_xxx)
* @state: a pointer to a &struct phylink_link_state.
*
* Helper for MAC PCS supporting the 802.3 clause 22 register set for
@@ -3688,6 +3937,7 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_decode_state);
* structure.
*/
void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state)
{
int bmsr, lpa;
@@ -3699,7 +3949,7 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
return;
}
- phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+ phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lpa);
}
EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_get_state);
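With the added neg_mode parameter, a clause 22 PCS driver simply forwards the mode phylink passes to its pcs_get_state operation. struct foo_pcs and its mdio member are assumptions for illustration:

	struct foo_pcs {
		struct phylink_pcs pcs;
		struct mdio_device *mdio;
	};

	static void foo_pcs_get_state(struct phylink_pcs *pcs,
				      unsigned int neg_mode,
				      struct phylink_link_state *state)
	{
		struct foo_pcs *fp = container_of(pcs, struct foo_pcs, pcs);

		/* BMSR/LPA are read over MDIO and decoded per neg_mode */
		phylink_mii_c22_pcs_get_state(fp->mdio, neg_mode, state);
	}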
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index 105602581a03..26350b962890 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -1098,7 +1098,7 @@ static struct phy_driver at803x_driver[] = {
module_phy_driver(at803x_driver);
-static struct mdio_device_id __maybe_unused atheros_tbl[] = {
+static const struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
{ PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index bd8a51ec0ecd..3279de857b47 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -828,7 +828,7 @@ static struct phy_driver qca807x_drivers[] = {
};
module_phy_driver(qca807x_drivers);
-static struct mdio_device_id __maybe_unused qca807x_tbl[] = {
+static const struct mdio_device_id __maybe_unused qca807x_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_QCA8072) },
{ PHY_ID_MATCH_EXACT(PHY_ID_QCA8075) },
{ }
diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
index 5048304ccc9e..71498c518f0f 100644
--- a/drivers/net/phy/qcom/qca808x.c
+++ b/drivers/net/phy/qcom/qca808x.c
@@ -655,7 +655,7 @@ static struct phy_driver qca808x_driver[] = {
module_phy_driver(qca808x_driver);
-static struct mdio_device_id __maybe_unused qca808x_tbl[] = {
+static const struct mdio_device_id __maybe_unused qca808x_tbl[] = {
{ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/qcom/qca83xx.c b/drivers/net/phy/qcom/qca83xx.c
index 7a5039920b9f..bc70ed8efd86 100644
--- a/drivers/net/phy/qcom/qca83xx.c
+++ b/drivers/net/phy/qcom/qca83xx.c
@@ -259,7 +259,7 @@ static struct phy_driver qca83xx_driver[] = {
module_phy_driver(qca83xx_driver);
-static struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
+static const struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
{ PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 30d15f7c9b03..7b70ba6cab66 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -155,7 +155,7 @@ static struct phy_driver qs6612_driver[] = { {
module_phy_driver(qs6612_driver);
-static struct mdio_device_id __maybe_unused qs6612_tbl[] = {
+static const struct mdio_device_id __maybe_unused qs6612_tbl[] = {
{ 0x00181440, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/realtek/Kconfig b/drivers/net/phy/realtek/Kconfig
new file mode 100644
index 000000000000..31935f147d87
--- /dev/null
+++ b/drivers/net/phy/realtek/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config REALTEK_PHY
+ tristate "Realtek PHYs"
+ help
+ Currently supports RTL821x/RTL822x and Fast Ethernet PHYs
+
+config REALTEK_PHY_HWMON
+ def_bool REALTEK_PHY && HWMON
+ depends on !(REALTEK_PHY=y && HWMON=m)
+ help
+ Optional hwmon support for the temperature sensor
diff --git a/drivers/net/phy/realtek/Makefile b/drivers/net/phy/realtek/Makefile
new file mode 100644
index 000000000000..dd21cf87f2f1
--- /dev/null
+++ b/drivers/net/phy/realtek/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+realtek-y += realtek_main.o
+realtek-$(CONFIG_REALTEK_PHY_HWMON) += realtek_hwmon.o
+obj-$(CONFIG_REALTEK_PHY) += realtek.o
diff --git a/drivers/net/phy/realtek/realtek.h b/drivers/net/phy/realtek/realtek.h
new file mode 100644
index 000000000000..a39b44fa18a0
--- /dev/null
+++ b/drivers/net/phy/realtek/realtek.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef REALTEK_H
+#define REALTEK_H
+
+#include <linux/phy.h>
+
+int rtl822x_hwmon_init(struct phy_device *phydev);
+
+#endif /* REALTEK_H */
diff --git a/drivers/net/phy/realtek/realtek_hwmon.c b/drivers/net/phy/realtek/realtek_hwmon.c
new file mode 100644
index 000000000000..1ecb410bb941
--- /dev/null
+++ b/drivers/net/phy/realtek/realtek_hwmon.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HWMON support for Realtek PHYs
+ *
+ * Author: Heiner Kallweit <hkallweit1@gmail.com>
+ */
+
+#include <linux/hwmon.h>
+#include <linux/phy.h>
+
+#include "realtek.h"
+
+#define RTL822X_VND2_TSALRM 0xa662
+#define RTL822X_VND2_TSRR 0xbd84
+#define RTL822X_VND2_TSSR 0xb54c
+
+static int rtl822x_hwmon_get_temp(int raw)
+{
+ if (raw >= 512)
+ raw -= 1024;
+
+ return 1000 * raw / 2;
+}
+
+static int rtl822x_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int raw;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ raw = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSRR) & 0x3ff;
+ *val = rtl822x_hwmon_get_temp(raw);
+ break;
+ case hwmon_temp_max:
+ /* Chip reduces speed to 1G if threshold is exceeded */
+ raw = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSSR) >> 6;
+ *val = rtl822x_hwmon_get_temp(raw);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops rtl822x_hwmon_ops = {
+ .visible = 0444,
+ .read = rtl822x_hwmon_read,
+};
+
+static const struct hwmon_channel_info * const rtl822x_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX),
+ NULL
+};
+
+static const struct hwmon_chip_info rtl822x_hwmon_chip_info = {
+ .ops = &rtl822x_hwmon_ops,
+ .info = rtl822x_hwmon_info,
+};
+
+int rtl822x_hwmon_init(struct phy_device *phydev)
+{
+ struct device *hwdev, *dev = &phydev->mdio.dev;
+ const char *name;
+
+ /* Ensure over-temp alarm is reset. */
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSALRM, 3);
+
+ name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ hwdev = devm_hwmon_device_register_with_info(dev, name, phydev,
+ &rtl822x_hwmon_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwdev);
+}
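Two worked values for rtl822x_hwmon_get_temp(), which treats the raw reading as a 10-bit two's-complement count of 0.5 °C steps and returns millidegrees:

	rtl822x_hwmon_get_temp(0x014);	/* 20: 1000 * 20 / 2 = 10000 m°C */
	rtl822x_hwmon_get_temp(0x3ec);	/* 1004 >= 512: 1004 - 1024 = -20 -> -10000 m°C */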
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek/realtek_main.c
index f65d7f1f348e..38149958d95b 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -14,6 +14,8 @@
#include <linux/delay.h>
#include <linux/clk.h>
+#include "realtek.h"
+
#define RTL821x_PHYSR 0x11
#define RTL821x_PHYSR_DUPLEX BIT(13)
#define RTL821x_PHYSR_SPEED GENMASK(15, 14)
@@ -736,7 +738,11 @@ static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
{
int ret;
- if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) {
+ if (devnum == MDIO_MMD_VEND2) {
+ rtl821x_write_page(phydev, regnum >> 4);
+ ret = __phy_read(phydev, 0x10 + ((regnum & 0xf) >> 1));
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) {
rtl821x_write_page(phydev, 0xa5c);
ret = __phy_read(phydev, 0x12);
rtl821x_write_page(phydev, 0);
@@ -760,7 +766,11 @@ static int rtlgen_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
{
int ret;
- if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
+ if (devnum == MDIO_MMD_VEND2) {
+ rtl821x_write_page(phydev, regnum >> 4);
+ ret = __phy_write(phydev, 0x10 + ((regnum & 0xf) >> 1), val);
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
rtl821x_write_page(phydev, 0xa5d);
ret = __phy_write(phydev, 0x10, val);
rtl821x_write_page(phydev, 0);
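A worked mapping for the MDIO_MMD_VEND2 paging above: the upper bits of the MMD register number select the page, and bits 3:1 index one of the paged registers starting at 0x10. For example, with the hwmon register RTL822X_VND2_TSALRM (0xa662):

	page = 0xa662 >> 4;			/* 0xa66 */
	reg  = 0x10 + ((0xa662 & 0xf) >> 1);	/* 0x10 + 1 = 0x11 */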
@@ -812,6 +822,15 @@ static int rtl822x_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
return ret;
}
+static int rtl822x_probe(struct phy_device *phydev)
+{
+ if (IS_ENABLED(CONFIG_REALTEK_PHY_HWMON) &&
+ phydev->phy_id != RTL_GENERIC_PHYID)
+ return rtl822x_hwmon_init(phydev);
+
+ return 0;
+}
+
static int rtl822xb_config_init(struct phy_device *phydev)
{
bool has_2500, has_sgmii;
@@ -1511,6 +1530,7 @@ static struct phy_driver realtek_drvs[] = {
.match_phy_device = rtl_internal_nbaset_match_phy_device,
.name = "Realtek Internal NBASE-T PHY",
.flags = PHY_IS_INTERNAL,
+ .probe = rtl822x_probe,
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
.read_status = rtl822x_read_status,
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index bb13e75183ee..b338f385e15a 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -188,7 +188,7 @@ static struct phy_driver rockchip_phy_driver[] = {
module_phy_driver(rockchip_phy_driver);
-static struct mdio_device_id __maybe_unused rockchip_phy_tbl[] = {
+static const struct mdio_device_id __maybe_unused rockchip_phy_tbl[] = {
{ INTERNAL_EPHY_ID, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index e1853599d9ba..31463b9e5697 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -838,7 +838,7 @@ MODULE_DESCRIPTION("SMSC PHY driver");
MODULE_AUTHOR("Herbert Valerio Riedel");
MODULE_LICENSE("GPL");
-static struct mdio_device_id __maybe_unused smsc_tbl[] = {
+static const struct mdio_device_id __maybe_unused smsc_tbl[] = {
{ 0x0007c0a0, 0xfffffff0 },
{ 0x0007c0b0, 0xfffffff0 },
{ 0x0007c0c0, 0xfffffff0 },
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 7196e927c2cd..076a370be849 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -289,7 +289,7 @@ static int ks8995_reset(struct ks8995_switch *ks)
}
static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+ const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
{
struct device *dev;
struct ks8995_switch *ks8995;
@@ -301,7 +301,7 @@ static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
}
static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+ const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
{
struct device *dev;
struct ks8995_switch *ks8995;
@@ -401,8 +401,8 @@ static const struct bin_attribute ks8995_registers_attr = {
.mode = 0600,
},
.size = KS8995_REGS_SIZE,
- .read = ks8995_registers_read,
- .write = ks8995_registers_write,
+ .read_new = ks8995_registers_read,
+ .write_new = ks8995_registers_write,
};
/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 309e4c3496c4..d4835d4c50e0 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -124,7 +124,7 @@ static struct phy_driver ste10xp_pdriver[] = {
module_phy_driver(ste10xp_pdriver);
-static struct mdio_device_id __maybe_unused ste10Xp_tbl[] = {
+static const struct mdio_device_id __maybe_unused ste10Xp_tbl[] = {
{ STE101P_PHY_ID, 0xfffffff0 },
{ STE100P_PHY_ID, 0xffffffff },
{ }
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 8057ea8dbc21..752d4bf7bb99 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -87,7 +87,7 @@ static struct phy_driver teranetics_driver[] = {
module_phy_driver(teranetics_driver);
-static struct mdio_device_id __maybe_unused teranetics_tbl[] = {
+static const struct mdio_device_id __maybe_unused teranetics_tbl[] = {
{ PHY_ID_TN2020, 0xffffffff },
{ }
};
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 38834347a427..900cb756c366 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -90,7 +90,7 @@ static struct phy_driver upd60620_driver[1] = { {
module_phy_driver(upd60620_driver);
-static struct mdio_device_id __maybe_unused upd60620_tbl[] = {
+static const struct mdio_device_id __maybe_unused upd60620_tbl[] = {
{ UPD60620_PHY_ID, 0xfffffffe },
{ }
};
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 2377179de017..b1b7bbba284e 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -674,7 +674,7 @@ static struct phy_driver vsc82xx_driver[] = {
module_phy_driver(vsc82xx_driver);
-static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
+static const struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8234, 0x000ffff0 },
{ PHY_ID_VSC8244, 0x000fffc0 },
{ PHY_ID_VSC8572, 0x000ffff0 },
diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
index 0af7db80b2f8..fc9e23927b3b 100644
--- a/drivers/net/pse-pd/pd692x0.c
+++ b/drivers/net/pse-pd/pd692x0.c
@@ -431,31 +431,6 @@ static int pd692x0_pi_disable(struct pse_controller_dev *pcdev, int id)
return 0;
}
-static int pd692x0_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
-{
- struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
- struct pd692x0_msg msg, buf = {0};
- int ret;
-
- ret = pd692x0_fw_unavailable(priv);
- if (ret)
- return ret;
-
- msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
- msg.sub[2] = id;
- ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
- if (ret < 0)
- return ret;
-
- if (buf.sub[1]) {
- priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
- return 1;
- } else {
- priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
- return 0;
- }
-}
-
struct pd692x0_pse_ext_state_mapping {
u32 status_code;
enum ethtool_c33_pse_ext_state pse_ext_state;
@@ -517,21 +492,38 @@ pd692x0_pse_ext_state_map[] = {
{ /* sentinel */ }
};
-static void
-pd692x0_get_ext_state(struct ethtool_c33_pse_ext_state_info *c33_ext_state_info,
- u32 status_code)
+static int
+pd692x0_pi_get_ext_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_ext_state_info *ext_state_info)
{
+ struct ethtool_c33_pse_ext_state_info *c33_ext_state_info;
const struct pd692x0_pse_ext_state_mapping *ext_state_map;
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ c33_ext_state_info = &ext_state_info->c33_ext_state_info;
ext_state_map = pd692x0_pse_ext_state_map;
while (ext_state_map->status_code) {
- if (ext_state_map->status_code == status_code) {
+ if (ext_state_map->status_code == buf.sub[0]) {
c33_ext_state_info->c33_pse_ext_state = ext_state_map->pse_ext_state;
c33_ext_state_info->__c33_pse_ext_substate = ext_state_map->pse_ext_substate;
- return;
+ return 0;
}
ext_state_map++;
}
+
+ return 0;
}
struct pd692x0_class_pw {
@@ -613,35 +605,36 @@ static int pd692x0_pi_set_pw_from_table(struct device *dev,
}
static int
-pd692x0_pi_get_pw_ranges(struct pse_control_status *st)
+pd692x0_pi_get_pw_limit_ranges(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_limit_ranges *pw_limit_ranges)
{
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
const struct pd692x0_class_pw *pw_table;
int i;
pw_table = pd692x0_class_pw_table;
- st->c33_pw_limit_ranges = kcalloc(PD692X0_CLASS_PW_TABLE_SIZE,
- sizeof(struct ethtool_c33_pse_pw_limit_range),
- GFP_KERNEL);
- if (!st->c33_pw_limit_ranges)
+ c33_pw_limit_ranges = kcalloc(PD692X0_CLASS_PW_TABLE_SIZE,
+ sizeof(*c33_pw_limit_ranges),
+ GFP_KERNEL);
+ if (!c33_pw_limit_ranges)
return -ENOMEM;
for (i = 0; i < PD692X0_CLASS_PW_TABLE_SIZE; i++, pw_table++) {
- st->c33_pw_limit_ranges[i].min = pw_table->class_pw;
- st->c33_pw_limit_ranges[i].max = pw_table->class_pw + pw_table->max_added_class_pw;
+ c33_pw_limit_ranges[i].min = pw_table->class_pw;
+ c33_pw_limit_ranges[i].max = pw_table->class_pw +
+ pw_table->max_added_class_pw;
}
- st->c33_pw_limit_nb_ranges = i;
- return 0;
+ pw_limit_ranges->c33_pw_limit_ranges = c33_pw_limit_ranges;
+ return i;
}
-static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
- unsigned long id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+static int
+pd692x0_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
{
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct pd692x0_msg msg, buf = {0};
- u32 class;
int ret;
ret = pd692x0_fw_unavailable(priv);
@@ -654,39 +647,65 @@ static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
if (ret < 0)
return ret;
- /* Compare Port Status (Communication Protocol Document par. 7.1) */
- if ((buf.sub[0] & 0xf0) == 0x80 || (buf.sub[0] & 0xf0) == 0x90)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
- else if (buf.sub[0] == 0x1b || buf.sub[0] == 0x22)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING;
- else if (buf.sub[0] == 0x12)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_FAULT;
- else
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
-
if (buf.sub[1])
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
else
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
- priv->admin_state[id] = status->c33_admin_state;
+ priv->admin_state[id] = admin_state->c33_admin_state;
- pd692x0_get_ext_state(&status->c33_ext_state_info, buf.sub[0]);
- status->c33_actual_pw = (buf.data[0] << 4 | buf.data[1]) * 100;
+ return 0;
+}
- msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
+static int
+pd692x0_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
msg.sub[2] = id;
- memset(&buf, 0, sizeof(buf));
ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
if (ret < 0)
return ret;
- ret = pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]);
- if (ret < 0)
+ /* Compare Port Status (Communication Protocol Document par. 7.1) */
+ if ((buf.sub[0] & 0xf0) == 0x80 || (buf.sub[0] & 0xf0) == 0x90)
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ else if (buf.sub[0] == 0x1b || buf.sub[0] == 0x22)
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING;
+ else if (buf.sub[0] == 0x12)
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_FAULT;
+ else
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
+
+ return 0;
+}
+
+static int
+pd692x0_pi_get_pw_class(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ u32 class;
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
return ret;
- status->c33_avail_pw_limit = ret;
- memset(&buf, 0, sizeof(buf));
msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_CLASS];
msg.sub[2] = id;
ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
@@ -695,13 +714,29 @@ static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
class = buf.data[3] >> 4;
if (class <= 8)
- status->c33_pw_class = class;
+ return class;
- ret = pd692x0_pi_get_pw_ranges(status);
+ return 0;
+}
+
+static int
+pd692x0_pi_get_actual_pw(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
if (ret < 0)
return ret;
- return 0;
+ return (buf.data[0] << 4 | buf.data[1]) * 100;
}
static struct pd692x0_msg_ver pd692x0_get_sw_version(struct pd692x0_priv *priv)
@@ -999,13 +1034,12 @@ static int pd692x0_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
return (buf.sub[0] << 8 | buf.sub[1]) * 100000;
}
-static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
- int id)
+static int pd692x0_pi_get_pw_limit(struct pse_controller_dev *pcdev,
+ int id)
{
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct pd692x0_msg msg, buf = {0};
- int mW, uV, uA, ret;
- s64 tmp_64;
+ int ret;
msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
msg.sub[2] = id;
@@ -1013,48 +1047,24 @@ static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
if (ret < 0)
return ret;
- ret = pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]);
- if (ret < 0)
- return ret;
- mW = ret;
-
- ret = pd692x0_pi_get_voltage(pcdev, id);
- if (ret < 0)
- return ret;
- uV = ret;
-
- tmp_64 = mW;
- tmp_64 *= 1000000000ull;
- /* uA = mW * 1000000000 / uV */
- uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
- return uA;
+ return pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]);
}
-static int pd692x0_pi_set_current_limit(struct pse_controller_dev *pcdev,
- int id, int max_uA)
+static int pd692x0_pi_set_pw_limit(struct pse_controller_dev *pcdev,
+ int id, int max_mW)
{
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct device *dev = &priv->client->dev;
struct pd692x0_msg msg, buf = {0};
- int uV, ret, mW;
- s64 tmp_64;
+ int ret;
ret = pd692x0_fw_unavailable(priv);
if (ret)
return ret;
- ret = pd692x0_pi_get_voltage(pcdev, id);
- if (ret < 0)
- return ret;
- uV = ret;
-
msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
msg.sub[2] = id;
- tmp_64 = uV;
- tmp_64 *= max_uA;
- /* mW = uV * uA / 1000000000 */
- mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
- ret = pd692x0_pi_set_pw_from_table(dev, &msg, mW);
+ ret = pd692x0_pi_set_pw_from_table(dev, &msg, max_mW);
if (ret)
return ret;
@@ -1063,13 +1073,17 @@ static int pd692x0_pi_set_current_limit(struct pse_controller_dev *pcdev,
static const struct pse_controller_ops pd692x0_ops = {
.setup_pi_matrix = pd692x0_setup_pi_matrix,
- .ethtool_get_status = pd692x0_ethtool_get_status,
+ .pi_get_admin_state = pd692x0_pi_get_admin_state,
+ .pi_get_pw_status = pd692x0_pi_get_pw_status,
+ .pi_get_ext_state = pd692x0_pi_get_ext_state,
+ .pi_get_pw_class = pd692x0_pi_get_pw_class,
+ .pi_get_actual_pw = pd692x0_pi_get_actual_pw,
.pi_enable = pd692x0_pi_enable,
.pi_disable = pd692x0_pi_disable,
- .pi_is_enabled = pd692x0_pi_is_enabled,
.pi_get_voltage = pd692x0_pi_get_voltage,
- .pi_get_current_limit = pd692x0_pi_get_current_limit,
- .pi_set_current_limit = pd692x0_pi_set_current_limit,
+ .pi_get_pw_limit = pd692x0_pi_get_pw_limit,
+ .pi_set_pw_limit = pd692x0_pi_set_pw_limit,
+ .pi_get_pw_limit_ranges = pd692x0_pi_get_pw_limit_ranges,
};
#define PD692X0_FW_LINE_MAX_SZ 0xff
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index 2906ce173f66..4f2a54afc4d0 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -6,6 +6,7 @@
//
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/of.h>
#include <linux/pse-pd/pse.h>
#include <linux/regulator/driver.h>
@@ -210,16 +211,25 @@ out:
static int pse_pi_is_enabled(struct regulator_dev *rdev)
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ struct pse_admin_state admin_state = {0};
const struct pse_controller_ops *ops;
int id, ret;
ops = pcdev->ops;
- if (!ops->pi_is_enabled)
+ if (!ops->pi_get_admin_state)
return -EOPNOTSUPP;
id = rdev_get_id(rdev);
mutex_lock(&pcdev->lock);
- ret = ops->pi_is_enabled(pcdev, id);
+ ret = ops->pi_get_admin_state(pcdev, id, &admin_state);
+ if (ret)
+ goto out;
+
+ if (admin_state.podl_admin_state == ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED ||
+ admin_state.c33_admin_state == ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED)
+ ret = 1;
+
+out:
mutex_unlock(&pcdev->lock);
return ret;
@@ -291,33 +301,25 @@ static int pse_pi_get_voltage(struct regulator_dev *rdev)
return ret;
}
-static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
- int id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status);
-
static int pse_pi_get_current_limit(struct regulator_dev *rdev)
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
const struct pse_controller_ops *ops;
- struct netlink_ext_ack extack = {};
- struct pse_control_status st = {};
- int id, uV, ret;
+ int id, uV, mW, ret;
s64 tmp_64;
ops = pcdev->ops;
id = rdev_get_id(rdev);
+ if (!ops->pi_get_pw_limit || !ops->pi_get_voltage)
+ return -EOPNOTSUPP;
+
mutex_lock(&pcdev->lock);
- if (ops->pi_get_current_limit) {
- ret = ops->pi_get_current_limit(pcdev, id);
+ ret = ops->pi_get_pw_limit(pcdev, id);
+ if (ret < 0)
goto out;
- }
+ mW = ret;
- /* If pi_get_current_limit() callback not populated get voltage
- * from pi_get_voltage() and power limit from ethtool_get_status()
- * to calculate current limit.
- */
- ret = _pse_pi_get_voltage(rdev);
+ ret = pse_pi_get_voltage(rdev);
if (!ret) {
dev_err(pcdev->dev, "Voltage null\n");
ret = -ERANGE;
@@ -327,16 +329,7 @@ static int pse_pi_get_current_limit(struct regulator_dev *rdev)
goto out;
uV = ret;
- ret = _pse_ethtool_get_status(pcdev, id, &extack, &st);
- if (ret)
- goto out;
-
- if (!st.c33_avail_pw_limit) {
- ret = -ENODATA;
- goto out;
- }
-
- tmp_64 = st.c33_avail_pw_limit;
+ tmp_64 = mW;
tmp_64 *= 1000000000ull;
/* uA = mW * 1000000000 / uV */
ret = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
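A worked example of the mW-to-uA conversion, under assumed figures of a 15000 mW port limit at 54 V (54,000,000 uV):

	tmp_64 = 15000 * 1000000000ull;			/* 1.5e13 */
	uA = DIV_ROUND_CLOSEST_ULL(tmp_64, 54000000);	/* 277778 uA, ~277.8 mA */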
@@ -351,15 +344,33 @@ static int pse_pi_set_current_limit(struct regulator_dev *rdev, int min_uA,
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
const struct pse_controller_ops *ops;
- int id, ret;
+ int id, mW, ret;
+ s64 tmp_64;
ops = pcdev->ops;
- if (!ops->pi_set_current_limit)
+ if (!ops->pi_set_pw_limit || !ops->pi_get_voltage)
return -EOPNOTSUPP;
+ if (max_uA > MAX_PI_CURRENT)
+ return -ERANGE;
+
id = rdev_get_id(rdev);
mutex_lock(&pcdev->lock);
- ret = ops->pi_set_current_limit(pcdev, id, max_uA);
+ ret = pse_pi_get_voltage(rdev);
+ if (!ret) {
+ dev_err(pcdev->dev, "Voltage null\n");
+ ret = -ERANGE;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+
+ tmp_64 = ret;
+ tmp_64 *= max_uA;
+ /* mW = uA * uV / 1000000000 */
+ mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+ ret = ops->pi_set_pw_limit(pcdev, id, mW);
+out:
mutex_unlock(&pcdev->lock);
return ret;
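The setter goes the opposite direction before programming the hardware. A matching sketch (hypothetical helper name), assuming the same units:

static int pse_cur_to_pw_mW(int max_uA, int uV)
{
	s64 tmp_64 = uV;

	/* mW = uA * uV / 1000000000,
	 * e.g. 555556 uA at 54000000 uV -> 30000 mW
	 */
	tmp_64 *= max_uA;
	return DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
}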
@@ -403,17 +414,16 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
- if (pcdev->ops->pi_set_current_limit) {
+ if (pcdev->ops->pi_set_pw_limit)
rinit_data->constraints.valid_ops_mask |=
REGULATOR_CHANGE_CURRENT;
- rinit_data->constraints.max_uA = MAX_PI_CURRENT;
- }
rinit_data->supply_regulator = "vpwr";
rconfig.dev = pcdev->dev;
rconfig.driver_data = pcdev;
rconfig.init_data = rinit_data;
+ rconfig.of_node = pcdev->pi[id].np;
rdev = devm_regulator_register(pcdev->dev, rdesc, &rconfig);
if (IS_ERR(rdev)) {
@@ -444,6 +454,13 @@ int pse_controller_register(struct pse_controller_dev *pcdev)
if (!pcdev->nr_lines)
pcdev->nr_lines = 1;
+ if (!pcdev->ops->pi_get_admin_state ||
+ !pcdev->ops->pi_get_pw_status) {
+ dev_err(pcdev->dev,
+ "Mandatory status report callbacks are missing");
+ return -EINVAL;
+ }
+
ret = of_load_pse_pis(pcdev);
if (ret)
return ret;
@@ -736,23 +753,6 @@ out:
}
EXPORT_SYMBOL_GPL(of_pse_control_get);
-static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
- int id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
-{
- const struct pse_controller_ops *ops;
-
- ops = pcdev->ops;
- if (!ops->ethtool_get_status) {
- NL_SET_ERR_MSG(extack,
- "PSE driver does not support status report");
- return -EOPNOTSUPP;
- }
-
- return ops->ethtool_get_status(pcdev, id, extack, status);
-}
-
/**
* pse_ethtool_get_status - get status of PSE control
* @psec: PSE control pointer
@@ -763,15 +763,81 @@ static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
*/
int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+ struct ethtool_pse_control_status *status)
{
- int err;
+ struct pse_admin_state admin_state = {0};
+ struct pse_pw_status pw_status = {0};
+ const struct pse_controller_ops *ops;
+ struct pse_controller_dev *pcdev;
+ int ret;
- mutex_lock(&psec->pcdev->lock);
- err = _pse_ethtool_get_status(psec->pcdev, psec->id, extack, status);
- mutex_unlock(&psec->pcdev->lock);
+ pcdev = psec->pcdev;
+ ops = pcdev->ops;
+ mutex_lock(&pcdev->lock);
+ ret = ops->pi_get_admin_state(pcdev, psec->id, &admin_state);
+ if (ret)
+ goto out;
+ status->podl_admin_state = admin_state.podl_admin_state;
+ status->c33_admin_state = admin_state.c33_admin_state;
- return err;
+ ret = ops->pi_get_pw_status(pcdev, psec->id, &pw_status);
+ if (ret)
+ goto out;
+ status->podl_pw_status = pw_status.podl_pw_status;
+ status->c33_pw_status = pw_status.c33_pw_status;
+
+ if (ops->pi_get_ext_state) {
+ struct pse_ext_state_info ext_state_info = {0};
+
+ ret = ops->pi_get_ext_state(pcdev, psec->id,
+ &ext_state_info);
+ if (ret)
+ goto out;
+
+ memcpy(&status->c33_ext_state_info,
+ &ext_state_info.c33_ext_state_info,
+ sizeof(status->c33_ext_state_info));
+ }
+
+ if (ops->pi_get_pw_class) {
+ ret = ops->pi_get_pw_class(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->c33_pw_class = ret;
+ }
+
+ if (ops->pi_get_actual_pw) {
+ ret = ops->pi_get_actual_pw(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->c33_actual_pw = ret;
+ }
+
+ if (ops->pi_get_pw_limit) {
+ ret = ops->pi_get_pw_limit(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->c33_avail_pw_limit = ret;
+ }
+
+ if (ops->pi_get_pw_limit_ranges) {
+ struct pse_pw_limit_ranges pw_limit_ranges = {0};
+
+ ret = ops->pi_get_pw_limit_ranges(pcdev, psec->id,
+ &pw_limit_ranges);
+ if (ret < 0)
+ goto out;
+
+ status->c33_pw_limit_ranges =
+ pw_limit_ranges.c33_pw_limit_ranges;
+ status->c33_pw_limit_nb_ranges = ret;
+ }
+out:
+ mutex_unlock(&psec->pcdev->lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(pse_ethtool_get_status);
@@ -876,6 +942,9 @@ int pse_ethtool_set_pw_limit(struct pse_control *psec,
int uV, uA, ret;
s64 tmp_64;
+ if (pw_limit > MAX_PI_PW)
+ return -ERANGE;
+
ret = regulator_get_voltage(psec->ps);
if (!ret) {
NL_SET_ERR_MSG(extack,
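With this rework, pi_get_admin_state() and pi_get_pw_status() are the two mandatory callbacks checked in pse_controller_register() above. A minimal sketch of what a driver now has to provide, using the types introduced by this patch (the my_pse_* names are illustrative only):

static int my_pse_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
				     struct pse_admin_state *admin_state)
{
	/* Report the admin state of PI "id"; c33 and/or podl as applicable */
	admin_state->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
	return 0;
}

static int my_pse_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
				   struct pse_pw_status *pw_status)
{
	pw_status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
	return 0;
}

static const struct pse_controller_ops my_pse_ops = {
	.pi_get_admin_state = my_pse_pi_get_admin_state,
	.pi_get_pw_status = my_pse_pi_get_pw_status,
};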
diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c
index 64ab36974fe0..6ce6773fff31 100644
--- a/drivers/net/pse-pd/pse_regulator.c
+++ b/drivers/net/pse-pd/pse_regulator.c
@@ -52,17 +52,19 @@ pse_reg_pi_disable(struct pse_controller_dev *pcdev, int id)
}
static int
-pse_reg_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+pse_reg_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
{
struct pse_reg_priv *priv = to_pse_reg(pcdev);
- return regulator_is_enabled(priv->ps);
+ admin_state->podl_admin_state = priv->admin_state;
+
+ return 0;
}
static int
-pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+pse_reg_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
{
struct pse_reg_priv *priv = to_pse_reg(pcdev);
int ret;
@@ -72,20 +74,19 @@ pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
return ret;
if (!ret)
- status->podl_pw_status = ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED;
+ pw_status->podl_pw_status =
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED;
else
- status->podl_pw_status =
+ pw_status->podl_pw_status =
ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING;
- status->podl_admin_state = priv->admin_state;
-
return 0;
}
static const struct pse_controller_ops pse_reg_ops = {
- .ethtool_get_status = pse_reg_ethtool_get_status,
+ .pi_get_admin_state = pse_reg_pi_get_admin_state,
+ .pi_get_pw_status = pse_reg_pi_get_pw_status,
.pi_enable = pse_reg_pi_enable,
- .pi_is_enabled = pse_reg_pi_is_enabled,
.pi_disable = pse_reg_pi_disable,
};
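pse_regulator.c above is the simplest real conversion; wiring such an ops table into the core follows the registration path patched earlier (nr_lines defaults to 1, and ops lacking the mandatory callbacks are rejected with -EINVAL). A hedged probe sketch, error handling trimmed:

static int my_pse_probe(struct platform_device *pdev)
{
	struct pse_controller_dev *pcdev;

	pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
	if (!pcdev)
		return -ENOMEM;

	pcdev->dev = &pdev->dev;
	pcdev->ops = &my_pse_ops;	/* must include the two mandatory callbacks */
	pcdev->nr_lines = 1;

	return pse_controller_register(pcdev);
}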
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
index 8797ca1a8a21..5e9dda2c0eac 100644
--- a/drivers/net/pse-pd/tps23881.c
+++ b/drivers/net/pse-pd/tps23881.c
@@ -25,20 +25,32 @@
#define TPS23881_REG_GEN_MASK 0x17
#define TPS23881_REG_NBITACC BIT(5)
#define TPS23881_REG_PW_EN 0x19
+#define TPS23881_REG_2PAIR_POL1 0x1e
#define TPS23881_REG_PORT_MAP 0x26
#define TPS23881_REG_PORT_POWER 0x29
-#define TPS23881_REG_POEPLUS 0x40
+#define TPS23881_REG_4PAIR_POL1 0x2a
+#define TPS23881_REG_INPUT_V 0x2e
+#define TPS23881_REG_CHAN1_A 0x30
+#define TPS23881_REG_CHAN1_V 0x32
+#define TPS23881_REG_FOLDBACK 0x40
#define TPS23881_REG_TPON BIT(0)
#define TPS23881_REG_FWREV 0x41
#define TPS23881_REG_DEVID 0x43
#define TPS23881_REG_DEVID_MASK 0xF0
#define TPS23881_DEVICE_ID 0x02
+#define TPS23881_REG_CHAN1_CLASS 0x4c
#define TPS23881_REG_SRAM_CTRL 0x60
#define TPS23881_REG_SRAM_DATA 0x61
+#define TPS23881_UV_STEP 3662
+#define TPS23881_NA_STEP 70190
+#define TPS23881_MW_STEP 500
+#define TPS23881_MIN_PI_PW_LIMIT_MW 2000
+
struct tps23881_port_desc {
u8 chan[2];
bool is_4p;
+ int pw_pol;
};
struct tps23881_priv {
@@ -53,36 +65,123 @@ static struct tps23881_priv *to_tps23881_priv(struct pse_controller_dev *pcdev)
return container_of(pcdev, struct tps23881_priv, pcdev);
}
+/*
+ * Helper to extract a value from a u16 register value, which is made of two
+ * u8 registers. The function calculates the bit offset based on the channel
+ * and extracts the relevant bits using a provided field mask.
+ *
+ * @param reg_val: The u16 register value (composed of two u8 registers).
+ * @param chan: The channel number (0-7).
+ * @param field_offset: The base bit offset to apply (e.g., 0 or 4).
+ * @param field_mask: The mask to apply to extract the required bits.
+ * @return: The extracted value for the specific channel.
+ */
+static u16 tps23881_calc_val(u16 reg_val, u8 chan, u8 field_offset,
+ u16 field_mask)
+{
+ if (chan >= 4)
+ reg_val >>= 8;
+
+ return (reg_val >> field_offset) & field_mask;
+}
+
+/*
+ * Helper to combine individual channel values into a u16 register value.
+ * The function sets the value for a specific channel in the appropriate
+ * position.
+ *
+ * @param reg_val: The current u16 register value.
+ * @param chan: The channel number (0-7).
+ * @param field_offset: The base bit offset to apply (e.g., 0 or 4).
+ * @param field_mask: The mask to apply for the field (e.g., 0x0F).
+ * @param field_val: The value to set for the specific channel (masked by
+ * field_mask).
+ * @return: The updated u16 register value with the channel value set.
+ */
+static u16 tps23881_set_val(u16 reg_val, u8 chan, u8 field_offset,
+ u16 field_mask, u16 field_val)
+{
+ field_val &= field_mask;
+
+ if (chan < 4) {
+ reg_val &= ~(field_mask << field_offset);
+ reg_val |= (field_val << field_offset);
+ } else {
+ reg_val &= ~(field_mask << (field_offset + 8));
+ reg_val |= (field_val << (field_offset + 8));
+ }
+
+ return reg_val;
+}
+
+static int
+tps23881_pi_set_pw_pol_limit(struct tps23881_priv *priv, int id, u8 pw_pol,
+ bool is_4p)
+{
+ struct i2c_client *client = priv->client;
+ int ret, reg;
+ u16 val;
+ u8 chan;
+
+ chan = priv->port[id].chan[0];
+ if (!is_4p) {
+ reg = TPS23881_REG_2PAIR_POL1 + (chan % 4);
+ } else {
+ /* One chan is enough to configure the 4p PI power limit */
+ if ((chan % 4) < 2)
+ reg = TPS23881_REG_4PAIR_POL1;
+ else
+ reg = TPS23881_REG_4PAIR_POL1 + 1;
+ }
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_set_val(ret, chan, 0, 0xff, pw_pol);
+ return i2c_smbus_write_word_data(client, reg, val);
+}
+
+static int tps23881_pi_enable_manual_pol(struct tps23881_priv *priv, int id)
+{
+ struct i2c_client *client = priv->client;
+ int ret;
+ u8 chan;
+ u16 val;
+
+ ret = i2c_smbus_read_byte_data(client, TPS23881_REG_FOLDBACK);
+ if (ret < 0)
+ return ret;
+
+ /* No need to test if the chan is PoE4 as setting either bit for a
+ * 4P configured port disables the automatic configuration on both
+ * channels.
+ */
+ chan = priv->port[id].chan[0];
+ val = tps23881_set_val(ret, chan, 0, BIT(chan % 4), BIT(chan % 4));
+ return i2c_smbus_write_byte_data(client, TPS23881_REG_FOLDBACK, val);
+}
+
static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
{
struct tps23881_priv *priv = to_tps23881_priv(pcdev);
struct i2c_client *client = priv->client;
u8 chan;
u16 val;
- int ret;
if (id >= TPS23881_MAX_CHANS)
return -ERANGE;
chan = priv->port[id].chan[0];
- if (chan < 4)
- val = BIT(chan);
- else
- val = BIT(chan + 4);
+ val = tps23881_set_val(0, chan, 0, BIT(chan % 4), BIT(chan % 4));
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4)
- val |= BIT(chan);
- else
- val |= BIT(chan + 4);
+ val = tps23881_set_val(val, chan, 0, BIT(chan % 4),
+ BIT(chan % 4));
}
- ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
- if (ret)
- return ret;
-
- return 0;
+ return i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
}
static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
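The two helpers introduced above encode the TPS23881 layout, where channels 0-3 sit in the low byte and channels 4-7 in the high byte of a 16-bit access. A worked example of how tps23881_pi_enable() builds its PW_EN value (channel number chosen for illustration):

	/* Enable channel 5: BIT(5 % 4) = BIT(1), placed in the high byte */
	u16 val = tps23881_set_val(0, 5, 0, BIT(1), BIT(1));	/* val == 0x0200 */

	/* Extracting it back pulls the same bit out of the high byte */
	u16 bit = tps23881_calc_val(val, 5, 0, BIT(1));		/* bit == 0x02 */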
@@ -97,32 +196,67 @@ static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
return -ERANGE;
chan = priv->port[id].chan[0];
- if (chan < 4)
- val = BIT(chan + 4);
- else
- val = BIT(chan + 8);
+ val = tps23881_set_val(0, chan, 4, BIT(chan % 4), BIT(chan % 4));
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4)
- val |= BIT(chan + 4);
- else
- val |= BIT(chan + 8);
+ val = tps23881_set_val(val, chan, 4, BIT(chan % 4),
+ BIT(chan % 4));
}
ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
if (ret)
return ret;
- return 0;
+ /* PWOFF command resets lots of registers which need to be
+ * configured again. According to the datasheet "It may take upwards
+ * of 5ms after PWOFFn command for all register values to be updated"
+ */
+ mdelay(5);
+
+ /* Enable detection and classification */
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_DET_CLA_EN);
+ if (ret < 0)
+ return ret;
+
+ chan = priv->port[id].chan[0];
+ val = tps23881_set_val(ret, chan, 0, BIT(chan % 4), BIT(chan % 4));
+ val = tps23881_set_val(val, chan, 4, BIT(chan % 4), BIT(chan % 4));
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ val = tps23881_set_val(ret, chan, 0, BIT(chan % 4),
+ BIT(chan % 4));
+ val = tps23881_set_val(val, chan, 4, BIT(chan % 4),
+ BIT(chan % 4));
+ }
+
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
+ if (ret)
+ return ret;
+
+ /* No power policy */
+ if (priv->port[id].pw_pol < 0)
+ return 0;
+
+ ret = tps23881_pi_enable_manual_pol(priv, id);
+ if (ret < 0)
+ return ret;
+
+ /* Set power policy */
+ return tps23881_pi_set_pw_pol_limit(priv, id, priv->port[id].pw_pol,
+ priv->port[id].is_4p);
}
-static int tps23881_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+static int
+tps23881_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
{
struct tps23881_priv *priv = to_tps23881_priv(pcdev);
struct i2c_client *client = priv->client;
bool enabled;
u8 chan;
+ u16 val;
int ret;
ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
@@ -130,32 +264,35 @@ static int tps23881_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
return ret;
chan = priv->port[id].chan[0];
- if (chan < 4)
- enabled = ret & BIT(chan);
- else
- enabled = ret & BIT(chan + 4);
+ val = tps23881_calc_val(ret, chan, 0, BIT(chan % 4));
+ enabled = !!(val);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4)
- enabled &= !!(ret & BIT(chan));
- else
- enabled &= !!(ret & BIT(chan + 4));
+ val = tps23881_calc_val(ret, chan, 0, BIT(chan % 4));
+ enabled &= !!(val);
}
/* Return enabled status only if both channels are in this state */
- return enabled;
+ if (enabled)
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ return 0;
}
-static int tps23881_ethtool_get_status(struct pse_controller_dev *pcdev,
- unsigned long id,
- struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+static int
+tps23881_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
{
struct tps23881_priv *priv = to_tps23881_priv(pcdev);
struct i2c_client *client = priv->client;
- bool enabled, delivering;
+ bool delivering;
u8 chan;
+ u16 val;
int ret;
ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
@@ -163,40 +300,197 @@ static int tps23881_ethtool_get_status(struct pse_controller_dev *pcdev,
return ret;
chan = priv->port[id].chan[0];
- if (chan < 4) {
- enabled = ret & BIT(chan);
- delivering = ret & BIT(chan + 4);
- } else {
- enabled = ret & BIT(chan + 4);
- delivering = ret & BIT(chan + 8);
- }
+ val = tps23881_calc_val(ret, chan, 4, BIT(chan % 4));
+ delivering = !!(val);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
- if (chan < 4) {
- enabled &= !!(ret & BIT(chan));
- delivering &= !!(ret & BIT(chan + 4));
- } else {
- enabled &= !!(ret & BIT(chan + 4));
- delivering &= !!(ret & BIT(chan + 8));
- }
+ val = tps23881_calc_val(ret, chan, 4, BIT(chan % 4));
+ delivering &= !!(val);
}
/* Return delivering status only if both channels are in this state */
if (delivering)
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
else
- status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
-
- /* Return enabled status only if both channel are on this state */
- if (enabled)
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
- else
- status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
return 0;
}
+static int tps23881_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ int ret;
+ u64 uV;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_INPUT_V);
+ if (ret < 0)
+ return ret;
+
+ uV = ret & 0x3fff;
+ uV *= TPS23881_UV_STEP;
+
+ return (int)uV;
+}
+
+static int
+tps23881_pi_get_chan_current(struct tps23881_priv *priv, u8 chan)
+{
+ struct i2c_client *client = priv->client;
+ int reg, ret;
+ u64 tmp_64;
+
+ /* Registers 0x30 to 0x3d */
+ reg = TPS23881_REG_CHAN1_A + (chan % 4) * 4 + (chan >= 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ tmp_64 = ret & 0x3fff;
+ tmp_64 *= TPS23881_NA_STEP;
+ /* uA = nA / 1000 */
+ tmp_64 = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000);
+ return (int)tmp_64;
+}
+
+static int tps23881_pi_get_pw_class(struct pse_controller_dev *pcdev,
+ int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ int ret, reg;
+ u8 chan;
+
+ chan = priv->port[id].chan[0];
+ reg = TPS23881_REG_CHAN1_CLASS + (chan % 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ return tps23881_calc_val(ret, chan, 4, 0x0f);
+}
+
+static int
+tps23881_pi_get_actual_pw(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ int ret, uV, uA;
+ u64 tmp_64;
+ u8 chan;
+
+ ret = tps23881_pi_get_voltage(&priv->pcdev, id);
+ if (ret < 0)
+ return ret;
+ uV = ret;
+
+ chan = priv->port[id].chan[0];
+ ret = tps23881_pi_get_chan_current(priv, chan);
+ if (ret < 0)
+ return ret;
+ uA = ret;
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ ret = tps23881_pi_get_chan_current(priv, chan);
+ if (ret < 0)
+ return ret;
+ uA += ret;
+ }
+
+ tmp_64 = uV;
+ tmp_64 *= uA;
+ /* mW = uV * uA / 1000000000 */
+ return DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+}
+
+static int
+tps23881_pi_get_pw_limit_chan(struct tps23881_priv *priv, u8 chan)
+{
+ struct i2c_client *client = priv->client;
+ int ret, reg;
+ u16 val;
+
+ reg = TPS23881_REG_2PAIR_POL1 + (chan % 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_calc_val(ret, chan, 0, 0xff);
+ return val * TPS23881_MW_STEP;
+}
+
+static int tps23881_pi_get_pw_limit(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ int ret, mW;
+ u8 chan;
+
+ chan = priv->port[id].chan[0];
+ ret = tps23881_pi_get_pw_limit_chan(priv, chan);
+ if (ret < 0)
+ return ret;
+
+ mW = ret;
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ ret = tps23881_pi_get_pw_limit_chan(priv, chan);
+ if (ret < 0)
+ return ret;
+ mW += ret;
+ }
+
+ return mW;
+}
+
+static int tps23881_pi_set_pw_limit(struct pse_controller_dev *pcdev,
+ int id, int max_mW)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ u8 pw_pol;
+ int ret;
+
+ if (max_mW < TPS23881_MIN_PI_PW_LIMIT_MW || MAX_PI_PW < max_mW) {
+ dev_err(&priv->client->dev,
+ "power limit %d out of ranges [%d,%d]",
+ max_mW, TPS23881_MIN_PI_PW_LIMIT_MW, MAX_PI_PW);
+ return -ERANGE;
+ }
+
+ ret = tps23881_pi_enable_manual_pol(priv, id);
+ if (ret < 0)
+ return ret;
+
+ pw_pol = DIV_ROUND_CLOSEST_ULL(max_mW, TPS23881_MW_STEP);
+
+ /* Save power policy to reconfigure it after a disable call */
+ priv->port[id].pw_pol = pw_pol;
+ return tps23881_pi_set_pw_pol_limit(priv, id, pw_pol,
+ priv->port[id].is_4p);
+}
+
+static int
+tps23881_pi_get_pw_limit_ranges(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_limit_ranges *pw_limit_ranges)
+{
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
+
+ c33_pw_limit_ranges = kzalloc(sizeof(*c33_pw_limit_ranges),
+ GFP_KERNEL);
+ if (!c33_pw_limit_ranges)
+ return -ENOMEM;
+
+ c33_pw_limit_ranges->min = TPS23881_MIN_PI_PW_LIMIT_MW;
+ c33_pw_limit_ranges->max = MAX_PI_PW;
+ pw_limit_ranges->c33_pw_limit_ranges = c33_pw_limit_ranges;
+
+ /* Return the number of ranges */
+ return 1;
+}
+
/* Parse managers subnode into an array of device nodes */
static int
tps23881_get_of_channels(struct tps23881_priv *priv,
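All of the telemetry helpers above share one conversion chain: a 14-bit ADC sample is scaled by a fixed step constant, and power is then derived in 64-bit math. A worked example with hypothetical raw samples, using the step constants defined in this patch:

	u64 uV = 13107ULL * TPS23881_UV_STEP;	/* 47997834 uV, ~48 V */
	u64 uA = DIV_ROUND_CLOSEST_ULL(8192ULL * TPS23881_NA_STEP, 1000);
						/* 574996 uA, ~0.57 A */
	u32 mW = DIV_ROUND_CLOSEST_ULL(uV * uA, 1000000000);
						/* ~27599 mW, ~27.6 W */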
@@ -480,7 +774,7 @@ tps23881_write_port_matrix(struct tps23881_priv *priv,
struct i2c_client *client = priv->client;
u8 pi_id, lgcl_chan, hw_chan;
u16 val = 0;
- int i, ret;
+ int i;
for (i = 0; i < port_cnt; i++) {
pi_id = port_matrix[i].pi_id;
@@ -491,6 +785,9 @@ tps23881_write_port_matrix(struct tps23881_priv *priv,
if (port_matrix[i].exist)
priv->port[pi_id].chan[0] = lgcl_chan;
+ /* Initialize power policy internal value */
+ priv->port[pi_id].pw_pol = -1;
+
/* Set hardware port matrix for all ports */
val |= hw_chan << (lgcl_chan * 2);
@@ -511,11 +808,7 @@ tps23881_write_port_matrix(struct tps23881_priv *priv,
}
/* Write hardware ports matrix */
- ret = i2c_smbus_write_word_data(client, TPS23881_REG_PORT_MAP, val);
- if (ret)
- return ret;
-
- return 0;
+ return i2c_smbus_write_word_data(client, TPS23881_REG_PORT_MAP, val);
}
static int
@@ -564,11 +857,7 @@ tps23881_set_ports_conf(struct tps23881_priv *priv,
val |= BIT(port_matrix[i].lgcl_chan[1]) |
BIT(port_matrix[i].lgcl_chan[1] + 4);
}
- ret = i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
- if (ret)
- return ret;
-
- return 0;
+ return i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
}
static int
@@ -594,11 +883,7 @@ tps23881_set_ports_matrix(struct tps23881_priv *priv,
if (ret)
return ret;
- ret = tps23881_set_ports_conf(priv, port_matrix);
- if (ret)
- return ret;
-
- return 0;
+ return tps23881_set_ports_conf(priv, port_matrix);
}
static int tps23881_setup_pi_matrix(struct pse_controller_dev *pcdev)
@@ -626,8 +911,14 @@ static const struct pse_controller_ops tps23881_ops = {
.setup_pi_matrix = tps23881_setup_pi_matrix,
.pi_enable = tps23881_pi_enable,
.pi_disable = tps23881_pi_disable,
- .pi_is_enabled = tps23881_pi_is_enabled,
- .ethtool_get_status = tps23881_ethtool_get_status,
+ .pi_get_admin_state = tps23881_pi_get_admin_state,
+ .pi_get_pw_status = tps23881_pi_get_pw_status,
+ .pi_get_pw_class = tps23881_pi_get_pw_class,
+ .pi_get_actual_pw = tps23881_pi_get_actual_pw,
+ .pi_get_voltage = tps23881_pi_get_voltage,
+ .pi_get_pw_limit = tps23881_pi_get_pw_limit,
+ .pi_set_pw_limit = tps23881_pi_set_pw_limit,
+ .pi_get_pw_limit_ranges = tps23881_pi_get_pw_limit_ranges,
};
static const char fw_parity_name[] = "ti/tps23881/tps23881-parity-14.bin";
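One more note on the limit path: limits are programmed in TPS23881_MW_STEP (500 mW) quanta, so a requested value is rounded to the nearest step, and pi_get_pw_limit_ranges() hands the caller a kzalloc'ed range array (freed outside the hunks shown here), returning the entry count. A small worked example of the quantization:

	/* 15250 mW requested: DIV_ROUND_CLOSEST_ULL(15250, 500) == 31,
	 * which reads back as 31 * TPS23881_MW_STEP == 15500 mW.
	 */
	u8 pw_pol = DIV_ROUND_CLOSEST_ULL(15250, TPS23881_MW_STEP);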
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 5aa41d5f7765..5ca6ecf0ce5f 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1329,9 +1329,9 @@ int tap_queue_resize(struct tap_dev *tap)
list_for_each_entry(q, &tap->queue_list, next)
rings[i++] = &q->ring;
- ret = ptr_ring_resize_multiple(rings, n,
- dev->tx_queue_len, GFP_KERNEL,
- __skb_array_destroy_skb);
+ ret = ptr_ring_resize_multiple_bh(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ __skb_array_destroy_skb);
kfree(rings);
return ret;
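tap here and tun below make the same substitution: ptr_ring_resize_multiple_bh() is the variant that takes the ring locks with bottom halves disabled, presumably because these queue rings are also touched from BH context. The calling pattern itself is unchanged; a hedged sketch of its generic shape (the destroy callback name is illustrative):

	struct ptr_ring **rings;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;
	/* ... collect one ring per queue into rings[0..n-1] ... */
	ret = ptr_ring_resize_multiple_bh(rings, n, new_size, GFP_KERNEL,
					  destroy_cb);
	kfree(rings);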
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index c7690adec8db..dc7cbd6a9798 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -1175,6 +1175,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EBUSY;
}
+ if (netdev_has_upper_dev(port_dev, dev)) {
+ NL_SET_ERR_MSG(extack, "Device is already a lower device of the team interface");
+ netdev_err(dev, "Device %s is already a lower device of the team interface\n",
+ portname);
+ return -EBUSY;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e816aaba8e5f..28624cca91f8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -574,14 +574,18 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
return ret;
}
-static inline bool tun_not_capable(struct tun_struct *tun)
+static inline bool tun_capable(struct tun_struct *tun)
{
const struct cred *cred = current_cred();
struct net *net = dev_net(tun->dev);
- return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
- (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
- !ns_capable(net->user_ns, CAP_NET_ADMIN);
+ if (ns_capable(net->user_ns, CAP_NET_ADMIN))
+ return 1;
+ if (uid_valid(tun->owner) && uid_eq(cred->euid, tun->owner))
+ return 1;
+ if (gid_valid(tun->group) && in_egroup_p(tun->group))
+ return 1;
+ return 0;
}
static void tun_set_real_num_queues(struct tun_struct *tun)
@@ -2778,7 +2782,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
!!(tun->flags & IFF_MULTI_QUEUE))
return -EINVAL;
- if (tun_not_capable(tun))
+ if (!tun_capable(tun))
return -EPERM;
err = security_tun_dev_open(tun->security);
if (err < 0)
@@ -3697,9 +3701,9 @@ static int tun_queue_resize(struct tun_struct *tun)
list_for_each_entry(tfile, &tun->disabled, next)
rings[i++] = &tfile->tx_ring;
- ret = ptr_ring_resize_multiple(rings, n,
- dev->tx_queue_len, GFP_KERNEL,
- tun_ptr_free);
+ ret = ptr_ring_resize_multiple_bh(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ tun_ptr_free);
kfree(rings);
return ret;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 531b1b6a37d1..a91bf9c7e31d 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -439,7 +439,7 @@ struct lan78xx_net {
struct usb_anchor deferred;
struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */
- struct mutex phy_mutex; /* for phy access */
+ struct mutex mdiobus_mutex; /* for MDIO bus access */
unsigned int pipe_in, pipe_out, pipe_intr;
unsigned int bulk_in_delay;
@@ -472,10 +472,6 @@ struct lan78xx_net {
struct irq_domain_data domain_data;
};
-/* define external phy id */
-#define PHY_LAN8835 (0x0007C130)
-#define PHY_KSZ9031RNX (0x00221620)
-
/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
@@ -625,8 +621,8 @@ static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
*data = *buf;
} else if (net_ratelimit()) {
netdev_warn(dev->net,
- "Failed to read register index 0x%08x. ret = %d",
- index, ret);
+ "Failed to read register index 0x%08x. ret = %pe",
+ index, ERR_PTR(ret));
}
kfree(buf);
@@ -656,8 +652,8 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
if (unlikely(ret < 0) &&
net_ratelimit()) {
netdev_warn(dev->net,
- "Failed to write register index 0x%08x. ret = %d",
- index, ret);
+ "Failed to write register index 0x%08x. ret = %pe",
+ index, ERR_PTR(ret));
}
kfree(buf);
@@ -678,11 +674,7 @@ static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
buf &= ~mask;
buf |= (mask & data);
- ret = lan78xx_write_reg(dev, reg, buf);
- if (ret < 0)
- return ret;
-
- return 0;
+ return lan78xx_write_reg(dev, reg, buf);
}
static int lan78xx_read_stats(struct lan78xx_net *dev,
@@ -812,8 +804,156 @@ static void lan78xx_update_stats(struct lan78xx_net *dev)
usb_autopm_put_interface(dev->intf);
}
-/* Loop until the read is completed with timeout called with phy_mutex held */
-static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
+static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
+{
+ return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
+}
+
+static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
+ u32 hw_disabled)
+{
+ unsigned long timeout;
+ bool stopped = true;
+ int ret;
+ u32 buf;
+
+ /* Stop the h/w block (if not already stopped) */
+
+ ret = lan78xx_read_reg(dev, reg, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (buf & hw_enabled) {
+ buf &= ~hw_enabled;
+
+ ret = lan78xx_write_reg(dev, reg, buf);
+ if (ret < 0)
+ return ret;
+
+ stopped = false;
+ timeout = jiffies + HW_DISABLE_TIMEOUT;
+ do {
+ ret = lan78xx_read_reg(dev, reg, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (buf & hw_disabled)
+ stopped = true;
+ else
+ msleep(HW_DISABLE_DELAY_MS);
+ } while (!stopped && !time_after(jiffies, timeout));
+ }
+
+ return stopped ? 0 : -ETIMEDOUT;
+}
+
+static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
+{
+ return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
+}
+
+static int lan78xx_start_tx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "start tx path");
+
+ /* Start the MAC transmitter */
+
+ ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
+ if (ret < 0)
+ return ret;
+
+ /* Start the Tx FIFO */
+
+ ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "stop tx path");
+
+ /* Stop the Tx FIFO */
+
+ ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
+ if (ret < 0)
+ return ret;
+
+ /* Stop the MAC transmitter */
+
+ ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* The caller must ensure the Tx path is stopped before calling
+ * lan78xx_flush_tx_fifo().
+ */
+static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
+{
+ return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
+}
+
+static int lan78xx_start_rx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "start rx path");
+
+ /* Start the Rx FIFO */
+
+ ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
+ if (ret < 0)
+ return ret;
+
+ /* Start the MAC receiver */
+
+ ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
+{
+ int ret;
+
+ netif_dbg(dev, drv, dev->net, "stop rx path");
+
+ /* Stop the MAC receiver */
+
+ ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
+ if (ret < 0)
+ return ret;
+
+ /* Stop the Rx FIFO */
+
+ ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* The caller must ensure the Rx path is stopped before calling
+ * lan78xx_flush_rx_fifo().
+ */
+static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
+{
+ return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
+}
+
+/* Loop with timeout until the read completes; called with mdiobus_mutex held */
+static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
{
unsigned long start_time = jiffies;
u32 val;
@@ -821,14 +961,14 @@ static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, MII_ACC, &val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (ret < 0)
+ return ret;
if (!(val & MII_ACC_MII_BUSY_))
return 0;
} while (!time_after(jiffies, start_time + HZ));
- return -EIO;
+ return -ETIMEDOUT;
}
static inline u32 mii_access(int id, int index, int read)
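The datapath helpers are moved up so the register-access rework can use them earlier in the file (the stop timeout also switches from -ETIME to -ETIMEDOUT on the way). Their contract, per the comments, is stop-then-flush; a hedged sketch of a hypothetical reset built on top of them:

static int lan78xx_reset_tx_path(struct lan78xx_net *dev)
{
	int ret;

	ret = lan78xx_stop_tx_path(dev);	/* FIFO first, then MAC */
	if (ret < 0)
		return ret;

	ret = lan78xx_flush_tx_fifo(dev);	/* only valid while stopped */
	if (ret < 0)
		return ret;

	return lan78xx_start_tx_path(dev);	/* MAC first, then FIFO */
}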
@@ -854,8 +994,8 @@ static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, E2P_CMD, &val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (ret < 0)
+ return ret;
if (!(val & E2P_CMD_EPC_BUSY_) ||
(val & E2P_CMD_EPC_TIMEOUT_))
@@ -865,7 +1005,7 @@ static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
netdev_warn(dev->net, "EEPROM read operation timeout");
- return -EIO;
+ return -ETIMEDOUT;
}
return 0;
@@ -879,8 +1019,8 @@ static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, E2P_CMD, &val);
- if (unlikely(ret < 0))
- return -EIO;
+ if (ret < 0)
+ return ret;
if (!(val & E2P_CMD_EPC_BUSY_))
return 0;
@@ -889,75 +1029,81 @@ static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
} while (!time_after(jiffies, start_time + HZ));
netdev_warn(dev->net, "EEPROM is busy");
- return -EIO;
+ return -ETIMEDOUT;
}
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
- u32 val;
- u32 saved;
+ u32 val, saved;
int i, ret;
- int retval;
/* depends on chip, some EEPROM pins are muxed with LED function.
* disable & restore LED function to access EEPROM.
*/
ret = lan78xx_read_reg(dev, HW_CFG, &val);
+ if (ret < 0)
+ return ret;
+
saved = val;
if (dev->chipid == ID_REV_CHIP_ID_7800_) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
+ if (ret < 0)
+ return ret;
}
- retval = lan78xx_eeprom_confirm_not_busy(dev);
- if (retval)
- return retval;
+ ret = lan78xx_eeprom_confirm_not_busy(dev);
+ if (ret == -ETIMEDOUT)
+ goto read_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
for (i = 0; i < length; i++) {
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (unlikely(ret < 0)) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
- retval = lan78xx_wait_eeprom(dev);
- if (retval < 0)
- goto exit;
+ ret = lan78xx_wait_eeprom(dev);
+ /* Looks like a non-USB-specific error, try to recover */
+ if (ret == -ETIMEDOUT)
+ goto read_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
ret = lan78xx_read_reg(dev, E2P_DATA, &val);
- if (unlikely(ret < 0)) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
data[i] = val & 0xFF;
offset++;
}
- retval = 0;
-exit:
+read_raw_eeprom_done:
if (dev->chipid == ID_REV_CHIP_ID_7800_)
- ret = lan78xx_write_reg(dev, HW_CFG, saved);
+ return lan78xx_write_reg(dev, HW_CFG, saved);
- return retval;
+ return 0;
}
static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
- u8 sig;
int ret;
+ u8 sig;
ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
- if ((ret == 0) && (sig == EEPROM_INDICATOR))
- ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
- else
- ret = -EINVAL;
+ if (ret < 0)
+ return ret;
- return ret;
+ if (sig != EEPROM_INDICATOR)
+ return -ENODATA;
+
+ return lan78xx_read_raw_eeprom(dev, offset, length, data);
}
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
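The EEPROM rework above sorts failures into two classes: -ETIMEDOUT is a device-side condition, so control jumps to the restore label to put the LED mux back before returning, while any other negative value is treated as a USB transport error and returned immediately, since nothing can be restored over a dead link. The recurring dispatch, in sketch form:

	ret = lan78xx_wait_eeprom(dev);
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;	/* device timeout: restore LED pins */
	if (ret < 0)
		return ret;			/* USB error: bail out directly */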
@@ -966,113 +1112,144 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
u32 val;
u32 saved;
int i, ret;
- int retval;
/* depends on chip, some EEPROM pins are muxed with LED function.
* disable & restore LED function to access EEPROM.
*/
ret = lan78xx_read_reg(dev, HW_CFG, &val);
+ if (ret < 0)
+ return ret;
+
saved = val;
if (dev->chipid == ID_REV_CHIP_ID_7800_) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
+ if (ret < 0)
+ return ret;
}
- retval = lan78xx_eeprom_confirm_not_busy(dev);
- if (retval)
- goto exit;
+ ret = lan78xx_eeprom_confirm_not_busy(dev);
+ /* Looks like a non-USB-specific error, try to recover */
+ if (ret == -ETIMEDOUT)
+ goto write_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
/* Issue write/erase enable command */
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (unlikely(ret < 0)) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
- retval = lan78xx_wait_eeprom(dev);
- if (retval < 0)
- goto exit;
+ ret = lan78xx_wait_eeprom(dev);
+ /* Looks like a non-USB-specific error, try to recover */
+ if (ret == -ETIMEDOUT)
+ goto write_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
for (i = 0; i < length; i++) {
/* Fill data register */
val = data[i];
ret = lan78xx_write_reg(dev, E2P_DATA, val);
- if (ret < 0) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
/* Send "write" command */
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
ret = lan78xx_write_reg(dev, E2P_CMD, val);
- if (ret < 0) {
- retval = -EIO;
- goto exit;
- }
+ if (ret < 0)
+ return ret;
- retval = lan78xx_wait_eeprom(dev);
- if (retval < 0)
- goto exit;
+ ret = lan78xx_wait_eeprom(dev);
+ /* Looks like a non-USB-specific error, try to recover */
+ if (ret == -ETIMEDOUT)
+ goto write_raw_eeprom_done;
+ /* If USB fails, there is nothing to do */
+ if (ret < 0)
+ return ret;
offset++;
}
- retval = 0;
-exit:
+write_raw_eeprom_done:
if (dev->chipid == ID_REV_CHIP_ID_7800_)
- ret = lan78xx_write_reg(dev, HW_CFG, saved);
+ return lan78xx_write_reg(dev, HW_CFG, saved);
- return retval;
+ return 0;
}
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
- int i;
- u32 buf;
unsigned long timeout;
+ int ret, i;
+ u32 buf;
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
usleep_range(1, 10);
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_PWR_DN_PWRDN_N_);
}
for (i = 0; i < length; i++) {
- lan78xx_write_reg(dev, OTP_ADDR1,
- ((offset + i) >> 8) & OTP_ADDR1_15_11);
- lan78xx_write_reg(dev, OTP_ADDR2,
- ((offset + i) & OTP_ADDR2_10_3));
+ ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ ((offset + i) >> 8) & OTP_ADDR1_15_11);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ ((offset + i) & OTP_ADDR2_10_3));
+ if (ret < 0)
+ return ret;
- lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
- lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
udelay(1);
- lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_STATUS");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_STATUS_BUSY_);
- lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+ ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+ if (ret < 0)
+ return ret;
data[i] = (u8)(buf & 0xFF);
}
@@ -1086,45 +1263,72 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
int i;
u32 buf;
unsigned long timeout;
+ int ret;
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
udelay(1);
- lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN completion");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_PWR_DN_PWRDN_N_);
}
/* set to BYTE program mode */
- lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+ ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+ if (ret < 0)
+ return ret;
for (i = 0; i < length; i++) {
- lan78xx_write_reg(dev, OTP_ADDR1,
- ((offset + i) >> 8) & OTP_ADDR1_15_11);
- lan78xx_write_reg(dev, OTP_ADDR2,
- ((offset + i) & OTP_ADDR2_10_3));
- lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
- lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
- lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ ((offset + i) >> 8) & OTP_ADDR1_15_11);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ ((offset + i) & OTP_ADDR2_10_3));
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ if (ret < 0)
+ return ret;
timeout = jiffies + HZ;
do {
udelay(1);
- lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ if (ret < 0)
+ return ret;
+
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"Timeout on OTP_STATUS completion");
- return -EIO;
+ return -ETIMEDOUT;
}
} while (buf & OTP_STATUS_BUSY_);
}
@@ -1161,7 +1365,7 @@ static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
if (unlikely(ret < 0))
- return -EIO;
+ return ret;
if (dp_sel & DP_SEL_DPRDY_)
return 0;
@@ -1171,44 +1375,51 @@ static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
netdev_warn(dev->net, "%s timed out", __func__);
- return -EIO;
+ return -ETIMEDOUT;
}
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
u32 addr, u32 length, u32 *buf)
{
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
- u32 dp_sel;
int i, ret;
- if (usb_autopm_get_interface(dev->intf) < 0)
- return 0;
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret < 0)
+ return ret;
mutex_lock(&pdata->dataport_mutex);
ret = lan78xx_dataport_wait_not_busy(dev);
if (ret < 0)
- goto done;
+ goto dataport_write;
- ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
-
- dp_sel &= ~DP_SEL_RSEL_MASK_;
- dp_sel |= ram_select;
- ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
+ ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
+ if (ret < 0)
+ goto dataport_write;
for (i = 0; i < length; i++) {
ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
+ if (ret < 0)
+ goto dataport_write;
ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
+ if (ret < 0)
+ goto dataport_write;
ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
+ if (ret < 0)
+ goto dataport_write;
ret = lan78xx_dataport_wait_not_busy(dev);
if (ret < 0)
- goto done;
+ goto dataport_write;
}
-done:
+dataport_write:
+ if (ret < 0)
+ netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));
+
mutex_unlock(&pdata->dataport_mutex);
usb_autopm_put_interface(dev->intf);
@@ -1244,23 +1455,39 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
struct lan78xx_priv *pdata =
container_of(param, struct lan78xx_priv, set_multicast);
struct lan78xx_net *dev = pdata->dev;
- int i;
+ int i, ret;
netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
pdata->rfe_ctl);
- lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
- DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+ ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
+ DP_SEL_VHF_VLAN_LEN,
+ DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+ if (ret < 0)
+ goto multicast_write_done;
for (i = 1; i < NUM_OF_MAF; i++) {
- lan78xx_write_reg(dev, MAF_HI(i), 0);
- lan78xx_write_reg(dev, MAF_LO(i),
- pdata->pfilter_table[i][1]);
- lan78xx_write_reg(dev, MAF_HI(i),
- pdata->pfilter_table[i][0]);
+ ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
+ if (ret < 0)
+ goto multicast_write_done;
+
+ ret = lan78xx_write_reg(dev, MAF_LO(i),
+ pdata->pfilter_table[i][1]);
+ if (ret < 0)
+ goto multicast_write_done;
+
+ ret = lan78xx_write_reg(dev, MAF_HI(i),
+ pdata->pfilter_table[i][0]);
+ if (ret < 0)
+ goto multicast_write_done;
}
- lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+multicast_write_done:
+ if (ret < 0)
+ netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
+ return;
}
static void lan78xx_set_multicast(struct net_device *netdev)
@@ -1369,24 +1596,24 @@ static int lan78xx_mac_reset(struct lan78xx_net *dev)
u32 val;
int ret;
- mutex_lock(&dev->phy_mutex);
+ mutex_lock(&dev->mdiobus_mutex);
/* Resetting the device while there is activity on the MDIO
* bus can result in the MAC interface locking up and not
* completing register access transactions.
*/
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
- goto done;
+ goto exit_unlock;
ret = lan78xx_read_reg(dev, MAC_CR, &val);
if (ret < 0)
- goto done;
+ goto exit_unlock;
val |= MAC_CR_RST_;
ret = lan78xx_write_reg(dev, MAC_CR, val);
if (ret < 0)
- goto done;
+ goto exit_unlock;
/* Wait for the reset to complete before allowing any further
* MAC register accesses otherwise the MAC may lock up.
@@ -1394,17 +1621,17 @@ static int lan78xx_mac_reset(struct lan78xx_net *dev)
do {
ret = lan78xx_read_reg(dev, MAC_CR, &val);
if (ret < 0)
- goto done;
+ goto exit_unlock;
if (!(val & MAC_CR_RST_)) {
ret = 0;
- goto done;
+ goto exit_unlock;
}
} while (!time_after(jiffies, start_time + HZ));
ret = -ETIMEDOUT;
-done:
- mutex_unlock(&dev->phy_mutex);
+exit_unlock:
+ mutex_unlock(&dev->mdiobus_mutex);
return ret;
}
@@ -1630,6 +1857,7 @@ static void lan78xx_get_wol(struct net_device *netdev,
ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
if (unlikely(ret < 0)) {
+ netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
wol->supported = 0;
wol->wolopts = 0;
} else {
@@ -1661,10 +1889,13 @@ static int lan78xx_set_wol(struct net_device *netdev,
pdata->wol = wol->wolopts;
- device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+ ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+ if (ret < 0)
+ goto exit_pm_put;
- phy_ethtool_set_wol(netdev->phydev, wol);
+ ret = phy_ethtool_set_wol(netdev->phydev, wol);
+exit_pm_put:
usb_autopm_put_interface(dev->intf);
return ret;
@@ -1869,30 +2100,35 @@ exit:
static int lan78xx_get_regs_len(struct net_device *netdev)
{
- if (!netdev->phydev)
- return (sizeof(lan78xx_regs));
- else
- return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
+ return sizeof(lan78xx_regs);
}
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *buf)
{
- u32 *data = buf;
- int i, j;
struct lan78xx_net *dev = netdev_priv(netdev);
+ unsigned int data_count = 0;
+ u32 *data = buf;
+ int i, ret;
/* Read Device/MAC registers */
- for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
- lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
+ for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
+ ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
+ if (ret < 0) {
+ netdev_warn(dev->net,
+ "failed to read register 0x%08x\n",
+ lan78xx_regs[i]);
+ goto clean_data;
+ }
- if (!netdev->phydev)
- return;
+ data_count++;
+ }
- /* Read PHY registers */
- for (j = 0; j < 32; i++, j++)
- data[i] = phy_read(netdev->phydev, j);
+ return;
+
+clean_data:
+ memset(data, 0, data_count * sizeof(u32));
}
static const struct ethtool_ops lan78xx_ethtool_ops = {
@@ -1920,13 +2156,19 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_regs = lan78xx_get_regs,
};
-static void lan78xx_init_mac_address(struct lan78xx_net *dev)
+static int lan78xx_init_mac_address(struct lan78xx_net *dev)
{
u32 addr_lo, addr_hi;
u8 addr[6];
+ int ret;
- lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
- lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ if (ret < 0)
+ return ret;
addr[0] = addr_lo & 0xFF;
addr[1] = (addr_lo >> 8) & 0xFF;
@@ -1959,14 +2201,26 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
(addr[2] << 16) | (addr[3] << 24);
addr_hi = addr[4] | (addr[5] << 8);
- lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ if (ret < 0)
+ return ret;
}
- lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ if (ret < 0)
+ return ret;
eth_hw_addr_set(dev->net, addr);
+
+ return 0;
}
/* MDIO read and write wrappers for phylib */
@@ -1980,27 +2234,31 @@ static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
if (ret < 0)
return ret;
- mutex_lock(&dev->phy_mutex);
+ mutex_lock(&dev->mdiobus_mutex);
/* confirm MII not busy */
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
/* set the address, index & direction (read from PHY) */
addr = mii_access(phy_id, idx, MII_READ);
ret = lan78xx_write_reg(dev, MII_ACC, addr);
+ if (ret < 0)
+ goto done;
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
ret = lan78xx_read_reg(dev, MII_DATA, &val);
+ if (ret < 0)
+ goto done;
ret = (int)(val & 0xFFFF);
done:
- mutex_unlock(&dev->phy_mutex);
+ mutex_unlock(&dev->mdiobus_mutex);
usb_autopm_put_interface(dev->intf);
return ret;
@@ -2017,28 +2275,32 @@ static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
if (ret < 0)
return ret;
- mutex_lock(&dev->phy_mutex);
+ mutex_lock(&dev->mdiobus_mutex);
/* confirm MII not busy */
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
val = (u32)regval;
ret = lan78xx_write_reg(dev, MII_DATA, val);
+ if (ret < 0)
+ goto done;
/* set the address, index & direction (write to PHY) */
addr = mii_access(phy_id, idx, MII_WRITE);
ret = lan78xx_write_reg(dev, MII_ACC, addr);
+ if (ret < 0)
+ goto done;
- ret = lan78xx_phy_wait_not_busy(dev);
+ ret = lan78xx_mdiobus_wait_not_busy(dev);
if (ret < 0)
goto done;
done:
- mutex_unlock(&dev->phy_mutex);
+ mutex_unlock(&dev->mdiobus_mutex);
usb_autopm_put_interface(dev->intf);
- return 0;
+ return ret;
}
static int lan78xx_mdio_init(struct lan78xx_net *dev)
@@ -2164,13 +2426,22 @@ static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
struct lan78xx_net *dev =
container_of(data, struct lan78xx_net, domain_data);
u32 buf;
+ int ret;
/* call register access here because irq_bus_lock & irq_bus_sync_unlock
* are the only two callbacks executed in non-atomic context.
*/
- lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ if (ret < 0)
+ goto irq_bus_sync_unlock;
+
if (buf != data->irqenable)
- lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
+ ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
+
+irq_bus_sync_unlock:
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
+ ERR_PTR(ret));
mutex_unlock(&data->irq_lock);
}
@@ -2195,7 +2466,10 @@ static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
mutex_init(&dev->domain_data.irq_lock);
- lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ if (ret < 0)
+ return ret;
+
dev->domain_data.irqenable = buf;
dev->domain_data.irqchip = &lan78xx_irqchip;
@@ -2234,46 +2508,6 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
dev->domain_data.irqdomain = NULL;
}
-static int lan8835_fixup(struct phy_device *phydev)
-{
- int buf;
- struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
-
- /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
- buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
- buf &= ~0x1800;
- buf |= 0x0800;
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
-
- /* RGMII MAC TXC Delay Enable */
- lan78xx_write_reg(dev, MAC_RGMII_ID,
- MAC_RGMII_ID_TXC_DELAY_EN_);
-
- /* RGMII TX DLL Tune Adjust */
- lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
-
- dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
-
- return 1;
-}
-
-static int ksz9031rnx_fixup(struct phy_device *phydev)
-{
- struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
-
- /* Micrel9301RNX PHY configuration */
- /* RGMII Control Signal Pad Skew */
- phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
- /* RGMII RX Data Pad Skew */
- phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
- /* RGMII RX Clock Pad Skew */
- phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
-
- dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
-
- return 1;
-}
-
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
u32 buf;
@@ -2307,22 +2541,11 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
netdev_err(dev->net, "no PHY driver found\n");
return NULL;
}
- dev->interface = PHY_INTERFACE_MODE_RGMII;
- /* external PHY fixup for KSZ9031RNX */
- ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
- ksz9031rnx_fixup);
- if (ret < 0) {
- netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
- return NULL;
- }
- /* external PHY fixup for LAN8835 */
- ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
- lan8835_fixup);
- if (ret < 0) {
- netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
- return NULL;
- }
- /* add more external PHY fixup here if needed */
+ dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
+ /* The PHY driver is responsible to configure proper RGMII
+ * interface delays. Disable RGMII delays on MAC side.
+ */
+ lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
phydev->is_internal = false;
}
@@ -2381,11 +2604,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
if (phy_is_pseudo_fixed_link(phydev)) {
fixed_phy_unregister(phydev);
phy_device_free(phydev);
- } else {
- phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
- 0xfffffff0);
- phy_unregister_fixup_for_uid(PHY_LAN8835,
- 0xfffffff0);
}
}
return -EIO;
@@ -2437,27 +2655,36 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
- u32 buf;
bool rxenabled;
+ u32 buf;
+ int ret;
- lan78xx_read_reg(dev, MAC_RX, &buf);
+ ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ if (ret < 0)
+ return ret;
rxenabled = ((buf & MAC_RX_RXEN_) != 0);
if (rxenabled) {
buf &= ~MAC_RX_RXEN_;
- lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ if (ret < 0)
+ return ret;
}
/* add 4 to size for FCS */
buf &= ~MAC_RX_MAX_SIZE_MASK_;
buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
- lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ if (ret < 0)
+ return ret;
if (rxenabled) {
buf |= MAC_RX_RXEN_;
- lan78xx_write_reg(dev, MAC_RX, buf);
+ ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ if (ret < 0)
+ return ret;
}
return 0;
@@ -2523,7 +2750,10 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
- if (!ret)
+ if (ret < 0)
+ netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
+ new_mtu, netdev->mtu, ERR_PTR(ret));
+ else
WRITE_ONCE(netdev->mtu, new_mtu);
usb_autopm_put_interface(dev->intf);
@@ -2536,6 +2766,7 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
struct lan78xx_net *dev = netdev_priv(netdev);
struct sockaddr *addr = p;
u32 addr_lo, addr_hi;
+ int ret;
if (netif_running(netdev))
return -EBUSY;
@@ -2552,14 +2783,20 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
addr_hi = netdev->dev_addr[4] |
netdev->dev_addr[5] << 8;
- lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ if (ret < 0)
+ return ret;
+
+ ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ if (ret < 0)
+ return ret;
/* Added to support MAC address changes */
- lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ if (ret < 0)
+ return ret;
- return 0;
+ return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
}
/* Enable or disable Rx checksum offload engine */
@@ -2592,9 +2829,7 @@ static int lan78xx_set_features(struct net_device *netdev,
spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
- lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
-
- return 0;
+ return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
static void lan78xx_deferred_vlan_write(struct work_struct *param)
@@ -2645,13 +2880,16 @@ static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
return 0;
}
-static void lan78xx_init_ltm(struct lan78xx_net *dev)
+static int lan78xx_init_ltm(struct lan78xx_net *dev)
{
+ u32 regs[6] = { 0 };
int ret;
u32 buf;
- u32 regs[6] = { 0 };
ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ if (ret < 0)
+ goto init_ltm_failed;
+
if (buf & USB_CFG1_LTM_ENABLE_) {
u8 temp[2];
/* Get values from EEPROM first */
@@ -2662,7 +2900,7 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev)
24,
(u8 *)regs);
if (ret < 0)
- return;
+ return ret;
}
} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
if (temp[0] == 24) {
@@ -2671,17 +2909,40 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev)
24,
(u8 *)regs);
if (ret < 0)
- return;
+ return ret;
}
}
}
- lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
- lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
- lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
- lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
- lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
- lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+ ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+ if (ret < 0)
+ goto init_ltm_failed;
+
+ return 0;
+
+init_ltm_failed:
+ netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
+ return ret;
}
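/* The six checked writes above could equally be table-driven; a compact
 * sketch, assuming the LTM_* macros expand to plain u32 register offsets:
 *
 *	static const u32 ltm_regs[] = {
 *		LTM_BELT_IDLE0, LTM_BELT_IDLE1,
 *		LTM_BELT_ACT0, LTM_BELT_ACT1,
 *		LTM_INACTIVE0, LTM_INACTIVE1,
 *	};
 *	int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(ltm_regs); i++) {
 *		ret = lan78xx_write_reg(dev, ltm_regs[i], regs[i]);
 *		if (ret < 0)
 *			goto init_ltm_failed;
 *	}
 */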
static int lan78xx_urb_config_init(struct lan78xx_net *dev)
@@ -2722,156 +2983,6 @@ static int lan78xx_urb_config_init(struct lan78xx_net *dev)
return result;
}
-static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
-{
- return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
-}
-
-static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
- u32 hw_disabled)
-{
- unsigned long timeout;
- bool stopped = true;
- int ret;
- u32 buf;
-
- /* Stop the h/w block (if not already stopped) */
-
- ret = lan78xx_read_reg(dev, reg, &buf);
- if (ret < 0)
- return ret;
-
- if (buf & hw_enabled) {
- buf &= ~hw_enabled;
-
- ret = lan78xx_write_reg(dev, reg, buf);
- if (ret < 0)
- return ret;
-
- stopped = false;
- timeout = jiffies + HW_DISABLE_TIMEOUT;
- do {
- ret = lan78xx_read_reg(dev, reg, &buf);
- if (ret < 0)
- return ret;
-
- if (buf & hw_disabled)
- stopped = true;
- else
- msleep(HW_DISABLE_DELAY_MS);
- } while (!stopped && !time_after(jiffies, timeout));
- }
-
- ret = stopped ? 0 : -ETIME;
-
- return ret;
-}
-
-static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
-{
- return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
-}
-
-static int lan78xx_start_tx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "start tx path");
-
- /* Start the MAC transmitter */
-
- ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
- if (ret < 0)
- return ret;
-
- /* Start the Tx FIFO */
-
- ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "stop tx path");
-
- /* Stop the Tx FIFO */
-
- ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
- if (ret < 0)
- return ret;
-
- /* Stop the MAC transmitter */
-
- ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* The caller must ensure the Tx path is stopped before calling
- * lan78xx_flush_tx_fifo().
- */
-static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
-{
- return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
-}
-
-static int lan78xx_start_rx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "start rx path");
-
- /* Start the Rx FIFO */
-
- ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
- if (ret < 0)
- return ret;
-
- /* Start the MAC receiver*/
-
- ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
-{
- int ret;
-
- netif_dbg(dev, drv, dev->net, "stop rx path");
-
- /* Stop the MAC receiver */
-
- ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
- if (ret < 0)
- return ret;
-
- /* Stop the Rx FIFO */
-
- ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-/* The caller must ensure the Rx path is stopped before calling
- * lan78xx_flush_rx_fifo().
- */
-static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
-{
- return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
-}
-
static int lan78xx_reset(struct lan78xx_net *dev)
{
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
@@ -2905,7 +3016,9 @@ static int lan78xx_reset(struct lan78xx_net *dev)
}
} while (buf & HW_CFG_LRST_);
- lan78xx_init_mac_address(dev);
+ ret = lan78xx_init_mac_address(dev);
+ if (ret < 0)
+ return ret;
/* save DEVID for later usage */
ret = lan78xx_read_reg(dev, ID_REV, &buf);
@@ -2927,7 +3040,9 @@ static int lan78xx_reset(struct lan78xx_net *dev)
return ret;
/* Init LTM */
- lan78xx_init_ltm(dev);
+ ret = lan78xx_init_ltm(dev);
+ if (ret < 0)
+ return ret;
ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
if (ret < 0)
@@ -4242,9 +4357,6 @@ static void lan78xx_disconnect(struct usb_interface *intf)
phydev = net->phydev;
- phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
- phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
-
phy_disconnect(net->phydev);
if (phy_is_pseudo_fixed_link(phydev)) {
@@ -4349,7 +4461,7 @@ static int lan78xx_probe(struct usb_interface *intf,
skb_queue_head_init(&dev->rxq_done);
skb_queue_head_init(&dev->txq_pend);
skb_queue_head_init(&dev->rxq_overflow);
- mutex_init(&dev->phy_mutex);
+ mutex_init(&dev->mdiobus_mutex);
mutex_init(&dev->dev_mutex);
ret = lan78xx_urb_config_init(dev);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 07ebb800edf1..01251868a9c2 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -634,7 +634,7 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
break;
case XDP_TX:
orig_frame = *frame;
- xdp->rxq->mem = frame->mem;
+ xdp->rxq->mem.type = frame->mem_type;
if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
frame = &orig_frame;
@@ -646,7 +646,7 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
goto xdp_xmit;
case XDP_REDIRECT:
orig_frame = *frame;
- xdp->rxq->mem = frame->mem;
+ xdp->rxq->mem.type = frame->mem_type;
if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
frame = &orig_frame;
stats->rx_drops++;
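/* Context for the two veth hunks above: struct xdp_frame no longer embeds a
 * full struct xdp_mem_info, only the memory model type, so the rxq is patched
 * with mem.type alone. Abridged shapes, field layout illustrative:
 *
 *	enum xdp_mem_type {
 *		MEM_TYPE_PAGE_SHARED,
 *		MEM_TYPE_PAGE_ORDER0,
 *		MEM_TYPE_PAGE_POOL,
 *		MEM_TYPE_XSK_BUFF_POOL,
 *	};
 *
 *	struct xdp_mem_info {
 *		u32 type;	(holds an enum xdp_mem_type value)
 *		u32 id;
 *	};
 */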
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 67d25f4f94ef..ca81b212a246 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -122,16 +122,6 @@ struct net_vrf {
int ifindex;
};
-static void vrf_rx_stats(struct net_device *dev, int len)
-{
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
- u64_stats_inc(&dstats->rx_packets);
- u64_stats_add(&dstats->rx_bytes, len);
- u64_stats_update_end(&dstats->syncp);
-}
-
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
vrf_dev->stats.tx_errors++;
@@ -369,7 +359,7 @@ static bool qdisc_tx_is_default(const struct net_device *dev)
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
struct dst_entry *dst)
{
- int len = skb->len;
+ unsigned int len = skb->len;
skb_orphan(skb);
@@ -382,15 +372,10 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
skb->protocol = eth_type_trans(skb, dev);
- if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
- vrf_rx_stats(dev, len);
- } else {
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
- u64_stats_inc(&dstats->rx_drops);
- u64_stats_update_end(&dstats->syncp);
- }
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
+ dev_dstats_rx_add(dev, len);
+ else
+ dev_dstats_rx_dropped(dev);
return NETDEV_TX_OK;
}
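/* dev_dstats_rx_add() and friends centralize what vrf_rx_stats() open-coded;
 * modulo naming, the helper amounts to the removed body (a sketch derived
 * from the deleted code above, not a quote of include/linux/netdevice.h):
 */
static inline void dev_dstats_rx_add(struct net_device *dev, unsigned int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	u64_stats_inc(&dstats->rx_packets);
	u64_stats_add(&dstats->rx_bytes, len);
	u64_stats_update_end(&dstats->syncp);
}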
@@ -578,20 +563,14 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- int len = skb->len;
- netdev_tx_t ret = is_ip_tx_frame(skb, dev);
-
- u64_stats_update_begin(&dstats->syncp);
- if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+ unsigned int len = skb->len;
+ netdev_tx_t ret;
- u64_stats_inc(&dstats->tx_packets);
- u64_stats_add(&dstats->tx_bytes, len);
- } else {
- u64_stats_inc(&dstats->tx_drops);
- }
- u64_stats_update_end(&dstats->syncp);
+ ret = is_ip_tx_frame(skb, dev);
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
+ dev_dstats_tx_add(dev, len);
+ else
+ dev_dstats_tx_dropped(dev);
return ret;
}
@@ -1364,7 +1343,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
if (!is_ndisc) {
struct net_device *orig_dev = skb->dev;
- vrf_rx_stats(vrf_dev, skb->len);
+ dev_dstats_rx_add(vrf_dev, skb->len);
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
@@ -1420,7 +1399,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
goto out;
}
- vrf_rx_stats(vrf_dev, skb->len);
+ dev_dstats_rx_add(vrf_dev, skb->len);
if (!list_empty(&vrf_dev->ptype_all)) {
int err;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 9ea63059d52d..05c10acb2a57 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -622,9 +622,9 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
return 1;
}
-static bool vxlan_parse_gpe_proto(struct vxlanhdr *hdr, __be16 *protocol)
+static bool vxlan_parse_gpe_proto(const struct vxlanhdr *hdr, __be16 *protocol)
{
- struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)hdr;
+ const struct vxlanhdr_gpe *gpe = (const struct vxlanhdr_gpe *)hdr;
/* Need to have Next Protocol set for interfaces in GPE mode. */
if (!gpe->np_applied)
@@ -1352,6 +1352,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev, int *idx)
{
+ struct ndo_fdb_dump_context *ctx = (void *)cb->ctx;
struct vxlan_dev *vxlan = netdev_priv(dev);
unsigned int h;
int err = 0;
@@ -1364,7 +1365,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct vxlan_rdst *rd;
if (rcu_access_pointer(f->nh)) {
- if (*idx < cb->args[2])
+ if (*idx < ctx->fdb_idx)
goto skip_nh;
err = vxlan_fdb_info(skb, vxlan, f,
NETLINK_CB(cb->skb).portid,
@@ -1381,7 +1382,7 @@ skip_nh:
}
list_for_each_entry_rcu(rd, &f->remotes, list) {
- if (*idx < cb->args[2])
+ if (*idx < ctx->fdb_idx)
goto skip;
err = vxlan_fdb_info(skb, vxlan, f,
@@ -1554,18 +1555,17 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
#endif
}
-static enum skb_drop_reason vxlan_remcsum(struct vxlanhdr *unparsed,
- struct sk_buff *skb,
- u32 vxflags)
+static enum skb_drop_reason vxlan_remcsum(struct sk_buff *skb, u32 vxflags)
{
+ const struct vxlanhdr *vh = vxlan_hdr(skb);
enum skb_drop_reason reason;
size_t start, offset;
- if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
- goto out;
+ if (!(vh->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
+ return SKB_NOT_DROPPED_YET;
- start = vxlan_rco_start(unparsed->vx_vni);
- offset = start + vxlan_rco_offset(unparsed->vx_vni);
+ start = vxlan_rco_start(vh->vx_vni);
+ offset = start + vxlan_rco_offset(vh->vx_vni);
reason = pskb_may_pull_reason(skb, offset + sizeof(u16));
if (reason)
@@ -1573,22 +1573,20 @@ static enum skb_drop_reason vxlan_remcsum(struct vxlanhdr *unparsed,
skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
!!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
-out:
- unparsed->vx_flags &= ~VXLAN_HF_RCO;
- unparsed->vx_vni &= VXLAN_VNI_MASK;
-
return SKB_NOT_DROPPED_YET;
}
-static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
- struct sk_buff *skb, u32 vxflags,
+static void vxlan_parse_gbp_hdr(struct sk_buff *skb, u32 vxflags,
struct vxlan_metadata *md)
{
- struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
+ const struct vxlanhdr *vh = vxlan_hdr(skb);
+ const struct vxlanhdr_gbp *gbp;
struct metadata_dst *tun_dst;
- if (!(unparsed->vx_flags & VXLAN_HF_GBP))
- goto out;
+ gbp = (const struct vxlanhdr_gbp *)vh;
+
+ if (!(vh->vx_flags & VXLAN_HF_GBP))
+ return;
md->gbp = ntohs(gbp->policy_id);
@@ -1607,8 +1605,6 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
/* In flow-based mode, GBP is carried in dst_metadata */
if (!(vxflags & VXLAN_F_COLLECT_METADATA))
skb->mark = md->gbp;
-out:
- unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
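/* Net effect of the two conversions above: vxlan_remcsum() and
 * vxlan_parse_gbp_hdr() read the immutable vxlan_hdr(skb) directly instead of
 * clearing bits from a scratch copy, because "unknown bit" policing now
 * happens once, up front, in vxlan_rcv() against vxlan->cfg.reserved_bits.
 */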
static enum skb_drop_reason vxlan_set_mac(struct vxlan_dev *vxlan,
@@ -1672,9 +1668,9 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_vni_node *vninode = NULL;
+ const struct vxlanhdr *vh;
struct vxlan_dev *vxlan;
struct vxlan_sock *vs;
- struct vxlanhdr unparsed;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
__be16 protocol = htons(ETH_P_TEB);
@@ -1689,24 +1685,21 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (reason)
goto drop;
- unparsed = *vxlan_hdr(skb);
+ vh = vxlan_hdr(skb);
/* VNI flag always required to be set */
- if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
+ if (!(vh->vx_flags & VXLAN_HF_VNI)) {
netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
- ntohl(vxlan_hdr(skb)->vx_flags),
- ntohl(vxlan_hdr(skb)->vx_vni));
+ ntohl(vh->vx_flags), ntohl(vh->vx_vni));
reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
/* Return non vxlan pkt */
goto drop;
}
- unparsed.vx_flags &= ~VXLAN_HF_VNI;
- unparsed.vx_vni &= ~VXLAN_VNI_MASK;
vs = rcu_dereference_sk_user_data(sk);
if (!vs)
goto drop;
- vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
+ vni = vxlan_vni(vh->vx_vni);
vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode);
if (!vxlan) {
@@ -1714,13 +1707,27 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- /* For backwards compatibility, only allow reserved fields to be
- * used by VXLAN extensions if explicitly requested.
- */
- if (vs->flags & VXLAN_F_GPE) {
- if (!vxlan_parse_gpe_proto(&unparsed, &protocol))
+ if (vh->vx_flags & vxlan->cfg.reserved_bits.vx_flags ||
+ vh->vx_vni & vxlan->cfg.reserved_bits.vx_vni) {
+ /* If the header uses bits besides those enabled by the
+ * netdevice configuration, treat this as a malformed packet.
+ * This behavior diverges from VXLAN RFC (RFC7348) which
+ * stipulates that bits in reserved fields are to be
+ * ignored. The approach here maintains compatibility with
+ * previous stack code, and also is more robust and provides a
+ * little more security in adding extensions to VXLAN.
+ */
+ reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
+ DEV_STATS_INC(vxlan->dev, rx_frame_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_ERRORS, 0);
+ goto drop;
+ }
+
+ if (vxlan->cfg.flags & VXLAN_F_GPE) {
+ if (!vxlan_parse_gpe_proto(vh, &protocol))
goto drop;
- unparsed.vx_flags &= ~VXLAN_GPE_USED_BITS;
raw_proto = true;
}
@@ -1730,8 +1737,8 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (vs->flags & VXLAN_F_REMCSUM_RX) {
- reason = vxlan_remcsum(&unparsed, skb, vs->flags);
+ if (vxlan->cfg.flags & VXLAN_F_REMCSUM_RX) {
+ reason = vxlan_remcsum(skb, vxlan->cfg.flags);
if (unlikely(reason))
goto drop;
}
@@ -1756,25 +1763,12 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
memset(md, 0, sizeof(*md));
}
- if (vs->flags & VXLAN_F_GBP)
- vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
+ if (vxlan->cfg.flags & VXLAN_F_GBP)
+ vxlan_parse_gbp_hdr(skb, vxlan->cfg.flags, md);
/* Note that GBP and GPE can never be active together. This is
* ensured in vxlan_dev_configure.
*/
- if (unparsed.vx_flags || unparsed.vx_vni) {
- /* If there are any unprocessed flags remaining treat
- * this as a malformed packet. This behavior diverges from
- * VXLAN RFC (RFC7348) which stipulates that bits in reserved
- * in reserved fields are to be ignored. The approach here
- * maintains compatibility with previous stack code, and also
- * is more robust and provides a little more security in
- * adding extensions to VXLAN.
- */
- reason = SKB_DROP_REASON_VXLAN_INVALID_HDR;
- goto drop;
- }
-
if (!raw_proto) {
reason = vxlan_set_mac(vxlan, vs, skb, vni);
if (reason)
@@ -1818,14 +1812,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
rcu_read_unlock();
- dev_core_stats_rx_dropped_inc(vxlan->dev);
+ dev_dstats_rx_dropped(vxlan->dev);
vxlan_vnifilter_count(vxlan, vni, vninode,
VXLAN_VNI_STATS_RX_DROPS, 0);
reason = SKB_DROP_REASON_DEV_READY;
goto drop;
}
- dev_sw_netstats_rx_add(vxlan->dev, skb->len);
+ dev_dstats_rx_add(vxlan->dev, skb->len);
vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len);
gro_cells_receive(&vxlan->gro_cells, skb);
@@ -1880,7 +1874,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
goto out;
@@ -1938,7 +1932,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->pkt_type = PACKET_HOST;
if (netif_rx(reply) == NET_RX_DROP) {
- dev_core_stats_rx_dropped_inc(dev);
+ dev_dstats_rx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2097,7 +2091,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (netif_rx(reply) == NET_RX_DROP) {
- dev_core_stats_rx_dropped_inc(dev);
+ dev_dstats_rx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2271,8 +2265,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
{
union vxlan_addr loopback;
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
+ unsigned int len = skb->len;
struct net_device *dev;
- int len = skb->len;
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
@@ -2299,16 +2293,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
if ((dst_vxlan->cfg.flags & VXLAN_F_LEARN) && snoop)
vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
- dev_sw_netstats_tx_add(src_vxlan->dev, 1, len);
+ dev_dstats_tx_add(src_vxlan->dev, len);
vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len);
if (__netif_rx(skb) == NET_RX_SUCCESS) {
- dev_sw_netstats_rx_add(dst_vxlan->dev, len);
+ dev_dstats_rx_add(dst_vxlan->dev, len);
vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX,
len);
} else {
drop:
- dev_core_stats_rx_dropped_inc(dev);
+ dev_dstats_rx_dropped(dev);
vxlan_vnifilter_count(dst_vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2621,7 +2615,7 @@ out_unlock:
return;
drop:
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb_reason(skb, reason);
return;
@@ -2666,7 +2660,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
return;
drop:
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2704,7 +2698,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev,
return NETDEV_TX_OK;
drop:
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2801,10 +2795,10 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);
- dev_core_stats_tx_dropped_inc(dev);
+ dev_dstats_tx_dropped(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
- kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
return NETDEV_TX_OK;
}
}
@@ -2827,7 +2821,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
if (fdst)
vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
else
- kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
}
return NETDEV_TX_OK;
@@ -3371,7 +3365,7 @@ static void vxlan_setup(struct net_device *dev)
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU;
- dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
INIT_LIST_HEAD(&vxlan->next);
timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
@@ -3435,6 +3429,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_VNIFILTER] = { .type = NLA_U8 },
[IFLA_VXLAN_LOCALBYPASS] = NLA_POLICY_MAX(NLA_U8, 1),
[IFLA_VXLAN_LABEL_POLICY] = NLA_POLICY_MAX(NLA_U32, VXLAN_LABEL_MAX),
+ [IFLA_VXLAN_RESERVED_BITS] = NLA_POLICY_EXACT_LEN(sizeof(struct vxlanhdr)),
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -4070,6 +4065,10 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
struct net_device *dev, struct vxlan_config *conf,
bool changelink, struct netlink_ext_ack *extack)
{
+ struct vxlanhdr used_bits = {
+ .vx_flags = VXLAN_HF_VNI,
+ .vx_vni = VXLAN_VNI_MASK,
+ };
struct vxlan_dev *vxlan = netdev_priv(dev);
int err = 0;
@@ -4296,6 +4295,8 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
extack);
if (err)
return err;
+ used_bits.vx_flags |= VXLAN_HF_RCO;
+ used_bits.vx_vni |= ~VXLAN_VNI_MASK;
}
if (data[IFLA_VXLAN_GBP]) {
@@ -4303,6 +4304,7 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
VXLAN_F_GBP, changelink, false, extack);
if (err)
return err;
+ used_bits.vx_flags |= VXLAN_GBP_USED_BITS;
}
if (data[IFLA_VXLAN_GPE]) {
@@ -4311,6 +4313,46 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
extack);
if (err)
return err;
+
+ used_bits.vx_flags |= VXLAN_GPE_USED_BITS;
+ }
+
+ if (data[IFLA_VXLAN_RESERVED_BITS]) {
+ struct vxlanhdr reserved_bits;
+
+ if (changelink) {
+ NL_SET_ERR_MSG_ATTR(extack,
+ data[IFLA_VXLAN_RESERVED_BITS],
+ "Cannot change reserved_bits");
+ return -EOPNOTSUPP;
+ }
+
+ nla_memcpy(&reserved_bits, data[IFLA_VXLAN_RESERVED_BITS],
+ sizeof(reserved_bits));
+ if (used_bits.vx_flags & reserved_bits.vx_flags ||
+ used_bits.vx_vni & reserved_bits.vx_vni) {
+ __be64 ub_be64, rb_be64;
+
+ memcpy(&ub_be64, &used_bits, sizeof(ub_be64));
+ memcpy(&rb_be64, &reserved_bits, sizeof(rb_be64));
+
+ NL_SET_ERR_MSG_ATTR_FMT(extack,
+ data[IFLA_VXLAN_RESERVED_BITS],
+ "Used bits %#018llx cannot overlap reserved bits %#018llx",
+ be64_to_cpu(ub_be64),
+ be64_to_cpu(rb_be64));
+ return -EINVAL;
+ }
+
+ conf->reserved_bits = reserved_bits;
+ } else {
+ /* For backwards compatibility, only allow reserved fields to be
+ * used by VXLAN extensions if explicitly requested.
+ */
+ conf->reserved_bits = (struct vxlanhdr) {
+ .vx_flags = ~used_bits.vx_flags,
+ .vx_vni = ~used_bits.vx_vni,
+ };
}
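/* A worked example of the defaulting above, with no extensions enabled and
 * using the RFC 7348 header layout (values shown in network byte order):
 *
 *	used_bits.vx_flags = VXLAN_HF_VNI   = 0x08000000
 *	used_bits.vx_vni   = VXLAN_VNI_MASK = 0xffffff00
 *
 *	reserved_bits.vx_flags = ~0x08000000 = 0xf7ffffff
 *	reserved_bits.vx_vni   = ~0xffffff00 = 0x000000ff
 *
 * Any received header that sets a bit inside reserved_bits is then dropped by
 * the new check in vxlan_rcv() as SKB_DROP_REASON_VXLAN_INVALID_HDR.
 */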
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
@@ -4497,6 +4539,8 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(0) + /* IFLA_VXLAN_GPE */
nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_VNIFILTER */
+ /* IFLA_VXLAN_RESERVED_BITS */
+ nla_total_size(sizeof(struct vxlanhdr)) +
0;
}
@@ -4599,6 +4643,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
!!(vxlan->cfg.flags & VXLAN_F_VNIFILTER)))
goto nla_put_failure;
+ if (nla_put(skb, IFLA_VXLAN_RESERVED_BITS,
+ sizeof(vxlan->cfg.reserved_bits),
+ &vxlan->cfg.reserved_bits))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
diff --git a/drivers/net/vxlan/vxlan_mdb.c b/drivers/net/vxlan/vxlan_mdb.c
index 8735891ee128..816ab1aa0526 100644
--- a/drivers/net/vxlan/vxlan_mdb.c
+++ b/drivers/net/vxlan/vxlan_mdb.c
@@ -1712,7 +1712,7 @@ netdev_tx_t vxlan_mdb_xmit(struct vxlan_dev *vxlan,
vxlan_xmit_one(skb, vxlan->dev, src_vni,
rcu_dereference(fremote->rd), false);
else
- kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NO_TX_TARGET);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/wan/framer/framer-core.c b/drivers/net/wan/framer/framer-core.c
index f547c22e26ac..58f5143359df 100644
--- a/drivers/net/wan/framer/framer-core.c
+++ b/drivers/net/wan/framer/framer-core.c
@@ -732,8 +732,8 @@ EXPORT_SYMBOL_GPL(devm_framer_create);
/**
* framer_provider_simple_of_xlate() - returns the framer instance from framer provider
- * @dev: the framer provider device
- * @args: of_phandle_args (not used here)
+ * @dev: the framer provider device (not used here)
+ * @args: of_phandle_args
*
* Intended to be used by framer provider for the common case where #framer-cells is
* 0. For other cases where #framer-cells is greater than '0', the framer provider
@@ -743,21 +743,14 @@ EXPORT_SYMBOL_GPL(devm_framer_create);
struct framer *framer_provider_simple_of_xlate(struct device *dev,
const struct of_phandle_args *args)
{
- struct class_dev_iter iter;
- struct framer *framer;
-
- class_dev_iter_init(&iter, &framer_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- framer = dev_to_framer(dev);
- if (args->np != framer->dev.of_node)
- continue;
+ struct device *target_dev;
- class_dev_iter_exit(&iter);
- return framer;
- }
+ target_dev = class_find_device_by_of_node(&framer_class, args->np);
+ if (!target_dev)
+ return ERR_PTR(-ENODEV);
- class_dev_iter_exit(&iter);
- return ERR_PTR(-ENODEV);
+ put_device(target_dev);
+ return dev_to_framer(target_dev);
}
EXPORT_SYMBOL_GPL(framer_provider_simple_of_xlate);
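/* class_find_device_by_of_node() returns its match with a reference held (it
 * is a class_find_device() wrapper), so the put_device() above drops that
 * transient reference before the framer is handed back - mirroring the old
 * iterator, which held no reference past class_dev_iter_exit() either.
 */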
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index be67382c00f6..c576bbba52bf 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -123,6 +123,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -207,6 +208,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.name = "qca6390 hw2.0",
@@ -296,6 +298,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
+ .pdev_suspend = false,
},
{
.name = "qcn9074 hw1.0",
@@ -379,6 +382,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.name = "wcn6855 hw2.0",
@@ -468,6 +472,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
+ .pdev_suspend = false,
},
{
.name = "wcn6855 hw2.1",
@@ -555,6 +560,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
.support_dual_stations = true,
+ .pdev_suspend = false,
},
{
.name = "wcn6750 hw1.0",
@@ -637,6 +643,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
.support_dual_stations = false,
+ .pdev_suspend = true,
},
{
.hw_rev = ATH11K_HW_IPQ5018_HW10,
@@ -719,6 +726,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
.support_dual_stations = false,
+ .pdev_suspend = false,
},
{
.name = "qca2066 hw2.1",
@@ -809,6 +817,94 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.support_fw_mac_sequence = true,
.support_dual_stations = true,
},
+ {
+ .name = "qca6698aq hw2.1",
+ .hw_rev = ATH11K_HW_QCA6698AQ_HW21,
+ .fw = {
+ .dir = "QCA6698AQ/hw2.1",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 2 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = true,
+ .pdev_suspend = false,
+ },
};
static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
@@ -1669,11 +1765,47 @@ err_pdev_debug:
return ret;
}
+static void ath11k_core_pdev_suspend_target(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ unsigned long time_left;
+ int ret;
+ int i;
+
+ if (!ab->hw_params.pdev_suspend)
+ return;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ reinit_completion(&ab->htc_suspend);
+
+ ret = ath11k_wmi_pdev_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+ pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ab, "could not suspend target :%d\n", ret);
+ /* pointless to try other pdevs */
+ return;
+ }
+
+ time_left = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
+
+ if (!time_left) {
+ ath11k_warn(ab, "suspend timed out - target pause event never came\n");
+ /* pointless to try other pdevs */
+ return;
+ }
+ }
+}
+
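/* Ordering matters in ath11k_core_pdev_destroy() below: the target is
 * suspended while HTC interrupts are still live, since the "target pause"
 * event that completes ab->htc_suspend arrives over that path; only then is
 * ath11k_hif_irq_disable() safe. The 3 * HZ wait bounds a silent firmware.
 */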
static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
{
ath11k_spectral_deinit(ab);
ath11k_thermal_unregister(ab);
ath11k_mac_unregister(ab);
+ ath11k_core_pdev_suspend_target(ab);
ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_debugfs_pdev_destroy(ab);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 09c37e19a168..a9dc7fe7765a 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -148,6 +148,7 @@ enum ath11k_hw_rev {
ATH11K_HW_WCN6750_HW10,
ATH11K_HW_IPQ5018_HW10,
ATH11K_HW_QCA2066_HW21,
+ ATH11K_HW_QCA6698AQ_HW21,
};
enum ath11k_firmware_mode {
@@ -340,7 +341,6 @@ struct ath11k_chan_power_info {
* @ap_power_type: type of power (SP/LPI/VLP)
* @num_pwr_levels: number of power levels
* @reg_max: Array of maximum TX power (dBm) per PSD value
- * @ap_constraint_power: AP constraint power (dBm)
* @tpe: TPE values processed from TPE IE
* @chan_power_info: power info to send to firmware
*/
@@ -350,7 +350,6 @@ struct ath11k_reg_tpc_power_info {
enum wmi_reg_6ghz_ap_type ap_power_type;
u8 num_pwr_levels;
u8 reg_max[ATH11K_NUM_PWR_LEVELS];
- u8 ap_constraint_power;
s8 tpe[ATH11K_NUM_PWR_LEVELS];
struct ath11k_chan_power_info chan_power_info[ATH11K_NUM_PWR_LEVELS];
};
@@ -370,7 +369,6 @@ struct ath11k_vif {
struct ath11k *ar;
struct ieee80211_vif *vif;
- u16 tx_seq_no;
struct wmi_wmm_params_all_arg wmm_params;
struct list_head list;
union {
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 65d2bc0687c8..f777314db8b3 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -165,7 +165,6 @@ struct ath11k_mon_data {
struct ath11k_pdev_mon_stats rx_mon_stats;
/* lock for monitor data */
spinlock_t mon_lock;
- struct sk_buff_head rx_status_q;
};
struct ath11k_pdev_dp {
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 40088e62572e..029ecf51c9ef 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -3872,6 +3872,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
+ rbm != HAL_RX_BUF_RBM_SW1_BM &&
rbm != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
@@ -4690,11 +4691,12 @@ static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
}
}
-static u32
-ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
- void *ring_entry, struct sk_buff **head_msdu,
- struct sk_buff **tail_msdu, u32 *npackets,
- u32 *ppdu_id)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu, u32 *npackets,
+ u32 *ppdu_id)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
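/* For reference, noinline_for_stack is simply #define'd to noinline in
 * include/linux/compiler_types.h; keeping these large helpers out of line
 * stops clang from accumulating their frames into one oversized caller.
 */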
@@ -5705,8 +5707,6 @@ static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
- skb_queue_head_init(&pmon->rx_status_q);
-
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
memset(&pmon->rx_mon_stats, 0,
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index dc8bbe073017..601542410c75 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -700,7 +700,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
#define HAL_REO_CMD_UPD0_VLD BIT(9)
#define HAL_REO_CMD_UPD0_ALDC BIT(10)
@@ -725,7 +725,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
#define HAL_REO_CMD_UPD0_PN BIT(30)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
#define HAL_REO_CMD_UPD1_VLD BIT(16)
#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
@@ -741,7 +741,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
#define HAL_REO_CMD_UPD2_SVLD BIT(10)
#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index 8f7dd43dc1bd..753bd93f0212 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -372,7 +372,8 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
wbm_desc->buf_addr_info.info1);
- if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
+ if (ret_buf_mgr != HAL_RX_BUF_RBM_SW1_BM &&
+ ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
return -EINVAL;
}
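/* Same relaxation in both WBM paths: HAL_RX_BUF_RBM_SW1_BM is now accepted
 * alongside SW3_BM, presumably because some firmware returns error MSDUs via
 * the SW1 return-buffer manager; rejecting those only inflated
 * soc_stats.invalid_rbm and dropped otherwise-handleable frames.
 */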
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 300322535766..52d9f4c13b13 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -227,6 +227,7 @@ struct ath11k_hw_params {
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
bool support_dual_stations;
+ bool pdev_suspend;
};
struct ath11k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index e6acbff06749..1556392f7ad4 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1697,8 +1697,6 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif,
return;
}
- arvif->tx_seq_no = 0x1000;
-
arvif->aid = 0;
ether_addr_copy(arvif->bssid, info->bssid);
@@ -2230,7 +2228,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
/* In IPQ8074 platform, VHT mcs rate 10 and 11 is enabled by default.
- * VHT mcs rate 10 and 11 is not suppoerted in 11ac standard.
+ * VHT mcs rate 10 and 11 is not supported in 11ac standard.
* so explicitly disable the VHT MCS rate 10 and 11 in 11ac mode.
*/
arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
@@ -6952,7 +6950,7 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
- /* TODO: recal traffic pause state based on the available vdevs */
+ /* TODO: recalc traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
}
@@ -9356,6 +9354,7 @@ static int ath11k_fw_stats_request(struct ath11k *ar,
static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
+ unsigned int link_id,
int *dbm)
{
struct ath11k *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 6974a551883f..6e45f464a429 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -398,6 +398,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
case ATH11K_HW_WCN6855_HW20:
case ATH11K_HW_WCN6855_HW21:
case ATH11K_HW_QCA2066_HW21:
+ case ATH11K_HW_QCA6698AQ_HW21:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index be9d2c69cc41..b93f04973ad7 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -846,6 +846,9 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
case 0x1019D0E1:
ab->hw_rev = ATH11K_HW_QCA2066_HW21;
break;
+ case 0x001e60e1:
+ ab->hw_rev = ATH11K_HW_QCA6698AQ_HW21;
+ break;
default:
ab->hw_rev = ATH11K_HW_WCN6855_HW21;
}
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index debe7c5919ef..3fe77310c71f 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -126,6 +126,17 @@ static const struct ath11k_msi_config ath11k_msi_config[] = {
},
.hw_rev = ATH11K_HW_QCA2066_HW21,
},
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA6698AQ_HW21,
+ },
};
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 7a22483b35cd..5759fc521316 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1704,7 +1704,9 @@ static const struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
},
};
-static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req;
struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
@@ -2570,7 +2572,9 @@ static void ath11k_qmi_m3_free(struct ath11k_base *ab)
m3_mem->size = 0;
}
-static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req;
diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c
index 827085a926b2..b6f08755129f 100644
--- a/drivers/net/wireless/ath/ath11k/wow.c
+++ b/drivers/net/wireless/ath/ath11k/wow.c
@@ -148,8 +148,10 @@ static int ath11k_wow_cleanup(struct ath11k *ar)
* 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
* +--+------------+----+-----------+---------------+-----------+
*/
-static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
- const struct cfg80211_pkt_pattern *old)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
+ const struct cfg80211_pkt_pattern *old)
{
u8 hdr_8023_pattern[ETH_HLEN] = {};
u8 hdr_8023_bit_mask[ETH_HLEN] = {};
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index c57322221e1d..49d1ac15cb7a 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -22,6 +22,11 @@ unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
+/* protected with ath12k_hw_group_mutex */
+static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
+
+static DEFINE_MUTEX(ath12k_hw_group_mutex);
+
static int ath12k_core_rfkill_config(struct ath12k_base *ab)
{
struct ath12k *ar;
@@ -79,11 +84,17 @@ int ath12k_core_suspend(struct ath12k_base *ab)
ar = ab->pdevs[i].ar;
if (!ar)
continue;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
ret = ath12k_mac_wait_tx_complete(ar);
if (ret) {
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
return ret;
}
+
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
}
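/* A plausible reading of the suspend change above: the per-radio wiphy lock
 * is taken around ath12k_mac_wait_tx_complete() (which now expects it held),
 * and released before any early return so a failing radio never leaves the
 * lock owned.
 */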
/* PM framework skips suspend_late/resume_early callbacks
@@ -593,14 +604,17 @@ u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
static void ath12k_core_stop(struct ath12k_base *ab)
{
+ ath12k_core_stopped(ab);
+
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath12k_qmi_firmware_stop(ab);
ath12k_acpi_stop(ab);
+ ath12k_dp_rx_pdev_reo_cleanup(ab);
ath12k_hif_stop(ab);
ath12k_wmi_detach(ab);
- ath12k_dp_rx_pdev_reo_cleanup(ab);
+ ath12k_dp_free(ab);
/* De-Init of components as needed */
}
@@ -702,7 +716,7 @@ err_qmi_deinit:
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
- ath12k_dp_free(ab);
+ ath12k_hif_power_down(ab, false);
ath12k_reg_free(ab);
ath12k_debugfs_soc_destroy(ab);
ath12k_qmi_deinit_service(ab);
@@ -712,30 +726,17 @@ static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
int ret;
- ret = ath12k_mac_register(ab);
- if (ret) {
- ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret);
- return ret;
- }
-
ret = ath12k_dp_pdev_alloc(ab);
if (ret) {
ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
- goto err_mac_unregister;
+ return ret;
}
return 0;
-
-err_mac_unregister:
- ath12k_mac_unregister(ab);
-
- return ret;
}
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
- ath12k_mac_unregister(ab);
- ath12k_hif_irq_disable(ab);
ath12k_dp_pdev_free(ab);
}
@@ -744,6 +745,8 @@ static int ath12k_core_start(struct ath12k_base *ab,
{
int ret;
+ lockdep_assert_held(&ab->core_lock);
+
ret = ath12k_wmi_attach(ab);
if (ret) {
ath12k_err(ab, "failed to attach wmi: %d\n", ret);
@@ -793,19 +796,12 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_hif_stop;
}
- ret = ath12k_mac_allocate(ab);
- if (ret) {
- ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
- ret);
- goto err_hif_stop;
- }
-
ath12k_dp_cc_config(ab);
ret = ath12k_dp_rx_pdev_reo_setup(ab);
if (ret) {
ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
- goto err_mac_destroy;
+ goto err_hif_stop;
}
ath12k_dp_hal_rx_desc_init(ab);
@@ -844,12 +840,14 @@ static int ath12k_core_start(struct ath12k_base *ab,
/* ACPI is optional so continue in case of an error */
ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
+ if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
+ /* Indicate the core start in the appropriate group */
+ ath12k_core_started(ab);
+
return 0;
err_reo_cleanup:
ath12k_dp_rx_pdev_reo_cleanup(ab);
-err_mac_destroy:
- ath12k_mac_destroy(ab);
err_hif_stop:
ath12k_hif_stop(ab);
err_wmi_detach:
@@ -857,6 +855,95 @@ err_wmi_detach:
return ret;
}
+static void ath12k_core_device_cleanup(struct ath12k_base *ab)
+{
+ mutex_lock(&ab->core_lock);
+
+ ath12k_hif_irq_disable(ab);
+ ath12k_core_pdev_destroy(ab);
+
+ mutex_unlock(&ab->core_lock);
+}
+
+static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+ ath12k_mac_unregister(ag);
+
+ for (i = ag->num_devices - 1; i >= 0; i--) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+ ath12k_core_device_cleanup(ab);
+ }
+
+ ath12k_mac_destroy(ag);
+}
+
+static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int ret, i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
+ goto core_pdev_create;
+
+ ret = ath12k_mac_allocate(ag);
+ if (WARN_ON(ret))
+ return ret;
+
+ ret = ath12k_mac_register(ag);
+ if (WARN_ON(ret))
+ goto err_mac_destroy;
+
+ set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+core_pdev_create:
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+
+ ret = ath12k_core_pdev_create(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to create pdev core %d\n", ret);
+ mutex_unlock(&ab->core_lock);
+ goto err;
+ }
+
+ ath12k_hif_irq_enable(ab);
+
+ ret = ath12k_core_rfkill_config(ab);
+ if (ret && ret != -EOPNOTSUPP) {
+ mutex_unlock(&ab->core_lock);
+ goto err;
+ }
+
+ mutex_unlock(&ab->core_lock);
+ }
+
+ return 0;
+
+err:
+ ath12k_core_hw_group_stop(ag);
+ return ret;
+
+err_mac_destroy:
+ ath12k_mac_destroy(ag);
+
+ return ret;
+}
+
static int ath12k_core_start_firmware(struct ath12k_base *ab,
enum ath12k_firmware_mode mode)
{
@@ -874,9 +961,37 @@ static int ath12k_core_start_firmware(struct ath12k_base *ab,
return ret;
}
+static inline
+bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
+{
+ lockdep_assert_held(&ag->mutex);
+
+ return (ag->num_started == ag->num_devices);
+}
+
+static void ath12k_core_trigger_partner(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_base *partner_ab;
+ bool found = false;
+ int i;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ partner_ab = ag->ab[i];
+ if (!partner_ab)
+ continue;
+
+ if (found)
+ ath12k_qmi_trigger_host_cap(partner_ab);
+
+ found = (partner_ab == ab);
+ }
+}
+
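/* Bring-up flow for a multi-device group, assuming ath12k_core_started()
 * advances ag->num_started: each device reaching firmware-ready starts its
 * own core, then either (a) it is the last one, so
 * ath12k_core_hw_group_start_ready() holds and it registers mac80211 for the
 * whole group, or (b) it kicks the remaining devices through
 * ath12k_core_trigger_partner() and waits its turn.
 */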
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
- int ret;
+ struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
+ int ret, i;
ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
if (ret) {
@@ -896,41 +1011,52 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
goto err_firmware_stop;
}
+ mutex_lock(&ag->mutex);
mutex_lock(&ab->core_lock);
+
ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
if (ret) {
ath12k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
}
- ret = ath12k_core_pdev_create(ab);
- if (ret) {
- ath12k_err(ab, "failed to create pdev core: %d\n", ret);
- goto err_core_stop;
- }
- ath12k_hif_irq_enable(ab);
+ mutex_unlock(&ab->core_lock);
- ret = ath12k_core_rfkill_config(ab);
- if (ret && ret != -EOPNOTSUPP) {
- ath12k_err(ab, "failed to config rfkill: %d\n", ret);
- goto err_core_pdev_destroy;
+ if (ath12k_core_hw_group_start_ready(ag)) {
+ ret = ath12k_core_hw_group_start(ag);
+ if (ret) {
+ ath12k_warn(ab, "unable to start hw group\n");
+ goto err_core_stop;
+ }
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
+ } else {
+ ath12k_core_trigger_partner(ab);
}
- mutex_unlock(&ab->core_lock);
+ mutex_unlock(&ag->mutex);
return 0;
-err_core_pdev_destroy:
- ath12k_core_pdev_destroy(ab);
err_core_stop:
- ath12k_core_stop(ab);
- ath12k_mac_destroy(ab);
+ for (i = ag->num_devices - 1; i >= 0; i--) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+ ath12k_core_stop(ab);
+ mutex_unlock(&ab->core_lock);
+ }
+ goto exit;
+
err_dp_free:
ath12k_dp_free(ab);
mutex_unlock(&ab->core_lock);
err_firmware_stop:
ath12k_qmi_firmware_stop(ab);
+exit:
+ mutex_unlock(&ag->mutex);
return ret;
}
@@ -982,8 +1108,8 @@ static void ath12k_rfkill_work(struct work_struct *work)
rfkill_radio_on = ab->rfkill_radio_on;
spin_unlock_bh(&ab->base_lock);
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ath12k_get_num_hw(ab); i++) {
+ ah = ath12k_ab_to_ah(ab, i);
if (!ah)
continue;
@@ -1034,8 +1160,8 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
if (ab->is_reset)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ath12k_get_num_hw(ab); i++) {
+ ah = ath12k_ab_to_ah(ab, i);
if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
@@ -1073,8 +1199,8 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
struct ath12k *ar;
int i, j;
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ath12k_get_num_hw(ab); i++) {
+ ah = ath12k_ab_to_ah(ab, i);
if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
@@ -1127,8 +1253,16 @@ static void ath12k_core_restart(struct work_struct *work)
}
if (ab->is_reset) {
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ atomic_dec(&ab->reset_count);
+ complete(&ab->reset_complete);
+ ab->is_reset = false;
+ atomic_set(&ab->fail_cont_count, 0);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
+ }
+
+ for (i = 0; i < ath12k_get_num_hw(ab); i++) {
+ ah = ath12k_ab_to_ah(ab, i);
ieee80211_restart_hw(ah->hw);
}
}
@@ -1142,7 +1276,7 @@ static void ath12k_core_reset(struct work_struct *work)
int reset_count, fail_cont_count;
long time_left;
- if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) {
+ if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
return;
}
@@ -1241,38 +1375,246 @@ static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
&ab->panic_nb);
}
-int ath12k_core_init(struct ath12k_base *ab)
+static inline
+bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
{
- int ret;
+ lockdep_assert_held(&ag->mutex);
- ret = ath12k_core_soc_create(ab);
- if (ret) {
- ath12k_err(ab, "failed to create soc core: %d\n", ret);
- return ret;
+ return (ag->num_probed == ag->num_devices);
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_alloc(u8 id, u8 max_devices)
+{
+ struct ath12k_hw_group *ag;
+
+ lockdep_assert_held(&ath12k_hw_group_mutex);
+
+ ag = kzalloc(sizeof(*ag), GFP_KERNEL);
+ if (!ag)
+ return NULL;
+
+ ag->id = id;
+ ag->num_devices = max_devices;
+ list_add(&ag->list, &ath12k_hw_group_list);
+ mutex_init(&ag->mutex);
+
+ return ag;
+}
+
+static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
+{
+ mutex_lock(&ath12k_hw_group_mutex);
+
+ list_del(&ag->list);
+ kfree(ag);
+
+ mutex_unlock(&ath12k_hw_group_mutex);
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
+{
+ u32 group_id = ATH12K_INVALID_GROUP_ID;
+ struct ath12k_hw_group *ag;
+
+ lockdep_assert_held(&ath12k_hw_group_mutex);
+
+ /* The grouping of multiple devices will be based on the device tree.
+ * TODO: parse the device tree to learn which devices belong to a group.
+ *
+ * Platforms without any valid group information have each device placed
+ * in its own invalid group.
+ *
+ * Device tree parsing is not implemented yet, so grouping of multiple
+ * devices is not involved; a single device is simply added to its own
+ * device group.
+ */
+ ag = ath12k_core_hw_group_alloc(group_id, 1);
+ if (!ag) {
+ ath12k_warn(ab, "unable to create new hw group\n");
+ return NULL;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");
+
+ ab->device_id = ag->num_probed++;
+ ag->ab[ab->device_id] = ab;
+ ab->ag = ag;
+ ag->mlo_capable = false;
+
+ return ag;
+}
+
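/* Group lifecycle as wired below: probe assigns ab->device_id from
 * ag->num_probed++ and parks the device in the group; unassign reverses this,
 * and the last device out frees the group. Until device-tree parsing exists,
 * every device lands in a private single-member group whose id is
 * ATH12K_INVALID_GROUP_ID.
 */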
+void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
+ u8 device_id = ab->device_id;
+ int num_probed;
+
+ if (!ag)
+ return;
+
+ mutex_lock(&ag->mutex);
+
+ if (WARN_ON(device_id >= ag->num_devices)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ if (WARN_ON(ag->ab[device_id] != ab)) {
+ mutex_unlock(&ag->mutex);
+ return;
+ }
+
+ ag->ab[device_id] = NULL;
+ ab->ag = NULL;
+ ab->device_id = ATH12K_INVALID_DEVICE_ID;
+
+ if (ag->num_probed)
+ ag->num_probed--;
+
+ num_probed = ag->num_probed;
+
+ mutex_unlock(&ag->mutex);
+
+ if (!num_probed)
+ ath12k_core_hw_group_free(ag);
+}
+
+static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ if (WARN_ON(!ag))
+ return;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ ath12k_core_soc_destroy(ab);
+ }
+}
+
+static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ if (!ag)
+ return;
+
+ mutex_lock(&ag->mutex);
+
+ ath12k_core_hw_group_stop(ag);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+ ath12k_core_stop(ab);
+ mutex_unlock(&ab->core_lock);
+ }
+
+ mutex_unlock(&ag->mutex);
+}
+
+static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i, ret;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ mutex_lock(&ab->core_lock);
+
+ ret = ath12k_core_soc_create(ab);
+ if (ret) {
+ mutex_unlock(&ab->core_lock);
+ ath12k_err(ab, "failed to create soc core: %d\n", ret);
+ return ret;
+ }
+
+ mutex_unlock(&ab->core_lock);
}
+ return 0;
+}
+
+void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
+{
+ lockdep_assert_held(&ag->mutex);
+
+ /* If more than one device is grouped, inter-device MLO functionality
+ * can work regardless of whether each device internally supports
+ * single_chip_mlo or not. Only with a single device does MLO capability
+ * depend on whether that device supports intra-chip MLO.
+ */
+ if (ag->num_devices > 1)
+ ag->mlo_capable = true;
+ else
+ ag->mlo_capable = ag->ab[0]->single_chip_mlo_supp;
+}
+
+int ath12k_core_init(struct ath12k_base *ab)
+{
+ struct ath12k_hw_group *ag;
+ int ret;
+
ret = ath12k_core_panic_notifier_register(ab);
if (ret)
ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
+ mutex_lock(&ath12k_hw_group_mutex);
+
+ ag = ath12k_core_hw_group_assign(ab);
+ if (!ag) {
+ mutex_unlock(&ath12k_hw_group_mutex);
+ ath12k_warn(ab, "unable to get hw group\n");
+ return -ENODEV;
+ }
+
+ mutex_unlock(&ath12k_hw_group_mutex);
+
+ mutex_lock(&ag->mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
+ ag->num_devices, ag->num_probed);
+
+ if (ath12k_core_hw_group_create_ready(ag)) {
+ ret = ath12k_core_hw_group_create(ag);
+ if (ret) {
+ mutex_unlock(&ag->mutex);
+ ath12k_warn(ab, "unable to create hw group\n");
+ goto err;
+ }
+ }
+
+ mutex_unlock(&ag->mutex);
+
return 0;
+
+err:
+ ath12k_core_hw_group_destroy(ab->ag);
+ ath12k_core_hw_group_unassign(ab);
+ return ret;
}
void ath12k_core_deinit(struct ath12k_base *ab)
{
ath12k_core_panic_notifier_unregister(ab);
-
- mutex_lock(&ab->core_lock);
-
- ath12k_core_pdev_destroy(ab);
- ath12k_core_stop(ab);
-
- mutex_unlock(&ab->core_lock);
-
- ath12k_hif_power_down(ab, false);
- ath12k_mac_destroy(ab);
- ath12k_core_soc_destroy(ab);
- ath12k_fw_unmap(ab);
+ ath12k_core_hw_group_cleanup(ab->ag);
+ ath12k_core_hw_group_destroy(ab->ag);
+ ath12k_core_hw_group_unassign(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@@ -1322,7 +1664,7 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
- ab->mlo_capable_flags = ATH12K_INTRA_DEVICE_MLO_SUPPORT;
+ ab->single_chip_mlo_supp = false;
/* Device index used to identify the devices in a group.
*
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 3bf31ee5b9fa..458e3d0071a8 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -63,6 +63,14 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+#define ATH12K_MAX_SOCS 3
+#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_SOCS * MAX_RADIOS)
+#define ATH12K_INVALID_GROUP_ID 0xFF
+#define ATH12K_INVALID_DEVICE_ID 0xFF
+
+#define ATH12K_MAX_MLO_PEERS 256
+#define ATH12K_MLO_PEER_ID_INVALID 0xFFFF
+
enum ath12k_bdf_search {
ATH12K_BDF_SEARCH_DEFAULT,
ATH12K_BDF_SEARCH_BUS_AND_BOARD,
@@ -115,6 +123,7 @@ struct ath12k_skb_cb {
dma_addr_t paddr_ext_desc;
u32 cipher;
u8 flags;
+ u8 link_id;
};
struct ath12k_skb_rxcb {
@@ -208,6 +217,10 @@ enum ath12k_scan_state {
ATH12K_SCAN_ABORTING,
};
+enum ath12k_hw_group_flags {
+ ATH12K_GROUP_FLAG_REGISTERED,
+};
+
enum ath12k_dev_flags {
ATH12K_CAC_RUNNING,
ATH12K_FLAG_CRASH_FLUSH,
@@ -220,6 +233,7 @@ enum ath12k_dev_flags {
ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
ATH12K_FLAG_CE_IRQ_ENABLED,
ATH12K_FLAG_EXT_IRQ_ENABLED,
+ ATH12K_FLAG_QMI_FW_READY_COMPLETE,
};
struct ath12k_tx_conf {
@@ -314,10 +328,11 @@ struct ath12k_vif {
bool ps;
struct ath12k_link_vif deflink;
- struct ath12k_link_vif __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS];
struct ath12k_vif_cache *cache[IEEE80211_MLD_MAX_NUM_LINKS];
/* indicates bitmap of link vif created in FW */
u16 links_map;
+ u8 last_scan_link;
/* Must be last - ends in a flexible-array member.
*
@@ -469,6 +484,9 @@ struct ath12k_link_sta {
struct ath12k_link_vif *arvif;
struct ath12k_sta *ahsta;
+ /* link address similar to ieee80211_link_sta */
+ u8 addr[ETH_ALEN];
+
/* the following are protected by ar->data_lock */
u32 changed; /* IEEE80211_RC_* */
u32 bw;
@@ -485,14 +503,26 @@ struct ath12k_link_sta {
struct ath12k_rx_peer_stats *rx_stats;
struct ath12k_wbm_tx_stats *wbm_tx_stats;
u32 bw_prev;
+
+ /* For now the assoc link will be considered primary */
+ bool is_assoc_link;
+
+ /* for firmware use only */
+ u8 link_idx;
};
struct ath12k_sta {
+ struct ath12k_vif *ahvif;
enum hal_pn_type pn_type;
struct ath12k_link_sta deflink;
struct ath12k_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
/* indicates bitmap of link sta created in FW */
u16 links_map;
+ u8 assoc_link_id;
+ u16 ml_peer_id;
+ u8 num_peer;
+
+ enum ieee80211_sta_state state;
};
#define ATH12K_MIN_5G_FREQ 4150
@@ -657,7 +687,7 @@ struct ath12k {
struct work_struct regd_update_work;
- struct work_struct wmi_mgmt_tx_work;
+ struct wiphy_work wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
struct ath12k_wow wow;
@@ -688,6 +718,8 @@ struct ath12k {
struct ath12k_hw {
struct ieee80211_hw *hw;
+ struct device *dev;
+
/* Protect the write operation of the hardware state ath12k_hw::state
* between hardware start<=>reconfigure<=>stop transitions.
*/
@@ -698,6 +730,11 @@ struct ath12k_hw {
u8 num_radio;
+ DECLARE_BITMAP(free_ml_peer_id_map, ATH12K_MAX_MLO_PEERS);
+
+ /* protected by wiphy_lock() */
+ struct list_head ml_peers;
+
/* Keep last */
struct ath12k radio[] __aligned(sizeof(void *));
};
@@ -784,19 +821,30 @@ struct ath12k_soc_dp_stats {
struct ath12k_soc_dp_tx_err_stats tx_err;
};
-/**
- * enum ath12k_link_capable_flags - link capable flags
- *
- * Single/Multi link capability information
- *
- * @ATH12K_INTRA_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all
- * the links (radios) present within a device.
- * @ATH12K_INTER_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all
- * the links (radios) present across the devices.
+/* Holds info on the group of devices that are registered as a single
+ * wiphy, protected by struct ath12k_hw_group::mutex.
*/
-enum ath12k_link_capable_flags {
- ATH12K_INTRA_DEVICE_MLO_SUPPORT = BIT(0),
- ATH12K_INTER_DEVICE_MLO_SUPPORT = BIT(1),
+struct ath12k_hw_group {
+ struct list_head list;
+ u8 id;
+ u8 num_devices;
+ u8 num_probed;
+ u8 num_started;
+ unsigned long flags;
+ struct ath12k_base *ab[ATH12K_MAX_SOCS];
+
+ /* protects access to this struct */
+ struct mutex mutex;
+
+ /* Holds information about wiphy (hw) registration.
+ *
+ * In the Multi/Single Link Operation case, all pdevs are registered
+ * as a single wiphy. In other (legacy/non-MLO) cases, each pdev is
+ * registered as a separate wiphy.
+ */
+ struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
+ u8 num_hw;
+ bool mlo_capable;
};
/* Master structure to hold the hw data which may be used in core module */
@@ -862,15 +910,6 @@ struct ath12k_base {
struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
- /* Holds information of wiphy (hw) registration.
- *
- * In Multi/Single Link Operation case, all pdevs are registered as
- * a single wiphy. In other (legacy/Non-MLO) cases, each pdev is
- * registered as separate wiphys.
- */
- struct ath12k_hw *ah[MAX_RADIOS];
- u8 num_hw;
-
struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
unsigned long long free_vdev_stats_id_map;
@@ -964,12 +1003,8 @@ struct ath12k_base {
const struct hal_rx_ops *hal_rx_ops;
- /* mlo_capable_flags denotes the single/multi link operation
- * capabilities of the Device.
- *
- * See enum ath12k_link_capable_flags
- */
- u8 mlo_capable_flags;
+ /* Denotes whether MLO is possible within the chip */
+ bool single_chip_mlo_supp;
struct completion restart_completed;
@@ -992,6 +1027,8 @@ struct ath12k_base {
struct notifier_block panic_nb;
+ struct ath12k_hw_group *ag;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -1022,6 +1059,7 @@ int ath12k_core_resume_early(struct ath12k_base *ab);
int ath12k_core_resume(struct ath12k_base *ab);
int ath12k_core_suspend(struct ath12k_base *ab);
int ath12k_core_suspend_late(struct ath12k_base *ab);
+void ath12k_core_hw_group_unassign(struct ath12k_base *ab);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
@@ -1029,6 +1067,8 @@ u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
+void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag);
+
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{
switch (state) {
@@ -1129,4 +1169,40 @@ static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
#define for_each_ar(ah, ar, index) \
for ((index) = 0; ((index) < (ah)->num_radio && \
((ar) = &(ah)->radio[(index)])); (index)++)
+
+static inline struct ath12k_hw *ath12k_ab_to_ah(struct ath12k_base *ab, int idx)
+{
+ return ab->ag->ah[idx];
+}
+
+static inline void ath12k_ab_set_ah(struct ath12k_base *ab, int idx,
+ struct ath12k_hw *ah)
+{
+ ab->ag->ah[idx] = ah;
+}
+
+static inline int ath12k_get_num_hw(struct ath12k_base *ab)
+{
+ return ab->ag->num_hw;
+}
+
+static inline struct ath12k_hw_group *ath12k_ab_to_ag(struct ath12k_base *ab)
+{
+ return ab->ag;
+}
+
+static inline void ath12k_core_started(struct ath12k_base *ab)
+{
+ lockdep_assert_held(&ab->ag->mutex);
+
+ ab->ag->num_started++;
+}
+
+static inline void ath12k_core_stopped(struct ath12k_base *ab)
+{
+ lockdep_assert_held(&ab->ag->mutex);
+
+ ab->ag->num_started--;
+}
+
#endif /* _CORE_H_ */
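
Taken together, the new accessors make walking every radio in a device group straightforward. A sketch (the function is hypothetical; the helpers and for_each_ar() are the ones defined in the header above):

	static void example_walk_group_radios(struct ath12k_base *ab)
	{
		struct ath12k_hw *ah;
		struct ath12k *ar;
		int i, j;

		for (i = 0; i < ath12k_get_num_hw(ab); i++) {
			ah = ath12k_ab_to_ah(ab, i);
			if (!ah)
				continue;

			/* iterate the radios registered under this hw */
			for_each_ar(ah, ar, j)
				ath12k_dbg(ar->ab, ATH12K_DBG_BOOT,
					   "group %u hw %d radio %d\n",
					   ab->ag->id, i, j);
		}
	}
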
diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c
index fe5a732ba9ec..ff6eaeafa092 100644
--- a/drivers/net/wireless/ath/ath12k/debug.c
+++ b/drivers/net/wireless/ath/ath12k/debug.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -36,7 +36,7 @@ void ath12k_err(struct ath12k_base *ab, const char *fmt, ...)
va_end(args);
}
-void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
+void __ath12k_warn(struct device *dev, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
@@ -45,7 +45,7 @@ void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
va_start(args, fmt);
vaf.va = &args;
- dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+ dev_warn_ratelimited(dev, "%pV", &vaf);
/* TODO: Trace the log */
va_end(args);
}
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index f7005917362c..90e801136bc6 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -31,7 +31,10 @@ enum ath12k_debug_mask {
__printf(2, 3) void ath12k_info(struct ath12k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath12k_err(struct ath12k_base *ab, const char *fmt, ...);
-__printf(2, 3) void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...);
+__printf(2, 3) void __ath12k_warn(struct device *dev, const char *fmt, ...);
+
+#define ath12k_warn(ab, fmt, ...) __ath12k_warn((ab)->dev, fmt, ##__VA_ARGS__)
+#define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__)
extern unsigned int ath12k_debug_mask;
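
Both macros expand to the same ratelimited device warn; only the device pointer differs. A usage sketch (the wrapper function and messages are illustrative):

	static void example_warns(struct ath12k_base *ab, struct ath12k_hw *ah,
				  int ret)
	{
		/* device-scoped warning, as before */
		ath12k_warn(ab, "device-scoped warning: %d\n", ret);

		/* hw-scoped warning via the new ath12k_hw::dev pointer */
		ath12k_hw_warn(ah, "hw-scoped warning, %u radios\n",
			       ah->num_radio);
	}
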
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
index c9980c0193d1..d8f137bfba7b 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -48,6 +48,28 @@ print_array_to_buf(u8 *buf, u32 offset, const char *header,
footer);
}
+static const char *ath12k_htt_ax_tx_rx_ru_size_to_str(u8 ru_size)
+{
+ switch (ru_size) {
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26:
+ return "26";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52:
+ return "52";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106:
+ return "106";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242:
+ return "242";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484:
+ return "484";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996:
+ return "996";
+ case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2:
+ return "996x2";
+ default:
+ return "unknown";
+ }
+}
+
static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size)
{
switch (ru_size) {
@@ -88,6 +110,17 @@ static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size)
}
}
+static const char *
+ath12k_tx_ru_size_to_str(enum ath12k_htt_stats_ru_type ru_type, u8 ru_size)
+{
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ return ath12k_htt_ax_tx_rx_ru_size_to_str(ru_size);
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ return ath12k_htt_be_tx_rx_ru_size_to_str(ru_size);
+ else
+ return "unknown";
+}
+
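The dispatcher keys off the RU type reported by firmware, so 11ax-only and 11be reports can share one label helper. For instance (a sketch; the wrapper function is hypothetical):

	static void example_ru_label(void)
	{
		const char *label;

		/* an AX-only report: the RU index maps via the 11ax table,
		 * so index ..._AX_RU_SIZE_484 yields "484" */
		label = ath12k_tx_ru_size_to_str(ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY,
						 ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484);
		(void)label;
	}
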
static void
htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
@@ -1562,7 +1595,8 @@ ath12k_htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf, u16 tag_len,
le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp));
len += print_array_to_buf_index(buf, len, "ac_mu_mimo_brpollX_tried = ", 1,
htt_stats_buf->ac_mu_mimo_brpoll,
- ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS, "\n\n");
+ ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1,
+ "\n\n");
stats_req->buf_len = len;
}
@@ -1590,7 +1624,7 @@ ath12k_htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf, u16 tag_len,
le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp));
len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpollX_tried = ", 1,
htt_stats_buf->ax_mu_mimo_brpoll,
- ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1, "\n");
len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
le32_to_cpu(htt_stats_buf->ax_basic_trigger));
len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger = %u\n",
@@ -2276,9 +2310,9 @@ ath12k_htt_print_tx_pdev_mumimo_grp_stats_tlv(const void *tag_buf, u16 tag_len,
len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_grp_size",
htt_stats_buf->ul_mumimo_grp_best_grp_size,
ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n");
- len += print_array_to_buf_index(buf, len, "ul_mumimo_grp_best_num_usrs = ", 1,
- htt_stats_buf->ul_mumimo_grp_best_usrs,
- ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_num_usrs = ",
+ htt_stats_buf->ul_mumimo_grp_best_usrs,
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
len += print_array_to_buf(buf, len,
"ul_mumimo_grp_tputs_observed (per bin = 300 mbps)",
htt_stats_buf->ul_mumimo_grp_tputs,
@@ -2542,6 +2576,573 @@ ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len,
stats_req->buf_len = len;
}
+static void ath12k_htt_print_dlpager_entry(const struct ath12k_htt_pgs_info *pg_info,
+ int idx, char *str_buf)
+{
+ u64 page_timestamp;
+ u16 index = 0;
+
+ page_timestamp = ath12k_le32hilo_to_u64(pg_info->ts_msb, pg_info->ts_lsb);
+
+ index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ "Index - %u ; Page Number - %u ; ",
+ idx, le32_to_cpu(pg_info->page_num));
+ index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ "Num of pages - %u ; Timestamp - %lluus\n",
+ le32_to_cpu(pg_info->num_pgs), page_timestamp);
+}
+
+static void
+ath12k_htt_print_dlpager_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_dl_pager_stats_tlv *stat_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 dword_lock, dword_unlock;
+ int i;
+ u8 *buf = stats_req->buf;
+ u8 pg_locked;
+ u8 pg_unlock;
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0};
+
+ if (tag_len < sizeof(*stat_buf))
+ return;
+
+ dword_lock = le32_get_bits(stat_buf->info2,
+ ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2);
+ dword_unlock = le32_get_bits(stat_buf->info2,
+ ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2);
+
+ pg_locked = ATH12K_HTT_STATS_PAGE_LOCKED;
+ pg_unlock = ATH12K_HTT_STATS_PAGE_UNLOCKED;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_DLPAGER_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ASYNC locked pages = %u\n",
+ le32_get_bits(stat_buf->info0,
+ ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0));
+ len += scnprintf(buf + len, buf_len - len, "SYNC locked pages = %u\n",
+ le32_get_bits(stat_buf->info0,
+ ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0));
+ len += scnprintf(buf + len, buf_len - len, "Total locked pages = %u\n",
+ le32_get_bits(stat_buf->info1,
+ ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1));
+ len += scnprintf(buf + len, buf_len - len, "Total free pages = %u\n",
+ le32_get_bits(stat_buf->info1,
+ ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1));
+
+ len += scnprintf(buf + len, buf_len - len, "\nLOCKED PAGES HISTORY\n");
+ len += scnprintf(buf + len, buf_len - len, "last_locked_page_idx = %u\n",
+ dword_lock ? dword_lock - 1 : (ATH12K_PAGER_MAX - 1));
+
+ for (i = 0; i < ATH12K_PAGER_MAX; i++) {
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_locked][i],
+ i, str_buf);
+ len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\nUNLOCKED PAGES HISTORY\n");
+ len += scnprintf(buf + len, buf_len - len, "last_unlocked_page_idx = %u\n",
+ dword_unlock ? dword_unlock - 1 : (ATH12K_PAGER_MAX - 1));
+
+ for (i = 0; i < ATH12K_PAGER_MAX; i++) {
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_unlock][i],
+ i, str_buf);
+ len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf, i;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");
+ for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "bdf_nf_chain[%d] = %d\n",
+ i, a_sle32_to_cpu(htt_stats_buf->nf_chain[i]));
+ for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "runtime_nf_chain[%d] = %d\n",
+ i, a_sle32_to_cpu(htt_stats_buf->runtime_nf_chain[i]));
+ len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u / %u (mins)\n",
+ le32_to_cpu(htt_stats_buf->false_radar_cnt),
+ le32_to_cpu(htt_stats_buf->fw_run_time));
+ len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->radar_cs_cnt));
+ len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n\n",
+ a_sle32_to_cpu(htt_stats_buf->ani_level));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_counters_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_counters_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_ofdma_timing_err_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_cck_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mactx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->macrx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phytx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrx_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrx_defer_abort_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_adj_lstf_event_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_adj_non_legacy_cnt));
+ len += print_array_to_buf(buf, len, "rx_pkt_cnt", htt_stats_buf->rx_pkt_cnt,
+ ATH12K_HTT_MAX_RX_PKT_CNT, "\n");
+ len += print_array_to_buf(buf, len, "rx_pkt_crc_pass_cnt",
+ htt_stats_buf->rx_pkt_crc_pass_cnt,
+ ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT, "\n");
+ len += print_array_to_buf(buf, len, "per_blk_err_cnt",
+ htt_stats_buf->per_blk_err_cnt,
+ ATH12K_HTT_MAX_PER_BLK_ERR_CNT, "\n");
+ len += print_array_to_buf(buf, len, "rx_ota_err_cnt",
+ htt_stats_buf->rx_ota_err_cnt,
+ ATH12K_HTT_MAX_RX_OTA_ERR_CNT, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_reset_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_mhz));
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_band_center_freq1));
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_band_center_freq2));
+ len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_phy_mode));
+ len += scnprintf(buf + len, buf_len - len, "chan_flags = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->chan_flags));
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ le32_to_cpu(htt_stats_buf->chan_num));
+ len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->reset_cause));
+ len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->prev_reset_cause));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_warm_reset_src));
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_tbl_mode = %d\n",
+ le32_to_cpu(htt_stats_buf->rx_gain_tbl_mode));
+ len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->xbar_val));
+ len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n",
+ le32_to_cpu(htt_stats_buf->force_calibration));
+ len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n",
+ le32_to_cpu(htt_stats_buf->phyrf_mode));
+ len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_homechan));
+ len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_tx_ch_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_rx_ch_mask));
+ len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phybb_ini_mask));
+ len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phyrf_ini_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_dfs_en_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_sscan_en_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->phy_synth_sel_mask));
+ len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_adfs_freq));
+ len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->cck_fir_settings));
+ len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_dyn_pri_chan));
+ len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n",
+ le32_to_cpu(htt_stats_buf->cca_thresh));
+ len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n",
+ le32_to_cpu(htt_stats_buf->dyn_cca_status));
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n",
+ le32_to_cpu(htt_stats_buf->rxdesense_thresh_hw));
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n\n",
+ le32_to_cpu(htt_stats_buf->rxdesense_thresh_sw));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_reset_counters_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->cf_active_low_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->cf_active_low_pass_cnt));
+ len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_off_through_vreg_cnt));
+ len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->force_calibration_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rf_mode_switch_phy_off_cnt));
+ len += scnprintf(buf + len, buf_len - len, "temperature_recal_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->temperature_recal_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_tpc_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_phy_tpc_stats_tlv *htt_stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_TPC_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "tx_power_scale = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_power_scale));
+ len += scnprintf(buf + len, buf_len - len, "tx_power_scale_db = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_power_scale_db));
+ len += scnprintf(buf + len, buf_len - len, "min_negative_tx_power = %d\n",
+ le32_to_cpu(htt_stats_buf->min_negative_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "reg_ctl_domain = %u\n",
+ le32_to_cpu(htt_stats_buf->reg_ctl_domain));
+ len += scnprintf(buf + len, buf_len - len, "twice_max_rd_power = %u\n",
+ le32_to_cpu(htt_stats_buf->twice_max_rd_power));
+ len += scnprintf(buf + len, buf_len - len, "max_tx_power = %u\n",
+ le32_to_cpu(htt_stats_buf->max_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "home_max_tx_power = %u\n",
+ le32_to_cpu(htt_stats_buf->home_max_tx_power));
+ len += scnprintf(buf + len, buf_len - len, "psd_power = %d\n",
+ le32_to_cpu(htt_stats_buf->psd_power));
+ len += scnprintf(buf + len, buf_len - len, "eirp_power = %u\n",
+ le32_to_cpu(htt_stats_buf->eirp_power));
+ len += scnprintf(buf + len, buf_len - len, "power_type_6ghz = %u\n",
+ le32_to_cpu(htt_stats_buf->power_type_6ghz));
+ len += print_array_to_buf(buf, len, "max_reg_allowed_power",
+ htt_stats_buf->max_reg_allowed_power,
+ ATH12K_HTT_STATS_MAX_CHAINS, "\n");
+ len += print_array_to_buf(buf, len, "max_reg_allowed_power_6ghz",
+ htt_stats_buf->max_reg_allowed_power_6ghz,
+ ATH12K_HTT_STATS_MAX_CHAINS, "\n");
+ len += print_array_to_buf(buf, len, "sub_band_cfreq",
+ htt_stats_buf->sub_band_cfreq,
+ ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n");
+ len += print_array_to_buf(buf, len, "sub_band_txpower",
+ htt_stats_buf->sub_band_txpower,
+ ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_soc_txrx_stats_common_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_t2h_soc_txrx_stats_common_tlv *htt_stats_buf = tag_buf;
+ u64 drop_count;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ drop_count = ath12k_le32hilo_to_u64(htt_stats_buf->inv_peers_msdu_drop_count_hi,
+ htt_stats_buf->inv_peers_msdu_drop_count_lo);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SOC_COMMON_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "soc_drop_count = %llu\n\n",
+ drop_count);
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_per_rate_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_per_rate_stats_tlv *stats_buf = tag_buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 ru_size_cnt = 0;
+ u32 rc_mode, ru_type;
+ u8 *buf = stats_req->buf, i;
+ const char *mode_prefix;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ rc_mode = le32_to_cpu(stats_buf->rc_mode);
+ ru_type = le32_to_cpu(stats_buf->ru_type);
+
+ switch (rc_mode) {
+ case ATH12K_HTT_STATS_RC_MODE_DLSU:
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PER_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_SU:\n");
+ mode_prefix = "su";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_DLMUMIMO:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_MUMIMO:\n");
+ mode_prefix = "mu";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_DLOFDMA:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_OFDMA:\n");
+ mode_prefix = "ofdma";
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_ULMUMIMO:
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PER_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_MUMIMO:\n");
+ mode_prefix = "ulmu";
+ break;
+ case ATH12K_HTT_STATS_RC_MODE_ULOFDMA:
+ len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_OFDMA:\n");
+ mode_prefix = "ulofdma";
+ if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
+ else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+ ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
+ break;
+ default:
+ return;
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per BW:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.ppdus_tried));
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.ppdus_ack_failed));
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_bw[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.mpdus_tried));
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u", i,
+ le32_to_cpu(stats_buf->per_bw[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+ le32_to_cpu(stats_buf->per_bw320.mpdus_failed));
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per NSS:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+ le32_to_cpu(stats_buf->per_nss[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER per MCS:\n");
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+ len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+ for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+ le32_to_cpu(stats_buf->per_mcs[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if ((rc_mode == ATH12K_HTT_STATS_RC_MODE_DLOFDMA ||
+ rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA) &&
+ ru_type != ATH12K_HTT_STATS_RU_TYPE_INVALID) {
+ len += scnprintf(buf + len, buf_len - len, "\nPER per RU:\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
+ len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+ mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].ppdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
+ len += scnprintf(buf + len, buf_len - len,
+ "non_data_ppdus_%s = ", mode_prefix);
+ else
+ len += scnprintf(buf + len, buf_len - len,
+ "ppdus_ack_failed_%s = ", mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].ppdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].mpdus_tried));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ",
+ mode_prefix);
+ for (i = 0; i < ru_size_cnt; i++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_tx_ru_size_to_str(ru_type, i),
+ le32_to_cpu(stats_buf->ru[i].mpdus_failed));
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+ }
+
+ if (rc_mode == ATH12K_HTT_STATS_RC_MODE_DLMUMIMO) {
+ len += scnprintf(buf + len, buf_len - len, "\nlast_probed_bw = %u\n",
+ le32_to_cpu(stats_buf->last_probed_bw));
+ len += scnprintf(buf + len, buf_len - len, "last_probed_nss = %u\n",
+ le32_to_cpu(stats_buf->last_probed_nss));
+ len += scnprintf(buf + len, buf_len - len, "last_probed_mcs = %u\n",
+ le32_to_cpu(stats_buf->last_probed_mcs));
+ len += print_array_to_buf(buf, len, "MU Probe count per RC MODE",
+ stats_buf->probe_cnt,
+ ATH12K_HTT_RC_MODE_2D_COUNT, "\n\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
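The printer repeats the same UL-vs-DL label choice for every counter family (UL modes report data/non-data PPDUs, DL modes report tried/ack-failed PPDUs). A possible factoring, not in the driver, would pick the label once:

	/* hypothetical helper mirroring the if/else pairs above */
	static const char *example_ppdu_label(u32 rc_mode, bool failed)
	{
		bool ul = rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
			  rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO;

		if (ul)
			return failed ? "non_data_ppdus" : "data_ppdus";

		return failed ? "ppdus_ack_failed" : "ppdus_tried";
	}
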
static void
ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
@@ -2561,7 +3162,6 @@ ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len,
time = ath12k_le32hilo_to_u64(htt_stats_buf->reset_time_hi_ms,
htt_stats_buf->reset_time_lo_ms);
len += scnprintf(buf + len, buf_len - len, "reset_time_ms = %llu\n", time);
-
time = ath12k_le32hilo_to_u64(htt_stats_buf->disengage_time_hi_ms,
htt_stats_buf->disengage_time_lo_ms);
len += scnprintf(buf + len, buf_len - len, "disengage_time_ms = %llu\n", time);
@@ -2680,7 +3280,7 @@ ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(const void *tag_buf, u16 tag_le
len += scnprintf(buf + len, buf_len - len, "\n");
len += print_array_to_buf_index(buf, len, "be_ofdma_tx_nss = ", 1,
htt_stats_buf->be_ofdma_tx_nss,
- ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+ ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS,
"\n");
len += print_array_to_buf(buf, len, "be_ofdma_tx_bw",
htt_stats_buf->be_ofdma_tx_bw,
@@ -2696,6 +3296,45 @@ ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(const void *tag_buf, u16 tag_le
stats_req->buf_len = len;
}
+static void
+ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_MBSSID_CTRL_FRAME_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "basic_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->basic_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "basic_trigger_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->basic_trigger_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "bsr_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->bsr_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "bsr_trigger_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->bsr_trigger_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "mu_rts_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_rts_across_bss));
+ len += scnprintf(buf + len, buf_len - len, "mu_rts_within_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_rts_within_bss));
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_trigger_across_bss = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_across_bss));
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_mumimo_trigger_within_bss = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_within_bss));
+
+ stats_req->buf_len = len;
+}
+
static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -2869,6 +3508,30 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_PDEV_OBSS_PD_TAG:
ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_DLPAGER_STATS_TAG:
+ ath12k_htt_print_dlpager_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_STATS_TAG:
+ ath12k_htt_print_phy_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_COUNTERS_TAG:
+ ath12k_htt_print_phy_counters_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_RESET_STATS_TAG:
+ ath12k_htt_print_phy_reset_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_RESET_COUNTERS_TAG:
+ ath12k_htt_print_phy_reset_counters_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_TPC_STATS_TAG:
+ ath12k_htt_print_phy_tpc_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
+ ath12k_htt_print_soc_txrx_stats_common_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PER_RATE_STATS_TAG:
+ ath12k_htt_print_tx_per_rate_stats_tlv(tag_buf, len, stats_req);
+ break;
case HTT_STATS_DMAC_RESET_STATS_TAG:
ath12k_htt_print_dmac_reset_stats_tlv(tag_buf, len, stats_req);
break;
@@ -2878,6 +3541,10 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG:
ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG:
+ ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(tag_buf, len,
+ stats_req);
+ break;
default:
break;
}
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
index ac86cab234ec..cf3c88f8d1b2 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -135,9 +135,14 @@ enum ath12k_dbg_htt_ext_stats_type {
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
+ ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36,
+ ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
+ ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38,
+ ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40,
ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45,
ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49,
ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51,
+ ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54,
/* keep this last */
ATH12K_DBG_HTT_NUM_EXT_STATS,
@@ -194,6 +199,13 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102,
HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111,
HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112,
+ HTT_STATS_DLPAGER_STATS_TAG = 120,
+ HTT_STATS_PHY_COUNTERS_TAG = 121,
+ HTT_STATS_PHY_STATS_TAG = 122,
+ HTT_STATS_PHY_RESET_COUNTERS_TAG = 123,
+ HTT_STATS_PHY_RESET_STATS_TAG = 124,
+ HTT_STATS_SOC_TXRX_STATS_COMMON_TAG = 125,
+ HTT_STATS_PER_RATE_STATS_TAG = 128,
HTT_STATS_MU_PPDU_DIST_TAG = 129,
HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG = 130,
HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG = 135,
@@ -201,7 +213,9 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_TX_SELFGEN_BE_STATS_TAG = 138,
HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG = 139,
HTT_STATS_DMAC_RESET_STATS_TAG = 155,
+ HTT_STATS_PHY_TPC_STATS_TAG = 157,
HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG = 165,
+ HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG = 176,
HTT_STATS_MAX_TAG,
};
@@ -1054,6 +1068,132 @@ struct ath12k_htt_pdev_obss_pd_stats_tlv {
__le32 num_sr_ppdu_abort_flush_cnt;
} __packed;
+enum ath12k_htt_stats_page_lock_state {
+ ATH12K_HTT_STATS_PAGE_LOCKED = 0,
+ ATH12K_HTT_STATS_PAGE_UNLOCKED = 1,
+ ATH12K_NUM_PG_LOCK_STATE
+};
+
+#define ATH12K_PAGER_MAX 10
+
+#define ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0 GENMASK(7, 0)
+#define ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0 GENMASK(15, 8)
+#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1 GENMASK(15, 0)
+#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1 GENMASK(31, 16)
+#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2 GENMASK(15, 0)
+#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2 GENMASK(31, 16)
+
+struct ath12k_htt_pgs_info {
+ __le32 page_num;
+ __le32 num_pgs;
+ __le32 ts_lsb;
+ __le32 ts_msb;
+} __packed;
+
+struct ath12k_htt_dl_pager_stats_tlv {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ struct ath12k_htt_pgs_info pgs_info[ATH12K_NUM_PG_LOCK_STATE][ATH12K_PAGER_MAX];
+} __packed;
+
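The info words pack several counters per 32 bits; le32_get_bits() with the masks above recovers them, as the printer in debugfs_htt_stats.c does. A decode sketch over a received TLV (the wrapper function is hypothetical):

	static void
	example_decode_dlpager(const struct ath12k_htt_dl_pager_stats_tlv *tlv)
	{
		u32 async_locked, total_locked;

		/* low byte of info0 carries the async-locked page count */
		async_locked = le32_get_bits(tlv->info0,
					     ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0);
		/* low half of info1 carries the total locked page count */
		total_locked = le32_get_bits(tlv->info1,
					     ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1);
		(void)async_locked;
		(void)total_locked;
	}
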
+#define ATH12K_HTT_STATS_MAX_CHAINS 8
+#define ATH12K_HTT_MAX_RX_PKT_CNT 8
+#define ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT 8
+#define ATH12K_HTT_MAX_PER_BLK_ERR_CNT 20
+#define ATH12K_HTT_MAX_RX_OTA_ERR_CNT 14
+#define ATH12K_HTT_MAX_CH_PWR_INFO_SIZE 16
+
+struct ath12k_htt_phy_stats_tlv {
+ a_sle32 nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
+ __le32 false_radar_cnt;
+ __le32 radar_cs_cnt;
+ a_sle32 ani_level;
+ __le32 fw_run_time;
+ a_sle32 runtime_nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
+} __packed;
+
+struct ath12k_htt_phy_counters_tlv {
+ __le32 rx_ofdma_timing_err_cnt;
+ __le32 rx_cck_fail_cnt;
+ __le32 mactx_abort_cnt;
+ __le32 macrx_abort_cnt;
+ __le32 phytx_abort_cnt;
+ __le32 phyrx_abort_cnt;
+ __le32 phyrx_defer_abort_cnt;
+ __le32 rx_gain_adj_lstf_event_cnt;
+ __le32 rx_gain_adj_non_legacy_cnt;
+ __le32 rx_pkt_cnt[ATH12K_HTT_MAX_RX_PKT_CNT];
+ __le32 rx_pkt_crc_pass_cnt[ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT];
+ __le32 per_blk_err_cnt[ATH12K_HTT_MAX_PER_BLK_ERR_CNT];
+ __le32 rx_ota_err_cnt[ATH12K_HTT_MAX_RX_OTA_ERR_CNT];
+} __packed;
+
+struct ath12k_htt_phy_reset_stats_tlv {
+ __le32 pdev_id;
+ __le32 chan_mhz;
+ __le32 chan_band_center_freq1;
+ __le32 chan_band_center_freq2;
+ __le32 chan_phy_mode;
+ __le32 chan_flags;
+ __le32 chan_num;
+ __le32 reset_cause;
+ __le32 prev_reset_cause;
+ __le32 phy_warm_reset_src;
+ __le32 rx_gain_tbl_mode;
+ __le32 xbar_val;
+ __le32 force_calibration;
+ __le32 phyrf_mode;
+ __le32 phy_homechan;
+ __le32 phy_tx_ch_mask;
+ __le32 phy_rx_ch_mask;
+ __le32 phybb_ini_mask;
+ __le32 phyrf_ini_mask;
+ __le32 phy_dfs_en_mask;
+ __le32 phy_sscan_en_mask;
+ __le32 phy_synth_sel_mask;
+ __le32 phy_adfs_freq;
+ __le32 cck_fir_settings;
+ __le32 phy_dyn_pri_chan;
+ __le32 cca_thresh;
+ __le32 dyn_cca_status;
+ __le32 rxdesense_thresh_hw;
+ __le32 rxdesense_thresh_sw;
+} __packed;
+
+struct ath12k_htt_phy_reset_counters_tlv {
+ __le32 pdev_id;
+ __le32 cf_active_low_fail_cnt;
+ __le32 cf_active_low_pass_cnt;
+ __le32 phy_off_through_vreg_cnt;
+ __le32 force_calibration_cnt;
+ __le32 rf_mode_switch_phy_off_cnt;
+ __le32 temperature_recal_cnt;
+} __packed;
+
+struct ath12k_htt_phy_tpc_stats_tlv {
+ __le32 pdev_id;
+ __le32 tx_power_scale;
+ __le32 tx_power_scale_db;
+ __le32 min_negative_tx_power;
+ __le32 reg_ctl_domain;
+ __le32 max_reg_allowed_power[ATH12K_HTT_STATS_MAX_CHAINS];
+ __le32 max_reg_allowed_power_6ghz[ATH12K_HTT_STATS_MAX_CHAINS];
+ __le32 twice_max_rd_power;
+ __le32 max_tx_power;
+ __le32 home_max_tx_power;
+ __le32 psd_power;
+ __le32 eirp_power;
+ __le32 power_type_6ghz;
+ __le32 sub_band_cfreq[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
+ __le32 sub_band_txpower[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
+} __packed;
+
+struct ath12k_htt_t2h_soc_txrx_stats_common_tlv {
+ __le32 inv_peers_msdu_drop_count_hi;
+ __le32 inv_peers_msdu_drop_count_lo;
+} __packed;
+
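Note the hi word precedes the lo word in this TLV; the printer combines the pair with ath12k_le32hilo_to_u64(). For reference, a sketch of the combine (the driver's helper is assumed to be equivalent):

	static inline u64 example_le32hilo_to_u64(__le32 hi, __le32 lo)
	{
		return ((u64)le32_to_cpu(hi) << 32) | le32_to_cpu(lo);
	}
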
struct ath12k_htt_dmac_reset_stats_tlv {
__le32 reset_count;
__le32 reset_time_lo_ms;
@@ -1085,6 +1225,10 @@ struct ath12k_htt_pdev_sched_algo_ofdma_stats_tlv {
__le32 dlofdma_disabled_consec_no_mpdus_success[ATH12K_HTT_NUM_AC_WMM];
} __packed;
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS 4
+#define ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS 14
+
enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE {
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26,
ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52,
@@ -1105,7 +1249,65 @@ enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE {
ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS,
};
-#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+enum ATH12K_HTT_RC_MODE {
+ ATH12K_HTT_RC_MODE_SU_OL,
+ ATH12K_HTT_RC_MODE_SU_BF,
+ ATH12K_HTT_RC_MODE_MU1_INTF,
+ ATH12K_HTT_RC_MODE_MU2_INTF,
+ ATH12K_HTT_RC_MODE_MU3_INTF,
+ ATH12K_HTT_RC_MODE_MU4_INTF,
+ ATH12K_HTT_RC_MODE_MU5_INTF,
+ ATH12K_HTT_RC_MODE_MU6_INTF,
+ ATH12K_HTT_RC_MODE_MU7_INTF,
+ ATH12K_HTT_RC_MODE_2D_COUNT
+};
+
+enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE {
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2,
+ ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS
+};
+
+enum ath12k_htt_stats_rc_mode {
+ ATH12K_HTT_STATS_RC_MODE_DLSU = 0,
+ ATH12K_HTT_STATS_RC_MODE_DLMUMIMO = 1,
+ ATH12K_HTT_STATS_RC_MODE_DLOFDMA = 2,
+ ATH12K_HTT_STATS_RC_MODE_ULMUMIMO = 3,
+ ATH12K_HTT_STATS_RC_MODE_ULOFDMA = 4,
+};
+
+enum ath12k_htt_stats_ru_type {
+ ATH12K_HTT_STATS_RU_TYPE_INVALID,
+ ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY,
+ ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU,
+};
+
+struct ath12k_htt_tx_rate_stats {
+ __le32 ppdus_tried;
+ __le32 ppdus_ack_failed;
+ __le32 mpdus_tried;
+ __le32 mpdus_failed;
+} __packed;
+
+struct ath12k_htt_tx_per_rate_stats_tlv {
+ __le32 rc_mode;
+ __le32 last_probed_mcs;
+ __le32 last_probed_nss;
+ __le32 last_probed_bw;
+ struct ath12k_htt_tx_rate_stats per_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS];
+ struct ath12k_htt_tx_rate_stats per_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
+ struct ath12k_htt_tx_rate_stats per_mcs[ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS];
+ struct ath12k_htt_tx_rate_stats per_bw320;
+ __le32 probe_cnt[ATH12K_HTT_RC_MODE_2D_COUNT];
+ __le32 ru_type;
+ struct ath12k_htt_tx_rate_stats ru[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
+} __packed;
+
#define ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS 16
#define ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS 5
#define ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS 4
@@ -1115,11 +1317,23 @@ struct ath12k_htt_tx_pdev_rate_stats_be_ofdma_tlv {
__le32 mac_id__word;
__le32 be_ofdma_tx_ldpc;
__le32 be_ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
- __le32 be_ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 be_ofdma_tx_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
__le32 be_ofdma_tx_bw[ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS];
__le32 gi[ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS][ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
__le32 be_ofdma_tx_ru_size[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
__le32 be_ofdma_eht_sig_mcs[ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS];
} __packed;
+struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv {
+ __le32 mac_id__word;
+ __le32 basic_trigger_across_bss;
+ __le32 basic_trigger_within_bss;
+ __le32 bsr_trigger_across_bss;
+ __le32 bsr_trigger_within_bss;
+ __le32 mu_rts_across_bss;
+ __le32 mu_rts_within_bss;
+ __le32 ul_mumimo_trigger_across_bss;
+ __le32 ul_mumimo_trigger_within_bss;
+} __packed;
+
#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index c99e9ceb1a6e..68abe9d4ab45 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -41,6 +41,11 @@ void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
return;
}
+ if (!peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+
ath12k_dp_rx_peer_tid_cleanup(ar, peer);
crypto_free_shash(peer->tfm_mmic);
peer->dp_setup_done = false;
@@ -977,27 +982,23 @@ void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
int i;
+ if (!ab->mon_reap_timer.function)
+ return;
+
del_timer_sync(&ab->mon_reap_timer);
for (i = 0; i < ab->num_radios; i++)
ath12k_dp_rx_pdev_free(ab, i);
}
-void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
+void ath12k_dp_pdev_pre_alloc(struct ath12k *ar)
{
- struct ath12k *ar;
- struct ath12k_pdev_dp *dp;
- int i;
+ struct ath12k_pdev_dp *dp = &ar->dp;
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- dp = &ar->dp;
- dp->mac_id = i;
- atomic_set(&dp->num_tx_pending, 0);
- init_waitqueue_head(&dp->tx_empty_waitq);
-
- /* TODO: Add any RXDMA setup required per pdev */
- }
+ dp->mac_id = ar->pdev_idx;
+ atomic_set(&dp->num_tx_pending, 0);
+ init_waitqueue_head(&dp->tx_empty_waitq);
+ /* TODO: Add any RXDMA setup required per pdev */
}
bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
@@ -1260,15 +1261,23 @@ static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
if (!ab->hw_params->reoq_lut_support)
return;
- if (!dp->reoq_lut.vaddr)
- return;
-
- dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
- dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
- dp->reoq_lut.vaddr = NULL;
+ if (dp->reoq_lut.vaddr) {
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+ dp->reoq_lut.vaddr = NULL;
+ }
- ath12k_hif_write32(ab,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+ if (dp->ml_reoq_lut.vaddr) {
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_REO_REG +
+ HAL_REO1_QDESC_LUT_BASE1(ab), 0);
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->ml_reoq_lut.vaddr, dp->ml_reoq_lut.paddr);
+ dp->ml_reoq_lut.vaddr = NULL;
+ }
}
void ath12k_dp_free(struct ath12k_base *ab)
@@ -1276,6 +1285,9 @@ void ath12k_dp_free(struct ath12k_base *ab)
struct ath12k_dp *dp = &ab->dp;
int i;
+ if (!dp->ab)
+ return;
+
ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
@@ -1293,6 +1305,7 @@ void ath12k_dp_free(struct ath12k_base *ab)
ath12k_dp_rx_free(ab);
/* Deinit any SOC level resource */
+ dp->ab = NULL;
}
void ath12k_dp_cc_config(struct ath12k_base *ab)
@@ -1594,8 +1607,23 @@ static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
return -ENOMEM;
}
+ dp->ml_reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
+ DP_REOQ_LUT_SIZE,
+ &dp->ml_reoq_lut.paddr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!dp->ml_reoq_lut.vaddr) {
+ ath12k_warn(ab, "failed to allocate memory for ML reoq table");
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+ dp->reoq_lut.vaddr = NULL;
+ return -ENOMEM;
+ }
+
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
dp->reoq_lut.paddr);
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
+ dp->ml_reoq_lut.paddr >> 8);
+
return 0;
}
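
Each peer owns IEEE80211_NUM_TIDS + 1 consecutive entries in a LUT, and the dp_rx changes below pick either the per-device or the new ML LUT before indexing. The addressing, as a sketch (the helper itself is hypothetical):

	/* queue-ref addressing relied on by the dp_rx hunks below */
	static inline struct ath12k_reo_queue_ref *
	example_reoq_entry(struct ath12k_reo_queue_ref *lut_base,
			   u16 peer_id, u8 tid)
	{
		return lut_base + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
	}
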
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 2e05fc19410e..021cd9e8ee1d 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -368,6 +368,7 @@ struct ath12k_dp {
struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
struct dp_rxdma_mon_ring tx_mon_buf_ring;
struct ath12k_reo_q_addr_lut reoq_lut;
+ struct ath12k_reo_q_addr_lut ml_reoq_lut;
};
/* HTT definitions */
@@ -1805,7 +1806,7 @@ void ath12k_dp_free(struct ath12k_base *ab);
int ath12k_dp_alloc(struct ath12k_base *ab);
void ath12k_dp_cc_config(struct ath12k_base *ab);
int ath12k_dp_pdev_alloc(struct ath12k_base *ab);
-void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab);
+void ath12k_dp_pdev_pre_alloc(struct ath12k *ar);
void ath12k_dp_pdev_free(struct ath12k_base *ab);
int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type);
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 9ae579e50557..b24d1de4aabb 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -740,15 +740,22 @@ static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
- /* TODO: based on ML peer or not, select the LUT. below assumes non
- * ML peer
- */
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(lower_32_bits(paddr),
BUFFER_ADDR_INFO0_ADDR);
@@ -761,15 +768,22 @@ static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u
{
struct ath12k_reo_queue_ref *qref;
struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
if (!ab->hw_params->reoq_lut_support)
return;
- /* TODO: based on ML peer or not, select the LUT. below assumes non
- * ML peer
- */
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
@@ -802,7 +816,10 @@ void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
rx_tid->vaddr = NULL;
}
- ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
+ if (peer->mlo)
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
+ else
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
rx_tid->active = false;
}
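Both qref helpers above use the same addressing scheme: the ML-valid flag bit is stripped from peer_id, and the remainder indexes an array of (IEEE80211_NUM_TIDS + 1) queue references per peer, in either the ML LUT or the per-link LUT. A standalone sketch of the index math; the flag's bit position here is an assumption for illustration (the real value is ATH12K_PEER_ML_ID_VALID in the driver headers):

    #include <stdbool.h>
    #include <stdio.h>

    #define IEEE80211_NUM_TIDS  16
    /* Illustrative only: the real flag value comes from the driver headers. */
    #define PEER_ML_ID_VALID    (1u << 13)

    /* Compute the REO queue reference slot for (peer_id, tid) and report
     * whether the ML LUT or the per-link LUT should be used. */
    static unsigned int qref_index(unsigned int peer_id, unsigned int tid,
                                   bool *ml_peer)
    {
        *ml_peer = peer_id & PEER_ML_ID_VALID;
        peer_id &= ~PEER_ML_ID_VALID;

        return peer_id * (IEEE80211_NUM_TIDS + 1) + tid;
    }

    int main(void)
    {
        bool ml;
        unsigned int idx = qref_index(PEER_ML_ID_VALID | 2, 5, &ml);

        printf("slot %u in %s LUT\n", idx, ml ? "ML" : "per-link");
        return 0;
    }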
@@ -940,7 +957,13 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
return -ENOENT;
}
- if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
+ if (!peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ return 0;
+ }
+
+ if (ab->hw_params->reoq_lut_support &&
+ (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "reo qref table is not setup\n");
return -EINVAL;
@@ -1021,7 +1044,11 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
/* Update the REO queue LUT at the corresponding peer id
* and tid with qaddr.
*/
- ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+ if (peer->mlo)
+ ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid, paddr);
+ else
+ ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+
spin_unlock_bh(&ab->base_lock);
} else {
spin_unlock_bh(&ab->base_lock);
@@ -1038,15 +1065,25 @@ err_mem_free:
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
- struct ieee80211_ampdu_params *params)
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
- struct ath12k_link_sta *arsta = &ahsta->deflink;
- int vdev_id = arsta->arvif->vdev_id;
+ struct ath12k_link_sta *arsta;
+ int vdev_id;
int ret;
- ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+ if (!arsta)
+ return -ENOLINK;
+
+ vdev_id = arsta->arvif->vdev_id;
+
+ ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
params->tid, params->buf_size,
params->ssn, arsta->ahsta->pn_type);
if (ret)
@@ -1056,19 +1093,29 @@ int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
}
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
- struct ieee80211_ampdu_params *params)
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
- struct ath12k_link_sta *arsta = &ahsta->deflink;
- int vdev_id = arsta->arvif->vdev_id;
+ struct ath12k_link_sta *arsta;
+ int vdev_id;
bool active;
int ret;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+ if (!arsta)
+ return -ENOLINK;
+
+ vdev_id = arsta->arvif->vdev_id;
+
spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
+ peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
@@ -2781,6 +2828,11 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
return -ENOENT;
}
+ if (!peer->primary_link) {
+ spin_unlock_bh(&ab->base_lock);
+ return 0;
+ }
+
for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
rx_tid = &peer->rx_tid[i];
rx_tid->ab = ab;
@@ -3912,7 +3964,7 @@ void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
ath12k_hal_srng_access_begin(ab, srng);
while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
- tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
+ tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
switch (tag) {
case HAL_REO_GET_QUEUE_STATS_STATUS:
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index bfd4f814553e..1ce82088c954 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -85,9 +85,11 @@ static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
}
int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
- struct ieee80211_ampdu_params *params);
+ struct ieee80211_ampdu_params *params,
+ u8 link_id);
int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
- struct ieee80211_ampdu_params *params);
+ struct ieee80211_ampdu_params *params,
+ u8 link_id);
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 739f73370015..a460d432288f 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -581,7 +581,7 @@ struct hal_tlv_hdr {
#define HAL_TLV_64_HDR_LEN GENMASK(21, 10)
struct hal_tlv_64_hdr {
- u64 tl;
+ __le64 tl;
u8 value[];
} __packed;
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
index f7c1aaa3b5d4..ac17d6223fa7 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -26,8 +26,8 @@ static int ath12k_hal_reo_cmd_queue_stats(struct hal_tlv_64_hdr *tlv,
{
struct hal_reo_get_queue_stats *desc;
- tlv->tl = u32_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
- u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+ tlv->tl = le64_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
+ le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_get_queue_stats *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);
@@ -59,8 +59,8 @@ static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
hal->current_blk_index = avail_slot;
}
- tlv->tl = u32_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
- u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+ tlv->tl = le64_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
+ le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_flush_cache *)tlv->value;
memset_startat(desc, 0, cache_addr_lo);
@@ -97,8 +97,8 @@ static int ath12k_hal_reo_cmd_update_rx_queue(struct hal_tlv_64_hdr *tlv,
{
struct hal_reo_update_rx_queue *desc;
- tlv->tl = u32_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
- u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+ tlv->tl = le64_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
+ le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
desc = (struct hal_reo_update_rx_queue *)tlv->value;
memset_startat(desc, 0, queue_addr_lo);
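With hal_tlv_64_hdr.tl now __le64, the tag and length are packed with le64_encode_bits(), which shifts the value into the field mask and converts the result to little-endian. A host-order sketch of the encode/decode arithmetic; the masks mirror GENMASK(9, 1) and GENMASK(25, 10) but are illustrative, and the byte swap done by the kernel helper is omitted:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative masks; the driver uses the HAL_TLV_HDR_* GENMASKs. */
    #define TLV_TAG_MASK    0x00000000000003feULL   /* GENMASK(9, 1) */
    #define TLV_LEN_MASK    0x0000000003fffc00ULL   /* GENMASK(25, 10) */

    /* Shift a value into the bitfield described by mask. */
    static uint64_t encode_bits(uint64_t val, uint64_t mask)
    {
        return (val << __builtin_ctzll(mask)) & mask;
    }

    static uint64_t get_bits(uint64_t word, uint64_t mask)
    {
        return (word & mask) >> __builtin_ctzll(mask);
    }

    int main(void)
    {
        uint64_t tl = encode_bits(0x16, TLV_TAG_MASK) |  /* sample tag */
                      encode_bits(0x40, TLV_LEN_MASK);   /* sample length */

        printf("tag=0x%llx len=0x%llx\n",
               (unsigned long long)get_bits(tl, TLV_TAG_MASK),
               (unsigned long long)get_bits(tl, TLV_LEN_MASK));
        return 0;
    }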
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index d493ec812055..c4eab4c1c10e 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -501,6 +501,41 @@ static int ath12k_mac_vif_link_chan(struct ieee80211_vif *vif, u8 link_id,
return 0;
}
+static struct ieee80211_bss_conf *
+ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif)
+{
+ struct ieee80211_vif *vif = arvif->ahvif->vif;
+ struct ieee80211_bss_conf *link_conf;
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return NULL;
+
+ link_conf = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ vif->link_conf[arvif->link_id]);
+
+ return link_conf;
+}
+
+static struct ieee80211_link_sta *ath12k_mac_get_link_sta(struct ath12k_link_sta *arsta)
+{
+ struct ath12k_sta *ahsta = arsta->ahsta;
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ieee80211_link_sta *link_sta;
+
+ lockdep_assert_wiphy(ahsta->ahvif->ah->hw->wiphy);
+
+ if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return NULL;
+
+ link_sta = wiphy_dereference(ahsta->ahvif->ah->hw->wiphy,
+ sta->link[arsta->link_id]);
+
+ return link_sta;
+}
+
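Both accessors follow one discipline: assert the wiphy lock, bounds-check the link id against IEEE80211_MLD_MAX_NUM_LINKS, then dereference the protected slot, returning NULL rather than indexing out of range. A plain-C sketch of that lookup shape, with an ordinary array standing in for the wiphy-protected RCU pointers:

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_NUM_LINKS   15

    struct link_conf { int id; };

    struct vif {
        /* In the driver these slots are RCU pointers protected by the
         * wiphy lock; a plain array stands in for them here. */
        struct link_conf *link_conf[MAX_NUM_LINKS];
    };

    static struct link_conf *get_link_conf(struct vif *vif, unsigned int link_id)
    {
        if (link_id >= MAX_NUM_LINKS)
            return NULL;    /* out-of-range ids yield NULL, not UB */

        return vif->link_conf[link_id];
    }

    int main(void)
    {
        struct link_conf lc = { .id = 1 };
        struct vif v = { .link_conf = { [1] = &lc } };

        printf("link 1: %p, link 14: %p\n",
               (void *)get_link_conf(&v, 1), (void *)get_link_conf(&v, 14));
        return 0;
    }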
static bool ath12k_mac_bitrate_is_cck(int bitrate)
{
switch (bitrate) {
@@ -648,6 +683,18 @@ struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id)
return NULL;
}
+static bool ath12k_mac_is_ml_arvif(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+
+ lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
+
+ if (ahvif->vif->valid_links & BIT(arvif->link_id))
+ return true;
+
+ return false;
+}
+
static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw,
struct ieee80211_channel *channel)
{
@@ -678,11 +725,14 @@ static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw,
}
static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ u8 link_id)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_link_vif *arvif = &ahvif->deflink;
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(hw->wiphy);
/* If there is one pdev within ah, then we return
* ar directly.
@@ -690,7 +740,11 @@ static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
if (ah->num_radio == 1)
return ah->radio;
- if (arvif->is_created)
+ if (!(ahvif->links_map & BIT(link_id)))
+ return NULL;
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (arvif && arvif->is_created)
return arvif->ar;
return NULL;
@@ -1239,7 +1293,7 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar)
return ret;
}
-static int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif)
+int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif)
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ath12k *ar = arvif->ar;
@@ -1486,7 +1540,7 @@ static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif, struct sk_bu
static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
{
struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_bss_conf *bss_conf = &ahvif->vif->bss_conf;
+ struct ieee80211_bss_conf *bss_conf;
struct ath12k_wmi_bcn_tmpl_ema_arg ema_args;
struct ieee80211_ema_beacons *beacons;
struct ath12k_link_vif *tx_arvif;
@@ -1495,10 +1549,19 @@ static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
int ret = 0;
u8 i;
+ bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!bss_conf) {
+ ath12k_warn(arvif->ar->ab,
+ "failed to get link bss conf to update bcn tmpl for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
tx_ahvif = ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif);
tx_arvif = &tx_ahvif->deflink;
beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar),
- tx_ahvif->vif, 0);
+ tx_ahvif->vif,
+ tx_arvif->link_id);
if (!beacons || !beacons->cnt) {
ath12k_warn(arvif->ar->ab,
"failed to get ema beacon templates from mac80211\n");
@@ -1540,6 +1603,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ieee80211_bss_conf *link_conf;
struct ath12k_link_vif *tx_arvif = arvif;
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
@@ -1552,18 +1616,25 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
if (ahvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf to set bcn tmpl for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
if (vif->mbssid_tx_vif) {
tx_ahvif = ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
tx_arvif = &tx_ahvif->deflink;
if (tx_arvif != arvif && arvif->is_up)
return 0;
- if (vif->bss_conf.ema_ap)
+ if (link_conf->ema_ap)
return ath12k_mac_setup_bcn_tmpl_ema(arvif);
}
bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_ahvif->vif,
- &offs, 0);
+ &offs, tx_arvif->link_id);
if (!bcn) {
ath12k_warn(ab, "failed to get beacon template from mac80211\n");
return -EPERM;
@@ -1573,7 +1644,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
ath12k_mac_set_arvif_ies(arvif, bcn, 0, NULL);
} else {
ath12k_mac_set_arvif_ies(arvif, bcn,
- ahvif->vif->bss_conf.bssid_index,
+ link_conf->bssid_index,
&nontx_profile_found);
if (!nontx_profile_found)
ath12k_warn(ab,
@@ -1644,7 +1715,7 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
ahvif->aid = 0;
- ether_addr_copy(arvif->bssid, info->bssid);
+ ether_addr_copy(arvif->bssid, info->addr);
params.vdev_id = arvif->vdev_id;
params.aid = ahvif->aid;
@@ -1749,6 +1820,7 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+ struct ieee80211_bss_conf *bss_conf;
u32 aid;
lockdep_assert_wiphy(hw->wiphy);
@@ -1758,14 +1830,22 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
else
aid = sta->aid;
- ether_addr_copy(arg->peer_mac, sta->addr);
+ ether_addr_copy(arg->peer_mac, arsta->addr);
arg->vdev_id = arvif->vdev_id;
arg->peer_associd = aid;
arg->auth_flag = true;
/* TODO: STA WAR in ath10k for listen interval required? */
arg->peer_listen_intval = hw->conf.listen_interval;
arg->peer_nss = 1;
- arg->peer_caps = vif->bss_conf.assoc_capability;
+
+ bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!bss_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
+ arg->peer_caps = bss_conf->assoc_capability;
}
static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
@@ -1775,7 +1855,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct ieee80211_bss_conf *info;
struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
@@ -1784,6 +1864,13 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
lockdep_assert_wiphy(hw->wiphy);
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info) {
+ ath12k_warn(ar->ab, "unable to access bss link conf for peer assoc crypto for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
@@ -1839,6 +1926,7 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
@@ -1853,9 +1941,16 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc rates for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
band = def.chan->band;
sband = hw->wiphy->bands[band];
- ratemask = sta->deflink.supp_rates[band];
+ ratemask = link_sta->supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
@@ -1902,7 +1997,8 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
@@ -1915,6 +2011,14 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc ht for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ ht_cap = &link_sta->ht_cap;
if (!ht_cap->ht_supported)
return;
@@ -1938,7 +2042,7 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
arg->ldpc_flag = true;
- if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
+ if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
arg->bw_40 = true;
arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
}
@@ -1988,7 +2092,7 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
- arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->peer_nss = min(link_sta->rx_nss, max_nss);
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -2064,7 +2168,8 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u16 *vht_mcs_mask;
@@ -2078,6 +2183,14 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc vht for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ vht_cap = &link_sta->vht_cap;
if (!vht_cap->vht_supported)
return;
@@ -2110,10 +2223,10 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
(1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
/* Calculate peer NSS capability from VHT capabilities if STA
@@ -2127,7 +2240,7 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
vht_mcs_mask[i])
max_nss = i + 1;
}
- arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->peer_nss = min(link_sta->rx_nss, max_nss);
arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
@@ -2150,7 +2263,7 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
arg->tx_max_mcs_nss = 0xFF;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
- sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+ arsta->addr, arg->peer_max_mpdu, arg->peer_flags);
/* TODO: rxnss_override */
}
@@ -2162,7 +2275,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
int i;
u8 ampdu_factor, max_nss;
u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
@@ -2171,6 +2286,21 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
bool support_160;
u16 v;
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc he for vif %pM link %u",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ he_cap = &link_sta->he_cap;
if (!he_cap->has_he)
return;
@@ -2208,13 +2338,13 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
else
max_nss = rx_mcs_80;
- arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->peer_nss = min(link_sta->rx_nss, max_nss);
memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
sizeof(he_cap->he_cap_elem.mac_cap_info));
memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
sizeof(he_cap->he_cap_elem.phy_cap_info));
- arg->peer_he_ops = vif->bss_conf.he_oper.params;
+ arg->peer_he_ops = link_conf->he_oper.params;
/* the top most byte is used to indicate BSS color info */
arg->peer_he_ops &= 0xffffff;
@@ -2235,10 +2365,10 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
if (ampdu_factor) {
- if (sta->deflink.vht_cap.vht_supported)
+ if (link_sta->vht_cap.vht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
- else if (sta->deflink.ht_cap.ht_supported)
+ else if (link_sta->ht_cap.ht_supported)
arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1;
}
@@ -2279,7 +2409,7 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
arg->twt_requester = true;
- switch (sta->deflink.bandwidth) {
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
@@ -2319,7 +2449,8 @@ static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar,
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
u8 ampdu_factor, mpdu_density;
@@ -2329,22 +2460,31 @@ static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar,
band = def.chan->band;
- if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa)
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he 6ghz for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ he_cap = &link_sta->he_cap;
+
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !link_sta->he_6ghz_capa.capa)
return;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
arg->bw_40 = true;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320)
arg->bw_320 = true;
- arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa);
+ arg->peer_he_caps_6ghz = le16_to_cpu(link_sta->he_6ghz_capa.capa);
mpdu_density = u32_get_bits(arg->peer_he_caps_6ghz,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
@@ -2388,10 +2528,23 @@ static void ath12k_peer_assoc_h_smps(struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_he_6ghz_capa *he_6ghz_capa = &sta->deflink.he_6ghz_capa;
- const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
+ struct ath12k_link_vif *arvif = arsta->arvif;
+ const struct ieee80211_sta_ht_cap *ht_cap;
+ struct ieee80211_link_sta *link_sta;
+ struct ath12k *ar = arvif->ar;
int smps;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ he_6ghz_capa = &link_sta->he_6ghz_capa;
+ ht_cap = &link_sta->ht_cap;
+
if (!ht_cap->ht_supported && !he_6ghz_capa->capa)
return;
@@ -2446,7 +2599,7 @@ static void ath12k_peer_assoc_h_qos(struct ath12k *ar,
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n",
- sta->addr, arg->qos_flag);
+ arsta->addr, arg->qos_flag);
}
static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
@@ -2486,26 +2639,26 @@ static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
arg.param = WMI_AP_PS_PEER_PARAM_UAPSD;
arg.value = uapsd;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
arg.value = max_sp;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
/* TODO: revisit during testing */
arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
- ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
if (ret)
goto err;
@@ -2517,17 +2670,17 @@ err:
return ret;
}
-static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_link_sta *sta)
{
- return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
+ return sta->supp_rates[NL80211_BAND_2GHZ] >>
ATH12K_MAC_FIRST_OFDM_RATE_IDX;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
- switch (sta->deflink.vht_cap.cap &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (link_sta->vht_cap.cap &
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
return MODE_11AC_VHT160;
@@ -2539,74 +2692,74 @@ static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
}
}
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AC_VHT80;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AC_VHT40;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AC_VHT20;
return MODE_UNKNOWN;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
- if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11AX_HE160;
- else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ else if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return MODE_11AX_HE80_80;
/* not sure if this is a valid case? */
return MODE_11AX_HE160;
}
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11AX_HE80;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11AX_HE40;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11AX_HE20;
return MODE_UNKNOWN;
}
static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
- if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[0] &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320)
+ if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[0] &
IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
return MODE_11BE_EHT320;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
- if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11BE_EHT160;
- if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
return MODE_11BE_EHT80_80;
ath12k_warn(ar->ab, "invalid EHT PHY capability info for 160 Mhz: %d\n",
- sta->deflink.he_cap.he_cap_elem.phy_cap_info[0]);
+ link_sta->he_cap.he_cap_elem.phy_cap_info[0]);
return MODE_11BE_EHT160;
}
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
return MODE_11BE_EHT80;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
return MODE_11BE_EHT40;
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
return MODE_11BE_EHT20;
return MODE_UNKNOWN;
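The EHT helper above is a widest-first ladder over link_sta->bandwidth, consulting the PHY capability bits at 320 MHz and 160 MHz before falling back to narrower modes. A condensed sketch of that decision order under the same assumptions (mode names abbreviated, capability bits reduced to booleans):

    #include <stdbool.h>
    #include <stdio.h>

    enum bw { BW_20, BW_40, BW_80, BW_160, BW_320 };

    /* Widest-first mapping from RX bandwidth to an EHT phymode string;
     * the booleans stand in for the PHY capability bits. */
    static const char *eht_phymode(enum bw bw, bool cap_320, bool cap_160,
                                   bool cap_80_80)
    {
        if (bw == BW_320 && cap_320)
            return "EHT320";
        if (bw == BW_160) {
            if (cap_160)
                return "EHT160";
            if (cap_80_80)
                return "EHT80_80";
            return "EHT160";    /* fallback, as the driver warns */
        }
        if (bw == BW_80)
            return "EHT80";
        if (bw == BW_40)
            return "EHT40";
        return "EHT20";
    }

    int main(void)
    {
        printf("%s\n", eht_phymode(BW_160, false, true, false)); /* EHT160 */
        printf("%s\n", eht_phymode(BW_320, true, true, false));  /* EHT320 */
        return 0;
    }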
@@ -2617,6 +2770,7 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
struct ath12k_link_sta *arsta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
+ struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
const u8 *ht_mcs_mask;
@@ -2635,33 +2789,40 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
switch (band) {
case NL80211_BAND_2GHZ:
- if (sta->deflink.eht_cap.has_eht) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->eht_cap.has_eht) {
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11BE_EHT40_2G;
else
phymode = MODE_11BE_EHT20_2G;
- } else if (sta->deflink.he_cap.has_he) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ } else if (link_sta->he_cap.has_he) {
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AX_HE80_2G;
- else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ else if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AX_HE40_2G;
else
phymode = MODE_11AX_HE20_2G;
- } else if (sta->deflink.vht_cap.vht_supported &&
+ } else if (link_sta->vht_cap.vht_supported &&
!ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else
phymode = MODE_11AC_VHT20;
- } else if (sta->deflink.ht_cap.ht_supported &&
+ } else if (link_sta->ht_cap.ht_supported &&
!ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
- } else if (ath12k_mac_sta_has_ofdm_only(sta)) {
+ } else if (ath12k_mac_sta_has_ofdm_only(link_sta)) {
phymode = MODE_11G;
} else {
phymode = MODE_11B;
@@ -2670,16 +2831,16 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
/* Check EHT first */
- if (sta->deflink.eht_cap.has_eht) {
- phymode = ath12k_mac_get_phymode_eht(ar, sta);
- } else if (sta->deflink.he_cap.has_he) {
- phymode = ath12k_mac_get_phymode_he(ar, sta);
- } else if (sta->deflink.vht_cap.vht_supported &&
+ if (link_sta->eht_cap.has_eht) {
+ phymode = ath12k_mac_get_phymode_eht(ar, link_sta);
+ } else if (link_sta->he_cap.has_he) {
+ phymode = ath12k_mac_get_phymode_he(ar, link_sta);
+ } else if (link_sta->vht_cap.vht_supported &&
!ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
- phymode = ath12k_mac_get_phymode_vht(ar, sta);
- } else if (sta->deflink.ht_cap.ht_supported &&
+ phymode = ath12k_mac_get_phymode_vht(ar, link_sta);
+ } else if (link_sta->ht_cap.ht_supported &&
!ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
- if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
+ if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
phymode = MODE_11NA_HT20;
@@ -2692,7 +2853,7 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n",
- sta->addr, ath12k_mac_phymode_str(phymode));
+ arsta->addr, ath12k_mac_phymode_str(phymode));
arg->peer_phymode = phymode;
WARN_ON(phymode == MODE_UNKNOWN);
@@ -2767,15 +2928,25 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- const struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap;
- const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20;
const struct ieee80211_eht_mcs_nss_supp_bw *bw;
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_link_sta *link_sta;
u32 *rx_mcs, *tx_mcs;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- if (!sta->deflink.he_cap.has_he || !eht_cap->has_eht)
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc eht for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ eht_cap = &link_sta->eht_cap;
+ he_cap = &link_sta->he_cap;
+ if (!he_cap->has_he || !eht_cap->has_eht)
return;
arg->eht_flag = true;
@@ -2794,7 +2965,7 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
rx_mcs = arg->peer_eht_rx_mcs_set;
tx_mcs = arg->peer_eht_tx_mcs_set;
- switch (sta->deflink.bandwidth) {
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_320:
bw = &eht_cap->eht_mcs_nss_supp.bw._320;
ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss,
@@ -2846,6 +3017,67 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
arg->punct_bitmap = ~arvif->punct_bitmap;
}
+static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct peer_assoc_mlo_params *ml = &arg->ml;
+ struct ath12k_sta *ahsta = arsta->ahsta;
+ struct ath12k_link_sta *arsta_p;
+ struct ath12k_link_vif *arvif;
+ unsigned long links;
+ u8 link_id;
+ int i;
+
+ if (!sta->mlo || ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID)
+ return;
+
+ ml->enabled = true;
+ ml->assoc_link = arsta->is_assoc_link;
+
+ /* For now the primary umac is chosen based on the assoc link */
+ ml->primary_umac = arsta->is_assoc_link;
+ ml->peer_id_valid = true;
+ ml->logical_link_idx_valid = true;
+
+ ether_addr_copy(ml->mld_addr, sta->addr);
+ ml->logical_link_idx = arsta->link_idx;
+ ml->ml_peer_id = ahsta->ml_peer_id;
+ ml->ieee_link_id = arsta->link_id;
+ ml->num_partner_links = 0;
+ links = ahsta->links_map;
+
+ rcu_read_lock();
+
+ i = 0;
+
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ if (i >= ATH12K_WMI_MLO_MAX_LINKS)
+ break;
+
+ arsta_p = rcu_dereference(ahsta->link[link_id]);
+ arvif = rcu_dereference(ahsta->ahvif->link[link_id]);
+
+ if (arsta_p == arsta)
+ continue;
+
+ if (!arvif || !arvif->is_started)
+ continue;
+
+ ml->partner_info[i].vdev_id = arvif->vdev_id;
+ ml->partner_info[i].hw_link_id = arvif->ar->pdev->hw_link_id;
+ ml->partner_info[i].assoc_link = arsta_p->is_assoc_link;
+ ml->partner_info[i].primary_umac = arsta_p->is_assoc_link;
+ ml->partner_info[i].logical_link_idx_valid = true;
+ ml->partner_info[i].logical_link_idx = arsta_p->link_idx;
+ ml->num_partner_links++;
+
+ i++;
+ }
+
+ rcu_read_unlock();
+}
+
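ath12k_peer_assoc_h_mlo() walks ahsta->links_map under RCU, skipping the link being associated and capping the partner list at the WMI maximum. A userspace sketch of the bitmap walk; the 15-link and 4-partner limits are illustrative stand-ins for IEEE80211_MLD_MAX_NUM_LINKS and ATH12K_WMI_MLO_MAX_LINKS:

    #include <stdio.h>

    #define MAX_NUM_LINKS       15
    #define MAX_PARTNER_LINKS   4   /* illustrative WMI cap */

    int main(void)
    {
        unsigned long links_map = 0x0b; /* links 0, 1 and 3 are active */
        unsigned int self = 1;          /* link currently being associated */
        unsigned int n_partners = 0;

        /* Open-coded for_each_set_bit(): visit every active link once. */
        for (unsigned int id = 0; id < MAX_NUM_LINKS; id++) {
            if (!(links_map & (1ul << id)) || id == self)
                continue;
            if (n_partners >= MAX_PARTNER_LINKS)
                break;

            printf("partner[%u] = link %u\n", n_partners, id);
            n_partners++;
        }

        printf("num_partner_links = %u\n", n_partners);
        return 0;
    }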
static void ath12k_peer_assoc_prepare(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta,
@@ -2870,6 +3102,7 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar,
ath12k_peer_assoc_h_qos(ar, arvif, arsta, arg);
ath12k_peer_assoc_h_phymode(ar, arvif, arsta, arg);
ath12k_peer_assoc_h_smps(arsta, arg);
+ ath12k_peer_assoc_h_mlo(arsta, arg);
/* TODO: amsdu_disable req? */
}
@@ -2900,7 +3133,9 @@ static void ath12k_bss_assoc(struct ath12k *ar,
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ath12k_wmi_vdev_up_params params = {};
- struct ath12k_wmi_peer_assoc_arg peer_arg;
+ struct ath12k_wmi_peer_assoc_arg peer_arg = {};
+ struct ieee80211_link_sta *link_sta;
+ u8 link_id = bss_conf->link_id;
struct ath12k_link_sta *arsta;
struct ieee80211_sta *ap_sta;
struct ath12k_sta *ahsta;
@@ -2910,27 +3145,38 @@ static void ath12k_bss_assoc(struct ath12k *ar,
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
- arvif->vdev_id, arvif->bssid, ahvif->aid);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac vdev %i link id %u assoc bssid %pM aid %d\n",
+ arvif->vdev_id, link_id, arvif->bssid, ahvif->aid);
rcu_read_lock();
- ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ /* During ML connection, cfg.ap_addr has the MLD address. For
+ * non-ML connection, it has the BSSID.
+ */
+ ap_sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
if (!ap_sta) {
ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
- bss_conf->bssid, arvif->vdev_id);
+ vif->cfg.ap_addr, arvif->vdev_id);
rcu_read_unlock();
return;
}
ahsta = ath12k_sta_to_ahsta(ap_sta);
- arsta = &ahsta->deflink;
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
if (WARN_ON(!arsta)) {
rcu_read_unlock();
return;
}
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (WARN_ON(!link_sta)) {
+ rcu_read_unlock();
+ return;
+ }
+
ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, false);
rcu_read_unlock();
@@ -2949,8 +3195,7 @@ static void ath12k_bss_assoc(struct ath12k *ar,
}
ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid,
- &ap_sta->deflink.ht_cap,
- &ap_sta->deflink.he_6ghz_capa);
+ &link_sta->ht_cap, &link_sta->he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3058,6 +3303,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
const struct ieee80211_supported_band *sband;
+ struct ieee80211_bss_conf *bss_conf;
u8 basic_rate_idx;
int hw_rate_code;
u32 vdev_param;
@@ -3066,8 +3312,15 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
lockdep_assert_wiphy(hw->wiphy);
+ bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!bss_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in mgmt rate calc for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
sband = hw->wiphy->bands[def->chan->band];
- basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ basic_rate_idx = ffs(bss_conf->basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate);
@@ -3151,6 +3404,7 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
unsigned long links = ahvif->links_map;
+ struct ieee80211_bss_conf *info;
struct ath12k_link_vif *arvif;
struct ath12k *ar;
u8 link_id;
@@ -3171,10 +3425,15 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
ar = arvif->ar;
- if (vif->cfg.assoc)
- ath12k_bss_assoc(ar, arvif, &vif->bss_conf);
- else
+ if (vif->cfg.assoc) {
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info)
+ continue;
+
+ ath12k_bss_assoc(ar, arvif, info);
+ } else {
ath12k_bss_disassoc(ar, arvif);
+ }
}
}
}
@@ -3185,6 +3444,7 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
struct ieee80211_vif *vif = arvif->ahvif->vif;
struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
enum wmi_sta_powersave_param param;
+ struct ieee80211_bss_conf *info;
enum wmi_sta_ps_mode psmode;
int ret;
int timeout;
@@ -3202,8 +3462,15 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
timeout = conf->dynamic_ps_timeout;
if (timeout == 0) {
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
/* firmware doesn't like 0 */
- timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
+ timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
}
ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
@@ -3314,8 +3581,8 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
if (changed & BSS_CHANGED_BEACON_ENABLED) {
ath12k_control_beaconing(arvif, info);
- if (arvif->is_up && vif->bss_conf.he_support &&
- vif->bss_conf.he_oper.params) {
+ if (arvif->is_up && info->he_support &&
+ info->he_oper.params) {
/* TODO: Extend to support 1024 BA Bitmap size */
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
WMI_VDEV_PARAM_BA_MODE,
@@ -3326,7 +3593,7 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
arvif->vdev_id);
param_id = WMI_VDEV_PARAM_HEOPS_0_31;
- param_value = vif->bss_conf.he_oper.params;
+ param_value = info->he_oper.params;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
@@ -3418,12 +3685,12 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
if (changed & BSS_CHANGED_MCAST_RATE &&
!ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)) {
band = def.chan->band;
- mcast_rate = vif->bss_conf.mcast_rate[band];
+ mcast_rate = info->mcast_rate[band];
if (mcast_rate > 0)
rateidx = mcast_rate - 1;
else
- rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+ rateidx = ffs(info->basic_rates) - 1;
if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
@@ -3537,6 +3804,9 @@ static void ath12k_ahvif_put_link_key_cache(struct ath12k_vif_cache *cache)
static void ath12k_ahvif_put_link_cache(struct ath12k_vif *ahvif, u8 link_id)
{
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return;
+
ath12k_ahvif_put_link_key_cache(ahvif->cache[link_id]);
kfree(ahvif->cache[link_id]);
ahvif->cache[link_id] = NULL;
@@ -3597,9 +3867,9 @@ static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
arvif = &ahvif->deflink;
} else {
/* If this is the first link arvif being created for an ML VIF
- * use the preallocated deflink memory
+ * use the preallocated deflink memory, except for scan arvifs
*/
- if (!ahvif->links_map) {
+ if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
arvif = &ahvif->deflink;
} else {
arvif = (struct ath12k_link_vif *)
@@ -3899,10 +4169,10 @@ ath12k_mac_find_link_id_by_ar(struct ath12k_vif *ahvif, struct ath12k *ar)
return link_id;
}
- /* input ar is not assigned to any of the links, use link id
- * 0 for scan vdev creation.
+ /* input ar is not assigned to any of the links of the ML VIF, use
+ * the scan link (15) for scan vdev creation.
*/
- return 0;
+ return ATH12K_DEFAULT_SCAN_LINK;
}
static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
@@ -3933,7 +4203,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
/* check if any of the links of ML VIF is already started on
* radio (ar) corresponding to the given scan frequency and use it,
- * if not use deflink(link 0) for scan purpose.
+ * if not, use the scan link (link 15) for the scan.
*/
link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar);
arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
@@ -4043,6 +4313,13 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
}
+ /* cfg80211/mac80211 allow only one scan at a time, so
+ * last_scan_link is used to track the link id on which the
+ * scan is being done on this vif.
+ */
+ ahvif->last_scan_link = arvif->link_id;
+
/* Add a margin to account for event/command processing */
ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
msecs_to_jiffies(arg->max_scan_time +
@@ -4062,14 +4339,14 @@ static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ u16 link_id = ahvif->last_scan_link;
struct ath12k_link_vif *arvif;
struct ath12k *ar;
lockdep_assert_wiphy(hw->wiphy);
- arvif = &ahvif->deflink;
-
- if (!arvif->is_created)
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!arvif || !arvif->is_created)
return;
ar = arvif->ar;
@@ -4203,6 +4480,7 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ieee80211_bss_conf *link_conf;
struct ieee80211_sta *sta = NULL;
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
@@ -4219,12 +4497,19 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
return 1;
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ab, "unable to access bss link conf in set key for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
if (sta)
- peer_addr = sta->addr;
+ peer_addr = arsta->addr;
else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
- peer_addr = vif->bss_conf.bssid;
+ peer_addr = link_conf->bssid;
else
- peer_addr = vif->addr;
+ peer_addr = link_conf->addr;
key->hw_key_idx = key->keyidx;
@@ -4372,6 +4657,7 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (sta) {
ahsta = ath12k_sta_to_ahsta(sta);
+
/* For an ML STA the pairwise key is the same for all associated link
* stations, hence set the key for all active link STAs.
*/
@@ -4394,41 +4680,47 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (ret)
break;
}
- } else {
- arsta = &ahsta->deflink;
- arvif = arsta->arvif;
- if (WARN_ON(!arvif)) {
- ret = -EINVAL;
- goto out;
- }
- ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key);
- }
- } else {
- if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
- link_id = key->link_id;
- arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
- } else {
- link_id = 0;
- arvif = &ahvif->deflink;
+ return 0;
}
- if (!arvif || !arvif->is_created) {
- cache = ath12k_ahvif_get_link_cache(ahvif, link_id);
- if (!cache)
- return -ENOSPC;
+ arsta = &ahsta->deflink;
+ arvif = arsta->arvif;
+ if (WARN_ON(!arvif))
+ return -EINVAL;
+
+ ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key);
+ if (ret)
+ return ret;
+
+ return 0;
+ }
- ret = ath12k_mac_update_key_cache(cache, cmd, sta, key);
+ if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
+ link_id = key->link_id;
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ } else {
+ link_id = 0;
+ arvif = &ahvif->deflink;
+ }
+ if (!arvif || !arvif->is_created) {
+ cache = ath12k_ahvif_get_link_cache(ahvif, link_id);
+ if (!cache)
+ return -ENOSPC;
+
+ ret = ath12k_mac_update_key_cache(cache, cmd, sta, key);
+ if (ret)
return ret;
- }
- ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, NULL, key);
+ return 0;
}
-out:
+ ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, NULL, key);
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
static int
@@ -4451,7 +4743,6 @@ ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif,
const struct cfg80211_bitrate_mask *mask,
enum nl80211_band band)
{
- struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ath12k *ar = arvif->ar;
u8 vht_rate, nss;
u32 rate_code;
@@ -4470,47 +4761,52 @@ ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif,
if (!nss) {
ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
- sta->addr);
+ arsta->addr);
return -EINVAL;
}
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
- sta->addr);
+ arsta->addr);
rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1,
WMI_RATE_PREAMBLE_VHT);
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
rate_code);
if (ret)
ath12k_warn(ar->ab,
"failed to update STA %pM Fixed Rate %d: %d\n",
- sta->addr, rate_code, ret);
+ arsta->addr, rate_code, ret);
return ret;
}
-static int ath12k_station_assoc(struct ath12k *ar,
- struct ath12k_link_vif *arvif,
- struct ath12k_link_sta *arsta,
- bool reassoc)
+static int ath12k_mac_station_assoc(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
+ bool reassoc)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ath12k_wmi_peer_assoc_arg peer_arg;
+ struct ieee80211_link_sta *link_sta;
int ret;
struct cfg80211_chan_def def;
enum nl80211_band band;
struct cfg80211_bitrate_mask *mask;
u8 num_vht_rates;
+ u8 link_id = arvif->link_id;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
return -EPERM;
+ if (WARN_ON(!rcu_access_pointer(sta->link[link_id])))
+ return -EINVAL;
+
band = def.chan->band;
mask = &arvif->bitrate_mask;
@@ -4524,13 +4820,13 @@ static int ath12k_station_assoc(struct ath12k *ar,
ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
- sta->addr, arvif->vdev_id, ret);
+ arsta->addr, arvif->vdev_id, ret);
return ret;
}
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
return -ETIMEDOUT;
}
@@ -4541,7 +4837,13 @@ static int ath12k_station_assoc(struct ath12k *ar,
* fixed param.
* Note that all other rates and NSS will be disabled for this peer.
*/
- if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in station assoc\n");
+ return -EINVAL;
+ }
+
+ if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
band);
if (ret)
@@ -4554,9 +4856,8 @@ static int ath12k_station_assoc(struct ath12k *ar,
if (reassoc)
return 0;
- ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->deflink.ht_cap,
- &sta->deflink.he_6ghz_capa);
+ ret = ath12k_setup_peer_smps(ar, arvif, arsta->addr,
+ &link_sta->ht_cap, &link_sta->he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -4574,7 +4875,7 @@ static int ath12k_station_assoc(struct ath12k *ar,
ret = ath12k_peer_assoc_qos_ap(ar, arvif, arsta);
if (ret) {
ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
- sta->addr, arvif->vdev_id, ret);
+ arsta->addr, arvif->vdev_id, ret);
return ret;
}
}
@@ -4582,33 +4883,25 @@ static int ath12k_station_assoc(struct ath12k *ar,
return 0;
}
-static int ath12k_station_disassoc(struct ath12k *ar,
- struct ath12k_link_vif *arvif,
- struct ath12k_link_sta *arsta)
+static int ath12k_mac_station_disassoc(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
{
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
if (!sta->wme) {
arvif->num_legacy_stations--;
- ret = ath12k_recalc_rtscts_prot(arvif);
- if (ret)
- return ret;
+ return ath12k_recalc_rtscts_prot(arvif);
}
- ret = ath12k_clear_peer_keys(arvif, sta->addr);
- if (ret) {
- ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
return 0;
}
static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
+ struct ieee80211_link_sta *link_sta;
struct ath12k *ar;
struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
@@ -4666,65 +4959,65 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
* WMI_PEER_CHWIDTH
*/
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth upgrade for sta %pM new %d old %d\n",
- sta->addr, bw, bw_prev);
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arsta->addr, bw, bw_prev);
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_PHYMODE,
peer_phymode);
if (err) {
ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
- sta->addr, peer_phymode, err);
+ arsta->addr, peer_phymode, err);
return;
}
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_CHWIDTH,
bw);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM to peer bandwidth %d: %d\n",
- sta->addr, bw, err);
+ arsta->addr, bw, err);
} else {
/* When we downgrade bandwidth this will conflict with phymode
* and can trigger a firmware crash. In this case we send
* WMI_PEER_CHWIDTH followed by WMI_PEER_PHYMODE
*/
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth downgrade for sta %pM new %d old %d\n",
- sta->addr, bw, bw_prev);
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arsta->addr, bw, bw_prev);
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_CHWIDTH,
bw);
if (err) {
ath12k_warn(ar->ab, "failed to update STA %pM peer to bandwidth %d: %d\n",
- sta->addr, bw, err);
+ arsta->addr, bw, err);
return;
}
- err = ath12k_wmi_set_peer_param(ar, sta->addr,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id, WMI_PEER_PHYMODE,
peer_phymode);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
- sta->addr, peer_phymode, err);
+ arsta->addr, peer_phymode, err);
}
}
if (changed & IEEE80211_RC_NSS_CHANGED) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n",
- sta->addr, nss);
+ arsta->addr, nss);
- err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
WMI_PEER_NSS, nss);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
- sta->addr, nss, err);
+ arsta->addr, nss, err);
}
if (changed & IEEE80211_RC_SMPS_CHANGED) {
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n",
- sta->addr, smps);
+ arsta->addr, smps);
- err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
WMI_PEER_MIMO_PS_STATE, smps);
if (err)
ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
- sta->addr, smps, err);
+ arsta->addr, smps, err);
}
if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
@@ -4743,7 +5036,14 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
* TODO: Check RATEMASK_CMDID to support auto rates selection
* across HT/VHT and for multiple VHT MCS support.
*/
- if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ link_sta = ath12k_mac_get_link_sta(arsta);
+ if (!link_sta) {
+ ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
+ if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
band);
} else {
@@ -4757,15 +5057,44 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (err)
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
- sta->addr, arvif->vdev_id, err);
+ arsta->addr, arvif->vdev_id, err);
if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
}
}
}
+static void ath12k_mac_free_unassign_link_sta(struct ath12k_hw *ah,
+ struct ath12k_sta *ahsta,
+ u8 link_id)
+{
+ struct ath12k_link_sta *arsta;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+ return;
+
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (WARN_ON(!arsta))
+ return;
+
+ ahsta->links_map &= ~BIT(link_id);
+ rcu_assign_pointer(ahsta->link[link_id], NULL);
+ synchronize_rcu();
+
+ if (arsta == &ahsta->deflink) {
+ arsta->link_id = ATH12K_INVALID_LINK_ID;
+ arsta->ahsta = NULL;
+ arsta->arvif = NULL;
+ return;
+ }
+
+ kfree(arsta);
+}
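
The helper above follows the standard RCU unpublish-then-free ordering: clear the
published pointer, wait out a grace period, and only then release (or reset) the
memory. A minimal sketch of the same pattern, with purely illustrative names and
the writer side assumed to be serialized externally (here: the wiphy mutex):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct item { int data; };

    struct slot_owner {
            struct item __rcu *slot;
    };

    static void slot_remove(struct slot_owner *o)
    {
            /* writer side serialized externally, hence condition 1 */
            struct item *it = rcu_dereference_protected(o->slot, 1);

            if (!it)
                    return;

            rcu_assign_pointer(o->slot, NULL);      /* unpublish */
            synchronize_rcu();                      /* wait out RCU readers */
            kfree(it);                              /* now safe to free */
    }
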
+
static int ath12k_mac_inc_num_stations(struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta)
{
@@ -4799,6 +5128,144 @@ static void ath12k_mac_dec_num_stations(struct ath12k_link_vif *arvif,
ar->num_stations--;
}
+static void ath12k_mac_station_post_remove(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ath12k_peer *peer;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ ath12k_mac_dec_num_stations(arvif, arsta);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ if (peer && peer->sta == sta) {
+ ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+}
+
+static int ath12k_mac_station_unauthorize(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ath12k_peer *peer;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ /* Driver must clear the keys during the state change from
+ * IEEE80211_STA_AUTHORIZED to IEEE80211_STA_ASSOC, since after
+ * returning from here, mac80211 is going to delete the keys
+ * in __sta_info_destroy_part2(). This will ensure that the driver does
+ * not retain stale key references after mac80211 deletes the keys.
+ */
+ ret = ath12k_clear_peer_keys(arvif, arsta->addr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_station_authorize(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ath12k_peer *peer;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret) {
+ ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ arsta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_station_remove(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ int ret = 0;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ wiphy_work_cancel(ar->ah->hw->wiphy, &arsta->update_wk);
+
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ ath12k_bss_disassoc(ar, arvif);
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (sta->mlo)
+ return ret;
+
+ ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
+
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+
+ ath12k_mac_station_post_remove(ar, arvif, arsta);
+
+ if (sta->valid_links)
+ ath12k_mac_free_unassign_link_sta(ahvif->ah,
+ arsta->ahsta, arsta->link_id);
+
+ return ret;
+}
+
static int ath12k_mac_station_add(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta)
@@ -4806,7 +5273,7 @@ static int ath12k_mac_station_add(struct ath12k *ar,
struct ath12k_base *ab = ar->ab;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- struct ath12k_wmi_peer_create_arg peer_param;
+ struct ath12k_wmi_peer_create_arg peer_param = {0};
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -4824,34 +5291,35 @@ static int ath12k_mac_station_add(struct ath12k *ar,
}
peer_param.vdev_id = arvif->vdev_id;
- peer_param.peer_addr = sta->addr;
+ peer_param.peer_addr = arsta->addr;
peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ peer_param.ml_enabled = sta->mlo;
ret = ath12k_peer_create(ar, arvif, sta, &peer_param);
if (ret) {
ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
goto free_peer;
}
ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
if (ieee80211_vif_is_mesh(vif)) {
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id,
WMI_PEER_USE_4ADDR, 1);
if (ret) {
ath12k_warn(ab, "failed to STA %pM 4addr capability: %d\n",
- sta->addr, ret);
+ arsta->addr, ret);
goto free_peer;
}
}
- ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, arsta->addr);
if (ret) {
ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
- sta->addr, arvif->vdev_id, ret);
+ arsta->addr, arvif->vdev_id, ret);
goto free_peer;
}
@@ -4868,7 +5336,9 @@ static int ath12k_mac_station_add(struct ath12k *ar,
return 0;
free_peer:
- ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr);
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
dec_num_station:
ath12k_mac_dec_num_stations(arvif, arsta);
exit:
@@ -4906,101 +5376,131 @@ static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
return bw;
}
-static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
+static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah,
+ struct ath12k_sta *ahsta,
+ struct ath12k_link_sta *arsta,
+ struct ath12k_vif *ahvif,
+ u8 link_id)
{
- struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
- struct ath12k *ar;
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ieee80211_link_sta *link_sta;
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!arsta || link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return -EINVAL;
+
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ if (!arvif)
+ return -EINVAL;
+
+ memset(arsta, 0, sizeof(*arsta));
+
+ link_sta = wiphy_dereference(ah->hw->wiphy, sta->link[link_id]);
+ if (!link_sta)
+ return -EINVAL;
+
+ ether_addr_copy(arsta->addr, link_sta->addr);
+
+ /* logical index of the link sta in order of creation */
+ arsta->link_idx = ahsta->num_peer++;
+
+ arsta->link_id = link_id;
+ ahsta->links_map |= BIT(arsta->link_id);
+ arsta->arvif = arvif;
+ arsta->ahsta = ahsta;
+ ahsta->ahvif = ahvif;
+
+ wiphy_work_init(&arsta->update_wk, ath12k_sta_rc_update_wk);
+
+ rcu_assign_pointer(ahsta->link[link_id], arsta);
+
+ return 0;
+}
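
Note the ordering above: every field of the link sta is written before
rcu_assign_pointer() publishes the entry, so concurrent RCU readers never observe
a half-initialized object. A reader-side sketch (use_link_sta() is a hypothetical
placeholder):

    rcu_read_lock();
    arsta = rcu_dereference(ahsta->link[link_id]);
    if (arsta)
            use_link_sta(arsta);    /* entry is guaranteed fully initialized */
    rcu_read_unlock();
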
+
+static void ath12k_mac_ml_station_remove(struct ath12k_vif *ahvif,
+ struct ath12k_sta *ahsta)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ath12k_hw *ah = ahvif->ah;
struct ath12k_link_vif *arvif;
struct ath12k_link_sta *arsta;
- struct ath12k_peer *peer;
+ unsigned long links;
+ struct ath12k *ar;
+ u8 link_id;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ ath12k_peer_mlo_link_peers_delete(ahvif, ahsta);
+
+ /* validate link station removal and clear arsta links */
+ links = ahsta->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arvif || !arsta)
+ continue;
+
+ ar = arvif->ar;
+
+ ath12k_mac_station_post_remove(ar, arvif, arsta);
+
+ ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
+ }
+
+ ath12k_peer_ml_delete(ah, sta);
+}
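
ahsta->links_map is a plain bitmap of published link IDs, so teardown loops like
the one above visit only live links. A minimal sketch of the walk, assuming only
the standard bitops API:

    unsigned long links = ahsta->links_map; /* snapshot; bits may clear inside */
    u8 link_id;

    for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
            /* dereference ahvif->link[link_id] and ahsta->link[link_id]
             * under the wiphy mutex and tear the pair down
             */
    }
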
+
+static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
+ struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+ struct ath12k *ar = arvif->ar;
int ret = 0;
lockdep_assert_wiphy(hw->wiphy);
- arvif = &ahvif->deflink;
- arsta = &ahsta->deflink;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n",
+ arsta->link_id, arsta->addr, old_state, new_state);
- ar = ath12k_get_ar_by_vif(hw, vif);
- if (!ar) {
- WARN_ON_ONCE(1);
- return -EINVAL;
+ /* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: Remove the station
+ * from driver
+ */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ret = ath12k_mac_station_remove(ar, arvif, arsta);
+ if (ret) {
+ ath12k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+ goto exit;
+ }
}
+ /* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE: Add new station to driver */
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
- memset(arsta, 0, sizeof(*arsta));
- rcu_assign_pointer(ahsta->link[0], arsta);
- /* TODO use appropriate link id once MLO support is added */
- arsta->link_id = ATH12K_DEFAULT_LINK_ID;
- ahsta->links_map = BIT(arsta->link_id);
- arsta->ahsta = ahsta;
- arsta->arvif = arvif;
- wiphy_work_init(&arsta->update_wk, ath12k_sta_rc_update_wk);
-
- synchronize_rcu();
-
ret = ath12k_mac_station_add(ar, arvif, arsta);
if (ret)
ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- } else if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST)) {
- wiphy_work_cancel(hw->wiphy, &arsta->update_wk);
-
- if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
- ath12k_bss_disassoc(ar, arvif);
- ret = ath12k_mac_vdev_stop(arvif);
- if (ret)
- ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
- }
- ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+ arsta->addr, arvif->vdev_id);
- ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
- if (ret)
- ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- else
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
-
- ath12k_mac_dec_num_stations(arvif, arsta);
- spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer && peer->sta == sta) {
- ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
- vif->addr, arvif->vdev_id);
- peer->sta = NULL;
- list_del(&peer->list);
- kfree(peer);
- ar->num_peers--;
- }
- spin_unlock_bh(&ar->ab->base_lock);
-
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
-
- if (arsta->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
- rcu_assign_pointer(ahsta->link[arsta->link_id], NULL);
- synchronize_rcu();
- ahsta->links_map &= ~(BIT(arsta->link_id));
- arsta->link_id = ATH12K_INVALID_LINK_ID;
- arsta->ahsta = NULL;
- }
+ /* IEEE80211_STA_AUTH -> IEEE80211_STA_ASSOC: Send the station assoc command
+ * for a peer associated with an AP/mesh/ADHOC vif type.
+ */
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath12k_station_assoc(ar, arvif, arsta, false);
+ ret = ath12k_mac_station_assoc(ar, arvif, arsta, false);
if (ret)
ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
- sta->addr);
+ arsta->addr);
spin_lock_bh(&ar->data_lock);
@@ -5008,45 +5508,141 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
arsta->bw_prev = sta->deflink.bandwidth;
spin_unlock_bh(&ar->data_lock);
+
+ /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as
+ * authorized
+ */
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = true;
-
- spin_unlock_bh(&ar->ab->base_lock);
+ ret = ath12k_mac_station_authorize(ar, arvif, arsta);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to authorize station: %pM\n",
+ arsta->addr);
- if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
- arvif->vdev_id,
- WMI_PEER_AUTHORIZE,
- 1);
- if (ret)
- ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
- sta->addr, arvif->vdev_id, ret);
- }
+ /* IEEE80211_STA_AUTHORIZED -> IEEE80211_STA_ASSOC: the station is likely
+ * being removed, so deauthorize it.
+ */
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
- spin_lock_bh(&ar->ab->base_lock);
+ ath12k_mac_station_unauthorize(ar, arvif, arsta);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = false;
-
- spin_unlock_bh(&ar->ab->base_lock);
+ /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTH: disassoc a peer connected to
+ * an AP/mesh/ADHOC vif type.
+ */
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath12k_station_disassoc(ar, arvif, arsta);
+ ret = ath12k_mac_station_disassoc(ar, arvif, arsta);
if (ret)
ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n",
- sta->addr);
+ arsta->addr);
+ }
+
+exit:
+ return ret;
+}
+
+static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ unsigned long valid_links;
+ u8 link_id = 0;
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (ieee80211_vif_is_mld(vif) && sta->valid_links) {
+ WARN_ON(!sta->mlo && hweight16(sta->valid_links) != 1);
+ link_id = ffs(sta->valid_links) - 1;
+ }
+
+ /* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE:
+ * New station add received. If this is an ML station then
+ * ahsta->links_map will be zero and sta->valid_links will have
+ * exactly one bit set. Assign the default link to that link sta.
+ */
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(ahsta, 0, sizeof(*ahsta));
+
+ arsta = &ahsta->deflink;
+
+ /* ML sta */
+ if (sta->mlo && !ahsta->links_map &&
+ (hweight16(sta->valid_links) == 1)) {
+ ret = ath12k_peer_ml_create(ah, sta);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable to create ML peer for sta %pM",
+ sta->addr);
+ goto exit;
+ }
+ }
+
+ ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif,
+ link_id);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable assign link %d for sta %pM",
+ link_id, sta->addr);
+ goto exit;
+ }
+
+ /* arsta is memset in ath12k_mac_assign_link_sta() above, hence
+ * set these fields only after the link sta is assigned
+ */
+ if (sta->mlo) {
+ arsta->is_assoc_link = true;
+ ahsta->assoc_link_id = link_id;
+ }
}
+ /* Handle all the other state transitions in a generic way */
+ valid_links = ahsta->links_map;
+ for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]);
+ /* some assumptions went wrong! */
+ if (WARN_ON(!arvif || !arsta))
+ continue;
+
+ /* vdev might have been deleted */
+ if (WARN_ON(!arvif->ar))
+ continue;
+
+ ret = ath12k_mac_handle_link_sta_state(hw, arvif, arsta,
+ old_state, new_state);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable to move link sta %d of sta %pM from state %d to %d",
+ link_id, arsta->addr, old_state, new_state);
+ goto exit;
+ }
+ }
+
+ /* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST:
+ * Remove the station from the driver (handle the ML sta here since
+ * it needs special handling; a non-ML sta was already handled by the
+ * generic handler above).
+ */
+ if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST && sta->mlo)
+ ath12k_mac_ml_station_remove(ahvif, ahsta);
+
+ ret = 0;
+
+exit:
+ /* update the state if everything went well */
+ if (!ret)
+ ahsta->state = new_state;
+
return ret;
}
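
For orientation, a rough map of the mac80211 station state ladder as the handlers
above implement it; mac80211 guarantees the transitions arrive one step at a time:

    /*
     * NOTEXIST   -> NONE       : ath12k_mac_station_add()       (peer create)
     * AUTH       -> ASSOC      : ath12k_mac_station_assoc()     (AP/mesh/ADHOC)
     * ASSOC      -> AUTHORIZED : ath12k_mac_station_authorize()
     * AUTHORIZED -> ASSOC      : ath12k_mac_station_unauthorize() (+ key clear)
     * ASSOC      -> AUTH       : ath12k_mac_station_disassoc()
     * NONE       -> NOTEXIST   : ath12k_mac_station_remove()    (peer delete)
     */
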
@@ -5054,16 +5650,22 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k *ar;
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ u8 link_id;
int ret;
s16 txpwr;
lockdep_assert_wiphy(hw->wiphy);
- arvif = &ahvif->deflink;
+ /* TODO: use link id from mac80211 once that's implemented */
+ link_id = 0;
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]);
if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
txpwr = 0;
@@ -5080,9 +5682,9 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
goto out;
}
- ar = ath12k_ah_to_ar(ah, 0);
+ ar = arvif->ar;
- ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
WMI_PEER_USE_FIXED_PWR, txpwr);
if (ret) {
ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
@@ -5094,62 +5696,69 @@ out:
return ret;
}
-static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_link_sta *link_sta,
- u32 changed)
+static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
{
struct ieee80211_sta *sta = link_sta->sta;
struct ath12k *ar;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_link_sta *arsta;
struct ath12k_link_vif *arvif;
struct ath12k_peer *peer;
u32 bw, smps;
- /* TODO: use proper link id once link sta specific rc update support is
- * available in mac80211.
- */
- u8 link_id = ATH12K_DEFAULT_LINK_ID;
-
- ar = ath12k_get_ar_by_vif(hw, vif);
- if (!ar) {
- WARN_ON_ONCE(1);
- return;
- }
rcu_read_lock();
- arvif = rcu_dereference(ahvif->link[link_id]);
+ arvif = rcu_dereference(ahvif->link[link_sta->link_id]);
if (!arvif) {
- ath12k_warn(ar->ab, "mac sta rc update failed to fetch link vif on link id %u for peer %pM\n",
- link_id, sta->addr);
+ ath12k_hw_warn(ah, "mac sta rc update failed to fetch link vif on link id %u for peer %pM\n",
+ link_sta->link_id, sta->addr);
rcu_read_unlock();
return;
}
- arsta = rcu_dereference(ahsta->link[link_id]);
+
+ ar = arvif->ar;
+
+ arsta = rcu_dereference(ahsta->link[link_sta->link_id]);
if (!arsta) {
rcu_read_unlock();
ath12k_warn(ar->ab, "mac sta rc update failed to fetch link sta on link id %u for peer %pM\n",
- link_id, sta->addr);
+ link_sta->link_id, sta->addr);
return;
}
spin_lock_bh(&ar->ab->base_lock);
- peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
if (!peer) {
spin_unlock_bh(&ar->ab->base_lock);
rcu_read_unlock();
ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
- sta->addr, arvif->vdev_id);
+ arsta->addr, arvif->vdev_id);
return;
}
spin_unlock_bh(&ar->ab->base_lock);
+ if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+ rcu_read_unlock();
+ return;
+ }
+
+ link_sta = rcu_dereference(sta->link[arsta->link_id]);
+ if (!link_sta) {
+ rcu_read_unlock();
+ ath12k_warn(ar->ab, "unable to access link sta in rc update for sta %pM link %u\n",
+ sta->addr, arsta->link_id);
+ return;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
- sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss,
- sta->deflink.smps_mode);
+ arsta->addr, changed, link_sta->bandwidth, link_sta->rx_nss,
+ link_sta->smps_mode);
spin_lock_bh(&ar->data_lock);
@@ -5160,12 +5769,12 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
}
if (changed & IEEE80211_RC_NSS_CHANGED)
- arsta->nss = sta->deflink.rx_nss;
+ arsta->nss = link_sta->rx_nss;
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
- switch (sta->deflink.smps_mode) {
+ switch (link_sta->smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
smps = WMI_PEER_SMPS_PS_NONE;
@@ -5177,8 +5786,8 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
smps = WMI_PEER_SMPS_DYNAMIC;
break;
default:
- ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
- sta->deflink.smps_mode, sta->addr);
+ ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM link %u\n",
+ link_sta->smps_mode, arsta->addr, link_sta->link_id);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
@@ -5195,6 +5804,101 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
rcu_read_unlock();
}
+static struct ath12k_link_sta *ath12k_mac_alloc_assign_link_sta(struct ath12k_hw *ah,
+ struct ath12k_sta *ahsta,
+ struct ath12k_vif *ahvif,
+ u8 link_id)
+{
+ struct ath12k_link_sta *arsta;
+ int ret;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+ return NULL;
+
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (arsta)
+ return NULL;
+
+ arsta = kmalloc(sizeof(*arsta), GFP_KERNEL);
+ if (!arsta)
+ return NULL;
+
+ ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif, link_id);
+ if (ret) {
+ kfree(arsta);
+ return NULL;
+ }
+
+ return arsta;
+}
+
+static int ath12k_mac_op_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ unsigned long valid_links;
+ struct ath12k *ar;
+ u8 link_id;
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ if (!sta->valid_links)
+ return -EINVAL;
+
+ /* Firmware does not support removing individual link stas. All link
+ * stas are removed during ML STA delete in sta_state(), hence link
+ * sta removal is not handled here.
+ */
+ if (new_links < old_links)
+ return 0;
+
+ if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+ ath12k_hw_warn(ah, "unable to add link for ml sta %pM", sta->addr);
+ return -EINVAL;
+ }
+
+ /* this op is expected only after initial sta insertion with default link */
+ if (WARN_ON(ahsta->links_map == 0))
+ return -EINVAL;
+
+ valid_links = new_links;
+ for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ if (ahsta->links_map & BIT(link_id))
+ continue;
+
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ arsta = ath12k_mac_alloc_assign_link_sta(ah, ahsta, ahvif, link_id);
+
+ if (!arvif || !arsta) {
+ ath12k_hw_warn(ah, "Failed to alloc/assign link sta");
+ continue;
+ }
+
+ ar = arvif->ar;
+ if (!ar)
+ continue;
+
+ ret = ath12k_mac_station_add(ar, arvif, arsta);
+ if (ret) {
+ ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ arsta->addr, arvif->vdev_id);
+ ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
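
The skip-if-set test in the loop above is equivalent to walking the set difference
between the requested and already-tracked link maps; a one-line sketch of that
equivalence:

    unsigned long to_add = new_links & ~ahsta->links_map;   /* links still to create */
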
+
static int ath12k_conf_tx_uapsd(struct ath12k_link_vif *arvif,
u16 ac, bool enable)
{
@@ -6054,6 +6758,8 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
{
int num_mgmt;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -6115,6 +6821,8 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
int buf_id;
int ret;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
ATH12K_SKB_CB(skb)->ar = ar;
spin_lock_bh(&ar->txmgmt_idr_lock);
buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
@@ -6169,15 +6877,18 @@ static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar)
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
}
-static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
+static void ath12k_mgmt_over_wmi_tx_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work);
+ struct ath12k_hw *ah = ar->ah;
struct ath12k_skb_cb *skb_cb;
struct ath12k_vif *ahvif;
struct ath12k_link_vif *arvif;
struct sk_buff *skb;
int ret;
+ lockdep_assert_wiphy(wiphy);
+
while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
skb_cb = ATH12K_SKB_CB(skb);
if (!skb_cb->vif) {
@@ -6187,7 +6898,15 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
ahvif = ath12k_vif_to_ahvif(skb_cb->vif);
- arvif = &ahvif->deflink;
+ if (!(ahvif->links_map & BIT(skb_cb->link_id))) {
+ ath12k_warn(ar->ab,
+ "invalid linkid %u in mgmt over wmi tx with linkmap 0x%x\n",
+ skb_cb->link_id, ahvif->links_map);
+ ath12k_mgmt_over_wmi_tx_drop(ar, skb);
+ continue;
+ }
+
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[skb_cb->link_id]);
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
@@ -6197,8 +6916,9 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
} else {
ath12k_warn(ar->ab,
- "dropping mgmt frame for vdev %d, is_started %d\n",
+ "dropping mgmt frame for vdev %d link %u is_started %d\n",
arvif->vdev_id,
+ skb_cb->link_id,
arvif->is_started);
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
}
@@ -6232,7 +6952,7 @@ static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
- ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work);
+ wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work);
return 0;
}
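
This hunk completes the conversion of the management Tx worker from a plain
workqueue item to a wiphy_work, whose handler always runs with the wiphy mutex
held. A minimal usage sketch of that cfg80211 API (function and variable names
are illustrative); wiphy_work_cancel(), which itself requires the wiphy mutex,
replaces cancel_work_sync():

    static void my_tx_work(struct wiphy *wiphy, struct wiphy_work *work)
    {
            lockdep_assert_wiphy(wiphy);    /* held by the wiphy_work core */
            /* wiphy-protected state may be touched directly here */
    }

    static void example_setup(struct ieee80211_hw *hw, struct wiphy_work *work)
    {
            wiphy_work_init(work, my_tx_work);
            wiphy_work_queue(hw->wiphy, work);      /* schedule the worker */
    }
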
@@ -6258,6 +6978,105 @@ static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
spin_unlock_bh(&ar->data_lock);
}
+/* Note: called under rcu_read_lock() */
+static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ u8 link, struct sk_buff *skb, u32 info_flags)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ieee80211_link_sta *link_sta;
+ struct ieee80211_bss_conf *bss_conf;
+ struct ath12k_sta *ahsta;
+
+ /* Use the link id passed or the default vif link */
+ if (!sta) {
+ if (link != IEEE80211_LINK_UNSPECIFIED)
+ return link;
+
+ return ahvif->deflink.link_id;
+ }
+
+ ahsta = ath12k_sta_to_ahsta(sta);
+
+ /* The translation below ensures we pass proper A2 & A3 for non-ML
+ * clients. For now it assumes only MLO AP support in this path.
+ */
+ if (!sta->mlo) {
+ link = ahsta->deflink.link_id;
+
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ return link;
+
+ bss_conf = rcu_dereference(vif->link_conf[link]);
+ if (bss_conf) {
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+ if (!ieee80211_has_tods(hdr->frame_control) &&
+ !ieee80211_has_fromds(hdr->frame_control))
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+ }
+
+ return link;
+ }
+
+ /* enqueue eth encap & data frames on the primary link; FW does link
+ * selection and address translation.
+ */
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP ||
+ ieee80211_is_data(hdr->frame_control))
+ return ahsta->assoc_link_id;
+
+ /* 802.11 frame cases */
+ if (link == IEEE80211_LINK_UNSPECIFIED)
+ link = ahsta->deflink.link_id;
+
+ if (!ieee80211_is_mgmt(hdr->frame_control))
+ return link;
+
+ /* Perform address conversion for ML STA Tx */
+ bss_conf = rcu_dereference(vif->link_conf[link]);
+ link_sta = rcu_dereference(sta->link[link]);
+
+ if (bss_conf && link_sta) {
+ ether_addr_copy(hdr->addr1, link_sta->addr);
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid)
+ ether_addr_copy(hdr->addr3, bss_conf->bssid);
+ else if (vif->type == NL80211_IFTYPE_AP)
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+
+ return link;
+ }
+
+ if (bss_conf) {
+ /* An ML sta may associate on a subset of the links on which the
+ * ML AP is active, and later send a frame (e.g. a probe request)
+ * on a different link that is active in our MLD but was not added
+ * during association. We can still honor the Tx to that ML STA via
+ * the requested link.
+ * Control reaches here in that case only when the link address is
+ * the same as the MLD address or, in the worst case, when the
+ * client wrongly used the MLD address as TA, which is what allowed
+ * the ML sta object to be identified and passed here.
+ * If the link address of that STA differed from the MLD address,
+ * the sta object would be NULL and control would have returned at
+ * the !sta check at the start of the function. This case also needs
+ * no MLD-to-link translation of hdr->addr1, since the RA is already
+ * the MLD address (ideally the same as the link address).
+ */
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid)
+ ether_addr_copy(hdr->addr3, bss_conf->bssid);
+ else if (vif->type == NL80211_IFTYPE_AP)
+ ether_addr_copy(hdr->addr3, bss_conf->addr);
+ }
+
+ return link;
+}
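
As a reminder of what the ether_addr_copy() calls above rewrite, the 802.11
header address roles involved are (sketch; data and ethernet-encap frames skip
this since firmware translates them on the association link):

    /*
     * addr1 (RA): receiver    - rewritten to the peer's link address
     * addr2 (TA): transmitter - rewritten to our link (bss_conf) address
     * addr3     : BSSID for STA vifs, our own link address for AP vifs
     */
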
+
+/* Note: called under rcu_read_lock() */
static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -6267,13 +7086,16 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = info->control.vif;
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif = &ahvif->deflink;
- struct ath12k *ar = arvif->ar;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_sta *sta = control->sta;
u32 info_flags = info->flags;
+ struct ath12k *ar;
bool is_prb_rsp;
+ u8 link_id;
int ret;
+ link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK);
memset(skb_cb, 0, sizeof(*skb_cb));
skb_cb->vif = vif;
@@ -6282,6 +7104,27 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
}
+ /* handle only for MLO case, use deflink for non MLO case */
+ if (ieee80211_vif_is_mld(vif)) {
+ link_id = ath12k_mac_get_tx_link(sta, vif, link_id, skb, info_flags);
+ if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+ } else {
+ link_id = 0;
+ }
+
+ arvif = rcu_dereference(ahvif->link[link_id]);
+ if (!arvif || !arvif->ar) {
+ ath12k_warn(ahvif->ah, "failed to find arvif link id %u for frame transmission",
+ link_id);
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ ar = arvif->ar;
+ skb_cb->link_id = link_id;
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
@@ -6309,10 +7152,12 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
void ath12k_mac_drain_tx(struct ath12k *ar)
{
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
/* make sure rcu-protected mac80211 tx path itself is drained */
synchronize_net();
- cancel_work_sync(&ar->wmi_mgmt_tx_work);
+ wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work);
ath12k_mgmt_over_wmi_tx_purge(ar);
}
@@ -6429,6 +7274,8 @@ static void ath12k_drain_tx(struct ath12k_hw *ah)
struct ath12k *ar;
int i;
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
for_each_ar(ah, ar, i)
ath12k_mac_drain_tx(ar);
}
@@ -6622,6 +7469,7 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *tx_vif = ahvif->vif->mbssid_tx_vif;
+ struct ieee80211_bss_conf *link_conf;
struct ath12k *ar = arvif->ar;
struct ath12k_link_vif *tx_arvif;
struct ath12k_vif *tx_ahvif;
@@ -6629,10 +7477,17 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
if (!tx_vif)
return 0;
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in set mbssid params for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
tx_ahvif = ath12k_vif_to_ahvif(tx_vif);
tx_arvif = &tx_ahvif->deflink;
- if (ahvif->vif->bss_conf.nontransmitted) {
+ if (link_conf->nontransmitted) {
if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
return -EINVAL;
@@ -6644,7 +7499,7 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
return -EINVAL;
}
- if (ahvif->vif->bss_conf.ema_ap)
+ if (link_conf->ema_ap)
*flags |= WMI_VDEV_MBSSID_FLAGS_EMA_MODE;
return 0;
@@ -6658,6 +7513,8 @@ static int ath12k_mac_setup_vdev_create_arg(struct ath12k_link_vif *arvif,
struct ath12k_vif *ahvif = arvif->ahvif;
int ret;
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
arg->if_id = arvif->vdev_id;
arg->type = ahvif->vdev_type;
arg->subtype = ahvif->vdev_subtype;
@@ -6689,6 +7546,17 @@ static int ath12k_mac_setup_vdev_create_arg(struct ath12k_link_vif *arvif,
}
arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif);
+
+ if (ath12k_mac_is_ml_arvif(arvif)) {
+ if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS) {
+ ath12k_warn(ar->ab, "too many MLO links during setting up vdev: %d",
+ ahvif->vif->valid_links);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(arg->mld_addr, ahvif->vif->addr);
+ }
+
return 0;
}
@@ -6839,16 +7707,25 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
- struct ath12k_wmi_peer_create_arg peer_param;
+ struct ath12k_wmi_peer_create_arg peer_param = {0};
struct ieee80211_bss_conf *link_conf;
u32 param_id, param_value;
u16 nss;
int i;
int ret, vdev_id;
+ u8 link_id;
lockdep_assert_wiphy(hw->wiphy);
- link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[arvif->link_id]);
+ /* If no link is active and a scan vdev is requested,
+ * use a default link conf for the scan address.
+ */
+ if (arvif->link_id == ATH12K_DEFAULT_SCAN_LINK && vif->valid_links)
+ link_id = ffs(vif->valid_links) - 1;
+ else
+ link_id = arvif->link_id;
+
+ link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
if (!link_conf) {
ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n",
vif->addr, arvif->link_id);
@@ -6999,7 +7876,7 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
break;
}
- arvif->txpower = vif->bss_conf.txpower;
+ arvif->txpower = link_conf->txpower;
ret = ath12k_mac_txpower_recalc(ar);
if (ret)
goto err_peer_del;
@@ -7034,8 +7911,7 @@ err_peer_del:
ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id,
arvif->bssid);
if (ret)
- /* KVALO: why not goto err? */
- return ret;
+ goto err_vdev_del;
ar->num_peers--;
}
@@ -7129,7 +8005,9 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
struct ath12k_link_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
- struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+ struct ath12k_link_vif *scan_arvif;
struct ath12k_hw *ah = hw->priv;
struct ath12k *ar;
struct ath12k_base *ab;
@@ -7148,6 +8026,19 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
if (!ar)
return NULL;
+ /* clean up the scan vdev if scanning on that ar is done
+ * and we now want to create a vdev for actual usage.
+ */
+ if (ieee80211_vif_is_mld(vif)) {
+ scan_arvif = wiphy_dereference(hw->wiphy,
+ ahvif->link[ATH12K_DEFAULT_SCAN_LINK]);
+ if (scan_arvif && scan_arvif->ar == ar) {
+ ar->scan.vdev_id = -1;
+ ath12k_mac_remove_link_interface(hw, scan_arvif);
+ ath12k_mac_unassign_link_vif(scan_arvif);
+ }
+ }
+
if (arvif->ar) {
/* This is not expected really */
if (WARN_ON(!arvif->is_created)) {
@@ -7243,14 +8134,9 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
- /* For non-ml vifs, vif->addr is the actual vdev address but for
- * ML vif link(link BSSID) address is the vdev address and it can be a
- * different one from vif->addr (i.e ML address).
- * Defer vdev creation until assign_chanctx or hw_scan is initiated as driver
+ /* Defer vdev creation until assign_chanctx or hw_scan is initiated as driver
* will not know if this interface is an ML vif at this point.
*/
- ath12k_mac_assign_vif_to_vdev(hw, arvif, NULL);
-
return 0;
}
@@ -7352,7 +8238,7 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
lockdep_assert_wiphy(hw->wiphy);
- for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ for (link_id = 0; link_id < ATH12K_NUM_MAX_LINKS; link_id++) {
/* if we cached some config but never received assign chanctx,
* free the allocated cache.
*/
@@ -7453,20 +8339,26 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx
return ret;
}
-static int ath12k_mac_ampdu_action(struct ath12k_link_vif *arvif,
- struct ieee80211_ampdu_params *params)
+static int ath12k_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params,
+ u8 link_id)
{
- struct ath12k *ar = arvif->ar;
+ struct ath12k *ar;
int ret = -EINVAL;
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ar = ath12k_get_ar_by_vif(hw, vif, link_id);
+ if (!ar)
+ return -EINVAL;
switch (params->action) {
case IEEE80211_AMPDU_RX_START:
- ret = ath12k_dp_rx_ampdu_start(ar, params);
+ ret = ath12k_dp_rx_ampdu_start(ar, params, link_id);
break;
case IEEE80211_AMPDU_RX_STOP:
- ret = ath12k_dp_rx_ampdu_stop(ar, params);
+ ret = ath12k_dp_rx_ampdu_stop(ar, params, link_id);
break;
case IEEE80211_AMPDU_TX_START:
case IEEE80211_AMPDU_TX_STOP_CONT:
@@ -7480,6 +8372,10 @@ static int ath12k_mac_ampdu_action(struct ath12k_link_vif *arvif,
break;
}
+ if (ret)
+ ath12k_warn(ar->ab, "unable to perform ampdu action %d for vif %pM link %u ret %d\n",
+ params->action, vif->addr, link_id, ret);
+
return ret;
}
@@ -7487,27 +8383,24 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_link_vif *arvif;
+ struct ieee80211_sta *sta = params->sta;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ unsigned long links_map = ahsta->links_map;
int ret = -EINVAL;
+ u8 link_id;
lockdep_assert_wiphy(hw->wiphy);
- ar = ath12k_get_ar_by_vif(hw, vif);
- if (!ar)
- return -EINVAL;
-
- ar = ath12k_ah_to_ar(ah, 0);
- arvif = &ahvif->deflink;
+ if (WARN_ON(!links_map))
+ return ret;
- ret = ath12k_mac_ampdu_action(arvif, params);
- if (ret)
- ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n",
- ar->pdev_idx, params->action, ret);
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ ret = ath12k_mac_ampdu_action(hw, vif, params, link_id);
+ if (ret)
+ return ret;
+ }
- return ret;
+ return 0;
}
static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
@@ -7627,6 +8520,58 @@ ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar,
return down_mode;
}
+static void
+ath12k_mac_mlo_get_vdev_args(struct ath12k_link_vif *arvif,
+ struct wmi_ml_arg *ml_arg)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct wmi_ml_partner_info *partner_info;
+ struct ieee80211_bss_conf *link_conf;
+ struct ath12k_link_vif *arvif_p;
+ unsigned long links;
+ u8 link_id;
+
+ lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
+
+ if (!ath12k_mac_is_ml_arvif(arvif))
+ return;
+
+ if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS)
+ return;
+
+ ml_arg->enabled = true;
+
+ /* The driver always adds a new link via VDEV START; FW takes
+ * care of internally adding this link to the existing
+ * link vdevs which are advertised as partners below.
+ */
+ ml_arg->link_add = true;
+ partner_info = ml_arg->partner_info;
+
+ links = ahvif->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif_p = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+
+ if (WARN_ON(!arvif_p))
+ continue;
+
+ if (arvif == arvif_p)
+ continue;
+
+ link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+ ahvif->vif->link_conf[arvif_p->link_id]);
+
+ if (!link_conf)
+ continue;
+
+ partner_info->vdev_id = arvif_p->vdev_id;
+ partner_info->hw_link_id = arvif_p->ar->pdev->hw_link_id;
+ ether_addr_copy(partner_info->addr, link_conf->addr);
+ ml_arg->num_partner_links++;
+ partner_info++;
+ }
+}
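
To make the argument layout concrete: for a two-link MLD starting link 0 with
partner link 1, the helper above would fill roughly the following (field names
from wmi_ml_arg in this series; the bracketed values are illustrative):

    /*
     * ml_arg.enabled           = true;
     * ml_arg.link_add          = true;
     * ml_arg.num_partner_links = 1;
     * ml_arg.partner_info[0].vdev_id    = <vdev id of link 1>;
     * ml_arg.partner_info[0].hw_link_id = <pdev hw_link_id of link 1>;
     * ml_arg.partner_info[0].addr       = <BSS address of link 1>;
     */
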
+
static int
ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
struct ieee80211_chanctx_conf *ctx,
@@ -7637,11 +8582,18 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
struct wmi_vdev_start_req_arg arg = {};
const struct cfg80211_chan_def *chandef = &ctx->def;
struct ath12k_vif *ahvif = arvif->ahvif;
- int he_support = ahvif->vif->bss_conf.he_support;
+ struct ieee80211_bss_conf *link_conf;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in vdev start for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -ENOLINK;
+ }
+
reinit_completion(&ar->vdev_setup_done);
arg.vdev_id = arvif->vdev_id;
@@ -7693,7 +8645,7 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
spin_unlock_bh(&ab->base_lock);
/* TODO: Notify if secondary 80Mhz also needs radar detection */
- if (he_support) {
+ if (link_conf->he_support) {
ret = ath12k_set_he_mu_sounding_mode(ar, arvif);
if (ret) {
ath12k_warn(ar->ab, "failed to set he mode vdev %i\n",
@@ -7705,6 +8657,9 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+ if (!restart)
+ ath12k_mac_mlo_get_vdev_args(arvif, &arg.ml);
+
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac vdev %d start center_freq %d phymode %s punct_bitmap 0x%x\n",
arg.vdev_id, arg.freq,
@@ -7778,19 +8733,32 @@ ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_bss_conf *link_conf;
struct ath12k_link_vif *arvif;
+ unsigned long links_map;
+ u8 link_id;
lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
- arvif = &ahvif->deflink;
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+ if (WARN_ON(!arvif))
+ continue;
- if (arvif->ar != arg->ar)
- return;
+ if (arvif->ar != arg->ar)
+ continue;
- if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
- return;
+ link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+ vif->link_conf[link_id]);
+ if (WARN_ON(!link_conf))
+ continue;
+
+ if (rcu_access_pointer(link_conf->chanctx_conf) != arg->ctx)
+ continue;
- arg->n_vifs++;
+ arg->n_vifs++;
+ }
}
static void
@@ -7799,27 +8767,41 @@ ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_bss_conf *link_conf;
struct ieee80211_chanctx_conf *ctx;
struct ath12k_link_vif *arvif;
+ unsigned long links_map;
+ u8 link_id;
lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
- arvif = &ahvif->deflink;
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+ if (WARN_ON(!arvif))
+ continue;
- if (arvif->ar != arg->ar)
- return;
+ if (arvif->ar != arg->ar)
+ continue;
- ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
- if (ctx != arg->ctx)
- return;
+ link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+ vif->link_conf[arvif->link_id]);
+ if (WARN_ON(!link_conf))
+ continue;
- if (WARN_ON(arg->next_vif == arg->n_vifs))
- return;
+ ctx = rcu_access_pointer(link_conf->chanctx_conf);
+ if (ctx != arg->ctx)
+ continue;
+
+ if (WARN_ON(arg->next_vif == arg->n_vifs))
+ return;
- arg->vifs[arg->next_vif].vif = vif;
- arg->vifs[arg->next_vif].old_ctx = ctx;
- arg->vifs[arg->next_vif].new_ctx = ctx;
- arg->next_vif++;
+ arg->vifs[arg->next_vif].vif = vif;
+ arg->vifs[arg->next_vif].old_ctx = ctx;
+ arg->vifs[arg->next_vif].new_ctx = ctx;
+ arg->vifs[arg->next_vif].link_conf = link_conf;
+ arg->next_vif++;
+ }
}
static u32 ath12k_mac_nlwidth_to_wmiwidth(enum nl80211_chan_width width)
@@ -7879,10 +8861,12 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
int n_vifs)
{
struct ath12k_wmi_vdev_up_params params = {};
+ struct ieee80211_bss_conf *link_conf;
struct ath12k_base *ab = ar->ab;
struct ath12k_link_vif *arvif;
struct ieee80211_vif *vif;
struct ath12k_vif *ahvif;
+ u8 link_id;
int ret;
int i;
bool monitor_vif = false;
@@ -7892,7 +8876,10 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
for (i = 0; i < n_vifs; i++) {
vif = vifs[i].vif;
ahvif = ath12k_vif_to_ahvif(vif);
- arvif = &ahvif->deflink;
+ link_conf = vifs[i].link_conf;
+ link_id = link_conf->link_id;
+ arvif = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahvif->link[link_id]);
if (vif->type == NL80211_IFTYPE_MONITOR)
monitor_vif = true;
@@ -7945,13 +8932,13 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
if (vif->mbssid_tx_vif) {
- struct ath12k_vif *ahvif =
+ struct ath12k_vif *tx_ahvif =
ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
- struct ath12k_link_vif *arvif = &ahvif->deflink;
+ struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink;
- params.tx_bssid = arvif->bssid;
- params.nontx_profile_idx = vif->bss_conf.bssid_index;
- params.nontx_profile_cnt = 1 << vif->bss_conf.bssid_indicator;
+ params.tx_bssid = tx_arvif->bssid;
+ params.nontx_profile_idx = link_conf->bssid_index;
+ params.nontx_profile_cnt = 1 << link_conf->bssid_indicator;
}
ret = ath12k_wmi_vdev_up(arvif->ar, &params);
if (ret) {
@@ -8099,11 +9086,8 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
return -ENOMEM;
}
- if (!arvif->is_started) {
- ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx);
- if (!ar)
- return -EINVAL;
- } else {
+ ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx);
+ if (!ar) {
ath12k_warn(arvif->ar->ab, "failed to assign chanctx for vif %pM link id %u link vif is already started",
vif->addr, link_id);
return -EINVAL;
@@ -8348,6 +9332,8 @@ static int ath12k_mac_flush(struct ath12k *ar)
int ath12k_mac_wait_tx_complete(struct ath12k *ar)
{
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
ath12k_mac_drain_tx(ar);
return ath12k_mac_flush(ar);
}
@@ -8356,7 +9342,11 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
u32 queues, bool drop)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_vif *arvif;
+ struct ath12k_vif *ahvif;
+ unsigned long links;
struct ath12k *ar;
+ u8 link_id;
int i;
lockdep_assert_wiphy(hw->wiphy);
@@ -8371,12 +9361,18 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
return;
}
- ar = ath12k_get_ar_by_vif(hw, vif);
+ for_each_ar(ah, ar, i)
+ wiphy_work_flush(hw->wiphy, &ar->wmi_mgmt_tx_work);
- if (!ar)
- return;
+ ahvif = ath12k_vif_to_ahvif(vif);
+ links = ahvif->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!(arvif && arvif->ar))
+ continue;
- ath12k_mac_flush(ar);
+ ath12k_mac_flush(arvif->ar);
+ }
}
static int
@@ -8575,10 +9571,11 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data,
{
struct ath12k_link_vif *arvif = data;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
- struct ath12k_link_sta *arsta = &ahsta->deflink;
+ struct ath12k_link_sta *arsta;
struct ath12k *ar = arvif->ar;
- if (arsta->arvif != arvif)
+ arsta = rcu_dereference(ahsta->link[arvif->link_id]);
+ if (!arsta || arsta->arvif != arvif)
return;
spin_lock_bh(&ar->data_lock);
@@ -8593,21 +9590,26 @@ static void ath12k_mac_disable_peer_fixed_rate(void *data,
{
struct ath12k_link_vif *arvif = data;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
- struct ath12k_link_sta *arsta = &ahsta->deflink;
+ struct ath12k_link_sta *arsta;
struct ath12k *ar = arvif->ar;
int ret;
- if (arsta->arvif != arvif)
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[arvif->link_id]);
+
+ if (!arsta || arsta->arvif != arvif)
return;
- ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
WMI_FIXED_RATE_NONE);
if (ret)
ath12k_warn(ar->ab,
"failed to disable peer fixed rate for STA %pM ret %d\n",
- sta->addr, ret);
+ arsta->addr, ret);
}
static int
@@ -9159,7 +10161,7 @@ static const struct ieee80211_ops ath12k_ops = {
.set_rekey_data = ath12k_mac_op_set_rekey_data,
.sta_state = ath12k_mac_op_sta_state,
.sta_set_txpwr = ath12k_mac_op_sta_set_txpwr,
- .link_sta_rc_update = ath12k_mac_op_sta_rc_update,
+ .link_sta_rc_update = ath12k_mac_op_link_sta_rc_update,
.conf_tx = ath12k_mac_op_conf_tx,
.set_antenna = ath12k_mac_op_set_antenna,
.get_antenna = ath12k_mac_op_get_antenna,
@@ -9178,7 +10180,7 @@ static const struct ieee80211_ops ath12k_ops = {
.sta_statistics = ath12k_mac_op_sta_statistics,
.remain_on_channel = ath12k_mac_op_remain_on_channel,
.cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
-
+ .change_sta_links = ath12k_mac_op_change_sta_links,
#ifdef CONFIG_PM
.suspend = ath12k_wow_op_suspend,
.resume = ath12k_wow_op_resume,
@@ -9812,36 +10814,32 @@ static void ath12k_mac_setup(struct ath12k *ar)
INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
- INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
+ wiphy_work_init(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
}
-int ath12k_mac_register(struct ath12k_base *ab)
+int ath12k_mac_register(struct ath12k_hw_group *ag)
{
+ struct ath12k_base *ab = ag->ab[0];
struct ath12k_hw *ah;
int i;
int ret;
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
- return 0;
-
- /* Initialize channel counters frequency value in hertz */
- ab->cc_freq_hz = 320000;
- ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
-
- for (i = 0; i < ab->num_hw; i++) {
- ah = ab->ah[i];
+ for (i = 0; i < ath12k_get_num_hw(ab); i++) {
+ ah = ath12k_ab_to_ah(ab, i);
ret = ath12k_mac_hw_register(ah);
if (ret)
goto err;
}
+ set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
return 0;
err:
for (i = i - 1; i >= 0; i--) {
- ah = ab->ah[i];
+ ah = ath12k_ab_to_ah(ab, i);
if (!ah)
continue;
@@ -9851,13 +10849,16 @@ err:
return ret;
}
-void ath12k_mac_unregister(struct ath12k_base *ab)
+void ath12k_mac_unregister(struct ath12k_hw_group *ag)
{
+ struct ath12k_base *ab = ag->ab[0];
struct ath12k_hw *ah;
int i;
- for (i = ab->num_hw - 1; i >= 0; i--) {
- ah = ab->ah[i];
+ clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
+ for (i = ath12k_get_num_hw(ab) - 1; i >= 0; i--) {
+ ah = ath12k_ab_to_ah(ab, i);
if (!ah)
continue;
@@ -9870,12 +10871,13 @@ static void ath12k_mac_hw_destroy(struct ath12k_hw *ah)
ieee80211_free_hw(ah->hw);
}
-static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
+static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_hw_group *ag,
struct ath12k_pdev_map *pdev_map,
u8 num_pdev_map)
{
struct ieee80211_hw *hw;
struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_pdev *pdev;
struct ath12k_hw *ah;
int i;
@@ -9891,6 +10893,7 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
ah->num_radio = num_pdev_map;
mutex_init(&ah->hw_mutex);
+ INIT_LIST_HEAD(&ah->ml_peers);
for (i = 0; i < num_pdev_map; i++) {
ab = pdev_map[i].ab;
@@ -9906,53 +10909,97 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
pdev->ar = ar;
ath12k_mac_setup(ar);
+ ath12k_dp_pdev_pre_alloc(ar);
}
return ah;
}
-void ath12k_mac_destroy(struct ath12k_base *ab)
+void ath12k_mac_destroy(struct ath12k_hw_group *ag)
{
struct ath12k_pdev *pdev;
- int i;
+ struct ath12k_base *ab = ag->ab[0];
+ int i, j;
+ struct ath12k_hw *ah;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- if (!pdev->ar)
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
continue;
- pdev->ar = NULL;
+ for (j = 0; j < ab->num_radios; j++) {
+ pdev = &ab->pdevs[j];
+ if (!pdev->ar)
+ continue;
+ pdev->ar = NULL;
+ }
}
- for (i = 0; i < ab->num_hw; i++) {
- if (!ab->ah[i])
+ for (i = 0; i < ath12k_get_num_hw(ab); i++) {
+ ah = ath12k_ab_to_ah(ab, i);
+ if (!ah)
continue;
- ath12k_mac_hw_destroy(ab->ah[i]);
- ab->ah[i] = NULL;
+ ath12k_mac_hw_destroy(ah);
+ ath12k_ab_set_ah(ab, i, NULL);
}
}
-int ath12k_mac_allocate(struct ath12k_base *ab)
+static void ath12k_mac_set_device_defaults(struct ath12k_base *ab)
+{
+ /* Initialize channel counters frequency value in hertz */
+ ab->cc_freq_hz = 320000;
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+}
+
+int ath12k_mac_allocate(struct ath12k_hw_group *ag)
{
+ struct ath12k_pdev_map pdev_map[ATH12K_GROUP_MAX_RADIO];
+ int mac_id, device_id, total_radio, num_hw;
+ struct ath12k_base *ab;
struct ath12k_hw *ah;
- struct ath12k_pdev_map pdev_map[MAX_RADIOS];
int ret, i, j;
u8 radio_per_hw;
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
- return 0;
+ total_radio = 0;
+ for (i = 0; i < ag->num_devices; i++)
+ total_radio += ag->ab[i]->num_radios;
+
+ /* All pdevs of a hardware group that participates in multi-link
+ * operation are combined and registered as a single wiphy;
+ * otherwise each pdev is registered separately.
+ */
+ if (ag->mlo_capable)
+ radio_per_hw = total_radio;
+ else
+ radio_per_hw = 1;
+
+ num_hw = total_radio / radio_per_hw;
- ab->num_hw = ab->num_radios;
- radio_per_hw = 1;
+ if (WARN_ON(num_hw >= ATH12K_GROUP_MAX_RADIO))
+ return -ENOSPC;
- for (i = 0; i < ab->num_hw; i++) {
+ ag->num_hw = 0;
+ device_id = 0;
+ mac_id = 0;
+ for (i = 0; i < num_hw; i++) {
for (j = 0; j < radio_per_hw; j++) {
+ ab = ag->ab[device_id];
pdev_map[j].ab = ab;
- pdev_map[j].pdev_idx = (i * radio_per_hw) + j;
+ pdev_map[j].pdev_idx = mac_id;
+ mac_id++;
+
+ /* If mac_id goes beyond the current device's MACs,
+ * move to the next device.
+ */
+ if (mac_id >= ab->num_radios) {
+ mac_id = 0;
+ device_id++;
+ ath12k_mac_set_device_defaults(ab);
+ }
}
- ah = ath12k_mac_hw_allocate(ab, pdev_map, radio_per_hw);
+ ah = ath12k_mac_hw_allocate(ag, pdev_map, radio_per_hw);
if (!ah) {
ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n",
i);
@@ -9960,20 +11007,22 @@ int ath12k_mac_allocate(struct ath12k_base *ab)
goto err;
}
- ab->ah[i] = ah;
- }
+ ah->dev = ab->dev;
- ath12k_dp_pdev_pre_alloc(ab);
+ ag->ah[i] = ah;
+ ag->num_hw++;
+ }
return 0;
err:
for (i = i - 1; i >= 0; i--) {
- if (!ab->ah[i])
+ ah = ath12k_ab_to_ah(ab, i);
+ if (!ah)
continue;
- ath12k_mac_hw_destroy(ab->ah[i]);
- ab->ah[i] = NULL;
+ ath12k_mac_hw_destroy(ah);
+ ath12k_ab_set_ah(ab, i, NULL);
}
return ret;
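
A worked example of the grouping arithmetic above, assuming a group of two
devices with two radios each (total_radio = 4):

    /*
     * mlo_capable:  radio_per_hw = 4, num_hw = 1 (one combined wiphy
     *               spanning both devices)
     * otherwise:    radio_per_hw = 1, num_hw = 4 (one wiphy per radio)
     */
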
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index d382337ba649..ccfc215d83ff 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -14,6 +14,7 @@
struct ath12k;
struct ath12k_base;
struct ath12k_hw;
+struct ath12k_hw_group;
struct ath12k_pdev_map;
struct ath12k_generic_iter {
@@ -44,6 +45,12 @@ struct ath12k_generic_iter {
#define ATH12K_DEFAULT_LINK_ID 0
#define ATH12K_INVALID_LINK_ID 255
+/* Default link placed after the IEEE 802.11 defined max link id
+ * limit, for driver-internal use.
+ */
+#define ATH12K_DEFAULT_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS
+#define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + 1)
+
enum ath12k_supported_bw {
ATH12K_BW_20 = 0,
ATH12K_BW_40 = 1,
@@ -54,10 +61,10 @@ enum ath12k_supported_bw {
extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
-void ath12k_mac_destroy(struct ath12k_base *ab);
-void ath12k_mac_unregister(struct ath12k_base *ab);
-int ath12k_mac_register(struct ath12k_base *ab);
-int ath12k_mac_allocate(struct ath12k_base *ab);
+void ath12k_mac_destroy(struct ath12k_hw_group *ag);
+void ath12k_mac_unregister(struct ath12k_hw_group *ag);
+int ath12k_mac_register(struct ath12k_hw_group *ag);
+int ath12k_mac_allocate(struct ath12k_hw_group *ag);
int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
u16 *rate);
u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
@@ -89,5 +96,6 @@ int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval);
u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar);
+int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index cf907550e6a4..06cff3849ab8 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -1123,6 +1123,9 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
+ if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return;
+
__ath12k_pci_ext_irq_disable(ab);
ath12k_pci_sync_ext_irqs(ab);
}
@@ -1147,6 +1150,11 @@ int ath12k_pci_hif_resume(struct ath12k_base *ab)
void ath12k_pci_stop(struct ath12k_base *ab)
{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+ if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
+ return;
+
ath12k_pci_ce_irq_disable_sync(ab);
ath12k_ce_cleanup_pipes(ab);
}
@@ -1717,6 +1725,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
ath12k_pci_power_down(ab, false);
ath12k_qmi_deinit_service(ab);
+ ath12k_core_hw_group_unassign(ab);
goto qmi_fail;
}
@@ -1725,6 +1734,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
cancel_work_sync(&ab->reset_work);
cancel_work_sync(&ab->dump_work);
ath12k_core_deinit(ab);
+ ath12k_fw_unmap(ab);
qmi_fail:
ath12k_mhi_unregister(ab_pci);
diff --git a/drivers/net/wireless/ath/ath12k/peer.c b/drivers/net/wireless/ath/ath12k/peer.c
index 7a62665b8af9..5763c5a40cfc 100644
--- a/drivers/net/wireless/ath/ath12k/peer.c
+++ b/drivers/net/wireless/ath/ath12k/peer.c
@@ -8,6 +8,22 @@
#include "peer.h"
#include "debug.h"
+static struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
+{
+ struct ath12k_ml_peer *ml_peer;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ list_for_each_entry(ml_peer, &ah->ml_peers, list) {
+ if (!ether_addr_equal(ml_peer->addr, addr))
+ continue;
+
+ return ml_peer;
+ }
+
+ return NULL;
+}
+
struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
const u8 *addr)
{
@@ -63,6 +79,20 @@ struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
return NULL;
}
+static struct ath12k_peer *ath12k_peer_find_by_ml_id(struct ath12k_base *ab,
+ int ml_peer_id)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list)
+ if (ml_peer_id == peer->ml_id)
+ return peer;
+
+ return NULL;
+}
+
struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
int peer_id)
{
@@ -70,6 +100,9 @@ struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
lockdep_assert_held(&ab->base_lock);
+ if (peer_id & ATH12K_PEER_ML_ID_VALID)
+ return ath12k_peer_find_by_ml_id(ab, peer_id);
+
list_for_each_entry(peer, &ab->peers, list)
if (peer_id == peer->peer_id)
return peer;
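
The dispatch above relies on firmware peer IDs never setting bit 13: an ID carrying ATH12K_PEER_ML_ID_VALID addresses the ML peer space, anything else the per-link space. A hedged illustration of the encoding (the helper below is hypothetical, not driver API):

	#define ATH12K_PEER_ML_ID_VALID	BIT(13)

	/* hypothetical helper: classify a peer ID by its tag bit */
	static inline bool ath12k_peer_id_is_ml(u16 peer_id)
	{
		return !!(peer_id & ATH12K_PEER_ML_ID_VALID);
	}
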
@@ -231,8 +264,9 @@ int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
return 0;
}
-int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
+static int ath12k_peer_delete_send(struct ath12k *ar, u32 vdev_id, const u8 *addr)
{
+ struct ath12k_base *ab = ar->ab;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -241,12 +275,25 @@ int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
if (ret) {
- ath12k_warn(ar->ab,
+ ath12k_warn(ab,
"failed to delete peer vdev_id %d addr %pM ret %d\n",
vdev_id, addr, ret);
return ret;
}
+ return 0;
+}
+
+int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ ret = ath12k_peer_delete_send(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
if (ret)
return ret;
@@ -266,7 +313,11 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct ath12k_wmi_peer_create_arg *arg)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ath12k_link_sta *arsta;
+ u8 link_id = arvif->link_id;
struct ath12k_peer *peer;
+ struct ath12k_sta *ahsta;
+ u16 ml_peer_id;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -332,6 +383,27 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
arvif->ast_idx = peer->hw_peer_id;
}
+ if (sta) {
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[link_id]);
+
+ /* Fill ML info into created peer */
+ if (sta->mlo) {
+ ml_peer_id = ahsta->ml_peer_id;
+ peer->ml_id = ml_peer_id | ATH12K_PEER_ML_ID_VALID;
+ ether_addr_copy(peer->ml_addr, sta->addr);
+
+ /* the assoc link is considered primary for now */
+ peer->primary_link = arsta->is_assoc_link;
+ peer->mlo = true;
+ } else {
+ peer->ml_id = ATH12K_MLO_PEER_ID_INVALID;
+ peer->primary_link = true;
+ peer->mlo = false;
+ }
+ }
+
peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
@@ -341,3 +413,150 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
return 0;
}
+
+static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
+{
+ u16 ml_peer_id;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ for (ml_peer_id = 0; ml_peer_id < ATH12K_MAX_MLO_PEERS; ml_peer_id++) {
+ if (test_bit(ml_peer_id, ah->free_ml_peer_id_map))
+ continue;
+
+ set_bit(ml_peer_id, ah->free_ml_peer_id_map);
+ break;
+ }
+
+ if (ml_peer_id == ATH12K_MAX_MLO_PEERS)
+ ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+
+ return ml_peer_id;
+}
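
The linear test_bit()/set_bit() scan above behaves like the kernel's find_first_zero_bit(); a sketch of the equivalent form, assuming the same free_ml_peer_id_map bitmap field:

	ml_peer_id = find_first_zero_bit(ah->free_ml_peer_id_map,
					 ATH12K_MAX_MLO_PEERS);
	if (ml_peer_id == ATH12K_MAX_MLO_PEERS)
		return ATH12K_MLO_PEER_ID_INVALID;

	set_bit(ml_peer_id, ah->free_ml_peer_id_map);
	return ml_peer_id;
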
+
+int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta)
+{
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_ml_peer *ml_peer;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!sta->mlo)
+ return -EINVAL;
+
+ ml_peer = ath12k_peer_ml_find(ah, sta->addr);
+ if (ml_peer) {
+ ath12k_hw_warn(ah, "ML peer %d exists already, unable to add new entry for %pM",
+ ml_peer->id, sta->addr);
+ return -EEXIST;
+ }
+
+ ml_peer = kzalloc(sizeof(*ml_peer), GFP_ATOMIC);
+ if (!ml_peer)
+ return -ENOMEM;
+
+ ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah);
+
+ if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+ ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM",
+ sta->addr);
+ kfree(ml_peer);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(ml_peer->addr, sta->addr);
+ ml_peer->id = ahsta->ml_peer_id;
+ list_add(&ml_peer->list, &ah->ml_peers);
+
+ return 0;
+}
+
+int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta)
+{
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+ struct ath12k_ml_peer *ml_peer;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!sta->mlo)
+ return -EINVAL;
+
+ clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
+ ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+
+ ml_peer = ath12k_peer_ml_find(ah, sta->addr);
+ if (!ml_peer) {
+ ath12k_hw_warn(ah, "ML peer for %pM not found", sta->addr);
+ return -EINVAL;
+ }
+
+ list_del(&ml_peer->list);
+ kfree(ml_peer);
+
+ return 0;
+}
+
+int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta)
+{
+ struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+ struct ath12k_hw *ah = ahvif->ah;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ unsigned long links;
+ struct ath12k *ar;
+ int ret, err_ret = 0;
+ u8 link_id;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (!sta->mlo)
+ return -EINVAL;
+
+ /* FW expects all link peers to be deleted at once, before the
+ * host waits for the peer unmap or delete responses.
+ */
+ links = ahsta->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arvif || !arsta)
+ continue;
+
+ ar = arvif->ar;
+ if (!ar)
+ continue;
+
+ ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
+
+ ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to delete peer vdev_id %d addr %pM ret %d\n",
+ arvif->vdev_id, arsta->addr, ret);
+ err_ret = ret;
+ continue;
+ }
+ }
+
+ /* Ensure all link peers are deleted and unmapped */
+ links = ahsta->links_map;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arvif || !arsta)
+ continue;
+
+ ar = arvif->ar;
+ if (!ar)
+ continue;
+
+ ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, arsta->addr);
+ if (ret) {
+ err_ret = ret;
+ continue;
+ }
+ ar->num_peers--;
+ }
+
+ return err_ret;
+}
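
The function above deliberately splits teardown into two passes: phase one sends every delete command, phase two waits for the completions, so firmware observes all link peers being removed at once. A stand-alone sketch of the pattern, with names invented for illustration:

	#include <stdbool.h>

	struct link_peer { bool deleted; };

	static void send_delete(struct link_peer *p)      { p->deleted = true; }
	static int  wait_delete_done(struct link_peer *p) { return p->deleted ? 0 : -1; }

	static int delete_all_link_peers(struct link_peer *peers, int n)
	{
		int i, err = 0;

		for (i = 0; i < n; i++)		/* phase 1: fire all deletes */
			send_delete(&peers[i]);

		for (i = 0; i < n; i++)		/* phase 2: wait on each one */
			if (wait_delete_done(&peers[i]))
				err = -1;	/* record failure, keep going */

		return err;
	}
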
diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
index b955f0cdf598..7e6231cb2b52 100644
--- a/drivers/net/wireless/ath/ath12k/peer.h
+++ b/drivers/net/wireless/ath/ath12k/peer.h
@@ -19,6 +19,8 @@ struct ppdu_user_delayba {
u32 resp_rate_flags;
};
+#define ATH12K_PEER_ML_ID_VALID BIT(13)
+
struct ath12k_peer {
struct list_head list;
struct ieee80211_sta *sta;
@@ -44,9 +46,25 @@ struct ath12k_peer {
struct ppdu_user_delayba ppdu_stats_delayba;
bool delayba_flag;
bool is_authorized;
-
+ bool mlo;
/* protected by ab->data_lock */
bool dp_setup_done;
+
+ u16 ml_id;
+
+ /* any other ML info common to all partners can be added here;
+ * it would be the same for all partner peers.
+ */
+ u8 ml_addr[ETH_ALEN];
+
+ /* ensures certain datapath (dp) work is done only once */
+ bool primary_link;
+};
+
+struct ath12k_ml_peer {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u16 id;
};
void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
@@ -66,5 +84,8 @@ int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
const u8 *addr);
bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id);
struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash);
+int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta);
#endif /* _PEER_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index b93ce9f87f61..ba3cd2342465 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -2023,14 +2023,14 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
u8 hw_link_id = 0;
int i;
- if (!(ab->mlo_capable_flags & ATH12K_INTRA_DEVICE_MLO_SUPPORT)) {
+ if (!ab->ag->mlo_capable) {
ath12k_dbg(ab, ATH12K_DBG_QMI,
- "intra device MLO is disabled hence skip QMI MLO cap");
+ "MLO is disabled hence skip QMI MLO cap");
return;
}
if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
- ab->mlo_capable_flags = 0;
+ ab->single_chip_mlo_supp = false;
ath12k_dbg(ab, ATH12K_DBG_QMI,
"skip QMI MLO cap due to invalid num_radio %d\n",
@@ -2066,7 +2066,9 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
req->mlo_chip_info_valid = 1;
}
-static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req = {};
struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {};
@@ -2174,12 +2176,9 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
goto out;
}
- if (resp.single_chip_mlo_support_valid) {
- if (resp.single_chip_mlo_support)
- ab->mlo_capable_flags |= ATH12K_INTRA_DEVICE_MLO_SUPPORT;
- else
- ab->mlo_capable_flags &= ~ATH12K_INTRA_DEVICE_MLO_SUPPORT;
- }
+ if (resp.single_chip_mlo_support_valid &&
+ resp.single_chip_mlo_support)
+ ab->single_chip_mlo_supp = true;
if (!resp.num_phy_valid) {
ret = -ENODATA;
@@ -2275,7 +2274,9 @@ resp_out:
return ret;
}
-static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
{
struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {};
@@ -2433,7 +2434,9 @@ this_chunk_done:
return 0;
}
-static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
{
struct qmi_wlanfw_cap_req_msg_v01 req = {};
struct qmi_wlanfw_cap_resp_msg_v01 resp = {};
@@ -2619,8 +2622,10 @@ out:
return ret;
}
-static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
- enum ath12k_qmi_bdf_type type)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
+ enum ath12k_qmi_bdf_type type)
{
struct device *dev = ab->dev;
char filename[ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE];
@@ -2791,7 +2796,9 @@ out:
return ret;
}
-static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req = {};
@@ -3023,6 +3030,8 @@ void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
{
int ret;
+ clear_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags);
+
ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send wlan mode off\n");
@@ -3079,9 +3088,69 @@ ath12k_qmi_driver_event_post(struct ath12k_qmi *qmi,
return 0;
}
-static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab)
{
- struct ath12k_base *ab = qmi->ab;
+ struct ath12k_qmi *qmi = &ab->qmi;
+
+ spin_lock(&qmi->event_lock);
+
+ if (ath12k_qmi_get_event_block(qmi))
+ ath12k_qmi_set_event_block(qmi, false);
+
+ spin_unlock(&qmi->event_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "trigger host cap for device id %d\n",
+ ab->device_id);
+
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_HOST_CAP, NULL);
+}
+
+static bool ath12k_qmi_hw_group_host_cap_ready(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+
+ if (!(ab && ab->qmi.num_radios != U8_MAX))
+ return false;
+ }
+
+ return true;
+}
+
+static struct ath12k_base *ath12k_qmi_hw_group_find_blocked(struct ath12k_hw_group *ag)
+{
+ struct ath12k_base *ab;
+ int i;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ ab = ag->ab[i];
+ if (!ab)
+ continue;
+
+ spin_lock(&ab->qmi.event_lock);
+
+ if (ath12k_qmi_get_event_block(&ab->qmi)) {
+ spin_unlock(&ab->qmi.event_lock);
+ return ab;
+ }
+
+ spin_unlock(&ab->qmi.event_lock);
+ }
+
+ return NULL;
+}
+
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab, *block_ab;
+ struct ath12k_hw_group *ag = ab->ag;
int ret;
ath12k_qmi_phy_cap_send(ab);
@@ -3092,16 +3161,30 @@ static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
return ret;
}
- ret = ath12k_qmi_host_cap_send(ab);
- if (ret < 0) {
- ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
- return ret;
+ spin_lock(&qmi->event_lock);
+
+ ath12k_qmi_set_event_block(qmi, true);
+
+ spin_unlock(&qmi->event_lock);
+
+ mutex_lock(&ag->mutex);
+
+ if (ath12k_qmi_hw_group_host_cap_ready(ag)) {
+ ath12k_core_hw_group_set_mlo_capable(ag);
+
+ block_ab = ath12k_qmi_hw_group_find_blocked(ag);
+ if (block_ab)
+ ath12k_qmi_trigger_host_cap(block_ab);
}
+ mutex_unlock(&ag->mutex);
+
return ret;
}
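
The rework above replaces the unconditional host-cap send with group gating: a device parks itself (event_block) after the capability exchange, and only once every device in the group has reported a valid num_radios is the group's MLO capability settled and a parked device woken via ATH12K_QMI_EVENT_HOST_CAP. A toy model of the barrier, with invented names rather than driver structures:

	#include <stdbool.h>

	struct toy_dev { bool arrived; bool parked; };

	/* Each device parks on arrival; the last one to arrive sees the
	 * whole group present and releases one parked device (compare
	 * ath12k_qmi_trigger_host_cap() above). Toy model, not driver code. */
	static void toy_server_arrive(struct toy_dev *grp, int n, int me)
	{
		int i;

		grp[me].arrived = true;
		grp[me].parked = true;

		for (i = 0; i < n; i++)
			if (!grp[i].arrived)
				return;		/* group not complete yet */

		for (i = 0; i < n; i++)
			if (grp[i].parked) {
				grp[i].parked = false;
				break;
			}
	}
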
-static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
int ret;
@@ -3115,7 +3198,9 @@ static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
return ret;
}
-static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
{
struct ath12k_base *ab = qmi->ab;
int ret;
@@ -3280,6 +3365,21 @@ static const struct qmi_ops ath12k_qmi_ops = {
.del_server = ath12k_qmi_ops_del_server,
};
+static int ath12k_qmi_event_host_cap(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath12k_qmi_host_cap_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to send qmi host cap for device id %d: %d\n",
+ ab->device_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
static void ath12k_qmi_driver_event_work(struct work_struct *work)
{
struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi,
@@ -3306,7 +3406,6 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH12K_QMI_EVENT_SERVER_EXIT:
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
- set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
break;
case ATH12K_QMI_EVENT_REQUEST_MEM:
ret = ath12k_qmi_event_mem_request(qmi);
@@ -3320,20 +3419,28 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
break;
case ATH12K_QMI_EVENT_FW_READY:
clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ if (test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags)) {
if (ab->is_reset)
ath12k_hal_dump_srng_stats(ab);
+
+ set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
clear_bit(ATH12K_FLAG_CRASH_FLUSH,
&ab->dev_flags);
- clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
- ath12k_core_qmi_firmware_ready(ab);
- set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+ ret = ath12k_core_qmi_firmware_ready(ab);
+ if (!ret)
+ set_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE,
+ &ab->dev_flags);
break;
+ case ATH12K_QMI_EVENT_HOST_CAP:
+ ret = ath12k_qmi_event_host_cap(qmi);
+ if (ret < 0)
+ set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
default:
ath12k_warn(ab, "invalid event type: %d", event->type);
break;
@@ -3386,11 +3493,15 @@ int ath12k_qmi_init_service(struct ath12k_base *ab)
void ath12k_qmi_deinit_service(struct ath12k_base *ab)
{
+ if (!ab->qmi.ab)
+ return;
+
qmi_handle_release(&ab->qmi.handle);
cancel_work_sync(&ab->qmi.event_work);
destroy_workqueue(ab->qmi.event_wq);
ath12k_qmi_m3_free(ab);
ath12k_qmi_free_target_mem_chunk(ab);
+ ab->qmi.ab = NULL;
}
void ath12k_qmi_free_resource(struct ath12k_base *ab)
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index 0dfcbd8cb59b..98f6009ab21e 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -68,6 +68,7 @@ enum ath12k_qmi_event_type {
ATH12K_QMI_EVENT_FORCE_FW_ASSERT,
ATH12K_QMI_EVENT_POWER_UP,
ATH12K_QMI_EVENT_POWER_DOWN,
+ ATH12K_QMI_EVENT_HOST_CAP,
ATH12K_QMI_EVENT_MAX,
};
@@ -142,6 +143,10 @@ struct ath12k_qmi {
u32 target_mem_mode;
bool target_mem_delayed;
u8 cal_done;
+
+ /* protected by struct ath12k_qmi::event_lock */
+ bool block_event;
+
u8 num_radios;
struct target_info target;
struct m3_mem_region m3_mem;
@@ -594,11 +599,26 @@ struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+static inline void ath12k_qmi_set_event_block(struct ath12k_qmi *qmi, bool block)
+{
+ lockdep_assert_held(&qmi->event_lock);
+
+ qmi->block_event = block;
+}
+
+static inline bool ath12k_qmi_get_event_block(struct ath12k_qmi *qmi)
+{
+ lockdep_assert_held(&qmi->event_lock);
+
+ return qmi->block_event;
+}
+
int ath12k_qmi_firmware_start(struct ath12k_base *ab,
u32 mode);
void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
void ath12k_qmi_deinit_service(struct ath12k_base *ab);
int ath12k_qmi_init_service(struct ath12k_base *ab);
void ath12k_qmi_free_resource(struct ath12k_base *ab);
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index dced2aa9ba1a..402ae477da61 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -821,6 +821,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
struct wmi_vdev_create_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
+ bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
+ struct wmi_vdev_create_mlo_params *ml_params;
struct wmi_tlv *tlv;
int ret, len;
void *ptr;
@@ -830,7 +832,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
* both the bands.
*/
len = sizeof(*cmd) + TLV_HDR_SIZE +
- (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+ (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
+ (is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -879,6 +882,21 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
txrx_streams->supported_rx_streams =
cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);
+ ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+ if (is_ml_vdev) {
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_params));
+ ptr += TLV_HDR_SIZE;
+ ml_params = ptr;
+
+ ml_params->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
+ sizeof(*ml_params));
+ ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
+ }
+
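
WMI TLV buffers are sized before anything is written, so an optional section such as the ML params above contributes header plus payload only when present, and the write cursor must land exactly at the computed length. A tiny stand-alone check of that arithmetic, using made-up sizes rather than real WMI ones:

	#include <stdio.h>

	#define TLV_HDR_SIZE 8

	int main(void)
	{
		int cmd = 32, txrx = 8, ml = 12, bands = 2, is_ml = 1;
		int len = cmd + TLV_HDR_SIZE + bands * txrx +
			  (is_ml ? TLV_HDR_SIZE + ml : 0);

		printf("skb len = %d\n", len);	/* 32 + 8 + 16 + 8 + 12 = 76 */
		return 0;
	}
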
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
args->if_id, args->type, args->subtype,
@@ -1020,19 +1038,27 @@ static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
bool restart)
{
+ struct wmi_vdev_start_mlo_params *ml_params;
+ struct wmi_partner_link_info *partner_info;
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_start_request_cmd *cmd;
struct sk_buff *skb;
struct ath12k_wmi_channel_params *chan;
struct wmi_tlv *tlv;
void *ptr;
- int ret, len;
+ int ret, len, i, ml_arg_size = 0;
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
return -EINVAL;
len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+ if (!restart && arg->ml.enabled) {
+ ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
+ TLV_HDR_SIZE + (arg->ml.num_partner_links *
+ sizeof(*partner_info));
+ len += ml_arg_size;
+ }
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
@@ -1085,6 +1111,61 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
ptr += sizeof(*tlv);
+ if (ml_arg_size) {
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_params));
+ ptr += TLV_HDR_SIZE;
+
+ ml_params = ptr;
+
+ ml_params->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
+ sizeof(*ml_params));
+
+ ml_params->flags = le32_encode_bits(arg->ml.enabled,
+ ATH12K_WMI_FLAG_MLO_ENABLED) |
+ le32_encode_bits(arg->ml.assoc_link,
+ ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
+ le32_encode_bits(arg->ml.mcast_link,
+ ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
+ le32_encode_bits(arg->ml.link_add,
+ ATH12K_WMI_FLAG_MLO_LINK_ADD);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
+ arg->vdev_id, ml_params->flags);
+
+ ptr += sizeof(*ml_params);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ arg->ml.num_partner_links *
+ sizeof(*partner_info));
+ ptr += TLV_HDR_SIZE;
+
+ partner_info = ptr;
+
+ for (i = 0; i < arg->ml.num_partner_links; i++) {
+ partner_info->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
+ sizeof(*partner_info));
+ partner_info->vdev_id =
+ cpu_to_le32(arg->ml.partner_info[i].vdev_id);
+ partner_info->hw_link_id =
+ cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
+ ether_addr_copy(partner_info->vdev_addr.addr,
+ arg->ml.partner_info[i].addr);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr%pM\n",
+ partner_info->vdev_id, partner_info->hw_link_id,
+ partner_info->vdev_addr.addr);
+
+ partner_info++;
+ }
+
+ ptr = partner_info;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
restart ? "restart" : "start", arg->vdev_id,
arg->freq, arg->mode);
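
The ML start flags above are built with le32_encode_bits(), which masks the value into the named bit-field and returns it as little-endian; ORing the results composes the flags word. A minimal sketch, assuming the flag masks defined later in wmi.h:

	__le32 flags;

	/* enabled + assoc-link set, everything else clear */
	flags = le32_encode_bits(1, ATH12K_WMI_FLAG_MLO_ENABLED) |
		le32_encode_bits(1, ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
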
@@ -1149,9 +1230,14 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
- int ret;
+ int ret, len;
+ struct wmi_peer_create_mlo_params *ml_param;
+ void *ptr;
+ struct wmi_tlv *tlv;
- skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
return -ENOMEM;
@@ -1163,9 +1249,23 @@ int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
cmd->peer_type = cpu_to_le32(arg->peer_type);
cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ ptr = skb->data + sizeof(*cmd);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*ml_param));
+ ptr += TLV_HDR_SIZE;
+ ml_param = ptr;
+ ml_param->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
+ sizeof(*ml_param));
+ if (arg->ml_enabled)
+ ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ ptr += sizeof(*ml_param);
+
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
- "WMI peer create vdev_id %d peer_addr %pM\n",
- arg->vdev_id, arg->peer_addr);
+ "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
+ arg->vdev_id, arg->peer_addr, ml_param->flags);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
if (ret) {
@@ -2001,12 +2101,15 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
struct ath12k_wmi_vht_rate_set_params *mcs;
struct ath12k_wmi_he_rate_set_params *he_mcs;
struct ath12k_wmi_eht_rate_set_params *eht_mcs;
+ struct wmi_peer_assoc_mlo_params *ml_params;
+ struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
u32 peer_legacy_rates_align;
u32 peer_ht_rates_align;
int i, ret, len;
+ __le32 v;
peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
sizeof(u32));
@@ -2018,8 +2121,13 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
sizeof(*mcs) + TLV_HDR_SIZE +
(sizeof(*he_mcs) * arg->peer_he_mcs_count) +
- TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) +
- TLV_HDR_SIZE + TLV_HDR_SIZE;
+ TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
+
+ if (arg->ml.enabled)
+ len += TLV_HDR_SIZE + sizeof(*ml_params) +
+ TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
+ else
+ len += (2 * TLV_HDR_SIZE);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -2143,12 +2251,38 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ptr += sizeof(*he_mcs);
}
- /* MLO header tag with 0 length */
- len = 0;
tlv = ptr;
+ len = arg->ml.enabled ? sizeof(*ml_params) : 0;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
+ if (!len)
+ goto skip_ml_params;
+
+ ml_params = ptr;
+ ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
+ len);
+ ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ if (arg->ml.assoc_link)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
+
+ if (arg->ml.primary_umac)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
+
+ if (arg->ml.logical_link_idx_valid)
+ ml_params->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
+
+ if (arg->ml.peer_id_valid)
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
+ ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
+ ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
+ ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
+ ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
+ ptr += sizeof(*ml_params);
+
+skip_ml_params:
/* Loop through the EHT rate set */
len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
tlv = ptr;
@@ -2165,12 +2299,45 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
ptr += sizeof(*eht_mcs);
}
- /* ML partner links tag with 0 length */
- len = 0;
tlv = ptr;
+ len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
+ /* fill ML partner links */
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
ptr += TLV_HDR_SIZE;
+ if (len == 0)
+ goto send;
+
+ for (i = 0; i < arg->ml.num_partner_links; i++) {
+ u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
+
+ partner_info = ptr;
+ partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd,
+ sizeof(*partner_info));
+ partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
+ partner_info->hw_link_id =
+ cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
+ partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+ if (arg->ml.partner_info[i].assoc_link)
+ partner_info->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
+
+ if (arg->ml.partner_info[i].primary_umac)
+ partner_info->flags |=
+ cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
+
+ if (arg->ml.partner_info[i].logical_link_idx_valid) {
+ v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
+ partner_info->flags |= v;
+ }
+
+ partner_info->logical_link_idx =
+ cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
+ ptr += sizeof(*partner_info);
+ }
+
+send:
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
@@ -6687,6 +6854,7 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
const u32 *vdev_ids)
{
int i;
+ struct ieee80211_bss_conf *conf;
struct ath12k_link_vif *arvif;
struct ath12k_vif *ahvif;
@@ -6705,7 +6873,20 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
}
ahvif = arvif->ahvif;
- if (arvif->is_up && ahvif->vif->bss_conf.csa_active)
+ if (arvif->link_id > IEEE80211_MLD_MAX_NUM_LINKS) {
+ ath12k_warn(ab, "Invalid CSA switch count even link id: %d\n",
+ arvif->link_id);
+ continue;
+ }
+
+ conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
+ if (!conf) {
+ ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ continue;
+ }
+
+ if (arvif->is_up && conf->csa_active)
ieee80211_csa_finish(ahvif->vif, 0);
}
rcu_read_unlock();
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 6f55dbdf629d..05aa9754118a 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -1929,6 +1929,19 @@ enum wmi_tlv_tag {
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
WMI_TAG_EHT_RATE_SET = 0x3C4,
+ WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
+ WMI_TAG_MLO_TX_SEND_PARAMS,
+ WMI_TAG_MLO_PARTNER_LINK_PARAMS,
+ WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC,
+ WMI_TAG_MLO_SETUP_CMD = 0x3C9,
+ WMI_TAG_MLO_SETUP_COMPLETE_EVENT,
+ WMI_TAG_MLO_READY_CMD,
+ WMI_TAG_MLO_TEARDOWN_CMD,
+ WMI_TAG_MLO_TEARDOWN_COMPLETE,
+ WMI_TAG_MLO_PEER_ASSOC_PARAMS = 0x3D0,
+ WMI_TAG_MLO_PEER_CREATE_PARAMS = 0x3D5,
+ WMI_TAG_MLO_VDEV_START_PARAMS = 0x3D6,
+ WMI_TAG_MLO_VDEV_CREATE_PARAMS = 0x3D7,
WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9,
WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB,
@@ -2740,6 +2753,7 @@ struct ath12k_wmi_vdev_create_arg {
u8 if_stats_id;
u32 mbssid_flags;
u32 mbssid_tx_vdev_id;
+ u8 mld_addr[ETH_ALEN];
};
#define ATH12K_MAX_VDEV_STATS_ID 0x30
@@ -2766,6 +2780,33 @@ struct ath12k_wmi_vdev_txrx_streams_params {
__le32 supported_rx_streams;
} __packed;
+struct wmi_vdev_create_mlo_params {
+ __le32 tlv_header;
+ struct ath12k_wmi_mac_addr_params mld_macaddr;
+} __packed;
+
+#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
+#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1)
+#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2)
+#define ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID BIT(3)
+#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4)
+#define ATH12K_WMI_FLAG_MLO_MCAST_VDEV BIT(5)
+#define ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT BIT(6)
+#define ATH12K_WMI_FLAG_MLO_FORCED_INACTIVE BIT(7)
+#define ATH12K_WMI_FLAG_MLO_LINK_ADD BIT(8)
+
+struct wmi_vdev_start_mlo_params {
+ __le32 tlv_header;
+ __le32 flags;
+} __packed;
+
+struct wmi_partner_link_info {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 hw_link_id;
+ struct ath12k_wmi_mac_addr_params vdev_addr;
+} __packed;
+
struct wmi_vdev_delete_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -2909,6 +2950,27 @@ enum wmi_phy_mode {
MODE_MAX = 33,
};
+#define ATH12K_WMI_MLO_MAX_LINKS 4
+
+struct wmi_ml_partner_info {
+ u32 vdev_id;
+ u32 hw_link_id;
+ u8 addr[ETH_ALEN];
+ bool assoc_link;
+ bool primary_umac;
+ bool logical_link_idx_valid;
+ u32 logical_link_idx;
+};
+
+struct wmi_ml_arg {
+ bool enabled;
+ bool assoc_link;
+ bool mcast_link;
+ bool link_add;
+ u8 num_partner_links;
+ struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+};
+
struct wmi_vdev_start_req_arg {
u32 vdev_id;
u32 freq;
@@ -2946,12 +3008,19 @@ struct wmi_vdev_start_req_arg {
u32 mbssid_flags;
u32 mbssid_tx_vdev_id;
u32 punct_bitmap;
+ struct wmi_ml_arg ml;
};
struct ath12k_wmi_peer_create_arg {
const u8 *peer_addr;
u32 peer_type;
u32 vdev_id;
+ bool ml_enabled;
+};
+
+struct wmi_peer_create_mlo_params {
+ __le32 tlv_header;
+ __le32 flags;
};
struct ath12k_wmi_pdev_set_regdomain_arg {
@@ -3618,6 +3687,24 @@ struct wmi_vdev_install_key_arg {
#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
+#define ATH12K_WMI_MLO_MAX_PARTNER_LINKS \
+ (ATH12K_WMI_MLO_MAX_LINKS + ATH12K_MAX_NUM_BRIDGE_LINKS - 1)
+
+struct peer_assoc_mlo_params {
+ bool enabled;
+ bool assoc_link;
+ bool primary_umac;
+ bool peer_id_valid;
+ bool logical_link_idx_valid;
+ bool bridge_peer;
+ u8 mld_addr[ETH_ALEN];
+ u32 logical_link_idx;
+ u32 ml_peer_id;
+ u32 ieee_link_id;
+ u8 num_partner_links;
+ struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+};
+
struct wmi_rate_set_arg {
u32 num_rates;
u8 rates[WMI_MAX_SUPPORTED_RATES];
@@ -3692,8 +3779,36 @@ struct ath12k_wmi_peer_assoc_arg {
u32 peer_eht_tx_mcs_set[WMI_MAX_EHTCAP_RATE_SET];
struct ath12k_wmi_ppe_threshold_arg peer_eht_ppet;
u32 punct_bitmap;
+ bool is_assoc;
+ struct peer_assoc_mlo_params ml;
};
+#define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0)
+#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1)
+#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2)
+#define ATH12K_WMI_FLAG_MLO_LINK_ID_VALID BIT(3)
+#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4)
+
+struct wmi_peer_assoc_mlo_partner_info_params {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 hw_link_id;
+ __le32 flags;
+ __le32 logical_link_idx;
+} __packed;
+
+struct wmi_peer_assoc_mlo_params {
+ __le32 tlv_header;
+ __le32 flags;
+ struct ath12k_wmi_mac_addr_params mld_addr;
+ __le32 logical_link_idx;
+ __le32 ml_peer_id;
+ __le32 ieee_link_id;
+ __le32 emlsr_trans_timeout_us;
+ __le32 emlsr_trans_delay_us;
+ __le32 emlsr_padding_delay_us;
+} __packed;
+
struct wmi_peer_assoc_complete_cmd {
__le32 tlv_header;
struct ath12k_wmi_mac_addr_params peer_macaddr;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 61b2e3f15f0e..72ce321f2a77 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1441,6 +1441,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ unsigned int link_id,
int *dbm)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index acc84e6711b0..e5e274bc9e68 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -193,7 +193,7 @@ static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
static void ath_ant_set_alt_ratio(struct ath_ant_comb *antcomb,
struct ath_hw_antcomb_conf *conf)
{
- /* set alt to the conf with maximun ratio */
+ /* set alt to the conf with maximum ratio */
if (antcomb->first_ratio && antcomb->second_ratio) {
if (antcomb->rssi_second > antcomb->rssi_third) {
/* first alt*/
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index d08ea0b28530..b26224480041 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -395,7 +395,7 @@ static void ar9002_hw_init_hang_checks(struct ath_hw *ah)
ah->config.hw_hang_checks |= HW_MAC_HANG;
}
-/* Sets up the AR5008/AR9001/AR9002 hardware familiy callbacks */
+/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
int ar9002_hw_attach_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index e9bd13eeee92..6595eca74997 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -1170,7 +1170,7 @@ exit:
return false;
}
-/* Sets up the AR9003 hardware familiy callbacks */
+/* Sets up the AR9003 hardware family callbacks */
void ar9003_hw_attach_ops(struct ath_hw *ah)
{
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 2b9c07961cd7..3f0543e55d9b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -637,7 +637,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
* same time. Since BT's calibration doesn't happen
* that often, we'll let BT completes calibration then
* we continue to wait for cal_grant from BT.
- * Orginal: Wait BT_CAL_GRANT.
+ * Original: Wait BT_CAL_GRANT.
* New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT->wait
* BT_CAL_DONE -> Wait BT_CAL_GRANT.
*/
@@ -747,7 +747,7 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
* BT is sleeping. Check if BT wakes up during
* WLAN calibration. If BT wakes up during
* WLAN calibration, need to go through all
- * message exchanges again and recal.
+ * message exchanges again and recalibrate.
*/
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
(AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index ad72a30b67c3..e13873fb8e2f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -246,7 +246,7 @@
/*
- * MRC Feild Definitions
+ * MRC Field Definitions
*/
#define AR_PHY_SGI_DSC_MAN 0x0007FFF0
#define AR_PHY_SGI_DSC_MAN_S 4
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 29ca65a732a6..bcfc8df0efe5 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -1018,6 +1018,8 @@ struct ath_softc {
u8 gtt_cnt;
u32 intrstatus;
+ u32 rx_active_check_time;
+ u32 rx_active_count;
u16 ps_flags; /* PS_* */
bool ps_enabled;
bool ps_idle;
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 571062f2e82a..02237d106f8c 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -17,7 +17,7 @@
#include "ath9k.h"
/* Set/change channels. If the channel is really being changed, it's done
- * by reseting the chip. To accomplish this we must first cleanup any pending
+ * by resetting the chip. To accomplish this we must first cleanup any pending
* DMA, then restart stuff.
*/
static int ath_set_channel(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 4b27445a5fb8..628eeec4b82f 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -734,7 +734,7 @@ void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
ATH9K_RX_FILTER_PHYRADAR |
ATH9K_RX_FILTER_PHYERR);
- /* TODO: usually this should not be neccesary, but for some reason
+ /* TODO: usually this should not be necessary, but for some reason
* (or in some mode?) the trigger must be called after the
* configuration, otherwise the register will have its values reset
* (on my ar9220 to value 0x01002310)
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index eff894958a73..74a0134075cf 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -750,6 +750,7 @@ static int read_file_reset(struct seq_file *file, void *data)
[RESET_TYPE_CALIBRATION] = "Calibration error",
[RESET_TX_DMA_ERROR] = "Tx DMA stop error",
[RESET_RX_DMA_ERROR] = "Rx DMA stop error",
+ [RESET_TYPE_RX_INACTIVE] = "Rx path inactive",
};
int i;
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 389459c04d14..cb3e75969875 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -53,6 +53,7 @@ enum ath_reset_type {
RESET_TYPE_CALIBRATION,
RESET_TX_DMA_ERROR,
RESET_RX_DMA_ERROR,
+ RESET_TYPE_RX_INACTIVE,
__RESET_TYPE_MAX
};
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 3689e12db9f7..2fb73a5e1d51 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -79,7 +79,7 @@ static int ath9k_get_max_index_ht40(struct ath9k_dfs_fft_40 *fft,
const int DFS_UPPER_BIN_OFFSET = 64;
/* if detected radar on both channels, select the significant one */
if (is_ctl && is_ext) {
- /* first check wether channels have 'strong' bins */
+ /* first check whether channels have 'strong' bins */
is_ctl = fft_bitmap_weight(fft->lower_bins) != 0;
is_ext = fft_bitmap_weight(fft->upper_bins) != 0;
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 7265766cddbd..fe9abe8cd268 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1198,7 +1198,7 @@ static int ath9k_hif_request_firmware(struct hif_device_usb *hif_dev,
filename = FIRMWARE_AR9271;
/* expected fw locations:
- * - htc_9271.fw (stable version 1.3, depricated)
+ * - htc_9271.fw (stable version 1.3, deprecated)
*/
snprintf(hif_dev->fw_name, sizeof(hif_dev->fw_name),
"%s", filename);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index e2bef099adb3..a25eacabc664 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2149,7 +2149,7 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah)
/* When chip goes into network sleep, it could be waken
* up by MCI_INT interrupt caused by BT's HW messages
- * (LNA_xxx, CONT_xxx) which chould be in a very fast
+ * (LNA_xxx, CONT_xxx) which could be in a very fast
* rate (~100us). This will cause chip to leave and
* re-enter network sleep mode frequently, which in
* consequence will have WLAN MCI HW to generate lots of
@@ -2544,7 +2544,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
/*
- * For AR9271 we will temporarilly uses the rx chainmax as read from
+ * For AR9271 we will temporarily use the rx chainmax as read from
* the EEPROM.
*/
if ((ah->hw_version.devid == AR5416_DEVID_PCI) &&
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 450ab19b1d4e..e2cbf3f00da0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -282,7 +282,7 @@ enum ath9k_hw_caps {
* an exact user defined pattern or de-authentication/disassoc pattern.
* @ATH9K_HW_WOW_PATTERN_MATCH_DWORD: device requires the first four
* bytes of the pattern for user defined pattern, de-authentication and
- * disassociation patterns for all types of possible frames recieved
+ * disassociation patterns for all types of possible frames received
* of those types.
*/
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index d1e5767aab3c..d078a59d7d3c 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -50,7 +50,36 @@ reset:
"tx hung, resetting the chip\n");
ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
return false;
+}
+
+#define RX_INACTIVE_CHECK_INTERVAL (4 * MSEC_PER_SEC)
+
+static bool ath_hw_rx_inactive_check(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u32 interval, count;
+
+ interval = jiffies_to_msecs(jiffies - sc->rx_active_check_time);
+ count = sc->rx_active_count;
+
+ if (interval < RX_INACTIVE_CHECK_INTERVAL)
+ return true; /* too soon to check */
+ sc->rx_active_count = 0;
+ sc->rx_active_check_time = jiffies;
+
+ /* Need at least one interrupt per second, and we should only react if
+ * we are within a factor two of the expected interval
+ */
+ if (interval > RX_INACTIVE_CHECK_INTERVAL * 2 ||
+ count >= interval / MSEC_PER_SEC)
+ return true;
+
+ ath_dbg(common, RESET,
+ "RX inactivity detected. Schedule chip reset\n");
+ ath9k_queue_reset(sc, RESET_TYPE_RX_INACTIVE);
+
+ return false;
}
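
The check above reduces to a rate test: over a sample window measured in milliseconds, at least one RX interrupt per second must have been counted, and stale windows (more than twice the check interval) are ignored. Restated as a free-standing predicate, with the 4-second interval hard-coded for clarity:

	/* true if the RX path still looks alive for this sample window */
	static bool rx_looks_active(unsigned int interval_ms, unsigned int count)
	{
		if (interval_ms > 2 * 4000)	/* sample window too old, skip */
			return true;

		return count >= interval_ms / 1000;	/* >= 1 IRQ per second */
	}
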
void ath_hw_check_work(struct work_struct *work)
@@ -58,8 +87,8 @@ void ath_hw_check_work(struct work_struct *work)
struct ath_softc *sc = container_of(work, struct ath_softc,
hw_check_work.work);
- if (!ath_hw_check(sc) ||
- !ath_tx_complete_check(sc))
+ if (!ath_hw_check(sc) || !ath_tx_complete_check(sc) ||
+ !ath_hw_rx_inactive_check(sc))
return;
ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index f03d792732da..16203e7ecf29 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -251,7 +251,7 @@ struct ath_desc {
* when the descriptor is specifically marked to generate
* an interrupt with this flag. Descriptors should be
* marked periodically to insure timely replenishing of the
- * supply needed for sending frames. Defering interrupts
+ * supply needed for sending frames. Deferring interrupts
* reduces system load and potentially allows more concurrent
* work to be done but if done to aggressively can cause
* senders to backup. When the hardware queue is left too
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b92c89dad8de..812e0c6bde3e 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -453,6 +453,7 @@ void ath9k_tasklet(struct tasklet_struct *t)
ath_rx_tasklet(sc, 0, true);
ath_rx_tasklet(sc, 0, false);
+ sc->rx_active_count++;
}
if (status & ATH9K_INT_TX) {
@@ -1001,7 +1002,7 @@ static bool ath9k_uses_beacons(int type)
static void ath9k_vif_iter_set_beacon(struct ath9k_vif_iter_data *iter_data,
struct ieee80211_vif *vif)
{
- /* Use the first (configured) interface, but prefering AP interfaces. */
+ /* Use the first (configured) interface, but preferring AP interfaces. */
if (!iter_data->primary_beacon_vif) {
iter_data->primary_beacon_vif = vif;
} else {
@@ -2767,7 +2768,7 @@ void ath9k_fill_chanctx_ops(void)
#endif
static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
struct ath_softc *sc = hw->priv;
struct ath_vif *avp = (void *)vif->drv_priv;
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index 8d0b1730a9d5..ed4152cd44f0 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -60,7 +60,7 @@ static int ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
/*
- * Create Dissassociate / Deauthenticate packet filter
+ * Create Disassociate / Deauthenticate packet filter
*
* 2 bytes 2 byte 6 bytes 6 bytes 6 bytes
* +--------------+----------+---------+--------+--------+----
@@ -70,7 +70,7 @@ static int ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
* The above is the management frame format for disassociate/
* deauthenticate pattern, from this we need to match the first byte
* of 'Frame Control' and DA, SA, and BSSID fields
- * (skipping 2nd byte of FC and Duration feild.
+ * (skipping 2nd byte of FC and Duration field.
*
* Disassociate pattern
* --------------------
@@ -225,7 +225,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
ath9k_stop_btcoex(sc);
/*
- * Enable wake up on recieving disassoc/deauth
+ * Enable wake up on receiving disassoc/deauth
* frame by default.
*/
ret = ath9k_wow_add_disassoc_deauth_pattern(sc);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 35aa47a9db90..0a24439dd30d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -557,7 +557,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
/*
* AR5416 can become deaf/mute when BA
* issue happens. Chip needs to be reset.
- * But AP code may have sychronization issues
+ * But AP code may have synchronization issues
* when perform internal reset in this routine.
* Only enable reset in STA mode for now.
*/
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 8557d4826a46..94d08d6ae1a3 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1590,7 +1590,10 @@ static int wcn36xx_probe(struct platform_device *pdev)
}
n_channels = wcn_band_2ghz.n_channels + wcn_band_5ghz.n_channels;
- wcn->chan_survey = devm_kmalloc(wcn->dev, n_channels, GFP_KERNEL);
+ wcn->chan_survey = devm_kcalloc(wcn->dev,
+ n_channels,
+ sizeof(struct wcn36xx_chan_survey),
+ GFP_KERNEL);
if (!wcn->chan_survey) {
ret = -ENOMEM;
goto out_wq;
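
The fix above addresses a classic sizing bug: devm_kmalloc() was given a count of elements where a byte count was expected, under-allocating the survey array. The kcalloc-style variant takes count and element size separately, zeroes the buffer, and guards the multiplication against overflow. Shape of the change, in general terms:

	/* before: n bytes, not n elements -- too small */
	buf = devm_kmalloc(dev, n, GFP_KERNEL);

	/* after: n * sizeof(*buf), zeroed, overflow-checked */
	buf = devm_kcalloc(dev, n, sizeof(*buf), GFP_KERNEL);
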
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 42d991d9f8cb..60eb95fc19a5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -455,6 +455,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
if (sg_data_sz > max_req_sz - req_sz)
sg_data_sz = max_req_sz - req_sz;
+ if (!sgl) {
+ /* out of (pre-allocated) scatterlist entries */
+ ret = -ENOMEM;
+ goto exit;
+ }
sg_set_buf(sgl, pkt_data, sg_data_sz);
sg_cnt++;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 297a7c738c01..902ac3108782 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2676,7 +2676,7 @@ done:
static s32
brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- s32 *dbm)
+ unsigned int link_id, s32 *dbm)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev);
@@ -4999,12 +4999,16 @@ exit:
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif)
{
static const s32 pktflags[] = {
- BRCMF_VNDR_IE_PRBREQ_FLAG,
BRCMF_VNDR_IE_PRBRSP_FLAG,
BRCMF_VNDR_IE_BEACON_FLAG
};
int i;
+ if (vif->wdev.iftype == NL80211_IFTYPE_AP)
+ brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_ASSOCRSP_FLAG, NULL, 0);
+ else
+ brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG, NULL, 0);
+
for (i = 0; i < ARRAY_SIZE(pktflags); i++)
brcmf_vif_set_mgmt_ie(vif, pktflags[i], NULL, 0);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index da72fd2d541f..c3a57e30c855 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -327,8 +327,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0);
- brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n",
- brcmf_ifname(ifp), head_delta);
+ brcmf_dbg(INFO, "%s: %s headroom\n", brcmf_ifname(ifp),
+ head_delta ? "insufficient" : "unmodifiable");
atomic_inc(&drvr->bus_if->stats.pktcowed);
ret = pskb_expand_head(skb, ALIGN(head_delta, NET_SKB_PAD), 0,
GFP_ATOMIC);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index 31e080e4da66..ab3d6cfcb02b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -6,6 +6,8 @@
#ifndef _fwil_h_
#define _fwil_h_
+#include "debug.h"
+
/*******************************************************************************
* Dongle command codes that are interpreted by firmware
******************************************************************************/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index d69879e1bd87..d362c4337616 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -23423,6 +23423,9 @@ wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no,
break;
}
+ if (WARN_ON(k == NPHY_IQCAL_NUMGAINS))
+ return;
+
params->txgm = tbl_iqcal_gainparams_nphy[band_idx][k][1];
params->pga = tbl_iqcal_gainparams_nphy[band_idx][k][2];
params->pad = tbl_iqcal_gainparams_nphy[band_idx][k][3];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
index 080a1587caa5..0f7fa6032c66 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
@@ -104,9 +104,9 @@ static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = {
};
enum iwl_mvm_vendor_events_idx {
- /* 0x0 - 0x3 are deprecated */
- IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4,
- NUM_IWL_MVM_VENDOR_EVENT_IDX
+ /* 0x0 - 0x3 are deprecated */
+ IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4,
+ NUM_IWL_MVM_VENDOR_EVENT_IDX
};
static const struct nl80211_vendor_cmd_info
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index fca3eea7ee84..a099fdaafa45 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -410,7 +410,7 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
static int
mwifiex_cfg80211_get_tx_power(struct wiphy *wiphy,
struct wireless_dev *wdev,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_private *priv = mwifiex_get_priv(adapter,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index e06a0622973e..f79589cafe57 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -545,7 +545,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
adapter->hs_activate_wait_q_woken,
- (10 * HZ)) <= 0) {
+ (5 * HZ)) <= 0) {
mwifiex_dbg(adapter, ERROR,
"hs_activate_wait_q terminated\n");
return false;
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 9d5561f44134..7fbce5e757df 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -1596,7 +1596,7 @@ void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
struct mt76_phy *phy = hw->priv;
int n_chains = hweight16(phy->chainmask);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 0b75a45ad2e8..ca2dba3ac65d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -1431,7 +1431,7 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int *dbm);
+ unsigned int link_id, int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
int mt76_get_sar_power(struct mt76_phy *phy,
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index e96736cc7259..e7aa0f991923 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -1669,7 +1669,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
}
static int get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
int ret;
struct wilc_vif *vif = netdev_priv(wdev->netdev);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 8b97accf6638..0b2282528342 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -881,7 +881,7 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- int *dbm)
+ unsigned int link_id, int *dbm)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
int ret;
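
The signature changes in mwifiex, mt76, wilc1000, and qtnfmac above all track the same cfg80211/mac80211 change: the TX-power getters now receive a link_id so multi-link (MLO) interfaces can report per-link power. A hedged sketch of the driver-side shape for a non-MLO driver (my_phy_power_dbm is hypothetical, not any of these drivers' APIs):

/* Hedged sketch: a get_txpower handler that can ignore link_id
 * because the hardware has a single link.
 */
static int my_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  unsigned int link_id, int *dbm)
{
	/* Non-MLO hardware reports the same power for every link. */
	*dbm = my_phy_power_dbm(hw->priv);
	return 0;
}
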
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index b375a4751580..a377d85c2451 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -102,7 +102,7 @@ struct qtnf_wmac {
struct qtnf_mac_info macinfo;
struct qtnf_vif iflist[QTNF_MAX_INTF];
struct cfg80211_scan_request *scan_req;
- struct mutex mac_lock; /* lock during wmac speicific ops */
+ struct mutex mac_lock; /* lock during wmac specific ops */
struct delayed_work scan_timeout;
struct ieee80211_regdomain *rd;
struct platform_device *pdev;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index f95898f68d68..4ce0c05c5129 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -8147,6 +8147,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817e, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8186, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818a, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
@@ -8157,12 +8159,18 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1102, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x11f2, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8188, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9043, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ba, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1e1e, 0xff, 0xff, 0xff),
@@ -8179,6 +8187,10 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3358, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3359, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff),
@@ -8193,6 +8205,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x9846, 0x9041, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff),
@@ -8218,6 +8232,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x341f, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe035, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ab, 0xff, 0xff, 0xff),
@@ -8226,6 +8242,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0070, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0077, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016d, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x07aa, 0x0056, 0xff, 0xff, 0xff),
@@ -8248,6 +8266,8 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330a, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330d, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x624d, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index aab4605de9c4..ff61867d142f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -575,9 +575,15 @@ static void rtl_free_entries_from_ack_queue(struct ieee80211_hw *hw,
void rtl_deinit_core(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
rtl_c2hcmd_launcher(hw, 0);
rtl_free_entries_from_scan_list(hw);
rtl_free_entries_from_ack_queue(hw, false);
+ if (rtlpriv->works.rtl_wq) {
+ destroy_workqueue(rtlpriv->works.rtl_wq);
+ rtlpriv->works.rtl_wq = NULL;
+ }
}
EXPORT_SYMBOL_GPL(rtl_deinit_core);
@@ -2696,9 +2702,6 @@ MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
-struct rtl_global_var rtl_global_var = {};
-EXPORT_SYMBOL_GPL(rtl_global_var);
-
static int __init rtl_core_module_init(void)
{
BUILD_BUG_ON(TX_PWR_BY_RATE_NUM_RATE < TX_PWR_BY_RATE_NUM_SECTION);
@@ -2712,10 +2715,6 @@ static int __init rtl_core_module_init(void)
/* add debugfs */
rtl_debugfs_add_topdir();
- /* init some global vars */
- INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
- spin_lock_init(&rtl_global_var.glb_list_lock);
-
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index f081a9a90563..f3a6a43a42ec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -124,7 +124,6 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
u8 rtl_tid_to_ac(u8 tid);
-extern struct rtl_global_var rtl_global_var;
void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 11709b6c83f1..0eafc4d125f9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -295,46 +295,6 @@ static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
return status;
}
-static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
- struct rtl_priv **buddy_priv)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- struct rtl_priv *tpriv = NULL, *iter;
- struct rtl_pci_priv *tpcipriv = NULL;
-
- if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
- list_for_each_entry(iter, &rtlpriv->glb_var->glb_priv_list,
- list) {
- tpcipriv = (struct rtl_pci_priv *)iter->priv;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "pcipriv->ndis_adapter.funcnumber %x\n",
- pcipriv->ndis_adapter.funcnumber);
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "tpcipriv->ndis_adapter.funcnumber %x\n",
- tpcipriv->ndis_adapter.funcnumber);
-
- if (pcipriv->ndis_adapter.busnumber ==
- tpcipriv->ndis_adapter.busnumber &&
- pcipriv->ndis_adapter.devnumber ==
- tpcipriv->ndis_adapter.devnumber &&
- pcipriv->ndis_adapter.funcnumber !=
- tpcipriv->ndis_adapter.funcnumber) {
- tpriv = iter;
- break;
- }
- }
- }
-
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "find_buddy_priv %d\n", tpriv != NULL);
-
- if (tpriv)
- *buddy_priv = tpriv;
-
- return tpriv != NULL;
-}
-
static void rtl_pci_parse_configuration(struct pci_dev *pdev,
struct ieee80211_hw *hw)
{
@@ -1696,8 +1656,6 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
synchronize_irq(rtlpci->pdev->irq);
tasklet_kill(&rtlpriv->works.irq_tasklet);
cancel_work_sync(&rtlpriv->works.lps_change_work);
-
- destroy_workqueue(rtlpriv->works.rtl_wq);
}
static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
@@ -2011,7 +1969,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
pcipriv->ndis_adapter.amd_l1_patch);
rtl_pci_parse_configuration(pdev, hw);
- list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
return true;
}
@@ -2158,7 +2115,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
rtlpriv->rtlhal.interface = INTF_PCI;
rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
rtlpriv->intf_ops = &rtl_pci_ops;
- rtlpriv->glb_var = &rtl_global_var;
rtl_efuse_ops_init(hw);
/* MEM map */
@@ -2209,7 +2165,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
pr_err("Can't init_sw_vars\n");
err = -ENODEV;
- goto fail3;
+ goto fail2;
}
rtl_init_sw_leds(hw);
@@ -2227,14 +2183,14 @@ int rtl_pci_probe(struct pci_dev *pdev,
err = rtl_pci_init(hw, pdev);
if (err) {
pr_err("Failed to init PCI\n");
- goto fail3;
+ goto fail4;
}
err = ieee80211_register_hw(hw);
if (err) {
pr_err("Can't register mac80211 hw.\n");
err = -ENODEV;
- goto fail3;
+ goto fail5;
}
rtlpriv->mac80211.mac80211_registered = 1;
@@ -2257,16 +2213,19 @@ int rtl_pci_probe(struct pci_dev *pdev,
set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
return 0;
-fail3:
- pci_set_drvdata(pdev, NULL);
+fail5:
+ rtl_pci_deinit(hw);
+fail4:
rtl_deinit_core(hw);
+fail3:
+ wait_for_completion(&rtlpriv->firmware_loading_complete);
+ rtlpriv->cfg->ops->deinit_sw_vars(hw);
fail2:
if (rtlpriv->io.pci_mem_start != 0)
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
pci_release_regions(pdev);
- complete(&rtlpriv->firmware_loading_complete);
fail1:
if (hw)
@@ -2317,7 +2276,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
if (rtlpci->using_msi)
pci_disable_msi(rtlpci->pdev);
- list_del(&rtlpriv->list);
if (rtlpriv->io.pci_mem_start != 0) {
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
pci_release_regions(pdev);
@@ -2376,7 +2334,6 @@ EXPORT_SYMBOL(rtl_pci_resume);
const struct rtl_intf_ops rtl_pci_ops = {
.adapter_start = rtl_pci_start,
.adapter_stop = rtl_pci_stop,
- .check_buddy_priv = rtl_pci_check_buddy_priv,
.adapter_tx = rtl_pci_tx,
.flush = rtl_pci_flush,
.reset_trx_ring = rtl_pci_reset_trx_ring,
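
The relabelled error path in rtl_pci_probe() above follows the usual probe convention: each failX label undoes exactly the steps that succeeded before the jump, in reverse order, so a failure at any stage releases only what was actually acquired. A skeletal sketch of the shape (step_a/step_b/step_c and their unstep counterparts are placeholders):

/* Hedged sketch of ordered probe unwinding. */
static int my_probe(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto undo_a;

	err = step_c();
	if (err)
		goto undo_b;

	return 0;

undo_b:
	unstep_b();	/* only reached once step_b() has succeeded */
undo_a:
	unstep_a();
	return err;
}
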
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index bbf8ff63dced..e63c67b1861b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -64,22 +64,23 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
- complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
pr_err("Firmware %s not available\n", fw_name);
rtlpriv->max_fw_size = 0;
- return;
+ goto exit;
}
if (firmware->size > rtlpriv->max_fw_size) {
pr_err("Firmware is too big!\n");
rtlpriv->max_fw_size = 0;
release_firmware(firmware);
- return;
+ goto exit;
}
pfirmware = (struct rt_firmware *)rtlpriv->rtlhal.pfirmware;
memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size);
pfirmware->sz_fw_tmpbufferlen = firmware->size;
release_firmware(firmware);
+exit:
+ complete(&rtlpriv->firmware_loading_complete);
}
static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
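
Moving complete() behind a common exit label, as the rtl92se hunk above does, guarantees the waiter in the probe/remove path is woken only after the firmware buffer is either populated or known to be unusable. A condensed sketch of that ordering (my_copy_fw is hypothetical):

/* Hedged sketch: signal the completion after every outcome, never before. */
static void my_fw_cb(const struct firmware *fw, void *context)
{
	struct completion *done = context;

	if (fw) {
		my_copy_fw(fw);		/* publish the data first */
		release_firmware(fw);
	}

	complete(done);			/* then wake the waiter */
}
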
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 1be51ea3f3c8..9eddbada8af1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -2033,8 +2033,10 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
if (!_rtl8821ae_check_condition(hw, v1)) {
i += 2; /* skip the pair of expression*/
v2 = array[i+1];
- while (v2 != 0xDEAD)
+ while (v2 != 0xDEAD) {
i += 3;
+ v2 = array[i + 1];
+ }
}
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index d37a017b2b81..f5718e570011 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -629,11 +629,6 @@ static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
tasklet_kill(&rtlusb->rx_work_tasklet);
cancel_work_sync(&rtlpriv->works.lps_change_work);
- if (rtlpriv->works.rtl_wq) {
- destroy_workqueue(rtlpriv->works.rtl_wq);
- rtlpriv->works.rtl_wq = NULL;
- }
-
skb_queue_purge(&rtlusb->rx_queue);
while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
@@ -1028,19 +1023,22 @@ int rtl_usb_probe(struct usb_interface *intf,
err = ieee80211_register_hw(hw);
if (err) {
pr_err("Can't register mac80211 hw.\n");
- goto error_out;
+ goto error_init_vars;
}
rtlpriv->mac80211.mac80211_registered = 1;
set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
return 0;
+error_init_vars:
+ wait_for_completion(&rtlpriv->firmware_loading_complete);
+ rtlpriv->cfg->ops->deinit_sw_vars(hw);
error_out:
+ rtl_usb_deinit(hw);
rtl_deinit_core(hw);
error_out2:
_rtl_usb_io_handler_release(hw);
usb_put_dev(udev);
- complete(&rtlpriv->firmware_loading_complete);
kfree(rtlpriv->usb_data);
ieee80211_free_hw(hw);
return -ENODEV;
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index ae6e351bc83c..f1830ddcdd8c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2270,8 +2270,6 @@ struct rtl_intf_ops {
/*com */
int (*adapter_start)(struct ieee80211_hw *hw);
void (*adapter_stop)(struct ieee80211_hw *hw);
- bool (*check_buddy_priv)(struct ieee80211_hw *hw,
- struct rtl_priv **buddy_priv);
int (*adapter_tx)(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
@@ -2514,14 +2512,6 @@ struct dig_t {
u32 rssi_max;
};
-struct rtl_global_var {
- /* from this list we can get
- * other adapter's rtl_priv
- */
- struct list_head glb_priv_list;
- spinlock_t glb_list_lock;
-};
-
#define IN_4WAY_TIMEOUT_TIME (30 * MSEC_PER_SEC) /* 30 seconds */
struct rtl_btc_info {
@@ -2667,9 +2657,7 @@ struct rtl_scan_list {
struct rtl_priv {
struct ieee80211_hw *hw;
struct completion firmware_loading_complete;
- struct list_head list;
struct rtl_priv *buddy_priv;
- struct rtl_global_var *glb_var;
struct rtl_dmsp_ctl dmsp_ctl;
struct rtl_locks locks;
struct rtl_works works;
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812au.c b/drivers/net/wireless/realtek/rtw88/rtw8812au.c
index 4da69590a423..e18995f4cc78 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8812au.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8812au.c
@@ -9,8 +9,74 @@
#include "usb.h"
static const struct usb_device_id rtw_8812au_id_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(0x2604, 0x0012, 0xff, 0xff, 0xff),
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8812, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881b, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881c, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0409, 0x0408, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* NEC */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x025d, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Buffalo */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0952, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* I-O DATA */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1106, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Belkin */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1109, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Belkin */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x3426, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* ZyXEL */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Logitec */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8812, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Abocom */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9051, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Netgear */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17d2, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* ASUS */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0074, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Sitecom */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0022, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Hawking */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1058, 0x0632, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* WD */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x13b1, 0x003f, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Linksys */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x148f, 0x9097, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Amped Wireless */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1740, 0x0100, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* EnGenius */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3313, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3315, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3316, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab30, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Planex */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x805b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TRENDnet */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0101, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0103, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010d, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010f, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0122, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2604, 0x0012, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Tenda */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa822, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Edimax */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8812au_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821au.c b/drivers/net/wireless/realtek/rtw88/rtw8821au.c
index 730018773e1c..a01744b64e8d 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821au.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821au.c
@@ -9,8 +9,58 @@
#include "usb.h"
static const struct usb_device_id rtw_8821au_id_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011e, 0xff, 0xff, 0xff),
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0820, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0821, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8822, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0823, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xa811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x0242, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Buffalo */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x029b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Buffalo */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0953, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* I-O DATA */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x4007, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400e, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400f, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9052, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Netgear */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0023, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* HAWKING */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3314, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3318, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab32, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Planex */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x804b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TRENDnet */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011e, 0xff, 0xff, 0xff),
+	  .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP-Link */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011f, 0xff, 0xff, 0xff),
+	  .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP-Link */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0120, 0xff, 0xff, 0xff),
+	  .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3823, 0x6249, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Obihai */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa812, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa813, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xb611, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8821au_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
index ab620a0b1dfc..8883300fc6ad 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
@@ -67,6 +67,12 @@ static const struct usb_device_id rtw_8822bu_id_table[] = {
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* LiteOn */
{ USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x808a, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TRENDnet TEW-808UBM */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x805a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TRENDnet TEW-805UBH */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x4011, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* ELECOM WDB-867DU3S */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0107, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30H */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8822bu_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index 8d6db68246f1..be193c7add77 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -789,6 +789,30 @@ static void rtw_usb_dynamic_rx_agg_v1(struct rtw_dev *rtwdev, bool enable)
rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16);
}
+static void rtw_usb_dynamic_rx_agg_v2(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ u8 size, timeout;
+ u16 val16;
+
+ if (!enable) {
+ size = 0x0;
+ timeout = 0x1;
+ } else if (rtwusb->udev->speed == USB_SPEED_SUPER) {
+ size = 0x6;
+ timeout = 0x1a;
+ } else {
+ size = 0x5;
+ timeout = 0x20;
+ }
+
+ val16 = u16_encode_bits(size, BIT_RXDMA_AGG_PG_TH) |
+ u16_encode_bits(timeout, BIT_DMA_AGG_TO_V1);
+
+ rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16);
+ rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN);
+}
+
static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
{
switch (rtwdev->chip->id) {
@@ -797,6 +821,10 @@ static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
case RTW_CHIP_TYPE_8821C:
rtw_usb_dynamic_rx_agg_v1(rtwdev, enable);
break;
+ case RTW_CHIP_TYPE_8821A:
+ case RTW_CHIP_TYPE_8812A:
+ rtw_usb_dynamic_rx_agg_v2(rtwdev, enable);
+ break;
case RTW_CHIP_TYPE_8723D:
/* Doesn't like aggregation. */
break;
@@ -930,6 +958,32 @@ static void rtw_usb_intf_deinit(struct rtw_dev *rtwdev,
usb_set_intfdata(intf, NULL);
}
+static int rtw_usb_switch_mode_old(struct rtw_dev *rtwdev)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ enum usb_device_speed cur_speed = rtwusb->udev->speed;
+ u8 hci_opt;
+
+ if (cur_speed == USB_SPEED_HIGH) {
+ hci_opt = rtw_read8(rtwdev, REG_HCI_OPT_CTRL);
+
+ if ((hci_opt & (BIT(2) | BIT(3))) != BIT(3)) {
+ rtw_write8(rtwdev, REG_HCI_OPT_CTRL, 0x8);
+ rtw_write8(rtwdev, REG_SYS_SDIO_CTRL, 0x2);
+ rtw_write8(rtwdev, REG_ACLK_MON, 0x1);
+ rtw_write8(rtwdev, 0x3d, 0x3);
+ /* usb disconnect */
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL + 1, 0x80);
+ return 1;
+ }
+ } else if (cur_speed == USB_SPEED_SUPER) {
+ rtw_write8_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT(1));
+ rtw_write8_clr(rtwdev, REG_ACLK_MON, BIT(0));
+ }
+
+ return 0;
+}
+
static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
{
enum usb_device_speed cur_speed;
@@ -979,11 +1033,22 @@ static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev)
return 1;
}
+static bool rtw_usb3_chip_old(u8 chip_id)
+{
+ return chip_id == RTW_CHIP_TYPE_8812A;
+}
+
+static bool rtw_usb3_chip_new(u8 chip_id)
+{
+ return chip_id == RTW_CHIP_TYPE_8822C ||
+ chip_id == RTW_CHIP_TYPE_8822B;
+}
+
static int rtw_usb_switch_mode(struct rtw_dev *rtwdev)
{
u8 id = rtwdev->chip->id;
- if (id != RTW_CHIP_TYPE_8822C && id != RTW_CHIP_TYPE_8822B)
+ if (!rtw_usb3_chip_new(id) && !rtw_usb3_chip_old(id))
return 0;
if (!rtwdev->efuse.usb_mode_switch) {
@@ -998,7 +1063,10 @@ static int rtw_usb_switch_mode(struct rtw_dev *rtwdev)
return 0;
}
- return rtw_usb_switch_mode_new(rtwdev);
+ if (rtw_usb3_chip_old(id))
+ return rtw_usb_switch_mode_old(rtwdev);
+ else
+ return rtw_usb_switch_mode_new(rtwdev);
}
int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
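
The aggregation setup in rtw_usb_dynamic_rx_agg_v2() above relies on the bitfield.h encode helpers: u16_encode_bits(val, MASK) shifts val into the bit positions described by MASK. A small self-contained illustration with made-up masks (MY_AGG_SIZE and MY_AGG_TIMEOUT are hypothetical, not the driver's register fields):

#include <linux/bitfield.h>

/* Hedged sketch of u16_encode_bits() usage. */
#define MY_AGG_SIZE	GENMASK(3, 0)	/* bits 3..0 */
#define MY_AGG_TIMEOUT	GENMASK(15, 8)	/* bits 15..8 */

static u16 my_agg_reg(u8 size, u8 timeout)
{
	/* size = 0x6, timeout = 0x1a yields 0x1a06 */
	return u16_encode_bits(size, MY_AGG_SIZE) |
	       u16_encode_bits(timeout, MY_AGG_TIMEOUT);
}
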
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.c b/drivers/net/wireless/realtek/rtw89/acpi.c
index 908e980a4b72..f5dedb12c129 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.c
+++ b/drivers/net/wireless/realtek/rtw89/acpi.c
@@ -148,3 +148,50 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
ACPI_FREE(obj);
return ret;
}
+
+int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev,
+ struct rtw89_acpi_rtag_result *res)
+{
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_handle root, handle;
+ union acpi_object *obj;
+ acpi_status status;
+ u32 buf_len;
+ int ret = 0;
+
+ root = ACPI_HANDLE(rtwdev->dev);
+ if (!root)
+ return -EOPNOTSUPP;
+
+ status = acpi_get_handle(root, (acpi_string)"RTAG", &handle);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = buf.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi: expect buffer but type: %d\n", obj->type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf_len = obj->buffer.length;
+ if (buf_len != sizeof(*res)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
+ __func__, buf_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *res = *(struct rtw89_acpi_rtag_result *)obj->buffer.pointer;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "antenna_gain: ", res, sizeof(*res));
+
+out:
+ ACPI_FREE(obj);
+ return ret;
+}
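
rtw89_acpi_evaluate_rtag() above is defensive in the usual ACPI way: check the returned object's type, then its buffer length against the expected packed layout, before copying anything out. A stripped-down sketch of that validation order with a hypothetical result struct (my_result, my_parse):

#include <linux/acpi.h>

struct my_result {
	u8 tag[4];
	u8 revision;
	__le32 domain;
} __packed;

/* Hedged sketch: validate an ACPI buffer before treating it as a
 * packed struct.
 */
static int my_parse(union acpi_object *obj, struct my_result *res)
{
	if (obj->type != ACPI_TYPE_BUFFER)
		return -EINVAL;
	if (obj->buffer.length != sizeof(*res))
		return -EINVAL;	/* firmware/driver layout mismatch */

	memcpy(res, obj->buffer.pointer, sizeof(*res));
	return 0;
}
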
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.h b/drivers/net/wireless/realtek/rtw89/acpi.h
index d274be1775bf..b43ab106e44d 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.h
+++ b/drivers/net/wireless/realtek/rtw89/acpi.h
@@ -63,8 +63,17 @@ struct rtw89_acpi_dsm_result {
} u;
};
+struct rtw89_acpi_rtag_result {
+ u8 tag[4];
+ u8 revision;
+ __le32 domain;
+ u8 ant_gain_table[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR];
+} __packed;
+
int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
enum rtw89_acpi_dsm_func func,
struct rtw89_acpi_dsm_result *res);
+int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev,
+ struct rtw89_acpi_rtag_result *res);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 8ef59994c0db..8fa1e6c1ce13 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -135,8 +135,8 @@ again:
}
static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam,
- struct rtw89_sec_cam_entry *sec_cam,
- struct ieee80211_key_conf *key,
+ const struct rtw89_sec_cam_entry *sec_cam,
+ const struct ieee80211_key_conf *key,
u8 *key_idx)
{
u8 idx;
@@ -246,8 +246,8 @@ static int __rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
static int __rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
- struct ieee80211_key_conf *key,
- struct rtw89_sec_cam_entry *sec_cam)
+ const struct ieee80211_key_conf *key,
+ const struct rtw89_sec_cam_entry *sec_cam)
{
struct rtw89_addr_cam_entry *addr_cam;
u8 key_idx = 0;
@@ -286,6 +286,22 @@ static int __rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
return 0;
}
+int rtw89_cam_attach_link_sec_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link,
+ u8 sec_cam_idx)
+{
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ const struct rtw89_sec_cam_entry *sec_cam;
+
+ sec_cam = cam_info->sec_entries[sec_cam_idx];
+ if (!sec_cam)
+ return -ENOENT;
+
+ return __rtw89_cam_attach_sec_cam(rtwdev, rtwvif_link, rtwsta_link,
+ sec_cam->key_conf, sec_cam);
+}
+
static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -306,6 +322,9 @@ static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
rtwvif = vif_to_rtwvif(vif);
+ if (rtwsta)
+ clear_bit(sec_cam->sec_cam_idx, rtwsta->pairwise_sec_cam_map);
+
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
rtwsta_link = rtwsta ? rtwsta->links[link_id] : NULL;
if (rtwsta && !rtwsta_link)
@@ -369,6 +388,8 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
return ret;
}
+ set_bit(sec_cam->sec_cam_idx, rtwsta->pairwise_sec_cam_map);
+
return 0;
}
@@ -410,6 +431,9 @@ static int rtw89_cam_sec_key_install(struct rtw89_dev *rtwdev,
sec_cam->len = RTW89_SEC_CAM_LEN;
sec_cam->ext_key = ext_key;
memcpy(sec_cam->key, key->key, key->keylen);
+
+ sec_cam->key_conf = key;
+
ret = rtw89_cam_send_sec_key_cmd(rtwdev, sec_cam);
if (ret) {
rtw89_err(rtwdev, "failed to send sec key cmd: %d\n", ret);
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index 3134ebf08825..8fd2d776408e 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -578,4 +578,9 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
void rtw89_cam_reset_keys(struct rtw89_dev *rtwdev);
+int rtw89_cam_attach_link_sec_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link,
+ u8 sec_cam_idx);
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index e5b2968c1431..f848185e2ced 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -203,6 +203,55 @@ static const struct ieee80211_iface_combination rtw89_iface_combs[] = {
},
};
+#define RTW89_6GHZ_SPAN_HEAD 6145
+#define RTW89_6GHZ_SPAN_IDX(center_freq) \
+ ((((int)(center_freq) - RTW89_6GHZ_SPAN_HEAD) / 5) / 2)
+
+#define RTW89_DECL_6GHZ_SPAN(center_freq, subband_l, subband_h) \
+ [RTW89_6GHZ_SPAN_IDX(center_freq)] = { \
+ .sar_subband_low = RTW89_SAR_6GHZ_ ## subband_l, \
+ .sar_subband_high = RTW89_SAR_6GHZ_ ## subband_h, \
+ .ant_gain_subband_low = RTW89_ANT_GAIN_6GHZ_ ## subband_l, \
+ .ant_gain_subband_high = RTW89_ANT_GAIN_6GHZ_ ## subband_h, \
+ }
+
+/* Since 6GHz subbands are not edge aligned, some center frequencies span
+ * two subbands. Each such case is described below by an rtw89_6ghz_span.
+ */
+static const struct rtw89_6ghz_span rtw89_overlapping_6ghz[] = {
+ RTW89_DECL_6GHZ_SPAN(6145, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_6GHZ_SPAN(6165, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_6GHZ_SPAN(6185, SUBBAND_5_L, SUBBAND_5_H),
+ RTW89_DECL_6GHZ_SPAN(6505, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_6GHZ_SPAN(6525, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_6GHZ_SPAN(6545, SUBBAND_6, SUBBAND_7_L),
+ RTW89_DECL_6GHZ_SPAN(6665, SUBBAND_7_L, SUBBAND_7_H),
+ RTW89_DECL_6GHZ_SPAN(6705, SUBBAND_7_L, SUBBAND_7_H),
+ RTW89_DECL_6GHZ_SPAN(6825, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_6GHZ_SPAN(6865, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_6GHZ_SPAN(6875, SUBBAND_7_H, SUBBAND_8),
+ RTW89_DECL_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8),
+};
+
+const struct rtw89_6ghz_span *
+rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq)
+{
+ int idx;
+
+ if (center_freq >= RTW89_6GHZ_SPAN_HEAD) {
+ idx = RTW89_6GHZ_SPAN_IDX(center_freq);
+		/* To decrease the size of rtw89_overlapping_6ghz[],
+		 * RTW89_6GHZ_SPAN_IDX() truncates the leading NULLs
+		 * so that the first span lands at index 0 of the table.
+		 * If the center frequency is below the first span, the
+		 * index goes negative.
+		 */
+ if (idx >= 0 && idx < ARRAY_SIZE(rtw89_overlapping_6ghz))
+ return &rtw89_overlapping_6ghz[idx];
+ }
+
+ return NULL;
+}
+
bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate)
{
struct ieee80211_rate rate;
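
To make the span indexing above concrete: the macro maps a 6GHz center frequency onto a table slot in 10 MHz steps starting at 6145 MHz, so frequencies below the head produce a negative index that the bounds check rejects. A tiny check of the arithmetic under stand-in names:

/* Hedged sketch verifying the index arithmetic of RTW89_6GHZ_SPAN_IDX(). */
#define MY_SPAN_HEAD 6145
#define MY_SPAN_IDX(f) ((((int)(f) - MY_SPAN_HEAD) / 5) / 2)

/* MY_SPAN_IDX(6145) == 0
 * MY_SPAN_IDX(6505) == ((6505 - 6145) / 5) / 2 == 36
 * MY_SPAN_IDX(6135) == -1  (below the table, must be rejected)
 */
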
@@ -2140,6 +2189,8 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
if (phy_ppdu)
ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg);
+
+ pkt_stat->beacon_rate = desc_info->data_rate;
}
if (!ether_addr_equal(bss_conf->addr, hdr->addr1))
@@ -2317,6 +2368,12 @@ static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev,
}
}
+static void rtw89_core_validate_rx_signal(struct ieee80211_rx_status *rx_status)
+{
+ if (!rx_status->signal)
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+}
+
static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
@@ -2333,6 +2390,8 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu);
rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status);
+ rtw89_core_validate_rx_signal(rx_status);
+
/* In low power mode, it does RX in thread context. */
local_bh_disable();
ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, napi);
@@ -2468,6 +2527,7 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
u8 *data, u32 data_offset)
{
+ struct rtw89_rxdesc_phy_rpt_v2 *rxd_rpt;
struct rtw89_rxdesc_short_v2 *rxd_s;
struct rtw89_rxdesc_long_v2 *rxd_l;
u16 shift_len, drv_info_len, phy_rtp_len, hdr_cnv_len;
@@ -2515,6 +2575,12 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short_v2);
desc_info->ready = true;
+ if (phy_rtp_len == sizeof(*rxd_rpt)) {
+ rxd_rpt = (struct rtw89_rxdesc_phy_rpt_v2 *)(data + data_offset +
+ desc_info->rxd_len);
+ desc_info->rssi = le32_get_bits(rxd_rpt->dword0, BE_RXD_PHY_RSSI);
+ }
+
if (!desc_info->long_rxdesc)
return;
@@ -2657,6 +2723,7 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
rx_status->flag |= RX_FLAG_MACTIME_START;
rx_status->mactime = desc_info->free_run_cnt;
+ rtw89_chip_phy_rpt_to_rssi(rtwdev, desc_info, rx_status);
rtw89_core_stats_sta_rx_status(rtwdev, desc_info, rx_status);
}
@@ -2664,10 +2731,6 @@ static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- /* FIXME: Fix __rtw89_enter_ps_mode() to consider MLO cases. */
- if (rtwdev->support_mlo)
- return RTW89_PS_MODE_NONE;
-
if (rtw89_disable_ps_mode || !chip->ps_mode_supported ||
RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw))
return RTW89_PS_MODE_NONE;
@@ -2700,6 +2763,41 @@ static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev,
}
}
+static
+void rtw89_core_rx_pkt_hdl(struct rtw89_dev *rtwdev, const struct sk_buff *skb,
+ const struct rtw89_rx_desc_info *desc)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct rtw89_sta_link *rtwsta_link;
+ struct ieee80211_sta *sta;
+ struct rtw89_sta *rtwsta;
+ u8 macid = desc->mac_id;
+
+ if (!refcount_read(&rtwdev->refcount_ap_info))
+ return;
+
+ rcu_read_lock();
+
+ rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid);
+ if (!rtwsta_link)
+ goto out;
+
+ rtwsta = rtwsta_link->rtwsta;
+ if (!test_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags))
+ goto out;
+
+ sta = rtwsta_to_sta(rtwsta);
+ if (ieee80211_is_pspoll(hdr->frame_control))
+ ieee80211_sta_pspoll(sta);
+ else if (ieee80211_has_pm(hdr->frame_control) &&
+ (ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)))
+ ieee80211_sta_uapsd_trigger(sta, ieee80211_get_tid(hdr));
+
+out:
+ rcu_read_unlock();
+}
+
void rtw89_core_rx(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
struct sk_buff *skb)
@@ -2722,6 +2820,7 @@ void rtw89_core_rx(struct rtw89_dev *rtwdev,
rx_status = IEEE80211_SKB_RXCB(skb);
memset(rx_status, 0, sizeof(*rx_status));
rtw89_core_update_rx_status(rtwdev, desc_info, rx_status);
+ rtw89_core_rx_pkt_hdl(rtwdev, skb, desc_info);
if (desc_info->long_rxdesc &&
BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP)
skb_queue_tail(&ppdu_sts->rx_queue[band], skb);
@@ -3131,6 +3230,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, bool qos, bool ps)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
struct sk_buff *skb;
@@ -3146,7 +3246,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
goto out;
}
- skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, qos);
+ skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, qos);
if (!skb) {
ret = -ENOMEM;
goto out;
@@ -3367,21 +3467,10 @@ static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
return tfc_changed;
}
-static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link)
-{
- if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION &&
- rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
- return;
-
- rtw89_enter_lps(rtwdev, rtwvif_link, true);
-}
-
static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
{
- struct rtw89_vif_link *rtwvif_link;
+ struct ieee80211_vif *vif;
struct rtw89_vif *rtwvif;
- unsigned int link_id;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
if (rtwvif->tdls_peer)
@@ -3393,8 +3482,13 @@ static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
rtwvif->stats.rx_tfc_lv != RTW89_TFC_IDLE)
continue;
- rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
- rtw89_vif_enter_lps(rtwdev, rtwvif_link);
+ vif = rtwvif_to_vif(rtwvif);
+
+ if (!(vif->type == NL80211_IFTYPE_STATION ||
+ vif->type == NL80211_IFTYPE_P2P_CLIENT))
+ continue;
+
+ rtw89_enter_lps(rtwdev, rtwvif, true);
}
}
@@ -3699,6 +3793,8 @@ int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev,
{
const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ rtw89_assoc_link_clr(rtwsta_link);
+
if (vif->type == NL80211_IFTYPE_STATION)
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false);
@@ -3748,6 +3844,22 @@ int rtw89_core_sta_link_disconnect(struct rtw89_dev *rtwdev,
return ret;
}
+static bool rtw89_sta_link_can_er(struct rtw89_dev *rtwdev,
+ struct ieee80211_bss_conf *bss_conf,
+ struct ieee80211_link_sta *link_sta)
+{
+ if (!bss_conf->he_support ||
+ bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE)
+ return false;
+
+ if (rtwdev->chip->chip_id == RTL8852C &&
+ rtw89_sta_link_has_su_mu_4xhe08(link_sta) &&
+ !rtw89_sta_link_has_er_su_4xhe08(link_sta))
+ return false;
+
+ return true;
+}
+
int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link)
@@ -3758,12 +3870,11 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
rtwsta_link);
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif_link->chanctx_idx);
+ struct ieee80211_link_sta *link_sta;
int ret;
if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
if (sta->tdls) {
- struct ieee80211_link_sta *link_sta;
-
rcu_read_lock();
link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
@@ -3814,9 +3925,8 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
rcu_read_lock();
bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
- if (bss_conf->he_support &&
- !(bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE))
- rtwsta_link->er_cap = true;
+ link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
+ rtwsta_link->er_cap = rtw89_sta_link_can_er(rtwdev, bss_conf, link_sta);
rcu_read_unlock();
@@ -3834,6 +3944,7 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
}
+ rtw89_assoc_link_set(rtwsta_link);
return ret;
}
@@ -4433,6 +4544,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
rtw89_phy_dm_init(rtwdev);
rtw89_mac_cfg_ppdu_status_bands(rtwdev, true);
+ rtw89_mac_cfg_phy_rpt_bands(rtwdev, true);
rtw89_mac_update_rts_threshold(rtwdev);
rtw89_tas_reset(rtwdev);
@@ -4755,6 +4867,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_ser_init(rtwdev);
rtw89_entity_init(rtwdev);
rtw89_tas_init(rtwdev);
+ rtw89_phy_ant_gain_init(rtwdev);
return 0;
}
@@ -5100,6 +5213,9 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw))
+ ieee80211_hw_set(hw, AP_LINK_PS);
+
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 5ad32eacd0d5..155538370a89 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -830,6 +830,7 @@ enum rtw89_phy_idx {
};
#define __RTW89_MLD_MAX_LINK_NUM 2
+#define RTW89_MLD_NON_STA_LINK_NUM 1
enum rtw89_chanctx_idx {
RTW89_CHANCTX_0 = 0,
@@ -1083,6 +1084,7 @@ struct rtw89_rx_desc_info {
u16 offset;
u16 rxd_len;
bool ready;
+ u16 rssi;
};
struct rtw89_rxdesc_short {
@@ -1125,6 +1127,11 @@ struct rtw89_rxdesc_long_v2 {
__le32 dword9;
} __packed;
+struct rtw89_rxdesc_phy_rpt_v2 {
+ __le32 dword0;
+ __le32 dword1;
+} __packed;
+
struct rtw89_tx_desc_info {
u16 pkt_size;
u8 wp_offset;
@@ -3358,6 +3365,8 @@ struct rtw89_sec_cam_entry {
u8 spp_mode : 1;
/* 256 bits */
u8 key[32];
+
+ struct ieee80211_key_conf *key_conf;
};
struct rtw89_sta_link {
@@ -3621,6 +3630,9 @@ struct rtw89_chip_ops {
struct ieee80211_rx_status *status);
void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu);
+ void (*phy_rpt_to_rssi)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct ieee80211_rx_status *rx_status);
void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx);
void (*cfg_txrx_path)(struct rtw89_dev *rtwdev);
@@ -4255,6 +4267,7 @@ struct rtw89_chip_info {
u16 support_bandwidths;
bool support_unii4;
bool support_rnr;
+ bool support_ant_gain;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
bool hw_sec_hdr;
@@ -4296,6 +4309,7 @@ struct rtw89_chip_info {
const struct rtw89_rfe_parms *dflt_parms;
const struct rtw89_chanctx_listener *chanctx_listener;
+ u8 txpwr_factor_bb;
u8 txpwr_factor_rf;
u8 txpwr_factor_mac;
@@ -4448,8 +4462,12 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_SCAN_OFFLOAD_BE_V0,
RTW89_FW_FEATURE_WOW_REASON_V1,
RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V0,
+ RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V1,
RTW89_FW_FEATURE_RFK_RXDCK_V0,
RTW89_FW_FEATURE_NO_WOW_CPU_IO_RX,
+ RTW89_FW_FEATURE_NOTIFY_AP_INFO,
+ RTW89_FW_FEATURE_CH_INFO_BE_V0,
+ RTW89_FW_FEATURE_LPS_CH_INFO,
};
struct rtw89_fw_suit {
@@ -4597,6 +4615,44 @@ struct rtw89_sar_info {
};
};
+enum rtw89_ant_gain_subband {
+ RTW89_ANT_GAIN_2GHZ_SUBBAND,
+ RTW89_ANT_GAIN_5GHZ_SUBBAND_1, /* U-NII-1 */
+ RTW89_ANT_GAIN_5GHZ_SUBBAND_2, /* U-NII-2 */
+ RTW89_ANT_GAIN_5GHZ_SUBBAND_2E, /* U-NII-2-Extended */
+ RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4, /* U-NII-3 and U-NII-4 */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L, /* U-NII-5 lower part */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H, /* U-NII-5 higher part */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_6, /* U-NII-6 */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L, /* U-NII-7 lower part */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H, /* U-NII-7 higher part */
+ RTW89_ANT_GAIN_6GHZ_SUBBAND_8, /* U-NII-8 */
+
+ RTW89_ANT_GAIN_SUBBAND_NR,
+};
+
+enum rtw89_ant_gain_domain_type {
+ RTW89_ANT_GAIN_ETSI = 0,
+
+ RTW89_ANT_GAIN_DOMAIN_NUM,
+};
+
+#define RTW89_ANT_GAIN_CHAIN_NUM 2
+struct rtw89_ant_gain_info {
+ s8 offset[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR];
+ u32 regd_enabled;
+};
+
+struct rtw89_6ghz_span {
+ enum rtw89_sar_subband sar_subband_low;
+ enum rtw89_sar_subband sar_subband_high;
+ enum rtw89_ant_gain_subband ant_gain_subband_low;
+ enum rtw89_ant_gain_subband ant_gain_subband_high;
+};
+
+#define RTW89_SAR_SPAN_VALID(span) ((span)->sar_subband_high)
+#define RTW89_ANT_GAIN_SPAN_VALID(span) ((span)->ant_gain_subband_high)
+
enum rtw89_tas_state {
RTW89_TAS_STATE_DPR_OFF,
RTW89_TAS_STATE_DPR_ON,
@@ -4781,6 +4837,7 @@ struct rtw89_pkt_drop_params {
struct rtw89_pkt_stat {
u16 beacon_nr;
+ u8 beacon_rate;
u32 rx_rate_cnt[RTW89_HW_RATE_NR];
};
@@ -5556,6 +5613,9 @@ struct rtw89_dev {
struct rtw89_rfe_data *rfe_data;
enum rtw89_custid custid;
+ struct rtw89_sta_link __rcu *assoc_link_on_macid[RTW89_MAX_MAC_ID_NUM];
+ refcount_t refcount_ap_info;
+
/* ensures exclusive access from mac80211 callbacks */
struct mutex mutex;
struct list_head rtwvifs_list;
@@ -5636,6 +5696,7 @@ struct rtw89_dev {
struct rtw89_regulatory_info regulatory;
struct rtw89_sar_info sar;
struct rtw89_tas_info tas;
+ struct rtw89_ant_gain_info ant_gain;
struct rtw89_btc btc;
enum rtw89_ps_mode ps_mode;
@@ -5654,10 +5715,17 @@ struct rtw89_dev {
u8 priv[] __aligned(sizeof(void *));
};
+struct rtw89_link_conf_container {
+ struct ieee80211_bss_conf *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
+};
+
+#define RTW89_VIF_IDLE_LINK_ID 0
+
struct rtw89_vif {
struct rtw89_dev *rtwdev;
struct list_head list;
struct list_head mgnt_entry;
+ struct rtw89_link_conf_container __rcu *snap_link_confs;
u8 mac_addr[ETH_ALEN];
__be32 ip_addr;
@@ -5689,10 +5757,18 @@ static inline bool rtw89_vif_assign_link_is_valid(struct rtw89_vif_link **rtwvif
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) \
if (rtw89_vif_assign_link_is_valid(&(rtwvif_link), rtwvif, link_id))
+enum rtw89_sta_flags {
+ RTW89_REMOTE_STA_IN_PS,
+
+ NUM_OF_RTW89_STA_FLAGS,
+};
+
struct rtw89_sta {
struct rtw89_dev *rtwdev;
struct rtw89_vif *rtwvif;
+ DECLARE_BITMAP(flags, NUM_OF_RTW89_STA_FLAGS);
+
bool disassoc;
struct sk_buff_head roc_queue;
@@ -5700,6 +5776,8 @@ struct rtw89_sta {
struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
DECLARE_BITMAP(ampdu_map, IEEE80211_NUM_TIDS);
+ DECLARE_BITMAP(pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
+
u8 links_inst_valid_num;
DECLARE_BITMAP(links_inst_map, __RTW89_MLD_MAX_LINK_NUM);
struct rtw89_sta_link *links[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -5770,6 +5848,31 @@ u8 rtw89_sta_link_inst_get_index(struct rtw89_sta_link *rtwsta_link)
return rtwsta_link - rtwsta->links_inst;
}
+static inline void rtw89_assoc_link_set(struct rtw89_sta_link *rtwsta_link)
+{
+ struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+
+ rcu_assign_pointer(rtwdev->assoc_link_on_macid[rtwsta_link->mac_id],
+ rtwsta_link);
+}
+
+static inline void rtw89_assoc_link_clr(struct rtw89_sta_link *rtwsta_link)
+{
+ struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+
+ rcu_assign_pointer(rtwdev->assoc_link_on_macid[rtwsta_link->mac_id],
+ NULL);
+ synchronize_rcu();
+}
+
+static inline struct rtw89_sta_link *
+rtw89_assoc_link_rcu_dereference(struct rtw89_dev *rtwdev, u8 macid)
+{
+ return rcu_dereference(rtwdev->assoc_link_on_macid[macid]);
+}
+
static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
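
The helpers above follow the standard RCU publish/retire discipline: rcu_assign_pointer() publishes the link with the required memory barrier, readers dereference it under rcu_read_lock(), and the clearing side calls synchronize_rcu() so no reader can still hold the stale pointer once the entry is reused. A minimal sketch of the reader side, using the array introduced by this patch (my_use_link itself is hypothetical):

/* Hedged sketch of the RCU reader pattern used by the lookup above. */
static void my_use_link(struct rtw89_dev *rtwdev, u8 macid)
{
	struct rtw89_sta_link *link;

	rcu_read_lock();
	link = rcu_dereference(rtwdev->assoc_link_on_macid[macid]);
	if (link) {
		/* *link is safe to read until rcu_read_unlock() */
	}
	rcu_read_unlock();
}
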
@@ -6194,9 +6297,19 @@ static inline struct ieee80211_bss_conf *
__rtw89_vif_rcu_dereference_link(struct rtw89_vif_link *rtwvif_link, bool *nolink)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ struct rtw89_link_conf_container *snap;
struct ieee80211_bss_conf *bss_conf;
+ snap = rcu_dereference(rtwvif->snap_link_confs);
+ if (snap) {
+ bss_conf = snap->link_conf[rtwvif_link->link_id];
+ goto out;
+ }
+
bss_conf = rcu_dereference(vif->link_conf[rtwvif_link->link_id]);
+
+out:
if (unlikely(!bss_conf)) {
*nolink = true;
return &vif->bss_conf;
@@ -6605,6 +6718,16 @@ static inline void rtw89_chip_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
chip->ops->convert_rpl_to_rssi(rtwdev, phy_ppdu);
}
+static inline void rtw89_chip_phy_rpt_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->phy_rpt_to_rssi)
+ chip->ops->phy_rpt_to_rssi(rtwdev, desc_info, rx_status);
+}
+
static inline void rtw89_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
@@ -6753,6 +6876,26 @@ bool rtw89_sta_has_beamformer_cap(struct ieee80211_link_sta *link_sta)
return false;
}
+static inline
+bool rtw89_sta_link_has_su_mu_4xhe08(struct ieee80211_link_sta *link_sta)
+{
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI)
+ return true;
+
+ return false;
+}
+
+static inline
+bool rtw89_sta_link_has_er_su_4xhe08(struct ieee80211_link_sta *link_sta)
+{
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[8] &
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI)
+ return true;
+
+ return false;
+}
+
static inline struct rtw89_fw_suit *rtw89_fw_suit_get(struct rtw89_dev *rtwdev,
enum rtw89_fw_type type)
{
@@ -6908,6 +7051,8 @@ struct rtw89_sta_link *rtw89_sta_set_link(struct rtw89_sta *rtwsta,
unsigned int link_id);
void rtw89_sta_unset_link(struct rtw89_sta *rtwsta, unsigned int link_id);
void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
+const struct rtw89_6ghz_span *
+rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq);
void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
struct rtw89_chan *chan);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 6abd88fa80ba..fd86752d86f3 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -9,6 +9,7 @@
#include "fw.h"
#include "mac.h"
#include "pci.h"
+#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "sar.h"
@@ -882,6 +883,9 @@ static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
seq_puts(m, "[TAS]\n");
rtw89_print_tas(m, rtwdev);
+ seq_puts(m, "[DAG]\n");
+ rtw89_print_ant_gain(m, rtwdev, chan);
+
tbl = dbgfs_txpwr_tables[chip_gen];
if (!tbl) {
ret = -EOPNOTSUPP;
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 2191c037d72e..90db15685728 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -709,11 +709,13 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
@@ -727,7 +729,11 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -2414,6 +2420,7 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
u8 *id)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
struct rtw89_pktofld_info *info;
struct sk_buff *skb;
int ret;
@@ -2430,10 +2437,10 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
skb = ieee80211_proberesp_get(rtwdev->hw, vif);
break;
case RTW89_PKT_OFLD_TYPE_NULL_DATA:
- skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
+ skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false);
break;
case RTW89_PKT_OFLD_TYPE_QOS_NULL:
- skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
+ skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true);
break;
case RTW89_PKT_OFLD_TYPE_EAPOL_KEY:
skb = rtw89_eapol_get(rtwdev, rtwvif_link);
@@ -2589,14 +2596,17 @@ fail:
return ret;
}
-int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- rtwvif_link->chanctx_idx);
const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_chan *chan;
+ struct rtw89_vif_link *rtwvif_link;
struct rtw89_h2c_lps_ch_info *h2c;
u32 len = sizeof(*h2c);
+ unsigned int link_id;
struct sk_buff *skb;
+ bool no_chan = true;
+ u8 phy_idx;
u32 done;
int ret;
@@ -2611,11 +2621,27 @@ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
skb_put(skb, len);
h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;
- h2c->info[0].central_ch = chan->channel;
- h2c->info[0].pri_ch = chan->primary_channel;
- h2c->info[0].band = chan->band_type;
- h2c->info[0].bw = chan->band_width;
- h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF);
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ phy_idx = rtwvif_link->phy_idx;
+ if (phy_idx >= ARRAY_SIZE(h2c->info))
+ continue;
+
+ chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ no_chan = false;
+
+ h2c->info[phy_idx].central_ch = chan->channel;
+ h2c->info[phy_idx].pri_ch = chan->primary_channel;
+ h2c->info[phy_idx].band = chan->band_type;
+ h2c->info[phy_idx].bw = chan->band_width;
+ }
+
+ if (no_chan) {
+ rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n");
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
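Note: rtw89_fw_h2c_lps_ch_info() now takes the whole rtw89_vif and fills one h2c->info[] slot per active link, keyed by the link's phy_idx rather than a fixed index 0 (PHYs beyond ARRAY_SIZE(h2c->info) are skipped), failing with -ENOENT when no link contributed a channel; mlo_dbcc_mode_lps likewise carries the device's actual mode instead of the hard-coded MLO_2_PLUS_0_1RF. A standalone model of the fill loop, with simplified stand-in types:

	#include <stdbool.h>
	#include <stddef.h>

	#define NUM_PHY 2

	struct chan_slot { int central_ch, pri_ch, band, bw; };

	/* returns false when nothing was filled: the -ENOENT path */
	static bool fill_by_phy(struct chan_slot out[NUM_PHY],
				const struct chan_slot *link_chan,
				const int *phy_idx, size_t n)
	{
		bool filled = false;
		size_t i;

		for (i = 0; i < n; i++) {
			if (phy_idx[i] < 0 || phy_idx[i] >= NUM_PHY)
				continue; /* mirrors the ARRAY_SIZE() guard */
			out[phy_idx[i]] = link_chan[i];
			filled = true;
		}
		return filled;
	}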
@@ -2640,6 +2666,87 @@ fail:
return ret;
}
+int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_h2c_lps_ml_cmn_info *h2c;
+ struct rtw89_vif_link *rtwvif_link;
+ const struct rtw89_chan *chan;
+ u8 bw_idx = RTW89_BB_BW_20_40;
+ u32 len = sizeof(*h2c);
+ unsigned int link_id;
+ struct sk_buff *skb;
+ u8 gain_band;
+ u32 done;
+ u8 path;
+ int ret;
+ int i;
+
+ if (chip->chip_gen != RTW89_CHIP_BE)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data;
+
+ h2c->fmt_id = 0x1;
+
+ h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A;
+ chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
+
+ h2c->central_ch[rtwvif_link->phy_idx] = chan->channel;
+ h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel;
+ h2c->band[rtwvif_link->phy_idx] = chan->band_type;
+ h2c->bw[rtwvif_link->phy_idx] = chan->band_width;
+ if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6)
+ h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1;
+ else
+ h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2;
+
+ /* Fill BW20 RX gain table for beacon mode */
+ for (i = 0; i < TIA_GAIN_NUM; i++) {
+ h2c->tia_gain[rtwvif_link->phy_idx][i] =
+ cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
+ }
+ memcpy(h2c->lna_gain[rtwvif_link->phy_idx],
+ gain->lna_gain[gain_band][bw_idx][path],
+ LNA_GAIN_NUM);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
+ H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len);
+
+ rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
+ true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
+ if (ret)
+ rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n");
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
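Note: the new ML common-info H2C uses a clear-then-poll handshake: B_CHK_LPS_STAT is cleared before the H2C is sent, and read_poll_timeout() then waits for firmware to set it back as an ack (50us poll interval, 5ms budget, with a sleep before the first read); a timeout only warns because LPS entry can proceed without the confirmation. For reference, the helper's shape is:

	/* read_poll_timeout(op, val, cond, sleep_us, timeout_us,
	 *                   sleep_before_read, args...)
	 * repeatedly does val = op(args...) until cond or timeout.
	 */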
#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
@@ -4954,13 +5061,14 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct rtw89_h2c_chinfo_elem_be *elem;
struct rtw89_mac_chinfo_be *ch_info;
- struct rtw89_h2c_chinfo *h2c;
+ struct rtw89_h2c_chinfo_be *h2c;
struct sk_buff *skb;
unsigned int cond;
+ u8 ver = U8_MAX;
int skb_len;
int ret;
- static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
+ static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE);
skb_len = struct_size(h2c, elem, ch_num);
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
@@ -4969,8 +5077,11 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
return -ENOMEM;
}
+ if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
+ ver = 0;
+
skb_put(skb, sizeof(*h2c));
- h2c = (struct rtw89_h2c_chinfo *)skb->data;
+ h2c = (struct rtw89_h2c_chinfo_be *)skb->data;
h2c->ch_num = ch_num;
h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
@@ -4980,8 +5091,7 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
list_for_each_entry(ch_info, chan_list, list) {
elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));
- elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) |
- le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
+ elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
le32_encode_bits(ch_info->central_ch,
RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);
@@ -5028,6 +5138,12 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
le32_encode_bits(ch_info->fw_probe0_bssids,
RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
+ if (ver == 0)
+ elem->w0 |=
+ le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD);
+ else
+ elem->w7 = le32_encode_bits(ch_info->period,
+ RTW89_H2C_CHINFO_BE_W7_PERIOD_V1);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
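Note: the per-channel scan period moved between firmware layouts: CH_INFO_BE_V0 firmware still reads it from the 8-bit W0 field, newer firmware from the 16-bit field in the new w7, which is why the encode is applied per version after the shared fields. The practical difference is width:

	/* FIELD_MAX(RTW89_H2C_CHINFO_BE_W0_PERIOD)    == 255   (GENMASK(7, 0))
	 * FIELD_MAX(RTW89_H2C_CHINFO_BE_W7_PERIOD_V1) == 65535 (GENMASK(15, 0))
	 */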
@@ -5171,6 +5287,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
u8 probe_id[NUM_NL80211_BANDS];
u8 cfg_len = sizeof(*h2c);
unsigned int cond;
+ u8 ver = U8_MAX;
void *ptr;
int ret;
u32 len;
@@ -5191,6 +5308,9 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
+ if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
+ ver = 0;
+
if (!wowlan) {
list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
if (pkt_info->wildcard_6ghz) {
@@ -5286,9 +5406,7 @@ flex_member:
le32_encode_bits(RTW89_OFF_CHAN_TIME / 10,
RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
- opch->w1 = le32_encode_bits(RTW89_CHANNEL_TIME,
- RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) |
- le32_encode_bits(op->band_type,
+ opch->w1 = le32_encode_bits(op->band_type,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
le32_encode_bits(op->band_width,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
@@ -5314,6 +5432,13 @@ flex_member:
RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);
+
+ if (ver == 0)
+ opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION);
+ else
+ opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1);
ptr += sizeof(*opch);
}
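Note: the same versioned-field move applies to the operating-channel duration: v0 firmware reads RTW89_CHANNEL_TIME from the 8-bit W1 field, newer firmware from the 16-bit W4 field added below, mirroring the period handling sketched after the channel-info loop above.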
@@ -5416,7 +5541,9 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ struct rtw89_fw_h2c_rfk_pre_info_common *common;
struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
+ struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1;
struct rtw89_fw_h2c_rfk_pre_info *h2c;
u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH];
u32 len = sizeof(*h2c);
@@ -5426,7 +5553,10 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
u32 val32;
int ret;
- if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
+ if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) {
+ len = sizeof(*h2c_v1);
+ ver = 1;
+ } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
len = sizeof(*h2c_v0);
ver = 0;
}
@@ -5438,17 +5568,18 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
}
skb_put(skb, len);
h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
+ common = &h2c->base_v1.common;
- h2c->common.mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+ common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH);
for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
- h2c->common.dbcc.ch[path][tbl] =
+ common->dbcc.ch[path][tbl] =
cpu_to_le32(rfk_mcc->data[path].ch[tbl]);
- h2c->common.dbcc.band[path][tbl] =
+ common->dbcc.band[path][tbl] =
cpu_to_le32(rfk_mcc->data[path].band[tbl]);
}
}
@@ -5456,13 +5587,19 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
tbl_sel[path] = rfk_mcc->data[path].table_idx;
- h2c->common.tbl.cur_ch[path] =
+ common->tbl.cur_ch[path] =
cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]);
- h2c->common.tbl.cur_band[path] =
+ common->tbl.cur_band[path] =
cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]);
+
+ if (ver <= 1)
+ continue;
+
+ h2c->cur_bandwidth[path] =
+ cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]);
}
- h2c->common.phy_idx = cpu_to_le32(phy_idx);
+ common->phy_idx = cpu_to_le32(phy_idx);
if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */
h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data;
@@ -5488,8 +5625,10 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
goto done;
}
- if (rtw89_is_mlo_1_1(rtwdev))
- h2c->mlo_1_1 = cpu_to_le32(1);
+ if (rtw89_is_mlo_1_1(rtwdev)) {
+ h2c_v1 = &h2c->base_v1;
+ h2c_v1->mlo_1_1 = cpu_to_le32(1);
+ }
done:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
@@ -6496,7 +6635,7 @@ int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = 0, list_len = 0;
- idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
idx++, list_len++) {
channel = nd_config->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@@ -6547,7 +6686,7 @@ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
- idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
idx++, list_len++) {
channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@@ -6624,7 +6763,7 @@ int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = 0, list_len = 0;
- idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
idx++, list_len++) {
channel = nd_config->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@@ -6679,7 +6818,7 @@ int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&chan_list);
for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
- idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
idx++, list_len++) {
channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
@@ -8164,6 +8303,71 @@ int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
return 0;
}
+static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en)
+{
+ struct rtw89_h2c_ap_info *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for ap info\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ap_info *)skb->data;
+
+ h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_AP,
+ H2C_FUNC_AP_INFO, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en)
+{
+ int ret;
+
+ if (en) {
+ if (refcount_inc_not_zero(&rtwdev->refcount_ap_info))
+ return 0;
+ } else {
+ if (!refcount_dec_and_test(&rtwdev->refcount_ap_info))
+ return 0;
+ }
+
+ ret = rtw89_fw_h2c_ap_info(rtwdev, en);
+ if (ret) {
+ if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
+ return ret;
+
+ /* During recovery, neither driver nor stack has full error
+ * handling, so only warn here and return 0 with the refcount
+ * set as if the enable had succeeded. This avoids a refcount
+ * underflow when we are called with @en == false later.
+ */
+ rtw89_warn(rtwdev, "h2c ap_info failed during SER\n");
+ }
+
+ if (en)
+ refcount_set(&rtwdev->refcount_ap_info, 1);
+
+ return 0;
+}
+
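Note: rtw89_fw_h2c_ap_info_refcount() makes the power-interrupt enable idempotent across multiple AP links: only the 0->1 and 1->0 transitions reach firmware, and a send failure during SER recovery is downgraded to a warning with the refcount still set, so a later disable cannot underflow. A minimal sketch of the transition-only pattern (enable_guarded() and fw_set() are hypothetical):

	static int enable_guarded(refcount_t *cnt, int (*fw_set)(bool en), bool en)
	{
		int ret;

		if (en) {
			if (refcount_inc_not_zero(cnt))
				return 0;	/* already enabled: just bump */
		} else {
			if (!refcount_dec_and_test(cnt))
				return 0;	/* still has users: just drop */
		}

		ret = fw_set(en);		/* first or last user only */
		if (!ret && en)
			refcount_set(cnt, 1);	/* complete the 0 -> 1 step */
		return ret;
	}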
static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
static const u8 zeros[U8_MAX] = {};
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index efa63d444821..a3fe183c2ab0 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -310,9 +310,12 @@ struct rtw89_fw_macid_pause_sleep_grp {
#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
#define RTW89_CHAN_INVALID 0xFF
#define RTW89_MAC_CHINFO_SIZE 28
+#define RTW89_MAC_CHINFO_SIZE_BE 32
#define RTW89_SCAN_LIST_GUARD 4
-#define RTW89_SCAN_LIST_LIMIT \
- ((RTW89_H2C_MAX_SIZE / RTW89_MAC_CHINFO_SIZE) - RTW89_SCAN_LIST_GUARD)
+#define RTW89_SCAN_LIST_LIMIT(size) \
+ ((RTW89_H2C_MAX_SIZE / (size)) - RTW89_SCAN_LIST_GUARD)
+#define RTW89_SCAN_LIST_LIMIT_AX RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE)
+#define RTW89_SCAN_LIST_LIMIT_BE RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE_BE)
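Note: the scan-list limit is now parameterized by element size because the BE channel-info element grew from 28 to 32 bytes, and the 4-entry guard keeps headroom inside the H2C buffer. Worked numbers, assuming a hypothetical 2048-byte RTW89_H2C_MAX_SIZE:

	/* AX: 2048 / 28 - 4 = 73 - 4 = 69 channels per H2C
	 * BE: 2048 / 32 - 4 = 64 - 4 = 60 channels per H2C
	 */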
#define RTW89_BCN_LOSS_CNT 10
@@ -1780,6 +1783,21 @@ struct rtw89_h2c_lps_ch_info {
__le32 mlo_dbcc_mode_lps;
} __packed;
+struct rtw89_h2c_lps_ml_cmn_info {
+ u8 fmt_id;
+ u8 rsvd0[3];
+ __le32 mlo_dbcc_mode;
+ u8 central_ch[RTW89_PHY_MAX];
+ u8 pri_ch[RTW89_PHY_MAX];
+ u8 bw[RTW89_PHY_MAX];
+ u8 band[RTW89_PHY_MAX];
+ u8 bcn_rate_type[RTW89_PHY_MAX];
+ u8 rsvd1[2];
+ __le16 tia_gain[RTW89_PHY_MAX][TIA_GAIN_NUM];
+ u8 lna_gain[RTW89_PHY_MAX][LNA_GAIN_NUM];
+ u8 rsvd2[2];
+} __packed;
+
static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0));
@@ -2647,6 +2665,7 @@ struct rtw89_h2c_chinfo_elem_be {
__le32 w4;
__le32 w5;
__le32 w6;
+ __le32 w7;
} __packed;
#define RTW89_H2C_CHINFO_BE_W0_PERIOD GENMASK(7, 0)
@@ -2678,6 +2697,7 @@ struct rtw89_h2c_chinfo_elem_be {
#define RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS GENMASK(31, 16)
#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS GENMASK(15, 0)
#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS GENMASK(31, 16)
+#define RTW89_H2C_CHINFO_BE_W7_PERIOD_V1 GENMASK(15, 0)
struct rtw89_h2c_chinfo {
u8 ch_num;
@@ -2687,6 +2707,14 @@ struct rtw89_h2c_chinfo {
struct rtw89_h2c_chinfo_elem elem[] __counted_by(ch_num);
} __packed;
+struct rtw89_h2c_chinfo_be {
+ u8 ch_num;
+ u8 elem_size;
+ u8 arg;
+ u8 rsvd0;
+ struct rtw89_h2c_chinfo_elem_be elem[] __counted_by(ch_num);
+} __packed;
+
#define RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK BIT(0)
#define RTW89_H2C_CHINFO_ARG_APPEND_MASK BIT(1)
@@ -2733,6 +2761,7 @@ struct rtw89_h2c_scanofld_be_opch {
__le32 w1;
__le32 w2;
__le32 w3;
+ __le32 w4;
} __packed;
#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID GENMASK(15, 0)
@@ -2754,6 +2783,7 @@ struct rtw89_h2c_scanofld_be_opch {
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1 GENMASK(15, 8)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2 GENMASK(23, 16)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3 GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1 GENMASK(15, 0)
struct rtw89_h2c_scanofld_be {
__le32 w0;
@@ -3466,6 +3496,12 @@ struct rtw89_h2c_wow_aoac {
__le32 w0;
} __packed;
+struct rtw89_h2c_ap_info {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_AP_INFO_W0_PWR_INT_EN BIT(0)
+
#define RTW89_C2H_HEADER_LEN 8
struct rtw89_c2h_hdr {
@@ -3590,6 +3626,7 @@ struct rtw89_c2h_scanofld {
__le32 w5;
__le32 w6;
__le32 w7;
+ __le32 w8;
} __packed;
#define RTW89_C2H_SCANOFLD_W2_PRI_CH GENMASK(7, 0)
@@ -3604,6 +3641,8 @@ struct rtw89_c2h_scanofld {
#define RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD GENMASK(15, 8)
#define RTW89_C2H_SCANOFLD_W6_FW_DEF GENMASK(23, 16)
#define RTW89_C2H_SCANOFLD_W7_REPORT_TSF GENMASK(31, 0)
+#define RTW89_C2H_SCANOFLD_W8_PERIOD_V1 GENMASK(15, 0)
+#define RTW89_C2H_SCANOFLD_W8_EXPECT_PERIOD_V1 GENMASK(31, 16)
#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
@@ -3725,6 +3764,14 @@ struct rtw89_c2h_wow_aoac_report {
#define RTW89_C2H_WOW_AOAC_RPT_REKEY_IDX BIT(0)
+struct rtw89_c2h_pwr_int_notify {
+ struct rtw89_c2h_hdr hdr;
+ __le32 w2;
+} __packed;
+
+#define RTW89_C2H_PWR_INT_NOTIFY_W2_MACID GENMASK(15, 0)
+#define RTW89_C2H_PWR_INT_NOTIFY_W2_PWR_STATUS BIT(16)
+
struct rtw89_h2c_tx_duty {
__le32 w0;
__le32 w1;
@@ -4168,6 +4215,10 @@ enum rtw89_mrc_h2c_func {
#define RTW89_MRC_WAIT_COND_REQ_TSF \
RTW89_MRC_WAIT_COND(0 /* don't care */, H2C_FUNC_MRC_REQ_TSF)
+/* CLASS 36 - AP */
+#define H2C_CL_AP 0x24
+#define H2C_FUNC_AP_INFO 0x0
+
#define H2C_CAT_OUTSRC 0x2
#define H2C_CL_OUTSRC_RA 0x1
@@ -4175,6 +4226,7 @@ enum rtw89_mrc_h2c_func {
#define H2C_CL_OUTSRC_DM 0x2
#define H2C_FUNC_FW_LPS_CH_INFO 0xb
+#define H2C_FUNC_FW_LPS_ML_CMN_INFO 0xe
#define H2C_CL_OUTSRC_RF_REG_A 0x8
#define H2C_CL_OUTSRC_RF_REG_B 0x9
@@ -4241,11 +4293,16 @@ struct rtw89_fw_h2c_rfk_pre_info_v0 {
} __packed mlo;
} __packed;
-struct rtw89_fw_h2c_rfk_pre_info {
+struct rtw89_fw_h2c_rfk_pre_info_v1 {
struct rtw89_fw_h2c_rfk_pre_info_common common;
__le32 mlo_1_1;
} __packed;
+struct rtw89_fw_h2c_rfk_pre_info {
+ struct rtw89_fw_h2c_rfk_pre_info_v1 base_v1;
+ __le32 cur_bandwidth[NUM_OF_RTW89_FW_RFK_PATH];
+} __packed;
+
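Note: the rfk_pre_info layouts nest by prefix: v0 and v1 share the common block, v1 appends mlo_1_1, and the newest layout appends cur_bandwidth[]; rtw89_fw_h2c_rf_pre_ntfy() above picks the send length per feature flag and writes through common/base_v1, so older firmware only ever sees its own prefix of the buffer. A generic sketch of the pattern:

	struct msg_common { __le32 a; } __packed;
	struct msg_v1 { struct msg_common common; __le32 b; } __packed;
	struct msg_v2 { struct msg_v1 base_v1; __le32 c; } __packed;
	/* sizeof() grows monotonically, so the chosen send length
	 * alone selects which wire format the firmware parses.
	 */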
struct rtw89_h2c_rf_tssi {
__le16 len;
u8 phy;
@@ -4602,8 +4659,9 @@ int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
-int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link);
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
bool enable);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
@@ -4697,6 +4755,7 @@ int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_sync_arg *arg);
int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mrc_upd_duration_arg *arg);
+int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en);
static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
{
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 7907b84d204b..c78066fd4504 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -4788,9 +4788,11 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
struct rtw89_vif *rtwvif;
struct rtw89_chan new;
- u8 reason, status, tx_fail, band, actual_period, expect_period;
u32 last_chan = rtwdev->scan_info.last_chan_idx, report_tsf;
+ u16 actual_period, expect_period;
+ u8 reason, status, tx_fail, band;
u8 mac_idx, sw_def, fw_def;
+ u8 ver = U8_MAX;
u16 chan;
int ret;
@@ -4799,6 +4801,9 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
rtwvif = rtwvif_link->rtwvif;
+ if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
+ ver = 0;
+
tx_fail = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_TX_FAIL);
status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
chan = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PRI_CH);
@@ -4811,21 +4816,28 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
- rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
- "mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
- mac_idx, band, chan, reason, status, tx_fail, actual_period);
-
if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
sw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_SW_DEF);
- expect_period = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD);
fw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_FW_DEF);
report_tsf = le32_get_bits(c2h->w7, RTW89_C2H_SCANOFLD_W7_REPORT_TSF);
+ if (ver == 0) {
+ expect_period =
+ le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD);
+ } else {
+ actual_period = le32_get_bits(c2h->w8, RTW89_C2H_SCANOFLD_W8_PERIOD_V1);
+ expect_period =
+ le32_get_bits(c2h->w8, RTW89_C2H_SCANOFLD_W8_EXPECT_PERIOD_V1);
+ }
rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
"sw_def: %d, fw_def: %d, tsf: %x, expect: %d\n",
sw_def, fw_def, report_tsf, expect_period);
}
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
+ mac_idx, band, chan, reason, status, tx_fail, actual_period);
+
switch (reason) {
case RTW89_SCAN_LEAVE_OP_NOTIFY:
case RTW89_SCAN_LEAVE_CH_NOTIFY:
@@ -5364,6 +5376,39 @@ rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
rtw89_complete_cond(wait, cond, &data);
}
+static void
+rtw89_mac_c2h_pwr_int_notify(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 len)
+{
+ const struct rtw89_c2h_pwr_int_notify *c2h;
+ struct rtw89_sta_link *rtwsta_link;
+ struct ieee80211_sta *sta;
+ struct rtw89_sta *rtwsta;
+ u16 macid;
+ bool ps;
+
+ c2h = (const struct rtw89_c2h_pwr_int_notify *)skb->data;
+ macid = le32_get_bits(c2h->w2, RTW89_C2H_PWR_INT_NOTIFY_W2_MACID);
+ ps = le32_get_bits(c2h->w2, RTW89_C2H_PWR_INT_NOTIFY_W2_PWR_STATUS);
+
+ rcu_read_lock();
+
+ rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid);
+ if (unlikely(!rtwsta_link))
+ goto out;
+
+ rtwsta = rtwsta_link->rtwsta;
+ if (ps)
+ set_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags);
+ else
+ clear_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags);
+
+ sta = rtwsta_to_sta(rtwsta);
+ ieee80211_sta_ps_transition(sta, ps);
+
+out:
+ rcu_read_unlock();
+}
+
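Note: this handler consumes the assoc_link_on_macid map added in core.h: firmware now reports peer power-save transitions in AP mode, the macid is resolved to a station link under rcu_read_lock() exactly as in the reader sketch near those helpers, the state is mirrored into RTW89_REMOTE_STA_IN_PS, and ieee80211_sta_ps_transition() lets mac80211 buffer frames for the dozing peer.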
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -5409,6 +5454,12 @@ void (* const rtw89_mac_c2h_wow_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_AOAC_REPORT] = rtw89_mac_c2h_wow_aoac_rpt,
};
+static
+void (* const rtw89_mac_c2h_ap_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY] = rtw89_mac_c2h_pwr_int_notify,
+};
+
static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
@@ -5463,6 +5514,13 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
return true;
case RTW89_MAC_C2H_CLASS_WOW:
return true;
+ case RTW89_MAC_C2H_CLASS_AP:
+ switch (func) {
+ default:
+ return false;
+ case RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY:
+ return true;
+ }
}
}
@@ -5493,14 +5551,18 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_WOW)
handler = rtw89_mac_c2h_wow_handler[func];
break;
+ case RTW89_MAC_C2H_CLASS_AP:
+ if (func < NUM_OF_RTW89_MAC_C2H_FUNC_AP)
+ handler = rtw89_mac_c2h_ap_handler[func];
+ break;
case RTW89_MAC_C2H_CLASS_FWDBG:
return;
default:
- rtw89_info(rtwdev, "c2h class %d not support\n", class);
+ rtw89_info(rtwdev, "MAC c2h class %d not support\n", class);
return;
}
if (!handler) {
- rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
+ rtw89_info(rtwdev, "MAC c2h class %d func %d not support\n", class,
func);
return;
}
@@ -6674,6 +6736,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_ax,
.cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_ax,
+ .cfg_phy_rpt = NULL,
.dle_mix_cfg = dle_mix_cfg_ax,
.chk_dle_rdy = chk_dle_rdy_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 18579c020548..8edea96d037f 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -169,6 +169,20 @@ enum rtw89_mac_ax_l0_to_l1_event {
MAC_AX_L0_TO_L1_EVENT_MAX = 15,
};
+enum rtw89_mac_phy_rpt_size {
+ MAC_AX_PHY_RPT_SIZE_0 = 0,
+ MAC_AX_PHY_RPT_SIZE_8 = 1,
+ MAC_AX_PHY_RPT_SIZE_16 = 2,
+ MAC_AX_PHY_RPT_SIZE_24 = 3,
+};
+
+enum rtw89_mac_hdr_cnv_size {
+ MAC_AX_HDR_CNV_SIZE_0 = 0,
+ MAC_AX_HDR_CNV_SIZE_32 = 1,
+ MAC_AX_HDR_CNV_SIZE_64 = 2,
+ MAC_AX_HDR_CNV_SIZE_96 = 3,
+};
+
enum rtw89_mac_wow_fw_status {
WOWLAN_NOT_READY = 0x00,
WOWLAN_SLEEP_READY = 0x01,
@@ -426,6 +440,12 @@ enum rtw89_mac_c2h_wow_func {
NUM_OF_RTW89_MAC_C2H_FUNC_WOW,
};
+enum rtw89_mac_c2h_ap_func {
+ RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY = 0,
+
+ NUM_OF_RTW89_MAC_C2H_FUNC_AP,
+};
+
enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_INFO = 0x0,
RTW89_MAC_C2H_CLASS_OFLD = 0x1,
@@ -434,6 +454,7 @@ enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_MCC = 0x4,
RTW89_MAC_C2H_CLASS_FWDBG = 0x5,
RTW89_MAC_C2H_CLASS_MRC = 0xe,
+ RTW89_MAC_C2H_CLASS_AP = 0x18,
RTW89_MAC_C2H_CLASS_MAX,
};
@@ -961,6 +982,7 @@ struct rtw89_mac_gen_def {
enum rtw89_mac_fwd_target fwd_target,
u8 mac_idx);
int (*cfg_ppdu_status)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
+ void (*cfg_phy_rpt)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
int (*dle_mix_cfg)(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg);
int (*chk_dle_rdy)(struct rtw89_dev *rtwdev, bool wde_or_ple);
@@ -1216,6 +1238,27 @@ int rtw89_mac_stop_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+void rtw89_mac_cfg_phy_rpt_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
+
+static inline
+void rtw89_mac_cfg_phy_rpt(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ if (mac->cfg_phy_rpt)
+ mac->cfg_phy_rpt(rtwdev, mac_idx, enable);
+}
+
+static inline
+void rtw89_mac_cfg_phy_rpt_bands(struct rtw89_dev *rtwdev, bool enable)
+{
+ rtw89_mac_cfg_phy_rpt(rtwdev, RTW89_MAC_0, enable);
+
+ if (!rtwdev->dbcc_en)
+ return;
+
+ rtw89_mac_cfg_phy_rpt(rtwdev, RTW89_MAC_1, enable);
+}
static inline
int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 619d2d3771d5..5eac0b524060 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -202,7 +202,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtw89_traffic_stats_init(rtwdev, &rtwvif->stats);
- rtwvif_link = rtw89_vif_set_link(rtwvif, 0);
+ rtwvif_link = rtw89_vif_set_link(rtwvif, RTW89_VIF_IDLE_LINK_ID);
if (!rtwvif_link) {
ret = -EINVAL;
goto release_port;
@@ -218,7 +218,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
return 0;
unset_link:
- rtw89_vif_unset_link(rtwvif, 0);
+ rtw89_vif_unset_link(rtwvif, RTW89_VIF_IDLE_LINK_ID);
release_port:
list_del_init(&rtwvif->list);
rtw89_core_release_bit_map(rtwdev->hw_port, port);
@@ -246,17 +246,17 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
- rtwvif_link = rtwvif->links[0];
+ rtwvif_link = rtwvif->links[RTW89_VIF_IDLE_LINK_ID];
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
- __func__, 0);
+ __func__, RTW89_VIF_IDLE_LINK_ID);
goto bottom;
}
__rtw89_ops_remove_iface_link(rtwdev, rtwvif_link);
- rtw89_vif_unset_link(rtwvif, 0);
+ rtw89_vif_unset_link(rtwvif, RTW89_VIF_IDLE_LINK_ID);
bottom:
list_del_init(&rtwvif->list);
@@ -509,6 +509,7 @@ static int __rtw89_ops_sta_add(struct rtw89_dev *rtwdev,
rtw89_core_txq_init(rtwdev, sta->txq[i]);
skb_queue_head_init(&rtwsta->roc_queue);
+ bitmap_zero(rtwsta->pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
rtwsta_link = rtw89_sta_set_link(rtwsta, sta->deflink.link_id);
if (!rtwsta_link) {
@@ -775,6 +776,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
const struct rtw89_chan *chan;
+ int ret = 0;
mutex_lock(&rtwdev->mutex);
@@ -783,6 +785,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_conf->link_id);
+ ret = -ENOLINK;
goto out;
}
@@ -804,12 +807,18 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL);
rtw89_chip_rfk_channel(rtwdev, rtwvif_link);
+ if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw)) {
+ ret = rtw89_fw_h2c_ap_info_refcount(rtwdev, true);
+ if (ret)
+ goto out;
+ }
+
rtw89_queue_chanctx_work(rtwdev);
out:
mutex_unlock(&rtwdev->mutex);
- return 0;
+ return ret;
}
static
@@ -830,6 +839,9 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
+ if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw))
+ rtw89_fw_h2c_ap_info_refcount(rtwdev, false);
+
rtw89_mac_stop_ap(rtwdev, rtwvif_link);
rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, NULL);
rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, NULL, true);
@@ -1295,10 +1307,15 @@ static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_link_sta *link_sta,
u32 changed)
{
- struct ieee80211_sta *sta = link_sta->sta;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta(link_sta->sta);
struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_sta_link *rtwsta_link;
- rtw89_phy_ra_update_sta(rtwdev, sta, changed);
+ rtwsta_link = rtwsta->links[link_sta->link_id];
+ if (unlikely(!rtwsta_link))
+ return;
+
+ rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed);
}
static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw,
@@ -1473,6 +1490,259 @@ static int rtw89_ops_set_tid_config(struct ieee80211_hw *hw,
return 0;
}
+static bool rtw89_can_work_on_links(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif, u16 links)
+{
+ struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ u8 w = hweight16(links);
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ w > RTW89_MLD_NON_STA_LINK_NUM)
+ return false;
+
+ return w <= rtwvif->links_inst_valid_num;
+}
+
+static bool rtw89_ops_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 active_links)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ guard(mutex)(&rtwdev->mutex);
+
+ return rtw89_can_work_on_links(rtwdev, vif, active_links);
+}
+
+static void __rtw89_ops_clr_vif_links(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ unsigned long clr_links)
+{
+ struct rtw89_vif_link *rtwvif_link;
+ unsigned int link_id;
+
+ for_each_set_bit(link_id, &clr_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rtwvif_link = rtwvif->links[link_id];
+ if (unlikely(!rtwvif_link))
+ continue;
+
+ __rtw89_ops_remove_iface_link(rtwdev, rtwvif_link);
+
+ rtw89_vif_unset_link(rtwvif, link_id);
+ }
+}
+
+static int __rtw89_ops_set_vif_links(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ unsigned long set_links)
+{
+ struct rtw89_vif_link *rtwvif_link;
+ unsigned int link_id;
+ int ret;
+
+ for_each_set_bit(link_id, &set_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rtwvif_link = rtw89_vif_set_link(rtwvif, link_id);
+ if (!rtwvif_link)
+ return -EINVAL;
+
+ ret = __rtw89_ops_add_iface_link(rtwdev, rtwvif_link);
+ if (ret) {
+ rtw89_err(rtwdev, "%s: failed to add iface (link id %u)\n",
+ __func__, link_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static
+int rtw89_ops_change_vif_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ unsigned long clr_links = old_links & ~new_links;
+ unsigned long set_links = new_links & ~old_links;
+ bool removing_links = !old_links || clr_links;
+ struct rtw89_link_conf_container *snap;
+ int ret = 0;
+ int i;
+
+ guard(mutex)(&rtwdev->mutex);
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE,
+ "%s: old_links (0x%08x) -> new_links (0x%08x)\n",
+ __func__, old_links, new_links);
+
+ if (!rtw89_can_work_on_links(rtwdev, vif, new_links))
+ return -EOPNOTSUPP;
+
+ if (removing_links) {
+ snap = kzalloc(sizeof(*snap), GFP_KERNEL);
+ if (!snap)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(snap->link_conf); i++)
+ snap->link_conf[i] = old[i];
+
+ rcu_assign_pointer(rtwvif->snap_link_confs, snap);
+ }
+
+ /* might depend on @snap; don't change order */
+ rtw89_leave_ips_by_hwflags(rtwdev);
+
+ if (rtwdev->scanning)
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
+
+ if (!old_links)
+ __rtw89_ops_clr_vif_links(rtwdev, rtwvif,
+ BIT(RTW89_VIF_IDLE_LINK_ID));
+ else if (clr_links)
+ __rtw89_ops_clr_vif_links(rtwdev, rtwvif, clr_links);
+
+ if (removing_links) {
+ /* @snap is only needed while links are being removed, and that
+ * work is done at this point, so clean up @snap immediately.
+ */
+ rcu_assign_pointer(rtwvif->snap_link_confs, NULL);
+
+ /* The pointers in @old are freed once this function returns,
+ * so synchronously wait for all readers of @snap to be done.
+ */
+ synchronize_rcu();
+ kfree(snap);
+ }
+
+ if (set_links) {
+ ret = __rtw89_ops_set_vif_links(rtwdev, rtwvif, set_links);
+ if (ret)
+ __rtw89_ops_clr_vif_links(rtwdev, rtwvif, set_links);
+ } else if (!new_links) {
+ ret = __rtw89_ops_set_vif_links(rtwdev, rtwvif,
+ BIT(RTW89_VIF_IDLE_LINK_ID));
+ if (ret)
+ __rtw89_ops_clr_vif_links(rtwdev, rtwvif,
+ BIT(RTW89_VIF_IDLE_LINK_ID));
+ }
+
+ rtw89_enter_ips_by_hwflags(rtwdev);
+ return ret;
+}
+
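Note: the ordering in rtw89_ops_change_vif_links() is deliberate: the old link confs are snapshotted and published before any teardown, so concurrent readers via __rtw89_vif_rcu_dereference_link() keep seeing valid pointers, and the snapshot is unpublished and synchronize_rcu()'d before returning, since mac80211 may free the @old array right afterwards. The lifecycle, as a sketch:

	/* 1. snap = kzalloc(); copy @old[]; rcu_assign_pointer(p, snap);
	 * 2. teardown that may read through p under rcu_read_lock()
	 * 3. rcu_assign_pointer(p, NULL);
	 * 4. synchronize_rcu();  -- every reader of snap has finished
	 * 5. kfree(snap);        -- @old[] may now be freed by mac80211
	 */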
+static void __rtw89_ops_clr_sta_links(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta,
+ unsigned long clr_links)
+{
+ struct rtw89_vif_link *rtwvif_link;
+ struct rtw89_sta_link *rtwsta_link;
+ unsigned int link_id;
+
+ for_each_set_bit(link_id, &clr_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rtwsta_link = rtwsta->links[link_id];
+ if (unlikely(!rtwsta_link))
+ continue;
+
+ rtwvif_link = rtwsta_link->rtwvif_link;
+
+ rtw89_core_sta_link_disassoc(rtwdev, rtwvif_link, rtwsta_link);
+ rtw89_core_sta_link_disconnect(rtwdev, rtwvif_link, rtwsta_link);
+ rtw89_core_sta_link_remove(rtwdev, rtwvif_link, rtwsta_link);
+
+ rtw89_sta_unset_link(rtwsta, link_id);
+ }
+}
+
+static int __rtw89_ops_set_sta_links(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta,
+ unsigned long set_links)
+{
+ struct rtw89_vif_link *rtwvif_link;
+ struct rtw89_sta_link *rtwsta_link;
+ unsigned int link_id;
+ u8 sec_cam_idx;
+ int ret;
+
+ for_each_set_bit(link_id, &set_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rtwsta_link = rtw89_sta_set_link(rtwsta, link_id);
+ if (!rtwsta_link)
+ return -EINVAL;
+
+ rtwvif_link = rtwsta_link->rtwvif_link;
+
+ ret = rtw89_core_sta_link_add(rtwdev, rtwvif_link, rtwsta_link);
+ if (ret) {
+ rtw89_err(rtwdev, "%s: failed to add sta (link id %u)\n",
+ __func__, link_id);
+ return ret;
+ }
+
+ rtw89_vif_type_mapping(rtwvif_link, true);
+
+ ret = rtw89_core_sta_link_assoc(rtwdev, rtwvif_link, rtwsta_link);
+ if (ret) {
+ rtw89_err(rtwdev, "%s: failed to assoc sta (link id %u)\n",
+ __func__, link_id);
+ return ret;
+ }
+
+ __rtw89_ops_bss_link_assoc(rtwdev, rtwvif_link);
+
+ for_each_set_bit(sec_cam_idx, rtwsta->pairwise_sec_cam_map,
+ RTW89_MAX_SEC_CAM_NUM) {
+ ret = rtw89_cam_attach_link_sec_cam(rtwdev,
+ rtwvif_link,
+ rtwsta_link,
+ sec_cam_idx);
+ if (ret) {
+ rtw89_err(rtwdev,
+ "%s: failed to apply pairwise key (link id %u)\n",
+ __func__, link_id);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static
+int rtw89_ops_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
+ unsigned long clr_links = old_links & ~new_links;
+ unsigned long set_links = new_links & ~old_links;
+ int ret = 0;
+
+ guard(mutex)(&rtwdev->mutex);
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE,
+ "%s: old_links (0x%08x) -> new_links (0x%08x)\n",
+ __func__, old_links, new_links);
+
+ if (!rtw89_can_work_on_links(rtwdev, vif, new_links))
+ return -EOPNOTSUPP;
+
+ rtw89_leave_ps_mode(rtwdev);
+
+ if (clr_links)
+ __rtw89_ops_clr_sta_links(rtwdev, rtwsta, clr_links);
+
+ if (set_links) {
+ ret = __rtw89_ops_set_sta_links(rtwdev, rtwsta, set_links);
+ if (ret)
+ __rtw89_ops_clr_sta_links(rtwdev, rtwsta, set_links);
+ }
+
+ return ret;
+}
+
#ifdef CONFIG_PM
static int rtw89_ops_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan)
@@ -1600,6 +1870,9 @@ const struct ieee80211_ops rtw89_ops = {
.set_sar_specs = rtw89_ops_set_sar_specs,
.link_sta_rc_update = rtw89_ops_sta_rc_update,
.set_tid_config = rtw89_ops_set_tid_config,
+ .can_activate_links = rtw89_ops_can_activate_links,
+ .change_vif_links = rtw89_ops_change_vif_links,
+ .change_sta_links = rtw89_ops_change_sta_links,
#ifdef CONFIG_PM
.suspend = rtw89_ops_suspend,
.resume = rtw89_ops_resume,
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index f7a396c8a3cd..2dbdeae904ad 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -1988,6 +1988,20 @@ int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
}
EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v2);
+void rtw89_mac_cfg_phy_rpt_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ u32 reg, val;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RCR, mac_idx);
+ val = enable ? MAC_AX_PHY_RPT_SIZE_8 : MAC_AX_PHY_RPT_SIZE_0;
+ rtw89_write32_mask(rtwdev, reg, B_BE_PHY_RPT_SZ_MASK, val);
+ rtw89_write32_mask(rtwdev, reg, B_BE_HDR_CNV_SZ_MASK, MAC_AX_HDR_CNV_SIZE_0);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_DRV_INFO_OPTION, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_DRV_INFO_PHYRPT_EN, enable);
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_phy_rpt_be);
+
static
int rtw89_mac_cfg_ppdu_status_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
@@ -2583,6 +2597,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_be,
.cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_be,
+ .cfg_phy_rpt = rtw89_mac_cfg_phy_rpt_be,
.dle_mix_cfg = dle_mix_cfg_be,
.chk_dle_rdy = chk_dle_rdy_be,
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index f923bec03d41..c3a027735d0f 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -2516,7 +2516,7 @@ static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
}
-static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
+static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up)
{
if (pwr_up)
rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
@@ -2825,6 +2825,8 @@ static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
+ rtw89_pci_power_wake(rtwdev, false);
+
if (rtwdev->chip->chip_id == RTL8852A) {
/* ltr sw trigger */
rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
@@ -2867,7 +2869,7 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
return ret;
}
- rtw89_pci_power_wake(rtwdev, true);
+ rtw89_pci_power_wake_ax(rtwdev, true);
rtw89_pci_autoload_hang(rtwdev);
rtw89_pci_l12_vmain(rtwdev);
rtw89_pci_gen2_force_ib(rtwdev);
@@ -2912,6 +2914,13 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev)
+{
+ rtw89_pci_power_wake_ax(rtwdev, false);
+
+ return 0;
+}
+
int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
{
u32 val;
@@ -4325,7 +4334,7 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
B_AX_RDU_INT},
.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
- .mac_pre_deinit = NULL,
+ .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
.mac_post_init = rtw89_pci_ops_mac_post_init_ax,
.clr_idx_all = rtw89_pci_clr_idx_all_ax,
@@ -4343,6 +4352,7 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
.l1ss_set = rtw89_pci_l1ss_set_ax,
.disable_eq = rtw89_pci_disable_eq_ax,
+ .power_wake = rtw89_pci_power_wake_ax,
};
EXPORT_SYMBOL(rtw89_pci_gen_ax);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index b68e2d82eea9..d52db4ca1b99 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -1290,6 +1290,7 @@ struct rtw89_pci_gen_def {
void (*l1ss_set)(struct rtw89_dev *rtwdev, bool enable);
void (*disable_eq)(struct rtw89_dev *rtwdev);
+ void (*power_wake)(struct rtw89_dev *rtwdev, bool pwr_up);
};
#define RTW89_PCI_SSID(v, d, ssv, ssd, cust) \
@@ -1805,4 +1806,12 @@ static inline void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev)
gen_def->disable_eq(rtwdev);
}
+static inline void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ gen_def->power_wake(rtwdev, pwr_up);
+}
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
index 34154506f5d4..cd39eebe8186 100644
--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
+++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
@@ -691,5 +691,6 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
.l1ss_set = rtw89_pci_l1ss_set_be,
.disable_eq = rtw89_pci_disable_eq_be,
+ .power_wake = _patch_pcie_power_wake_be,
};
EXPORT_SYMBOL(rtw89_pci_gen_be);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index f24aca663cf0..4e3754fd18fd 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include "acpi.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
@@ -263,16 +264,26 @@ rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES,
static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link,
+ struct ieee80211_link_sta *link_sta,
const struct rtw89_chan *chan,
bool *fix_giltf_en, u8 *fix_giltf)
{
struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
u8 band = chan->band_type;
enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
- u8 he_gi = mask->control[nl_band].he_gi;
u8 he_ltf = mask->control[nl_band].he_ltf;
+ u8 he_gi = mask->control[nl_band].he_gi;
- if (!rtwsta_link->use_cfg_mask)
+ *fix_giltf_en = true;
+
+ if (rtwdev->chip->chip_id == RTL8852C &&
+ chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
+ rtw89_sta_link_has_su_mu_4xhe08(link_sta))
+ *fix_giltf = RTW89_GILTF_SGI_4XHE08;
+ else
+ *fix_giltf = RTW89_GILTF_2XHE08;
+
+ if (!(rtwsta_link->use_cfg_mask && link_sta->he_cap.has_he))
return;
if (he_ltf == 2 && he_gi == 2) {
@@ -287,12 +298,7 @@ static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
*fix_giltf = RTW89_GILTF_1XHE16;
} else if (he_ltf == 0 && he_gi == 0) {
*fix_giltf = RTW89_GILTF_1XHE08;
- } else {
- *fix_giltf_en = false;
- return;
}
-
- *fix_giltf_en = true;
}
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
@@ -325,6 +331,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
mode |= RTW89_RA_MODE_EHT;
ra_mask |= get_eht_ra_mask(link_sta);
high_rate_masks = rtw89_ra_mask_eht_rates;
+ rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
+ chan, &fix_giltf_en, &fix_giltf);
} else if (link_sta->he_cap.has_he) {
mode |= RTW89_RA_MODE_HE;
csi_mode = RTW89_RA_RPT_MODE_HE;
@@ -336,7 +344,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
if (link_sta->he_cap.he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
ldpc_en = 1;
- rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, chan, &fix_giltf_en, &fix_giltf);
+ rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
+ chan, &fix_giltf_en, &fix_giltf);
} else if (link_sta->vht_cap.vht_supported) {
u16 mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);
@@ -466,11 +475,11 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ra->csi_mode = csi_mode;
}
-static void __rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link,
- struct rtw89_sta_link *rtwsta_link,
- u32 changed)
+void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link,
+ u32 changed)
{
+ struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
struct rtw89_ra_info *ra = &rtwsta_link->ra;
struct ieee80211_link_sta *link_sta;
@@ -503,14 +512,11 @@ void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta
u32 changed)
{
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
- struct rtw89_vif_link *rtwvif_link;
struct rtw89_sta_link *rtwsta_link;
unsigned int link_id;
- rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
- rtwvif_link = rtwsta_link->rtwvif_link;
- __rtw89_phy_ra_update_sta(rtwdev, rtwvif_link, rtwsta_link, changed);
- }
+ rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
+ rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed);
}
static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
@@ -1854,6 +1860,228 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
+static u8 rtw89_phy_ant_gain_domain_to_regd(struct rtw89_dev *rtwdev, u8 ant_gain_regd)
+{
+ switch (ant_gain_regd) {
+ case RTW89_ANT_GAIN_ETSI:
+ return RTW89_ETSI;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "unknown antenna gain domain: %d\n",
+ ant_gain_regd);
+ return RTW89_REGD_NUM;
+ }
+}
+
+/* antenna gain in units of 0.25 dBm */
+#define RTW89_ANT_GAIN_2GHZ_MIN -8
+#define RTW89_ANT_GAIN_2GHZ_MAX 14
+#define RTW89_ANT_GAIN_5GHZ_MIN -8
+#define RTW89_ANT_GAIN_5GHZ_MAX 20
+#define RTW89_ANT_GAIN_6GHZ_MIN -8
+#define RTW89_ANT_GAIN_6GHZ_MAX 20
+
+#define RTW89_ANT_GAIN_REF_2GHZ 14
+#define RTW89_ANT_GAIN_REF_5GHZ 20
+#define RTW89_ANT_GAIN_REF_6GHZ 20
+
+void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_acpi_rtag_result res = {};
+ u32 domain;
+ int ret;
+ u8 i, j;
+ u8 regd;
+ u8 val;
+
+ if (!chip->support_ant_gain)
+ return;
+
+ ret = rtw89_acpi_evaluate_rtag(rtwdev, &res);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "acpi: cannot eval rtag: %d\n", ret);
+ return;
+ }
+
+ if (res.revision != 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "unknown rtag revision: %d\n", res.revision);
+ return;
+ }
+
+ domain = get_unaligned_le32(&res.domain);
+
+ for (i = 0; i < RTW89_ANT_GAIN_DOMAIN_NUM; i++) {
+ if (!(domain & BIT(i)))
+ continue;
+
+ regd = rtw89_phy_ant_gain_domain_to_regd(rtwdev, i);
+ if (regd >= RTW89_REGD_NUM)
+ continue;
+ ant_gain->regd_enabled |= BIT(regd);
+ }
+
+ for (i = 0; i < RTW89_ANT_GAIN_CHAIN_NUM; i++) {
+ for (j = 0; j < RTW89_ANT_GAIN_SUBBAND_NR; j++) {
+ val = res.ant_gain_table[i][j];
+ switch (j) {
+ default:
+ case RTW89_ANT_GAIN_2GHZ_SUBBAND:
+ val = RTW89_ANT_GAIN_REF_2GHZ -
+ clamp_t(s8, val,
+ RTW89_ANT_GAIN_2GHZ_MIN,
+ RTW89_ANT_GAIN_2GHZ_MAX);
+ break;
+ case RTW89_ANT_GAIN_5GHZ_SUBBAND_1:
+ case RTW89_ANT_GAIN_5GHZ_SUBBAND_2:
+ case RTW89_ANT_GAIN_5GHZ_SUBBAND_2E:
+ case RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4:
+ val = RTW89_ANT_GAIN_REF_5GHZ -
+ clamp_t(s8, val,
+ RTW89_ANT_GAIN_5GHZ_MIN,
+ RTW89_ANT_GAIN_5GHZ_MAX);
+ break;
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_6:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H:
+ case RTW89_ANT_GAIN_6GHZ_SUBBAND_8:
+ val = RTW89_ANT_GAIN_REF_6GHZ -
+ clamp_t(s8, val,
+ RTW89_ANT_GAIN_6GHZ_MIN,
+ RTW89_ANT_GAIN_6GHZ_MAX);
+ }
+ ant_gain->offset[i][j] = val;
+ }
+ }
+}
+
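Note: RTAG values are clamped to per-band bounds and stored as offsets relative to the band reference, all in 0.25 dB steps: offset = REF - clamp(gain, MIN, MAX), so a gain at the reference yields 0 and smaller gains yield positive offsets that later raise the regulatory limit. Worked numbers for 2 GHz (REF 14, bounds [-8, 14]):

	/* reported gain 14 -> offset 14 - 14 = 0   (no change)
	 * reported gain  6 -> offset 14 - 6  = 8   (+2 dB on the limit)
	 * reported gain 20 -> clamped to 14  -> offset 0
	 */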
+static
+enum rtw89_ant_gain_subband rtw89_phy_ant_gain_get_subband(struct rtw89_dev *rtwdev,
+ u32 center_freq)
+{
+ switch (center_freq) {
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "center freq: %u to antenna gain subband is unhandled\n",
+ center_freq);
+ fallthrough;
+ case 2412 ... 2484:
+ return RTW89_ANT_GAIN_2GHZ_SUBBAND;
+ case 5180 ... 5240:
+ return RTW89_ANT_GAIN_5GHZ_SUBBAND_1;
+ case 5250 ... 5320:
+ return RTW89_ANT_GAIN_5GHZ_SUBBAND_2;
+ case 5500 ... 5720:
+ return RTW89_ANT_GAIN_5GHZ_SUBBAND_2E;
+ case 5745 ... 5885:
+ return RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4;
+ case 5955 ... 6155:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L;
+ case 6175 ... 6415:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H;
+ case 6435 ... 6515:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_6;
+ case 6535 ... 6695:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L;
+ case 6715 ... 6855:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H;
+
+ /* freq 6875 (ch 185, 20MHz) spans RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H
+ * and RTW89_ANT_GAIN_6GHZ_SUBBAND_8, so directly describe it with
+ * struct rtw89_6ghz_span.
+ */
+
+ case 6895 ... 7115:
+ return RTW89_ANT_GAIN_6GHZ_SUBBAND_8;
+ }
+}
+
+static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u32 center_freq)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ enum rtw89_ant_gain_subband subband_l, subband_h;
+ const struct rtw89_6ghz_span *span;
+
+ span = rtw89_get_6ghz_span(rtwdev, center_freq);
+
+ if (span && RTW89_ANT_GAIN_SPAN_VALID(span)) {
+ subband_l = span->ant_gain_subband_low;
+ subband_h = span->ant_gain_subband_high;
+ } else {
+ subband_l = rtw89_phy_ant_gain_get_subband(rtwdev, center_freq);
+ subband_h = subband_l;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "center_freq %u: antenna gain subband {%u, %u}\n",
+ center_freq, subband_l, subband_h);
+
+ return min(ant_gain->offset[path][subband_l],
+ ant_gain->offset[path][subband_h]);
+}
+
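Note: when a channel maps to two gain subbands (the 6875 MHz span case above), min() keeps the smaller of the two offsets, i.e. it behaves as if the antenna had the larger gain of the two, the conservative choice for EIRP compliance:

	/* offsets {8, 2} in 0.25 dB units -> min() == 2:
	 * the limit is relaxed by 0.5 dB, not by 2 dB
	 */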
+static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 center_freq)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 regd = rtw89_regd_get(rtwdev, band);
+ s8 offset_patha, offset_pathb;
+
+ if (!chip->support_ant_gain)
+ return 0;
+
+ if (!(ant_gain->regd_enabled & BIT(regd)))
+ return 0;
+
+ offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
+ offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);
+
+ return max(offset_patha, offset_pathb);
+}
+
+s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
+ s8 offset_patha, offset_pathb;
+
+ if (!(ant_gain->regd_enabled & BIT(regd)))
+ return 0;
+
+ offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
+ offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
+
+ return rtw89_phy_txpwr_rf_to_bb(rtwdev, offset_patha - offset_pathb);
+}
+EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);
+
+void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
+ s8 offset_patha, offset_pathb;
+
+ if (!chip->support_ant_gain || !(ant_gain->regd_enabled & BIT(regd))) {
+ seq_puts(m, "no DAG is applied\n");
+ return;
+ }
+
+ offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
+ offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
+
+ seq_printf(m, "ChainA offset: %d dBm\n", offset_patha);
+ seq_printf(m, "ChainB offset: %d dBm\n", offset_pathb);
+}
+
static const u8 rtw89_rs_idx_num_ax[] = {
[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
@@ -1917,20 +2145,6 @@ void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
-static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
-{
- const struct rtw89_chip_info *chip = rtwdev->chip;
-
- return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
-}
-
-static s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm)
-{
- const struct rtw89_chip_info *chip = rtwdev->chip;
-
- return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
-}
-
static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
{
const u8 tssi_deviation_point = 0;
@@ -2027,7 +2241,7 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
- s8 lmt = 0, sar;
+ s8 lmt = 0, sar, offset;
s8 cstr;
switch (band) {
@@ -2059,7 +2273,8 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
return 0;
}
- lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt);
+ offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
+ lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt + offset);
sar = rtw89_query_sar(rtwdev, freq);
cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
@@ -2286,7 +2501,7 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
- s8 lmt_ru = 0, sar;
+ s8 lmt_ru = 0, sar, offset;
s8 cstr;
switch (band) {
@@ -2318,7 +2533,8 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
return 0;
}
- lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru);
+ offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
+ lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru + offset);
sar = rtw89_query_sar(rtwdev, freq);
cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
@@ -3228,10 +3444,16 @@ rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u3
(int)(len - sizeof(report->hdr)), &report->state);
}
+static void
+rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+}
+
static
void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
+ [RTW89_PHY_C2H_RFK_LOG_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
};
bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
@@ -3285,11 +3507,11 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
return;
fallthrough;
default:
- rtw89_info(rtwdev, "c2h class %d not support\n", class);
+ rtw89_info(rtwdev, "PHY c2h class %d not support\n", class);
return;
}
if (!handler) {
- rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
+ rtw89_info(rtwdev, "PHY c2h class %d func %d not support\n", class,
func);
return;
}
@@ -4058,7 +4280,6 @@ static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
if (!force && cfo->crystal_cap == crystal_cap)
return;
- crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
@@ -4181,7 +4402,7 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
s32 curr_cfo)
{
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
- s8 crystal_cap = cfo->crystal_cap;
+ int crystal_cap = cfo->crystal_cap;
s32 cfo_abs = abs(curr_cfo);
int sign;
@@ -4202,15 +4423,17 @@ static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
}
sign = curr_cfo > 0 ? 1 : -1;
if (cfo_abs > CFO_TRK_STOP_TH_4)
- crystal_cap += 7 * sign;
+ crystal_cap += 3 * sign;
else if (cfo_abs > CFO_TRK_STOP_TH_3)
- crystal_cap += 5 * sign;
- else if (cfo_abs > CFO_TRK_STOP_TH_2)
crystal_cap += 3 * sign;
+ else if (cfo_abs > CFO_TRK_STOP_TH_2)
+ crystal_cap += 1 * sign;
else if (cfo_abs > CFO_TRK_STOP_TH_1)
crystal_cap += 1 * sign;
else
return;
+
+ crystal_cap = clamp(crystal_cap, 0, 127);
rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
rtw89_debug(rtwdev, RTW89_DBG_CFO,
"X_cap{Curr,Default}={0x%x,0x%x}\n",
@@ -6310,6 +6533,12 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_chip_cfg_txrx_path(rtwdev);
}
+void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_env_monitor_init(rtwdev);
+ rtw89_physts_parsing_init(rtwdev);
+}
+
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index c683f4d7d29b..697ee47fe325 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -57,7 +57,7 @@
#define CFO_TRK_STOP_TH_4 (30 << 2)
#define CFO_TRK_STOP_TH_3 (20 << 2)
#define CFO_TRK_STOP_TH_2 (10 << 2)
-#define CFO_TRK_STOP_TH_1 (00 << 2)
+#define CFO_TRK_STOP_TH_1 (03 << 2)
#define CFO_TRK_STOP_TH (2 << 2)
#define CFO_SW_COMP_FINE_TUNE (2 << 2)
#define CFO_PERIOD_CNT 15
@@ -151,6 +151,7 @@ enum rtw89_phy_c2h_rfk_log_func {
enum rtw89_phy_c2h_rfk_report_func {
RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE = 0,
+ RTW89_PHY_C2H_RFK_LOG_TAS_PWR = 6,
};
enum rtw89_phy_c2h_dm_func {
@@ -813,6 +814,7 @@ void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
enum rtw89_rf_path rf_path,
void *extra_data);
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev);
+void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev);
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 data, enum rtw89_phy_idx phy_idx);
void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
@@ -826,6 +828,11 @@ s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev,
const struct rtw89_rate_desc *desc);
s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
const struct rtw89_rate_desc *rate_desc);
+void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev);
+s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan);
+void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan);
void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl);
s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
@@ -896,10 +903,34 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
phy->set_txpwr_limit_ru(rtwdev, chan, phy_idx);
}
+static inline s8 rtw89_phy_txpwr_rf_to_bb(struct rtw89_dev *rtwdev, s8 txpwr_rf)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return txpwr_rf << (chip->txpwr_factor_bb - chip->txpwr_factor_rf);
+}
+
+static inline s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
+}
+
+static inline s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
+}
+
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link);
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
u32 changed);
+void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link,
+ u32 changed);
void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask);
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index c1c12abc2ea9..96ea04d90cd3 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -8,6 +8,7 @@
#include "debug.h"
#include "fw.h"
#include "mac.h"
+#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"
@@ -62,11 +63,8 @@ static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
rtw89_mac_power_mode_change(rtwdev, enter);
}
-void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev)
{
- if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
- return;
-
if (!rtwdev->ps_mode)
return;
@@ -85,8 +83,8 @@ void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
rtw89_ps_power_mode_change(rtwdev, false);
}
-static void __rtw89_enter_lps(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link)
+static void __rtw89_enter_lps_link(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
{
struct rtw89_lps_parm lps_param = {
.macid = rtwvif_link->mac_id,
@@ -96,7 +94,6 @@ static void __rtw89_enter_lps(struct rtw89_dev *rtwdev,
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL);
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
- rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif_link);
}
static void __rtw89_leave_lps(struct rtw89_dev *rtwdev,
@@ -121,17 +118,32 @@ void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
__rtw89_leave_ps_mode(rtwdev);
}
-void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool ps_mode)
{
+ struct rtw89_vif_link *rtwvif_link;
+ bool can_ps_mode = true;
+ unsigned int link_id;
+
lockdep_assert_held(&rtwdev->mutex);
if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
- __rtw89_enter_lps(rtwdev, rtwvif_link);
- if (ps_mode)
- __rtw89_enter_ps_mode(rtwdev, rtwvif_link);
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ __rtw89_enter_lps_link(rtwdev, rtwvif_link);
+
+ if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ can_ps_mode = false;
+ }
+
+ if (RTW89_CHK_FW_FEATURE(LPS_CH_INFO, &rtwdev->fw))
+ rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
+ else
+ rtw89_fw_h2c_lps_ml_cmn_info(rtwdev, rtwvif);
+
+ if (ps_mode && can_ps_mode)
+ __rtw89_enter_ps_mode(rtwdev);
}
static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev,
@@ -157,6 +169,8 @@ void rtw89_leave_lps(struct rtw89_dev *rtwdev)
__rtw89_leave_ps_mode(rtwdev);
+ rtw89_phy_dm_reinit(rtwdev);
+
rtw89_for_each_rtwvif(rtwdev, rtwvif)
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
rtw89_leave_lps_vif(rtwdev, rtwvif_link);
@@ -282,12 +296,6 @@ void rtw89_recalc_lps(struct rtw89_dev *rtwdev)
enum rtw89_entity_mode mode;
int count = 0;
- /* FIXME: Fix rtw89_enter_lps() and __rtw89_enter_ps_mode()
- * to take MLO cases into account before doing the following.
- */
- if (rtwdev->support_mlo)
- goto disable_lps;
-
mode = rtw89_get_entity_mode(rtwdev);
if (mode == RTW89_ENTITY_MODE_MCC)
goto disable_lps;
diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
index cdd712966b09..2b88f254a32d 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.h
+++ b/drivers/net/wireless/realtek/rtw89/ps.h
@@ -5,11 +5,11 @@
#ifndef __RTW89_PS_H_
#define __RTW89_PS_H_
-void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool ps_mode);
void rtw89_leave_lps(struct rtw89_dev *rtwdev);
void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
-void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
+void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_enter_ips(struct rtw89_dev *rtwdev);
void rtw89_leave_ips(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 18ec7c0252fb..10d0efa7a58e 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -7447,6 +7447,10 @@
#define B_BE_CSIPRT_HESU_AID_EN BIT(25)
#define B_BE_CSIPRT_VHTSU_AID_EN BIT(24)
+#define R_BE_DRV_INFO_OPTION 0x11470
+#define R_BE_DRV_INFO_OPTION_C1 0x15470
+#define B_BE_DRV_INFO_PHYRPT_EN BIT(0)
+
#define R_BE_RX_ERR_ISR 0x114F4
#define R_BE_RX_ERR_ISR_C1 0x154F4
#define B_BE_RX_ERR_TRIG_ACT_TO BIT(9)
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index cad5189708e7..80b2f74589eb 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -17,7 +17,7 @@ static const struct rtw89_regd rtw89_ww_regd =
static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC),
- COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_NA),
COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE),
COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC, RTW89_FCC),
@@ -35,7 +35,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA),
COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC, RTW89_NA),
COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
@@ -72,7 +72,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
@@ -82,13 +82,13 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR),
COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
@@ -101,7 +101,7 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE),
COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN),
@@ -110,12 +110,12 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC),
COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_ETSI),
- COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI, RTW89_THAILAND),
+ COUNTRY_REGD("TH", RTW89_THAILAND, RTW89_THAILAND, RTW89_THAILAND),
COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA),
COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA),
@@ -158,9 +158,9 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
COUNTRY_REGD("CC", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
@@ -176,12 +176,12 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
@@ -194,19 +194,19 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("XK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
@@ -216,15 +216,15 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC, RTW89_NA),
COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC, RTW89_NA),
@@ -237,9 +237,9 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
@@ -247,13 +247,16 @@ static const struct rtw89_regd rtw89_regd_map[] = {
COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA, RTW89_NA),
COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_NA),
+ COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_FCC),
COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("CU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("SY", RTW89_ETSI, RTW89_NA, RTW89_NA),
+ COUNTRY_REGD("SD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
};
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 68c67a763f4d..c56f70267882 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -2298,7 +2298,8 @@ static void rtw8851b_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal = RTW89_RSSI_RAW_TO_DBM(rx_power[RF_PATH_A]);
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(rx_power[RF_PATH_A]);
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
@@ -2391,6 +2392,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.ctrl_btg_bt_rx = rtw8851b_ctrl_btg_bt_rx,
.query_ppdu = rtw8851b_query_ppdu,
.convert_rpl_to_rssi = NULL,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8851b_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8851b_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8851b_set_txpwr_ul_tb_offset,
@@ -2464,6 +2466,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.nctl_post_table = &rtw8851b_nctl_post_defs_tbl,
.dflt_parms = &rtw89_8851b_dflt_parms,
.rfe_parms_conf = rtw89_8851b_rfe_parms_conf,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -2479,6 +2482,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
+ .support_ant_gain = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
index 364e36354225..f72b3ac6f149 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
@@ -2199,7 +2199,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (dgain > 0x5fc || dgain < 0x556) {
_dpk_one_shot(rtwdev, phy, path, D_SYNC);
- dgain = _dpk_dgain_read(rtwdev);
+ _dpk_dgain_read(rtwdev);
}
if (agc_cnt == 0) {
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index e647759ebd69..9bd2842c27d5 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -2068,7 +2068,9 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
@@ -2116,6 +2118,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.ctrl_btg_bt_rx = rtw8852a_ctrl_btg_bt_rx,
.query_ppdu = rtw8852a_query_ppdu,
.convert_rpl_to_rssi = NULL,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = NULL,
.set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
@@ -2181,6 +2184,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = &rtw89_8852a_dflt_parms,
.rfe_parms_conf = NULL,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = &rtw89_8852a_phy_dig_table,
@@ -2196,6 +2200,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = false,
+ .support_ant_gain = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index 49a319128316..dfb2bf61b0b8 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -745,6 +745,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
.convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
@@ -819,6 +820,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = &rtw89_8852b_dflt_parms,
.rfe_parms_conf = NULL,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -834,6 +836,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
+ .support_ant_gain = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
index f4aa4437fb75..0e094ce9c9b0 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -1206,24 +1206,25 @@ void __rtw8852bx_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_cha
}
static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, s16 ref)
+ enum rtw89_phy_idx phy_idx,
+ s16 ref, u16 pwr_ofst_decrease)
{
const u16 tssi_16dbm_cw = 0x12c;
const u8 base_cw_0db = 0x27;
- const s8 ofst_int = 0;
s16 pwr_s10_3;
s16 rf_pwr_cw;
u16 bb_pwr_cw;
u32 pwr_cw;
u32 tssi_ofst_cw;
- pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
+ pwr_s10_3 = (ref << 1) + (s16)(base_cw_0db << 3) - pwr_ofst_decrease;
bb_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(2, 0));
rf_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(8, 3));
rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
- tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
+ tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)) -
+ pwr_ofst_decrease;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
@@ -1234,10 +1235,11 @@ static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
}
static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx, s16 pwr_ofst)
{
static const u32 addr[RF_PATH_NUM_8852BX] = {0x5800, 0x7800};
const u32 mask = B_DPD_TSSI_CW | B_DPD_PWR_CW | B_DPD_REF;
+ u16 ofst_dec[RF_PATH_NUM_8852BX];
const u8 ofst_ofdm = 0x4;
const u8 ofst_cck = 0x8;
const s16 ref_ofdm = 0;
@@ -1250,19 +1252,20 @@ static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev,
rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
B_AX_PWR_REF, 0x0);
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
- val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
+ ofst_dec[RF_PATH_A] = pwr_ofst > 0 ? 0 : abs(pwr_ofst);
+ ofst_dec[RF_PATH_B] = pwr_ofst > 0 ? pwr_ofst : 0;
- for (i = 0; i < RF_PATH_NUM_8852BX; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
- phy_idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm, ofst_dec[i]);
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, phy_idx);
+ }
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
- val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
-
- for (i = 0; i < RF_PATH_NUM_8852BX; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
- phy_idx);
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck, ofst_dec[i]);
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, phy_idx);
+ }
}
static void rtw8852bx_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
@@ -1333,6 +1336,16 @@ static void rtw8852bx_set_tx_shape(struct rtw89_dev *rtwdev,
tx_shape_ofdm);
}
+static void rtw8852bx_set_txpwr_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s16 pwr_ofst;
+
+ pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan);
+ rtw8852bx_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst);
+}
+
static void __rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
@@ -1342,12 +1355,13 @@ static void __rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev,
rtw8852bx_set_tx_shape(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+ rtw8852bx_set_txpwr_diff(rtwdev, chan, phy_idx);
}
static void __rtw8852bx_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
- rtw8852bx_set_txpwr_ref(rtwdev, phy_idx);
+ rtw8852bx_set_txpwr_ref(rtwdev, phy_idx, 0);
}
static
@@ -1936,7 +1950,9 @@ static void __rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index 876725133228..bde3e1fb7ca6 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -679,6 +679,7 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
.convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
@@ -752,6 +753,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = NULL,
.rfe_parms_conf = NULL,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -767,6 +769,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
+ .support_ant_gain = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = false,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index cde34f8e1e67..bc84b15e7826 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -1882,9 +1882,9 @@ static void rtw8852c_rfk_track(struct rtw89_dev *rtwdev)
}
static u32 rtw8852c_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, s16 ref)
+ enum rtw89_phy_idx phy_idx,
+ s16 ref, u16 pwr_ofst_decrease)
{
- s8 ofst_int = 0;
u8 base_cw_0db = 0x27;
u16 tssi_16dbm_cw = 0x12c;
s16 pwr_s10_3 = 0;
@@ -1893,13 +1893,14 @@ static u32 rtw8852c_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
u32 pwr_cw = 0;
u32 tssi_ofst_cw = 0;
- pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
+ pwr_s10_3 = (ref << 1) + (s16)(base_cw_0db << 3) - pwr_ofst_decrease;
bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3);
rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3);
rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
- tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
+ tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)) -
+ pwr_ofst_decrease;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
@@ -1943,9 +1944,10 @@ void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
}
static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx, s16 pwr_ofst)
{
static const u32 addr[RF_PATH_NUM_8852C] = {0x5800, 0x7800};
+ u16 ofst_dec[RF_PATH_NUM_8852C];
const u32 mask = 0x7FFFFFF;
const u8 ofst_ofdm = 0x4;
const u8 ofst_cck = 0x8;
@@ -1959,19 +1961,20 @@ static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev,
rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
GENMASK(27, 10), 0x0);
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
- val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
+ ofst_dec[RF_PATH_A] = pwr_ofst > 0 ? 0 : abs(pwr_ofst);
+ ofst_dec[RF_PATH_B] = pwr_ofst > 0 ? pwr_ofst : 0;
- for (i = 0; i < RF_PATH_NUM_8852C; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
- phy_idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm, ofst_dec[i]);
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, phy_idx);
+ }
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
- val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
-
- for (i = 0; i < RF_PATH_NUM_8852C; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
- phy_idx);
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck, ofst_dec[i]);
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, phy_idx);
+ }
}
static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
@@ -2052,6 +2055,16 @@ static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev,
B_P1_DAC_COMP_POST_DPD_EN);
}
+static void rtw8852c_set_txpwr_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s16 pwr_ofst;
+
+ pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan);
+ rtw8852c_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst);
+}
+
static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
@@ -2061,12 +2074,13 @@ static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
rtw8852c_set_tx_shape(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_diff(rtwdev, chan, phy_idx);
}
static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
- rtw8852c_set_txpwr_ref(rtwdev, phy_idx);
+ rtw8852c_set_txpwr_ref(rtwdev, phy_idx, 0);
}
static void
@@ -2793,7 +2807,10 @@ static void rtw8852c_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B]));
+
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
@@ -2893,6 +2910,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.ctrl_btg_bt_rx = rtw8852c_ctrl_btg_bt_rx,
.query_ppdu = rtw8852c_query_ppdu,
.convert_rpl_to_rssi = NULL,
+ .phy_rpt_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852c_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852c_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
@@ -2959,6 +2977,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dflt_parms = &rtw89_8852c_dflt_parms,
.rfe_parms_conf = NULL,
.chanctx_listener = &rtw8852c_chanctx_listener,
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -2976,6 +2995,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
+ .support_ant_gain = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
.hw_sec_hdr = true,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index bd17c0a1c684..b92e2ce4f4ad 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -1769,10 +1769,10 @@ u8 _rx_dck_channel_calc(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan)
target_ch = chan->channel - 33;
}
} else if (chan->band_type == RTW89_BAND_6G) {
- if (chan->channel >= 1 && chan->channel <= 125)
- target_ch = chan->channel + 32;
- else
+ if (chan->channel > 125)
target_ch = chan->channel - 32;
+ else
+ target_ch = chan->channel + 32;
} else {
target_ch = chan->channel;
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 9a4db04a1967..f04cb3b11372 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -14,7 +14,7 @@
#include "rtw8922a_rfk.h"
#include "util.h"
-#define RTW8922A_FW_FORMAT_MAX 2
+#define RTW8922A_FW_FORMAT_MAX 3
#define RTW8922A_FW_BASENAME "rtw89/rtw8922a_fw"
#define RTW8922A_MODULE_FIRMWARE \
RTW8922A_FW_BASENAME "-" __stringify(RTW8922A_FW_FORMAT_MAX) ".bin"
@@ -2565,8 +2565,10 @@ static void rtw8922a_query_ppdu(struct rtw89_dev *rtwdev,
u8 path;
u8 *rx_power = phy_ppdu->rssi;
- status->signal =
- RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ if (!status->signal)
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B]));
+
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
@@ -2607,6 +2609,16 @@ static void rtw8922a_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
phy_ppdu->rssi_avg = phy_ppdu->rpl_avg;
}
+static void rtw8922a_phy_rpt_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ if (desc_info->rssi <= 0x1 || (desc_info->rssi >> 2) > MAX_RSSI)
+ return;
+
+ rx_status->signal = (desc_info->rssi >> 2) - MAX_RSSI;
+}
+
static int rtw8922a_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_set(rtwdev, R_BE_FEN_RST_ENABLE,
@@ -2665,6 +2677,7 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx,
.query_ppdu = rtw8922a_query_ppdu,
.convert_rpl_to_rssi = rtw8922a_convert_rpl_to_rssi,
+ .phy_rpt_to_rssi = rtw8922a_phy_rpt_to_rssi,
.ctrl_nbtg_bt_tx = rtw8922a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8922a_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = NULL,
@@ -2729,6 +2742,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = NULL, /* load parm from fw */
.rfe_parms_conf = NULL, /* load parm from fw */
+ .txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
@@ -2746,6 +2760,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
+ .support_ant_gain = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.hw_sec_hdr = true,
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index bcc287771b2a..871f45a6508c 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -42,7 +42,7 @@ static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev,
/* freq 6875 (ch 185, 20MHz) spans RTW89_SAR_6GHZ_SUBBAND_7_H
* and RTW89_SAR_6GHZ_SUBBAND_8, so directly describe it with
- * struct rtw89_sar_span in the following.
+ * struct rtw89_6ghz_span.
*/
case 6895 ... 7115:
@@ -50,63 +50,18 @@ static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev,
}
}
-struct rtw89_sar_span {
- enum rtw89_sar_subband subband_low;
- enum rtw89_sar_subband subband_high;
-};
-
-#define RTW89_SAR_SPAN_VALID(span) ((span)->subband_high)
-
-#define RTW89_SAR_6GHZ_SPAN_HEAD 6145
-#define RTW89_SAR_6GHZ_SPAN_IDX(center_freq) \
- ((((int)(center_freq) - RTW89_SAR_6GHZ_SPAN_HEAD) / 5) / 2)
-
-#define RTW89_DECL_SAR_6GHZ_SPAN(center_freq, subband_l, subband_h) \
- [RTW89_SAR_6GHZ_SPAN_IDX(center_freq)] = { \
- .subband_low = RTW89_SAR_6GHZ_ ## subband_l, \
- .subband_high = RTW89_SAR_6GHZ_ ## subband_h, \
- }
-
-/* Since 6GHz SAR subbands are not edge aligned, some cases span two SAR
- * subbands. In the following, we describe each of them with rtw89_sar_span.
- */
-static const struct rtw89_sar_span rtw89_sar_overlapping_6ghz[] = {
- RTW89_DECL_SAR_6GHZ_SPAN(6145, SUBBAND_5_L, SUBBAND_5_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6165, SUBBAND_5_L, SUBBAND_5_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6185, SUBBAND_5_L, SUBBAND_5_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6505, SUBBAND_6, SUBBAND_7_L),
- RTW89_DECL_SAR_6GHZ_SPAN(6525, SUBBAND_6, SUBBAND_7_L),
- RTW89_DECL_SAR_6GHZ_SPAN(6545, SUBBAND_6, SUBBAND_7_L),
- RTW89_DECL_SAR_6GHZ_SPAN(6665, SUBBAND_7_L, SUBBAND_7_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6705, SUBBAND_7_L, SUBBAND_7_H),
- RTW89_DECL_SAR_6GHZ_SPAN(6825, SUBBAND_7_H, SUBBAND_8),
- RTW89_DECL_SAR_6GHZ_SPAN(6865, SUBBAND_7_H, SUBBAND_8),
- RTW89_DECL_SAR_6GHZ_SPAN(6875, SUBBAND_7_H, SUBBAND_8),
- RTW89_DECL_SAR_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8),
-};
-
static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev,
u32 center_freq, s32 *cfg)
{
struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common;
- const struct rtw89_sar_span *span = NULL;
enum rtw89_sar_subband subband_l, subband_h;
- int idx;
-
- if (center_freq >= RTW89_SAR_6GHZ_SPAN_HEAD) {
- idx = RTW89_SAR_6GHZ_SPAN_IDX(center_freq);
- /* To decrease size of rtw89_sar_overlapping_6ghz[],
- * RTW89_SAR_6GHZ_SPAN_IDX() truncates the leading NULLs
- * to make first span as index 0 of the table. So, if center
- * frequency is less than the first one, it will get negative.
- */
- if (idx >= 0 && idx < ARRAY_SIZE(rtw89_sar_overlapping_6ghz))
- span = &rtw89_sar_overlapping_6ghz[idx];
- }
+ const struct rtw89_6ghz_span *span;
+
+ span = rtw89_get_6ghz_span(rtwdev, center_freq);
if (span && RTW89_SAR_SPAN_VALID(span)) {
- subband_l = span->subband_low;
- subband_h = span->subband_high;
+ subband_l = span->sar_subband_low;
+ subband_h = span->sar_subband_high;
} else {
subband_l = rtw89_sar_get_subband(rtwdev, center_freq);
subband_h = subband_l;
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 7b203bb7f151..26a944d3b672 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -365,6 +365,7 @@ static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
ser_reset_vif(rtwdev, rtwvif);
rtwdev->total_sta_assoc = 0;
+ refcount_set(&rtwdev->refcount_ap_info, 0);
}
/* hal function */
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index b2e47829983f..70fe7cebc9d5 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -560,6 +560,9 @@ struct rtw89_phy_sts_iehdr {
#define BE_RXD_HDR_OFFSET_MASK GENMASK(20, 16)
#define BE_RXD_WL_HD_IV_LEN_MASK GENMASK(26, 21)
+/* BE RXD - PHY RPT dword0 */
+#define BE_RXD_PHY_RSSI GENMASK(11, 0)
+
struct rtw89_phy_sts_ie00 {
__le32 w0;
__le32 w1;
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 3e81fd974ec1..01754d031bb4 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -620,7 +620,10 @@ static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev,
* need to unlock mutex
*/
mutex_unlock(&rtwdev->mutex);
- key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1);
+ if (ieee80211_vif_is_mld(wow_vif))
+ key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, rtwvif_link->link_id);
+ else
+ key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1);
mutex_lock(&rtwdev->mutex);
kfree(rekey_conf);
@@ -691,9 +694,7 @@ static void rtw89_wow_leave_deep_ps(struct rtw89_dev *rtwdev)
static void rtw89_wow_enter_deep_ps(struct rtw89_dev *rtwdev)
{
- struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
-
- __rtw89_enter_ps_mode(rtwdev, rtwvif_link);
+ __rtw89_enter_ps_mode(rtwdev);
}
static void rtw89_wow_enter_ps(struct rtw89_dev *rtwdev)
@@ -701,7 +702,7 @@ static void rtw89_wow_enter_ps(struct rtw89_dev *rtwdev)
struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link;
if (rtw89_wow_mgd_linked(rtwdev))
- rtw89_enter_lps(rtwdev, rtwvif_link, false);
+ rtw89_enter_lps(rtwdev, rtwvif_link->rtwvif, false);
else if (rtw89_wow_no_link(rtwdev))
rtw89_fw_h2c_fwips(rtwdev, rtwvif_link, true);
}
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index c07acfcbbd9c..7c57d4c8744a 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -88,7 +88,7 @@ static ssize_t hw_pg_ver_show(struct device *dev,
static DEVICE_ATTR_RO(hw_pg_ver);
static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -121,7 +121,7 @@ static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
static const struct bin_attribute fwlog_attr = {
.attr = { .name = "fwlog", .mode = 0400 },
- .read = wl1271_sysfs_read_fwlog,
+ .read_new = wl1271_sysfs_read_fwlog,
};
int wlcore_sysfs_init(struct wl1271 *wl)
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 3f338b8096c7..fc8ea58bc165 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -45,7 +45,7 @@ enum wl1271_tm_attrs {
};
#define WL1271_TM_ATTR_MAX (__WL1271_TM_ATTR_AFTER_LAST - 1)
-static struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = {
+static const struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = {
[WL1271_TM_ATTR_CMD_ID] = { .type = NLA_U32 },
[WL1271_TM_ATTR_ANSWER] = { .type = NLA_U8 },
[WL1271_TM_ATTR_DATA] = { .type = NLA_BINARY,
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 347a15544afe..cf6a331d4042 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -5048,6 +5048,45 @@ static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
.tx_mcs_80p80 = cpu_to_le16(0xfffa),
},
},
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] = IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ,
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beamforming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /* As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field and 320MHz in
+ * 6GHz is supported, include all the following
+ * MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._320 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
},
#endif
};
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index 04517bd3325a..a066977af0be 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/module.h>
+#include <linux/suspend.h>
#include <net/rtnetlink.h>
#include "iosm_ipc_imem.h"
@@ -18,6 +19,7 @@ MODULE_LICENSE("GPL v2");
/* WWAN GUID */
static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92,
0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d);
+static bool pci_registered;
static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie)
{
@@ -448,7 +450,6 @@ static struct pci_driver iosm_ipc_driver = {
},
.id_table = iosm_ipc_ids,
};
-module_pci_driver(iosm_ipc_driver);
int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
size_t size, dma_addr_t *mapping, int direction)
@@ -530,3 +531,56 @@ void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
IPC_CB(skb)->mapping = 0;
dev_kfree_skb(skb);
}
+
+static int pm_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
+{
+ if (mode == PM_HIBERNATION_PREPARE || mode == PM_RESTORE_PREPARE) {
+ if (pci_registered) {
+ pci_unregister_driver(&iosm_ipc_driver);
+ pci_registered = false;
+ }
+ } else if (mode == PM_POST_HIBERNATION || mode == PM_POST_RESTORE) {
+ if (!pci_registered) {
+ int ret;
+
+ ret = pci_register_driver(&iosm_ipc_driver);
+ if (ret) {
+ pr_err(KBUILD_MODNAME ": unable to re-register PCI driver: %d\n",
+ ret);
+ } else {
+ pci_registered = true;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct notifier_block pm_notifier = {
+ .notifier_call = pm_notify,
+};
+
+static int __init iosm_ipc_driver_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&iosm_ipc_driver);
+ if (ret)
+ return ret;
+
+ pci_registered = true;
+
+ register_pm_notifier(&pm_notifier);
+
+ return 0;
+}
+module_init(iosm_ipc_driver_init);
+
+static void __exit iosm_ipc_driver_exit(void)
+{
+ unregister_pm_notifier(&pm_notifier);
+
+ if (pci_registered)
+ pci_unregister_driver(&iosm_ipc_driver);
+}
+module_exit(iosm_ipc_driver_exit);
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 8381b0dc7acb..02f2ec7cf4ce 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -43,6 +43,8 @@
#include "t7xx_state_monitor.h"
#include "t7xx_port_proxy.h"
+#define DRIVER_NAME "mtk_t7xx"
+
#define T7XX_PCI_IREG_BASE 0
#define T7XX_PCI_EREG_BASE 2
@@ -833,6 +835,7 @@ static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct t7xx_pci_dev *t7xx_dev;
+ void __iomem *iomem;
int ret;
t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
@@ -848,12 +851,21 @@ static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
- ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE),
- pci_name(pdev));
+ iomem = pcim_iomap_region(pdev, T7XX_PCI_IREG_BASE, DRIVER_NAME);
+ ret = PTR_ERR_OR_ZERO(iomem);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not request IREG BAR: %d\n", ret);
+ return -ENOMEM;
+ }
+ IREG_BASE(t7xx_dev) = iomem;
+
+ iomem = pcim_iomap_region(pdev, T7XX_PCI_EREG_BASE, DRIVER_NAME);
+ ret = PTR_ERR_OR_ZERO(iomem);
if (ret) {
- dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
+ dev_err(&pdev->dev, "Could not request EREG BAR: %d\n", ret);
return -ENOMEM;
}
+ t7xx_dev->base_addr.pcie_ext_reg_base = iomem;
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
@@ -867,9 +879,6 @@ static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
- IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
- t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];
-
ret = t7xx_pci_pm_init(t7xx_dev);
if (ret)
return ret;
@@ -937,7 +946,7 @@ static const struct pci_device_id t7xx_pci_table[] = {
MODULE_DEVICE_TABLE(pci, t7xx_pci_table);
static struct pci_driver t7xx_pci_driver = {
- .name = "mtk_t7xx",
+ .name = DRIVER_NAME,
.id_table = t7xx_pci_table,
.probe = t7xx_pci_probe,
.remove = t7xx_pci_remove,
diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c
index 1ec651e31064..3425b68f0ddc 100644
--- a/drivers/nfc/st21nfca/dep.c
+++ b/drivers/nfc/st21nfca/dep.c
@@ -116,18 +116,16 @@ static void st21nfca_tx_work(struct work_struct *work)
struct nfc_dev *dev;
struct sk_buff *skb;
- if (info) {
- dev = info->hdev->ndev;
- skb = info->dep_info.tx_pending;
+ dev = info->hdev->ndev;
+ skb = info->dep_info.tx_pending;
- device_lock(&dev->dev);
+ device_lock(&dev->dev);
- nfc_hci_send_cmd_async(info->hdev, ST21NFCA_RF_READER_F_GATE,
- ST21NFCA_WR_XCHG_DATA, skb->data, skb->len,
- info->async_cb, info);
- device_unlock(&dev->dev);
- kfree_skb(skb);
- }
+ nfc_hci_send_cmd_async(info->hdev, ST21NFCA_RF_READER_F_GATE,
+ ST21NFCA_WR_XCHG_DATA, skb->data, skb->len,
+ info->async_cb, info);
+ device_unlock(&dev->dev);
+ kfree_skb(skb);
}
static void st21nfca_im_send_pdu(struct st21nfca_hci_info *info,
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 5feecaadde8e..7f08c70d8123 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -3692,7 +3692,7 @@ DEVICE_FREQ_GROUP(freq4, 3);
static ssize_t
disciplining_config_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
@@ -3727,7 +3727,7 @@ out:
static ssize_t
disciplining_config_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
@@ -3750,11 +3750,11 @@ disciplining_config_write(struct file *filp, struct kobject *kobj,
return err;
}
-static BIN_ATTR_RW(disciplining_config, OCP_ART_CONFIG_SIZE);
+static const BIN_ATTR_RW(disciplining_config, OCP_ART_CONFIG_SIZE);
static ssize_t
temperature_table_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
@@ -3789,7 +3789,7 @@ out:
static ssize_t
temperature_table_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ptp_ocp *bp = dev_get_drvdata(kobj_to_dev(kobj));
@@ -3812,7 +3812,7 @@ temperature_table_write(struct file *filp, struct kobject *kobj,
return err;
}
-static BIN_ATTR_RW(temperature_table, OCP_ART_TEMP_TABLE_SIZE);
+static const BIN_ATTR_RW(temperature_table, OCP_ART_TEMP_TABLE_SIZE);
static struct attribute *fb_timecard_attrs[] = {
&dev_attr_serialnum.attr,
@@ -3867,7 +3867,7 @@ static struct attribute *art_timecard_attrs[] = {
NULL,
};
-static struct bin_attribute *bin_art_timecard_attrs[] = {
+static const struct bin_attribute *const bin_art_timecard_attrs[] = {
&bin_attr_disciplining_config,
&bin_attr_temperature_table,
NULL,
@@ -3875,7 +3875,7 @@ static struct bin_attribute *bin_art_timecard_attrs[] = {
static const struct attribute_group art_timecard_group = {
.attrs = art_timecard_attrs,
- .bin_attrs = bin_art_timecard_attrs,
+ .bin_attrs_new = bin_art_timecard_attrs,
};
static const struct ocp_attr_group art_timecard_groups[] = {
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index c053ee9c1361..7fcc46a0bb48 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -1802,7 +1802,8 @@ static int cfg80211_rtw_set_txpower(struct wiphy *wiphy,
}
static int cfg80211_rtw_get_txpower(struct wiphy *wiphy,
- struct wireless_dev *wdev, int *dbm)
+ struct wireless_dev *wdev,
+ unsigned int link_id, int *dbm)
{
*dbm = (12);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 5f581e71e201..36099047560d 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1952,7 +1952,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
goto out_free;
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
- dests[1].counter_id = mlx5_fc_id(node->ucast_counter.counter);
+ dests[1].counter = node->ucast_counter.counter;
#endif
node->ucast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS);
if (IS_ERR(node->ucast_rule)) {
@@ -1961,7 +1961,7 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
}
#if defined(CONFIG_MLX5_VDPA_STEERING_DEBUG)
- dests[1].counter_id = mlx5_fc_id(node->mcast_counter.counter);
+ dests[1].counter = node->mcast_counter.counter;
#endif
memset(dmac_c, 0, ETH_ALEN);