author		Joonas Lahtinen <joonas.lahtinen@linux.intel.com>	2022-02-03 10:53:49 +0300
committer	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>	2022-02-03 10:53:49 +0300
commit		876f7a438e4247a948268ad77b67c494f709cc30 (patch)
tree		c3fa2548657920df9d80822d08ff24666e62d37d /drivers/net/ethernet
parent		86df4141869350edaa53fb994b3db2c2cca5065d (diff)
parent		53dbee4926d3706ca9e03f3928fa85b5ec3bc0cc (diff)
download	linux-876f7a438e4247a948268ad77b67c494f709cc30.tar.xz
Merge drm/drm-next into drm-intel-gt-next
Backmerge to bring in 5.17-rc2 to introduce a common baseline to merge i915_regs changes from drm-intel-next.

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/3com/typhoon.c10
-rw-r--r--drivers/net/ethernet/8390/etherh.c6
-rw-r--r--drivers/net/ethernet/8390/hydra.c4
-rw-r--r--drivers/net/ethernet/8390/mac8390.c4
-rw-r--r--drivers/net/ethernet/8390/smc-ultra.c4
-rw-r--r--drivers/net/ethernet/8390/wd.c4
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/agere/et131x.c5
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c245
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.h18
-rw-r--r--drivers/net/ethernet/alteon/acenic.c9
-rw-r--r--drivers/net/ethernet/alteon/acenic.h1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h10
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c8
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h13
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c23
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c168
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h25
-rw-r--r--drivers/net/ethernet/amd/a2065.c18
-rw-r--r--drivers/net/ethernet/amd/ariadne.c20
-rw-r--r--drivers/net/ethernet/amd/atarilance.c7
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/hplance.c4
-rw-r--r--drivers/net/ethernet/amd/lance.c4
-rw-r--r--drivers/net/ethernet/amd/mvme147.c14
-rw-r--r--drivers/net/ethernet/amd/ni65.c8
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c8
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c2
-rw-r--r--drivers/net/ethernet/apple/bmac.c5
-rw-r--r--drivers/net/ethernet/apple/mace.c16
-rw-r--r--drivers/net/ethernet/apple/macmace.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c8
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c18
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c134
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c8
-rw-r--r--drivers/net/ethernet/broadcom/b44.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c25
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c5
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c99
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c139
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c41
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c103
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c10
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c4
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c13
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c34
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c30
-rw-r--r--drivers/net/ethernet/cadence/macb.h3
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c133
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c11
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c3
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c5
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c7
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c3
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c7
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c8
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c24
-rw-r--r--drivers/net/ethernet/cortina/gemini.c17
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c18
-rw-r--r--drivers/net/ethernet/engleder/Kconfig39
-rw-r--r--drivers/net/ethernet/engleder/Makefile10
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h189
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ethtool.c293
-rw-r--r--drivers/net/ethernet/engleder/tsnep_hw.h230
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c1272
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ptp.c218
-rw-r--r--drivers/net/ethernet/engleder/tsnep_selftests.c811
-rw-r--r--drivers/net/ethernet/engleder/tsnep_tc.c443
-rw-r--r--drivers/net/ethernet/ethoc.c17
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c14
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c142
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h3
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c14
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c10
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c81
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c9
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c6
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c48
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c12
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c32
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c12
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c21
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c8
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c8
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c29
-rw-r--r--drivers/net/ethernet/google/gve/gve.h23
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c10
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc.h20
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h24
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c86
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c117
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c5
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c73
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c610
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h458
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c525
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h136
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c115
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h39
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c904
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c116
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c591
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h434
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c116
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c1412
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h95
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c33
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c110
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c556
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h218
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c825
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h90
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c26
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c40
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c17
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c23
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c18
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c10
-rw-r--r--drivers/net/ethernet/i825xx/82596.c3
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c4
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c6
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c3
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c7
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c241
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h3
-rw-r--r--drivers/net/ethernet/intel/Kconfig10
-rw-r--r--drivers/net/ethernet/intel/e100.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c14
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c29
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c112
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c177
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h1
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h115
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c60
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c814
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c75
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h30
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c558
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c39
-rw-r--r--drivers/net/ethernet/intel/ice/ice_cgu_regs.h116
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c429
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h96
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c120
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c92
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c398
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c169
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c157
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c304
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c702
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h83
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h42
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c214
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c216
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.h41
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c397
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c302
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c739
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c208
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c873
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h374
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c2794
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h345
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c402
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c557
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h56
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c63
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c298
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c468
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c68
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c3
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c203
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c192
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h7
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c14
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c45
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c19
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c1
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c323
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h19
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c62
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h5
-rw-r--r--drivers/net/ethernet/lantiq_etop.c55
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c135
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c10
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c432
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h3
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c229
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h70
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c66
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c7
-rw-r--r--drivers/net/ethernet/marvell/prestera/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h38
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.c727
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.h215
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.c475
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.h30
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.c108
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.h18
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c353
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.h8
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c630
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h73
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c52
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router.c184
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.c214
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.h37
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_span.c1
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c9
-rw-r--r--drivers/net/ethernet/marvell/skge.c8
-rw-r--r--drivers/net/ethernet/marvell/sky2.c92
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig3
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c219
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h19
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c122
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c307
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c218
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c90
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c253
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c1387
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c138
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c221
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c226
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c314
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c649
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c250
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h262
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c239
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h642
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c306
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c351
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c187
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c24
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c2
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c6
-rw-r--r--drivers/net/ethernet/microchip/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/Makefile1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c22
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig9
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c682
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c244
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h173
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mac.c470
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c1002
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h278
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c506
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c127
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c406
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_regs.h871
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c544
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c317
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c27
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c75
-rw-r--r--drivers/net/ethernet/microsoft/mana/Makefile2
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h15
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c162
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c140
-rw-r--r--drivers/net/ethernet/mscc/Makefile4
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c304
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h15
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.c894
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.h166
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c128
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c91
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c103
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c535
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c523
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c17
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c6
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c27
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c6
-rw-r--r--drivers/net/ethernet/neterion/s2io.c25
-rw-r--r--drivers/net/ethernet/neterion/s2io.h1
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c31
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c70
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c6
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c10
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c12
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c3
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c102
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c100
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c42
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c21
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c91
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c11
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c38
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c21
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-ethtool.c8
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c8
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c71
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c18
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c11
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c29
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c3
-rw-r--r--drivers/net/ethernet/seeq/ether3.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c7
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c9
-rw-r--r--drivers/net/ethernet/sfc/efx.c3
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c15
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c14
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c14
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c10
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c4
-rw-r--r--drivers/net/ethernet/sfc/ptp.c3
-rw-r--r--drivers/net/ethernet/sfc/rx.c2
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c10
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c5
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c6
-rw-r--r--drivers/net/ethernet/socionext/netsec.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c44
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c271
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c189
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c4
-rw-r--r--drivers/net/ethernet/sun/cassini.c26
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c40
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c7
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c31
-rw-r--r--drivers/net/ethernet/ti/cpmac.c8
-rw-r--r--drivers/net/ethernet/ti/cpsw.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c8
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c32
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h10
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c69
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c4
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c12
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c4
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c35
-rw-r--r--drivers/net/ethernet/vertexcom/Kconfig25
-rw-r--r--drivers/net/ethernet/vertexcom/Makefile6
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c769
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c14
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c221
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c9
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c3
651 files changed, 40387 insertions, 15083 deletions
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 05e15b6e5e2c..8aec5d9fbfef 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -1138,7 +1138,9 @@ typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static void
-typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
ering->rx_max_pending = RXENT_ENTRIES;
ering->tx_max_pending = TXLO_ENTRIES - 1;
@@ -2276,6 +2278,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *dev;
struct typhoon *tp;
int card_id = (int) ent->driver_data;
+ u8 addr[ETH_ALEN] __aligned(4);
void __iomem *ioaddr;
void *shared;
dma_addr_t shared_dma;
@@ -2407,8 +2410,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto error_out_reset;
}
- *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
- *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ *(__be16 *)&addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
+ *(__be32 *)&addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ eth_hw_addr_set(dev, addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
err_msg = "Could not obtain valid ethernet address, aborting";
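[The typhoon.c hunks above show two tree-wide conversions that recur throughout this diffstat: the ethtool .get_ringparam/.set_ringparam callbacks gained kernel_ethtool_ringparam and netlink_ext_ack parameters, and drivers stopped writing dev->dev_addr directly (const as of v5.17), staging the address and calling eth_hw_addr_set() instead. A minimal sketch of the extended callback shape follows; the function name and ring limits are illustrative, not typhoon's real values.]

static void example_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering,
				  struct kernel_ethtool_ringparam *kernel_ering,
				  struct netlink_ext_ack *extack)
{
	ering->rx_max_pending = 256;	/* hardware limit (placeholder) */
	ering->tx_max_pending = 128;
	ering->rx_pending = 256;	/* current setting (placeholder) */
	ering->tx_pending = 128;
}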
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index bd22a534b1c0..e7b879123bb1 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -655,6 +655,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
struct ei_device *ei_local;
struct net_device *dev;
struct etherh_priv *eh;
+ u8 addr[ETH_ALEN];
int ret;
ret = ecard_request_resources(ec);
@@ -724,12 +725,13 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
spin_lock_init(&ei_local->page_lock);
if (ec->cid.product == PROD_ANT_ETHERM) {
- etherm_addr(dev->dev_addr);
+ etherm_addr(addr);
ei_local->reg_offset = etherm_regoffsets;
} else {
- etherh_addr(dev->dev_addr, ec);
+ etherh_addr(addr, ec);
ei_local->reg_offset = etherh_regoffsets;
}
+ eth_hw_addr_set(dev, addr);
ei_local->name = dev->name;
ei_local->word16 = 1;
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 941754ea78ec..1df7601af86a 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -116,6 +116,7 @@ static int hydra_init(struct zorro_dev *z)
unsigned long ioaddr = board+HYDRA_NIC_BASE;
const char name[] = "NE2000";
int start_page, stop_page;
+ u8 macaddr[ETH_ALEN];
int j;
int err;
@@ -129,7 +130,8 @@ static int hydra_init(struct zorro_dev *z)
return -ENOMEM;
for (j = 0; j < ETH_ALEN; j++)
- dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
+ macaddr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
+ eth_hw_addr_set(dev, macaddr);
/* We must set the 8390 for word mode. */
z_writeb(0x4b, ioaddr + NE_EN0_DCFG);
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 91b04abfd687..7fb819b9b89a 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -292,6 +292,7 @@ static bool mac8390_rsrc_init(struct net_device *dev,
struct nubus_dirent ent;
int offset;
volatile unsigned short *i;
+ u8 addr[ETH_ALEN];
dev->irq = SLOT2IRQ(board->slot);
/* This is getting to be a habit */
@@ -314,7 +315,8 @@ static bool mac8390_rsrc_init(struct net_device *dev,
return false;
}
- nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
+ nubus_get_rsrc_mem(addr, &ent, 6);
+ eth_hw_addr_set(dev, addr);
if (useresources[cardtype] == 1) {
nubus_rewinddir(&dir);
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index 0890fa493f70..6e62c37c9400 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -204,6 +204,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
{
int i, retval;
int checksum = 0;
+ u8 macaddr[ETH_ALEN];
const char *model_name;
unsigned char eeprom_irq = 0;
static unsigned version_printed;
@@ -239,7 +240,8 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + 8 + i);
+ macaddr[i] = inb(ioaddr + 8 + i);
+ eth_hw_addr_set(dev, macaddr);
netdev_info(dev, "%s at %#3x, %pM", model_name,
ioaddr, dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index 263a942d81fa..5b00c452bede 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -168,6 +168,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
int checksum = 0;
int ancient = 0; /* An old card without config registers. */
int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
+ u8 addr[ETH_ALEN];
const char *model_name;
static unsigned version_printed;
struct ei_device *ei_local = netdev_priv(dev);
@@ -191,7 +192,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
netdev_info(dev, version);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + 8 + i);
+ addr[i] = inb(ioaddr + 8 + i);
+ eth_hw_addr_set(dev, addr);
netdev_info(dev, "WD80x3 at %#3x, %pM", ioaddr, dev->dev_addr);
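[Every 8390 driver touched above (etherh, hydra, mac8390, smc-ultra, wd) applies the same conversion: because netdev->dev_addr is const as of v5.17, the MAC is read into a local buffer and installed with eth_hw_addr_set(), which copies it into the device. A condensed sketch of the pattern; read_prom_byte() is a hypothetical stand-in for each driver's PROM/EEPROM access.]

	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = read_prom_byte(ioaddr, i);	/* hypothetical accessor */
	eth_hw_addr_set(dev, addr);			/* copies into dev->dev_addr */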
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 4601b38f532a..db3ec4768159 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -73,6 +73,7 @@ config DNET
source "drivers/net/ethernet/dec/Kconfig"
source "drivers/net/ethernet/dlink/Kconfig"
source "drivers/net/ethernet/emulex/Kconfig"
+source "drivers/net/ethernet/engleder/Kconfig"
source "drivers/net/ethernet/ezchip/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
@@ -182,6 +183,7 @@ source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
source "drivers/net/ethernet/toshiba/Kconfig"
source "drivers/net/ethernet/tundra/Kconfig"
+source "drivers/net/ethernet/vertexcom/Kconfig"
source "drivers/net/ethernet/via/Kconfig"
source "drivers/net/ethernet/wiznet/Kconfig"
source "drivers/net/ethernet/xilinx/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index fdd8c6c17451..8a87c1083d1d 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_NET_VENDOR_DEC) += dec/
obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
+obj-$(CONFIG_NET_VENDOR_ENGLEDER) += engleder/
obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
@@ -92,6 +93,7 @@ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
obj-$(CONFIG_NET_VENDOR_TI) += ti/
obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
+obj-$(CONFIG_NET_VENDOR_VERTEXCOM) += vertexcom/
obj-$(CONFIG_NET_VENDOR_VIA) += via/
obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index f4edc616388c..537e6a85e18d 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3914,10 +3914,9 @@ static int et131x_pci_setup(struct pci_dev *pdev,
pci_set_master(pdev);
/* Check the DMA addressing support of this device */
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
dev_err(&pdev->dev, "No usable DMA addressing method\n");
- rc = -EIO;
goto err_release_res;
}
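[The et131x change (and the acenic one further down) drops the old "try 64-bit, fall back to 32-bit" DMA-mask idiom. The rationale behind these tree-wide cleanups is that dma_set_mask_and_coherent() with a 64-bit mask does not fail on any platform where the 32-bit mask would have succeeded, so only the single call and its return code are kept. Condensed before/after:]

	/* Old idiom, removed tree-wide: */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;

	/* New idiom: a 64-bit request cannot fail where a 32-bit one would succeed */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		return rc;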
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 800ee022388f..621ce742ad21 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/soc/sunxi/sunxi_sram.h>
+#include <linux/dmaengine.h>
#include "sun4i-emac.h"
@@ -76,7 +77,6 @@ struct emac_board_info {
void __iomem *membase;
u32 msg_enable;
struct net_device *ndev;
- struct sk_buff *skb_last;
u16 tx_fifo_stat;
int emacrx_completed_flag;
@@ -87,6 +87,16 @@ struct emac_board_info {
unsigned int duplex;
phy_interface_t phy_interface;
+ struct dma_chan *rx_chan;
+ phys_addr_t emac_rx_fifo;
+};
+
+struct emac_dma_req {
+ struct emac_board_info *db;
+ struct dma_async_tx_descriptor *desc;
+ struct sk_buff *skb;
+ dma_addr_t rxbuf;
+ int count;
};
static void emac_update_speed(struct net_device *dev)
@@ -96,9 +106,9 @@ static void emac_update_speed(struct net_device *dev)
/* set EMAC SPEED, depend on PHY */
reg_val = readl(db->membase + EMAC_MAC_SUPP_REG);
- reg_val &= ~(0x1 << 8);
+ reg_val &= ~EMAC_MAC_SUPP_100M;
if (db->speed == SPEED_100)
- reg_val |= 1 << 8;
+ reg_val |= EMAC_MAC_SUPP_100M;
writel(reg_val, db->membase + EMAC_MAC_SUPP_REG);
}
@@ -206,6 +216,117 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
readsl(reg, data, round_up(count, 4) / 4);
}
+static struct emac_dma_req *
+emac_alloc_dma_req(struct emac_board_info *db,
+ struct dma_async_tx_descriptor *desc, struct sk_buff *skb,
+ dma_addr_t rxbuf, int count)
+{
+ struct emac_dma_req *req;
+
+ req = kzalloc(sizeof(struct emac_dma_req), GFP_ATOMIC);
+ if (!req)
+ return NULL;
+
+ req->db = db;
+ req->desc = desc;
+ req->skb = skb;
+ req->rxbuf = rxbuf;
+ req->count = count;
+ return req;
+}
+
+static void emac_free_dma_req(struct emac_dma_req *req)
+{
+ kfree(req);
+}
+
+static void emac_dma_done_callback(void *arg)
+{
+ struct emac_dma_req *req = arg;
+ struct emac_board_info *db = req->db;
+ struct sk_buff *skb = req->skb;
+ struct net_device *dev = db->ndev;
+ int rxlen = req->count;
+ u32 reg_val;
+
+ dma_unmap_single(db->dev, req->rxbuf, rxlen, DMA_FROM_DEVICE);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_bytes += rxlen;
+ /* Pass to upper layer */
+ dev->stats.rx_packets++;
+
+ /* re enable cpu receive */
+ reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+ reg_val &= ~EMAC_RX_CTL_DMA_EN;
+ writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+
+ /* re enable interrupt */
+ reg_val = readl(db->membase + EMAC_INT_CTL_REG);
+ reg_val |= EMAC_INT_CTL_RX_EN;
+ writel(reg_val, db->membase + EMAC_INT_CTL_REG);
+
+ db->emacrx_completed_flag = 1;
+ emac_free_dma_req(req);
+}
+
+static int emac_dma_inblk_32bit(struct emac_board_info *db,
+ struct sk_buff *skb, void *rdptr, int count)
+{
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ dma_addr_t rxbuf;
+ struct emac_dma_req *req;
+ int ret = 0;
+
+ rxbuf = dma_map_single(db->dev, rdptr, count, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(db->dev, rxbuf);
+ if (ret) {
+ dev_err(db->dev, "dma mapping error.\n");
+ return ret;
+ }
+
+ desc = dmaengine_prep_slave_single(db->rx_chan, rxbuf, count,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(db->dev, "prepare slave single failed\n");
+ ret = -ENOMEM;
+ goto prepare_err;
+ }
+
+ req = emac_alloc_dma_req(db, desc, skb, rxbuf, count);
+ if (!req) {
+ dev_err(db->dev, "alloc emac dma req error.\n");
+ ret = -ENOMEM;
+ goto alloc_req_err;
+ }
+
+ desc->callback_param = req;
+ desc->callback = emac_dma_done_callback;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(db->dev, "dma submit error.\n");
+ goto submit_err;
+ }
+
+ dma_async_issue_pending(db->rx_chan);
+ return ret;
+
+submit_err:
+ emac_free_dma_req(req);
+
+alloc_req_err:
+ dmaengine_desc_free(desc);
+
+prepare_err:
+ dma_unmap_single(db->dev, rxbuf, count, DMA_FROM_DEVICE);
+ return ret;
+}
+
/* ethtool ops */
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
@@ -308,7 +429,7 @@ static unsigned int emac_powerup(struct net_device *ndev)
/* initial EMAC */
/* flush RX FIFO */
reg_val = readl(db->membase + EMAC_RX_CTL_REG);
- reg_val |= 0x8;
+ reg_val |= EMAC_RX_CTL_FLUSH_FIFO;
writel(reg_val, db->membase + EMAC_RX_CTL_REG);
udelay(1);
@@ -320,8 +441,8 @@ static unsigned int emac_powerup(struct net_device *ndev)
/* set MII clock */
reg_val = readl(db->membase + EMAC_MAC_MCFG_REG);
- reg_val &= (~(0xf << 2));
- reg_val |= (0xD << 2);
+ reg_val &= ~EMAC_MAC_MCFG_MII_CLKD_MASK;
+ reg_val |= EMAC_MAC_MCFG_MII_CLKD_72;
writel(reg_val, db->membase + EMAC_MAC_MCFG_REG);
/* clear RX counter */
@@ -385,7 +506,7 @@ static void emac_init_device(struct net_device *dev)
/* enable RX/TX0/RX Hlevel interrup */
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
spin_unlock_irqrestore(&db->lock, flags);
@@ -499,7 +620,6 @@ static void emac_rx(struct net_device *dev)
struct sk_buff *skb;
u8 *rdptr;
bool good_packet;
- static int rxlen_last;
unsigned int reg_val;
u32 rxhdr, rxstatus, rxcount, rxlen;
@@ -514,26 +634,12 @@ static void emac_rx(struct net_device *dev)
if (netif_msg_rx_status(db))
dev_dbg(db->dev, "RXCount: %x\n", rxcount);
- if ((db->skb_last != NULL) && (rxlen_last > 0)) {
- dev->stats.rx_bytes += rxlen_last;
-
- /* Pass to upper layer */
- db->skb_last->protocol = eth_type_trans(db->skb_last,
- dev);
- netif_rx(db->skb_last);
- dev->stats.rx_packets++;
- db->skb_last = NULL;
- rxlen_last = 0;
-
- reg_val = readl(db->membase + EMAC_RX_CTL_REG);
- reg_val &= ~EMAC_RX_CTL_DMA_EN;
- writel(reg_val, db->membase + EMAC_RX_CTL_REG);
- }
-
if (!rxcount) {
db->emacrx_completed_flag = 1;
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN |
+ EMAC_INT_CTL_TX_ABRT_EN |
+ EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
/* had one stuck? */
@@ -565,7 +671,9 @@ static void emac_rx(struct net_device *dev)
writel(reg_val | EMAC_CTL_RX_EN,
db->membase + EMAC_CTL_REG);
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN |
+ EMAC_INT_CTL_TX_ABRT_EN |
+ EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
db->emacrx_completed_flag = 1;
@@ -623,6 +731,19 @@ static void emac_rx(struct net_device *dev)
if (netif_msg_rx_status(db))
dev_dbg(db->dev, "RxLen %x\n", rxlen);
+ if (rxlen >= dev->mtu && db->rx_chan) {
+ reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+ reg_val |= EMAC_RX_CTL_DMA_EN;
+ writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+ if (!emac_dma_inblk_32bit(db, skb, rdptr, rxlen))
+ break;
+
+ /* re enable cpu receive. then try to receive by emac_inblk_32bit */
+ reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+ reg_val &= ~EMAC_RX_CTL_DMA_EN;
+ writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+ }
+
emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG,
rdptr, rxlen);
dev->stats.rx_bytes += rxlen;
@@ -666,18 +787,23 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id)
}
/* Transmit Interrupt check */
- if (int_status & (0x01 | 0x02))
+ if (int_status & EMAC_INT_STA_TX_COMPLETE)
emac_tx_done(dev, db, int_status);
- if (int_status & (0x04 | 0x08))
+ if (int_status & EMAC_INT_STA_TX_ABRT)
netdev_info(dev, " ab : %x\n", int_status);
/* Re-enable interrupt mask */
if (db->emacrx_completed_flag == 1) {
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN);
+ writel(reg_val, db->membase + EMAC_INT_CTL_REG);
+ } else {
+ reg_val = readl(db->membase + EMAC_INT_CTL_REG);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
}
+
spin_unlock(&db->lock);
return IRQ_HANDLED;
@@ -782,6 +908,58 @@ static const struct net_device_ops emac_netdev_ops = {
#endif
};
+static int emac_configure_dma(struct emac_board_info *db)
+{
+ struct platform_device *pdev = db->pdev;
+ struct net_device *ndev = db->ndev;
+ struct dma_slave_config conf = {};
+ struct resource *regs;
+ int err = 0;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ netdev_err(ndev, "get io resource from device failed.\n");
+ err = -ENOMEM;
+ goto out_clear_chan;
+ }
+
+ netdev_info(ndev, "get io resource from device: %pa, size = %u\n",
+ &regs->start, (unsigned int)resource_size(regs));
+ db->emac_rx_fifo = regs->start + EMAC_RX_IO_DATA_REG;
+
+ db->rx_chan = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(db->rx_chan)) {
+ netdev_err(ndev,
+ "failed to request dma channel. dma is disabled\n");
+ err = PTR_ERR(db->rx_chan);
+ goto out_clear_chan;
+ }
+
+ conf.direction = DMA_DEV_TO_MEM;
+ conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ conf.src_addr = db->emac_rx_fifo;
+ conf.dst_maxburst = 4;
+ conf.src_maxburst = 4;
+ conf.device_fc = false;
+
+ err = dmaengine_slave_config(db->rx_chan, &conf);
+ if (err) {
+ netdev_err(ndev, "config dma slave failed\n");
+ err = -EINVAL;
+ goto out_slave_configure_err;
+ }
+
+ return err;
+
+out_slave_configure_err:
+ dma_release_channel(db->rx_chan);
+
+out_clear_chan:
+ db->rx_chan = NULL;
+ return err;
+}
+
/* Search EMAC board, allocate space and register it
*/
static int emac_probe(struct platform_device *pdev)
@@ -824,6 +1002,9 @@ static int emac_probe(struct platform_device *pdev)
goto out_iounmap;
}
+ if (emac_configure_dma(db))
+ netdev_info(ndev, "configure dma failed. disable dma.\n");
+
db->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(db->clk)) {
ret = PTR_ERR(db->clk);
@@ -891,6 +1072,7 @@ out_clk_disable_unprepare:
clk_disable_unprepare(db->clk);
out_dispose_mapping:
irq_dispose_mapping(ndev->irq);
+ dma_release_channel(db->rx_chan);
out_iounmap:
iounmap(db->membase);
out:
@@ -906,6 +1088,11 @@ static int emac_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct emac_board_info *db = netdev_priv(ndev);
+ if (db->rx_chan) {
+ dmaengine_terminate_all(db->rx_chan);
+ dma_release_channel(db->rx_chan);
+ }
+
unregister_netdev(ndev);
sunxi_sram_release(&pdev->dev);
clk_disable_unprepare(db->clk);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.h b/drivers/net/ethernet/allwinner/sun4i-emac.h
index 38c72d9ec600..90bd9ad77607 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.h
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.h
@@ -38,6 +38,7 @@
#define EMAC_RX_CTL_REG (0x3c)
#define EMAC_RX_CTL_AUTO_DRQ_EN (1 << 1)
#define EMAC_RX_CTL_DMA_EN (1 << 2)
+#define EMAC_RX_CTL_FLUSH_FIFO (1 << 3)
#define EMAC_RX_CTL_PASS_ALL_EN (1 << 4)
#define EMAC_RX_CTL_PASS_CTL_EN (1 << 5)
#define EMAC_RX_CTL_PASS_CRC_ERR_EN (1 << 6)
@@ -61,7 +62,21 @@
#define EMAC_RX_IO_DATA_STATUS_OK (1 << 7)
#define EMAC_RX_FBC_REG (0x50)
#define EMAC_INT_CTL_REG (0x54)
+#define EMAC_INT_CTL_RX_EN (1 << 8)
+#define EMAC_INT_CTL_TX0_EN (1)
+#define EMAC_INT_CTL_TX1_EN (1 << 1)
+#define EMAC_INT_CTL_TX_EN (EMAC_INT_CTL_TX0_EN | EMAC_INT_CTL_TX1_EN)
+#define EMAC_INT_CTL_TX0_ABRT_EN (0x1 << 2)
+#define EMAC_INT_CTL_TX1_ABRT_EN (0x1 << 3)
+#define EMAC_INT_CTL_TX_ABRT_EN (EMAC_INT_CTL_TX0_ABRT_EN | EMAC_INT_CTL_TX1_ABRT_EN)
#define EMAC_INT_STA_REG (0x58)
+#define EMAC_INT_STA_TX0_COMPLETE (0x1)
+#define EMAC_INT_STA_TX1_COMPLETE (0x1 << 1)
+#define EMAC_INT_STA_TX_COMPLETE (EMAC_INT_STA_TX0_COMPLETE | EMAC_INT_STA_TX1_COMPLETE)
+#define EMAC_INT_STA_TX0_ABRT (0x1 << 2)
+#define EMAC_INT_STA_TX1_ABRT (0x1 << 3)
+#define EMAC_INT_STA_TX_ABRT (EMAC_INT_STA_TX0_ABRT | EMAC_INT_STA_TX1_ABRT)
+#define EMAC_INT_STA_RX_COMPLETE (0x1 << 8)
#define EMAC_MAC_CTL0_REG (0x5c)
#define EMAC_MAC_CTL0_RX_FLOW_CTL_EN (1 << 2)
#define EMAC_MAC_CTL0_TX_FLOW_CTL_EN (1 << 3)
@@ -87,8 +102,11 @@
#define EMAC_MAC_CLRT_RM (0x0f)
#define EMAC_MAC_MAXF_REG (0x70)
#define EMAC_MAC_SUPP_REG (0x74)
+#define EMAC_MAC_SUPP_100M (0x1 << 8)
#define EMAC_MAC_TEST_REG (0x78)
#define EMAC_MAC_MCFG_REG (0x7c)
+#define EMAC_MAC_MCFG_MII_CLKD_MASK (0xff << 2)
+#define EMAC_MAC_MCFG_MII_CLKD_72 (0x0d << 2)
#define EMAC_MAC_A0_REG (0x98)
#define EMAC_MAC_A1_REG (0x9c)
#define EMAC_MAC_A2_REG (0xa0)
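
The new EMAC_INT_CTL_* names are pure renames of the magic constants used before. A quick user-space self-check, with local stand-ins for the macros above, confirms the combined mask is bit-identical to the old (0xf << 0) | (0x01 << 8):

#include <assert.h>
#include <stdio.h>

#define BIT(n)      (1u << (n))
#define TX_EN       (BIT(0) | BIT(1))	/* EMAC_INT_CTL_TX_EN */
#define TX_ABRT_EN  (BIT(2) | BIT(3))	/* EMAC_INT_CTL_TX_ABRT_EN */
#define RX_EN       BIT(8)		/* EMAC_INT_CTL_RX_EN */

int main(void)
{
	/* named masks must equal the magic numbers they replace */
	assert((TX_EN | TX_ABRT_EN | RX_EN) == ((0xf << 0) | (0x01 << 8)));
	printf("mask = 0x%x\n", TX_EN | TX_ABRT_EN | RX_EN);	/* 0x10f */
	return 0;
}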
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 732da15a3827..22fe98555b24 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -589,8 +589,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
}
ap->name = dev->name;
- if (ap->pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
+ dev->features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pdev, dev);
@@ -1130,11 +1129,7 @@ static int ace_init(struct net_device *dev)
/*
* Configure DMA attributes.
*/
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- ap->pci_using_dac = 1;
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- ap->pci_using_dac = 0;
- } else {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
ecode = -ENODEV;
goto init_error;
}
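
The acenic change assumes that a 64-bit dma_set_mask() is the only case worth distinguishing: if it fails there is no usable addressing mode at all, and NETIF_F_HIGHDMA can be set unconditionally because the DMA layer bounce-buffers anything the device cannot reach. A minimal sketch of the resulting probe logic (function name hypothetical):

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

static int probe_dma_sketch(struct pci_dev *pdev, struct net_device *dev)
{
	/* one mask request; no 32-bit fallback to track per device */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		return -ENODEV;

	dev->features |= NETIF_F_HIGHDMA;	/* always safe now */
	return 0;
}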
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index 265fa601a258..ca5ce0cbbad1 100644
--- a/drivers/net/ethernet/alteon/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -692,7 +692,6 @@ struct ace_private
__attribute__ ((aligned (SMP_CACHE_BYTES)));
u32 last_tx, last_std_rx, last_mini_rx;
#endif
- int pci_using_dac;
u8 firmware_major;
u8 firmware_minor;
u8 firmware_fix;
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index f5ec35fa4c63..466ad9470d1f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -48,6 +48,11 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
+/* device capabilities */
+enum ena_admin_aq_caps_id {
+ ENA_ADMIN_ENI_STATS = 0,
+};
+
enum ena_admin_placement_policy_type {
/* descriptors and headers are in host memory */
ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
@@ -455,7 +460,10 @@ struct ena_admin_device_attr_feature_desc {
*/
u32 supported_features;
- u32 reserved3;
+ /* bitmap of ena_admin_aq_caps_id, which represents device
+ * capabilities.
+ */
+ u32 capabilities;
/* Indicates how many bits are used for physical address access. */
u32 phys_addr_width;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index ab413fc1f68e..8c8b4c88c7de 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1971,6 +1971,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
sizeof(get_resp.u.dev_attr));
ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
+ ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
rc = ena_com_get_feature(ena_dev, &get_resp,
@@ -2223,6 +2224,13 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
struct ena_com_stats_ctx ctx;
int ret;
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+ netdev_err(ena_dev->net_device,
+ "Capability %d isn't supported\n",
+ ENA_ADMIN_ENI_STATS);
+ return -EOPNOTSUPP;
+ }
+
memset(&ctx, 0x0, sizeof(ctx));
ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
if (likely(ret == 0))
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 73b03ce59412..3c5081d9d25d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -314,6 +314,7 @@ struct ena_com_dev {
struct ena_rss rss;
u32 supported_features;
+ u32 capabilities;
u32 dma_addr_bits;
struct ena_host_attribute host_attr;
@@ -967,6 +968,18 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d
ena_dev->adaptive_coalescing = false;
}
+/* ena_com_get_cap - query whether device supports a capability.
+ * @ena_dev: ENA communication layer struct
+ * @cap_id: enum value representing the capability
+ *
+ * @return - true if capability is supported or false otherwise
+ */
+static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
+ enum ena_admin_aq_caps_id cap_id)
+{
+ return !!(ena_dev->capabilities & BIT(cap_id));
+}
+
/* ena_com_update_intr_reg - Prepare interrupt register
* @intr_reg: interrupt register to update.
* @rx_delay_interval: Rx interval in usecs
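
A usage sketch for the new helper, using only symbols introduced in this series (ena_com_get_cap, ENA_ADMIN_ENI_STATS); the wrapper name is illustrative. It shows the intended pattern: gate optional admin commands on the advertised capabilities bitmap instead of probing once and caching a bool, as the removed eni_stats_supported flag did:

static int get_eni_stats_sketch(struct ena_com_dev *ena_dev,
				struct ena_admin_eni_stats *stats)
{
	/* device never advertised the capability: skip the admin command */
	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS))
		return -EOPNOTSUPP;

	return ena_com_get_eni_stats(ena_dev, stats);
}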
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 13e745cf3781..39242c5a1729 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -82,7 +82,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
ENA_STAT_RX_ENTRY(csum_good),
ENA_STAT_RX_ENTRY(refil_partial),
- ENA_STAT_RX_ENTRY(bad_csum),
+ ENA_STAT_RX_ENTRY(csum_bad),
ENA_STAT_RX_ENTRY(page_alloc_fail),
ENA_STAT_RX_ENTRY(skb_alloc_fail),
ENA_STAT_RX_ENTRY(dma_mapping_err),
@@ -110,8 +110,7 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
-#define ENA_STATS_ARRAY_ENI(adapter) \
- (ARRAY_SIZE(ena_stats_eni_strings) * (adapter)->eni_stats_supported)
+#define ENA_STATS_ARRAY_ENI(adapter) ARRAY_SIZE(ena_stats_eni_strings)
static void ena_safe_update_stat(u64 *src, u64 *dst,
struct u64_stats_sync *syncp)
@@ -213,8 +212,9 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *dev = adapter->ena_dev;
- ena_get_stats(adapter, data, adapter->eni_stats_supported);
+ ena_get_stats(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
}
static int ena_get_sw_stats_count(struct ena_adapter *adapter)
@@ -226,7 +226,9 @@ static int ena_get_sw_stats_count(struct ena_adapter *adapter)
static int ena_get_hw_stats_count(struct ena_adapter *adapter)
{
- return ENA_STATS_ARRAY_ENI(adapter);
+ bool supported = ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS);
+
+ return ENA_STATS_ARRAY_ENI(adapter) * supported;
}
int ena_get_sset_count(struct net_device *netdev, int sset)
@@ -316,10 +318,11 @@ static void ena_get_ethtool_strings(struct net_device *netdev,
u8 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *dev = adapter->ena_dev;
switch (sset) {
case ETH_SS_STATS:
- ena_get_strings(adapter, data, adapter->eni_stats_supported);
+ ena_get_strings(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
break;
}
}
@@ -465,7 +468,9 @@ static void ena_get_drvinfo(struct net_device *dev,
}
static void ena_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
@@ -476,7 +481,9 @@ static void ena_get_ringparam(struct net_device *netdev,
}
static int ena_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
u32 new_tx_size, new_rx_size;
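
The ringparam signature changes here (and in the pcnet32, xgbe, aquantia, atl1 and b44 hunks below) track the 5.17 ethtool core, which now passes a kernel-only ring struct and an extack to every get/set_ringparam op. A minimal sketch of a conforming implementation; the limits are hypothetical and both new parameters may simply be ignored:

#include <linux/ethtool.h>

static void sketch_get_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	/* hypothetical fixed limits; a real driver reads them from hw caps */
	ring->rx_max_pending = 4096;
	ring->tx_max_pending = 4096;
	ring->rx_pending = 1024;
	ring->tx_pending = 1024;
}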
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 7d5d885d85d5..53080fd143dc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -103,7 +103,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
- adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
+ ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD);
ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
netif_err(adapter, tx_err, dev, "Transmit time out\n");
@@ -166,11 +166,9 @@ static int ena_xmit_common(struct net_device *dev,
"Failed to prepare tx bufs\n");
ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
&ring->syncp);
- if (rc != -ENOMEM) {
- adapter->reset_reason =
- ENA_REGS_RESET_DRIVER_INVALID_STATE;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
- }
+ if (rc != -ENOMEM)
+ ena_reset_device(adapter,
+ ENA_REGS_RESET_DRIVER_INVALID_STATE);
return rc;
}
@@ -434,7 +432,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
xdp_stat = &rx_ring->rx_stats.xdp_pass;
break;
default:
- bpf_warn_invalid_xdp_action(verdict);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_invalid;
}
@@ -1269,45 +1267,39 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
netif_err(ring->adapter,
tx_done,
ring->netdev,
- "tx_info doesn't have valid %s",
- is_xdp ? "xdp frame" : "skb");
+ "tx_info doesn't have valid %s. qid %u req_id %u",
+ is_xdp ? "xdp frame" : "skb", ring->qid, req_id);
else
netif_err(ring->adapter,
tx_done,
ring->netdev,
- "Invalid req_id: %hu\n",
- req_id);
+ "Invalid req_id %u in qid %u\n",
+ req_id, ring->qid);
ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
+ ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
- /* Trigger device reset */
- ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
- set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
return -EFAULT;
}
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
- struct ena_tx_buffer *tx_info = NULL;
+ struct ena_tx_buffer *tx_info;
- if (likely(req_id < tx_ring->ring_size)) {
- tx_info = &tx_ring->tx_buffer_info[req_id];
- if (likely(tx_info->skb))
- return 0;
- }
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->skb))
+ return 0;
return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}
static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
- struct ena_tx_buffer *tx_info = NULL;
+ struct ena_tx_buffer *tx_info;
- if (likely(req_id < xdp_ring->ring_size)) {
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- if (likely(tx_info->xdpf))
- return 0;
- }
+ tx_info = &xdp_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->xdpf))
+ return 0;
return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}
@@ -1332,9 +1324,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
&req_id);
- if (rc)
+ if (rc) {
+ if (unlikely(rc == -EINVAL))
+ handle_invalid_req_id(tx_ring, req_id, NULL,
+ false);
break;
+ }
+ /* validate that the request id points to a valid skb */
rc = validate_tx_req_id(tx_ring, req_id);
if (rc)
break;
@@ -1427,6 +1424,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
u16 *next_to_clean)
{
struct ena_rx_buffer *rx_info;
+ struct ena_adapter *adapter;
u16 len, req_id, buf = 0;
struct sk_buff *skb;
void *page_addr;
@@ -1439,8 +1437,11 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_info = &rx_ring->rx_buffer_info[req_id];
if (unlikely(!rx_info->page)) {
- netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
- "Page is NULL\n");
+ adapter = rx_ring->adapter;
+ netif_err(adapter, rx_err, rx_ring->netdev,
+ "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
+ ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
+ ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
return NULL;
}
@@ -1550,7 +1551,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l3_csum_err))) {
/* ipv4 checksum error */
skb->ip_summed = CHECKSUM_NONE;
- ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+ ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
&rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX IPv4 header checksum error\n");
@@ -1562,7 +1563,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
if (unlikely(ena_rx_ctx->l4_csum_err)) {
/* TCP/UDP checksum error */
- ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+ ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
&rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX L4 checksum error\n");
@@ -1773,15 +1774,12 @@ error:
if (rc == -ENOSPC) {
ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
&rx_ring->syncp);
- adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+ ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
} else {
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
&rx_ring->syncp);
- adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+ ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
}
-
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
-
return 0;
}
@@ -1896,9 +1894,14 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
&req_id);
- if (rc)
+ if (rc) {
+ if (unlikely(rc == -EINVAL))
+ handle_invalid_req_id(xdp_ring, req_id, NULL,
+ true);
break;
+ }
+ /* validate that the request id points to a valid xdp_frame */
rc = validate_xdp_req_id(xdp_ring, req_id);
if (rc)
break;
@@ -3240,11 +3243,11 @@ err:
int ena_update_hw_stats(struct ena_adapter *adapter)
{
- int rc = 0;
+ int rc;
rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
if (rc) {
- dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n");
+ netdev_err(adapter->netdev, "Failed to get ENI stats\n");
return rc;
}
@@ -3641,8 +3644,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
adapter->last_keep_alive_jiffies = jiffies;
- dev_err(&pdev->dev, "Device reset completed successfully\n");
-
return rc;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
@@ -3672,6 +3673,8 @@ static void ena_fw_reset_device(struct work_struct *work)
if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
ena_destroy_device(adapter, false);
ena_restore_device(adapter);
+
+ dev_err(&adapter->pdev->dev, "Device reset completed successfully\n");
}
rtnl_unlock();
@@ -3694,9 +3697,8 @@ static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
netif_err(adapter, rx_err, adapter->netdev,
"Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
rx_ring->qid);
- adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
- smp_mb__before_atomic();
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+
+ ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
return -EIO;
}
@@ -3733,9 +3735,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
netif_err(adapter, tx_err, adapter->netdev,
"Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
tx_ring->qid);
- adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
- smp_mb__before_atomic();
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
return -EIO;
}
@@ -3761,9 +3761,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
"The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
missed_tx,
adapter->missing_tx_completion_threshold);
- adapter->reset_reason =
- ENA_REGS_RESET_MISS_TX_CMPL;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
rc = -EIO;
}
@@ -3884,8 +3882,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
"Keep alive watchdog timeout.\n");
ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
&adapter->syncp);
- adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
}
}
@@ -3896,8 +3893,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
"ENA admin queue is not in running state!\n");
ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
&adapter->syncp);
- adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO);
}
}
@@ -4013,10 +4009,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
/* 1 IRQ for mgmnt and 1 IRQs for each IO direction */
max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
- if (unlikely(!max_num_io_queues)) {
- dev_err(&pdev->dev, "The device doesn't have io queues\n");
- return -EFAULT;
- }
return max_num_io_queues;
}
@@ -4101,7 +4093,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val));
- if (unlikely(rc && (rc != -EOPNOTSUPP))) {
+ if (unlikely(rc)) {
dev_err(dev, "Cannot fill indirect table\n");
goto err_fill_indir;
}
@@ -4137,10 +4129,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
}
-static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
+static void ena_calc_io_queue_size(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
- struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
- struct ena_com_dev *ena_dev = ctx->ena_dev;
+ struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
u32 max_tx_queue_size;
@@ -4148,7 +4141,7 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
struct ena_admin_queue_ext_feature_fields *max_queue_ext =
- &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
+ &get_feat_ctx->max_queue_ext.max_queue_ext;
max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
max_queue_ext->max_rx_sq_depth);
max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
@@ -4160,13 +4153,13 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
max_tx_queue_size = min_t(u32, max_tx_queue_size,
max_queue_ext->max_tx_sq_depth);
- ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queue_ext->max_per_packet_tx_descs);
- ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queue_ext->max_per_packet_rx_descs);
+ adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_tx_descs);
+ adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_rx_descs);
} else {
struct ena_admin_queue_feature_desc *max_queues =
- &ctx->get_feat_ctx->max_queues;
+ &get_feat_ctx->max_queues;
max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
max_queues->max_sq_depth);
max_tx_queue_size = max_queues->max_cq_depth;
@@ -4178,10 +4171,10 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
max_tx_queue_size = min_t(u32, max_tx_queue_size,
max_queues->max_sq_depth);
- ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queues->max_packet_tx_descs);
- ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queues->max_packet_rx_descs);
+ adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_tx_descs);
+ adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_rx_descs);
}
max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
@@ -4195,12 +4188,10 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
tx_queue_size = rounddown_pow_of_two(tx_queue_size);
rx_queue_size = rounddown_pow_of_two(rx_queue_size);
- ctx->max_tx_queue_size = max_tx_queue_size;
- ctx->max_rx_queue_size = max_rx_queue_size;
- ctx->tx_queue_size = tx_queue_size;
- ctx->rx_queue_size = rx_queue_size;
-
- return 0;
+ adapter->max_tx_ring_size = max_tx_queue_size;
+ adapter->max_rx_ring_size = max_rx_queue_size;
+ adapter->requested_tx_ring_size = tx_queue_size;
+ adapter->requested_rx_ring_size = rx_queue_size;
}
/* ena_probe - Device Initialization Routine
@@ -4215,7 +4206,6 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
*/
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ena_calc_queue_size_ctx calc_queue_ctx = {};
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
@@ -4300,10 +4290,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_device_destroy;
}
- calc_queue_ctx.ena_dev = ena_dev;
- calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
- calc_queue_ctx.pdev = pdev;
-
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
* Updated during device initialization with the real granularity
*/
@@ -4311,8 +4297,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
- rc = ena_calc_io_queue_size(&calc_queue_ctx);
- if (rc || !max_num_io_queues) {
+ ena_calc_io_queue_size(adapter, &get_feat_ctx);
+ if (unlikely(!max_num_io_queues)) {
rc = -EFAULT;
goto err_device_destroy;
}
@@ -4321,13 +4307,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
- adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
- adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
- adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
- adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
- adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
- adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
-
adapter->num_io_queues = max_num_io_queues;
adapter->max_num_io_queues = max_num_io_queues;
adapter->last_monitored_tx_qid = 0;
@@ -4378,11 +4357,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_config_debug_area(adapter);
- if (!ena_update_hw_stats(adapter))
- adapter->eni_stats_supported = true;
- else
- adapter->eni_stats_supported = false;
-
memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
netif_carrier_off(netdev);
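
A condensed sketch of the reworked TX-completion flow above, using only functions from this file: the req_id bounds check has moved into ena_com_tx_comp_req_id_get(), which now returns -EINVAL for an out-of-range id, so validate_tx_req_id() merely checks that the slot still holds an skb:

static int clean_one_tx_comp_sketch(struct ena_ring *tx_ring)
{
	u16 req_id;
	int rc;

	rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id);
	if (rc) {
		if (rc == -EINVAL)	/* id out of range: reset device */
			handle_invalid_req_id(tx_ring, req_id, NULL, false);
		return rc;
	}

	/* id is in range; verify the slot actually holds an skb */
	return validate_tx_req_id(tx_ring, req_id);
}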
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 0c39fc2fa345..1bdce99bf688 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+#include <uapi/linux/bpf.h>
#include "ena_com.h"
#include "ena_eth_com.h"
@@ -139,18 +140,6 @@ struct ena_napi {
struct dim dim;
};
-struct ena_calc_queue_size_ctx {
- struct ena_com_dev_get_features_ctx *get_feat_ctx;
- struct ena_com_dev *ena_dev;
- struct pci_dev *pdev;
- u32 tx_queue_size;
- u32 rx_queue_size;
- u32 max_tx_queue_size;
- u32 max_rx_queue_size;
- u16 max_tx_sgl_size;
- u16 max_rx_sgl_size;
-};
-
struct ena_tx_buffer {
struct sk_buff *skb;
/* num of ena desc for this specific skb
@@ -215,7 +204,7 @@ struct ena_stats_rx {
u64 rx_copybreak_pkt;
u64 csum_good;
u64 refil_partial;
- u64 bad_csum;
+ u64 csum_bad;
u64 page_alloc_fail;
u64 skb_alloc_fail;
u64 dma_mapping_err;
@@ -378,7 +367,6 @@ struct ena_adapter {
struct u64_stats_sync syncp;
struct ena_stats_dev dev_stats;
struct ena_admin_eni_stats eni_stats;
- bool eni_stats_supported;
/* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid;
@@ -406,6 +394,15 @@ int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
int ena_get_sset_count(struct net_device *netdev, int sset);
+static inline void ena_reset_device(struct ena_adapter *adapter,
+ enum ena_regs_reset_reason_types reset_reason)
+{
+ adapter->reset_reason = reset_reason;
+ /* Make sure reset reason is set before triggering the reset */
+ smp_mb__before_atomic();
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+}
+
enum ena_xdp_errors_t {
ENA_XDP_ALLOWED = 0,
ENA_XDP_CURRENT_MTU_TOO_LARGE,
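
The ena_reset_device() helper above folds the repeated three-step reset request (store the reason, barrier, set the trigger flag) into one call. A sketch of a typical converted call site, mirroring the keep-alive watchdog hunk earlier in this diff:

static void on_keep_alive_timeout_sketch(struct ena_adapter *adapter)
{
	ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
			  &adapter->syncp);
	/* replaces: reset_reason = ...; smp_mb__before_atomic(); set_bit(...) */
	ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
}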
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 2f808dbc8b0e..3a351d3396bf 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -680,6 +680,7 @@ static int a2065_init_one(struct zorro_dev *z,
unsigned long base_addr = board + A2065_LANCE;
unsigned long mem_start = board + A2065_RAM;
struct resource *r1, *r2;
+ u8 addr[ETH_ALEN];
u32 serial;
int err;
@@ -706,17 +707,18 @@ static int a2065_init_one(struct zorro_dev *z,
r2->name = dev->name;
serial = be32_to_cpu(z->rom.er_SerialNumber);
- dev->dev_addr[0] = 0x00;
+ addr[0] = 0x00;
if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
- dev->dev_addr[1] = 0x80;
- dev->dev_addr[2] = 0x10;
+ addr[1] = 0x80;
+ addr[2] = 0x10;
} else { /* Ameristar */
- dev->dev_addr[1] = 0x00;
- dev->dev_addr[2] = 0x9f;
+ addr[1] = 0x00;
+ addr[2] = 0x9f;
}
- dev->dev_addr[3] = (serial >> 16) & 0xff;
- dev->dev_addr[4] = (serial >> 8) & 0xff;
- dev->dev_addr[5] = serial & 0xff;
+ addr[3] = (serial >> 16) & 0xff;
+ addr[4] = (serial >> 8) & 0xff;
+ addr[5] = serial & 0xff;
+ eth_hw_addr_set(dev, addr);
dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 5e0f645f5bde..4ea7b9f3c424 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -441,11 +441,11 @@ static int ariadne_open(struct net_device *dev)
/* Set the Ethernet Hardware Address */
lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[0];
lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[1];
lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[2];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[2];
/* Set the Init Block Mode */
lance->RAP = CSR15; /* Mode Register */
@@ -717,6 +717,7 @@ static int ariadne_init_one(struct zorro_dev *z,
unsigned long mem_start = board + ARIADNE_RAM;
struct resource *r1, *r2;
struct net_device *dev;
+ u8 addr[ETH_ALEN];
u32 serial;
int err;
@@ -740,12 +741,13 @@ static int ariadne_init_one(struct zorro_dev *z,
r2->name = dev->name;
serial = be32_to_cpu(z->rom.er_SerialNumber);
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x60;
- dev->dev_addr[2] = 0x30;
- dev->dev_addr[3] = (serial >> 16) & 0xff;
- dev->dev_addr[4] = (serial >> 8) & 0xff;
- dev->dev_addr[5] = serial & 0xff;
+ addr[0] = 0x00;
+ addr[1] = 0x60;
+ addr[2] = 0x30;
+ addr[3] = (serial >> 16) & 0xff;
+ addr[4] = (serial >> 8) & 0xff;
+ addr[5] = serial & 0xff;
+ eth_hw_addr_set(dev, addr);
dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 9c7d9690d00c..27869164c6e6 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -471,6 +471,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
int i;
static int did_version;
unsigned short save1, save2;
+ u8 addr[ETH_ALEN];
PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
(long)memaddr, (long)ioaddr ));
@@ -585,14 +586,16 @@ static unsigned long __init lance_probe1( struct net_device *dev,
eth_hw_addr_set(dev, OldRieblDefHwaddr);
break;
case NEW_RIEBL:
- lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
+ lp->memcpy_f(addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
break;
case PAM_CARD:
i = IO->eeprom;
for( i = 0; i < 6; ++i )
- dev->dev_addr[i] =
+ addr[i] =
((((unsigned short *)MEM)[i*2] & 0x0f) << 4) |
((((unsigned short *)MEM)[i*2+1] & 0x0f));
+ eth_hw_addr_set(dev, addr);
i = IO->mem;
break;
}
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 493b0cefcc2a..ec8df05e7bf6 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1032,6 +1032,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
int i, ret;
unsigned long esar_base;
unsigned char *esar;
+ u8 addr[ETH_ALEN];
const char *desc;
if (dec_lance_debug && version_printed++ == 0)
@@ -1228,7 +1229,8 @@ static int dec_lance_probe(struct device *bdev, const int type)
break;
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = esar[i * 4];
+ addr[i] = esar[i * 4];
+ eth_hw_addr_set(dev, addr);
printk("%s: %s, addr = %pM, irq = %d\n",
name, desc, dev->dev_addr, dev->irq);
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 6784f8748638..055fda11c572 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -129,6 +129,7 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d)
{
unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
struct hplance_private *lp;
+ u8 addr[ETH_ALEN];
int i;
/* reset the board */
@@ -144,9 +145,10 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d)
/* The NVRAM holds our ethernet address, one nibble per byte,
* at bytes NVRAMOFF+1,3,5,7,9...
*/
- dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+ addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
| (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
}
+ eth_hw_addr_set(dev, addr);
lp = netdev_priv(dev);
lp->lance.name = d->name;
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 945bf1d87507..462016666752 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -480,6 +480,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
unsigned long flags;
int err = -ENOMEM;
void __iomem *bios;
+ u8 addr[ETH_ALEN];
/* First we look for special cases.
Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
@@ -541,7 +542,8 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
/* There is a 16 byte station address PROM at the base address.
The first six bytes are the station address. */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
+ addr[i] = inb(ioaddr + i);
+ eth_hw_addr_set(dev, addr);
printk("%pM", dev->dev_addr);
dev->base_addr = ioaddr;
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index da97fccea9ea..410c7b67eba4 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -74,6 +74,7 @@ static struct net_device * __init mvme147lance_probe(void)
static int called;
static const char name[] = "MVME147 LANCE";
struct m147lance_private *lp;
+ u8 macaddr[ETH_ALEN];
u_long *addr;
u_long address;
int err;
@@ -93,15 +94,16 @@ static struct net_device * __init mvme147lance_probe(void)
addr = (u_long *)ETHERNET_ADDRESS;
address = *addr;
- dev->dev_addr[0] = 0x08;
- dev->dev_addr[1] = 0x00;
- dev->dev_addr[2] = 0x3e;
+ macaddr[0] = 0x08;
+ macaddr[1] = 0x00;
+ macaddr[2] = 0x3e;
address = address >> 8;
- dev->dev_addr[5] = address&0xff;
+ macaddr[5] = address&0xff;
address = address >> 8;
- dev->dev_addr[4] = address&0xff;
+ macaddr[4] = address&0xff;
address = address >> 8;
- dev->dev_addr[3] = address&0xff;
+ macaddr[3] = address&0xff;
+ eth_hw_addr_set(dev, macaddr);
printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
dev->name, dev->base_addr, MVME147_LANCE_IRQ,
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 032e8922b482..8ba579b89b75 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -251,7 +251,7 @@ static void ni65_recv_intr(struct net_device *dev,int);
static void ni65_xmit_intr(struct net_device *dev,int);
static int ni65_open(struct net_device *dev);
static int ni65_lance_reinit(struct net_device *dev);
-static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
+static void ni65_init_lance(struct priv *p,const unsigned char*,int,int);
static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
struct net_device *dev);
static void ni65_timeout(struct net_device *dev, unsigned int txqueue);
@@ -418,6 +418,7 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
{
int i,j;
struct priv *p;
+ u8 addr[ETH_ALEN];
unsigned long flags;
dev->irq = irq;
@@ -444,7 +445,8 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
return -ENODEV;
for(j=0;j<6;j++)
- dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);
+ addr[j] = inb(ioaddr+cards[i].addr_offset+j);
+ eth_hw_addr_set(dev, addr);
if( (j=ni65_alloc_buffer(dev)) < 0) {
release_region(ioaddr, cards[i].total_size);
@@ -566,7 +568,7 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
/*
* set lance register and trigger init
*/
-static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
+static void ni65_init_lance(struct priv *p,const unsigned char *daddr,int filter,int mode)
{
int i;
u32 pib;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f5c50ff377ff..c20c369c7eb8 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -860,7 +860,9 @@ static int pcnet32_nway_reset(struct net_device *dev)
}
static void pcnet32_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct pcnet32_private *lp = netdev_priv(dev);
@@ -871,7 +873,9 @@ static void pcnet32_get_ringparam(struct net_device *dev,
}
static int pcnet32_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 533b8519ec35..466273b22f0a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -898,6 +898,8 @@
#define PCS_V2_WINDOW_SELECT 0x9064
#define PCS_V2_RV_WINDOW_DEF 0x1060
#define PCS_V2_RV_WINDOW_SELECT 0x1064
+#define PCS_V2_YC_WINDOW_DEF 0x18060
+#define PCS_V2_YC_WINDOW_SELECT 0x18064
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
@@ -1030,8 +1032,8 @@
#define XP_PROP_0_PORT_ID_WIDTH 8
#define XP_PROP_0_PORT_MODE_INDEX 8
#define XP_PROP_0_PORT_MODE_WIDTH 4
-#define XP_PROP_0_PORT_SPEEDS_INDEX 23
-#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
+#define XP_PROP_0_PORT_SPEEDS_INDEX 22
+#define XP_PROP_0_PORT_SPEEDS_WIDTH 5
#define XP_PROP_1_MAX_RX_DMA_INDEX 24
#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 30d24d19f40d..492ac383f16d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1508,9 +1508,6 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags)
- return -EINVAL;
-
mac_tscr = 0;
switch (config.tx_type) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 94879cf8b420..6ceb1cdf6eba 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -619,8 +619,11 @@ static int xgbe_get_module_eeprom(struct net_device *netdev,
return pdata->phy_if.module_eeprom(pdata, eeprom, data);
}
-static void xgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+static void
+xgbe_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
@@ -631,7 +634,9 @@ static void xgbe_get_ringparam(struct net_device *netdev,
}
static int xgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
unsigned int rx, tx;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 90cb55eb5466..efdcf484a510 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -278,6 +278,13 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
(rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ } else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) &&
+ (rdev->device == 0x14b5)) {
+ pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
+
+ /* Yellow Carp devices do not need the CDR workaround */
+ pdata->vdata->an_cdr_workaround = 0;
} else {
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -460,7 +467,7 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev)
return ret;
}
-static const struct xgbe_version_data xgbe_v2a = {
+static struct xgbe_version_data xgbe_v2a = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
.mmc_64bit = 1,
@@ -475,7 +482,7 @@ static const struct xgbe_version_data xgbe_v2a = {
.an_cdr_workaround = 1,
};
-static const struct xgbe_version_data xgbe_v2b = {
+static struct xgbe_version_data xgbe_v2b = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
.mmc_64bit = 1,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 213769054391..2156600641b6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -124,10 +124,10 @@
#include "xgbe.h"
#include "xgbe-common.h"
-#define XGBE_PHY_PORT_SPEED_100 BIT(0)
-#define XGBE_PHY_PORT_SPEED_1000 BIT(1)
-#define XGBE_PHY_PORT_SPEED_2500 BIT(2)
-#define XGBE_PHY_PORT_SPEED_10000 BIT(3)
+#define XGBE_PHY_PORT_SPEED_100 BIT(1)
+#define XGBE_PHY_PORT_SPEED_1000 BIT(2)
+#define XGBE_PHY_PORT_SPEED_2500 BIT(3)
+#define XGBE_PHY_PORT_SPEED_10000 BIT(4)
#define XGBE_MUTEX_RELEASE 0x80000000
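
The PORT_SPEEDS field change (index 23/width 4 to index 22/width 5) widens the property by one low-order bit, which is why every XGBE_PHY_PORT_SPEED_* bit above shifts up by one. A user-space sanity check of the extraction, with get_bits() standing in for the driver's XP_GET_BITS helper:

#include <assert.h>
#include <stdint.h>

static uint32_t get_bits(uint32_t val, int index, int width)
{
	return (val >> index) & ((1u << width) - 1);
}

int main(void)
{
	/* 100M..10G speeds now live at BIT(1)..BIT(4) of the 5-bit field */
	uint32_t prop0 = 0x1eu << 22;	/* 100M|1G|2.5G|10G advertised */

	assert(get_bits(prop0, 22, 5) == 0x1e);
	assert(get_bits(prop0, 22, 5) & (1u << 1));	/* SPEED_100 */
	assert(get_bits(prop0, 22, 5) & (1u << 4));	/* SPEED_10000 */
	return 0;
}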
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 220dc42af31a..ff2d099aab21 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -869,7 +869,7 @@ static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue)
for (i = 0; i < pdata->txq_cnt; i++) {
txq = netdev_get_tx_queue(ndev, i);
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
netif_tx_start_queue(txq);
}
}
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 9a650d1c1bdd..4d2ba30c2fbd 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1237,6 +1237,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
struct bmac_data *bp;
const unsigned char *prop_addr;
unsigned char addr[6];
+ u8 macaddr[6];
struct net_device *dev;
int is_bmac_plus = ((int)match->data) != 0;
@@ -1284,7 +1285,9 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
rev = addr[0] == 0 && addr[1] == 0xA0;
for (j = 0; j < 6; ++j)
- dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+ macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
+
+ eth_hw_addr_set(dev, macaddr);
/* Enable chip without interrupts for now */
bmac_enable_and_reset_chip(dev);
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 4b80e3a52a19..6f8c91eb1263 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -90,7 +90,7 @@ static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(struct timer_list *t);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
-static void __mace_set_address(struct net_device *dev, void *addr);
+static void __mace_set_address(struct net_device *dev, const void *addr);
/*
* If we can't get a skbuff when we need it, we use this area for DMA.
@@ -112,6 +112,7 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
struct net_device *dev;
struct mace_data *mp;
const unsigned char *addr;
+ u8 macaddr[ETH_ALEN];
int j, rev, rc = -EBUSY;
if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
@@ -167,8 +168,9 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
rev = addr[0] == 0 && addr[1] == 0xA0;
for (j = 0; j < 6; ++j) {
- dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+ macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
}
+ eth_hw_addr_set(dev, macaddr);
mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
in_8(&mp->mace->chipid_lo);
@@ -369,11 +371,12 @@ static void mace_reset(struct net_device *dev)
out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}
-static void __mace_set_address(struct net_device *dev, void *addr)
+static void __mace_set_address(struct net_device *dev, const void *addr)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
- unsigned char *p = addr;
+ const unsigned char *p = addr;
+ u8 macaddr[ETH_ALEN];
int i;
/* load up the hardware address */
@@ -385,7 +388,10 @@ static void __mace_set_address(struct net_device *dev, void *addr)
;
}
for (i = 0; i < 6; ++i)
- out_8(&mb->padr, dev->dev_addr[i] = p[i]);
+ out_8(&mb->padr, macaddr[i] = p[i]);
+
+ eth_hw_addr_set(dev, macaddr);
+
if (mp->chipid != BROKEN_ADDRCHG_REV)
out_8(&mb->iac, 0);
}
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 95d3061c61be..8fcaf1639920 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -92,7 +92,7 @@ static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
-static void __mace_set_address(struct net_device *dev, void *addr);
+static void __mace_set_address(struct net_device *dev, const void *addr);
/*
* Load a receive DMA channel with a base address and ring length
@@ -197,6 +197,7 @@ static int mace_probe(struct platform_device *pdev)
unsigned char *addr;
struct net_device *dev;
unsigned char checksum = 0;
+ u8 macaddr[ETH_ALEN];
int err;
dev = alloc_etherdev(PRIV_BYTES);
@@ -229,8 +230,9 @@ static int mace_probe(struct platform_device *pdev)
for (j = 0; j < 6; ++j) {
u8 v = bitrev8(addr[j<<4]);
checksum ^= v;
- dev->dev_addr[j] = v;
+ macaddr[j] = v;
}
+ eth_hw_addr_set(dev, macaddr);
for (; j < 8; ++j) {
checksum ^= bitrev8(addr[j<<4]);
}
@@ -315,11 +317,12 @@ static void mace_reset(struct net_device *dev)
* Load the address on a mace controller.
*/
-static void __mace_set_address(struct net_device *dev, void *addr)
+static void __mace_set_address(struct net_device *dev, const void *addr)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace *mb = mp->mace;
- unsigned char *p = addr;
+ const unsigned char *p = addr;
+ u8 macaddr[ETH_ALEN];
int i;
/* load up the hardware address */
@@ -331,7 +334,8 @@ static void __mace_set_address(struct net_device *dev, void *addr)
;
}
for (i = 0; i < 6; ++i)
- mb->padr = dev->dev_addr[i] = p[i];
+ mb->padr = macaddr[i] = p[i];
+ eth_hw_addr_set(dev, macaddr);
if (mp->chipid != BROKEN_ADDRCHG_REV)
mb->iac = 0;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a9ef0544e30f..a418238f6309 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -812,7 +812,9 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev,
}
static void aq_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
@@ -827,7 +829,9 @@ static void aq_get_ringparam(struct net_device *ndev,
}
static int aq_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
const struct aq_hw_caps_s *hw_caps;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 1bc4d33a0ce5..30a573db02bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -826,7 +826,6 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int hweight = 0;
int err = 0;
- int i;
if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
return -EOPNOTSUPP;
@@ -837,8 +836,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
- for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
- hweight += hweight_long(aq_nic->active_vlans[i]);
+ hweight = bitmap_weight(aq_nic->active_vlans, VLAN_N_VID);
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
if (err)
@@ -871,7 +869,7 @@ int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int err = 0;
- memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
+ bitmap_zero(aq_nic->active_vlans, VLAN_N_VID);
aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e22935ce9573..e65ce7199dac 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -231,9 +231,6 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
struct hwtstamp_config *config)
{
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 81b3756417ec..77e76c9efd32 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -366,6 +366,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
if (!buff->is_eop) {
buff_ = buff;
do {
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+ }
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
is_rsc_completed =
@@ -389,6 +393,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
do {
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+ }
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index e230d8d0ff73..e7a9f9863258 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -144,12 +144,13 @@ static void ax88796c_set_mac_addr(struct net_device *ndev)
static void ax88796c_load_mac_addr(struct net_device *ndev)
{
struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u8 addr[ETH_ALEN];
u16 temp;
lockdep_assert_held(&ax_local->spi_lock);
/* Try the device tree first */
- if (!eth_platform_get_mac_address(&ax_local->spi->dev, ndev->dev_addr) &&
+ if (!platform_get_ethdev_address(&ax_local->spi->dev, ndev) &&
is_valid_ether_addr(ndev->dev_addr)) {
if (netif_msg_probe(ax_local))
dev_info(&ax_local->spi->dev,
@@ -159,18 +160,19 @@ static void ax88796c_load_mac_addr(struct net_device *ndev)
/* Read the MAC address from AX88796C */
temp = AX_READ(&ax_local->ax_spi, P3_MACASR0);
- ndev->dev_addr[5] = (u8)temp;
- ndev->dev_addr[4] = (u8)(temp >> 8);
+ addr[5] = (u8)temp;
+ addr[4] = (u8)(temp >> 8);
temp = AX_READ(&ax_local->ax_spi, P3_MACASR1);
- ndev->dev_addr[3] = (u8)temp;
- ndev->dev_addr[2] = (u8)(temp >> 8);
+ addr[3] = (u8)temp;
+ addr[2] = (u8)(temp >> 8);
temp = AX_READ(&ax_local->ax_spi, P3_MACASR2);
- ndev->dev_addr[1] = (u8)temp;
- ndev->dev_addr[0] = (u8)(temp >> 8);
+ addr[1] = (u8)temp;
+ addr[0] = (u8)(temp >> 8);
- if (is_valid_ether_addr(ndev->dev_addr)) {
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(ndev, addr);
if (netif_msg_probe(ax_local))
dev_info(&ax_local->spi->dev,
"MAC address read from ASIX chip\n");
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 88d2ab748399..ec167af0e3b2 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -766,7 +766,7 @@ static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
unsigned long timestamp;
u32 rx_sm, tx_sm, rx_fd;
- timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
+ timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start);
if (likely(time_before(jiffies, timestamp + HZ / 10)))
return false;
@@ -1024,83 +1024,6 @@ static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
}
-static void ag71xx_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- break;
- case PHY_INTERFACE_MODE_MII:
- if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
- ag71xx_is(ag, AR9340) ||
- ag71xx_is(ag, QCA9530) ||
- (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_GMII:
- if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
- (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
- (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_SGMII:
- if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_RMII:
- if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_RGMII:
- if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
- (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
- break;
- goto unsupported;
- default:
- goto unsupported;
- }
-
- phylink_set(mask, MII);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, Autoneg);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_SGMII ||
- state->interface == PHY_INTERFACE_MODE_RGMII ||
- state->interface == PHY_INTERFACE_MODE_GMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-unsupported:
- linkmode_zero(supported);
-}
-
-static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- state->link = 0;
-}
-
-static void ag71xx_mac_an_restart(struct phylink_config *config)
-{
- /* Not Supported */
-}
-
static void ag71xx_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
@@ -1163,9 +1086,7 @@ static void ag71xx_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
- .validate = ag71xx_mac_validate,
- .mac_pcs_get_state = ag71xx_mac_pcs_get_state,
- .mac_an_restart = ag71xx_mac_an_restart,
+ .validate = phylink_generic_validate,
.mac_config = ag71xx_mac_config,
.mac_link_down = ag71xx_mac_link_down,
.mac_link_up = ag71xx_mac_link_up,
@@ -1177,6 +1098,34 @@ static int ag71xx_phylink_setup(struct ag71xx *ag)
ag->phylink_config.dev = &ag->ndev->dev;
ag->phylink_config.type = PHYLINK_NETDEV;
+ ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
+
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
+ ag71xx_is(ag, AR9340) ||
+ ag71xx_is(ag, QCA9530) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ ag->phylink_config.supported_interfaces);
+
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ ag->phylink_config.supported_interfaces);
+
+ if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ ag->phylink_config.supported_interfaces);
+
+ if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ ag->phylink_config.supported_interfaces);
+
+ if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_RGMII,
+ ag->phylink_config.supported_interfaces);
phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
ag->phy_if_mode, &ag71xx_phylink_mac_ops);
@@ -1913,15 +1862,12 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
if (IS_ERR(ag->mac_reset)) {
netif_err(ag, probe, ndev, "missing mac reset\n");
- err = PTR_ERR(ag->mac_reset);
- goto err_free;
+ return PTR_ERR(ag->mac_reset);
}
ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!ag->mac_base) {
- err = -ENOMEM;
- goto err_free;
- }
+ if (!ag->mac_base)
+ return -ENOMEM;
ndev->irq = platform_get_irq(pdev, 0);
err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
@@ -1929,7 +1875,7 @@ static int ag71xx_probe(struct platform_device *pdev)
if (err) {
netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
ndev->irq);
- goto err_free;
+ return err;
}
ndev->netdev_ops = &ag71xx_netdev_ops;
@@ -1957,10 +1903,8 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
sizeof(struct ag71xx_desc),
&ag->stop_desc_dma, GFP_KERNEL);
- if (!ag->stop_desc) {
- err = -ENOMEM;
- goto err_free;
- }
+ if (!ag->stop_desc)
+ return -ENOMEM;
ag->stop_desc->data = 0;
ag->stop_desc->ctrl = 0;
@@ -1975,7 +1919,7 @@ static int ag71xx_probe(struct platform_device *pdev)
err = of_get_phy_mode(np, &ag->phy_if_mode);
if (err) {
netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
- goto err_free;
+ return err;
}
netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
@@ -1983,7 +1927,7 @@ static int ag71xx_probe(struct platform_device *pdev)
err = clk_prepare_enable(ag->clk_eth);
if (err) {
netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
- goto err_free;
+ return err;
}
ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
@@ -2019,8 +1963,6 @@ err_mdio_remove:
ag71xx_mdio_remove(ag);
err_put_clk:
clk_disable_unprepare(ag->clk_eth);
-err_free:
- free_netdev(ndev);
return err;
}
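The probe() hunks above drop the err_free/free_netdev() unwinding. That is only safe because the netdev here is device-managed (upstream ag71xx allocates it with devm_alloc_etherdev()), so every failure after allocation can simply return and the core frees it when probe fails. A sketch of the idiom, assuming a devm-allocated netdev and a hypothetical foo_hw_init():

    #include <linux/etherdevice.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct net_device *ndev;
            int err;

            /* Lifetime tied to the struct device: no free_netdev()
             * needed on the error paths below. */
            ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct foo));
            if (!ndev)
                    return -ENOMEM;

            err = foo_hw_init(ndev);        /* hypothetical helper */
            if (err)
                    return err;             /* devm unwinds the allocation */

            return register_netdev(ndev);
    }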
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b4c9e805e981..6a969969d221 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3438,7 +3438,9 @@ static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
}
static void atl1_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
@@ -3451,7 +3453,9 @@ static void atl1_get_ringparam(struct net_device *netdev,
}
static int atl1_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
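This and the following drivers all pick up the same 5.17 tree-wide change: get_ringparam()/set_ringparam() gain a struct kernel_ethtool_ringparam (for netlink-only parameters such as rx_buf_len) and a struct netlink_ext_ack. Most conversions in this diff just extend the signature, but the extra arguments allow readable error reporting, roughly as in this hypothetical sketch (FOO_MAX_RX_DESC is illustrative):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/netlink.h>

    #define FOO_MAX_RX_DESC 4096    /* illustrative limit */

    static int foo_set_ringparam(struct net_device *dev,
                                 struct ethtool_ringparam *ering,
                                 struct kernel_ethtool_ringparam *kernel_ering,
                                 struct netlink_ext_ack *extack)
    {
            if (ering->rx_pending > FOO_MAX_RX_DESC) {
                    /* Reaches the netlink caller as a readable message */
                    NL_SET_ERR_MSG_MOD(extack, "RX ring size too large");
                    return -EINVAL;
            }
            /* ...apply ering->rx_pending / ering->tx_pending... */
            return 0;
    }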
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 969591bbc066..e5857e88c207 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1961,7 +1961,9 @@ static int b44_set_link_ksettings(struct net_device *dev,
}
static void b44_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct b44 *bp = netdev_priv(dev);
@@ -1972,7 +1974,9 @@ static void b44_get_ringparam(struct net_device *dev,
}
static int b44_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct b44 *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index b07cb9bc5f2d..4a2622b05ee1 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -635,7 +635,6 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
struct bcm4908_enet_dma_ring_bd *buf_desc;
struct bcm4908_enet_dma_ring_slot *slot;
struct device *dev = enet->dev;
- unsigned int bytes = 0;
int handled = 0;
while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) {
@@ -646,7 +645,6 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
dev_kfree_skb(slot->skb);
- bytes += slot->len;
if (++tx_ring->read_idx == tx_ring->length)
tx_ring->read_idx = 0;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a568994a03a6..b04e423c446a 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1497,8 +1497,11 @@ static int bcm_enet_set_link_ksettings(struct net_device *dev,
}
}
-static void bcm_enet_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static void
+bcm_enet_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
@@ -1512,7 +1515,9 @@ static void bcm_enet_get_ringparam(struct net_device *dev,
}
static int bcm_enet_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
int was_running;
@@ -2579,8 +2584,11 @@ static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
}
}
-static void bcm_enetsw_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static void
+bcm_enetsw_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
@@ -2595,8 +2603,11 @@ static void bcm_enetsw_get_ringparam(struct net_device *dev,
ering->tx_pending = priv->tx_ring_size;
}
-static int bcm_enetsw_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static int
+bcm_enetsw_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
int was_running;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 40933bf5a710..60dde29974bf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1309,11 +1309,11 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct bcm_sysport_tx_ring *ring;
+ unsigned long flags, desc_flags;
struct bcm_sysport_cb *cb;
struct netdev_queue *txq;
u32 len_status, addr_lo;
unsigned int skb_len;
- unsigned long flags;
dma_addr_t mapping;
u16 queue;
int ret;
@@ -1373,8 +1373,10 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
ring->desc_count--;
/* Ports are latched, so write upper address first */
+ spin_lock_irqsave(&priv->desc_lock, desc_flags);
tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
+ spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
/* Check ring space and update SW control flow */
if (ring->desc_count == 0)
@@ -2013,6 +2015,7 @@ static int bcm_sysport_open(struct net_device *dev)
}
/* Initialize both hardware and software ring */
+ spin_lock_init(&priv->desc_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
ret = bcm_sysport_init_tx_ring(priv, i);
if (ret) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 984f76e74b43..16b73bb9acc7 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -711,6 +711,7 @@ struct bcm_sysport_priv {
int wol_irq;
/* Transmit rings */
+ spinlock_t desc_lock;
struct bcm_sysport_tx_ring *tx_rings;
/* Receive queue */
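The systemport fix above closes a race on multi-queue transmit: TDMA_WRITE_PORT_HI/LO form a latched register pair, so HI/LO writes from two CPUs posting to different TX rings must not interleave, and a single driver-wide spinlock now serializes each pair. The pattern, sketched with illustrative names (foo_priv is assumed to carry the spinlock):

    #include <linux/io.h>
    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    static void foo_post_desc(struct foo_priv *priv, u64 desc,
                              void __iomem *port)
    {
            unsigned long flags;

            /* The HI word is latched by hardware and commits only with
             * the following LO write; the pair must stay atomic. */
            spin_lock_irqsave(&priv->desc_lock, flags);
            writel(upper_32_bits(desc), port + FOO_PORT_HI);
            writel(lower_32_bits(desc), port + FOO_PORT_LO);
            spin_unlock_irqrestore(&priv->desc_lock, flags);
    }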
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index babc955ba64e..e20aafeb4ca9 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -7318,7 +7318,9 @@ static int bnx2_set_coalesce(struct net_device *dev,
}
static void
-bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2 *bp = netdev_priv(dev);
@@ -7389,7 +7391,9 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
}
static int
-bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2 *bp = netdev_priv(dev);
int rc;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 2b06d78baa08..a19dd6797070 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1850,6 +1850,14 @@ struct bnx2x {
/* Vxlan/Geneve related information */
u16 udp_tunnel_ports[BNX2X_UDP_PORT_MAX];
+
+#define FW_CAP_INVALIDATE_VF_FP_HSI BIT(0)
+ u32 fw_cap;
+
+ u32 fw_major;
+ u32 fw_minor;
+ u32 fw_rev;
+ u32 fw_eng;
};
/* Tx queues may be less or equal to Rx queues */
@@ -2525,5 +2533,6 @@ void bnx2x_register_phc(struct bnx2x *bp);
* Meant for implicit re-load flows.
*/
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
-
+int bnx2x_init_firmware(struct bnx2x *bp);
+void bnx2x_release_firmware(struct bnx2x *bp);
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e8e8c2d593c5..8d36ebbf08e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -25,6 +25,7 @@
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
+#include <net/gro.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
@@ -2364,10 +2365,8 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
/* build my FW version dword */
- u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
- (BCM_5710_FW_MINOR_VERSION << 8) +
- (BCM_5710_FW_REVISION_VERSION << 16) +
- (BCM_5710_FW_ENGINEERING_VERSION << 24);
+ u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) +
+ (bp->fw_rev << 16) + (bp->fw_eng << 24);
/* read loaded FW from chip */
u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 472a3a478038..0e319ac7799f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1914,7 +1914,9 @@ static int bnx2x_set_coalesce(struct net_device *dev,
}
static void bnx2x_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1938,7 +1940,9 @@ static void bnx2x_get_ringparam(struct net_device *dev,
}
static int bnx2x_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 3f8435208bf4..a84d015da5df 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -241,6 +241,8 @@
IRO[221].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[48].base + ((funcId) * IRO[48].m1))
+#define XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(fid) \
+ (IRO[386].base + ((fid) * IRO[386].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
/* eth hsi version */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 622fadc50316..611efee75834 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -3024,7 +3024,8 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 13
-#define BCM_5710_FW_REVISION_VERSION 15
+#define BCM_5710_FW_REVISION_VERSION 21
+#define BCM_5710_FW_REVISION_VERSION_V15 15
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index aec666e97683..774c1f1a57c3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -74,9 +74,19 @@
__stringify(BCM_5710_FW_MINOR_VERSION) "." \
__stringify(BCM_5710_FW_REVISION_VERSION) "." \
__stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
+#define FW_FILE_VERSION_V15 \
+ __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
+ __stringify(BCM_5710_FW_MINOR_VERSION) "." \
+ __stringify(BCM_5710_FW_REVISION_VERSION_V15) "." \
+ __stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1_V15 "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E1H_V15 "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E2_V15 "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -747,9 +757,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
CHIP_IS_E1(bp) ? "everest1" :
CHIP_IS_E1H(bp) ? "everest1h" :
CHIP_IS_E2(bp) ? "everest2" : "everest3",
- BCM_5710_FW_MAJOR_VERSION,
- BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION);
+ bp->fw_major, bp->fw_minor, bp->fw_rev);
return rc;
}
@@ -12308,6 +12316,15 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bnx2x_read_fwinfo(bp);
+ if (IS_PF(bp)) {
+ rc = bnx2x_init_firmware(bp);
+
+ if (rc) {
+ bnx2x_free_mem_bp(bp);
+ return rc;
+ }
+ }
+
func = BP_FUNC(bp);
/* need to reset chip if undi was active */
@@ -12320,6 +12337,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
rc = bnx2x_prev_unload(bp);
if (rc) {
+ bnx2x_release_firmware(bp);
bnx2x_free_mem_bp(bp);
return rc;
}
@@ -13026,19 +13044,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_features_check = bnx2x_features_check,
};
-static int bnx2x_set_coherency_mask(struct bnx2x *bp)
-{
- struct device *dev = &bp->pdev->dev;
-
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
- dev_err(dev, "System does not support DMA, aborting\n");
- return -EIO;
- }
-
- return 0;
-}
-
static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
{
if (bp->flags & AER_ENABLED) {
@@ -13116,9 +13121,11 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
goto err_out_release;
}
- rc = bnx2x_set_coherency_mask(bp);
- if (rc)
+ rc = dma_set_mask_and_coherent(&bp->pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&bp->pdev->dev, "System does not support DMA, aborting\n");
goto err_out_release;
+ }
dev->mem_start = pci_resource_start(pdev, 0);
dev->base_addr = dev->mem_start;
@@ -13317,16 +13324,11 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
/* Check FW version */
offset = be32_to_cpu(fw_hdr->fw_version.offset);
fw_ver = firmware->data + offset;
- if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
- (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
- (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
- (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+ if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor ||
+ fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) {
BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
- fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
- BCM_5710_FW_MAJOR_VERSION,
- BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION,
- BCM_5710_FW_ENGINEERING_VERSION);
+ fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
+ bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng);
return -EINVAL;
}
@@ -13404,34 +13406,51 @@ do { \
(u8 *)bp->arr, len); \
} while (0)
-static int bnx2x_init_firmware(struct bnx2x *bp)
+int bnx2x_init_firmware(struct bnx2x *bp)
{
- const char *fw_file_name;
+ const char *fw_file_name, *fw_file_name_v15;
struct bnx2x_fw_file_hdr *fw_hdr;
int rc;
if (bp->firmware)
return 0;
- if (CHIP_IS_E1(bp))
+ if (CHIP_IS_E1(bp)) {
fw_file_name = FW_FILE_NAME_E1;
- else if (CHIP_IS_E1H(bp))
+ fw_file_name_v15 = FW_FILE_NAME_E1_V15;
+ } else if (CHIP_IS_E1H(bp)) {
fw_file_name = FW_FILE_NAME_E1H;
- else if (!CHIP_IS_E1x(bp))
+ fw_file_name_v15 = FW_FILE_NAME_E1H_V15;
+ } else if (!CHIP_IS_E1x(bp)) {
fw_file_name = FW_FILE_NAME_E2;
- else {
+ fw_file_name_v15 = FW_FILE_NAME_E2_V15;
+ } else {
BNX2X_ERR("Unsupported chip revision\n");
return -EINVAL;
}
+
BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
if (rc) {
- BNX2X_ERR("Can't load firmware file %s\n",
- fw_file_name);
- goto request_firmware_exit;
+ BNX2X_DEV_INFO("Trying to load older fw %s\n", fw_file_name_v15);
+
+ /* try to load prev version */
+ rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev);
+
+ if (rc)
+ goto request_firmware_exit;
+
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15;
+ } else {
+ bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI;
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION;
}
+ bp->fw_major = BCM_5710_FW_MAJOR_VERSION;
+ bp->fw_minor = BCM_5710_FW_MINOR_VERSION;
+ bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION;
+
rc = bnx2x_check_firmware(bp);
if (rc) {
BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
@@ -13487,7 +13506,7 @@ request_firmware_exit:
return rc;
}
-static void bnx2x_release_firmware(struct bnx2x *bp)
+void bnx2x_release_firmware(struct bnx2x *bp)
{
kfree(bp->init_ops_offsets);
kfree(bp->init_ops);
@@ -14004,6 +14023,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
init_one_freemem:
+ bnx2x_release_firmware(bp);
bnx2x_free_mem_bp(bp);
init_one_exit:
@@ -15356,11 +15376,6 @@ static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
config.tx_type, config.rx_filter);
- if (config.flags) {
- BNX2X_ERR("config.flags is reserved for future use\n");
- return -EINVAL;
- }
-
bp->hwtstamp_ioctl_called = true;
bp->tx_type = config.tx_type;
bp->rx_filter = config.rx_filter;
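The bnx2x changes above move the firmware version from compile-time constants into bp->fw_* fields so the driver can try the new 7.13.21 blob and fall back to 7.13.15, recording which revision actually loaded and gating the VF fp_hsi invalidation on FW_CAP_INVALIDATE_VF_FP_HSI. The fallback shape, sketched with illustrative names:

    #include <linux/firmware.h>

    static int foo_load_fw(struct foo *priv, struct device *dev)
    {
            int rc;

            rc = request_firmware(&priv->fw, "foo-new.fw", dev);
            if (rc) {
                    /* Older blob keeps existing installs working */
                    rc = request_firmware(&priv->fw, "foo-old.fw", dev);
                    if (rc)
                            return rc;      /* neither blob available */
                    priv->fw_rev = FOO_FW_REV_OLD;
            } else {
                    priv->fw_rev = FOO_FW_REV_NEW;
                    priv->caps |= FOO_CAP_NEW_ONLY_FEATURE;
            }
            return 0;
    }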
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 74a8931ce1d1..11d15cd03600 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -758,9 +758,18 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
+ u16 abs_fid;
+
+ abs_fid = FW_VF_HANDLE(abs_vfid);
+
/* set the VF-PF association in the FW */
- storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
- storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
+ storm_memset_vf_to_pf(bp, abs_fid, BP_FUNC(bp));
+ storm_memset_func_en(bp, abs_fid, 1);
+
+ /* Invalidate fp_hsi version for vfs */
+ if (bp->fw_cap & FW_CAP_INVALIDATE_VF_FP_HSI)
+ REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(abs_fid), 0);
/* clear vf errors*/
bnx2x_vf_semi_clear_err(bp, abs_vfid);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 8c2cf5519787..2dac704dc346 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -586,7 +586,7 @@ static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
-static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
+static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr,
u8 vf_qid, bool set) {return 0; }
static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *params) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 0b193edb73b8..2bb133ae61c3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -849,7 +849,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
memcpy(old, new, sizeof(struct nig_stats));
- memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
+ BUILD_BUG_ON(sizeof(estats->shared) != sizeof(pstats->mac_stx[1]));
+ memcpy(&(estats->shared), &(pstats->mac_stx[1]),
sizeof(struct mac_stx));
estats->brb_drop_hi = pstats->brb_drop_hi;
estats->brb_drop_lo = pstats->brb_drop_lo;
@@ -1634,9 +1635,9 @@ void bnx2x_stats_init(struct bnx2x *bp)
REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
if (!CHIP_IS_E3(bp)) {
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
- &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+ &(bp->port.old_nig_stats.egress_mac_pkt0), 2);
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
- &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+ &(bp->port.old_nig_stats.egress_mac_pkt1), 2);
}
/* Prepare statistics ramrod data */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index d55e63692cf3..ae93c078707b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -36,10 +36,14 @@ struct nig_stats {
u32 pbf_octets;
u32 pbf_packet;
u32 safc_inp;
- u32 egress_mac_pkt0_lo;
- u32 egress_mac_pkt0_hi;
- u32 egress_mac_pkt1_lo;
- u32 egress_mac_pkt1_hi;
+ struct_group(egress_mac_pkt0,
+ u32 egress_mac_pkt0_lo;
+ u32 egress_mac_pkt0_hi;
+ );
+ struct_group(egress_mac_pkt1,
+ u32 egress_mac_pkt1_lo;
+ u32 egress_mac_pkt1_hi;
+ );
};
enum bnx2x_stats_event {
@@ -83,6 +87,7 @@ struct bnx2x_eth_stats {
u32 no_buff_discard_hi;
u32 no_buff_discard_lo;
+ struct_group(shared,
u32 rx_stat_ifhcinbadoctets_hi;
u32 rx_stat_ifhcinbadoctets_lo;
u32 tx_stat_ifhcoutbadoctets_hi;
@@ -159,6 +164,7 @@ struct bnx2x_eth_stats {
u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
u32 tx_stat_bmac_ufl_hi;
u32 tx_stat_bmac_ufl_lo;
+ );
u32 pause_frames_received_hi;
u32 pause_frames_received_lo;
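struct_group() (from <linux/stddef.h>) wraps the run of mirrored counters in a named member, so the memcpy() in bnx2x_hw_stats_update() copies one addressable region instead of writing past &rx_stat_ifhcinbadoctets_hi; that is what lets FORTIFY_SOURCE bounds-check the copy, and the BUILD_BUG_ON pins the two layouts to the same size. In miniature:

    #include <linux/stddef.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct foo_stats {
            u32 unrelated;
            struct_group(shared,    /* members stay addressable as before */
                    u32 a_hi;
                    u32 a_lo;
            );
    };

    static void foo_copy(struct foo_stats *dst, const struct foo_stats *src)
    {
            /* Copies exactly the group; a fortified memcpy() can now
             * verify the destination bounds. */
            memcpy(&dst->shared, &src->shared, sizeof(dst->shared));
    }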
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c04ea83188e2..4f94136a011a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -37,6 +37,7 @@
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
+#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
@@ -740,13 +741,16 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
return page;
}
-static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
+static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
gfp_t gfp)
{
u8 *data;
struct pci_dev *pdev = bp->pdev;
- data = kmalloc(bp->rx_buf_size, gfp);
+ if (gfp == GFP_ATOMIC)
+ data = napi_alloc_frag(bp->rx_buf_size);
+ else
+ data = netdev_alloc_frag(bp->rx_buf_size);
if (!data)
return NULL;
@@ -755,7 +759,7 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(&pdev->dev, *mapping)) {
- kfree(data);
+ skb_free_frag(data);
data = NULL;
}
return data;
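bnxt's small-packet RX buffers switch from kmalloc() to the page-frag allocators, which are cheaper per buffer in softirq context; the matching changes pass the true fragment size to build_skb() so the skb_shared_info lands in the buffer's tailroom, and free with skb_free_frag() instead of kfree(). A sketch of the pairing, assuming rx_buf_size already includes the shared-info tailroom as bp->rx_buf_size does:

    #include <linux/skbuff.h>

    static struct sk_buff *foo_rx_to_skb(unsigned int rx_buf_size, bool in_napi)
    {
            struct sk_buff *skb;
            void *data;

            data = in_napi ? napi_alloc_frag(rx_buf_size) :
                             netdev_alloc_frag(rx_buf_size);
            if (!data)
                    return NULL;

            skb = build_skb(data, rx_buf_size);     /* zero-copy wrap */
            if (!skb)
                    skb_free_frag(data);            /* frag, not kfree() */
            return skb;
    }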
@@ -778,7 +782,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
rx_buf->data = page;
rx_buf->data_ptr = page_address(page) + bp->rx_offset;
} else {
- u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+ u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
if (!data)
return -ENOMEM;
@@ -1020,11 +1024,11 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return NULL;
}
- skb = build_skb(data, 0);
+ skb = build_skb(data, bp->rx_buf_size);
dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
if (!skb) {
- kfree(data);
+ skb_free_frag(data);
return NULL;
}
@@ -1612,7 +1616,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
u8 *new_data;
dma_addr_t new_mapping;
- new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
+ new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats.rx.rx_oom_discards += 1;
@@ -1623,13 +1627,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info->data_ptr = new_data + bp->rx_offset;
tpa_info->mapping = new_mapping;
- skb = build_skb(data, 0);
+ skb = build_skb(data, bp->rx_buf_size);
dma_unmap_single_attrs(&bp->pdev->dev, mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
if (!skb) {
- kfree(data);
+ skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats.rx.rx_oom_discards += 1;
return NULL;
@@ -2043,13 +2047,22 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
{
- switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
+ u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
+
+ switch (err_type) {
case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
+ netdev_warn(bp->dev, "Pause Storm detected!\n");
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
+ netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
+ break;
default:
- netdev_err(bp->dev, "FW reported unknown error type\n");
+ netdev_err(bp->dev, "FW reported unknown error type %u\n",
+ err_type);
break;
}
}
@@ -2073,6 +2086,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
u32 data1 = le32_to_cpu(cmpl->event_data1);
u32 data2 = le32_to_cpu(cmpl->event_data2);
+ netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
+ event_id, data1, data2);
+
/* TODO CHIMP_FW: Define event id's for link change, error etc */
switch (event_id) {
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
@@ -2419,7 +2435,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (event & BNXT_REDIRECT_EVENT)
- xdp_do_flush_map();
+ xdp_do_flush();
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
@@ -2622,6 +2638,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
{
struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_cp_ring_info *cpr_rx;
u32 raw_cons = cpr->cp_raw_cons;
struct bnxt *bp = bnapi->bp;
struct nqe_cn *nqcmp;
@@ -2649,7 +2666,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
if (napi_complete_done(napi, work_done))
BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
cpr->cp_raw_cons);
- return work_done;
+ goto poll_done;
}
/* The valid test of the entry must be done first before
@@ -2675,6 +2692,17 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
cpr->cp_raw_cons = raw_cons;
BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
}
+poll_done:
+ cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
+ if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
+ struct dim_sample dim_sample = {};
+
+ dim_update_sample(cpr->event_ctr,
+ cpr_rx->rx_packets,
+ cpr_rx->rx_bytes,
+ &dim_sample);
+ net_dim(&cpr->dim, dim_sample);
+ }
return work_done;
}
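On P5 chips the NQ poller previously returned without feeding dynamic interrupt moderation; the poll_done path now samples the RX completion ring's counters into a dim_sample so net_dim() can retune coalescing. The sampling pattern, with assumed field names:

    #include <linux/dim.h>

    static void foo_dim_sample(struct foo_ring *ring)
    {
            struct dim_sample sample = {};

            dim_update_sample(ring->event_ctr, ring->rx_packets,
                              ring->rx_bytes, &sample);
            net_dim(&ring->dim, sample);    /* may schedule a profile change */
    }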
@@ -2774,7 +2802,7 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
tpa_info->data = NULL;
- kfree(data);
+ skb_free_frag(data);
}
skip_rx_tpa_free:
@@ -2800,7 +2828,7 @@ skip_rx_tpa_free:
dma_unmap_single_attrs(&pdev->dev, mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- kfree(data);
+ skb_free_frag(data);
}
}
@@ -3504,7 +3532,7 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
u8 *data;
for (i = 0; i < bp->max_tpa; i++) {
- data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
+ data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -6474,8 +6502,8 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ u16 val, tmr, max, flags = hw_coal->flags;
u32 cmpl_params = coal_cap->cmpl_params;
- u16 val, tmr, max, flags = 0;
max = hw_coal->bufs_per_record * 128;
if (hw_coal->budget)
@@ -6518,8 +6546,6 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
}
- if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
- flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
@@ -7668,19 +7694,6 @@ static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
BNXT_FW_HEALTH_WIN_MAP_OFF);
}
-bool bnxt_is_fw_healthy(struct bnxt *bp)
-{
- if (bp->fw_health && bp->fw_health->status_reliable) {
- u32 fw_status;
-
- fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
- if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
- return false;
- }
-
- return true;
-}
-
static void bnxt_inv_fw_health_reg(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -8008,6 +8021,12 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+ bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
+ if (!bp->hwrm_cmd_max_timeout)
+ bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
+ else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
+ netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
+ bp->hwrm_cmd_max_timeout / 1000);
if (resp->hwrm_intf_maj_8b >= 1) {
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
@@ -8611,7 +8630,10 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
/* Filter for default vnic 0 */
rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
if (rc) {
- netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ if (BNXT_VF(bp) && rc == -ENODEV)
+ netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
+ else
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
goto err_out;
}
vnic->uc_filter_count = 1;
@@ -9404,6 +9426,10 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
rc = hwrm_req_send(bp, req);
if (rc) {
hwrm_req_drop(bp, req);
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
+ rc = 0;
+ }
return rc;
}
@@ -10802,12 +10828,21 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
if (rc) {
- netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
- rc);
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+ netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
+ else
+ netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
+ rc = 0;
+ } else {
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ }
vnic->uc_filter_count = i;
return rc;
}
}
+ if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+ netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
skip_uc:
if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
@@ -11372,6 +11407,11 @@ static void bnxt_timer(struct timer_list *t)
}
}
+ if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
+ set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+ }
+
if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
netif_carrier_ok(dev)) {
set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
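The BNXT_STATE_L2_FILTER_RETRY plumbing implements a deferred retry: when the PF is unavailable (-ENODEV from HWRM), the filter failure is downgraded to a warning and a state bit is set; the periodic timer then converts the bit into a service-task event until the configuration finally succeeds, at which point the bit is cleared and a notice is logged. The timer half of the pattern, sketched with illustrative names:

    #include <linux/bitops.h>
    #include <linux/timer.h>
    #include <linux/workqueue.h>

    static void foo_timer(struct timer_list *t)
    {
            struct foo *priv = from_timer(priv, t, timer);

            /* Pending retry? Hand it to the service task rather than
             * doing slow config work from timer (atomic) context. */
            if (test_bit(FOO_STATE_RETRY, &priv->state)) {
                    set_bit(FOO_EVENT_RX_MASK, &priv->sp_event);
                    schedule_work(&priv->sp_task);
            }

            mod_timer(&priv->timer, jiffies + priv->interval);
    }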
@@ -11875,7 +11915,13 @@ static void bnxt_cleanup_pci(struct bnxt *bp)
static void bnxt_init_dflt_coal(struct bnxt *bp)
{
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
struct bnxt_coal *coal;
+ u16 flags = 0;
+
+ if (coal_cap->cmpl_params &
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
+ flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
/* Tick values in micro seconds.
* 1 coal_buf x bufs_per_record = 1 completion record.
@@ -11888,6 +11934,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
coal->idle_thresh = 50;
coal->bufs_per_record = 2;
coal->budget = 64; /* NAPI budget */
+ coal->flags = flags;
coal = &bp->tx_coal;
coal->coal_ticks = 28;
@@ -11895,6 +11942,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
coal->coal_ticks_irq = 2;
coal->coal_bufs_irq = 2;
coal->bufs_per_record = 1;
+ coal->flags = flags;
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
@@ -12381,8 +12429,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
- bnxt_init_dflt_coal(bp);
-
timer_setup(&bp->timer, bnxt_timer, 0);
bp->current_interval = BNXT_TIMER_INTERVAL;
@@ -13072,7 +13118,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
rc = __bnxt_reserve_rings(bp);
- if (rc)
+ if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
if (sh)
@@ -13081,7 +13127,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
/* Rings may have been trimmed, re-reserve the trimmed rings. */
if (bnxt_need_reserve_rings(bp)) {
rc = __bnxt_reserve_rings(bp);
- if (rc)
+ if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "2nd rings reservation failed.\n");
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
}
@@ -13107,7 +13153,10 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
bnxt_clear_int_mode(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
- netdev_err(bp->dev, "Not enough rings available.\n");
+ if (BNXT_VF(bp) && rc == -ENODEV)
+ netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
+ else
+ netdev_err(bp->dev, "Not enough rings available.\n");
goto init_dflt_ring_err;
}
rc = bnxt_init_int_mode(bp);
@@ -13395,13 +13444,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_ring_params(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
- netdev_err(bp->dev, "Not enough rings available.\n");
- rc = -ENOMEM;
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
+ } else {
+ netdev_err(bp->dev, "Not enough rings available.\n");
+ rc = -ENOMEM;
+ }
goto init_err_pci_clean;
}
bnxt_fw_init_one_p3(bp);
+ bnxt_init_dflt_coal(bp);
+
if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 4c9507d82fd0..440dfeb4948b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -847,6 +847,7 @@ struct bnxt_coal {
u16 idle_thresh;
u8 bufs_per_record;
u8 budget;
+ u16 flags;
};
struct bnxt_tpa_info {
@@ -1915,6 +1916,7 @@ struct bnxt {
#define BNXT_STATE_DRV_REGISTERED 7
#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
#define BNXT_STATE_NAPI_DISABLED 9
+#define BNXT_STATE_L2_FILTER_RETRY 10
#define BNXT_STATE_FW_ACTIVATE 11
#define BNXT_STATE_RECOVER 12
#define BNXT_STATE_FW_NON_FATAL_COND 13
@@ -1985,7 +1987,8 @@ struct bnxt {
u16 hwrm_max_req_len;
u16 hwrm_max_ext_req_len;
- int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_max_timeout;
struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
struct hwrm_ver_get_output ver_resp;
#define FW_VER_STR_LEN 32
@@ -2302,7 +2305,6 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
-bool bnxt_is_fw_healthy(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index d3cb2f21946d..c06789882036 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -32,7 +32,7 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
return -ENOMEM;
}
- hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT);
+ hwrm_req_timeout(bp, msg, bp->hwrm_cmd_max_timeout);
cmn_resp = hwrm_req_hold(bp, msg);
resp = cmn_resp;
@@ -125,7 +125,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
if (rc)
return rc;
- hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT);
+ hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 951c4c569a9b..4da31b1b84f9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
#include <net/devlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8188d55722e4..003330e8cd58 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -31,9 +31,6 @@
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"
-#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
-#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
-#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
static u32 bnxt_get_msglevel(struct net_device *dev)
{
@@ -68,6 +65,9 @@ static int bnxt_get_coalesce(struct net_device *dev,
coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
+ if (hw_coal->flags &
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
+ kernel_coal->use_cqe_mode_rx = true;
hw_coal = &bp->tx_coal;
mult = hw_coal->bufs_per_record;
@@ -75,6 +75,9 @@ static int bnxt_get_coalesce(struct net_device *dev,
coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
+ if (hw_coal->flags &
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
+ kernel_coal->use_cqe_mode_tx = true;
coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
@@ -101,12 +104,22 @@ static int bnxt_set_coalesce(struct net_device *dev,
}
}
+ if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
+ !(bp->coal_cap.cmpl_params &
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
+ return -EOPNOTSUPP;
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
hw_coal->coal_ticks = coal->rx_coalesce_usecs;
hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
+ hw_coal->flags &=
+ ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if (kernel_coal->use_cqe_mode_rx)
+ hw_coal->flags |=
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
hw_coal = &bp->tx_coal;
mult = hw_coal->bufs_per_record;
@@ -114,6 +127,11 @@ static int bnxt_set_coalesce(struct net_device *dev,
hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
+ hw_coal->flags &=
+ ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if (kernel_coal->use_cqe_mode_tx)
+ hw_coal->flags |=
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
u32 stats_ticks = coal->stats_block_coalesce_usecs;
@@ -775,7 +793,9 @@ skip_tpa_stats:
}
static void bnxt_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
@@ -794,7 +814,9 @@ static void bnxt_get_ringparam(struct net_device *dev,
}
static int bnxt_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
@@ -2169,7 +2191,7 @@ static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
req->host_src_addr = cpu_to_le64(dma_handle);
}
- hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT);
+ hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
req->dir_type = cpu_to_le16(dir_type);
req->dir_ordinal = cpu_to_le16(dir_ordinal);
req->dir_ext = cpu_to_le16(dir_ext);
@@ -2515,8 +2537,8 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
return rc;
}
- hwrm_req_timeout(bp, modify, FLASH_PACKAGE_TIMEOUT);
- hwrm_req_timeout(bp, install, INSTALL_PACKAGE_TIMEOUT);
+ hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
+ hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);
hwrm_req_hold(bp, modify);
modify->host_src_addr = cpu_to_le64(dma_handle);
@@ -3917,7 +3939,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_STATS_BLOCK_USECS |
- ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_USE_CQE,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_fec_stats = bnxt_get_fec_stats,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index bb7327b82d0b..566c9487ef55 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -359,6 +359,8 @@ static int __hwrm_to_stderr(u32 hwrm_err)
return -EAGAIN;
case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
return -EOPNOTSUPP;
+ case HWRM_ERR_CODE_PF_UNAVAILABLE:
+ return -ENODEV;
default:
return -EIO;
}
@@ -416,6 +418,44 @@ hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state)
netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
}
+static void hwrm_req_dbg(struct bnxt *bp, struct input *req)
+{
+ u32 ring = le16_to_cpu(req->cmpl_ring);
+ u32 type = le16_to_cpu(req->req_type);
+ u32 tgt = le16_to_cpu(req->target_id);
+ u32 seq = le16_to_cpu(req->seq_id);
+ char opt[32] = "\n";
+
+ if (unlikely(ring != (u16)BNXT_HWRM_NO_CMPL_RING))
+ snprintf(opt, 16, " ring %d\n", ring);
+
+ if (unlikely(tgt != BNXT_HWRM_TARGET))
+ snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);
+
+ netdev_dbg(bp->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
+ type, seq, opt);
+}
+
+#define hwrm_err(bp, ctx, fmt, ...) \
+ do { \
+ if ((ctx)->flags & BNXT_HWRM_CTX_SILENT) \
+ netdev_dbg((bp)->dev, fmt, __VA_ARGS__); \
+ else \
+ netdev_err((bp)->dev, fmt, __VA_ARGS__); \
+ } while (0)
+
+static bool hwrm_wait_must_abort(struct bnxt *bp, u32 req_type, u32 *fw_status)
+{
+ if (req_type == HWRM_VER_GET)
+ return false;
+
+ if (!bp->fw_health || !bp->fw_health->status_reliable)
+ return false;
+
+ *fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ return *fw_status && !BNXT_FW_IS_HEALTHY(*fw_status);
+}
+
static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
@@ -427,8 +467,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
unsigned int i, timeout, tmo_count;
u32 *data = (u32 *)ctx->req;
u32 msg_len = ctx->req_len;
+ u32 req_type, sts;
int rc = -EBUSY;
- u32 req_type;
u16 len = 0;
u8 *valid;
@@ -436,8 +476,11 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
memset(ctx->resp, 0, PAGE_SIZE);
req_type = le16_to_cpu(ctx->req->req_type);
- if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET)
+ if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET) {
+ netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n",
+ req_type);
goto exit;
+ }
if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
msg_len > bp->hwrm_max_ext_req_len) {
@@ -490,13 +533,15 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
/* Ring channel doorbell */
writel(1, bp->bar0 + doorbell_offset);
+ hwrm_req_dbg(bp, ctx->req);
+
if (!pci_is_enabled(bp->pdev)) {
rc = -ENODEV;
goto exit;
}
/* Limit timeout to an upper limit */
- timeout = min_t(uint, ctx->timeout, HWRM_CMD_MAX_TIMEOUT);
+ timeout = min(ctx->timeout, bp->hwrm_cmd_max_timeout ?: HWRM_CMD_MAX_TIMEOUT);
/* convert timeout to usec */
timeout *= 1000;
@@ -523,17 +568,19 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
} else {
- if (HWRM_WAIT_MUST_ABORT(bp, ctx))
- break;
+ if (hwrm_wait_must_abort(bp, req_type, &sts)) {
+ hwrm_err(bp, ctx, "Resp cmpl intr abandoning msg: 0x%x due to firmware status: 0x%x\n",
+ req_type, sts);
+ goto exit;
+ }
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
}
}
if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) {
- if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
- netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
- le16_to_cpu(ctx->req->req_type));
+ hwrm_err(bp, ctx, "Resp cmpl intr err msg: 0x%x\n",
+ req_type);
goto exit;
}
len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
@@ -565,7 +612,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
if (resp_seq != seen_out_of_seq) {
netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
le16_to_cpu(resp_seq),
- le16_to_cpu(ctx->req->req_type),
+ req_type,
le16_to_cpu(ctx->req->seq_id));
seen_out_of_seq = resp_seq;
}
@@ -576,20 +623,22 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
} else {
- if (HWRM_WAIT_MUST_ABORT(bp, ctx))
- goto timeout_abort;
+ if (hwrm_wait_must_abort(bp, req_type, &sts)) {
+ hwrm_err(bp, ctx, "Abandoning msg {0x%x 0x%x} len: %d due to firmware status: 0x%x\n",
+ req_type,
+ le16_to_cpu(ctx->req->seq_id),
+ len, sts);
+ goto exit;
+ }
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
}
}
if (i >= tmo_count) {
-timeout_abort:
- if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
- netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
- hwrm_total_timeout(i),
- le16_to_cpu(ctx->req->req_type),
- le16_to_cpu(ctx->req->seq_id), len);
+ hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
+ hwrm_total_timeout(i), req_type,
+ le16_to_cpu(ctx->req->seq_id), len);
goto exit;
}
@@ -604,12 +653,9 @@ timeout_abort:
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
- if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
- netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
- hwrm_total_timeout(i),
- le16_to_cpu(ctx->req->req_type),
- le16_to_cpu(ctx->req->seq_id), len,
- *valid);
+ hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
+ hwrm_total_timeout(i), req_type,
+ le16_to_cpu(ctx->req->seq_id), len, *valid);
goto exit;
}
}
@@ -620,11 +666,12 @@ timeout_abort:
*/
*valid = 0;
rc = le16_to_cpu(ctx->resp->error_code);
- if (rc && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) {
- netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
- le16_to_cpu(ctx->resp->req_type),
- le16_to_cpu(ctx->resp->seq_id), rc);
- }
+ if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT))
+ netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n",
+ req_type);
+ else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
+ hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+ req_type, token->seq_id, rc);
rc = __hwrm_to_stderr(rc);
exit:
if (token)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index 4d17f0d5363b..d52bd2d63aec 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -58,11 +58,10 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
-#define HWRM_CMD_MAX_TIMEOUT 40000
+#define HWRM_CMD_MAX_TIMEOUT 40000U
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
-#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12)
#define BNXT_HWRM_TARGET 0xffff
#define BNXT_HWRM_NO_CMPL_RING -1
#define BNXT_HWRM_REQ_MAX_SIZE 128
@@ -83,10 +82,6 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define HWRM_MIN_TIMEOUT 25
#define HWRM_MAX_TIMEOUT 40
-#define HWRM_WAIT_MUST_ABORT(bp, ctx) \
- (le16_to_cpu((ctx)->req->req_type) != HWRM_VER_GET && \
- !bnxt_is_fw_healthy(bp))
-
static inline unsigned int hwrm_total_timeout(unsigned int n)
{
return n <= HWRM_SHORT_TIMEOUT_COUNTER ? n * HWRM_SHORT_MIN_TIMEOUT :
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 8388be119f9a..48520967746f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -417,9 +417,6 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
return -EFAULT;
- if (stmpconf.flags)
- return -EINVAL;
-
if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
stmpconf.tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 1471b6130a2b..d8afcf8d6b30 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1962,7 +1962,7 @@ static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, v
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
- if (!bnxt_is_netdev_indr_offload(netdev))
+ if (!netdev || !bnxt_is_netdev_indr_offload(netdev))
return -EOPNOTSUPP;
switch (type) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c8083df5e0ab..52fad0fdeacf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -195,7 +195,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event |= BNXT_REDIRECT_EVENT;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(bp->dev, xdp_prog, act);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 226f4403cfed..87f1056e29ff 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -4020,10 +4020,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
- err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
- dev->name, priv);
- if (!err)
- device_set_wakeup_capable(&pdev->dev, 1);
+ if (priv->wol_irq > 0) {
+ err = devm_request_irq(&pdev->dev, priv->wol_irq,
+ bcmgenet_wol_isr, 0, dev->name, priv);
+ if (!err)
+ device_set_wakeup_capable(&pdev->dev, 1);
+ }
/* Set the needed headroom to account for any possible
* features enabling/disabling at runtime
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 5f259641437a..c888ddee1fc4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -589,9 +589,9 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
* Internal or external PHY with MDIO access
*/
phydev = phy_attach(priv->dev, phy_name, pd->phy_interface);
- if (!phydev) {
+ if (IS_ERR(phydev)) {
dev_err(kdev, "failed to register PHY device\n");
- return -ENODEV;
+ return PTR_ERR(phydev);
}
} else {
/*
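The bcmmii change fixes a classic error-handling bug: phy_attach() reports failure with ERR_PTR(), never NULL, so the old !phydev test could pass an error pointer along as a valid device. The correct shape:

    #include <linux/err.h>
    #include <linux/phy.h>

    static int foo_attach_phy(struct net_device *ndev, const char *bus_id,
                              phy_interface_t iface)
    {
            struct phy_device *phydev = phy_attach(ndev, bus_id, iface);

            /* ERR_PTR-returning API: IS_ERR(), not a NULL check */
            if (IS_ERR(phydev))
                    return PTR_ERR(phydev);

            return 0;
    }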
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index f38f40eb966e..a1a38456c9a3 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2183,9 +2183,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
ea_reg >>= 8;
}
- for (i = 0; i < 6; i++) {
- dev->dev_addr[i] = eaddr[i];
- }
+ eth_hw_addr_set(dev, eaddr);
/*
* Initialize context (get pointers to registers and stuff), then
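netdev->dev_addr is const as of this cycle, so the byte-by-byte copy in sbmac_init() no longer compiles; eth_hw_addr_set() is the sanctioned setter, going through the core so address changes stay visible to it. Minimal form:

    #include <linux/etherdevice.h>

    static void foo_set_mac(struct net_device *dev, const u8 eaddr[ETH_ALEN])
    {
            /* dev->dev_addr is const; write only via the core helper */
            eth_hw_addr_set(dev, eaddr);
    }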
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 85ca3909859d..c28f8cc00d1c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12390,7 +12390,10 @@ static int tg3_nway_reset(struct net_device *dev)
return r;
}
-static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static void tg3_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
@@ -12411,7 +12414,10 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
ering->tx_pending = tp->napi[0].tx_pending;
}
-static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static int tg3_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
int i, irq_sync = 0, err = 0;
@@ -13800,9 +13806,6 @@ static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
return -EFAULT;
- if (stmpconf.flags)
- return -EINVAL;
-
if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
stmpconf.tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index bbdc829c3524..f1d2c4cd5da2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3421,7 +3421,7 @@ static const struct net_device_ops bnad_netdev_ops = {
};
static void
-bnad_netdev_init(struct bnad *bnad, bool using_dac)
+bnad_netdev_init(struct bnad *bnad)
{
struct net_device *netdev = bnad->netdev;
@@ -3434,10 +3434,8 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
-
- if (using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HIGHDMA;
netdev->mem_start = bnad->mmio_start;
netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
@@ -3544,8 +3542,7 @@ bnad_lock_uninit(struct bnad *bnad)
/* PCI Initialization */
static int
-bnad_pci_init(struct bnad *bnad,
- struct pci_dev *pdev, bool *using_dac)
+bnad_pci_init(struct bnad *bnad, struct pci_dev *pdev)
{
int err;
@@ -3555,14 +3552,9 @@ bnad_pci_init(struct bnad *bnad,
err = pci_request_regions(pdev, BNAD_NAME);
if (err)
goto disable_device;
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- *using_dac = true;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
- goto release_regions;
- *using_dac = false;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ goto release_regions;
pci_set_master(pdev);
return 0;
@@ -3585,7 +3577,6 @@ static int
bnad_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pcidev_id)
{
- bool using_dac;
int err;
struct bnad *bnad;
struct bna *bna;
@@ -3615,13 +3606,8 @@ bnad_pci_probe(struct pci_dev *pdev,
bnad->id = atomic_inc_return(&bna_id) - 1;
mutex_lock(&bnad->conf_mutex);
- /*
- * PCI initialization
- * Output : using_dac = 1 for 64 bit DMA
- * = 0 for 32 bit DMA
- */
- using_dac = false;
- err = bnad_pci_init(bnad, pdev, &using_dac);
+ /* PCI initialization */
+ err = bnad_pci_init(bnad, pdev);
if (err)
goto unlock_mutex;
@@ -3634,7 +3620,7 @@ bnad_pci_probe(struct pci_dev *pdev,
goto pci_uninit;
/* Initialize netdev structure, set up ethtool ops */
- bnad_netdev_init(bnad, using_dac);
+ bnad_netdev_init(bnad);
/* Set link to down state */
netif_carrier_off(netdev);
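
The using_dac removals in this series all rest on one observation: when dma_set_mask_and_coherent() fails for a 64-bit mask, a 32-bit mask would fail for the same reason, so the fallback leg is dead code and NETIF_F_HIGHDMA can be set unconditionally. A sketch of the resulting probe-time pattern:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		return err;			/* probe unwinds as before */
	}
	netdev->features |= NETIF_F_HIGHDMA;	/* no longer gated on using_dac */
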
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 391b85f25141..8aca768571b2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -235,13 +235,18 @@ static int
bnad_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- u32 supported, advertising;
-
- supported = SUPPORTED_10000baseT_Full;
- advertising = ADVERTISED_10000baseT_Full;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseLR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseLR_Full);
cmd->base.autoneg = AUTONEG_DISABLE;
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
cmd->base.port = PORT_FIBRE;
cmd->base.phy_address = 0;
@@ -253,11 +258,6 @@ bnad_get_link_ksettings(struct net_device *netdev,
cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
return 0;
}
@@ -405,7 +405,9 @@ static int bnad_set_coalesce(struct net_device *netdev,
static void
bnad_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct bnad *bnad = netdev_priv(netdev);
@@ -418,7 +420,9 @@ bnad_get_ringparam(struct net_device *netdev,
static int
bnad_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
int i, current_err, err = 0;
struct bnad *bnad = netdev_priv(netdev);
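
The legacy u32 SUPPORTED_ and ADVERTISED_ masks cannot express link modes above bit 31, and 10000baseCR/SR/LR_Full are among those, which is why the bnad conversion above switches to the link-mode bitmap helpers. The pattern, sketched:

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	/* a fixed-speed port advertises exactly what it supports */
	linkmode_copy(cmd->link_modes.advertising, cmd->link_modes.supported);
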
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 5620b97b3482..9ddbee7de72b 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1271,7 +1271,8 @@ struct macb {
struct mii_bus *mii_bus;
struct phylink *phylink;
struct phylink_config phylink_config;
- struct phylink_pcs phylink_pcs;
+ struct phylink_pcs phylink_usx_pcs;
+ struct phylink_pcs phylink_sgmii_pcs;
u32 caps;
unsigned int dma_burst_length;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index ffce528aa00e..a363da928e8b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -506,79 +506,11 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
netdev_err(bp->dev, "adjusting tx_clk failed.\n");
}
-static void macb_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct net_device *ndev = to_net_dev(config->dev);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct macb *bp = netdev_priv(ndev);
-
- /* We only support MII, RMII, GMII, RGMII & SGMII. */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_RMII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_10GBASER &&
- !phy_interface_mode_is_rgmii(state->interface)) {
- linkmode_zero(supported);
- return;
- }
-
- if (!macb_is_gem(bp) &&
- (state->interface == PHY_INTERFACE_MODE_GMII ||
- phy_interface_mode_is_rgmii(state->interface))) {
- linkmode_zero(supported);
- return;
- }
-
- if (state->interface == PHY_INTERFACE_MODE_10GBASER &&
- !(bp->caps & MACB_CAPS_HIGH_SPEED &&
- bp->caps & MACB_CAPS_PCS)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Asym_Pause);
-
- if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
- (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_10GBASER)) {
- phylink_set_10g_modes(mask);
- phylink_set(mask, 10000baseKR_Full);
- if (state->interface != PHY_INTERFACE_MODE_NA)
- goto out;
- }
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
- (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_GMII ||
- state->interface == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_rgmii(state->interface))) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
-
- if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
- phylink_set(mask, 1000baseT_Half);
- }
-out:
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface, int speed,
int duplex)
{
- struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
u32 config;
config = gem_readl(bp, USX_CONTROL);
@@ -592,7 +524,7 @@ static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
- struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
u32 val;
state->speed = SPEED_10000;
@@ -612,7 +544,7 @@ static int macb_usx_pcs_config(struct phylink_pcs *pcs,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
- struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
GEM_BIT(SIGNAL_OK));
@@ -795,28 +727,23 @@ static void macb_mac_link_up(struct phylink_config *config,
netif_tx_wake_all_queues(ndev);
}
-static int macb_mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface)
+static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct net_device *ndev = to_net_dev(config->dev);
struct macb *bp = netdev_priv(ndev);
if (interface == PHY_INTERFACE_MODE_10GBASER)
- bp->phylink_pcs.ops = &macb_phylink_usx_pcs_ops;
+ return &bp->phylink_usx_pcs;
else if (interface == PHY_INTERFACE_MODE_SGMII)
- bp->phylink_pcs.ops = &macb_phylink_pcs_ops;
+ return &bp->phylink_sgmii_pcs;
else
- bp->phylink_pcs.ops = NULL;
-
- if (bp->phylink_pcs.ops)
- phylink_set_pcs(bp->phylink, &bp->phylink_pcs);
-
- return 0;
+ return NULL;
}
static const struct phylink_mac_ops macb_phylink_ops = {
- .validate = macb_validate,
- .mac_prepare = macb_mac_prepare,
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = macb_mac_select_pcs,
.mac_config = macb_mac_config,
.mac_link_down = macb_mac_link_down,
.mac_link_up = macb_mac_link_up,
@@ -874,6 +801,9 @@ static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
+ bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
+ bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
+
bp->phylink_config.dev = &dev->dev;
bp->phylink_config.type = PHYLINK_NETDEV;
@@ -882,6 +812,35 @@ static int macb_mii_probe(struct net_device *dev)
bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
}
+ bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ bp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ bp->phylink_config.supported_interfaces);
+
+ /* Determine what modes are supported */
+ if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
+ bp->phylink_config.mac_capabilities |= MAC_1000FD;
+ if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
+ bp->phylink_config.mac_capabilities |= MAC_1000HD;
+
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ bp->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);
+
+ if (bp->caps & MACB_CAPS_PCS)
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ bp->phylink_config.supported_interfaces);
+
+ if (bp->caps & MACB_CAPS_HIGH_SPEED) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ bp->phylink_config.supported_interfaces);
+ bp->phylink_config.mac_capabilities |= MAC_10000FD;
+ }
+ }
+
bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
bp->phy_interface, &macb_phylink_ops);
if (IS_ERR(bp->phylink)) {
@@ -3101,7 +3060,9 @@ static int macb_set_link_ksettings(struct net_device *netdev,
}
static void macb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct macb *bp = netdev_priv(netdev);
@@ -3113,7 +3074,9 @@ static void macb_get_ringparam(struct net_device *netdev,
}
static int macb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct macb *bp = netdev_priv(netdev);
u32 new_rx_size, new_tx_size;
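
The macb rework above is the standard phylink modernization: the driver describes itself once through mac_capabilities and supported_interfaces, phylink_generic_validate() derives the link modes, and mac_select_pcs() returns the PCS instance for the negotiated interface instead of mutating a shared ops pointer at prepare time. A sketch of the hook's contract, with hypothetical names:

	static struct phylink_pcs *foo_mac_select_pcs(struct phylink_config *config,
						      phy_interface_t interface)
	{
		struct foo_priv *priv = netdev_priv(to_net_dev(config->dev));

		if (interface == PHY_INTERFACE_MODE_SGMII)
			return &priv->sgmii_pcs;	/* hypothetical member */
		return NULL;				/* no PCS for this mode */
	}
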
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 095c5a2144a7..fb6b27f46b15 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -464,10 +464,6 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
sizeof(*tstamp_config)))
return -EFAULT;
- /* reserved for future extensions */
- if (tstamp_config->flags)
- return -EINVAL;
-
switch (tstamp_config->tx_type) {
case HWTSTAMP_TX_OFF:
break;
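
The repeated removal of "if (config.flags) return -EINVAL;" across this diff is safe because the hwtstamp ioctl core now validates the flags field before any driver runs, and a first real flag (HWTSTAMP_FLAG_BONDED_PHC_INDEX) exists, so a blanket flags-must-be-zero test would start rejecting valid requests. A hedged sketch of what a driver handler keeps:

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {	/* validate only what the driver honours */
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}
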
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 2b9747867d4c..2c10ae3f7fc1 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -947,7 +947,9 @@ static int lio_set_phys_id(struct net_device *netdev,
static void
lio_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
@@ -1252,8 +1254,11 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
return 0;
}
-static int lio_ethtool_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static int
+lio_ethtool_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
u32 rx_count, tx_count, rx_count_old, tx_count_old;
struct lio *lio = GET_LIO(netdev);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 12eee2bc7f5c..ba28aa444e5a 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2114,9 +2114,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
return -EFAULT;
- if (conf.flags)
- return -EINVAL;
-
switch (conf.tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index c607756b731f..568f211d91cc 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1254,9 +1254,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
return -EFAULT;
- if (conf.flags)
- return -EINVAL;
-
switch (conf.tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 4e39d712e121..103591dcea1c 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -548,7 +548,7 @@ struct octeon_mgmt_cam_state {
};
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
- unsigned char *addr)
+ const unsigned char *addr)
{
int i;
@@ -702,9 +702,6 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags) /* reserved for future extensions */
- return -EINVAL;
-
/* Check the status of hardware for timestamps */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
/* Get the current state of the PTP clock */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 7f2882109b16..5a9fad61e9ea 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -467,7 +467,9 @@ static int nicvf_get_coalesce(struct net_device *netdev,
}
static void nicvf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
@@ -479,7 +481,9 @@ static void nicvf_get_ringparam(struct net_device *netdev,
}
static int nicvf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index bb45d5df2856..a04aa206fddc 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -590,7 +590,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
return true;
default:
- bpf_warn_invalid_xdp_action(action);
+ bpf_warn_invalid_xdp_action(nic->netdev, prog, action);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(nic->netdev, prog, action);
@@ -1917,10 +1917,6 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
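
bpf_warn_invalid_xdp_action() now takes the netdev and the program so its ratelimited warning can identify the offender, as in the nicvf hunk above. A sketch of the usual XDP verdict switch, with hypothetical names:

	static u32 foo_run_xdp(struct net_device *dev, struct bpf_prog *prog,
			       struct xdp_buff *xdp)
	{
		u32 act = bpf_prog_run_xdp(prog, xdp);

		switch (act) {
		case XDP_PASS:
		case XDP_TX:
			break;
		default:
			bpf_warn_invalid_xdp_action(dev, prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(dev, prog, act);
			fallthrough;
		case XDP_DROP:
			act = XDP_DROP;
			break;
		}
		return act;
	}
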
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 50bbe79fb93d..4367edbdd579 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -10,6 +10,7 @@
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>
+#include <uapi/linux/bpf.h>
#include "nic_reg.h"
#include "nic.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 609820e214a3..f4054d2553ea 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -710,7 +710,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct adapter *adapter = dev->ml_priv;
int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
@@ -724,7 +726,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = adapter->params.sge.cmdQ_size[0];
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct adapter *adapter = dev->ml_priv;
int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
@@ -940,11 +944,11 @@ static const struct net_device_ops cxgb_netdev_ops = {
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int i, err, pci_using_dac = 0;
unsigned long mmio_start, mmio_len;
const struct board_info *bi;
struct adapter *adapter = NULL;
struct port_info *pi;
+ int i, err;
err = pci_enable_device(pdev);
if (err)
@@ -957,17 +961,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_pdev;
}
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
-
- if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n",
- pci_name(pdev));
- err = -ENODEV;
- goto out_disable_pdev;
- }
-
- } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
goto out_disable_pdev;
}
@@ -1039,10 +1034,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_RXCSUM;
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_RXCSUM | NETIF_F_LLTX;
+ NETIF_F_RXCSUM | NETIF_F_LLTX | NETIF_F_HIGHDMA;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
if (vlan_tso_capable(adapter)) {
netdev->features |=
NETIF_F_HW_VLAN_CTAG_TX |
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index cda01f22c71c..12e76fd0ae91 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1359,7 +1359,7 @@ static void restart_sched(struct tasklet_struct *t)
* @fl: the free list that contains the packet buffer
* @len: the packet length
*
- * Process an ingress ethernet pakcet and deliver it to the stack.
+ * Process an ingress ethernet packet and deliver it to the stack.
*/
static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index bfffcaeee624..63521312cb90 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1948,7 +1948,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -1964,7 +1966,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = q->txq_size[0];
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -3200,7 +3204,7 @@ static void cxgb3_init_iscsi_mac(struct net_device *dev)
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int i, err, pci_using_dac = 0;
+ int i, err;
resource_size_t mmio_start, mmio_len;
const struct adapter_info *ai;
struct adapter *adapter = NULL;
@@ -3227,9 +3231,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto out_release_regions;
}
@@ -3305,8 +3308,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features |= netdev->hw_features |
NETIF_F_HW_VLAN_CTAG_TX;
netdev->vlan_features |= netdev->features & VLAN_FEAT;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->netdev_ops = &cxgb_netdev_ops;
netdev->ethtool_ops = &cxgb_ethtool_ops;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index c3afec1041f8..62dfbdd33365 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -126,8 +126,10 @@ struct rsp_desc { /* response queue descriptor */
struct rss_header rss_hdr;
__be32 flags;
__be32 len_cq;
- u8 imm_data[47];
- u8 intr_gen;
+ struct_group(immediate,
+ u8 imm_data[47];
+ u8 intr_gen;
+ );
};
/*
@@ -925,7 +927,8 @@ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
if (skb) {
__skb_put(skb, IMMED_PKT_SIZE);
- skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
+ BUILD_BUG_ON(IMMED_PKT_SIZE != sizeof(resp->immediate));
+ skb_copy_to_linear_data(skb, &resp->immediate, IMMED_PKT_SIZE);
}
return skb;
}
@@ -1953,7 +1956,7 @@ static int ofld_poll(struct napi_struct *napi, int budget)
* @rx_gather: a gather list of packets if we are building a bundle
* @gather_idx: index of the next available slot in the bundle
*
- * Process an ingress offload pakcet and add it to the offload ingress
+ * Process an ingress offload packet and add it to the offload ingress
* queue. Returns the index of the next available slot in the bundle.
*/
static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
@@ -2079,7 +2082,7 @@ static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
* @pad: padding
* @lro: large receive offload
*
- * Process an ingress ethernet pakcet and deliver it to the stack.
+ * Process an ingress ethernet packet and deliver it to the stack.
* The padding is 2 if the packet was delivered in an Rx buffer and 0
* if it was immediate data in a response.
*/
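
struct_group(), used in the cxgb3 hunk above and in t4fw_api.h below, wraps adjacent members in a mirrored anonymous struct so a single memcpy() across them has a named, correctly sized object; the binary layout is unchanged, but FORTIFY_SOURCE no longer sees a copy overflowing the first member. Sketched with a hypothetical struct:

	struct foo_rsp {
		__be32 flags;
		struct_group(immediate,
			u8 imm_data[47];
			u8 intr_gen;
		);
	};

	static void foo_copy_imm(void *dst, const struct foo_rsp *rsp)
	{
		BUILD_BUG_ON(sizeof(rsp->immediate) != 48);
		memcpy(dst, &rsp->immediate, sizeof(rsp->immediate));
	}
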
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 129352bbe114..6c790af92170 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -890,7 +890,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
const struct sge *s = &pi->adapter->sge;
@@ -906,7 +908,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
int i;
const struct port_info *pi = netdev_priv(dev);
@@ -1989,6 +1993,15 @@ static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
return 0;
}
+static bool cxgb4_fw_mod_type_info_available(unsigned int fw_mod_type)
+{
+ /* Read port module EEPROM as long as it is plugged in and
+ * safe to read.
+ */
+ return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
+ fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
+}
+
static int cxgb4_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
@@ -1997,7 +2010,7 @@ static int cxgb4_get_module_info(struct net_device *dev,
struct adapter *adapter = pi->adapter;
int ret;
- if (!t4_is_inserted_mod_type(pi->mod_type))
+ if (!cxgb4_fw_mod_type_info_available(pi->mod_type))
return -EINVAL;
switch (pi->port_type) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index dde1cf51d0ab..0c78c0db8937 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6608,7 +6608,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static int adap_idx = 1;
int s_qpp, qpp, num_seg;
struct port_info *pi;
- bool highdma = false;
enum chip_type chip;
void __iomem *regs;
int func, chip_ver;
@@ -6687,14 +6686,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- highdma = true;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_free_adapter;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto out_free_adapter;
}
pci_enable_pcie_error_reporting(pdev);
@@ -6823,7 +6818,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_TC | NETIF_F_NTUPLE;
+ NETIF_F_HW_TC | NETIF_F_NTUPLE | NETIF_F_HIGHDMA;
if (chip_ver > CHELSIO_T5) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM |
@@ -6841,8 +6836,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
}
- if (highdma)
- netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fa5b596ff23a..f889f404305c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1842,8 +1842,10 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* (including the VLAN tag) into the header so we reject anything
* smaller than that ...
*/
- fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) + sizeof(wr->vlantci);
+ BUILD_BUG_ON(sizeof(wr->firmware) !=
+ (sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) + sizeof(wr->vlantci)));
+ fw_hdr_copy_len = sizeof(wr->firmware);
ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
if (ret)
goto out_free;
@@ -1924,7 +1926,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
wr->r3[0] = cpu_to_be32(0);
wr->r3[1] = cpu_to_be32(0);
- skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
end = (u64 *)wr + flits;
/* If this is a Large Send Offload packet we'll put in an LSO CPL
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index fed5f93bf620..26433a62d7f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -497,7 +497,7 @@ struct cpl_t5_pass_accept_rpl {
__be32 opt2;
__be64 opt0;
__be32 iss;
- __be32 rsvd;
+ __be32 rsvd[3];
};
struct cpl_act_open_req {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 0a326c054707..2419459a0b85 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -794,10 +794,12 @@ struct fw_eth_tx_pkt_vm_wr {
__be32 op_immdlen;
__be32 equiq_to_len16;
__be32 r3[2];
- u8 ethmacdst[6];
- u8 ethmacsrc[6];
- __be16 ethtype;
- __be16 vlantci;
+ struct_group(firmware,
+ u8 ethmacdst[ETH_ALEN];
+ u8 ethmacsrc[ETH_ALEN];
+ __be16 ethtype;
+ __be16 vlantci;
+ );
};
#define FW_CMD_MAX_TIMEOUT 10000
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index ae9cca768d74..7de3800437c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1591,7 +1591,9 @@ static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
* first Queue Set.
*/
static void cxgb4vf_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
const struct sge *s = &pi->adapter->sge;
@@ -1614,7 +1616,9 @@ static void cxgb4vf_get_ringparam(struct net_device *dev,
* device -- after vetting them of course!
*/
static int cxgb4vf_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2895,7 +2899,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct port_info *pi;
unsigned int pmask;
- int pci_using_dac;
int err, pidx;
/*
@@ -2916,19 +2919,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
}
/*
- * Set up our DMA mask: try for 64-bit address masking first and
- * fall back to 32-bit if we can't get 64 bits ...
+ * Set up our DMA mask
*/
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err == 0) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err != 0) {
- dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto err_release_regions;
- }
- pci_using_dac = 0;
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto err_release_regions;
}
/*
@@ -3074,9 +3070,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
- netdev->features = netdev->hw_features;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->priv_flags |= IFF_UNICAST_FLT;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0295b2406646..43b2ceb6aa32 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1167,10 +1167,7 @@ netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt_core *cpl;
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
- const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
- sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) +
- sizeof(wr->vlantci));
+ const size_t fw_hdr_copy_len = sizeof(wr->firmware);
/*
* The chip minimum packet length is 10 octets but the firmware
@@ -1267,7 +1264,7 @@ netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
wr->r3[0] = cpu_to_be32(0);
wr->r3[1] = cpu_to_be32(0);
- skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
end = (u64 *)wr + flits;
/*
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
index d04a6c163445..da8d10475a08 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/ipv6.h>
+#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/ip6_route.h>
@@ -99,7 +100,7 @@ cxgb_find_route(struct cxgb4_lld_info *lldi,
rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
peer_port, local_port, IPPROTO_TCP,
- tos, 0);
+ tos & ~INET_ECN_MASK, 0);
if (IS_ERR(rt))
return NULL;
n = dst_neigh_lookup(&rt->dst, &peer_ip);
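
The "tos & ~INET_ECN_MASK" above matters because the low two bits of the TOS byte carry ECN state, not DSCP; leaving them in the flow key can send packets of one connection down different routes as ECN marks change. The mask, sketched:

	u8 route_tos = tos & ~INET_ECN_MASK;	/* key the FIB lookup on DSCP only */
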
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 84251b85fc93..21a70b1f0ac5 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -242,12 +242,15 @@ static int mac89x0_device_probe(struct platform_device *pdev)
pr_info("No EEPROM, giving up now.\n");
goto out1;
} else {
+ u8 addr[ETH_ALEN];
+
for (i = 0; i < ETH_ALEN; i += 2) {
/* Big-endian (why??!) */
unsigned short s = readreg(dev, PP_IA + i);
- dev->dev_addr[i] = s >> 8;
- dev->dev_addr[i+1] = s & 0xff;
+ addr[i] = s >> 8;
+ addr[i+1] = s & 0xff;
}
+ eth_hw_addr_set(dev, addr);
}
dev->irq = SLOT2IRQ(slot);
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index c67a16a48d62..52aaf1bb5205 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -304,7 +304,7 @@ static inline bool enic_is_notify_intr(struct enic *enic, int intr)
static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
- if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
+ if (unlikely(dma_mapping_error(&enic->pdev->dev, dma_addr))) {
net_warn_ratelimited("%s: PCI dma mapping failed!\n",
enic->netdev->name);
enic->gen_stats.dma_map_error++;
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 6ded4d9fa32a..6c11f9d62526 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -177,7 +177,9 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset,
}
static void enic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
@@ -189,7 +191,9 @@ static void enic_get_ringparam(struct net_device *netdev,
}
static int enic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index aacf141986d5..1c81b161de52 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -150,10 +150,10 @@ static void enic_set_affinity_hint(struct enic *enic)
!cpumask_available(enic->msix[i].affinity_mask) ||
cpumask_empty(enic->msix[i].affinity_mask))
continue;
- err = irq_set_affinity_hint(enic->msix_entry[i].vector,
- enic->msix[i].affinity_mask);
+ err = irq_update_affinity_hint(enic->msix_entry[i].vector,
+ enic->msix[i].affinity_mask);
if (err)
- netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
+ netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
err);
}
@@ -173,7 +173,7 @@ static void enic_unset_affinity_hint(struct enic *enic)
int i;
for (i = 0; i < enic->intr_count; i++)
- irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
+ irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);
}
static int enic_udp_tunnel_set_port(struct net_device *netdev,
@@ -2718,26 +2718,14 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* fail to 32-bit.
*/
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(47));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "No usable DMA configuration, aborting\n");
goto err_out_release_regions;
}
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(dev, "Unable to obtain %u-bit DMA "
- "for consistent allocations, aborting\n", 32);
- goto err_out_release_regions;
- }
} else {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(47));
- if (err) {
- dev_err(dev, "Unable to obtain %u-bit DMA "
- "for consistent allocations, aborting\n", 47);
- goto err_out_release_regions;
- }
using_dac = 1;
}
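
Unlike the plain 64-bit conversions elsewhere in this diff, enic keeps a genuine fallback because its first try is an unusual 47-bit mask; the simplification is that dma_set_mask_and_coherent() covers both the streaming and coherent masks, folding away the separate dma_set_coherent_mask() legs. Sketch:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(dev, "No usable DMA configuration, aborting\n");
		goto err_out_release_regions;	/* label as in enic_probe() */
	}
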
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 941f175fb911..c78b99a497df 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -305,21 +305,21 @@ static void gmac_speed_set(struct net_device *netdev)
switch (phydev->speed) {
case 1000:
status.bits.speed = GMAC_SPEED_1000;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(phydev->interface))
status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n",
phydev_name(phydev));
break;
case 100:
status.bits.speed = GMAC_SPEED_100;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(phydev->interface))
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n",
phydev_name(phydev));
break;
case 10:
status.bits.speed = GMAC_SPEED_10;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(phydev->interface))
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n",
phydev_name(phydev));
@@ -389,6 +389,9 @@ static int gmac_setup_phy(struct net_device *netdev)
status.bits.mii_rmii = GMAC_PHY_GMII;
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
netdev_dbg(netdev,
"RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n");
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
@@ -2105,7 +2108,9 @@ static void gmac_get_pauseparam(struct net_device *netdev,
}
static void gmac_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
@@ -2123,7 +2128,9 @@ static void gmac_get_ringparam(struct net_device *netdev,
}
static int gmac_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
int err = 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f9955308b93d..dfa784339781 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -683,7 +683,9 @@ static int be_get_link_ksettings(struct net_device *netdev,
}
static void be_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct be_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d51f24c9e1b8..d0c262f2695a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3491,7 +3491,7 @@ static int be_msix_register(struct be_adapter *adapter)
if (status)
goto err_msix;
- irq_set_affinity_hint(vec, eqo->affinity_mask);
+ irq_update_affinity_hint(vec, eqo->affinity_mask);
}
return 0;
@@ -3552,7 +3552,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
/* MSIx */
for_all_evt_queues(adapter, eqo, i) {
vec = be_msix_vec_get(adapter, eqo);
- irq_set_affinity_hint(vec, NULL);
+ irq_update_affinity_hint(vec, NULL);
free_irq(vec, eqo);
}
@@ -5194,7 +5194,8 @@ static void be_netdev_init(struct net_device *netdev)
netdev->hw_features |= NETIF_F_RXHASH;
netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -5840,14 +5841,9 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
SET_NETDEV_DEV(netdev, &pdev->dev);
status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!status) {
- netdev->features |= NETIF_F_HIGHDMA;
- } else {
- status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (status) {
- dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
- goto free_netdev;
- }
+ if (status) {
+ dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
+ goto free_netdev;
}
status = pci_enable_pcie_error_reporting(pdev);
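
irq_update_affinity_hint() differs from the deprecated irq_set_affinity_hint() in that it only records the hint for user space (irqbalance) rather than also forcing the kernel-side affinity, so CPU placements chosen by the administrator survive. Sketch:

	err = irq_update_affinity_hint(vec, eqo->affinity_mask);
	if (err)
		netdev_warn(netdev, "affinity hint update failed: %d\n", err);

	irq_update_affinity_hint(vec, NULL);	/* clear the hint on teardown */
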
diff --git a/drivers/net/ethernet/engleder/Kconfig b/drivers/net/ethernet/engleder/Kconfig
new file mode 100644
index 000000000000..f4e2b1102d8f
--- /dev/null
+++ b/drivers/net/ethernet/engleder/Kconfig
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Engleder network device configuration
+#
+
+config NET_VENDOR_ENGLEDER
+ bool "Engleder devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Engleder devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ENGLEDER
+
+config TSNEP
+ tristate "TSN endpoint support"
+ depends on HAS_IOMEM && HAS_DMA
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select PHYLIB
+ help
+ Support for the Engleder TSN endpoint Ethernet MAC IP Core.
+
+ To compile this driver as a module, choose M here. The module will be
+ called tsnep.
+
+config TSNEP_SELFTESTS
+ bool "TSN endpoint self test support"
+ default n
+ depends on TSNEP
+ help
+ This enables self test support within the TSN endpoint driver.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_ENGLEDER
diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile
new file mode 100644
index 000000000000..cce2191cb889
--- /dev/null
+++ b/drivers/net/ethernet/engleder/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Engleder Ethernet drivers
+#
+
+obj-$(CONFIG_TSNEP) += tsnep.o
+
+tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \
+ $(tsnep-y)
+tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
new file mode 100644
index 000000000000..23bbece6b7de
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#ifndef _TSNEP_H
+#define _TSNEP_H
+
+#include "tsnep_hw.h"
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/miscdevice.h>
+
+#define TSNEP "tsnep"
+
+#define TSNEP_RING_SIZE 256
+#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
+#define TSNEP_RING_PAGE_COUNT (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE)
+
+#define TSNEP_QUEUES 1
+
+struct tsnep_gcl {
+ void __iomem *addr;
+
+ u64 base_time;
+ u64 cycle_time;
+ u64 cycle_time_extension;
+
+ struct tsnep_gcl_operation operation[TSNEP_GCL_COUNT];
+ int count;
+
+ u64 change_limit;
+
+ u64 start_time;
+ bool change;
+};
+
+struct tsnep_tx_entry {
+ struct tsnep_tx_desc *desc;
+ struct tsnep_tx_desc_wb *desc_wb;
+ dma_addr_t desc_dma;
+ bool owner_user_flag;
+
+ u32 properties;
+
+ struct sk_buff *skb;
+ size_t len;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+};
+
+struct tsnep_tx {
+ struct tsnep_adapter *adapter;
+ void __iomem *addr;
+
+ void *page[TSNEP_RING_PAGE_COUNT];
+ dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
+
+ /* TX ring lock */
+ spinlock_t lock;
+ struct tsnep_tx_entry entry[TSNEP_RING_SIZE];
+ int write;
+ int read;
+ u32 owner_counter;
+ int increment_owner_counter;
+
+ u32 packets;
+ u32 bytes;
+ u32 dropped;
+};
+
+struct tsnep_rx_entry {
+ struct tsnep_rx_desc *desc;
+ struct tsnep_rx_desc_wb *desc_wb;
+ dma_addr_t desc_dma;
+
+ u32 properties;
+
+ struct sk_buff *skb;
+ size_t len;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+};
+
+struct tsnep_rx {
+ struct tsnep_adapter *adapter;
+ void __iomem *addr;
+
+ void *page[TSNEP_RING_PAGE_COUNT];
+ dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
+
+ struct tsnep_rx_entry entry[TSNEP_RING_SIZE];
+ int read;
+ u32 owner_counter;
+ int increment_owner_counter;
+
+ u32 packets;
+ u32 bytes;
+ u32 dropped;
+ u32 multicast;
+};
+
+struct tsnep_queue {
+ struct tsnep_adapter *adapter;
+
+ struct tsnep_tx *tx;
+ struct tsnep_rx *rx;
+
+ struct napi_struct napi;
+
+ u32 irq_mask;
+};
+
+struct tsnep_adapter {
+ struct net_device *netdev;
+ u8 mac_address[ETH_ALEN];
+ struct mii_bus *mdiobus;
+ bool suppress_preamble;
+ phy_interface_t phy_mode;
+ struct phy_device *phydev;
+ int msg_enable;
+
+ struct platform_device *pdev;
+ struct device *dmadev;
+ void __iomem *addr;
+ int irq;
+
+ bool gate_control;
+ /* gate control lock */
+ struct mutex gate_control_lock;
+ bool gate_control_active;
+ struct tsnep_gcl gcl[2];
+ int next_gcl;
+
+ struct hwtstamp_config hwtstamp_config;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ /* ptp clock lock */
+ spinlock_t ptp_lock;
+
+ int num_tx_queues;
+ struct tsnep_tx tx[TSNEP_MAX_QUEUES];
+ int num_rx_queues;
+ struct tsnep_rx rx[TSNEP_MAX_QUEUES];
+
+ int num_queues;
+ struct tsnep_queue queue[TSNEP_MAX_QUEUES];
+};
+
+extern const struct ethtool_ops tsnep_ethtool_ops;
+
+int tsnep_ptp_init(struct tsnep_adapter *adapter);
+void tsnep_ptp_cleanup(struct tsnep_adapter *adapter);
+int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+
+int tsnep_tc_init(struct tsnep_adapter *adapter);
+void tsnep_tc_cleanup(struct tsnep_adapter *adapter);
+int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
+
+#if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
+int tsnep_ethtool_get_test_count(void);
+void tsnep_ethtool_get_test_strings(u8 *data);
+void tsnep_ethtool_self_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data);
+#else
+static inline int tsnep_ethtool_get_test_count(void)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tsnep_ethtool_get_test_strings(u8 *data)
+{
+ /* not enabled */
+}
+
+static inline void tsnep_ethtool_self_test(struct net_device *dev,
+ struct ethtool_test *eth_test,
+ u64 *data)
+{
+ /* not enabled */
+}
+#endif /* CONFIG_TSNEP_SELFTESTS */
+
+void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time);
+
+#endif /* _TSNEP_H */
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
new file mode 100644
index 000000000000..e6760dc68ddd
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+static const char tsnep_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "rx_bytes",
+ "rx_dropped",
+ "rx_multicast",
+ "rx_phy_errors",
+ "rx_forwarded_phy_errors",
+ "rx_invalid_frame_errors",
+ "tx_packets",
+ "tx_bytes",
+ "tx_dropped",
+};
+
+struct tsnep_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 rx_phy_errors;
+ u64 rx_forwarded_phy_errors;
+ u64 rx_invalid_frame_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+};
+
+#define TSNEP_STATS_COUNT (sizeof(struct tsnep_stats) / sizeof(u64))
+
+static const char tsnep_rx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_%d_packets",
+ "rx_%d_bytes",
+ "rx_%d_dropped",
+ "rx_%d_multicast",
+ "rx_%d_no_descriptor_errors",
+ "rx_%d_buffer_too_small_errors",
+ "rx_%d_fifo_overflow_errors",
+ "rx_%d_invalid_frame_errors",
+};
+
+struct tsnep_rx_queue_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 rx_no_descriptor_errors;
+ u64 rx_buffer_too_small_errors;
+ u64 rx_fifo_overflow_errors;
+ u64 rx_invalid_frame_errors;
+};
+
+#define TSNEP_RX_QUEUE_STATS_COUNT (sizeof(struct tsnep_rx_queue_stats) / \
+ sizeof(u64))
+
+static const char tsnep_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "tx_%d_packets",
+ "tx_%d_bytes",
+ "tx_%d_dropped",
+};
+
+struct tsnep_tx_queue_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+};
+
+#define TSNEP_TX_QUEUE_STATS_COUNT (sizeof(struct tsnep_tx_queue_stats) / \
+ sizeof(u64))
+
+static void tsnep_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ strscpy(drvinfo->driver, TSNEP, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(&adapter->pdev->dev),
+ sizeof(drvinfo->bus_info));
+}
+
+static int tsnep_ethtool_get_regs_len(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int len;
+ int num_additional_queues;
+
+ len = TSNEP_MAC_SIZE;
+
+ /* first queue pair is within TSNEP_MAC_SIZE; only queues beyond the
+ * first pair extend the register length by TSNEP_QUEUE_SIZE
+ */
+ num_additional_queues =
+ max(adapter->num_tx_queues, adapter->num_rx_queues) - 1;
+ len += TSNEP_QUEUE_SIZE * num_additional_queues;
+
+ return len;
+}
+
+static void tsnep_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs,
+ void *p)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ regs->version = 1;
+
+ memcpy_fromio(p, adapter->addr, regs->len);
+}
+
+static u32 tsnep_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void tsnep_ethtool_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = data;
+}
+
+static void tsnep_ethtool_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count = adapter->num_rx_queues;
+ int tx_count = adapter->num_tx_queues;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, tsnep_stats_strings, sizeof(tsnep_stats_strings));
+ data += sizeof(tsnep_stats_strings);
+
+ for (i = 0; i < rx_count; i++) {
+ for (j = 0; j < TSNEP_RX_QUEUE_STATS_COUNT; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ tsnep_rx_queue_stats_strings[j], i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < tx_count; i++) {
+ for (j = 0; j < TSNEP_TX_QUEUE_STATS_COUNT; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ tsnep_tx_queue_stats_strings[j], i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ case ETH_SS_TEST:
+ tsnep_ethtool_get_test_strings(data);
+ break;
+ }
+}
+
+static void tsnep_ethtool_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count = adapter->num_rx_queues;
+ int tx_count = adapter->num_tx_queues;
+ struct tsnep_stats tsnep_stats;
+ struct tsnep_rx_queue_stats tsnep_rx_queue_stats;
+ struct tsnep_tx_queue_stats tsnep_tx_queue_stats;
+ u32 reg;
+ int i;
+
+ memset(&tsnep_stats, 0, sizeof(tsnep_stats));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ tsnep_stats.rx_packets += adapter->rx[i].packets;
+ tsnep_stats.rx_bytes += adapter->rx[i].bytes;
+ tsnep_stats.rx_dropped += adapter->rx[i].dropped;
+ tsnep_stats.rx_multicast += adapter->rx[i].multicast;
+ }
+ reg = ioread32(adapter->addr + ECM_STAT);
+ tsnep_stats.rx_phy_errors =
+ (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
+ tsnep_stats.rx_forwarded_phy_errors =
+ (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
+ tsnep_stats.rx_invalid_frame_errors =
+ (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tsnep_stats.tx_packets += adapter->tx[i].packets;
+ tsnep_stats.tx_bytes += adapter->tx[i].bytes;
+ tsnep_stats.tx_dropped += adapter->tx[i].dropped;
+ }
+ memcpy(data, &tsnep_stats, sizeof(tsnep_stats));
+ data += TSNEP_STATS_COUNT;
+
+ for (i = 0; i < rx_count; i++) {
+ memset(&tsnep_rx_queue_stats, 0, sizeof(tsnep_rx_queue_stats));
+ tsnep_rx_queue_stats.rx_packets = adapter->rx[i].packets;
+ tsnep_rx_queue_stats.rx_bytes = adapter->rx[i].bytes;
+ tsnep_rx_queue_stats.rx_dropped = adapter->rx[i].dropped;
+ tsnep_rx_queue_stats.rx_multicast = adapter->rx[i].multicast;
+ reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
+ TSNEP_RX_STATISTIC);
+ tsnep_rx_queue_stats.rx_no_descriptor_errors =
+ (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
+ TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
+ tsnep_rx_queue_stats.rx_buffer_too_small_errors =
+ (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
+ TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
+ tsnep_rx_queue_stats.rx_fifo_overflow_errors =
+ (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
+ TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
+ tsnep_rx_queue_stats.rx_invalid_frame_errors =
+ (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
+ TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
+ memcpy(data, &tsnep_rx_queue_stats,
+ sizeof(tsnep_rx_queue_stats));
+ data += TSNEP_RX_QUEUE_STATS_COUNT;
+ }
+
+ for (i = 0; i < tx_count; i++) {
+ memset(&tsnep_tx_queue_stats, 0, sizeof(tsnep_tx_queue_stats));
+ tsnep_tx_queue_stats.tx_packets += adapter->tx[i].packets;
+ tsnep_tx_queue_stats.tx_bytes += adapter->tx[i].bytes;
+ tsnep_tx_queue_stats.tx_dropped += adapter->tx[i].dropped;
+ memcpy(data, &tsnep_tx_queue_stats,
+ sizeof(tsnep_tx_queue_stats));
+ data += TSNEP_TX_QUEUE_STATS_COUNT;
+ }
+}
+
+static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count;
+ int tx_count;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ rx_count = adapter->num_rx_queues;
+ tx_count = adapter->num_tx_queues;
+ return TSNEP_STATS_COUNT +
+ TSNEP_RX_QUEUE_STATS_COUNT * rx_count +
+ TSNEP_TX_QUEUE_STATS_COUNT * tx_count;
+ case ETH_SS_TEST:
+ return tsnep_ethtool_get_test_count();
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsnep_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+const struct ethtool_ops tsnep_ethtool_ops = {
+ .get_drvinfo = tsnep_ethtool_get_drvinfo,
+ .get_regs_len = tsnep_ethtool_get_regs_len,
+ .get_regs = tsnep_ethtool_get_regs,
+ .get_msglevel = tsnep_ethtool_get_msglevel,
+ .set_msglevel = tsnep_ethtool_set_msglevel,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .self_test = tsnep_ethtool_self_test,
+ .get_strings = tsnep_ethtool_get_strings,
+ .get_ethtool_stats = tsnep_ethtool_get_ethtool_stats,
+ .get_sset_count = tsnep_ethtool_get_sset_count,
+ .get_ts_info = tsnep_ethtool_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h
new file mode 100644
index 000000000000..71cc8577d640
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_hw.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+/* Hardware definition of TSNEP and EtherCAT MAC device */
+
+#ifndef _TSNEP_HW_H
+#define _TSNEP_HW_H
+
+#include <linux/types.h>
+
+/* type */
+#define ECM_TYPE 0x0000
+#define ECM_REVISION_MASK 0x000000FF
+#define ECM_REVISION_SHIFT 0
+#define ECM_VERSION_MASK 0x0000FF00
+#define ECM_VERSION_SHIFT 8
+#define ECM_QUEUE_COUNT_MASK 0x00070000
+#define ECM_QUEUE_COUNT_SHIFT 16
+#define ECM_GATE_CONTROL 0x02000000
+
+/* system time */
+#define ECM_SYSTEM_TIME_LOW 0x0008
+#define ECM_SYSTEM_TIME_HIGH 0x000C
+
+/* clock */
+#define ECM_CLOCK_RATE 0x0010
+#define ECM_CLOCK_RATE_OFFSET_MASK 0x7FFFFFFF
+#define ECM_CLOCK_RATE_OFFSET_SIGN 0x80000000
+
+/* interrupt */
+#define ECM_INT_ENABLE 0x0018
+#define ECM_INT_ACTIVE 0x001C
+#define ECM_INT_ACKNOWLEDGE 0x001C
+#define ECM_INT_LINK 0x00000020
+#define ECM_INT_TX_0 0x00000100
+#define ECM_INT_RX_0 0x00000200
+#define ECM_INT_ALL 0x7FFFFFFF
+#define ECM_INT_DISABLE 0x80000000
+
+/* reset */
+#define ECM_RESET 0x0020
+#define ECM_RESET_COMMON 0x00000001
+#define ECM_RESET_CHANNEL 0x00000100
+#define ECM_RESET_TXRX 0x00010000
+
+/* control and status */
+#define ECM_STATUS 0x0080
+#define ECM_LINK_MODE_OFF 0x01000000
+#define ECM_LINK_MODE_100 0x02000000
+#define ECM_LINK_MODE_1000 0x04000000
+#define ECM_NO_LINK 0x01000000
+#define ECM_LINK_MODE_MASK 0x06000000
+
+/* management data */
+#define ECM_MD_CONTROL 0x0084
+#define ECM_MD_STATUS 0x0084
+#define ECM_MD_PREAMBLE 0x00000001
+#define ECM_MD_READ 0x00000004
+#define ECM_MD_WRITE 0x00000002
+#define ECM_MD_ADDR_MASK 0x000000F8
+#define ECM_MD_ADDR_SHIFT 3
+#define ECM_MD_PHY_ADDR_MASK 0x00001F00
+#define ECM_MD_PHY_ADDR_SHIFT 8
+#define ECM_MD_BUSY 0x00000001
+#define ECM_MD_DATA_MASK 0xFFFF0000
+#define ECM_MD_DATA_SHIFT 16
+
+/* statistic */
+#define ECM_STAT 0x00B0
+#define ECM_STAT_RX_ERR_MASK 0x000000FF
+#define ECM_STAT_RX_ERR_SHIFT 0
+#define ECM_STAT_INV_FRM_MASK 0x0000FF00
+#define ECM_STAT_INV_FRM_SHIFT 8
+#define ECM_STAT_FWD_RX_ERR_MASK 0x00FF0000
+#define ECM_STAT_FWD_RX_ERR_SHIFT 16
+
+/* tsnep */
+#define TSNEP_MAC_SIZE 0x4000
+#define TSNEP_QUEUE_SIZE 0x1000
+#define TSNEP_QUEUE(n) ({ typeof(n) __n = (n); \
+ (__n) == 0 ? \
+ 0 : \
+ TSNEP_MAC_SIZE + TSNEP_QUEUE_SIZE * ((__n) - 1); })
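+/* e.g. TSNEP_QUEUE(0) = 0x0000, TSNEP_QUEUE(1) = 0x4000 and TSNEP_QUEUE(2) =
+ * 0x5000; queue 0 shares the MAC register space, additional queues follow in
+ * TSNEP_QUEUE_SIZE steps
+ */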
+#define TSNEP_MAX_QUEUES 8
+#define TSNEP_MAX_FRAME_SIZE (2 * 1024) /* hardware actually supports 16k */
+#define TSNEP_DESC_SIZE 256
+#define TSNEP_DESC_OFFSET 128
+
+/* tsnep register */
+#define TSNEP_INFO 0x0100
+#define TSNEP_INFO_RX_ASSIGN 0x00010000
+#define TSNEP_INFO_TX_TIME 0x00020000
+#define TSNEP_CONTROL 0x0108
+#define TSNEP_CONTROL_TX_RESET 0x00000001
+#define TSNEP_CONTROL_TX_ENABLE 0x00000002
+#define TSNEP_CONTROL_TX_DMA_ERROR 0x00000010
+#define TSNEP_CONTROL_TX_DESC_ERROR 0x00000020
+#define TSNEP_CONTROL_RX_RESET 0x00000100
+#define TSNEP_CONTROL_RX_ENABLE 0x00000200
+#define TSNEP_CONTROL_RX_DISABLE 0x00000400
+#define TSNEP_CONTROL_RX_DMA_ERROR 0x00001000
+#define TSNEP_CONTROL_RX_DESC_ERROR 0x00002000
+#define TSNEP_TX_DESC_ADDR_LOW 0x0140
+#define TSNEP_TX_DESC_ADDR_HIGH 0x0144
+#define TSNEP_RX_DESC_ADDR_LOW 0x0180
+#define TSNEP_RX_DESC_ADDR_HIGH 0x0184
+#define TSNEP_RESET_OWNER_COUNTER 0x01
+#define TSNEP_RX_STATISTIC 0x0190
+#define TSNEP_RX_STATISTIC_NO_DESC_MASK 0x000000FF
+#define TSNEP_RX_STATISTIC_NO_DESC_SHIFT 0
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK 0x0000FF00
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT 8
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK 0x00FF0000
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT 16
+#define TSNEP_RX_STATISTIC_INVALID_FRAME_MASK 0xFF000000
+#define TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT 24
+#define TSNEP_RX_STATISTIC_NO_DESC 0x0190
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL 0x0191
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW 0x0192
+#define TSNEP_RX_STATISTIC_INVALID_FRAME 0x0193
+#define TSNEP_RX_ASSIGN 0x01A0
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_ACTIVE 0x00000001
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_MASK 0xFFFF0000
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_SHIFT 16
+#define TSNEP_MAC_ADDRESS_LOW 0x0800
+#define TSNEP_MAC_ADDRESS_HIGH 0x0804
+#define TSNEP_RX_FILTER 0x0806
+#define TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS 0x0001
+#define TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS 0x0002
+#define TSNEP_GC 0x0808
+#define TSNEP_GC_ENABLE_A 0x00000002
+#define TSNEP_GC_ENABLE_B 0x00000004
+#define TSNEP_GC_DISABLE 0x00000008
+#define TSNEP_GC_ENABLE_TIMEOUT 0x00000010
+#define TSNEP_GC_ACTIVE_A 0x00000002
+#define TSNEP_GC_ACTIVE_B 0x00000004
+#define TSNEP_GC_CHANGE_AB 0x00000008
+#define TSNEP_GC_TIMEOUT_ACTIVE 0x00000010
+#define TSNEP_GC_TIMEOUT_SIGNAL 0x00000020
+#define TSNEP_GC_LIST_ERROR 0x00000080
+#define TSNEP_GC_OPEN 0x00FF0000
+#define TSNEP_GC_OPEN_SHIFT 16
+#define TSNEP_GC_NEXT_OPEN 0xFF000000
+#define TSNEP_GC_NEXT_OPEN_SHIFT 24
+#define TSNEP_GC_TIMEOUT 131072
+#define TSNEP_GC_TIME 0x080C
+#define TSNEP_GC_CHANGE 0x0810
+#define TSNEP_GCL_A 0x2000
+#define TSNEP_GCL_B 0x2800
+#define TSNEP_GCL_SIZE SZ_2K
+
+/* tsnep gate control list operation */
+struct tsnep_gcl_operation {
+ u32 properties;
+ u32 interval;
+};
+
+#define TSNEP_GCL_COUNT (TSNEP_GCL_SIZE / sizeof(struct tsnep_gcl_operation))
+#define TSNEP_GCL_MASK 0x000000FF
+#define TSNEP_GCL_INSERT 0x20000000
+#define TSNEP_GCL_CHANGE 0x40000000
+#define TSNEP_GCL_LAST 0x80000000
+#define TSNEP_GCL_MIN_INTERVAL 32
+
+/* tsnep TX/RX descriptor */
+#define TSNEP_DESC_SIZE 256
+#define TSNEP_DESC_SIZE_DATA_AFTER 2048
+#define TSNEP_DESC_OFFSET 128
+#define TSNEP_DESC_OWNER_COUNTER_MASK 0xC0000000
+#define TSNEP_DESC_OWNER_COUNTER_SHIFT 30
+#define TSNEP_DESC_LENGTH_MASK 0x00003FFF
+#define TSNEP_DESC_INTERRUPT_FLAG 0x00040000
+#define TSNEP_DESC_EXTENDED_WRITEBACK_FLAG 0x00080000
+#define TSNEP_DESC_NO_LINK_FLAG 0x01000000
+
+/* tsnep TX descriptor */
+struct tsnep_tx_desc {
+ __le32 properties;
+ __le32 more_properties;
+ __le32 reserved[2];
+ __le64 next;
+ __le64 tx;
+};
+
+#define TSNEP_TX_DESC_OWNER_MASK 0xE0000000
+#define TSNEP_TX_DESC_OWNER_USER_FLAG 0x20000000
+#define TSNEP_TX_DESC_LAST_FRAGMENT_FLAG 0x00010000
+#define TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG 0x00020000
+
+/* tsnep TX descriptor writeback */
+struct tsnep_tx_desc_wb {
+ __le32 properties;
+ __le32 reserved1[3];
+ __le64 timestamp;
+ __le32 dma_delay;
+ __le32 reserved2;
+};
+
+#define TSNEP_TX_DESC_UNDERRUN_ERROR_FLAG 0x00010000
+#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_MASK 0x0000FFFC
+#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_SHIFT 2
+#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_MASK 0xFFFC0000
+#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_SHIFT 18
+#define TSNEP_TX_DESC_DMA_DELAY_NS 64
+
+/* tsnep RX descriptor */
+struct tsnep_rx_desc {
+ __le32 properties;
+ __le32 reserved[3];
+ __le64 next;
+ __le64 rx;
+};
+
+#define TSNEP_RX_DESC_BUFFER_SIZE_MASK 0x00003FFC
+
+/* tsnep RX descriptor writeback */
+struct tsnep_rx_desc_wb {
+ __le32 properties;
+ __le32 reserved[7];
+};
+
+/* tsnep RX inline meta */
+struct tsnep_rx_inline {
+ __le64 reserved;
+ __le64 timestamp;
+};
+
+#define TSNEP_RX_INLINE_METADATA_SIZE (sizeof(struct tsnep_rx_inline))
+
+#endif /* _TSNEP_HW_H */
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
new file mode 100644
index 000000000000..904f3304727e
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -0,0 +1,1272 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+/* TSN endpoint Ethernet MAC driver
+ *
+ * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
+ * communication. It is designed for endpoints within TSN (Time Sensitive
+ * Networking) networks, e.g., for PLCs in industrial automation.
+ *
+ * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
+ * by the driver.
+ *
+ * More information can be found here:
+ * - www.embedded-experts.at/tsn
+ * - www.engleder-embedded.com
+ */
+
+#include "tsnep.h"
+#include "tsnep_hw.h"
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/iopoll.h>
+
+#define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \
+ TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4))
+#define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN)
+#define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH)
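+/* with TSNEP_MAX_FRAME_SIZE 2048, RX_SKB_LENGTH evaluates to
+ * round_up(16 + 14 + 2048 + 4, 4) = 2084 bytes per RX buffer
+ * (inline metadata + Ethernet header + frame + FCS)
+ */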
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
+#else
+#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
+#endif
+#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))
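+/* e.g. the 64 bit DMA address 0x0000000123456780 is split into DMA_ADDR_HIGH
+ * 0x00000001 and DMA_ADDR_LOW 0x23456780 for the two 32 bit descriptor
+ * address registers
+ */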
+
+static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
+{
+ iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
+}
+
+static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
+{
+ mask |= ECM_INT_DISABLE;
+ iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
+}
+
+static irqreturn_t tsnep_irq(int irq, void *arg)
+{
+ struct tsnep_adapter *adapter = arg;
+ u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);
+
+ /* acknowledge interrupt */
+ if (active != 0)
+ iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);
+
+ /* handle link interrupt */
+ if ((active & ECM_INT_LINK) != 0) {
+ if (adapter->netdev->phydev)
+ phy_mac_interrupt(adapter->netdev->phydev);
+ }
+
+ /* handle TX/RX queue 0 interrupt */
+ if ((active & adapter->queue[0].irq_mask) != 0) {
+ if (adapter->netdev) {
+ tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+ napi_schedule(&adapter->queue[0].napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct tsnep_adapter *adapter = bus->priv;
+ u32 md;
+ int retval;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ md = ECM_MD_READ;
+ if (!adapter->suppress_preamble)
+ md |= ECM_MD_PREAMBLE;
+ md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
+ md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
+ iowrite32(md, adapter->addr + ECM_MD_CONTROL);
+ retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
+ !(md & ECM_MD_BUSY), 16, 1000);
+ if (retval != 0)
+ return retval;
+
+ return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
+}
+
+static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct tsnep_adapter *adapter = bus->priv;
+ u32 md;
+ int retval;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ md = ECM_MD_WRITE;
+ if (!adapter->suppress_preamble)
+ md |= ECM_MD_PREAMBLE;
+ md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
+ md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
+ md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
+ iowrite32(md, adapter->addr + ECM_MD_CONTROL);
+ retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
+ !(md & ECM_MD_BUSY), 16, 1000);
+ if (retval != 0)
+ return retval;
+
+ return 0;
+}
+
+static void tsnep_phy_link_status_change(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u32 mode;
+
+ if (phydev->link) {
+ switch (phydev->speed) {
+ case SPEED_100:
+ mode = ECM_LINK_MODE_100;
+ break;
+ case SPEED_1000:
+ mode = ECM_LINK_MODE_1000;
+ break;
+ default:
+ mode = ECM_LINK_MODE_OFF;
+ break;
+ }
+ iowrite32(mode, adapter->addr + ECM_STATUS);
+ }
+
+ phy_print_status(netdev->phydev);
+}
+
+static int tsnep_phy_open(struct tsnep_adapter *adapter)
+{
+ struct phy_device *phydev;
+ struct ethtool_eee ethtool_eee;
+ int retval;
+
+ retval = phy_connect_direct(adapter->netdev, adapter->phydev,
+ tsnep_phy_link_status_change,
+ adapter->phy_mode);
+ if (retval)
+ return retval;
+ phydev = adapter->netdev->phydev;
+
+ /* MAC supports only 100Mbps|1000Mbps full duplex
+ * SPE (Single Pair Ethernet) is also an option but not implemented yet
+ */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ /* disable EEE autoneg, EEE not supported by TSNEP */
+ memset(&ethtool_eee, 0, sizeof(ethtool_eee));
+ phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);
+
+ adapter->phydev->irq = PHY_MAC_INTERRUPT;
+ phy_start(adapter->phydev);
+
+ return 0;
+}
+
+static void tsnep_phy_close(struct tsnep_adapter *adapter)
+{
+ phy_stop(adapter->netdev->phydev);
+ phy_disconnect(adapter->netdev->phydev);
+ adapter->netdev->phydev = NULL;
+}
+
+static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ int i;
+
+ memset(tx->entry, 0, sizeof(tx->entry));
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ if (tx->page[i]) {
+ dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
+ tx->page_dma[i]);
+ tx->page[i] = NULL;
+ tx->page_dma[i] = 0;
+ }
+ }
+}
+
+static int tsnep_tx_ring_init(struct tsnep_tx *tx)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ struct tsnep_tx_entry *next_entry;
+ int i, j;
+ int retval;
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ tx->page[i] =
+ dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
+ GFP_KERNEL);
+ if (!tx->page[i]) {
+ retval = -ENOMEM;
+ goto alloc_failed;
+ }
+ for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
+ entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
+ entry->desc_wb = (struct tsnep_tx_desc_wb *)
+ (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
+ entry->desc = (struct tsnep_tx_desc *)
+ (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
+ entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
+ }
+ }
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &tx->entry[i];
+ next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE];
+ entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
+ }
+
+ return 0;
+
+alloc_failed:
+ tsnep_tx_ring_cleanup(tx);
+ return retval;
+}
+
+static void tsnep_tx_activate(struct tsnep_tx *tx, int index, bool last)
+{
+ struct tsnep_tx_entry *entry = &tx->entry[index];
+
+ entry->properties = 0;
+ if (entry->skb) {
+ entry->properties =
+ skb_pagelen(entry->skb) & TSNEP_DESC_LENGTH_MASK;
+ entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
+ if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
+
+ /* toggle user flag to prevent false acknowledge
+ *
+ * Only the first fragment is acknowledged. For all other
+ * fragments no acknowledge is done and the last written owner
+ * counter stays in the writeback descriptor. Therefore, it is
+ * possible that the last written owner counter is identical to
+ * the new incremented owner counter and a false acknowledge is
+ * detected before the real acknowledge has been done by
+ * hardware.
+ *
+ * The user flag is used to prevent this situation. The user
+ * flag is copied to the writeback descriptor by the hardware
+ * and is used as additional acknowledge data. By toggling the
+ * user flag only for the first fragment (which is
+ * acknowledged), it is guaranteed that the last acknowledge
+ * done for this descriptor has used a different user flag and
+ * cannot be detected as false acknowledge.
+ */
+ entry->owner_user_flag = !entry->owner_user_flag;
+ }
+ if (last)
+ entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
+ if (index == tx->increment_owner_counter) {
+ tx->owner_counter++;
+ if (tx->owner_counter == 4)
+ tx->owner_counter = 1;
+ tx->increment_owner_counter--;
+ if (tx->increment_owner_counter < 0)
+ tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+ }
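+ /* the owner counter cycles through 1..3 and advances once per ring
+ * traversal, so hardware can distinguish newly activated descriptors
+ * from stale ones of the previous traversal
+ */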
+ entry->properties |=
+ (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
+ TSNEP_DESC_OWNER_COUNTER_MASK;
+ if (entry->owner_user_flag)
+ entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
+ entry->desc->more_properties =
+ __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);
+
+ /* descriptor properties shall be written last, because valid data is
+ * signaled there
+ */
+ dma_wmb();
+
+ entry->desc->properties = __cpu_to_le32(entry->properties);
+}
+
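+/* one descriptor always stays unused so that an empty ring (read == write)
+ * can be distinguished from a full ring; hence the - 1 in both branches
+ */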
+static int tsnep_tx_desc_available(struct tsnep_tx *tx)
+{
+ if (tx->read <= tx->write)
+ return TSNEP_RING_SIZE - tx->write + tx->read - 1;
+ else
+ return tx->read - tx->write - 1;
+}
+
+static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ unsigned int len;
+ dma_addr_t dma;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];
+
+ if (i == 0) {
+ len = skb_headlen(skb);
+ dma = dma_map_single(dmadev, skb->data, len,
+ DMA_TO_DEVICE);
+ } else {
+ len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
+ dma = skb_frag_dma_map(dmadev,
+ &skb_shinfo(skb)->frags[i - 1],
+ 0, len, DMA_TO_DEVICE);
+ }
+ if (dma_mapping_error(dmadev, dma))
+ return -ENOMEM;
+
+ entry->len = len;
+ dma_unmap_addr_set(entry, dma, dma);
+
+ entry->desc->tx = __cpu_to_le64(dma);
+ }
+
+ return 0;
+}
+
+static void tsnep_tx_unmap(struct tsnep_tx *tx, int count)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->read + i) % TSNEP_RING_SIZE];
+
+ if (entry->len) {
+ if (i == 0)
+ dma_unmap_single(dmadev,
+ dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dmadev,
+ dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_TO_DEVICE);
+ entry->len = 0;
+ }
+ }
+}
+
+static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+ struct tsnep_tx *tx)
+{
+ unsigned long flags;
+ int count = 1;
+ struct tsnep_tx_entry *entry;
+ int i;
+ int retval;
+
+ if (skb_shinfo(skb)->nr_frags > 0)
+ count += skb_shinfo(skb)->nr_frags;
+
+ spin_lock_irqsave(&tx->lock, flags);
+
+ if (tsnep_tx_desc_available(tx) < count) {
+ /* ring is full; this should not happen, because the queue is
+ * stopped below as soon as it could become full
+ */
+ netif_stop_queue(tx->adapter->netdev);
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = &tx->entry[tx->write];
+ entry->skb = skb;
+
+ retval = tsnep_tx_map(skb, tx, count);
+ if (retval != 0) {
+ tsnep_tx_unmap(tx, count);
+ dev_kfree_skb_any(entry->skb);
+ entry->skb = NULL;
+
+ tx->dropped++;
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ netdev_err(tx->adapter->netdev, "TX DMA map failed\n");
+
+ return NETDEV_TX_OK;
+ }
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ for (i = 0; i < count; i++)
+ tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE,
+ i == (count - 1));
+ tx->write = (tx->write + count) % TSNEP_RING_SIZE;
+
+ skb_tx_timestamp(skb);
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
+
+ if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
+ /* ring can get full with next frame */
+ netif_stop_queue(tx->adapter->netdev);
+ }
+
+ tx->packets++;
+ tx->bytes += skb_pagelen(entry->skb) + ETH_FCS_LEN;
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
+{
+ unsigned long flags;
+ int budget = 128;
+ struct tsnep_tx_entry *entry;
+ int count;
+
+ spin_lock_irqsave(&tx->lock, flags);
+
+ do {
+ if (tx->read == tx->write)
+ break;
+
+ entry = &tx->entry[tx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_TX_DESC_OWNER_MASK) !=
+ (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
+ break;
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ count = 1;
+ if (skb_shinfo(entry->skb)->nr_frags > 0)
+ count += skb_shinfo(entry->skb)->nr_frags;
+
+ tsnep_tx_unmap(tx, count);
+
+ if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ (__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
+ struct skb_shared_hwtstamps hwtstamps;
+ u64 timestamp =
+ __le64_to_cpu(entry->desc_wb->timestamp);
+
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(timestamp);
+
+ skb_tstamp_tx(entry->skb, &hwtstamps);
+ }
+
+ napi_consume_skb(entry->skb, budget);
+ entry->skb = NULL;
+
+ tx->read = (tx->read + count) % TSNEP_RING_SIZE;
+
+ budget--;
+ } while (likely(budget));
+
+ if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
+ netif_queue_stopped(tx->adapter->netdev)) {
+ netif_wake_queue(tx->adapter->netdev);
+ }
+
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return (budget != 0);
+}
+
+static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
+ struct tsnep_tx *tx)
+{
+ dma_addr_t dma;
+ int retval;
+
+ memset(tx, 0, sizeof(*tx));
+ tx->adapter = adapter;
+ tx->addr = addr;
+
+ retval = tsnep_tx_ring_init(tx);
+ if (retval)
+ return retval;
+
+ dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
+ iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
+ iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
+ tx->owner_counter = 1;
+ tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+
+ spin_lock_init(&tx->lock);
+
+ return 0;
+}
+
+static void tsnep_tx_close(struct tsnep_tx *tx)
+{
+ u32 val;
+
+ readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
+ ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
+ 1000000);
+
+ tsnep_tx_ring_cleanup(tx);
+}
+
+static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct tsnep_rx_entry *entry;
+ int i;
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &rx->entry[i];
+ if (dma_unmap_addr(entry, dma))
+ dma_unmap_single(dmadev, dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_FROM_DEVICE);
+ if (entry->skb)
+ dev_kfree_skb(entry->skb);
+ }
+
+ memset(rx->entry, 0, sizeof(rx->entry));
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ if (rx->page[i]) {
+ dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
+ rx->page_dma[i]);
+ rx->page[i] = NULL;
+ rx->page_dma[i] = 0;
+ }
+ }
+}
+
+static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx,
+ struct tsnep_rx_entry *entry)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+
+ skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH,
+ GFP_ATOMIC | GFP_DMA);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, RX_SKB_RESERVE);
+
+ dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dmadev, dma)) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ entry->skb = skb;
+ entry->len = RX_SKB_LENGTH;
+ dma_unmap_addr_set(entry, dma, dma);
+ entry->desc->rx = __cpu_to_le64(dma);
+
+ return 0;
+}
+
+static int tsnep_rx_ring_init(struct tsnep_rx *rx)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct tsnep_rx_entry *entry;
+ struct tsnep_rx_entry *next_entry;
+ int i, j;
+ int retval;
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ rx->page[i] =
+ dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
+ GFP_KERNEL);
+ if (!rx->page[i]) {
+ retval = -ENOMEM;
+ goto failed;
+ }
+ for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
+ entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
+ entry->desc_wb = (struct tsnep_rx_desc_wb *)
+ (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
+ entry->desc = (struct tsnep_rx_desc *)
+ (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
+ entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
+ }
+ }
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &rx->entry[i];
+ next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
+ entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
+
+ retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ if (retval)
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ tsnep_rx_ring_cleanup(rx);
+ return retval;
+}
+
+static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+
+ /* RX_SKB_LENGTH is a multiple of 4 */
+ entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
+ entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
+ if (index == rx->increment_owner_counter) {
+ rx->owner_counter++;
+ if (rx->owner_counter == 4)
+ rx->owner_counter = 1;
+ rx->increment_owner_counter--;
+ if (rx->increment_owner_counter < 0)
+ rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+ }
+ entry->properties |=
+ (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
+ TSNEP_DESC_OWNER_COUNTER_MASK;
+
+ /* descriptor properties shall be written last, because valid data is
+ * signaled there
+ */
+ dma_wmb();
+
+ entry->desc->properties = __cpu_to_le32(entry->properties);
+}
+
+static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
+ int budget)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ int done = 0;
+ struct tsnep_rx_entry *entry;
+ struct sk_buff *skb;
+ size_t len;
+ dma_addr_t dma;
+ int length;
+ bool enable = false;
+ int retval;
+
+ while (likely(done < budget)) {
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) !=
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ break;
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ skb = entry->skb;
+ len = dma_unmap_len(entry, len);
+ dma = dma_unmap_addr(entry, dma);
+
+ /* forward the skb only if the allocation is successful;
+ * otherwise the skb is reused and the frame is dropped
+ */
+ retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ if (!retval) {
+ dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE);
+
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ skb_put(skb, length - ETH_FCS_LEN);
+ if (rx->adapter->hwtstamp_config.rx_filter ==
+ HWTSTAMP_FILTER_ALL) {
+ struct skb_shared_hwtstamps *hwtstamps =
+ skb_hwtstamps(skb);
+ struct tsnep_rx_inline *rx_inline =
+ (struct tsnep_rx_inline *)skb->data;
+ u64 timestamp =
+ __le64_to_cpu(rx_inline->timestamp);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(timestamp);
+ }
+ skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE);
+ skb->protocol = eth_type_trans(skb,
+ rx->adapter->netdev);
+
+ rx->packets++;
+ rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ rx->multicast++;
+
+ napi_gro_receive(napi, skb);
+ done++;
+ } else {
+ rx->dropped++;
+ }
+
+ tsnep_rx_activate(rx, rx->read);
+
+ enable = true;
+
+ rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
+ }
+
+ if (enable) {
+ /* descriptor properties shall be valid before hardware is
+ * notified
+ */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
+ }
+
+ return done;
+}
+
+static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
+ struct tsnep_rx *rx)
+{
+ dma_addr_t dma;
+ int i;
+ int retval;
+
+ memset(rx, 0, sizeof(*rx));
+ rx->adapter = adapter;
+ rx->addr = addr;
+
+ retval = tsnep_rx_ring_init(rx);
+ if (retval)
+ return retval;
+
+ dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
+ iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
+ iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
+ rx->owner_counter = 1;
+ rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++)
+ tsnep_rx_activate(rx, i);
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
+
+ return 0;
+}
+
+static void tsnep_rx_close(struct tsnep_rx *rx)
+{
+ u32 val;
+
+ iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
+ readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
+ ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
+ 1000000);
+
+ tsnep_rx_ring_cleanup(rx);
+}
+
+static int tsnep_poll(struct napi_struct *napi, int budget)
+{
+ struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
+ napi);
+ bool complete = true;
+ int done = 0;
+
+ if (queue->tx)
+ complete = tsnep_tx_poll(queue->tx, budget);
+
+ if (queue->rx) {
+ done = tsnep_rx_poll(queue->rx, napi, budget);
+ if (done >= budget)
+ complete = false;
+ }
+
+ /* if all work not completed, return budget and keep polling */
+ if (!complete)
+ return budget;
+
+ if (likely(napi_complete_done(napi, done)))
+ tsnep_enable_irq(queue->adapter, queue->irq_mask);
+
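+ /* NAPI contract: returning the full budget means "keep polling", so
+ * after napi_complete_done() at most budget - 1 may be reported
+ */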
+ return min(done, budget - 1);
+}
+
+static int tsnep_netdev_open(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i;
+ void __iomem *addr;
+ int tx_queue_index = 0;
+ int rx_queue_index = 0;
+ int retval;
+
+ retval = tsnep_phy_open(adapter);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ adapter->queue[i].adapter = adapter;
+ if (adapter->queue[i].tx) {
+ addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
+ retval = tsnep_tx_open(adapter, addr,
+ adapter->queue[i].tx);
+ if (retval)
+ goto failed;
+ tx_queue_index++;
+ }
+ if (adapter->queue[i].rx) {
+ addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
+ retval = tsnep_rx_open(adapter, addr,
+ adapter->queue[i].rx);
+ if (retval)
+ goto failed;
+ rx_queue_index++;
+ }
+ }
+
+ retval = netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_queues);
+ if (retval)
+ goto failed;
+ retval = netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+ if (retval)
+ goto failed;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
+ tsnep_poll, 64);
+ napi_enable(&adapter->queue[i].napi);
+
+ tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
+ }
+
+ return 0;
+
+failed:
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].rx)
+ tsnep_rx_close(adapter->queue[i].rx);
+ if (adapter->queue[i].tx)
+ tsnep_tx_close(adapter->queue[i].tx);
+ }
+ tsnep_phy_close(adapter);
+ return retval;
+}
+
+static int tsnep_netdev_close(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
+
+ napi_disable(&adapter->queue[i].napi);
+ netif_napi_del(&adapter->queue[i].napi);
+
+ if (adapter->queue[i].rx)
+ tsnep_rx_close(adapter->queue[i].rx);
+ if (adapter->queue[i].tx)
+ tsnep_tx_close(adapter->queue[i].tx);
+ }
+
+ tsnep_phy_close(adapter);
+
+ return 0;
+}
+
+static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+
+ if (queue_mapping >= adapter->num_tx_queues)
+ queue_mapping = 0;
+
+ return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
+}
+
+static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+ if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
+ return tsnep_ptp_ioctl(netdev, ifr, cmd);
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+static void tsnep_netdev_set_multicast(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u16 rx_filter = 0;
+
+ /* configured MAC address and broadcasts are never filtered */
+ if (netdev->flags & IFF_PROMISC) {
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
+ } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
+ }
+ iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
+}
+
+static void tsnep_netdev_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u32 reg;
+ u32 val;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ stats->tx_packets += adapter->tx[i].packets;
+ stats->tx_bytes += adapter->tx[i].bytes;
+ stats->tx_dropped += adapter->tx[i].dropped;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ stats->rx_packets += adapter->rx[i].packets;
+ stats->rx_bytes += adapter->rx[i].bytes;
+ stats->rx_dropped += adapter->rx[i].dropped;
+ stats->multicast += adapter->rx[i].multicast;
+
+ reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
+ TSNEP_RX_STATISTIC);
+ val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
+ TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
+ stats->rx_dropped += val;
+ val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
+ TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
+ stats->rx_dropped += val;
+ val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
+ TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_fifo_errors += val;
+ val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
+ TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_frame_errors += val;
+ }
+
+ reg = ioread32(adapter->addr + ECM_STAT);
+ val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
+ stats->rx_errors += val;
+ val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_crc_errors += val;
+ val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
+ stats->rx_errors += val;
+}
+
+static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
+{
+ iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
+ iowrite16(*(u16 *)(addr + sizeof(u32)),
+ adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
+
+ ether_addr_copy(adapter->mac_address, addr);
+ netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
+ addr);
+}
+
+static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *sock_addr = addr;
+ int retval;
+
+ retval = eth_prepare_mac_addr_change(netdev, sock_addr);
+ if (retval)
+ return retval;
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
+ tsnep_mac_set_address(adapter, sock_addr->sa_data);
+
+ return 0;
+}
+
+static const struct net_device_ops tsnep_netdev_ops = {
+ .ndo_open = tsnep_netdev_open,
+ .ndo_stop = tsnep_netdev_close,
+ .ndo_start_xmit = tsnep_netdev_xmit_frame,
+ .ndo_eth_ioctl = tsnep_netdev_ioctl,
+ .ndo_set_rx_mode = tsnep_netdev_set_multicast,
+
+ .ndo_get_stats64 = tsnep_netdev_get_stats64,
+ .ndo_set_mac_address = tsnep_netdev_set_mac_address,
+ .ndo_setup_tc = tsnep_tc_setup,
+};
+
+static int tsnep_mac_init(struct tsnep_adapter *adapter)
+{
+ int retval;
+
+ /* initialize RX filtering; the configured MAC address and
+ * broadcast frames are never filtered
+ */
+ iowrite16(0, adapter->addr + TSNEP_RX_FILTER);
+
+ /* try to get MAC address in the following order:
+ * - device tree
+ * - valid MAC address already set
+ * - MAC address register if valid
+ * - random MAC address
+ */
+ retval = of_get_mac_address(adapter->pdev->dev.of_node,
+ adapter->mac_address);
+ if (retval == -EPROBE_DEFER)
+ return retval;
+ if (retval && !is_valid_ether_addr(adapter->mac_address)) {
+ *(u32 *)adapter->mac_address =
+ ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
+ *(u16 *)(adapter->mac_address + sizeof(u32)) =
+ ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
+ if (!is_valid_ether_addr(adapter->mac_address))
+ eth_random_addr(adapter->mac_address);
+ }
+
+ tsnep_mac_set_address(adapter, adapter->mac_address);
+ eth_hw_addr_set(adapter->netdev, adapter->mac_address);
+
+ return 0;
+}
+
+static int tsnep_mdio_init(struct tsnep_adapter *adapter)
+{
+ struct device_node *np = adapter->pdev->dev.of_node;
+ int retval;
+
+ if (np) {
+ np = of_get_child_by_name(np, "mdio");
+ if (!np)
+ return 0;
+
+ adapter->suppress_preamble =
+ of_property_read_bool(np, "suppress-preamble");
+ }
+
+ adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
+ if (!adapter->mdiobus) {
+ retval = -ENOMEM;
+
+ goto out;
+ }
+
+ adapter->mdiobus->priv = (void *)adapter;
+ adapter->mdiobus->parent = &adapter->pdev->dev;
+ adapter->mdiobus->read = tsnep_mdiobus_read;
+ adapter->mdiobus->write = tsnep_mdiobus_write;
+ adapter->mdiobus->name = TSNEP "-mdiobus";
+ snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
+ adapter->pdev->name);
+
+ /* do not scan broadcast address */
+ adapter->mdiobus->phy_mask = 0x00000001;
+
+ retval = of_mdiobus_register(adapter->mdiobus, np);
+
+out:
+ if (np)
+ of_node_put(np);
+
+ return retval;
+}
+
+static int tsnep_phy_init(struct tsnep_adapter *adapter)
+{
+ struct device_node *phy_node;
+ int retval;
+
+ retval = of_get_phy_mode(adapter->pdev->dev.of_node,
+ &adapter->phy_mode);
+ if (retval)
+ adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
+
+ phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
+ 0);
+ adapter->phydev = of_phy_find_device(phy_node);
+ of_node_put(phy_node);
+ if (!adapter->phydev && adapter->mdiobus)
+ adapter->phydev = phy_find_first(adapter->mdiobus);
+ if (!adapter->phydev)
+ return -EIO;
+
+ return 0;
+}
+
+static int tsnep_probe(struct platform_device *pdev)
+{
+ struct tsnep_adapter *adapter;
+ struct net_device *netdev;
+ struct resource *io;
+ u32 type;
+ int revision;
+ int version;
+ int retval;
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct tsnep_adapter),
+ TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
+ if (!netdev)
+ return -ENODEV;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ adapter = netdev_priv(netdev);
+ platform_set_drvdata(pdev, adapter);
+ adapter->pdev = pdev;
+ adapter->dmadev = &pdev->dev;
+ adapter->netdev = netdev;
+ adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
+
+ mutex_init(&adapter->gate_control_lock);
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adapter->addr = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(adapter->addr))
+ return PTR_ERR(adapter->addr);
+ adapter->irq = platform_get_irq(pdev, 0);
+ netdev->mem_start = io->start;
+ netdev->mem_end = io->end;
+ netdev->irq = adapter->irq;
+
+ type = ioread32(adapter->addr + ECM_TYPE);
+ revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
+ version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
+ adapter->gate_control = type & ECM_GATE_CONTROL;
+
+ adapter->num_tx_queues = TSNEP_QUEUES;
+ adapter->num_rx_queues = TSNEP_QUEUES;
+ adapter->num_queues = TSNEP_QUEUES;
+ adapter->queue[0].tx = &adapter->tx[0];
+ adapter->queue[0].rx = &adapter->rx[0];
+ adapter->queue[0].irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+ retval = devm_request_irq(&adapter->pdev->dev, adapter->irq, tsnep_irq,
+ 0, TSNEP, adapter);
+ if (retval != 0) {
+ dev_err(&adapter->pdev->dev, "can't get assigned irq %d.\n",
+ adapter->irq);
+ return retval;
+ }
+ tsnep_enable_irq(adapter, ECM_INT_LINK);
+
+ retval = tsnep_mac_init(adapter);
+ if (retval)
+ goto mac_init_failed;
+
+ retval = tsnep_mdio_init(adapter);
+ if (retval)
+ goto mdio_init_failed;
+
+ retval = tsnep_phy_init(adapter);
+ if (retval)
+ goto phy_init_failed;
+
+ retval = tsnep_ptp_init(adapter);
+ if (retval)
+ goto ptp_init_failed;
+
+ retval = tsnep_tc_init(adapter);
+ if (retval)
+ goto tc_init_failed;
+
+ netdev->netdev_ops = &tsnep_netdev_ops;
+ netdev->ethtool_ops = &tsnep_ethtool_ops;
+ netdev->features = NETIF_F_SG;
+ netdev->hw_features = netdev->features;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ retval = register_netdev(netdev);
+ if (retval)
+ goto register_failed;
+
+ dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
+ revision);
+ if (adapter->gate_control)
+ dev_info(&adapter->pdev->dev, "gate control detected\n");
+
+ return 0;
+
+register_failed:
+ tsnep_tc_cleanup(adapter);
+tc_init_failed:
+ tsnep_ptp_cleanup(adapter);
+ptp_init_failed:
+phy_init_failed:
+ if (adapter->mdiobus)
+ mdiobus_unregister(adapter->mdiobus);
+mdio_init_failed:
+mac_init_failed:
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+ return retval;
+}
+
+static int tsnep_remove(struct platform_device *pdev)
+{
+ struct tsnep_adapter *adapter = platform_get_drvdata(pdev);
+
+ unregister_netdev(adapter->netdev);
+
+ tsnep_tc_cleanup(adapter);
+
+ tsnep_ptp_cleanup(adapter);
+
+ if (adapter->mdiobus)
+ mdiobus_unregister(adapter->mdiobus);
+
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+
+ return 0;
+}
+
+static const struct of_device_id tsnep_of_match[] = {
+ { .compatible = "engleder,tsnep", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tsnep_of_match);
+
+static struct platform_driver tsnep_driver = {
+ .driver = {
+ .name = TSNEP,
+ .of_match_table = of_match_ptr(tsnep_of_match),
+ },
+ .probe = tsnep_probe,
+ .remove = tsnep_remove,
+};
+module_platform_driver(tsnep_driver);
+
+MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
+MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c
new file mode 100644
index 000000000000..eaad453d487e
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_ptp.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time)
+{
+ u32 high_before;
+ u32 low;
+ u32 high;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ do {
+ low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while (high != high_before);
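+ /* if the low dword wrapped between the two high reads, high differs
+ * from high_before and the loop retries; low and high therefore always
+ * belong to the same 64 bit value
+ */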
+ *time = (((u64)high) << 32) | ((u64)low);
+}
+
+int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (!ifr)
+ return -EINVAL;
+
+ if (cmd == SIOCSHWTSTAMP) {
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ memcpy(&adapter->hwtstamp_config, &config,
+ sizeof(adapter->hwtstamp_config));
+ }
+
+ if (copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
+ sizeof(adapter->hwtstamp_config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int tsnep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ bool negative = false;
+ u64 rate_offset;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ negative = true;
+ }
+
+ /* convert from 16 bit to 32 bit binary fractional, divide by 1000000
+ * to eliminate ppm and multiply by 8 to compensate for the 8 ns clock
+ * cycle time; the calculation simplifies because 1000000 / 8 = 15625 * 8,
+ * so the shift is reduced by 3 and the divisor becomes 15625
+ */
+ rate_offset = scaled_ppm;
+ rate_offset <<= 16 - 3;
+ rate_offset = div_u64(rate_offset, 15625);
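+ /* e.g. scaled_ppm = 65536 (1 ppm) yields 65536 << 13 / 15625 = 34359,
+ * which is 8 * 10^-6 in 2^-32 binary fractional units
+ */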
+
+ rate_offset &= ECM_CLOCK_RATE_OFFSET_MASK;
+ if (negative)
+ rate_offset |= ECM_CLOCK_RATE_OFFSET_SIGN;
+ iowrite32(rate_offset & 0xFFFFFFFF, adapter->addr + ECM_CLOCK_RATE);
+
+ return 0;
+}
+
+static int tsnep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u64 system_time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_lock, flags);
+
+ tsnep_get_system_time(adapter, &system_time);
+
+ system_time += delta;
+
+ /* high dword is buffered in hardware and synchronously written to
+ * system time when low dword is written
+ */
+ iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ iowrite32(system_time & 0xFFFFFFFF,
+ adapter->addr + ECM_SYSTEM_TIME_LOW);
+
+ spin_unlock_irqrestore(&adapter->ptp_lock, flags);
+
+ return 0;
+}
+
+static int tsnep_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u32 high_before;
+ u32 low;
+ u32 high;
+ u64 system_time;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ do {
+ ptp_read_system_prets(sts);
+ low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ ptp_read_system_postts(sts);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while (high != high_before);
+ system_time = (((u64)high) << 32) | ((u64)low);
+
+ *ts = ns_to_timespec64(system_time);
+
+ return 0;
+}
+
+static int tsnep_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u64 system_time = timespec64_to_ns(ts);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_lock, flags);
+
+ /* high dword is buffered in hardware and synchronously written to
+ * system time when low dword is written
+ */
+ iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ iowrite32(system_time & 0xFFFFFFFF,
+ adapter->addr + ECM_SYSTEM_TIME_LOW);
+
+ spin_unlock_irqrestore(&adapter->ptp_lock, flags);
+
+ return 0;
+}
+
+int tsnep_ptp_init(struct tsnep_adapter *adapter)
+{
+ int retval = 0;
+
+ adapter->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ snprintf(adapter->ptp_clock_info.name, 16, "%s", TSNEP);
+ adapter->ptp_clock_info.owner = THIS_MODULE;
+ /* at most 2^-1 ns adjustment per clock cycle for the 8 ns clock cycle
+ * time; stay slightly below that limit because only bits below 2^-1 ns
+ * are supported
+ */
+ adapter->ptp_clock_info.max_adj = (500000000 / 8 - 1);
+ adapter->ptp_clock_info.adjfine = tsnep_ptp_adjfine;
+ adapter->ptp_clock_info.adjtime = tsnep_ptp_adjtime;
+ adapter->ptp_clock_info.gettimex64 = tsnep_ptp_gettimex64;
+ adapter->ptp_clock_info.settime64 = tsnep_ptp_settime64;
+
+ spin_lock_init(&adapter->ptp_lock);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ netdev_err(adapter->netdev, "ptp_clock_register failed\n");
+
+ retval = PTR_ERR(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ } else if (adapter->ptp_clock) {
+ netdev_info(adapter->netdev, "PHC added\n");
+ }
+
+ return retval;
+}
+
+void tsnep_ptp_cleanup(struct tsnep_adapter *adapter)
+{
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ netdev_info(adapter->netdev, "PHC removed\n");
+ }
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_selftests.c b/drivers/net/ethernet/engleder/tsnep_selftests.c
new file mode 100644
index 000000000000..1581d6b22232
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_selftests.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#include <net/pkt_sched.h>
+
+enum tsnep_test {
+ TSNEP_TEST_ENABLE = 0,
+ TSNEP_TEST_TAPRIO,
+ TSNEP_TEST_TAPRIO_CHANGE,
+ TSNEP_TEST_TAPRIO_EXTENSION,
+};
+
+static const char tsnep_test_strings[][ETH_GSTRING_LEN] = {
+ "Enable timeout (offline)",
+ "TAPRIO (offline)",
+ "TAPRIO change (offline)",
+ "TAPRIO extension (offline)",
+};
+
+#define TSNEP_TEST_COUNT (sizeof(tsnep_test_strings) / ETH_GSTRING_LEN)
+
+static bool enable_gc_timeout(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+ if (!(ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_ACTIVE))
+ return false;
+
+ return true;
+}
+
+static bool gc_timeout_signaled(struct tsnep_adapter *adapter)
+{
+ if (ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_SIGNAL)
+ return true;
+
+ return false;
+}
+
+static bool ack_gc_timeout(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+ if (ioread32(adapter->addr + TSNEP_GC) &
+ (TSNEP_GC_TIMEOUT_ACTIVE | TSNEP_GC_TIMEOUT_SIGNAL))
+ return false;
+ return true;
+}
+
+static bool enable_gc(struct tsnep_adapter *adapter, bool a)
+{
+ u8 enable;
+ u8 active;
+
+ if (a) {
+ enable = TSNEP_GC_ENABLE_A;
+ active = TSNEP_GC_ACTIVE_A;
+ } else {
+ enable = TSNEP_GC_ENABLE_B;
+ active = TSNEP_GC_ACTIVE_B;
+ }
+
+ iowrite8(enable, adapter->addr + TSNEP_GC);
+ if (!(ioread32(adapter->addr + TSNEP_GC) & active))
+ return false;
+
+ return true;
+}
+
+static bool disable_gc(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ if (ioread32(adapter->addr + TSNEP_GC) &
+ (TSNEP_GC_ACTIVE_A | TSNEP_GC_ACTIVE_B))
+ return false;
+
+ return true;
+}
+
+static bool gc_delayed_enable(struct tsnep_adapter *adapter, bool a, int delay)
+{
+ u64 before, after;
+ u32 time;
+ bool enabled;
+
+ if (!disable_gc(adapter))
+ return false;
+
+ before = ktime_get_ns();
+
+ if (!enable_gc_timeout(adapter))
+ return false;
+
+ /* the start time is set after the timeout, so the timeout can
+ * guarantee that enable is blocked if it happens too late
+ */
+ time = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ time += TSNEP_GC_TIMEOUT;
+ iowrite32(time, adapter->addr + TSNEP_GC_TIME);
+
+ ndelay(delay);
+
+ enabled = enable_gc(adapter, a);
+ after = ktime_get_ns();
+
+ if (delay > TSNEP_GC_TIMEOUT) {
+ /* timeout must have blocked enable */
+ if (enabled)
+ return false;
+ } else if ((after - before) < TSNEP_GC_TIMEOUT * 14 / 16) {
+ /* timeout must not have blocked enable */
+ if (!enabled)
+ return false;
+ }
+
+ if (enabled) {
+ if (gc_timeout_signaled(adapter))
+ return false;
+ } else {
+ if (!gc_timeout_signaled(adapter))
+ return false;
+ if (!ack_gc_timeout(adapter))
+ return false;
+ }
+
+ if (!disable_gc(adapter))
+ return false;
+
+ return true;
+}
+
+static bool tsnep_test_gc_enable(struct tsnep_adapter *adapter)
+{
+ int i;
+
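+ /* single operation GCL in bank A: TSNEP_GCL_LAST | gate 0 open with an
+ * interval of 100 us
+ */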
+ iowrite32(0x80000001, adapter->addr + TSNEP_GCL_A + 0);
+ iowrite32(100000, adapter->addr + TSNEP_GCL_A + 4);
+
+ for (i = 0; i < 200000; i += 100) {
+ if (!gc_delayed_enable(adapter, true, i))
+ return false;
+ }
+
+ iowrite32(0x80000001, adapter->addr + TSNEP_GCL_B + 0);
+ iowrite32(100000, adapter->addr + TSNEP_GCL_B + 4);
+
+ for (i = 0; i < 200000; i += 100) {
+ if (!gc_delayed_enable(adapter, false, i))
+ return false;
+ }
+
+ return true;
+}
+
+static void delay_base_time(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ u64 system_time;
+ u64 base_time = ktime_to_ns(qopt->base_time);
+ u64 n;
+
+ tsnep_get_system_time(adapter, &system_time);
+ system_time += ms * 1000000;
+ n = div64_u64(system_time - base_time, qopt->cycle_time);
+
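+ /* advance base_time by n + 1 cycle times so that it lands on the first
+ * cycle start more than ms milliseconds in the future
+ */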
+ qopt->base_time = ktime_add_ns(qopt->base_time,
+ (n + 1) * qopt->cycle_time);
+}
+
+static void get_gate_state(struct tsnep_adapter *adapter, u32 *gc, u32 *gc_time,
+ u64 *system_time)
+{
+ u32 time_high_before;
+ u32 time_low;
+ u32 time_high;
+ u32 gc_time_before;
+
+ time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
+ do {
+ time_low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ *gc = ioread32(adapter->addr + TSNEP_GC);
+
+ gc_time_before = *gc_time;
+ *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
+ time_high_before = time_high;
+ time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while ((time_high != time_high_before) ||
+ (*gc_time != gc_time_before));
+
+ *system_time = (((u64)time_high) << 32) | ((u64)time_low);
+}
+
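+/* get the index of the GCL operation that is active at system_time and store
+ * the end time of that operation in next
+ */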
+static int get_operation(struct tsnep_gcl *gcl, u64 system_time, u64 *next)
+{
+ u64 n = div64_u64(system_time - gcl->base_time, gcl->cycle_time);
+ u64 cycle_start = gcl->base_time + gcl->cycle_time * n;
+ int i;
+
+ *next = cycle_start;
+ for (i = 0; i < gcl->count; i++) {
+ *next += gcl->operation[i].interval;
+ if (*next > system_time)
+ break;
+ }
+
+ return i;
+}
+
+static bool check_gate(struct tsnep_adapter *adapter)
+{
+ u32 gc_time;
+ u32 gc;
+ u64 system_time;
+ struct tsnep_gcl *curr;
+ struct tsnep_gcl *prev;
+ u64 next_time;
+ u8 gate_open;
+ u8 next_gate_open;
+
+ get_gate_state(adapter, &gc, &gc_time, &system_time);
+
+ if (gc & TSNEP_GC_ACTIVE_A) {
+ curr = &adapter->gcl[0];
+ prev = &adapter->gcl[1];
+ } else if (gc & TSNEP_GC_ACTIVE_B) {
+ curr = &adapter->gcl[1];
+ prev = &adapter->gcl[0];
+ } else {
+ return false;
+ }
+ if (curr->start_time <= system_time) {
+ /* GCL is already active */
+ int index;
+
+ index = get_operation(curr, system_time, &next_time);
+ gate_open = curr->operation[index].properties & TSNEP_GCL_MASK;
+ if (index == curr->count - 1)
+ index = 0;
+ else
+ index++;
+ next_gate_open =
+ curr->operation[index].properties & TSNEP_GCL_MASK;
+ } else if (curr->change) {
+ /* operation of previous GCL is active */
+ int index;
+ u64 start_before;
+ u64 n;
+
+ index = get_operation(prev, system_time, &next_time);
+ next_time = curr->start_time;
+ start_before = prev->base_time;
+ n = div64_u64(curr->start_time - start_before,
+ prev->cycle_time);
+ start_before += n * prev->cycle_time;
+ if (curr->start_time == start_before)
+ start_before -= prev->cycle_time;
+ if (((start_before + prev->cycle_time_extension) >=
+ curr->start_time) &&
+ (curr->start_time - prev->cycle_time_extension <=
+ system_time)) {
+ /* extend */
+ index = prev->count - 1;
+ }
+ gate_open = prev->operation[index].properties & TSNEP_GCL_MASK;
+ next_gate_open =
+ curr->operation[0].properties & TSNEP_GCL_MASK;
+ } else {
+ /* GCL is waiting for start */
+ next_time = curr->start_time;
+ gate_open = 0xFF;
+ next_gate_open = curr->operation[0].properties & TSNEP_GCL_MASK;
+ }
+
+ if (gc_time != (next_time & 0xFFFFFFFF)) {
+ dev_err(&adapter->pdev->dev, "gate control time 0x%x!=0x%llx\n",
+ gc_time, next_time);
+ return false;
+ }
+ if (((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT) != gate_open) {
+ dev_err(&adapter->pdev->dev,
+ "gate control open 0x%02x!=0x%02x\n",
+ ((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT),
+ gate_open);
+ return false;
+ }
+ if (((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT) !=
+ next_gate_open) {
+ dev_err(&adapter->pdev->dev,
+ "gate control next open 0x%02x!=0x%02x\n",
+ ((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT),
+ next_gate_open);
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_gate_duration(struct tsnep_adapter *adapter, s64 ms)
+{
+ ktime_t start = ktime_get();
+
+ do {
+ if (!check_gate(adapter))
+ return false;
+ } while (ktime_ms_delta(ktime_get(), start) < ms);
+
+ return true;
+}
+
+static bool enable_check_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ int retval;
+
+ retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, qopt);
+ if (retval)
+ return false;
+
+ if (!check_gate_duration(adapter, ms))
+ return false;
+
+ return true;
+}
+
+static bool disable_taprio(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload qopt;
+ int retval;
+
+ memset(&qopt, 0, sizeof(qopt));
+ qopt.enable = 0;
+ retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, &qopt);
+ if (retval)
+ return false;
+
+ return true;
+}
+
+static bool run_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ if (!enable_check_taprio(adapter, qopt, ms))
+ return false;
+
+ if (!disable_taprio(adapter))
+ return false;
+
+ return true;
+}
+
+static bool tsnep_test_taprio(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1500000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x02;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0x03;
+ qopt->entries[1].interval = 800000;
+ qopt->entries[2].gate_mask = 0x07;
+ qopt->entries[2].interval = 240000;
+ qopt->entries[3].gate_mask = 0x01;
+ qopt->entries[3].interval = 80000;
+ qopt->entries[4].gate_mask = 0x04;
+ qopt->entries[4].interval = 70000;
+ qopt->entries[5].gate_mask = 0x06;
+ qopt->entries[5].interval = 60000;
+ qopt->entries[6].gate_mask = 0x0F;
+ qopt->entries[6].interval = 50000;
+ qopt->num_entries = 7;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 411854;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x17;
+ qopt->entries[0].interval = 23842;
+ qopt->entries[1].gate_mask = 0x16;
+ qopt->entries[1].interval = 13482;
+ qopt->entries[2].gate_mask = 0x15;
+ qopt->entries[2].interval = 49428;
+ qopt->entries[3].gate_mask = 0x14;
+ qopt->entries[3].interval = 38189;
+ qopt->entries[4].gate_mask = 0x13;
+ qopt->entries[4].interval = 92321;
+ qopt->entries[5].gate_mask = 0x12;
+ qopt->entries[5].interval = 71239;
+ qopt->entries[6].gate_mask = 0x11;
+ qopt->entries[6].interval = 69932;
+ qopt->entries[7].gate_mask = 0x10;
+ qopt->entries[7].interval = 53421;
+ qopt->num_entries = 8;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ delay_base_time(adapter, qopt, 12);
+ qopt->cycle_time = 125000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x27;
+ qopt->entries[0].interval = 15000;
+ qopt->entries[1].gate_mask = 0x26;
+ qopt->entries[1].interval = 15000;
+ qopt->entries[2].gate_mask = 0x25;
+ qopt->entries[2].interval = 12500;
+ qopt->entries[3].gate_mask = 0x24;
+ qopt->entries[3].interval = 17500;
+ qopt->entries[4].gate_mask = 0x23;
+ qopt->entries[4].interval = 10000;
+ qopt->entries[5].gate_mask = 0x22;
+ qopt->entries[5].interval = 11000;
+ qopt->entries[6].gate_mask = 0x21;
+ qopt->entries[6].interval = 9000;
+ qopt->entries[7].gate_mask = 0x20;
+ qopt->entries[7].interval = 10000;
+ qopt->entries[8].gate_mask = 0x20;
+ qopt->entries[8].interval = 12500;
+ qopt->entries[9].gate_mask = 0x20;
+ qopt->entries[9].interval = 12500;
+ qopt->num_entries = 10;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+static bool tsnep_test_taprio_change(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 100000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x30;
+ qopt->entries[0].interval = 20000;
+ qopt->entries[1].gate_mask = 0x31;
+ qopt->entries[1].interval = 80000;
+ qopt->num_entries = 2;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to identical */
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ delay_base_time(adapter, qopt, 17);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to same cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x42;
+ qopt->entries[1].gate_mask = 0x43;
+ delay_base_time(adapter, qopt, 2);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x54;
+ qopt->entries[0].interval = 33333;
+ qopt->entries[1].gate_mask = 0x55;
+ qopt->entries[1].interval = 66667;
+ delay_base_time(adapter, qopt, 23);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x66;
+ qopt->entries[0].interval = 50000;
+ qopt->entries[1].gate_mask = 0x67;
+ qopt->entries[1].interval = 25000;
+ qopt->entries[2].gate_mask = 0x68;
+ qopt->entries[2].interval = 25000;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to multiple of cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 200000;
+ qopt->entries[0].gate_mask = 0x79;
+ qopt->entries[0].interval = 50000;
+ qopt->entries[1].gate_mask = 0x7A;
+ qopt->entries[1].interval = 150000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->entries[0].gate_mask = 0x7B;
+ qopt->entries[0].interval = 125000;
+ qopt->entries[1].gate_mask = 0x7C;
+ qopt->entries[1].interval = 250000;
+ qopt->entries[2].gate_mask = 0x7D;
+ qopt->entries[2].interval = 375000;
+ qopt->entries[3].gate_mask = 0x7E;
+ qopt->entries[3].interval = 250000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 3);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to shorter cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 333333;
+ qopt->entries[0].gate_mask = 0x8F;
+ qopt->entries[0].interval = 166666;
+ qopt->entries[1].gate_mask = 0x80;
+ qopt->entries[1].interval = 166667;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 62500;
+ qopt->entries[0].gate_mask = 0x81;
+ qopt->entries[0].interval = 31250;
+ qopt->entries[1].gate_mask = 0x82;
+ qopt->entries[1].interval = 15625;
+ qopt->entries[2].gate_mask = 0x83;
+ qopt->entries[2].interval = 15625;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 1);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to longer cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 400000;
+ qopt->entries[0].gate_mask = 0x84;
+ qopt->entries[0].interval = 100000;
+ qopt->entries[1].gate_mask = 0x85;
+ qopt->entries[1].interval = 100000;
+ qopt->entries[2].gate_mask = 0x86;
+ qopt->entries[2].interval = 100000;
+ qopt->entries[3].gate_mask = 0x87;
+ qopt->entries[3].interval = 100000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 7);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1700000;
+ qopt->entries[0].gate_mask = 0x88;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0x89;
+ qopt->entries[1].interval = 300000;
+ qopt->entries[2].gate_mask = 0x8A;
+ qopt->entries[2].interval = 600000;
+ qopt->entries[3].gate_mask = 0x8B;
+ qopt->entries[3].interval = 100000;
+ qopt->entries[4].gate_mask = 0x8C;
+ qopt->entries[4].interval = 500000;
+ qopt->num_entries = 5;
+ delay_base_time(adapter, qopt, 6);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ if (!disable_taprio(adapter))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+static bool tsnep_test_taprio_extension(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 100000;
+ qopt->cycle_time_extension = 50000;
+ qopt->entries[0].gate_mask = 0x90;
+ qopt->entries[0].interval = 20000;
+ qopt->entries[1].gate_mask = 0x91;
+ qopt->entries[1].interval = 80000;
+ qopt->num_entries = 2;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase */
+ qopt->base_time = ktime_set(0, 50000);
+ qopt->entries[0].gate_mask = 0x92;
+ qopt->entries[0].interval = 33000;
+ qopt->entries[1].gate_mask = 0x93;
+ qopt->entries[1].interval = 67000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 2);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase and longer cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0x94;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x95;
+ qopt->entries[1].interval = 600000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 7);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 700000);
+ qopt->cycle_time = 2000000;
+ qopt->cycle_time_extension = 1900000;
+ qopt->entries[0].gate_mask = 0x96;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x97;
+ qopt->entries[1].interval = 1600000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 9);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase and shorter cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1500000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0x98;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x99;
+ qopt->entries[1].interval = 600000;
+ qopt->entries[2].gate_mask = 0x9A;
+ qopt->entries[2].interval = 500000;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 3);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 100000);
+ qopt->cycle_time = 500000;
+ qopt->cycle_time_extension = 300000;
+ qopt->entries[0].gate_mask = 0x9B;
+ qopt->entries[0].interval = 150000;
+ qopt->entries[1].gate_mask = 0x9C;
+ qopt->entries[1].interval = 350000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 9);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0xAD;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0xAE;
+ qopt->entries[1].interval = 300000;
+ qopt->entries[2].gate_mask = 0xAF;
+ qopt->entries[2].interval = 300000;
+ qopt->num_entries = 3;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 400000;
+ qopt->cycle_time_extension = 100000;
+ qopt->entries[0].gate_mask = 0xA0;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0xA1;
+ qopt->entries[1].interval = 200000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 19);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 500000;
+ qopt->cycle_time_extension = 499999;
+ qopt->entries[0].gate_mask = 0xB2;
+ qopt->entries[0].interval = 100000;
+ qopt->entries[1].gate_mask = 0xB3;
+ qopt->entries[1].interval = 100000;
+ qopt->entries[2].gate_mask = 0xB4;
+ qopt->entries[2].interval = 100000;
+ qopt->entries[3].gate_mask = 0xB5;
+ qopt->entries[3].interval = 200000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 19);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 6000000;
+ qopt->cycle_time_extension = 5999999;
+ qopt->entries[0].gate_mask = 0xC6;
+ qopt->entries[0].interval = 1000000;
+ qopt->entries[1].gate_mask = 0xC7;
+ qopt->entries[1].interval = 1000000;
+ qopt->entries[2].gate_mask = 0xC8;
+ qopt->entries[2].interval = 1000000;
+ qopt->entries[3].gate_mask = 0xC9;
+ qopt->entries[3].interval = 1500000;
+ qopt->entries[4].gate_mask = 0xCA;
+ qopt->entries[4].interval = 1500000;
+ qopt->num_entries = 5;
+ delay_base_time(adapter, qopt, 1);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ if (!disable_taprio(adapter))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+int tsnep_ethtool_get_test_count(void)
+{
+ return TSNEP_TEST_COUNT;
+}
+
+void tsnep_ethtool_get_test_strings(u8 *data)
+{
+ memcpy(data, tsnep_test_strings, sizeof(tsnep_test_strings));
+}
+
+void tsnep_ethtool_self_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ eth_test->len = TSNEP_TEST_COUNT;
+
+ if (eth_test->flags != ETH_TEST_FL_OFFLINE) {
+ /* no tests are done online */
+ data[TSNEP_TEST_ENABLE] = 0;
+ data[TSNEP_TEST_TAPRIO] = 0;
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
+
+ return;
+ }
+
+ if (tsnep_test_gc_enable(adapter)) {
+ data[TSNEP_TEST_ENABLE] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_ENABLE] = 1;
+ }
+
+ if (tsnep_test_taprio(adapter)) {
+ data[TSNEP_TEST_TAPRIO] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO] = 1;
+ }
+
+ if (tsnep_test_taprio_change(adapter)) {
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 1;
+ }
+
+ if (tsnep_test_taprio_extension(adapter)) {
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 1;
+ }
+}
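
These three hooks implement the standard ethtool self-test plumbing; user space reaches them through the ETHTOOL_TEST ioctl, which is what `ethtool -t <dev> offline` issues. A hedged user-space sketch, assuming an interface named eth0 and the four-entry result table above (a robust tool would query ETHTOOL_GDRVINFO for testinfo_len instead of hard-coding 4):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_test *test;
	struct ifreq ifr;
	int fd, i;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	test = calloc(1, sizeof(*test) + 4 * sizeof(__u64));
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;	/* run the offline tests */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)test;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
		printf("self-test %s\n", (test->flags & ETH_TEST_FL_FAILED) ?
		       "FAILED" : "passed");
		for (i = 0; i < 4; i++)	/* 0 = pass, 1 = fail per test */
			printf("  result[%d] = %llu\n", i,
			       (unsigned long long)test->data[i]);
	}
	close(fd);
	free(test);
	return 0;
}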
diff --git a/drivers/net/ethernet/engleder/tsnep_tc.c b/drivers/net/ethernet/engleder/tsnep_tc.c
new file mode 100644
index 000000000000..c4c6e1357317
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_tc.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#include <net/pkt_sched.h>
+
+/* reserve one operation at the end for the additional operation inserted at list change */
+#define TSNEP_MAX_GCL_NUM (TSNEP_GCL_COUNT - 1)
+
+static int tsnep_validate_gcl(struct tc_taprio_qopt_offload *qopt)
+{
+ int i;
+ u64 cycle_time;
+
+ if (!qopt->cycle_time)
+ return -ERANGE;
+ if (qopt->num_entries > TSNEP_MAX_GCL_NUM)
+ return -EINVAL;
+ cycle_time = 0;
+ for (i = 0; i < qopt->num_entries; i++) {
+ if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
+ return -EINVAL;
+ if (qopt->entries[i].gate_mask & ~TSNEP_GCL_MASK)
+ return -EINVAL;
+ if (qopt->entries[i].interval < TSNEP_GCL_MIN_INTERVAL)
+ return -EINVAL;
+ cycle_time += qopt->entries[i].interval;
+ }
+ if (qopt->cycle_time != cycle_time)
+ return -EINVAL;
+ if (qopt->cycle_time_extension >= qopt->cycle_time)
+ return -EINVAL;
+
+ return 0;
+}
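
Note the central constraint enforced here: the entry intervals must sum exactly to cycle_time (and the extension must be shorter than the cycle). The self-test schedules above respect this; a quick check of the second taprio test schedule, with values copied from above:

#include <stdio.h>

int main(void)
{
	unsigned int interval[8] = { 23842, 13482, 49428, 38189,
				     92321, 71239, 69932, 53421 };
	unsigned long long sum = 0;
	int i;

	for (i = 0; i < 8; i++)
		sum += interval[i];
	/* prints 411854, exactly the configured cycle_time */
	printf("%llu\n", sum);
	return 0;
}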
+
+static void tsnep_write_gcl_operation(struct tsnep_gcl *gcl, int index,
+ u32 properties, u32 interval, bool flush)
+{
+ void __iomem *addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * index;
+
+ gcl->operation[index].properties = properties;
+ gcl->operation[index].interval = interval;
+
+ iowrite32(properties, addr);
+ iowrite32(interval, addr + sizeof(u32));
+
+ if (flush) {
+ /* flush write with read access */
+ ioread32(addr);
+ }
+}
+
+static u64 tsnep_change_duration(struct tsnep_gcl *gcl, int index)
+{
+ u64 duration;
+ int count;
+
+ /* change needs to be triggered one or two operations before start of
+ * new gate control list
+ * - change is triggered at start of operation (minimum one operation)
+ * - operation with adjusted interval is inserted on demand to exactly
+ * meet the start of the new gate control list (optional)
+ *
+ * additionally properties are read directly after start of previous
+ * operation
+ *
+ * therefore, three operations need to be considered for the limit
+ */
+ duration = 0;
+ count = 3;
+ while (count) {
+ duration += gcl->operation[index].interval;
+
+ index--;
+ if (index < 0)
+ index = gcl->count - 1;
+
+ count--;
+ }
+
+ return duration;
+}
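
In other words, the change duration at an index is the interval of that operation plus the two operations before it, wrapping around the list. A small sketch with hypothetical intervals:

#include <stdio.h>

static unsigned long long change_duration(const unsigned int *interval,
					   int count, int index)
{
	unsigned long long duration = 0;
	int n;

	/* the operation itself plus the two preceding ones, wrapping */
	for (n = 0; n < 3; n++) {
		duration += interval[index];
		index = index ? index - 1 : count - 1;
	}
	return duration;
}

int main(void)
{
	unsigned int interval[3] = { 10000, 20000, 30000 };

	/* index 0 wraps backwards: 10000 + 30000 + 20000 = 60000 */
	printf("%llu\n", change_duration(interval, 3, 0));
	return 0;
}

tsnep_write_gcl() below then takes the maximum of this value over all operations (the cut case) and of the last operation's duration plus the cycle time extension (the extend case) as change_limit.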
+
+static void tsnep_write_gcl(struct tsnep_gcl *gcl,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ int i;
+ u32 properties;
+ u64 extend;
+ u64 cut;
+
+ gcl->base_time = ktime_to_ns(qopt->base_time);
+ gcl->cycle_time = qopt->cycle_time;
+ gcl->cycle_time_extension = qopt->cycle_time_extension;
+
+ for (i = 0; i < qopt->num_entries; i++) {
+ properties = qopt->entries[i].gate_mask;
+ if (i == (qopt->num_entries - 1))
+ properties |= TSNEP_GCL_LAST;
+
+ tsnep_write_gcl_operation(gcl, i, properties,
+ qopt->entries[i].interval, true);
+ }
+ gcl->count = qopt->num_entries;
+
+ /* calculate change limit; i.e., the time needed between enable and
+ * start of new gate control list
+ */
+
+ /* case 1: extend cycle time for change
+ * - change duration of last operation
+ * - cycle time extension
+ */
+ extend = tsnep_change_duration(gcl, gcl->count - 1);
+ extend += gcl->cycle_time_extension;
+
+ /* case 2: cut cycle time for change
+ * - maximum change duration
+ */
+ cut = 0;
+ for (i = 0; i < gcl->count; i++)
+ cut = max(cut, tsnep_change_duration(gcl, i));
+
+ /* use maximum, because the actual case (extend or cut) can be
+ * determined only after limit is known (chicken-and-egg problem)
+ */
+ gcl->change_limit = max(extend, cut);
+}
+
+static u64 tsnep_gcl_start_after(struct tsnep_gcl *gcl, u64 limit)
+{
+ u64 start = gcl->base_time;
+ u64 n;
+
+ if (start <= limit) {
+ n = div64_u64(limit - start, gcl->cycle_time);
+ start += (n + 1) * gcl->cycle_time;
+ }
+
+ return start;
+}
+
+static u64 tsnep_gcl_start_before(struct tsnep_gcl *gcl, u64 limit)
+{
+ u64 start = gcl->base_time;
+ u64 n;
+
+ n = div64_u64(limit - start, gcl->cycle_time);
+ start += n * gcl->cycle_time;
+ if (start == limit)
+ start -= gcl->cycle_time;
+
+ return start;
+}
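
These two helpers bracket a limit with cycle starts: start_after() returns the first cycle start strictly after the limit, start_before() the last one strictly before it. With a hypothetical base time of 1000 ns, a cycle of 100000 ns and a limit of 351000 ns:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t base = 1000, cycle = 100000, limit = 351000;
	uint64_t n, after = base, before;

	if (after <= limit) {
		n = (limit - after) / cycle;
		after += (n + 1) * cycle;	/* 401000 */
	}

	n = (limit - base) / cycle;
	before = base + n * cycle;		/* 301000 */
	if (before == limit)
		before -= cycle;		/* keep it strictly before */

	printf("after=%llu before=%llu\n", (unsigned long long)after,
	       (unsigned long long)before);
	return 0;
}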
+
+static u64 tsnep_set_gcl_change(struct tsnep_gcl *gcl, int index, u64 change,
+ bool insert)
+{
+ /* previous operation triggers change and properties are evaluated at
+ * start of operation
+ */
+ if (index == 0)
+ index = gcl->count - 1;
+ else
+ index = index - 1;
+ change -= gcl->operation[index].interval;
+
+ /* optionally change to new list with additional operation in between */
+ if (insert) {
+ void __iomem *addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * index;
+
+ gcl->operation[index].properties |= TSNEP_GCL_INSERT;
+ iowrite32(gcl->operation[index].properties, addr);
+ }
+
+ return change;
+}
+
+static void tsnep_clean_gcl(struct tsnep_gcl *gcl)
+{
+ int i;
+ u32 mask = TSNEP_GCL_LAST | TSNEP_GCL_MASK;
+ void __iomem *addr;
+
+ /* search for insert operation and reset properties */
+ for (i = 0; i < gcl->count; i++) {
+ if (gcl->operation[i].properties & ~mask) {
+ addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * i;
+
+ gcl->operation[i].properties &= mask;
+ iowrite32(gcl->operation[i].properties, addr);
+
+ break;
+ }
+ }
+}
+
+static u64 tsnep_insert_gcl_operation(struct tsnep_gcl *gcl, int ref,
+ u64 change, u32 interval)
+{
+ u32 properties;
+
+ properties = gcl->operation[ref].properties & TSNEP_GCL_MASK;
+ /* change to new list directly after inserted operation */
+ properties |= TSNEP_GCL_CHANGE;
+
+ /* the last operation of the list is reserved for the insert operation */
+ tsnep_write_gcl_operation(gcl, TSNEP_GCL_COUNT - 1, properties,
+ interval, false);
+
+ return tsnep_set_gcl_change(gcl, ref, change, true);
+}
+
+static u64 tsnep_extend_gcl(struct tsnep_gcl *gcl, u64 start, u32 extension)
+{
+ int ref = gcl->count - 1;
+ u32 interval = gcl->operation[ref].interval + extension;
+
+ start -= gcl->operation[ref].interval;
+
+ return tsnep_insert_gcl_operation(gcl, ref, start, interval);
+}
+
+static u64 tsnep_cut_gcl(struct tsnep_gcl *gcl, u64 start, u64 cycle_time)
+{
+ u64 sum = 0;
+ int i;
+
+ /* find the operation which shall be cut */
+ for (i = 0; i < gcl->count; i++) {
+ u64 sum_tmp = sum + gcl->operation[i].interval;
+ u64 interval;
+
+ /* sum up operations as long as cycle time is not exceeded */
+ if (sum_tmp > cycle_time)
+ break;
+
+ /* remaining interval must be big enough for hardware */
+ interval = cycle_time - sum_tmp;
+ if (interval > 0 && interval < TSNEP_GCL_MIN_INTERVAL)
+ break;
+
+ sum = sum_tmp;
+ }
+ if (sum == cycle_time) {
+ /* no need to cut operation itself or whole cycle
+ * => change exactly at operation
+ */
+ return tsnep_set_gcl_change(gcl, i, start + sum, false);
+ }
+ return tsnep_insert_gcl_operation(gcl, i, start + sum,
+ cycle_time - sum);
+}
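
To make the cut case concrete: with operations of 20000 ns and 80000 ns and 50000 ns of cycle left before the new list starts, the loop stops after the first operation, so a 30000 ns operation is inserted to land the change exactly on the new start time (the driver additionally refuses remainders below TSNEP_GCL_MIN_INTERVAL). A sketch of that bookkeeping with hypothetical values:

#include <stdio.h>

int main(void)
{
	unsigned int interval[2] = { 20000, 80000 };
	unsigned long long cycle_time = 50000, sum = 0;
	int i;

	/* sum up operations as long as the cut cycle is not exceeded */
	for (i = 0; i < 2; i++) {
		if (sum + interval[i] > cycle_time)
			break;
		sum += interval[i];
	}
	/* i == 1, sum == 20000: insert a 30000 ns operation at entry 1 */
	printf("cut at entry %d, inserted interval %llu\n", i,
	       cycle_time - sum);
	return 0;
}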
+
+static int tsnep_enable_gcl(struct tsnep_adapter *adapter,
+ struct tsnep_gcl *gcl, struct tsnep_gcl *curr)
+{
+ u64 system_time;
+ u64 timeout;
+ u64 limit;
+
+ /* estimate the timeout limit after the timeout is enabled; the actual limit
+ * in hardware will be earlier than the estimate, so we are on the safe side
+ */
+ tsnep_get_system_time(adapter, &system_time);
+ timeout = system_time + TSNEP_GC_TIMEOUT;
+
+ if (curr)
+ limit = timeout + curr->change_limit;
+ else
+ limit = timeout;
+
+ gcl->start_time = tsnep_gcl_start_after(gcl, limit);
+
+ /* gate control time register is only 32bit => time shall be in the near
+ * future (no driver support for far future implemented)
+ */
+ if ((gcl->start_time - system_time) >= U32_MAX)
+ return -EAGAIN;
+
+ if (curr) {
+ /* change gate control list */
+ u64 last;
+ u64 change;
+
+ last = tsnep_gcl_start_before(curr, gcl->start_time);
+ if ((last + curr->cycle_time) == gcl->start_time)
+ change = tsnep_cut_gcl(curr, last,
+ gcl->start_time - last);
+ else if (((gcl->start_time - last) <=
+ curr->cycle_time_extension) ||
+ ((gcl->start_time - last) <= TSNEP_GCL_MIN_INTERVAL))
+ change = tsnep_extend_gcl(curr, last,
+ gcl->start_time - last);
+ else
+ change = tsnep_cut_gcl(curr, last,
+ gcl->start_time - last);
+
+ WARN_ON(change <= timeout);
+ gcl->change = true;
+ iowrite32(change & 0xFFFFFFFF, adapter->addr + TSNEP_GC_CHANGE);
+ } else {
+ /* start gate control list */
+ WARN_ON(gcl->start_time <= timeout);
+ gcl->change = false;
+ iowrite32(gcl->start_time & 0xFFFFFFFF,
+ adapter->addr + TSNEP_GC_TIME);
+ }
+
+ return 0;
+}
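
The 32 bit limit is worth quantifying: TSNEP_GC_TIME and TSNEP_GC_CHANGE hold only the low 32 bits of the nanosecond timestamp, so a start time more than about 4.3 seconds out cannot be programmed and the function returns -EAGAIN:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 2^32 - 1 nanoseconds is the farthest programmable start time */
	printf("%.3f s\n", (double)UINT32_MAX / 1e9);	/* ~4.295 s */
	return 0;
}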
+
+static int tsnep_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct tsnep_gcl *gcl;
+ struct tsnep_gcl *curr;
+ int retval;
+
+ if (!adapter->gate_control)
+ return -EOPNOTSUPP;
+
+ if (!qopt->enable) {
+ /* disable gate control if active */
+ mutex_lock(&adapter->gate_control_lock);
+
+ if (adapter->gate_control_active) {
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ adapter->gate_control_active = false;
+ }
+
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return 0;
+ }
+
+ retval = tsnep_validate_gcl(qopt);
+ if (retval)
+ return retval;
+
+ mutex_lock(&adapter->gate_control_lock);
+
+ gcl = &adapter->gcl[adapter->next_gcl];
+ tsnep_write_gcl(gcl, qopt);
+
+ /* select current gate control list if active */
+ if (adapter->gate_control_active) {
+ if (adapter->next_gcl == 0)
+ curr = &adapter->gcl[1];
+ else
+ curr = &adapter->gcl[0];
+ } else {
+ curr = NULL;
+ }
+
+ for (;;) {
+ /* start a timeout which discards a late enable; this helps ensure
+ * that the start/change time is in the future at enable
+ */
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+
+ retval = tsnep_enable_gcl(adapter, gcl, curr);
+ if (retval) {
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return retval;
+ }
+
+ /* enable gate control list */
+ if (adapter->next_gcl == 0)
+ iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
+ else
+ iowrite8(TSNEP_GC_ENABLE_B, adapter->addr + TSNEP_GC);
+
+ /* done if timeout did not happen */
+ if (!(ioread32(adapter->addr + TSNEP_GC) &
+ TSNEP_GC_TIMEOUT_SIGNAL))
+ break;
+
+ /* timeout is acknowledged with any enable */
+ iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
+
+ if (curr)
+ tsnep_clean_gcl(curr);
+
+ /* retry because of timeout */
+ }
+
+ adapter->gate_control_active = true;
+
+ if (adapter->next_gcl == 0)
+ adapter->next_gcl = 1;
+ else
+ adapter->next_gcl = 0;
+
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return 0;
+}
+
+int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return tsnep_taprio(adapter, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int tsnep_tc_init(struct tsnep_adapter *adapter)
+{
+ if (!adapter->gate_control)
+ return 0;
+
+ /* open all gates */
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ iowrite32(TSNEP_GC_OPEN | TSNEP_GC_NEXT_OPEN, adapter->addr + TSNEP_GC);
+
+ adapter->gcl[0].addr = adapter->addr + TSNEP_GCL_A;
+ adapter->gcl[1].addr = adapter->addr + TSNEP_GCL_B;
+
+ return 0;
+}
+
+void tsnep_tc_cleanup(struct tsnep_adapter *adapter)
+{
+ if (!adapter->gate_control)
+ return;
+
+ if (adapter->gate_control_active) {
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ adapter->gate_control_active = false;
+ }
+}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index b1c8ffea6ad2..437c5acfe222 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -945,7 +945,9 @@ static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
}
static void ethoc_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ethoc *priv = netdev_priv(dev);
@@ -961,7 +963,9 @@ static void ethoc_get_ringparam(struct net_device *dev,
}
static int ethoc_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ethoc *priv = netdev_priv(dev);
@@ -1074,14 +1078,11 @@ static int ethoc_probe(struct platform_device *pdev)
/* obtain device IRQ number */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "cannot obtain IRQ\n");
- ret = -ENXIO;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
goto free;
- }
- netdev->irq = res->start;
+ netdev->irq = ret;
/* setup driver-private data */
priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 97c5d70de76e..691605c15265 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1178,8 +1178,11 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev,
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
-static void ftgmac100_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static void
+ftgmac100_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct ftgmac100 *priv = netdev_priv(netdev);
@@ -1190,8 +1193,11 @@ static void ftgmac100_get_ringparam(struct net_device *netdev,
ering->tx_pending = priv->tx_q_entries;
}
-static int ftgmac100_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static int
+ftgmac100_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct ftgmac100 *priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6b2927d863e2..c78883c3a2c8 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2325,7 +2325,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
txq = netdev_get_tx_queue(net_dev, queue_mapping);
/* LLTX requires us to do our own update of trans_start */
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
@@ -2531,7 +2531,7 @@ static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
/* Bump the trans_start */
txq = netdev_get_tx_queue(net_dev, smp_processor_id());
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd);
if (err) {
@@ -2623,7 +2623,7 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
}
break;
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 8e643567abce..e985ae008a97 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -374,7 +374,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
break;
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
@@ -4246,7 +4246,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
}
irq = ls_dev->irqs[0];
- err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
+ err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
NULL, dpni_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(&ls_dev->dev), &ls_dev->dev);
@@ -4273,7 +4273,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
return 0;
free_irq:
- devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
+ devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
free_mc_irq:
fsl_mc_free_irqs(ls_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 2085844227fe..e54e70ebdd05 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -388,6 +388,8 @@ struct dpaa2_eth_ch_stats {
__u64 bytes_per_cdan;
};
+#define DPAA2_ETH_CH_STATS 7
+
/* Maximum number of queues associated with a DPNI */
#define DPAA2_ETH_MAX_TCS 8
#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index adb8ce5306ee..3fdbf87dccb1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -278,7 +278,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
/* Per-channel stats */
for (k = 0; k < priv->num_channels; k++) {
ch_stats = &priv->channel[k]->stats;
- for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++)
+ for (j = 0; j < DPAA2_ETH_CH_STATS; j++)
*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
}
i += j;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index ef8f0a055024..623d113b6581 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -2,6 +2,7 @@
/* Copyright 2019 NXP */
#include <linux/acpi.h>
+#include <linux/pcs-lynx.h>
#include <linux/property.h>
#include "dpaa2-eth.h"
@@ -40,7 +41,7 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
u16 dpmac_id)
{
- struct fwnode_handle *fwnode, *parent, *child = NULL;
+ struct fwnode_handle *fwnode, *parent = NULL, *child = NULL;
struct device_node *dpmacs = NULL;
int err;
u32 id;
@@ -53,8 +54,17 @@ static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
parent = of_fwnode_handle(dpmacs);
} else if (is_acpi_node(fwnode)) {
parent = fwnode;
+ } else {
+ /* The root dprc device didn't yet get to finalize its probe,
+ * thus the fwnode field is not yet set. Defer probe if we are
+ * facing this situation.
+ */
+ return ERR_PTR(-EPROBE_DEFER);
}
+ if (!parent)
+ return NULL;
+
fwnode_for_each_child_node(parent, child) {
err = -EINVAL;
if (is_acpi_device_node(child))
@@ -90,88 +100,6 @@ static int dpaa2_mac_get_if_mode(struct fwnode_handle *dpmac_node,
return err;
}
-static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
- phy_interface_t interface)
-{
- switch (interface) {
- /* We can switch between SGMII and 1000BASE-X at runtime with
- * pcs-lynx
- */
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- if (mac->pcs &&
- (mac->if_mode == PHY_INTERFACE_MODE_SGMII ||
- mac->if_mode == PHY_INTERFACE_MODE_1000BASEX))
- return false;
- return interface != mac->if_mode;
-
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- return (interface != mac->if_mode);
- default:
- return true;
- }
-}
-
-static void dpaa2_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- dpaa2_mac_phy_mode_mismatch(mac, state->interface)) {
- goto empty_set;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_USXGMII:
- phylink_set_10g_modes(mask);
- if (state->interface == PHY_INTERFACE_MODE_10GBASER)
- break;
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 2500baseT_Full);
- fallthrough;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 1000baseT_Full);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- break;
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 10baseT_Full);
- break;
- default:
- goto empty_set;
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-empty_set:
- linkmode_zero(supported);
-}
-
static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -243,7 +171,7 @@ static void dpaa2_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
- .validate = dpaa2_mac_validate,
+ .validate = phylink_generic_validate,
.mac_config = dpaa2_mac_config,
.mac_link_up = dpaa2_mac_link_up,
.mac_link_down = dpaa2_mac_link_down,
@@ -286,11 +214,13 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac,
static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
{
- struct lynx_pcs *pcs = mac->pcs;
+ struct phylink_pcs *phylink_pcs = mac->pcs;
- if (pcs) {
- struct device *dev = &pcs->mdio->dev;
- lynx_pcs_destroy(pcs);
+ if (phylink_pcs) {
+ struct mdio_device *mdio = lynx_get_mdio_device(phylink_pcs);
+ struct device *dev = &mdio->dev;
+
+ lynx_pcs_destroy(phylink_pcs);
put_device(dev);
mac->pcs = NULL;
}
@@ -336,9 +266,34 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
return err;
}
+ memset(&mac->phylink_config, 0, sizeof(mac->phylink_config));
mac->phylink_config.dev = &net_dev->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
+ mac->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
+
+ /* We support the current interface mode, and if we have a PCS
+ * similar interface modes that do not require the PLLs to be
+ * reconfigured.
+ */
+ __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
+ if (mac->pcs) {
+ switch (mac->if_mode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ break;
+
+ default:
+ break;
+ }
+ }
+
phylink = phylink_create(&mac->phylink_config,
dpmac_node, mac->if_mode,
&dpaa2_mac_phylink_ops);
@@ -349,7 +304,7 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
mac->phylink = phylink;
if (mac->pcs)
- phylink_set_pcs(mac->phylink, &mac->pcs->pcs);
+ phylink_set_pcs(mac->phylink, mac->pcs);
err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0);
if (err) {
@@ -381,6 +336,7 @@ int dpaa2_mac_open(struct dpaa2_mac *mac)
{
struct fsl_mc_device *dpmac_dev = mac->mc_dev;
struct net_device *net_dev = mac->net_dev;
+ struct fwnode_handle *fw_node;
int err;
err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
@@ -400,7 +356,13 @@ int dpaa2_mac_open(struct dpaa2_mac *mac)
/* Find the device node representing the MAC device and link the device
* behind the associated netdev to it.
*/
- mac->fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id);
+ fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id);
+ if (IS_ERR(fw_node)) {
+ err = PTR_ERR(fw_node);
+ goto err_close_dpmac;
+ }
+
+ mac->fw_node = fw_node;
net_dev->dev.of_node = to_of_node(mac->fw_node);
return 0;
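
This dpaa2-mac conversion is part of a tree-wide phylink cleanup: rather than a hand-written .validate that assembles link-mode masks per interface, a driver now declares its MAC capabilities and supported PHY interface modes up front and points .validate at phylink_generic_validate(). A hedged fragment of the pattern (the capability set below is illustrative, not dpaa2's exact one):

#include <linux/phylink.h>

static const struct phylink_mac_ops example_phylink_ops = {
	.validate = phylink_generic_validate,
	/* .mac_config, .mac_link_up, .mac_link_down as before */
};

static void example_fill_phylink_config(struct phylink_config *config)
{
	config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				   MAC_10FD | MAC_100FD | MAC_1000FD;
	__set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_1000BASEX,
		  config->supported_interfaces);
}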
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 7842cbb2207a..1331a8477fe4 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -7,7 +7,6 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phylink.h>
-#include <linux/pcs-lynx.h>
#include "dpmac.h"
#include "dpmac-cmd.h"
@@ -23,7 +22,7 @@ struct dpaa2_mac {
struct phylink *phylink;
phy_interface_t if_mode;
enum dpmac_link_type if_link_type;
- struct lynx_pcs *pcs;
+ struct phylink_pcs *pcs;
struct fwnode_handle *fw_node;
};
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 32b5faa87bb8..5f5f8c53c4a0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -129,7 +129,6 @@ static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv)
static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
{
struct device *dev = &mc_dev->dev;
- struct fsl_mc_device_irq *irq;
struct ptp_qoriq *ptp_qoriq;
struct device_node *node;
void __iomem *base;
@@ -177,8 +176,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
goto err_unmap;
}
- irq = mc_dev->irqs[0];
- ptp_qoriq->irq = irq->msi_desc->irq;
+ ptp_qoriq->irq = mc_dev->irqs[0]->virq;
err = request_threaded_irq(ptp_qoriq->irq, NULL,
dpaa2_ptp_irq_handler_thread,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index d039457928b0..9a561072aa4a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -394,7 +394,8 @@ static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
ppriv_local = ethsw->ports[i];
- ppriv_local->vlans[vid] = 0;
+ if (ppriv_local)
+ ppriv_local->vlans[vid] = 0;
}
return 0;
@@ -1553,8 +1554,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
- err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
- NULL,
+ err = devm_request_threaded_irq(dev, irq->virq, NULL,
dpaa2_switch_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(dev), dev);
@@ -1580,7 +1580,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
return 0;
free_devm_irq:
- devm_free_irq(dev, irq->msi_desc->irq, dev);
+ devm_free_irq(dev, irq->virq, dev);
free_irq:
fsl_mc_free_irqs(sw_dev);
return err;
@@ -1896,9 +1896,11 @@ static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid
/* Delete VLAN from switch if it is no longer configured on
* any port
*/
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
- if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ if (ethsw->ports[i] &&
+ ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
return 0; /* Found a port member in VID */
+ }
ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 504e12554079..d6930a797c6c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1547,7 +1547,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
switch (xdp_act) {
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
@@ -2897,12 +2897,8 @@ int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, name);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 910b9f722504..fa5b4f885b17 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -562,7 +562,9 @@ static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
}
static void enetc_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 0e87c7043b77..ed16a5ac9ad0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -8,6 +8,7 @@
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/pcs-lynx.h>
#include "enetc_ierb.h"
#include "enetc_pf.h"
@@ -828,8 +829,8 @@ static int enetc_imdio_create(struct enetc_pf *pf)
{
struct device *dev = &pf->si->pdev->dev;
struct enetc_mdio_priv *mdio_priv;
- struct lynx_pcs *pcs_lynx;
- struct mdio_device *pcs;
+ struct phylink_pcs *phylink_pcs;
+ struct mdio_device *mdio_device;
struct mii_bus *bus;
int err;
@@ -853,23 +854,23 @@ static int enetc_imdio_create(struct enetc_pf *pf)
goto free_mdio_bus;
}
- pcs = mdio_device_create(bus, 0);
- if (IS_ERR(pcs)) {
- err = PTR_ERR(pcs);
- dev_err(dev, "cannot create pcs (%d)\n", err);
+ mdio_device = mdio_device_create(bus, 0);
+ if (IS_ERR(mdio_device)) {
+ err = PTR_ERR(mdio_device);
+ dev_err(dev, "cannot create mdio device (%d)\n", err);
goto unregister_mdiobus;
}
- pcs_lynx = lynx_pcs_create(pcs);
- if (!pcs_lynx) {
- mdio_device_free(pcs);
+ phylink_pcs = lynx_pcs_create(mdio_device);
+ if (!phylink_pcs) {
+ mdio_device_free(mdio_device);
err = -ENOMEM;
dev_err(dev, "cannot create lynx pcs (%d)\n", err);
goto unregister_mdiobus;
}
pf->imdio = bus;
- pf->pcs = pcs_lynx;
+ pf->pcs = phylink_pcs;
return 0;
@@ -882,8 +883,11 @@ free_mdio_bus:
static void enetc_imdio_remove(struct enetc_pf *pf)
{
+ struct mdio_device *mdio_device;
+
if (pf->pcs) {
- mdio_device_free(pf->pcs->mdio);
+ mdio_device = lynx_get_mdio_device(pf->pcs);
+ mdio_device_free(mdio_device);
lynx_pcs_destroy(pf->pcs);
}
if (pf->imdio) {
@@ -930,45 +934,6 @@ static void enetc_mdiobus_destroy(struct enetc_pf *pf)
enetc_imdio_remove(pf);
}
-static void enetc_pl_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_2500BASEX &&
- state->interface != PHY_INTERFACE_MODE_USXGMII &&
- !phy_interface_mode_is_rgmii(state->interface)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX ||
- state->interface == PHY_INTERFACE_MODE_USXGMII) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void enetc_pl_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
@@ -980,7 +945,7 @@ static void enetc_pl_mac_config(struct phylink_config *config,
priv = netdev_priv(pf->si->ndev);
if (pf->pcs)
- phylink_set_pcs(priv->phylink, &pf->pcs->pcs);
+ phylink_set_pcs(priv->phylink, pf->pcs);
}
static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
@@ -1096,7 +1061,7 @@ static void enetc_pl_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops enetc_mac_phylink_ops = {
- .validate = enetc_pl_mac_validate,
+ .validate = phylink_generic_validate,
.mac_config = enetc_pl_mac_config,
.mac_link_up = enetc_pl_mac_link_up,
.mac_link_down = enetc_pl_mac_link_down,
@@ -1111,6 +1076,18 @@ static int enetc_phylink_create(struct enetc_ndev_priv *priv,
pf->phylink_config.dev = &priv->ndev->dev;
pf->phylink_config.type = PHYLINK_NETDEV;
+ pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII,
+ pf->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(pf->phylink_config.supported_interfaces);
phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
pf->if_mode, &enetc_mac_phylink_ops);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index 263946c51e37..c26bd66e4597 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -2,7 +2,7 @@
/* Copyright 2017-2019 NXP */
#include "enetc.h"
-#include <linux/pcs-lynx.h>
+#include <linux/phylink.h>
#define ENETC_PF_NUM_RINGS 8
@@ -46,7 +46,7 @@ struct enetc_pf {
struct mii_bus *mdio; /* saved for cleanup */
struct mii_bus *imdio;
- struct lynx_pcs *pcs;
+ struct phylink_pcs *pcs;
phy_interface_t if_mode;
struct phylink_config phylink_config;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 36b4f51dd297..17c097cef7d4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -42,15 +42,10 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
if (err)
return dev_err_probe(&pdev->dev, err, "device enable failed\n");
- /* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, KBUILD_MODNAME);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 0536d2c76fbc..3555c12edb45 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -1182,7 +1182,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
}
/* parsing gate action */
- if (entryg->gate.index >= priv->psfp_cap.max_psfp_gate) {
+ if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) {
NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
err = -ENOSPC;
goto free_filter;
@@ -1202,7 +1202,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
}
refcount_set(&sgi->refcount, 1);
- sgi->index = entryg->gate.index;
+ sgi->index = entryg->hw_index;
sgi->init_ipv = entryg->gate.prio;
sgi->basetime = entryg->gate.basetime;
sgi->cycletime = entryg->gate.cycletime;
@@ -1244,7 +1244,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
refcount_set(&fmi->refcount, 1);
fmi->cir = entryp->police.rate_bytes_ps;
fmi->cbs = entryp->police.burst;
- fmi->index = entryp->police.index;
+ fmi->index = entryp->hw_index;
filter->flags |= ENETC_PSFP_FLAGS_FMI;
filter->fmi_index = fmi->index;
sfi->meter_id = fmi->index;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1b1f7f2a6130..796133de527e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1185,6 +1185,21 @@ static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
}
}
+static void fec_irqs_disable(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(0, fep->hwp + FEC_IMASK);
+}
+
+static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(0, fep->hwp + FEC_IMASK);
+ writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
+}
+
static void
fec_stop(struct net_device *ndev)
{
@@ -1211,15 +1226,13 @@ fec_stop(struct net_device *ndev)
writel(1, fep->hwp + FEC_ECNTRL);
udelay(10);
}
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
} else {
- writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
val = readl(fep->hwp + FEC_ECNTRL);
val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
writel(val, fep->hwp + FEC_ECNTRL);
- fec_enet_stop_mode(fep, true);
}
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
@@ -2877,15 +2890,10 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return -EINVAL;
device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
- if (device_may_wakeup(&ndev->dev)) {
+ if (device_may_wakeup(&ndev->dev))
fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
- if (fep->wake_irq > 0)
- enable_irq_wake(fep->wake_irq);
- } else {
+ else
fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
- if (fep->wake_irq > 0)
- disable_irq_wake(fep->wake_irq);
- }
return 0;
}
@@ -3558,7 +3566,7 @@ static int fec_enet_init(struct net_device *ndev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
- ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
+ netif_set_gso_max_segs(ndev, FEC_MAX_TSO_SEGS);
/* enable hw accelerator */
ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
@@ -4057,9 +4065,19 @@ static int __maybe_unused fec_suspend(struct device *dev)
netif_device_detach(ndev);
netif_tx_unlock_bh(ndev);
fec_stop(ndev);
- fec_enet_clk_enable(ndev, false);
- if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+ if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
+ fec_irqs_disable(ndev);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ } else {
+ fec_irqs_disable_except_wakeup(ndev);
+ if (fep->wake_irq > 0) {
+ disable_irq(fep->wake_irq);
+ enable_irq_wake(fep->wake_irq);
+ }
+ fec_enet_stop_mode(fep, true);
+ }
+ /* It's safe to disable clocks since interrupts are masked */
+ fec_enet_clk_enable(ndev, false);
}
rtnl_unlock();
@@ -4097,6 +4115,10 @@ static int __maybe_unused fec_resume(struct device *dev)
}
if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
fec_enet_stop_mode(fep, false);
+ if (fep->wake_irq) {
+ disable_irq_wake(fep->wake_irq);
+ enable_irq(fep->wake_irq);
+ }
val = readl(fep->hwp + FEC_ECNTRL);
val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index bbbde9f701c2..be0bd4b44926 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -99,13 +99,13 @@ static void mpc52xx_fec_tx_timeout(struct net_device *dev, unsigned int txqueue)
netif_wake_queue(dev);
}
-static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
+static void mpc52xx_fec_set_paddr(struct net_device *dev, const u8 *mac)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
- out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
- out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
+ out_be32(&fec->paddr1, *(const u32 *)(&mac[0]));
+ out_be32(&fec->paddr2, (*(const u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
@@ -893,13 +893,15 @@ static int mpc52xx_fec_probe(struct platform_device *op)
rv = of_get_ethdev_address(np, ndev);
if (rv) {
struct mpc52xx_fec __iomem *fec = priv->fec;
+ u8 addr[ETH_ALEN] __aligned(4);
/*
* If the MAC address is not provided via DT then read
* it back from the controller regs
*/
- *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
- *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
+ *(u32 *)(&addr[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&addr[4]) = in_be32(&fec->paddr2) >> 16;
+ eth_hw_addr_set(ndev, addr);
}
/*
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index d71eac7e1924..af99017a5453 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -473,10 +473,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
fep->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index ce0a121580f6..8f0db61cb1f6 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2727,7 +2727,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman = kzalloc(sizeof(*fman), GFP_KERNEL);
if (!fman)
- return NULL;
+ return ERR_PTR(-ENOMEM);
fm_node = of_node_get(of_dev->dev.of_node);
@@ -2740,26 +2740,21 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman->dts_params.id = (u8)val;
/* Get the FM interrupt */
- res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n",
- __func__);
+ err = platform_get_irq(of_dev, 0);
+ if (err < 0)
goto fman_node_put;
- }
- irq = res->start;
+ irq = err;
/* Get the FM error interrupt */
- res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
- if (!res) {
- dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n",
- __func__);
+ err = platform_get_irq(of_dev, 1);
+ if (err < 0)
goto fman_node_put;
- }
- fman->dts_params.err_irq = res->start;
+ fman->dts_params.err_irq = err;
/* Get the FM address */
res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
if (!res) {
+ err = -EINVAL;
dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
__func__);
goto fman_node_put;
@@ -2770,6 +2765,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
clk = of_clk_get(fm_node, 0);
if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
__func__, fman->dts_params.id);
goto fman_node_put;
@@ -2777,6 +2773,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
clk_rate = clk_get_rate(clk);
if (!clk_rate) {
+ err = -EINVAL;
dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
__func__, fman->dts_params.id);
goto fman_node_put;
@@ -2797,6 +2794,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the MURAM base address and size */
muram_node = of_find_matching_node(fm_node, fman_muram_match);
if (!muram_node) {
+ err = -EINVAL;
dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
__func__);
goto fman_free;
@@ -2836,6 +2834,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
devm_request_mem_region(&of_dev->dev, phys_base_addr,
mem_size, "fman");
if (!fman->dts_params.res) {
+ err = -EBUSY;
dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
__func__);
goto fman_free;
@@ -2844,6 +2843,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman->dts_params.base_addr =
devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
if (!fman->dts_params.base_addr) {
+ err = -ENOMEM;
dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
goto fman_free;
}
@@ -2868,7 +2868,7 @@ fman_node_put:
of_node_put(fm_node);
fman_free:
kfree(fman);
- return NULL;
+ return ERR_PTR(err);
}
static int fman_probe(struct platform_device *of_dev)
@@ -2880,8 +2880,8 @@ static int fman_probe(struct platform_device *of_dev)
dev = &of_dev->dev;
fman = read_dts_node(of_dev);
- if (!fman)
- return -EIO;
+ if (IS_ERR(fman))
+ return PTR_ERR(fman);
err = fman_config(fman);
if (err) {
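The read_dts_node()/fman_probe() changes above replace the bare NULL return with the standard ERR_PTR idiom, so each failure path propagates its own errno up to the probe function instead of a blanket -EIO. A minimal, FMan-independent sketch (struct and function names hypothetical):

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int dummy; };

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* encode errno in the pointer */
	return f;
}

static int foo_probe(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* propagate the original errno */
	return 0;
}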
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index d9baac0dbc7d..4c9d05c45c03 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1805,7 +1805,7 @@ static int fman_port_probe(struct platform_device *of_dev)
fman = dev_get_drvdata(&fm_pdev->dev);
if (!fman) {
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
err = of_property_read_u32(port_node, "cell-index", &val);
@@ -1813,7 +1813,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: reading cell-index for %pOF failed\n",
__func__, port_node);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port_id = (u8)val;
port->dts_params.id = port_id;
@@ -1847,7 +1847,7 @@ static int fman_port_probe(struct platform_device *of_dev)
} else {
dev_err(port->dev, "%s: Illegal port type\n", __func__);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port->dts_params.type = port_type;
@@ -1861,7 +1861,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: incorrect qman-channel-id\n",
__func__);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port->dts_params.qman_channel_id = qman_channel_id;
}
@@ -1871,7 +1871,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: of_address_to_resource() failed\n",
__func__);
err = -ENOMEM;
- goto return_err;
+ goto put_device;
}
port->dts_params.fman = fman;
@@ -1896,6 +1896,8 @@ static int fman_port_probe(struct platform_device *of_dev)
return 0;
+put_device:
+ put_device(&fm_pdev->dev);
return_err:
of_node_put(port_node);
free_port:
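The relabelled error paths above fix a device reference leak: the probe function looks up the parent FMan's platform device, which takes a reference, so every failure after that point must drop it before the common unwind. The shape of the fix, reduced to its essentials (do_setup() is a hypothetical stand-in for the real checks):

#include <linux/of_platform.h>

int do_setup(struct platform_device *pdev);	/* hypothetical */

static int example_probe(struct device_node *fman_node)
{
	struct platform_device *fm_pdev;
	int err;

	/* of_find_device_by_node() returns with a reference held */
	fm_pdev = of_find_device_by_node(fman_node);
	if (!fm_pdev)
		return -ENODEV;

	err = do_setup(fm_pdev);
	if (err)
		goto put_device;
	return 0;

put_device:
	put_device(&fm_pdev->dev);	/* balance the lookup's get */
	return err;
}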
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index d9fc5c456bf3..39ae965cd4f6 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -94,14 +94,17 @@ static void mac_exception(void *handle, enum fman_mac_exceptions ex)
__func__, ex);
}
-static void set_fman_mac_params(struct mac_device *mac_dev,
- struct fman_mac_params *params)
+static int set_fman_mac_params(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct mac_priv_s *priv = mac_dev->priv;
params->base_addr = (typeof(params->base_addr))
devm_ioremap(priv->dev, mac_dev->res->start,
resource_size(mac_dev->res));
+ if (!params->base_addr)
+ return -ENOMEM;
+
memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
params->max_speed = priv->max_speed;
params->phy_if = mac_dev->phy_if;
@@ -112,6 +115,8 @@ static void set_fman_mac_params(struct mac_device *mac_dev,
params->event_cb = mac_exception;
params->dev_id = mac_dev;
params->internal_phy_node = priv->internal_phy_node;
+
+ return 0;
}
static int tgec_initialization(struct mac_device *mac_dev)
@@ -123,7 +128,9 @@ static int tgec_initialization(struct mac_device *mac_dev)
priv = mac_dev->priv;
- set_fman_mac_params(mac_dev, &params);
+ err = set_fman_mac_params(mac_dev, &params);
+ if (err)
+ goto _return;
mac_dev->fman_mac = tgec_config(&params);
if (!mac_dev->fman_mac) {
@@ -169,7 +176,9 @@ static int dtsec_initialization(struct mac_device *mac_dev)
priv = mac_dev->priv;
- set_fman_mac_params(mac_dev, &params);
+ err = set_fman_mac_params(mac_dev, &params);
+ if (err)
+ goto _return;
mac_dev->fman_mac = dtsec_config(&params);
if (!mac_dev->fman_mac) {
@@ -218,7 +227,9 @@ static int memac_initialization(struct mac_device *mac_dev)
priv = mac_dev->priv;
- set_fman_mac_params(mac_dev, &params);
+ err = set_fman_mac_params(mac_dev, &params);
+ if (err)
+ goto _return;
if (priv->max_speed == SPEED_10000)
params.phy_if = PHY_INTERFACE_MODE_XGMII;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index acab58fd3db3..206b7a35eaf5 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2076,10 +2076,6 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
priv->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 7b32ed29bf4c..ff756265d58f 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -372,7 +372,9 @@ static int gfar_scoalesce(struct net_device *dev,
* rx, rx_mini, and rx_jumbo rings are the same size, as mini and
* jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev,
- struct ethtool_ringparam *rvals)
+ struct ethtool_ringparam *rvals,
+ struct kernel_ethtool_ringparam *kernel_rvals,
+ struct netlink_ext_ack *extack)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -399,7 +401,9 @@ static void gfar_gringparam(struct net_device *dev,
* necessary so that we don't mess things up while we're in motion.
*/
static int gfar_sringparam(struct net_device *dev,
- struct ethtool_ringparam *rvals)
+ struct ethtool_ringparam *rvals,
+ struct kernel_ethtool_ringparam *kernel_rvals,
+ struct netlink_ext_ack *extack)
{
struct gfar_private *priv = netdev_priv(dev);
int err = 0, i;
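This driver (and several below: ucc_geth, hns, gve) is adapted to the ethtool ring-parameter API extended in 5.17: .get_ringparam/.set_ringparam now also receive a kernel-only struct kernel_ethtool_ringparam and a netlink extack, so set paths can report readable errors. A minimal sketch of the new signature (the limit is hypothetical):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

static int example_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kring,
				 struct netlink_ext_ack *extack)
{
	if (ring->rx_pending > 4096) {	/* hypothetical limit */
		NL_SET_ERR_MSG_MOD(extack, "RX ring too large");
		return -EINVAL;
	}
	return 0;
}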
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 14c08a868190..69b2b98b1525 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -207,7 +207,9 @@ uec_get_regs(struct net_device *netdev,
static void
uec_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct ucc_geth_info *ug_info = ugeth->ug_info;
@@ -226,7 +228,9 @@ uec_get_ringparam(struct net_device *netdev,
static int
uec_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct ucc_geth_info *ug_info = ugeth->ug_info;
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 0b68852379da..266e562bd67a 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -47,11 +47,11 @@ struct tgec_mdio_controller {
#define MDIO_CTL_READ BIT(15)
#define MDIO_DATA(x) (x & 0xffff)
-#define MDIO_DATA_BSY BIT(31)
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
bool is_little_endian;
+ bool has_a009885;
bool has_a011043;
};
@@ -187,10 +187,10 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ unsigned long flags;
uint16_t dev_addr;
uint32_t mdio_stat;
uint32_t mdio_ctl;
- uint16_t value;
int ret;
bool endian = priv->is_little_endian;
@@ -222,12 +222,18 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return ret;
}
+ if (priv->has_a009885)
+ /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we
+ * must read back the data register within 16 MDC cycles.
+ */
+ local_irq_save(flags);
+
/* Initiate the read */
xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian);
ret = xgmac_wait_until_done(&bus->dev, regs, endian);
if (ret)
- return ret;
+ goto irq_restore;
/* Return all Fs if nothing was there */
if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
@@ -235,13 +241,17 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
- return 0xffff;
+ ret = 0xffff;
+ } else {
+ ret = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
+ dev_dbg(&bus->dev, "read %04x\n", ret);
}
- value = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
- dev_dbg(&bus->dev, "read %04x\n", value);
+irq_restore:
+ if (priv->has_a009885)
+ local_irq_restore(flags);
- return value;
+ return ret;
}
static int xgmac_mdio_probe(struct platform_device *pdev)
@@ -288,6 +298,8 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
priv->is_little_endian = device_property_read_bool(&pdev->dev,
"little-endian");
+ priv->has_a009885 = device_property_read_bool(&pdev->dev,
+ "fsl,erratum-a009885");
priv->has_a011043 = device_property_read_bool(&pdev->dev,
"fsl,erratum-a011043");
@@ -319,9 +331,10 @@ err_ioremap:
static int xgmac_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
+ struct mdio_fsl_priv *priv = bus->priv;
mdiobus_unregister(bus);
- iounmap(bus->priv);
+ iounmap(priv->mdio_base);
mdiobus_free(bus);
return 0;
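The new fsl,erratum-a009885 handling above keeps the MDIO read atomic on affected silicon: per the added comment, once MDIO_STAT_BSY clears the data register must be read back within 16 MDC cycles, so the driver masks local interrupts across the busy-wait and the readback. The control flow, stripped to its skeleton (the register helpers are hypothetical stand-ins for the xgmac accessors):

#include <linux/irqflags.h>

void start_read(void __iomem *regs);		/* sets MDIO_CTL_READ */
int wait_until_done(void __iomem *regs);	/* polls MDIO_STAT_BSY */
int read_data(void __iomem *regs);

static int example_read(void __iomem *regs, bool has_erratum)
{
	unsigned long flags;
	int ret;

	if (has_erratum)
		local_irq_save(flags);	/* nothing may delay the readback */

	start_read(regs);
	ret = wait_until_done(regs);
	if (!ret)
		ret = read_data(regs);	/* within 16 MDC cycles of BSY=0 */

	if (has_erratum)
		local_irq_restore(flags);
	return ret;
}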
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index b719f72281c4..160735484465 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -229,6 +229,7 @@ struct gve_rx_ring {
/* A TX desc ring entry */
union gve_tx_desc {
struct gve_tx_pkt_desc pkt; /* first desc for a packet */
+ struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};
@@ -441,13 +442,13 @@ struct gve_tx_ring {
* associated with that irq.
*/
struct gve_notify_block {
- __be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st */
+ __be32 *irq_db_index; /* pointer to idx into Bar2 */
char name[IFNAMSIZ + 16]; /* name registered with the kernel */
struct napi_struct napi; /* kernel napi struct for this block */
struct gve_priv *priv;
struct gve_tx_ring *tx; /* tx rings on this block */
struct gve_rx_ring *rx; /* rx rings on this block */
-} ____cacheline_aligned;
+};
/* Tracks allowed and current queue settings */
struct gve_queue_config {
@@ -466,6 +467,10 @@ struct gve_options_dqo_rda {
u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};
+struct gve_irq_db {
+ __be32 index;
+} ____cacheline_aligned;
+
struct gve_ptype {
u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
@@ -492,7 +497,8 @@ struct gve_priv {
struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
struct gve_queue_page_list *qpls; /* array of num qpls */
struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
- dma_addr_t ntfy_block_bus;
+ struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
+ dma_addr_t irq_db_indices_bus;
struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
char mgmt_msix_name[IFNAMSIZ + 16];
u32 mgmt_msix_idx;
@@ -551,6 +557,8 @@ struct gve_priv {
u32 page_alloc_fail; /* count of page alloc fails */
u32 dma_mapping_error; /* count of dma mapping errors */
u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
+ u32 suspend_cnt; /* count of times suspended */
+ u32 resume_cnt; /* count of times resumed */
struct workqueue_struct *gve_wq;
struct work_struct service_task;
struct work_struct stats_report_task;
@@ -567,6 +575,7 @@ struct gve_priv {
/* Gvnic device link speed from hypervisor. */
u64 link_speed;
+ bool up_before_suspend; /* True if dev was up before suspend */
struct gve_options_dqo_rda options_dqo_rda;
struct gve_ptype_lut *ptype_lut_dqo;
@@ -575,6 +584,10 @@ struct gve_priv {
int data_buffer_size_dqo;
enum gve_queue_format queue_format;
+
+ /* Interrupt coalescing settings */
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
};
enum gve_service_task_flags_bit {
@@ -733,7 +746,7 @@ static inline void gve_clear_report_stats(struct gve_priv *priv)
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
struct gve_notify_block *block)
{
- return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
+ return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}
/* Returns the index into ntfy_blocks of the given tx ring's block
@@ -830,7 +843,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
- enum dma_data_direction);
+ enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
/* tx handling */
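The header changes above (implemented by the gve_main.c hunks further down) split the device-visible doorbell indices out of struct gve_notify_block: only the small, cacheline-aligned struct gve_irq_db entries stay in the DMA-coherent region reported to the device, while the notify blocks become plain kernel memory that merely points at its index. The resulting allocation layout, in outline:

	/* Device-visible: one cacheline-aligned index per vector */
	priv->irq_db_indices =
		dma_alloc_coherent(&priv->pdev->dev,
				   num * sizeof(*priv->irq_db_indices),
				   &priv->irq_db_indices_bus, GFP_KERNEL);

	/* Kernel-only: napi state, names, ring back-pointers */
	priv->ntfy_blocks = kvzalloc(num * sizeof(*priv->ntfy_blocks),
				     GFP_KERNEL);

	for (i = 0; i < num; i++)
		priv->ntfy_blocks[i].irq_db_index =
			&priv->irq_db_indices[i].index;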
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 83ae56c310d3..2ad7f57f7e5b 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -462,7 +462,7 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
.num_counters = cpu_to_be32(num_counters),
.irq_db_addr = cpu_to_be64(db_array_bus_addr),
.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
- .irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])),
+ .irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)),
.ntfy_blk_msix_base_idx =
cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
.queue_format = priv->queue_format,
@@ -738,10 +738,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
* is not set to GqiRda, choose the queue format in a priority order:
* DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
*/
- if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
- dev_info(&priv->pdev->dev,
- "Driver is running with GQI RDA queue format.\n");
- } else if (dev_op_dqo_rda) {
+ if (dev_op_dqo_rda) {
priv->queue_format = GVE_DQO_RDA_FORMAT;
dev_info(&priv->pdev->dev,
"Driver is running with DQO RDA queue format.\n");
@@ -753,6 +750,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
"Driver is running with GQI RDA queue format.\n");
supported_features_mask =
be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
+ } else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
+ dev_info(&priv->pdev->dev,
+ "Driver is running with GQI RDA queue format.\n");
} else {
priv->queue_format = GVE_GQI_QPL_FORMAT;
if (dev_op_gqi_qpl)
diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h
index 4d225a18d8ce..f4ae9e19b844 100644
--- a/drivers/net/ethernet/google/gve/gve_desc.h
+++ b/drivers/net/ethernet/google/gve/gve_desc.h
@@ -33,6 +33,14 @@ struct gve_tx_pkt_desc {
__be64 seg_addr; /* Base address (see note) of this segment */
} __packed;
+struct gve_tx_mtd_desc {
+ u8 type_flags; /* type is lower 4 bits, subtype upper */
+ u8 path_state; /* state is lower 4 bits, hash type upper */
+ __be16 reserved0;
+ __be32 path_hash;
+ __be64 reserved1;
+} __packed;
+
struct gve_tx_seg_desc {
u8 type_flags; /* type is lower 4 bits, flags upper */
u8 l3_offset; /* TSO: 2 byte units to start of IPH */
@@ -46,6 +54,7 @@ struct gve_tx_seg_desc {
#define GVE_TXD_STD (0x0 << 4) /* Std with Host Address */
#define GVE_TXD_TSO (0x1 << 4) /* TSO with Host Address */
#define GVE_TXD_SEG (0x2 << 4) /* Seg with Host Address */
+#define GVE_TXD_MTD (0x3 << 4) /* Metadata */
/* GVE Transmit Descriptor Flags for Std Pkts */
#define GVE_TXF_L4CSUM BIT(0) /* Need csum offload */
@@ -54,6 +63,17 @@ struct gve_tx_seg_desc {
/* GVE Transmit Descriptor Flags for TSO Segs */
#define GVE_TXSF_IPV6 BIT(1) /* IPv6 TSO */
+/* GVE Transmit Descriptor Options for MTD Segs */
+#define GVE_MTD_SUBTYPE_PATH 0
+
+#define GVE_MTD_PATH_STATE_DEFAULT 0
+#define GVE_MTD_PATH_STATE_TIMEOUT 1
+#define GVE_MTD_PATH_STATE_CONGESTION 2
+#define GVE_MTD_PATH_STATE_RETRANSMIT 3
+
+#define GVE_MTD_PATH_HASH_NONE (0x0 << 4)
+#define GVE_MTD_PATH_HASH_L4 (0x1 << 4)
+
/* GVE Receive Packet Descriptor */
/* The start of an ethernet packet comes 2 bytes into the rx buffer.
* gVNIC adds this padding so that both the DMA and the L3/4 protocol header
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index 836042364124..1eb4d5fd8561 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -18,6 +18,7 @@
#define GVE_TX_IRQ_RATELIMIT_US_DQO 50
#define GVE_RX_IRQ_RATELIMIT_US_DQO 20
+#define GVE_MAX_ITR_INTERVAL_DQO (GVE_ITR_INTERVAL_DQO_MASK * 2)
/* Timeout in seconds to wait for a reinjection completion after receiving
* its corresponding miss completion.
@@ -54,17 +55,17 @@ gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
}
/* Builds register value to write to DQO IRQ doorbell to enable with specified
- * ratelimit.
+ * ITR interval.
*/
-static inline u32 gve_set_itr_ratelimit_dqo(u32 ratelimit_us)
+static inline u32 gve_setup_itr_interval_dqo(u32 interval_us)
{
u32 result = GVE_ITR_ENABLE_BIT_DQO;
/* Interval has 2us granularity. */
- ratelimit_us >>= 1;
+ interval_us >>= 1;
- ratelimit_us &= GVE_ITR_INTERVAL_DQO_MASK;
- result |= (ratelimit_us << GVE_ITR_INTERVAL_DQO_SHIFT);
+ interval_us &= GVE_ITR_INTERVAL_DQO_MASK;
+ result |= (interval_us << GVE_ITR_INTERVAL_DQO_SHIFT);
return result;
}
@@ -73,9 +74,20 @@ static inline void
gve_write_irq_doorbell_dqo(const struct gve_priv *priv,
const struct gve_notify_block *block, u32 val)
{
- u32 index = be32_to_cpu(block->irq_db_index);
+ u32 index = be32_to_cpu(*block->irq_db_index);
iowrite32(val, &priv->db_bar2[index]);
}
+/* Sets interrupt throttling interval and enables interrupt
+ * by writing to IRQ doorbell.
+ */
+static inline void
+gve_set_itr_coalesce_usecs_dqo(struct gve_priv *priv,
+ struct gve_notify_block *block,
+ u32 usecs)
+{
+ gve_write_irq_doorbell_dqo(priv, block,
+ gve_setup_itr_interval_dqo(usecs));
+}
#endif /* _GVE_DQO_H_ */
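For the arithmetic above: with 2 us granularity, a requested interval of 50 us encodes as 50 >> 1 = 25 in the interval field, or'd with the enable bit; the largest encodable interval is therefore GVE_ITR_INTERVAL_DQO_MASK * 2 us, which is exactly the GVE_MAX_ITR_INTERVAL_DQO bound the new gve_set_coalesce() in gve_ethtool.c checks against. A comment-form worked example:

	/* interval_us = 50
	 *   50 >> 1                        = 25  (2 us granularity)
	 *   25 & GVE_ITR_INTERVAL_DQO_MASK = 25
	 *   val = GVE_ITR_ENABLE_BIT_DQO |
	 *         (25 << GVE_ITR_INTERVAL_DQO_SHIFT)
	 */
	u32 val = gve_setup_itr_interval_dqo(50);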
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index c8df47a97fa4..50b384910c83 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -8,6 +8,7 @@
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
+#include "gve_dqo.h"
static void gve_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -42,7 +43,7 @@ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
};
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
- "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
+ "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
"rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
@@ -50,7 +51,7 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
};
static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
- "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]",
+ "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
"tx_dma_mapping_error[%u]",
};
@@ -139,10 +140,11 @@ static void
gve_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
- tmp_rx_desc_err_dropped_pkt, tmp_tx_pkts, tmp_tx_bytes;
+ u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
+ tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
+ tmp_tx_pkts, tmp_tx_bytes;
u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
- rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes;
+ rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
int stats_idx, base_stats_idx, max_stats_idx;
struct stats *report_stats;
int *rx_qid_to_stats_idx;
@@ -191,7 +193,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
}
}
- for (tx_pkts = 0, tx_bytes = 0, ring = 0;
+ for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
ring < priv->tx_cfg.num_queues; ring++) {
if (priv->tx) {
do {
@@ -203,6 +205,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
start));
tx_pkts += tmp_tx_pkts;
tx_bytes += tmp_tx_bytes;
+ tx_dropped += priv->tx[ring].dropped_pkt;
}
}
@@ -214,9 +217,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* total rx dropped packets */
data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
rx_desc_err_dropped_pkt;
- /* Skip tx_dropped */
- i++;
-
+ data[i++] = tx_dropped;
data[i++] = priv->tx_timeo_cnt;
data[i++] = rx_skb_alloc_fail;
data[i++] = rx_buf_alloc_fail;
@@ -255,6 +256,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->fill_cnt;
data[i++] = rx->cnt;
+ data[i++] = rx->fill_cnt - rx->cnt;
do {
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
@@ -318,12 +320,14 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (gve_is_gqi(priv)) {
data[i++] = tx->req;
data[i++] = tx->done;
+ data[i++] = tx->req - tx->done;
} else {
/* DQO doesn't currently support
* posted/completed descriptor counts;
*/
data[i++] = 0;
data[i++] = 0;
+ data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
}
do {
start =
@@ -419,7 +423,9 @@ static int gve_set_channels(struct net_device *netdev,
}
static void gve_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *cmd)
+ struct ethtool_ringparam *cmd,
+ struct kernel_ethtool_ringparam *kernel_cmd,
+ struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
@@ -535,7 +541,65 @@ static int gve_get_link_ksettings(struct net_device *netdev,
return err;
}
+static int gve_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_ec,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (gve_is_gqi(priv))
+ return -EOPNOTSUPP;
+ ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
+ ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;
+
+ return 0;
+}
+
+static int gve_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_ec,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ u32 tx_usecs_orig = priv->tx_coalesce_usecs;
+ u32 rx_usecs_orig = priv->rx_coalesce_usecs;
+ int idx;
+
+ if (gve_is_gqi(priv))
+ return -EOPNOTSUPP;
+
+ if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
+ ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
+ return -EINVAL;
+ priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
+ priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
+
+ if (tx_usecs_orig != priv->tx_coalesce_usecs) {
+ for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ gve_set_itr_coalesce_usecs_dqo(priv, block,
+ priv->tx_coalesce_usecs);
+ }
+ }
+
+ if (rx_usecs_orig != priv->rx_coalesce_usecs) {
+ for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ gve_set_itr_coalesce_usecs_dqo(priv, block,
+ priv->rx_coalesce_usecs);
+ }
+ }
+
+ return 0;
+}
+
const struct ethtool_ops gve_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
.get_sset_count = gve_get_sset_count,
@@ -545,6 +609,8 @@ const struct ethtool_ops gve_ethtool_ops = {
.set_channels = gve_set_channels,
.get_channels = gve_get_channels,
.get_link = ethtool_op_get_link,
+ .get_coalesce = gve_get_coalesce,
+ .set_coalesce = gve_set_coalesce,
.get_ringparam = gve_get_ringparam,
.reset = gve_user_reset,
.get_tunable = gve_get_tunable,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 59b66f679e46..54e51c8221b8 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -334,15 +334,23 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
goto abort_with_msix_enabled;
}
- priv->ntfy_blocks =
+ priv->irq_db_indices =
dma_alloc_coherent(&priv->pdev->dev,
priv->num_ntfy_blks *
- sizeof(*priv->ntfy_blocks),
- &priv->ntfy_block_bus, GFP_KERNEL);
- if (!priv->ntfy_blocks) {
+ sizeof(*priv->irq_db_indices),
+ &priv->irq_db_indices_bus, GFP_KERNEL);
+ if (!priv->irq_db_indices) {
err = -ENOMEM;
goto abort_with_mgmt_vector;
}
+
+ priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
+ sizeof(*priv->ntfy_blocks), GFP_KERNEL);
+ if (!priv->ntfy_blocks) {
+ err = -ENOMEM;
+ goto abort_with_irq_db_indices;
+ }
+
/* Setup the other blocks - the first n-1 vectors */
for (i = 0; i < priv->num_ntfy_blks; i++) {
struct gve_notify_block *block = &priv->ntfy_blocks[i];
@@ -361,6 +369,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
}
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
get_cpu_mask(i % active_cpus));
+ block->irq_db_index = &priv->irq_db_indices[i].index;
}
return 0;
abort_with_some_ntfy_blocks:
@@ -372,10 +381,13 @@ abort_with_some_ntfy_blocks:
NULL);
free_irq(priv->msix_vectors[msix_idx].vector, block);
}
- dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
- sizeof(*priv->ntfy_blocks),
- priv->ntfy_blocks, priv->ntfy_block_bus);
+ kvfree(priv->ntfy_blocks);
priv->ntfy_blocks = NULL;
+abort_with_irq_db_indices:
+ dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
+ sizeof(*priv->irq_db_indices),
+ priv->irq_db_indices, priv->irq_db_indices_bus);
+ priv->irq_db_indices = NULL;
abort_with_mgmt_vector:
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
abort_with_msix_enabled:
@@ -403,10 +415,12 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
free_irq(priv->msix_vectors[msix_idx].vector, block);
}
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
- dma_free_coherent(&priv->pdev->dev,
- priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
- priv->ntfy_blocks, priv->ntfy_block_bus);
+ kvfree(priv->ntfy_blocks);
priv->ntfy_blocks = NULL;
+ dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
+ sizeof(*priv->irq_db_indices),
+ priv->irq_db_indices, priv->irq_db_indices_bus);
+ priv->irq_db_indices = NULL;
pci_disable_msix(priv->pdev);
kvfree(priv->msix_vectors);
priv->msix_vectors = NULL;
@@ -428,7 +442,7 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_adminq_configure_device_resources(priv,
priv->counter_array_bus,
priv->num_event_counters,
- priv->ntfy_block_bus,
+ priv->irq_db_indices_bus,
priv->num_ntfy_blks);
if (unlikely(err)) {
dev_err(&priv->pdev->dev,
@@ -752,9 +766,9 @@ static void gve_free_rings(struct gve_priv *priv)
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
- enum dma_data_direction dir)
+ enum dma_data_direction dir, gfp_t gfp_flags)
{
- *page = alloc_page(GFP_KERNEL);
+ *page = alloc_page(gfp_flags);
if (!*page) {
priv->page_alloc_fail++;
return -ENOMEM;
@@ -797,7 +811,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
for (i = 0; i < pages; i++) {
err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
- gve_qpl_dma_dir(priv, id));
+ gve_qpl_dma_dir(priv, id), GFP_KERNEL);
/* caller handles clean up */
if (err)
return -ENOMEM;
@@ -817,8 +831,7 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
put_page(page);
}
-static void gve_free_queue_page_list(struct gve_priv *priv,
- int id)
+static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
{
struct gve_queue_page_list *qpl = &priv->qpls[id];
int i;
@@ -1100,9 +1113,8 @@ static void gve_turnup(struct gve_priv *priv)
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
} else {
- u32 val = gve_set_itr_ratelimit_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO);
-
- gve_write_irq_doorbell_dqo(priv, block, val);
+ gve_set_itr_coalesce_usecs_dqo(priv, block,
+ priv->tx_coalesce_usecs);
}
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
@@ -1113,9 +1125,8 @@ static void gve_turnup(struct gve_priv *priv)
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
} else {
- u32 val = gve_set_itr_ratelimit_dqo(GVE_RX_IRQ_RATELIMIT_US_DQO);
-
- gve_write_irq_doorbell_dqo(priv, block, val);
+ gve_set_itr_coalesce_usecs_dqo(priv, block,
+ priv->rx_coalesce_usecs);
}
}
@@ -1412,6 +1423,11 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
+ if (!gve_is_gqi(priv)) {
+ priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
+ priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
+ }
+
setup_device:
err = gve_setup_device_resources(priv);
if (!err)
@@ -1663,6 +1679,58 @@ static void gve_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static void gve_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct gve_priv *priv = netdev_priv(netdev);
+ bool was_up = netif_carrier_ok(priv->dev);
+
+ rtnl_lock();
+ if (was_up && gve_close(priv->dev)) {
+ /* If the dev was up, attempt to close; if close fails, reset */
+ gve_reset_and_teardown(priv, was_up);
+ } else {
+ /* If the dev wasn't up or close worked, finish tearing down */
+ gve_teardown_priv_resources(priv);
+ }
+ rtnl_unlock();
+}
+
+#ifdef CONFIG_PM
+static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct gve_priv *priv = netdev_priv(netdev);
+ bool was_up = netif_carrier_ok(priv->dev);
+
+ priv->suspend_cnt++;
+ rtnl_lock();
+ if (was_up && gve_close(priv->dev)) {
+ /* If the dev was up, attempt to close; if close fails, reset */
+ gve_reset_and_teardown(priv, was_up);
+ } else {
+ /* If the dev wasn't up or close worked, finish tearing down */
+ gve_teardown_priv_resources(priv);
+ }
+ priv->up_before_suspend = was_up;
+ rtnl_unlock();
+ return 0;
+}
+
+static int gve_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err;
+
+ priv->resume_cnt++;
+ rtnl_lock();
+ err = gve_reset_recovery(priv, priv->up_before_suspend);
+ rtnl_unlock();
+ return err;
+}
+#endif /* CONFIG_PM */
+
static const struct pci_device_id gve_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
{ }
@@ -1673,6 +1741,11 @@ static struct pci_driver gvnic_driver = {
.id_table = gve_id_table,
.probe = gve_probe,
.remove = gve_remove,
+ .shutdown = gve_shutdown,
+#ifdef CONFIG_PM
+ .suspend = gve_suspend,
+ .resume = gve_resume,
+#endif
};
module_pci_driver(gvnic_driver);
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 3d04b5aff331..2068199445bd 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -86,7 +86,8 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
dma_addr_t dma;
int err;
- err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE);
+ err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
+ GFP_ATOMIC);
if (err)
return err;
@@ -639,8 +640,6 @@ bool gve_rx_work_pending(struct gve_rx_ring *rx)
desc = rx->desc.desc_ring + next_idx;
flags_seq = desc->flags_seq;
- /* Make sure we have synchronized the seq no with the device */
- smp_rmb();
return (GVE_SEQNO(flags_seq) == rx->desc.seqno);
}
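The gfp_flags parameter threaded through gve_alloc_page() lets each caller match its context: this RX refill path runs under NAPI, where sleeping is not allowed, hence GFP_ATOMIC, while the setup-time callers (the queue-page-list loop above and the DQO buffer pool below) keep GFP_KERNEL. In outline:

	/* NAPI/refill context: must not sleep */
	err = gve_alloc_page(priv, dev, &page, &dma,
			     DMA_FROM_DEVICE, GFP_ATOMIC);

	/* probe/reset context: may sleep and reclaim */
	err = gve_alloc_page(priv, dev, &page, &dma,
			     DMA_FROM_DEVICE, GFP_KERNEL);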
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index beb8bb079023..8c939628e2d8 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
int err;
err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
- &buf_state->addr, DMA_FROM_DEVICE);
+ &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
if (err)
return err;
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index a9cb241fedf4..4888bf05fbed 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -296,11 +296,14 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
return bytes;
}
-/* The most descriptors we could need is MAX_SKB_FRAGS + 3 : 1 for each skb frag,
- * +1 for the skb linear portion, +1 for when tcp hdr needs to be in separate descriptor,
- * and +1 if the payload wraps to the beginning of the FIFO.
+/* The most descriptors we could need is MAX_SKB_FRAGS + 4 :
+ * 1 for each skb frag
+ * 1 for the skb linear portion
+ * 1 for when tcp hdr needs to be in separate descriptor
+ * 1 if the payload wraps to the beginning of the FIFO
+ * 1 for metadata descriptor
*/
-#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 3)
+#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 4)
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
if (info->skb) {
@@ -395,6 +398,19 @@ static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}
+static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
+ struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt));
+
+ mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
+ mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT |
+ GVE_MTD_PATH_HASH_L4;
+ mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash);
+ mtd_desc->mtd.reserved0 = 0;
+ mtd_desc->mtd.reserved1 = 0;
+}
+
static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
struct sk_buff *skb, bool is_gso,
u16 len, u64 addr)
@@ -426,6 +442,7 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
union gve_tx_desc *pkt_desc, *seg_desc;
struct gve_tx_buffer_state *info;
+ int mtd_desc_nr = !!skb->l4_hash;
bool is_gso = skb_is_gso(skb);
u32 idx = tx->req & tx->mask;
int payload_iov = 2;
@@ -457,7 +474,7 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
&info->iov[payload_iov]);
gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
- 1 + payload_nfrags, hlen,
+ 1 + mtd_desc_nr + payload_nfrags, hlen,
info->iov[hdr_nfrags - 1].iov_offset);
skb_copy_bits(skb, 0,
@@ -468,8 +485,13 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
info->iov[hdr_nfrags - 1].iov_len);
copy_offset = hlen;
+ if (mtd_desc_nr) {
+ next_idx = (tx->req + 1) & tx->mask;
+ gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
+ }
+
for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
- next_idx = (tx->req + 1 + i - payload_iov) & tx->mask;
+ next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
seg_desc = &tx->desc[next_idx];
gve_tx_fill_seg_desc(seg_desc, skb, is_gso,
@@ -485,16 +507,17 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
copy_offset += info->iov[i].iov_len;
}
- return 1 + payload_nfrags;
+ return 1 + mtd_desc_nr + payload_nfrags;
}
static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
struct sk_buff *skb)
{
const struct skb_shared_info *shinfo = skb_shinfo(skb);
- int hlen, payload_nfrags, l4_hdr_offset;
- union gve_tx_desc *pkt_desc, *seg_desc;
+ int hlen, num_descriptors, l4_hdr_offset;
+ union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
struct gve_tx_buffer_state *info;
+ int mtd_desc_nr = !!skb->l4_hash;
bool is_gso = skb_is_gso(skb);
u32 idx = tx->req & tx->mask;
u64 addr;
@@ -523,23 +546,30 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
dma_unmap_len_set(info, len, len);
dma_unmap_addr_set(info, dma, addr);
- payload_nfrags = shinfo->nr_frags;
+ num_descriptors = 1 + shinfo->nr_frags;
+ if (hlen < len)
+ num_descriptors++;
+ if (mtd_desc_nr)
+ num_descriptors++;
+
+ gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
+ num_descriptors, hlen, addr);
+
+ if (mtd_desc_nr) {
+ idx = (idx + 1) & tx->mask;
+ mtd_desc = &tx->desc[idx];
+ gve_tx_fill_mtd_desc(mtd_desc, skb);
+ }
+
if (hlen < len) {
/* For gso the rest of the linear portion of the skb needs to
* be in its own descriptor.
*/
- payload_nfrags++;
- gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
- 1 + payload_nfrags, hlen, addr);
-
len -= hlen;
addr += hlen;
- idx = (tx->req + 1) & tx->mask;
+ idx = (idx + 1) & tx->mask;
seg_desc = &tx->desc[idx];
gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
- } else {
- gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
- 1 + payload_nfrags, hlen, addr);
}
for (i = 0; i < shinfo->nr_frags; i++) {
@@ -560,11 +590,14 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
}
- return 1 + payload_nfrags;
+ return num_descriptors;
unmap_drop:
- i += (payload_nfrags == shinfo->nr_frags ? 1 : 2);
+ i += num_descriptors - shinfo->nr_frags;
while (i--) {
+ /* Skip metadata descriptor, if set */
+ if (i == 1 && mtd_desc_nr == 1)
+ continue;
idx--;
gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index ab7390225942..d7a27c244d48 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -663,9 +663,13 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
* hns_get_ringparam - get ring parameter
* @net_dev: net device
* @param: ethtool parameter
+ * @kernel_param: ethtool external parameter
+ * @extack: netlink extended ACK report struct
*/
static void hns_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index 7aa2fac76c5e..6efea4662858 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -4,9 +4,9 @@
#
ccflags-y += -I$(srctree)/$(src)
-
-obj-$(CONFIG_HNS3) += hns3pf/
-obj-$(CONFIG_HNS3) += hns3vf/
+ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3pf
+ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3vf
+ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3_common
obj-$(CONFIG_HNS3) += hnae3.o
@@ -14,3 +14,16 @@ obj-$(CONFIG_HNS3_ENET) += hns3.o
hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
+
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+
+hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o \
+ hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
+
+obj-$(CONFIG_HNS3_HCLGE) += hclge.o
+hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o \
+ hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
+ hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
+
+
+hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index c2bd2584201f..b668df6193be 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -80,6 +80,9 @@ enum hclge_mbx_tbl_cfg_subcode {
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4
+#define HCLGE_RESET_SCHED_TIMEOUT (3 * HZ)
+#define HCLGE_MBX_SCHED_TIMEOUT (HZ / 2)
+
struct hclge_ring_chain_param {
u8 ring_type;
u8 tqp_index;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 3f7a9a4c59d5..9298fbecb31a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -839,6 +839,8 @@ struct hnae3_handle {
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
+ /* protects concurrent contention between debugfs commands */
+ struct mutex dbgfs_lock;
/* Network interface message level enabled bits */
u32 msg_enable;
@@ -859,6 +861,20 @@ struct hnae3_handle {
#define hnae3_get_bit(origin, shift) \
hnae3_get_field(origin, 0x1 << (shift), shift)
+#define HNAE3_FORMAT_MAC_ADDR_LEN 18
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_0 0
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_4 4
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_5 5
+
+static inline void hnae3_format_mac_addr(char *format_mac_addr,
+ const u8 *mac_addr)
+{
+ snprintf(format_mac_addr, HNAE3_FORMAT_MAC_ADDR_LEN, "%02x:**:**:**:%02x:%02x",
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_0],
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_4],
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_5]);
+}
+
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
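hnae3_format_mac_addr() above deliberately masks bytes 1-3 so that full hardware addresses do not leak into the kernel log. A typical (hypothetical) call site:

	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];

	hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr);
	dev_info(dev, "MAC %s\n", format_mac_addr);
	/* prints e.g. "MAC 00:**:**:**:ab:cd" */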
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
new file mode 100644
index 000000000000..c15ca710dabb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+
+static void hclge_comm_cmd_config_regs(struct hclge_comm_hw *hw,
+ struct hclge_comm_cmq_ring *ring)
+{
+ dma_addr_t dma = ring->desc_dma_addr;
+ u32 reg_val;
+
+ if (ring->ring_type == HCLGE_COMM_TYPE_CSQ) {
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ lower_32_bits(dma));
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ reg_val = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGE_COMM_NIC_SW_RST_RDY;
+ reg_val |= ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
+ } else {
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ lower_32_bits(dma));
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ reg_val = ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, reg_val);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
+ }
+}
+
+void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw)
+{
+ hclge_comm_cmd_config_regs(hw, &hw->cmq.csq);
+ hclge_comm_cmd_config_regs(hw, &hw->cmq.crq);
+}
+
+void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
+{
+ desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
+ HCLGE_COMM_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
+}
+
+static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
+ bool is_pf)
+{
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
+ if (is_pf && ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
+ set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
+ }
+}
+
+void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
+ enum hclge_opcode_type opcode,
+ bool is_read)
+{
+ memset((void *)desc, 0, sizeof(struct hclge_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
+ HCLGE_COMM_CMD_FLAG_IN);
+
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
+}
+
+int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, bool en)
+{
+ struct hclge_comm_firmware_compat_cmd *req;
+ struct hclge_desc desc;
+ u32 compat = 0;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
+
+ if (en) {
+ req = (struct hclge_comm_firmware_compat_cmd *)desc.data;
+
+ hnae3_set_bit(compat, HCLGE_COMM_LINK_EVENT_REPORT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_NCSI_ERROR_REPORT_EN_B, 1);
+ if (hclge_comm_dev_phy_imp_supported(ae_dev))
+ hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);
+
+ req->compat = cpu_to_le32(compat);
+ }
+
+ return hclge_comm_cmd_send(hw, &desc, 1);
+}
+
+void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclge_desc);
+
+ if (!ring->desc)
+ return;
+
+ dma_free_coherent(&ring->pdev->dev, size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+}
+
+static int hclge_comm_alloc_cmd_desc(struct hclge_comm_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclge_desc);
+
+ ring->desc = dma_alloc_coherent(&ring->pdev->dev,
+ size, &ring->desc_dma_addr, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static __le32 hclge_comm_build_api_caps(void)
+{
+ u32 api_caps = 0;
+
+ hnae3_set_bit(api_caps, HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, 1);
+
+ return cpu_to_le32(api_caps);
+}
+
+static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
+ {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
+ {HCLGE_COMM_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
+ {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
+ {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
+ {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
+ {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
+ {HCLGE_COMM_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
+ {HCLGE_COMM_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
+ {HCLGE_COMM_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
+ {HCLGE_COMM_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
+ {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
+ {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
+ {HCLGE_COMM_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
+ {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+ {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
+ HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
+ {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
+};
+
+static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
+ {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
+ {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
+ {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
+ {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
+ {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
+ {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
+ {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
+ {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+};
+
+static void
+hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
+ struct hclge_comm_query_version_cmd *cmd)
+{
+ const struct hclge_comm_caps_bit_map *caps_map =
+ is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
+ u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
+ ARRAY_SIZE(hclge_vf_cmd_caps);
+ u32 caps, i;
+
+ caps = __le32_to_cpu(cmd->caps[0]);
+ for (i = 0; i < size; i++)
+ if (hnae3_get_bit(caps, caps_map[i].imp_bit))
+ set_bit(caps_map[i].local_bit, ae_dev->caps);
+}
+
+int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type)
+{
+ struct hclge_comm_cmq_ring *ring =
+ (ring_type == HCLGE_COMM_TYPE_CSQ) ? &hw->cmq.csq :
+ &hw->cmq.crq;
+ int ret;
+
+ ring->ring_type = ring_type;
+
+ ret = hclge_comm_alloc_cmd_desc(ring);
+ if (ret)
+ dev_err(&ring->pdev->dev, "descriptor %s alloc error %d\n",
+ (ring_type == HCLGE_COMM_TYPE_CSQ) ? "CSQ" : "CRQ",
+ ret);
+
+ return ret;
+}
+
+int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf)
+{
+ struct hclge_comm_query_version_cmd *resp;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
+ resp = (struct hclge_comm_query_version_cmd *)desc.data;
+ resp->api_caps = hclge_comm_build_api_caps();
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ *fw_version = le32_to_cpu(resp->firmware);
+
+ ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
+ HNAE3_PCI_REVISION_BIT_SIZE;
+ ae_dev->dev_version |= ae_dev->pdev->revision;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ hclge_comm_set_default_capability(ae_dev, is_pf);
+
+ hclge_comm_parse_capability(ae_dev, is_pf, resp);
+
+ return ret;
+}
+
+static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT,
+ HCLGE_OPC_STATS_32_BIT,
+ HCLGE_OPC_STATS_MAC,
+ HCLGE_OPC_STATS_MAC_ALL,
+ HCLGE_OPC_QUERY_32_BIT_REG,
+ HCLGE_OPC_QUERY_64_BIT_REG,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
+ HCLGE_QUERY_ALL_ERR_INFO };
+
+static bool hclge_comm_is_special_opcode(u16 opcode)
+{
+ /* These commands have several descriptors,
+ * and use the first one to save the opcode and return value.
+ */
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
+ if (spec_opcode[i] == opcode)
+ return true;
+
+ return false;
+}
+
+static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+ int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+ return ring->desc_num - used - 1;
+}
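hclge_comm_ring_space() is the standard producer/consumer ring calculation, with one slot kept permanently empty so a full ring is distinguishable from an empty one. A worked example:

	/* desc_num = 1024, next_to_use = 10, next_to_clean = 1020:
	 *   used  = (10 - 1020 + 1024) % 1024 = 14
	 *   space = 1024 - 14 - 1             = 1009
	 * (the "- 1" reserves the slot that keeps full != empty)
	 */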
+
+static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc, int num)
+{
+ struct hclge_desc *desc_to_use;
+ int handle = 0;
+
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+}
+
+static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring,
+ int head)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+
+ if (ntu > ntc)
+ return head >= ntc && head <= ntu;
+
+ return head >= ntc || head <= ntu;
+}
+
+static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
+{
+ struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
+ int clean;
+ u32 head;
+
+ head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+ rmb(); /* Make sure head is ready before touching any data */
+
+ if (!hclge_comm_is_valid_csq_clean_head(csq, head)) {
+ dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n",
+ head, csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hw->cmq.csq.pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+ dev_warn(&hw->cmq.csq.pdev->dev,
+ "IMP firmware watchdog reset soon expected!\n");
+ return -EIO;
+ }
+
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
+ return clean;
+}
+
+static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
+{
+ u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+ return head == hw->cmq.csq.next_to_use;
+}
+
+static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
+ bool *is_completed)
+{
+ u32 timeout = 0;
+
+ do {
+ if (hclge_comm_cmd_csq_done(hw)) {
+ *is_completed = true;
+ break;
+ }
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+}
+
+static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
+{
+ struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
+ { HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
+ { HCLGE_COMM_CMD_NO_AUTH, -EPERM },
+ { HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
+ { HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
+ { HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
+ { HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
+ { HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
+ { HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
+ { HCLGE_COMM_CMD_TIMEOUT, -ETIME },
+ { HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
+ { HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
+ { HCLGE_COMM_CMD_INVALID, -EBADR },
+ };
+ u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
+ u32 i;
+
+ for (i = 0; i < errcode_count; i++)
+ if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
+ return hclge_comm_cmd_errcode[i].common_errno;
+
+ return -EIO;
+}
+
+static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc, int num,
+ int ntc)
+{
+ u16 opcode, desc_ret;
+ int handle;
+
+ opcode = le16_to_cpu(desc[0].opcode);
+ for (handle = 0; handle < num; handle++) {
+ desc[handle] = hw->cmq.csq.desc[ntc];
+ ntc++;
+ if (ntc >= hw->cmq.csq.desc_num)
+ ntc = 0;
+ }
+ if (likely(!hclge_comm_is_special_opcode(opcode)))
+ desc_ret = le16_to_cpu(desc[num - 1].retval);
+ else
+ desc_ret = le16_to_cpu(desc[0].retval);
+
+ hw->cmq.last_status = desc_ret;
+
+ return hclge_comm_cmd_convert_err_code(desc_ret);
+}
+
+static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int num, int ntc)
+{
+ bool is_completed = false;
+ int handle, ret;
+
+ /* If the command is sync, wait for the firmware to write back;
+ * if multiple descriptors are to be sent, use the first one to check.
+ */
+ if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+ hclge_comm_wait_for_resp(hw, &is_completed);
+
+ if (!is_completed)
+ ret = -EBADE;
+ else
+ ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);
+
+ /* Clean the command send queue */
+ handle = hclge_comm_cmd_csq_clean(hw);
+ if (handle < 0)
+ ret = handle;
+ else if (handle != num)
+ dev_warn(&hw->cmq.csq.pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ return ret;
+}
+
+/**
+ * hclge_comm_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send command for command queue, it
+ * sends the queue, cleans the queue, etc
+ **/
+int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num)
+{
+ struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
+ int ret;
+ int ntc;
+
+ spin_lock_bh(&hw->cmq.csq.lock);
+
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
+ /* If CMDQ ring is full, SW HEAD and HW HEAD may be different, so
+ * update the SW HEAD pointer csq->next_to_clean.
+ */
+ csq->next_to_clean =
+ hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ /* Record the location of desc in the ring for this send,
+ * which the hardware will use when writing back.
+ */
+ ntc = hw->cmq.csq.next_to_use;
+
+ hclge_comm_cmd_copy_desc(hw, desc, num);
+
+ /* Write to hardware */
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ hw->cmq.csq.next_to_use);
+
+ ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);
+
+ spin_unlock_bh(&hw->cmq.csq.lock);
+
+ return ret;
+}
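+
+/* Illustrative usage sketch (hypothetical helper, not part of this patch):
+ * a caller builds a descriptor with hclge_comm_cmd_setup_basic_desc() and
+ * submits it through hclge_comm_cmd_send(); the opcode is real, but the
+ * wrapper below is only an example.
+ */
+static int hclge_comm_example_query_fw_ver(struct hclge_comm_hw *hw,
+ u32 *fw_ver)
+{
+ struct hclge_desc desc;
+ int ret;
+
+ /* "true" marks the descriptor as a read (query) command */
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ /* the firmware version is the first word written back by firmware */
+ *fw_ver = le32_to_cpu(desc.data[0]);
+ return 0;
+}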
+
+static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
+{
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
+}
+
+void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+
+ hclge_comm_firmware_compat_config(ae_dev, hw, false);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+ /* Wait to ensure that the firmware completes any possible
+ * leftover commands.
+ */
+ msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME);
+ spin_lock_bh(&cmdq->csq.lock);
+ spin_lock(&cmdq->crq.lock);
+ hclge_comm_cmd_uninit_regs(hw);
+ spin_unlock(&cmdq->crq.lock);
+ spin_unlock_bh(&cmdq->csq.lock);
+
+ hclge_comm_free_cmd_desc(&cmdq->csq);
+ hclge_comm_free_cmd_desc(&cmdq->crq);
+}
+
+int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+ int ret;
+
+ /* Setup the lock for command queue */
+ spin_lock_init(&cmdq->csq.lock);
+ spin_lock_init(&cmdq->crq.lock);
+
+ cmdq->csq.pdev = pdev;
+ cmdq->crq.pdev = pdev;
+
+ /* Set up the queue entries used by the command queues */
+ cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
+ cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
+
+ /* Setup Tx write back timeout */
+ cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;
+
+ /* Setup queue rings */
+ ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
+ if (ret) {
+ dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ);
+ if (ret) {
+ dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret);
+ goto err_csq;
+ }
+
+ return 0;
+err_csq:
+ hclge_comm_free_cmd_desc(&hw->cmq.csq);
+ return ret;
+}
+
+int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf,
+ unsigned long reset_pending)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+ int ret;
+
+ spin_lock_bh(&cmdq->csq.lock);
+ spin_lock(&cmdq->crq.lock);
+
+ cmdq->csq.next_to_clean = 0;
+ cmdq->csq.next_to_use = 0;
+ cmdq->crq.next_to_clean = 0;
+ cmdq->crq.next_to_use = 0;
+
+ hclge_comm_cmd_init_regs(hw);
+
+ spin_unlock(&cmdq->crq.lock);
+ spin_unlock_bh(&cmdq->csq.lock);
+
+ clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+ /* Check if there is a new reset pending, because a higher level
+ * reset may happen while a lower level reset is being processed.
+ */
+ if (reset_pending) {
+ ret = -EBUSY;
+ goto err_cmd_init;
+ }
+
+ /* get version and device capabilities */
+ ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw,
+ fw_version, is_pf);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "failed to query version and capabilities, ret = %d\n",
+ ret);
+ goto err_cmd_init;
+ }
+
+ dev_info(&ae_dev->pdev->dev,
+ "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+ return 0;
+
+ /* Ask the firmware to enable some features; the driver can work
+ * without them.
+ */
+ ret = hclge_comm_firmware_compat_config(ae_dev, hw, true);
+ if (ret)
+ dev_warn(&ae_dev->pdev->dev,
+ "Firmware compatible features not enabled(%d).\n",
+ ret);
+ return 0;
+
+err_cmd_init:
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+ return ret;
+}
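+
+/* Illustrative lifecycle sketch (hypothetical helper, not part of this
+ * patch): the expected pairing of the setup entry points above; on an
+ * hclge_comm_cmd_init() failure the rings allocated by
+ * hclge_comm_cmd_queue_init() are freed again.
+ */
+static int hclge_comm_example_bring_up(struct hnae3_ae_dev *ae_dev,
+ struct pci_dev *pdev,
+ struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf)
+{
+ int ret;
+
+ /* allocate the CSQ/CRQ rings and initialise their locks */
+ ret = hclge_comm_cmd_queue_init(pdev, hw);
+ if (ret)
+ return ret;
+
+ /* program the ring registers and query version/capabilities */
+ ret = hclge_comm_cmd_init(ae_dev, hw, fw_version, is_pf, 0);
+ if (ret) {
+ hclge_comm_free_cmd_desc(&hw->cmq.csq);
+ hclge_comm_free_cmd_desc(&hw->cmq.crq);
+ }
+
+ return ret;
+}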
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
new file mode 100644
index 000000000000..876650eddac4
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#ifndef __HCLGE_COMM_CMD_H
+#define __HCLGE_COMM_CMD_H
+#include <linux/types.h>
+
+#include "hnae3.h"
+
+#define HCLGE_COMM_CMD_FLAG_IN BIT(0)
+#define HCLGE_COMM_CMD_FLAG_NEXT BIT(2)
+#define HCLGE_COMM_CMD_FLAG_WR BIT(3)
+#define HCLGE_COMM_CMD_FLAG_NO_INTR BIT(4)
+
+#define HCLGE_COMM_SEND_SYNC(flag) \
+ ((flag) & HCLGE_COMM_CMD_FLAG_NO_INTR)
+
+#define HCLGE_COMM_LINK_EVENT_REPORT_EN_B 0
+#define HCLGE_COMM_NCSI_ERROR_REPORT_EN_B 1
+#define HCLGE_COMM_PHY_IMP_EN_B 2
+#define HCLGE_COMM_MAC_STATS_EXT_EN_B 3
+#define HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B 4
+
+#define hclge_comm_dev_phy_imp_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (ae_dev)->caps)
+
+#define HCLGE_COMM_TYPE_CRQ 0
+#define HCLGE_COMM_TYPE_CSQ 1
+
+#define HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME 200
+
+/* BAR registers for cmdq */
+#define HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGE_COMM_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGE_COMM_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGE_COMM_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG 0x2701C
+#define HCLGE_COMM_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGE_COMM_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGE_COMM_NIC_CRQ_HEAD_REG 0x27028
+/* Vector0 interrupt CMDQ event source register (RW) */
+#define HCLGE_COMM_VECTOR0_CMDQ_SRC_REG 0x27100
+/* Vector0 interrupt CMDQ event status register (RO) */
+#define HCLGE_COMM_VECTOR0_CMDQ_STATE_REG 0x27104
+#define HCLGE_COMM_CMDQ_INTR_EN_REG 0x27108
+#define HCLGE_COMM_CMDQ_INTR_GEN_REG 0x2710C
+#define HCLGE_COMM_CMDQ_INTR_STS_REG 0x27104
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGE_COMM_NIC_SW_RST_RDY_B 16
+#define HCLGE_COMM_NIC_SW_RST_RDY BIT(HCLGE_COMM_NIC_SW_RST_RDY_B)
+#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3
+#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT 30000
+
+enum hclge_opcode_type {
+ /* Generic commands */
+ HCLGE_OPC_QUERY_FW_VER = 0x0001,
+ HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
+ HCLGE_OPC_GBL_RST_STATUS = 0x0021,
+ HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022,
+ HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
+ HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
+ HCLGE_OPC_GET_CFG_PARAM = 0x0025,
+ HCLGE_OPC_PF_RST_DONE = 0x0026,
+ HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
+
+ HCLGE_OPC_STATS_64_BIT = 0x0030,
+ HCLGE_OPC_STATS_32_BIT = 0x0031,
+ HCLGE_OPC_STATS_MAC = 0x0032,
+ HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
+ HCLGE_OPC_STATS_MAC_ALL = 0x0034,
+
+ HCLGE_OPC_QUERY_REG_NUM = 0x0040,
+ HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
+ HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
+ HCLGE_OPC_DFX_BD_NUM = 0x0043,
+ HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
+ HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
+ HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
+ HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
+ HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
+ HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
+ HCLGE_OPC_DFX_NCSI_REG = 0x004A,
+ HCLGE_OPC_DFX_RTC_REG = 0x004B,
+ HCLGE_OPC_DFX_PPP_REG = 0x004C,
+ HCLGE_OPC_DFX_RCB_REG = 0x004D,
+ HCLGE_OPC_DFX_TQP_REG = 0x004E,
+ HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
+
+ HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
+
+ /* MAC command */
+ HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
+ HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
+ HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
+ HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
+ HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
+ HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
+ HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
+ HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
+ HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
+ HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
+ HCLGE_OPC_QUERY_ROH_TYPE_INFO = 0x0389,
+
+ /* PTP commands */
+ HCLGE_OPC_PTP_INT_EN = 0x0501,
+ HCLGE_OPC_PTP_MODE_CFG = 0x0507,
+
+ /* PFC/Pause commands */
+ HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
+ HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
+ HCLGE_OPC_CFG_MAC_PARA = 0x0703,
+ HCLGE_OPC_CFG_PFC_PARA = 0x0704,
+ HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
+ HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
+ HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
+ HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
+ HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
+ HCLGE_OPC_QOS_MAP = 0x070A,
+
+ /* ETS/scheduler commands */
+ HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
+ HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
+ HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
+ HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
+ HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
+ HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
+ HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
+ HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
+ HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
+ HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
+ HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
+ HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
+ HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
+ HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
+ HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
+ HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
+ HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+ HCLGE_OPC_TM_NODES = 0x0816,
+ HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
+ HCLGE_OPC_QSET_DFX_STS = 0x0844,
+ HCLGE_OPC_PRI_DFX_STS = 0x0845,
+ HCLGE_OPC_PG_DFX_STS = 0x0846,
+ HCLGE_OPC_PORT_DFX_STS = 0x0847,
+ HCLGE_OPC_SCH_NQ_CNT = 0x0848,
+ HCLGE_OPC_SCH_RQ_CNT = 0x0849,
+ HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
+ HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
+ HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
+
+ /* Packet buffer allocate commands */
+ HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
+ HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
+ HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
+ HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
+ HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
+ HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
+
+ /* TQP management command */
+ HCLGE_OPC_SET_TQP_MAP = 0x0A01,
+
+ /* TQP commands */
+ HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
+ HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
+ HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
+ HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
+ HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
+ HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
+ HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
+ HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
+ HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
+ HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
+
+ /* PPU commands */
+ HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
+
+ /* TSO command */
+ HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
+ HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
+
+ /* RSS commands */
+ HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
+ HCLGE_OPC_RSS_TC_MODE = 0x0D08,
+ HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
+
+ /* Promiscuous mode command */
+ HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
+
+ /* Vlan offload commands */
+ HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
+ HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
+
+ /* Interrupts commands */
+ HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
+ HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
+
+ /* MAC commands */
+ HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
+ HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
+ HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
+ HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
+ HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
+ HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
+ HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
+
+ /* MAC VLAN commands */
+ HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
+
+ /* VLAN commands */
+ HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
+ HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
+ HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
+ HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103,
+
+ /* Flow Director commands */
+ HCLGE_OPC_FD_MODE_CTRL = 0x1200,
+ HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
+ HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
+ HCLGE_OPC_FD_TCAM_OP = 0x1203,
+ HCLGE_OPC_FD_AD_OP = 0x1204,
+ HCLGE_OPC_FD_CNT_OP = 0x1205,
+ HCLGE_OPC_FD_USER_DEF_OP = 0x1207,
+ HCLGE_OPC_FD_QB_CTRL = 0x1210,
+ HCLGE_OPC_FD_QB_AD_OP = 0x1211,
+
+ /* MDIO command */
+ HCLGE_OPC_MDIO_CONFIG = 0x1900,
+
+ /* QCN commands */
+ HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
+ HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
+ HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
+ HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
+ HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
+ HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
+ HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
+ HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
+
+ /* Mailbox command */
+ HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
+ HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
+
+ /* Led command */
+ HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+
+ /* clear hardware resource command */
+ HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B,
+
+ /* NCL config command */
+ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
+
+ /* IMP stats command */
+ HCLGE_OPC_IMP_STATS_BD = 0x7012,
+ HCLGE_OPC_IMP_STATS_INFO = 0x7013,
+ HCLGE_OPC_IMP_COMPAT_CFG = 0x701A,
+
+ /* SFP command */
+ HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
+ HCLGE_OPC_GET_SFP_EXIST = 0x7101,
+ HCLGE_OPC_GET_SFP_INFO = 0x7104,
+
+ /* Error INT commands */
+ HCLGE_MAC_COMMON_INT_EN = 0x030E,
+ HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
+ HCLGE_SSU_ECC_INT_CMD = 0x0989,
+ HCLGE_SSU_COMMON_INT_CMD = 0x098C,
+ HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
+ HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
+ HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
+ HCLGE_COMMON_ECC_INT_CFG = 0x1505,
+ HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
+ HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
+ HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516,
+ HCLGE_QUERY_ALL_ERR_INFO = 0x1517,
+ HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
+ HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
+ HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
+ HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
+ HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
+ HCLGE_IGU_COMMON_INT_EN = 0x1806,
+ HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
+ HCLGE_PPP_CMD0_INT_CMD = 0x2100,
+ HCLGE_PPP_CMD1_INT_CMD = 0x2101,
+ HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
+ HCLGE_NCSI_INT_EN = 0x2401,
+
+ /* ROH MAC commands */
+ HCLGE_OPC_MAC_ADDR_CHECK = 0x9004,
+
+ /* PHY command */
+ HCLGE_OPC_PHY_LINK_KSETTING = 0x7025,
+ HCLGE_OPC_PHY_REG = 0x7026,
+
+ /* Query link diagnosis info command */
+ HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
+};
+
+enum hclge_comm_cmd_return_status {
+ HCLGE_COMM_CMD_EXEC_SUCCESS = 0,
+ HCLGE_COMM_CMD_NO_AUTH = 1,
+ HCLGE_COMM_CMD_NOT_SUPPORTED = 2,
+ HCLGE_COMM_CMD_QUEUE_FULL = 3,
+ HCLGE_COMM_CMD_NEXT_ERR = 4,
+ HCLGE_COMM_CMD_UNEXE_ERR = 5,
+ HCLGE_COMM_CMD_PARA_ERR = 6,
+ HCLGE_COMM_CMD_RESULT_ERR = 7,
+ HCLGE_COMM_CMD_TIMEOUT = 8,
+ HCLGE_COMM_CMD_HILINK_ERR = 9,
+ HCLGE_COMM_CMD_QUEUE_ILLEGAL = 10,
+ HCLGE_COMM_CMD_INVALID = 11,
+};
+
+enum HCLGE_COMM_CAP_BITS {
+ HCLGE_COMM_CAP_UDP_GSO_B,
+ HCLGE_COMM_CAP_QB_B,
+ HCLGE_COMM_CAP_FD_FORWARD_TC_B,
+ HCLGE_COMM_CAP_PTP_B,
+ HCLGE_COMM_CAP_INT_QL_B,
+ HCLGE_COMM_CAP_HW_TX_CSUM_B,
+ HCLGE_COMM_CAP_TX_PUSH_B,
+ HCLGE_COMM_CAP_PHY_IMP_B,
+ HCLGE_COMM_CAP_TQP_TXRX_INDEP_B,
+ HCLGE_COMM_CAP_HW_PAD_B,
+ HCLGE_COMM_CAP_STASH_B,
+ HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B,
+ HCLGE_COMM_CAP_RAS_IMP_B = 12,
+ HCLGE_COMM_CAP_FEC_B = 13,
+ HCLGE_COMM_CAP_PAUSE_B = 14,
+ HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
+ HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
+};
+
+enum HCLGE_COMM_API_CAP_BITS {
+ HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B,
+};
+
+/* capabilities bits map between imp firmware and local driver */
+struct hclge_comm_caps_bit_map {
+ u16 imp_bit;
+ u16 local_bit;
+};
+
+struct hclge_comm_firmware_compat_cmd {
+ __le32 compat;
+ u8 rsv[20];
+};
+
+enum hclge_comm_cmd_state {
+ HCLGE_COMM_STATE_CMD_DISABLE,
+};
+
+struct hclge_comm_errcode {
+ u32 imp_errcode;
+ int common_errno;
+};
+
+#define HCLGE_COMM_QUERY_CAP_LENGTH 3
+struct hclge_comm_query_version_cmd {
+ __le32 firmware;
+ __le32 hardware;
+ __le32 api_caps;
+ __le32 caps[HCLGE_COMM_QUERY_CAP_LENGTH]; /* capabilities of device */
+};
+
+#define HCLGE_DESC_DATA_LEN 6
+struct hclge_desc {
+ __le16 opcode;
+ __le16 flag;
+ __le16 retval;
+ __le16 rsv;
+ __le32 data[HCLGE_DESC_DATA_LEN];
+};
+
+struct hclge_comm_cmq_ring {
+ dma_addr_t desc_dma_addr;
+ struct hclge_desc *desc;
+ struct pci_dev *pdev;
+ u32 head;
+ u32 tail;
+
+ u16 buf_size;
+ u16 desc_num;
+ int next_to_use;
+ int next_to_clean;
+ u8 ring_type; /* cmq ring type */
+ spinlock_t lock; /* Command queue lock */
+};
+
+enum hclge_comm_cmd_status {
+ HCLGE_COMM_STATUS_SUCCESS = 0,
+ HCLGE_COMM_ERR_CSQ_FULL = -1,
+ HCLGE_COMM_ERR_CSQ_TIMEOUT = -2,
+ HCLGE_COMM_ERR_CSQ_ERROR = -3,
+};
+
+struct hclge_comm_cmq {
+ struct hclge_comm_cmq_ring csq;
+ struct hclge_comm_cmq_ring crq;
+ u16 tx_timeout;
+ enum hclge_comm_cmd_status last_status;
+};
+
+struct hclge_comm_hw {
+ void __iomem *io_base;
+ void __iomem *mem_base;
+ struct hclge_comm_cmq cmq;
+ unsigned long comm_state;
+};
+
+static inline void hclge_comm_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+ writel(value, base + reg);
+}
+
+static inline u32 hclge_comm_read_reg(u8 __iomem *base, u32 reg)
+{
+ u8 __iomem *reg_addr = READ_ONCE(base);
+
+ return readl(reg_addr + reg);
+}
+
+#define hclge_comm_write_dev(a, reg, value) \
+ hclge_comm_write_reg((a)->io_base, reg, value)
+#define hclge_comm_read_dev(a, reg) \
+ hclge_comm_read_reg((a)->io_base, reg)
+
+void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw);
+int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf);
+int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type);
+int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num);
+void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
+int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, bool en);
+void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring);
+void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
+ enum hclge_opcode_type opcode,
+ bool is_read);
+void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw);
+int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw);
+int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf,
+ unsigned long reset_pending);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
new file mode 100644
index 000000000000..e23729ac3bb8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021-2021 Hisilicon Limited.
+#include <linux/skbuff.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+#include "hclge_comm_rss.h"
+
+static const u8 hclge_comm_hash_key[] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+};
+
+static void
+hclge_comm_init_rss_tuple(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_tuple_cfg *rss_tuple_cfg)
+{
+ rss_tuple_cfg->ipv4_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv4_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv4_sctp_en = HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
+ rss_tuple_cfg->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv6_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv6_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv6_sctp_en =
+ ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+ HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
+ rss_tuple_cfg->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+}
+
+int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg)
+{
+ u16 rss_ind_tbl_size = ae_dev->dev_specs.rss_ind_tbl_size;
+ int rss_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
+ u16 *rss_ind_tbl;
+
+ if (nic->flags & HNAE3_SUPPORT_VF)
+ rss_cfg->rss_size = nic->kinfo.rss_size;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ rss_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
+
+ hclge_comm_init_rss_tuple(ae_dev, &rss_cfg->rss_tuple_sets);
+
+ rss_cfg->rss_algo = rss_algo;
+
+ rss_ind_tbl = devm_kcalloc(&ae_dev->pdev->dev, rss_ind_tbl_size,
+ sizeof(*rss_ind_tbl), GFP_KERNEL);
+ if (!rss_ind_tbl)
+ return -ENOMEM;
+
+ rss_cfg->rss_indirection_tbl = rss_ind_tbl;
+ memcpy(rss_cfg->rss_hash_key, hclge_comm_hash_key,
+ HCLGE_COMM_RSS_KEY_SIZE);
+
+ hclge_comm_rss_indir_init_cfg(ae_dev, rss_cfg);
+
+ return 0;
+}
+
+void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size)
+{
+ u16 roundup_size;
+ u32 i;
+
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
+ for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0;
+ }
+}
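+
+/* Worked example (illustrative values): with rss_size = 24,
+ * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so every TC is
+ * programmed with tc_size = 5 (i.e. 2^5 = 32 hash buckets) and TC i
+ * starts at offset 24 * i whenever bit i is set in hw_tc_map.
+ */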
+
+int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size)
+{
+ struct hclge_comm_rss_tc_mode_cmd *req;
+ struct hclge_desc desc;
+ unsigned int i;
+ int ret;
+
+ req = (struct hclge_comm_rss_tc_mode_cmd *)desc.data;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
+ for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
+ u16 mode = 0;
+
+ hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae3_set_field(mode, HCLGE_COMM_RSS_TC_SIZE_M,
+ HCLGE_COMM_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_SIZE_MSB_B,
+ tc_size[i] >> HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET &
+ 0x1);
+ hnae3_set_field(mode, HCLGE_COMM_RSS_TC_OFFSET_M,
+ HCLGE_COMM_RSS_TC_OFFSET_S, tc_offset[i]);
+
+ req->rss_tc_mode[i] = cpu_to_le16(mode);
+ }
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret)
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to set rss tc mode, ret = %d.\n", ret);
+
+ return ret;
+}
+
+int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
+ struct hclge_comm_hw *hw, const u8 *key,
+ const u8 hfunc)
+{
+ u8 hash_algo;
+ int ret;
+
+ ret = hclge_comm_parse_rss_hfunc(rss_cfg, hfunc, &hash_algo);
+ if (ret)
+ return ret;
+
+ /* Set the RSS Hash Key if specified by the user */
+ if (key) {
+ ret = hclge_comm_set_rss_algo_key(hw, hash_algo, key);
+ if (ret)
+ return ret;
+
+ /* Update the shadow RSS key with the user-specified key */
+ memcpy(rss_cfg->rss_hash_key, key, HCLGE_COMM_RSS_KEY_SIZE);
+ } else {
+ ret = hclge_comm_set_rss_algo_key(hw, hash_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
+ }
+ rss_cfg->rss_algo = hash_algo;
+
+ return 0;
+}
+
+int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_comm_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
+ false);
+
+ ret = hclge_comm_init_rss_tuple_cmd(rss_cfg, nfc, ae_dev, req);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to init rss tuple cmd, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to set rss tuple, ret = %d.\n", ret);
+ return ret;
+ }
+
+ rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
+ rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
+ rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
+ rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+ return 0;
+}
+
+u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
+{
+ return HCLGE_COMM_RSS_KEY_SIZE;
+}
+
+void hclge_comm_get_rss_type(struct hnae3_handle *nic,
+ struct hclge_comm_rss_tuple_cfg *rss_tuple_sets)
+{
+ if (rss_tuple_sets->ipv4_tcp_en ||
+ rss_tuple_sets->ipv4_udp_en ||
+ rss_tuple_sets->ipv4_sctp_en ||
+ rss_tuple_sets->ipv6_tcp_en ||
+ rss_tuple_sets->ipv6_udp_en ||
+ rss_tuple_sets->ipv6_sctp_en)
+ nic->kinfo.rss_type = PKT_HASH_TYPE_L4;
+ else if (rss_tuple_sets->ipv4_fragment_en ||
+ rss_tuple_sets->ipv6_fragment_en)
+ nic->kinfo.rss_type = PKT_HASH_TYPE_L3;
+ else
+ nic->kinfo.rss_type = PKT_HASH_TYPE_NONE;
+}
+
+int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
+ const u8 hfunc, u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = rss_cfg->rss_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg)
+{
+ u16 i;
+
+ /* Initialize the RSS indirection table */
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
+ rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
+}
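+
+/* Worked example (illustrative values): with rss_ind_tbl_size = 512 and
+ * rss_size = 16, the shadow table is filled with 0, 1, ..., 15, 0, 1, ...,
+ * spreading the hash buckets evenly across the 16 queues.
+ */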
+
+int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
+ u8 *tuple_sets)
+{
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ break;
+ case UDP_V4_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ break;
+ case TCP_V6_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ break;
+ case UDP_V6_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ break;
+ case SCTP_V4_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ break;
+ case SCTP_V6_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ *tuple_sets = HCLGE_COMM_S_IP_BIT | HCLGE_COMM_D_IP_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req,
+ u16 qid, u32 j)
+{
+ u8 rss_msb_oft;
+ u8 rss_msb_val;
+
+ rss_msb_oft = j * HCLGE_COMM_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
+ rss_msb_val = (qid >> HCLGE_COMM_RSS_CFG_TBL_BW_L & 0x1) <<
+ (j * HCLGE_COMM_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
+ req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
+}
+
+int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, const u16 *indir)
+{
+ struct hclge_comm_rss_ind_tbl_cmd *req;
+ struct hclge_desc desc;
+ u16 rss_cfg_tbl_num;
+ int ret;
+ u16 qid;
+ u16 i;
+ u32 j;
+
+ req = (struct hclge_comm_rss_ind_tbl_cmd *)desc.data;
+ rss_cfg_tbl_num = ae_dev->dev_specs.rss_ind_tbl_size /
+ HCLGE_COMM_RSS_CFG_TBL_SIZE;
+
+ for (i = 0; i < rss_cfg_tbl_num; i++) {
+ hclge_comm_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_RSS_INDIR_TABLE,
+ false);
+
+ req->start_table_index =
+ cpu_to_le16(i * HCLGE_COMM_RSS_CFG_TBL_SIZE);
+ req->rss_set_bitmap =
+ cpu_to_le16(HCLGE_COMM_RSS_SET_BITMAP_MSK);
+ for (j = 0; j < HCLGE_COMM_RSS_CFG_TBL_SIZE; j++) {
+ qid = indir[i * HCLGE_COMM_RSS_CFG_TBL_SIZE + j];
+ req->rss_qid_l[j] = qid & 0xff;
+ hclge_comm_append_rss_msb_info(req, qid, j);
+ }
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to configure rss table, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
+ struct hclge_comm_hw *hw, bool is_pf,
+ struct hclge_comm_rss_cfg *rss_cfg)
+{
+ struct hclge_comm_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
+ false);
+
+ req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ if (is_pf)
+ hclge_comm_get_rss_type(nic, &rss_cfg->rss_tuple_sets);
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret)
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to configure rss input, ret = %d.\n", ret);
+ return ret;
+}
+
+void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
+ u8 *hfunc)
+{
+ /* Get hash algorithm */
+ if (hfunc) {
+ switch (rss_cfg->rss_algo) {
+ case HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ:
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ case HCLGE_COMM_RSS_HASH_ALGO_SIMPLE:
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ break;
+ }
+ }
+
+ /* Get the RSS Key requested by the user */
+ if (key)
+ memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE);
+}
+
+void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
+ u32 *indir, u16 rss_ind_tbl_size)
+{
+ u16 i;
+
+ if (!indir)
+ return;
+
+ for (i = 0; i < rss_ind_tbl_size; i++)
+ indir[i] = rss_cfg->rss_indirection_tbl[i];
+}
+
+int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
+ const u8 *key)
+{
+ struct hclge_comm_rss_config_cmd *req;
+ unsigned int key_offset = 0;
+ struct hclge_desc desc;
+ int key_counts;
+ int key_size;
+ int ret;
+
+ key_counts = HCLGE_COMM_RSS_KEY_SIZE;
+ req = (struct hclge_comm_rss_config_cmd *)desc.data;
+
+ while (key_counts) {
+ hclge_comm_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_RSS_GENERIC_CONFIG,
+ false);
+
+ req->hash_config |= (hfunc & HCLGE_COMM_RSS_HASH_ALGO_MASK);
+ req->hash_config |=
+ (key_offset << HCLGE_COMM_RSS_HASH_KEY_OFFSET_B);
+
+ key_size = min(HCLGE_COMM_RSS_HASH_KEY_NUM, key_counts);
+ memcpy(req->hash_key,
+ key + key_offset * HCLGE_COMM_RSS_HASH_KEY_NUM,
+ key_size);
+
+ key_counts -= key_size;
+ key_offset++;
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to configure RSS key, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
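+
+/* Worked example: the HCLGE_COMM_RSS_KEY_SIZE (40) byte key is pushed in
+ * HCLGE_COMM_RSS_HASH_KEY_NUM (16) byte chunks, so the loop above issues
+ * three commands carrying 16, 16 and 8 key bytes at key_offset 0, 1 and 2.
+ */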
+
+static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+{
+ u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;
+
+ if (nfc->data & RXH_L4_B_2_3)
+ hash_sets |= HCLGE_COMM_D_PORT_BIT;
+ else
+ hash_sets &= ~HCLGE_COMM_D_PORT_BIT;
+
+ if (nfc->data & RXH_IP_SRC)
+ hash_sets |= HCLGE_COMM_S_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_COMM_S_IP_BIT;
+
+ if (nfc->data & RXH_IP_DST)
+ hash_sets |= HCLGE_COMM_D_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_COMM_D_IP_BIT;
+
+ if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
+ hash_sets |= HCLGE_COMM_V_TAG_BIT;
+
+ return hash_sets;
+}
+
+int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_input_tuple_cmd *req)
+{
+ u8 tuple_sets;
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ tuple_sets = hclge_comm_get_rss_hash_bits(nfc);
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ req->ipv4_tcp_en = tuple_sets;
+ break;
+ case TCP_V6_FLOW:
+ req->ipv6_tcp_en = tuple_sets;
+ break;
+ case UDP_V4_FLOW:
+ req->ipv4_udp_en = tuple_sets;
+ break;
+ case UDP_V6_FLOW:
+ req->ipv6_udp_en = tuple_sets;
+ break;
+ case SCTP_V4_FLOW:
+ req->ipv4_sctp_en = tuple_sets;
+ break;
+ case SCTP_V6_FLOW:
+ if (ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
+ return -EINVAL;
+
+ req->ipv6_sctp_en = tuple_sets;
+ break;
+ case IPV4_FLOW:
+ req->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ break;
+ case IPV6_FLOW:
+ req->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+u64 hclge_comm_convert_rss_tuple(u8 tuple_sets)
+{
+ u64 tuple_data = 0;
+
+ if (tuple_sets & HCLGE_COMM_D_PORT_BIT)
+ tuple_data |= RXH_L4_B_2_3;
+ if (tuple_sets & HCLGE_COMM_S_PORT_BIT)
+ tuple_data |= RXH_L4_B_0_1;
+ if (tuple_sets & HCLGE_COMM_D_IP_BIT)
+ tuple_data |= RXH_IP_DST;
+ if (tuple_sets & HCLGE_COMM_S_IP_BIT)
+ tuple_data |= RXH_IP_SRC;
+
+ return tuple_data;
+}
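+
+/* Illustrative round trip: tuple_sets = HCLGE_COMM_S_IP_BIT |
+ * HCLGE_COMM_D_IP_BIT | HCLGE_COMM_S_PORT_BIT | HCLGE_COMM_D_PORT_BIT
+ * converts to RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
+ * the inverse of the mapping built in hclge_comm_get_rss_hash_bits().
+ */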
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
new file mode 100644
index 000000000000..aa1d7a6ff4ca
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#ifndef __HCLGE_COMM_RSS_H
+#define __HCLGE_COMM_RSS_H
+#include <linux/types.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+
+#define HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ 0
+#define HCLGE_COMM_RSS_HASH_ALGO_SIMPLE 1
+#define HCLGE_COMM_RSS_HASH_ALGO_SYMMETRIC 2
+
+#define HCLGE_COMM_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
+#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
+
+#define HCLGE_COMM_D_PORT_BIT BIT(0)
+#define HCLGE_COMM_S_PORT_BIT BIT(1)
+#define HCLGE_COMM_D_IP_BIT BIT(2)
+#define HCLGE_COMM_S_IP_BIT BIT(3)
+#define HCLGE_COMM_V_TAG_BIT BIT(4)
+#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGE_COMM_D_IP_BIT | HCLGE_COMM_S_IP_BIT | HCLGE_COMM_V_TAG_BIT)
+#define HCLGE_COMM_MAX_TC_NUM 8
+
+#define HCLGE_COMM_RSS_TC_OFFSET_S 0
+#define HCLGE_COMM_RSS_TC_OFFSET_M GENMASK(10, 0)
+#define HCLGE_COMM_RSS_TC_SIZE_MSB_B 11
+#define HCLGE_COMM_RSS_TC_SIZE_S 12
+#define HCLGE_COMM_RSS_TC_SIZE_M GENMASK(14, 12)
+#define HCLGE_COMM_RSS_TC_VALID_B 15
+#define HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET 3
+
+struct hclge_comm_rss_tuple_cfg {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+};
+
+#define HCLGE_COMM_RSS_KEY_SIZE 40
+#define HCLGE_COMM_RSS_CFG_TBL_SIZE 16
+#define HCLGE_COMM_RSS_CFG_TBL_BW_H 2U
+#define HCLGE_COMM_RSS_CFG_TBL_BW_L 8U
+#define HCLGE_COMM_RSS_CFG_TBL_SIZE_H 4
+#define HCLGE_COMM_RSS_SET_BITMAP_MSK GENMASK(15, 0)
+#define HCLGE_COMM_RSS_HASH_ALGO_MASK GENMASK(3, 0)
+#define HCLGE_COMM_RSS_HASH_KEY_OFFSET_B 4
+
+#define HCLGE_COMM_RSS_HASH_KEY_NUM 16
+struct hclge_comm_rss_config_cmd {
+ u8 hash_config;
+ u8 rsv[7];
+ u8 hash_key[HCLGE_COMM_RSS_HASH_KEY_NUM];
+};
+
+struct hclge_comm_rss_cfg {
+ u8 rss_hash_key[HCLGE_COMM_RSS_KEY_SIZE]; /* user-configured hash key */
+
+ /* shadow table */
+ u16 *rss_indirection_tbl;
+ u32 rss_algo;
+
+ struct hclge_comm_rss_tuple_cfg rss_tuple_sets;
+ u32 rss_size;
+};
+
+struct hclge_comm_rss_input_tuple_cmd {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+ u8 rsv[16];
+};
+
+struct hclge_comm_rss_ind_tbl_cmd {
+ __le16 start_table_index;
+ __le16 rss_set_bitmap;
+ u8 rss_qid_h[HCLGE_COMM_RSS_CFG_TBL_SIZE_H];
+ u8 rss_qid_l[HCLGE_COMM_RSS_CFG_TBL_SIZE];
+};
+
+struct hclge_comm_rss_tc_mode_cmd {
+ __le16 rss_tc_mode[HCLGE_COMM_MAX_TC_NUM];
+ u8 rsv[8];
+};
+
+u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle);
+void hclge_comm_get_rss_type(struct hnae3_handle *nic,
+ struct hclge_comm_rss_tuple_cfg *rss_tuple_sets);
+void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg);
+int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
+ u8 *tuple_sets);
+int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
+ const u8 hfunc, u8 *hash_algo);
+void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
+ u8 *hfunc);
+void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
+ u32 *indir, u16 rss_ind_tbl_size);
+int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
+ const u8 *key);
+int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_input_tuple_cmd *req);
+u64 hclge_comm_convert_rss_tuple(u8 tuple_sets);
+int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
+ struct hclge_comm_hw *hw, bool is_pf,
+ struct hclge_comm_rss_cfg *rss_cfg);
+int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, const u16 *indir);
+int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg);
+void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size);
+int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size);
+int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
+ struct hclge_comm_hw *hw, const u8 *key,
+ const u8 hfunc);
+int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
new file mode 100644
index 000000000000..0c60f41fca8a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#include <linux/err.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+#include "hclge_comm_tqp_stats.h"
+
+u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_comm_tqp *tqp;
+ u64 *buff = data;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+ }
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+ }
+
+ return buff;
+}
+
+int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+
+ return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE;
+}
+
+u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ u8 *buff = data;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_comm_tqp *tqp =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_comm_tqp *tqp =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ return buff;
+}
+
+int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
+ struct hclge_comm_hw *hw)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_comm_tqp *tqp;
+ struct hclge_desc desc;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_RX_STATS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index);
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to get tqp stat, ret = %d, tx = %u.\n",
+ ret, i);
+ return ret;
+ }
+ tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to get tqp stat, ret = %d, rx = %u.\n",
+ ret, i);
+ return ret;
+ }
+ tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+ }
+
+ return 0;
+}
+
+void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_comm_tqp *tqp;
+ struct hnae3_queue *queue;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ queue = kinfo->tqp[i];
+ tqp = container_of(queue, struct hclge_comm_tqp, q);
+ memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+ }
+}
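+
+/* Illustrative usage sketch (hypothetical helper, not part of this patch):
+ * refresh the per-queue counters from firmware and then copy them into an
+ * ethtool-style stats buffer using the helpers above.
+ */
+static int hclge_comm_example_read_stats(struct hnae3_handle *handle,
+ struct hclge_comm_hw *hw,
+ u64 *data)
+{
+ int ret;
+
+ ret = hclge_comm_tqps_update_stats(handle, hw);
+ if (ret)
+ return ret;
+
+ hclge_comm_tqps_get_stats(handle, data);
+ return 0;
+}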
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
new file mode 100644
index 000000000000..a46350162ee8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#ifndef __HCLGE_COMM_TQP_STATS_H
+#define __HCLGE_COMM_TQP_STATS_H
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include "hnae3.h"
+
+/* each TQP has two queues: TX and RX */
+#define HCLGE_COMM_QUEUE_PAIR_SIZE 2
+
+/* TQP stats */
+struct hclge_comm_tqp_stats {
+ /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
+ u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
+ /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
+ u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
+};
+
+struct hclge_comm_tqp {
+ /* copy of device pointer from pci_dev,
+ * used when performing DMA mapping
+ */
+ struct device *dev;
+ struct hnae3_queue q;
+ struct hclge_comm_tqp_stats tqp_stats;
+ u16 index; /* Global index in a NIC controller */
+
+ bool alloced;
+};
+
+u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data);
+int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle);
+u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data);
+void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle);
+int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
+ struct hclge_comm_hw *hw);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 081295bff765..f726a5b70f9e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1083,7 +1083,7 @@ static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
sprintf(result[j++], "%u", index);
sprintf(result[j++], "%u",
READ_ONCE(ring->page_pool->pages_state_hold_cnt));
- sprintf(result[j++], "%u",
+ sprintf(result[j++], "%d",
atomic_read(&ring->page_pool->pages_state_release_cnt));
sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
sprintf(result[j++], "%u", ring->page_pool->p.order);
@@ -1226,6 +1226,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
if (ret)
return ret;
+ mutex_lock(&handle->dbgfs_lock);
save_buf = &hns3_dbg_cmd[index].buf;
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
@@ -1238,15 +1239,15 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
read_buf = *save_buf;
} else {
read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
- if (!read_buf)
- return -ENOMEM;
+ if (!read_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
/* save the buffer addr until the last read operation */
*save_buf = read_buf;
- }
- /* get data ready for the first time to read */
- if (!*ppos) {
+ /* get the data ready for the first read */
ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
read_buf, hns3_dbg_cmd[index].buf_len);
if (ret)
@@ -1255,8 +1256,10 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
size = simple_read_from_buffer(buffer, count, ppos, read_buf,
strlen(read_buf));
- if (size > 0)
+ if (size > 0) {
+ mutex_unlock(&handle->dbgfs_lock);
return size;
+ }
out:
/* free the buffer for the last read operation */
@@ -1265,6 +1268,7 @@ out:
*save_buf = NULL;
}
+ mutex_unlock(&handle->dbgfs_lock);
return ret;
}
@@ -1337,6 +1341,8 @@ int hns3_dbg_init(struct hnae3_handle *handle)
debugfs_create_dir(hns3_dbg_dentry[i].name,
handle->hnae3_dbgfs);
+ mutex_init(&handle->dbgfs_lock);
+
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1363,6 +1369,7 @@ int hns3_dbg_init(struct hnae3_handle *handle)
return 0;
out:
+ mutex_destroy(&handle->dbgfs_lock);
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
return ret;
@@ -1378,6 +1385,7 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
hns3_dbg_cmd[i].buf = NULL;
}
+ mutex_destroy(&handle->dbgfs_lock);
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
index bd8801065e02..83aa1450ab9f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -4,6 +4,8 @@
#ifndef __HNS3_DEBUGFS_H
#define __HNS3_DEBUGFS_H
+#include "hnae3.h"
+
#define HNS3_DBG_READ_LEN 65536
#define HNS3_DBG_READ_LEN_128KB 0x20000
#define HNS3_DBG_READ_LEN_1MB 0x100000
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 9ccebbaa0d69..babc5d7a3b52 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -17,6 +17,7 @@
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/gre.h>
+#include <net/gro.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
@@ -53,10 +54,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");
-static unsigned int tx_spare_buf_size;
-module_param(tx_spare_buf_size, uint, 0400);
-MODULE_PARM_DESC(tx_spare_buf_size, "Size used to allocate tx spare buffer");
-
static unsigned int tx_sgl = 1;
module_param(tx_sgl, uint, 0600);
MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
@@ -1005,9 +1002,7 @@ static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
return false;
if (ALIGN(len, dma_get_cache_alignment()) > space) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_spare_full++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_spare_full);
return false;
}
@@ -1024,9 +1019,7 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
return false;
if (space < HNS3_MAX_SGL_SIZE) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_spare_full++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_spare_full);
return false;
}
@@ -1041,8 +1034,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
dma_addr_t dma;
int order;
- alloc_size = tx_spare_buf_size ? tx_spare_buf_size :
- ring->tqp->handle->kinfo.tx_spare_buf_size;
+ alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
if (!alloc_size)
return;
@@ -1306,7 +1298,7 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
if (!(!skb->encapsulation &&
(l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
l4.udp->dest == htons(GENEVE_UDP_PORT) ||
- l4.udp->dest == htons(4790))))
+ l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT))))
return false;
return true;
@@ -1359,44 +1351,9 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
HNS3_TUN_NVGRE);
}
-static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
- u8 il4_proto, u32 *type_cs_vlan_tso,
- u32 *ol_type_vlan_len_msec)
+static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3,
+ u32 *type_cs_vlan_tso)
{
- unsigned char *l2_hdr = skb->data;
- u32 l4_proto = ol4_proto;
- union l4_hdr_info l4;
- union l3_hdr_info l3;
- u32 l2_len, l3_len;
-
- l4.hdr = skb_transport_header(skb);
- l3.hdr = skb_network_header(skb);
-
- /* handle encapsulation skb */
- if (skb->encapsulation) {
- /* If this is a not UDP/GRE encapsulation skb */
- if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
- /* drop the skb tunnel packet if hardware don't support,
- * because hardware can't calculate csum when TSO.
- */
- if (skb_is_gso(skb))
- return -EDOM;
-
- /* the stack computes the IP header already,
- * driver calculate l4 checksum when not TSO.
- */
- return skb_checksum_help(skb);
- }
-
- hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
-
- /* switch to inner header */
- l2_hdr = skb_inner_mac_header(skb);
- l3.hdr = skb_inner_network_header(skb);
- l4.hdr = skb_inner_transport_header(skb);
- l4_proto = il4_proto;
- }
-
if (l3.v4->version == 4) {
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
HNS3_L3T_IPV4);
@@ -1410,15 +1367,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
HNS3_L3T_IPV6);
}
+}
- /* compute inner(/normal) L2 header size, defined in 2 Bytes */
- l2_len = l3.hdr - l2_hdr;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
-
- /* compute inner(/normal) L3 header size, defined in 4 Bytes */
- l3_len = l4.hdr - l3.hdr;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
-
+static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4,
+ u32 l4_proto, u32 *type_cs_vlan_tso)
+{
/* compute inner(/normal) L4 header size, defined in 4 Bytes */
switch (l4_proto) {
case IPPROTO_TCP:
@@ -1464,6 +1417,57 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
return 0;
}
+static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ u8 il4_proto, u32 *type_cs_vlan_tso,
+ u32 *ol_type_vlan_len_msec)
+{
+ unsigned char *l2_hdr = skb->data;
+ u32 l4_proto = ol4_proto;
+ union l4_hdr_info l4;
+ union l3_hdr_info l3;
+ u32 l2_len, l3_len;
+
+ l4.hdr = skb_transport_header(skb);
+ l3.hdr = skb_network_header(skb);
+
+ /* handle encapsulation skb */
+ if (skb->encapsulation) {
+ /* If this is not a UDP/GRE encapsulation skb */
+ if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
+ /* drop the tunnel skb if the hardware doesn't support it,
+ * because the hardware can't calculate the csum when doing TSO.
+ */
+ if (skb_is_gso(skb))
+ return -EDOM;
+
+ /* the stack has already computed the IP header checksum;
+ * the driver calculates the L4 checksum when not doing TSO.
+ */
+ return skb_checksum_help(skb);
+ }
+
+ hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
+
+ /* switch to inner header */
+ l2_hdr = skb_inner_mac_header(skb);
+ l3.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = il4_proto;
+ }
+
+ hns3_set_l3_type(skb, l3, type_cs_vlan_tso);
+
+ /* compute inner(/normal) L2 header size, defined in 2 Bytes */
+ l2_len = l3.hdr - l2_hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
+
+ /* compute inner(/normal) L3 header size, defined in 4 Bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
+
+ return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso);
+}
+
static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
struct sk_buff *skb)
{
@@ -1540,92 +1544,122 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
return true;
}
-static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
- struct sk_buff *skb, struct hns3_desc *desc,
- struct hns3_desc_cb *desc_cb)
+struct hns3_desc_param {
+ u32 paylen_ol4cs;
+ u32 ol_type_vlan_len_msec;
+ u32 type_cs_vlan_tso;
+ u16 mss_hw_csum;
+ u16 inner_vtag;
+ u16 out_vtag;
+};
+
+static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa)
+{
+ pa->paylen_ol4cs = skb->len;
+ pa->ol_type_vlan_len_msec = 0;
+ pa->type_cs_vlan_tso = 0;
+ pa->mss_hw_csum = 0;
+ pa->inner_vtag = 0;
+ pa->out_vtag = 0;
+}
+
+static int hns3_handle_vlan_info(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_param *param)
{
- u32 ol_type_vlan_len_msec = 0;
- u32 paylen_ol4cs = skb->len;
- u32 type_cs_vlan_tso = 0;
- u16 mss_hw_csum = 0;
- u16 inner_vtag = 0;
- u16 out_vtag = 0;
int ret;
ret = hns3_handle_vtags(ring, skb);
if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_vlan_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_vlan_err);
return ret;
} else if (ret == HNS3_INNER_VLAN_TAG) {
- inner_vtag = skb_vlan_tag_get(skb);
- inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ param->inner_vtag = skb_vlan_tag_get(skb);
+ param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
VLAN_PRIO_MASK;
- hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
+ hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
} else if (ret == HNS3_OUTER_VLAN_TAG) {
- out_vtag = skb_vlan_tag_get(skb);
- out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ param->out_vtag = skb_vlan_tag_get(skb);
+ param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
VLAN_PRIO_MASK;
- hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
+ hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
1);
}
+ return 0;
+}
- desc_cb->send_bytes = skb->len;
+static int hns3_handle_csum_partial(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_cb *desc_cb,
+ struct hns3_desc_param *param)
+{
+ u8 ol4_proto, il4_proto;
+ int ret;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 ol4_proto, il4_proto;
-
- if (hns3_check_hw_tx_csum(skb)) {
- /* set checksum start and offset, defined in 2 Bytes */
- hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
- skb_checksum_start_offset(skb) >> 1);
- hns3_set_field(ol_type_vlan_len_msec,
- HNS3_TXD_CSUM_OFFSET_S,
- skb->csum_offset >> 1);
- mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
- goto out_hw_tx_csum;
- }
+ if (hns3_check_hw_tx_csum(skb)) {
+ /* set checksum start and offset, defined in 2 Bytes */
+ hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
+ skb_checksum_start_offset(skb) >> 1);
+ hns3_set_field(param->ol_type_vlan_len_msec,
+ HNS3_TXD_CSUM_OFFSET_S,
+ skb->csum_offset >> 1);
+ param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
+ return 0;
+ }
- skb_reset_mac_len(skb);
+ skb_reset_mac_len(skb);
- ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
- if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_l4_proto_err++;
- u64_stats_update_end(&ring->syncp);
- return ret;
- }
+ ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_l4_proto_err);
+ return ret;
+ }
- ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
- if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_l2l3l4_err++;
- u64_stats_update_end(&ring->syncp);
- return ret;
- }
+ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+ &param->type_cs_vlan_tso,
+ &param->ol_type_vlan_len_msec);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_l2l3l4_err);
+ return ret;
+ }
+
+ ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum,
+ &param->type_cs_vlan_tso, &desc_cb->send_bytes);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_tso_err);
+ return ret;
+ }
+ return 0;
+}
+
+static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, struct hns3_desc *desc,
+ struct hns3_desc_cb *desc_cb)
+{
+ struct hns3_desc_param param;
+ int ret;
+
+ hns3_init_desc_data(skb, &param);
+ ret = hns3_handle_vlan_info(ring, skb, &param);
+ if (unlikely(ret < 0))
+ return ret;
+
+ desc_cb->send_bytes = skb->len;
- ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
- &type_cs_vlan_tso, &desc_cb->send_bytes);
- if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_tso_err++;
- u64_stats_update_end(&ring->syncp);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ret = hns3_handle_csum_partial(ring, skb, desc_cb, &param);
+ if (ret)
return ret;
- }
}
-out_hw_tx_csum:
/* Set txbd */
desc->tx.ol_type_vlan_len_msec =
- cpu_to_le32(ol_type_vlan_len_msec);
- desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
- desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs);
- desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum);
- desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
- desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
+ cpu_to_le32(param.ol_type_vlan_len_msec);
+ desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso);
+ desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs);
+ desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum);
+ desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag);
return 0;
}
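The refactor above replaces a pile of local variables with a single struct hns3_desc_param that the vlan/csum helpers fill in before the final descriptor write. A compilable userspace sketch of the same pattern, with simplified names and made-up field updates:

#include <stdint.h>
#include <stdio.h>

struct desc_param {			/* mirrors hns3_desc_param's role */
	uint32_t type_cs_vlan_tso;
	uint32_t ol_type_vlan_len_msec;
	uint16_t inner_vtag;
	uint16_t out_vtag;
};

static void handle_vlan(struct desc_param *p) { p->inner_vtag = 100; }
static void handle_csum(struct desc_param *p) { p->type_cs_vlan_tso |= 1u << 5; }

int main(void)
{
	struct desc_param p = { 0 };	/* hns3_init_desc_data() equivalent */

	handle_vlan(&p);		/* each helper updates shared state */
	handle_csum(&p);
	printf("vtag=%u flags=%#x\n", p.inner_vtag, p.type_cs_vlan_tso);
	return 0;			/* then commit p.* into the BD */
}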
@@ -1705,9 +1739,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
}
if (unlikely(dma_mapping_error(dev, dma))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}
@@ -1853,9 +1885,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
* recursion level of over HNS3_MAX_RECURSION_LEVEL.
*/
if (bd_num == UINT_MAX) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.over_max_recursion++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, over_max_recursion);
return -ENOMEM;
}
@@ -1864,16 +1894,12 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
*/
if (skb->len > HNS3_MAX_TSO_SIZE ||
(!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.hw_limitation++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, hw_limitation);
return -ENOMEM;
}
if (__skb_linearize(skb)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}
@@ -1903,9 +1929,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
bd_num = hns3_tx_bd_count(skb->len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_copy++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_copy);
}
out:
@@ -1925,9 +1949,7 @@ out:
return bd_num;
}
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_busy++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_busy);
return -EBUSY;
}
@@ -2012,9 +2034,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
ring->pending_buf += num;
if (!doorbell) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_more++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_more);
return;
}
@@ -2064,9 +2084,7 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
ret = skb_copy_bits(skb, 0, buf, size);
if (unlikely(ret < 0)) {
hns3_tx_spare_rollback(ring, cb_len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.copy_bits_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, copy_bits_err);
return ret;
}
@@ -2089,9 +2107,8 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
dma_sync_single_for_device(ring_to_dev(ring), dma, size,
DMA_TO_DEVICE);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_bounce++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_bounce);
+
return bd_num;
}
@@ -2121,9 +2138,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
if (unlikely(nents < 0)) {
hns3_tx_spare_rollback(ring, cb_len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.skb2sgl_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, skb2sgl_err);
return -ENOMEM;
}
@@ -2132,9 +2147,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
DMA_TO_DEVICE);
if (unlikely(!sgt->nents)) {
hns3_tx_spare_rollback(ring, cb_len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.map_sg_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, map_sg_err);
return -ENOMEM;
}
@@ -2146,10 +2159,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
for (i = 0; i < sgt->nents; i++)
bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
sg_dma_len(sgt->sgl + i));
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_sgl++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_sgl);
return bd_num;
}
@@ -2174,23 +2184,45 @@ out:
return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
}
+static int hns3_handle_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_cb *desc_cb,
+ int next_to_use_head)
+{
+ int ret;
+
+ ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
+ desc_cb);
+ if (unlikely(ret < 0))
+ goto fill_err;
+
+	/* 'ret < 0' means a filling error, 'ret == 0' means skb->len is
+	 * zero (which is unlikely), and 'ret > 0' is the number of tx
+	 * descriptors that need to be notified to the hw.
+	 */
+ ret = hns3_handle_desc_filling(ring, skb);
+ if (likely(ret > 0))
+ return ret;
+
+fill_err:
+ hns3_clear_desc(ring, next_to_use_head);
+ return ret;
+}
+
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct netdev_queue *dev_queue;
- int pre_ntu, next_to_use_head;
+ int pre_ntu, ret;
bool doorbell;
- int ret;
/* Hardware can only handle short frames above 32 bytes */
if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return NETDEV_TX_OK;
}
@@ -2209,20 +2241,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
goto out_err_tx_ok;
}
- next_to_use_head = ring->next_to_use;
-
- ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
- desc_cb);
- if (unlikely(ret < 0))
- goto fill_err;
-
- /* 'ret < 0' means filling error, 'ret == 0' means skb->len is
- * zero, which is unlikely, and 'ret > 0' means how many tx desc
- * need to be notified to the hw.
- */
- ret = hns3_handle_desc_filling(ring, skb);
+ ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use);
if (unlikely(ret <= 0))
- goto fill_err;
+ goto out_err_tx_ok;
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
(ring->desc_num - 1);
@@ -2244,9 +2265,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
-fill_err:
- hns3_clear_desc(ring, next_to_use_head);
-
out_err_tx_ok:
dev_kfree_skb_any(skb);
hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
@@ -2255,6 +2273,8 @@ out_err_tx_ok:
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
+ char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN];
+ char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hnae3_handle *h = hns3_get_handle(netdev);
struct sockaddr *mac_addr = p;
int ret;
@@ -2263,8 +2283,9 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
- netdev_info(netdev, "already using mac address %pM\n",
- mac_addr->sa_data);
+ hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
+ netdev_info(netdev, "already using mac address %s\n",
+ format_mac_addr_sa);
return 0;
}
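hnae3_format_mac_addr() replaces %pM throughout so that full MAC addresses no longer land in the logs. A hypothetical userspace equivalent; the exact mask layout used by the real helper in hnae3.h is assumed here, not confirmed:

#include <stdio.h>
#include <stdint.h>

#define FORMAT_MAC_ADDR_LEN 18

static void format_mac_addr(char *buf, const uint8_t *mac)
{
	/* keep only the first and last octet, mask the rest (assumed) */
	snprintf(buf, FORMAT_MAC_ADDR_LEN, "%02x:**:**:**:**:%02x",
		 mac[0], mac[5]);
}

int main(void)
{
	const uint8_t mac[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };
	char buf[FORMAT_MAC_ADDR_LEN];

	format_mac_addr(buf, mac);
	puts(buf);	/* "52:**:**:**:**:56" */
	return 0;
}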
@@ -2273,8 +2294,10 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
*/
if (!hns3_is_phys_func(h->pdev) &&
!is_zero_ether_addr(netdev->perm_addr)) {
- netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n",
- netdev->perm_addr, mac_addr->sa_data);
+ hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr);
+ hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
+		netdev_err(netdev, "has permanent MAC %s, user MAC %s not allowed\n",
+ format_mac_addr_perm, format_mac_addr_sa);
return -EPERM;
}
@@ -2382,90 +2405,89 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb,
return features;
}
+static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
+ struct hns3_enet_ring *ring, bool is_tx)
+{
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ if (is_tx) {
+ stats->tx_bytes += ring->stats.tx_bytes;
+ stats->tx_packets += ring->stats.tx_pkts;
+ stats->tx_dropped += ring->stats.sw_err_cnt;
+ stats->tx_dropped += ring->stats.tx_vlan_err;
+ stats->tx_dropped += ring->stats.tx_l4_proto_err;
+ stats->tx_dropped += ring->stats.tx_l2l3l4_err;
+ stats->tx_dropped += ring->stats.tx_tso_err;
+ stats->tx_dropped += ring->stats.over_max_recursion;
+ stats->tx_dropped += ring->stats.hw_limitation;
+ stats->tx_dropped += ring->stats.copy_bits_err;
+ stats->tx_dropped += ring->stats.skb2sgl_err;
+ stats->tx_dropped += ring->stats.map_sg_err;
+ stats->tx_errors += ring->stats.sw_err_cnt;
+ stats->tx_errors += ring->stats.tx_vlan_err;
+ stats->tx_errors += ring->stats.tx_l4_proto_err;
+ stats->tx_errors += ring->stats.tx_l2l3l4_err;
+ stats->tx_errors += ring->stats.tx_tso_err;
+ stats->tx_errors += ring->stats.over_max_recursion;
+ stats->tx_errors += ring->stats.hw_limitation;
+ stats->tx_errors += ring->stats.copy_bits_err;
+ stats->tx_errors += ring->stats.skb2sgl_err;
+ stats->tx_errors += ring->stats.map_sg_err;
+ } else {
+ stats->rx_bytes += ring->stats.rx_bytes;
+ stats->rx_packets += ring->stats.rx_pkts;
+ stats->rx_dropped += ring->stats.l2_err;
+ stats->rx_errors += ring->stats.l2_err;
+ stats->rx_errors += ring->stats.l3l4_csum_err;
+ stats->rx_crc_errors += ring->stats.l2_err;
+ stats->multicast += ring->stats.rx_multicast;
+ stats->rx_length_errors += ring->stats.err_pkt_len;
+ }
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+}
+
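hns3_fetch_stats() keeps the u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() retry loop from the original code. The idea is a seqcount-style optimistic read: retry the snapshot whenever a writer bumped the sequence mid-read. A simplified userspace model of the reader side (an assumed simplification; the kernel primitive compiles to a no-op on 64-bit):

#include <stdatomic.h>
#include <stdint.h>

struct stats { uint64_t tx_bytes, tx_pkts; };
struct ring  { atomic_uint seq; struct stats stats; };

static struct stats read_stats(struct ring *r)
{
	struct stats snap;
	unsigned int start;

	do {
		do {	/* wait until no writer is mid-update (odd seq) */
			start = atomic_load_explicit(&r->seq,
						     memory_order_acquire);
		} while (start & 1);
		snap = r->stats;	/* take a tentative snapshot */
	} while (atomic_load_explicit(&r->seq, memory_order_acquire) != start);

	return snap;			/* consistent if seq never moved */
}

int main(void)
{
	struct ring r = { .stats = { 123, 4 } };
	struct stats s = read_stats(&r);

	return s.tx_pkts == 4 ? 0 : 1;
}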
static void hns3_nic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
int queue_num = priv->ae_handle->kinfo.num_tqps;
struct hnae3_handle *handle = priv->ae_handle;
+ struct rtnl_link_stats64 ring_total_stats;
struct hns3_enet_ring *ring;
- u64 rx_length_errors = 0;
- u64 rx_crc_errors = 0;
- u64 rx_multicast = 0;
- unsigned int start;
- u64 tx_errors = 0;
- u64 rx_errors = 0;
unsigned int idx;
- u64 tx_bytes = 0;
- u64 rx_bytes = 0;
- u64 tx_pkts = 0;
- u64 rx_pkts = 0;
- u64 tx_drop = 0;
- u64 rx_drop = 0;
if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
handle->ae_algo->ops->update_stats(handle, &netdev->stats);
+ memset(&ring_total_stats, 0, sizeof(ring_total_stats));
for (idx = 0; idx < queue_num; idx++) {
/* fetch the tx stats */
ring = &priv->ring[idx];
- do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
- tx_bytes += ring->stats.tx_bytes;
- tx_pkts += ring->stats.tx_pkts;
- tx_drop += ring->stats.sw_err_cnt;
- tx_drop += ring->stats.tx_vlan_err;
- tx_drop += ring->stats.tx_l4_proto_err;
- tx_drop += ring->stats.tx_l2l3l4_err;
- tx_drop += ring->stats.tx_tso_err;
- tx_drop += ring->stats.over_max_recursion;
- tx_drop += ring->stats.hw_limitation;
- tx_drop += ring->stats.copy_bits_err;
- tx_drop += ring->stats.skb2sgl_err;
- tx_drop += ring->stats.map_sg_err;
- tx_errors += ring->stats.sw_err_cnt;
- tx_errors += ring->stats.tx_vlan_err;
- tx_errors += ring->stats.tx_l4_proto_err;
- tx_errors += ring->stats.tx_l2l3l4_err;
- tx_errors += ring->stats.tx_tso_err;
- tx_errors += ring->stats.over_max_recursion;
- tx_errors += ring->stats.hw_limitation;
- tx_errors += ring->stats.copy_bits_err;
- tx_errors += ring->stats.skb2sgl_err;
- tx_errors += ring->stats.map_sg_err;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ hns3_fetch_stats(&ring_total_stats, ring, true);
/* fetch the rx stats */
ring = &priv->ring[idx + queue_num];
- do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
- rx_bytes += ring->stats.rx_bytes;
- rx_pkts += ring->stats.rx_pkts;
- rx_drop += ring->stats.l2_err;
- rx_errors += ring->stats.l2_err;
- rx_errors += ring->stats.l3l4_csum_err;
- rx_crc_errors += ring->stats.l2_err;
- rx_multicast += ring->stats.rx_multicast;
- rx_length_errors += ring->stats.err_pkt_len;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
- }
-
- stats->tx_bytes = tx_bytes;
- stats->tx_packets = tx_pkts;
- stats->rx_bytes = rx_bytes;
- stats->rx_packets = rx_pkts;
-
- stats->rx_errors = rx_errors;
- stats->multicast = rx_multicast;
- stats->rx_length_errors = rx_length_errors;
- stats->rx_crc_errors = rx_crc_errors;
+ hns3_fetch_stats(&ring_total_stats, ring, false);
+ }
+
+ stats->tx_bytes = ring_total_stats.tx_bytes;
+ stats->tx_packets = ring_total_stats.tx_packets;
+ stats->rx_bytes = ring_total_stats.rx_bytes;
+ stats->rx_packets = ring_total_stats.rx_packets;
+
+ stats->rx_errors = ring_total_stats.rx_errors;
+ stats->multicast = ring_total_stats.multicast;
+ stats->rx_length_errors = ring_total_stats.rx_length_errors;
+ stats->rx_crc_errors = ring_total_stats.rx_crc_errors;
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
- stats->tx_errors = tx_errors;
- stats->rx_dropped = rx_drop;
- stats->tx_dropped = tx_drop;
+ stats->tx_errors = ring_total_stats.tx_errors;
+ stats->rx_dropped = ring_total_stats.rx_dropped;
+ stats->tx_dropped = ring_total_stats.tx_dropped;
stats->collisions = netdev->stats.collisions;
stats->rx_over_errors = netdev->stats.rx_over_errors;
stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -2658,18 +2680,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
-static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
+static int hns3_get_timeout_queue(struct net_device *ndev)
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = hns3_get_handle(ndev);
- struct hns3_enet_ring *tx_ring;
- struct napi_struct *napi;
- int timeout_queue = 0;
- int hw_head, hw_tail;
- int fbd_num, fbd_oft;
- int ebd_num, ebd_oft;
- int bd_num, bd_err;
- int ring_en, tc;
int i;
/* Find the stopped queue the same way the stack does */
@@ -2678,11 +2690,17 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
unsigned long trans_start;
q = netdev_get_tx_queue(ndev, i);
- trans_start = q->trans_start;
+ trans_start = READ_ONCE(q->trans_start);
if (netif_xmit_stopped(q) &&
time_after(jiffies,
(trans_start + ndev->watchdog_timeo))) {
- timeout_queue = i;
+#ifdef CONFIG_BQL
+ struct dql *dql = &q->dql;
+
+ netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n",
+ dql->last_obj_cnt, dql->num_queued,
+ dql->adj_limit, dql->num_completed);
+#endif
netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
q->state,
jiffies_to_msecs(jiffies - trans_start));
@@ -2690,17 +2708,15 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
}
}
- if (i == ndev->num_tx_queues) {
- netdev_info(ndev,
- "no netdev TX timeout queue found, timeout count: %llu\n",
- priv->tx_timeout_count);
- return false;
- }
-
- priv->tx_timeout_count++;
+ return i;
+}
- tx_ring = &priv->ring[timeout_queue];
- napi = &tx_ring->tqp_vector->napi;
+static void hns3_dump_queue_stats(struct net_device *ndev,
+ struct hns3_enet_ring *tx_ring,
+ int timeout_queue)
+{
+ struct napi_struct *napi = &tx_ring->tqp_vector->napi;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
netdev_info(ndev,
"tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
@@ -2716,6 +2732,48 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
"seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
+}
+
+static void hns3_dump_queue_reg(struct net_device *ndev,
+ struct hns3_enet_ring *tx_ring)
+{
+ netdev_info(ndev,
+ "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG),
+ readl(tx_ring->tqp_vector->mask_addr));
+ netdev_info(ndev,
+ "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG),
+ hns3_tqp_read_reg(tx_ring,
+ HNS3_RING_TX_RING_EBD_OFFSET_REG));
+}
+
+static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+ struct hns3_enet_ring *tx_ring;
+ int timeout_queue;
+
+ timeout_queue = hns3_get_timeout_queue(ndev);
+ if (timeout_queue >= ndev->num_tx_queues) {
+ netdev_info(ndev,
+ "no netdev TX timeout queue found, timeout count: %llu\n",
+ priv->tx_timeout_count);
+ return false;
+ }
+
+ priv->tx_timeout_count++;
+
+ tx_ring = &priv->ring[timeout_queue];
+ hns3_dump_queue_stats(ndev, tx_ring, timeout_queue);
/* When mac received many pause frames continuous, it's unable to send
* packets, which may cause tx timeout
@@ -2728,32 +2786,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
}
- hw_head = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_HEAD_REG);
- hw_tail = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_TAIL_REG);
- fbd_num = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_FBDNUM_REG);
- fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_OFFSET_REG);
- ebd_num = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_EBDNUM_REG);
- ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_EBD_OFFSET_REG);
- bd_num = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_BD_NUM_REG);
- bd_err = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_BD_ERR_REG);
- ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
- tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
-
- netdev_info(ndev,
- "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
- bd_num, hw_head, hw_tail, bd_err,
- readl(tx_ring->tqp_vector->mask_addr));
- netdev_info(ndev,
- "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
- ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
+ hns3_dump_queue_reg(ndev, tx_ring);
return true;
}
@@ -2836,14 +2869,16 @@ static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
if (!h->ae_algo->ops->set_vf_mac)
return -EOPNOTSUPP;
if (is_multicast_ether_addr(mac)) {
+ hnae3_format_mac_addr(format_mac_addr, mac);
netdev_err(netdev,
- "Invalid MAC:%pM specified. Could not set MAC\n",
- mac);
+ "Invalid MAC:%s specified. Could not set MAC\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -3497,17 +3532,13 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
for (i = 0; i < cleand_count; i++) {
desc_cb = &ring->desc_cb[ring->next_to_use];
if (desc_cb->reuse_flag) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.reuse_pg_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, reuse_pg_cnt);
hns3_reuse_buffer(ring, ring->next_to_use);
} else {
ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
if (ret) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
hns3_rl_err(ring_to_netdev(ring),
"alloc rx buffer failed: %d\n",
@@ -3519,9 +3550,7 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.non_reuse_pg++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, non_reuse_pg);
}
ring_ptr_move_fw(ring, next_to_use);
@@ -3536,6 +3565,34 @@ static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
return page_count(cb->priv) == cb->pagecnt_bias;
}
+static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
+ struct hns3_enet_ring *ring,
+ int pull_len,
+ struct hns3_desc_cb *desc_cb)
+{
+ struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ u32 frag_offset = desc_cb->page_offset + pull_len;
+ int size = le16_to_cpu(desc->rx.size);
+ u32 frag_size = size - pull_len;
+ void *frag = napi_alloc_frag(frag_size);
+
+ if (unlikely(!frag)) {
+ hns3_ring_stats_update(ring, frag_alloc_err);
+
+ hns3_rl_err(ring_to_netdev(ring),
+ "failed to allocate rx frag\n");
+ return -ENOMEM;
+ }
+
+ desc_cb->reuse_flag = 1;
+ memcpy(frag, desc_cb->buf + frag_offset, frag_size);
+ skb_add_rx_frag(skb, i, virt_to_page(frag),
+ offset_in_page(frag), frag_size, frag_size);
+
+ hns3_ring_stats_update(ring, frag_alloc);
+ return 0;
+}
+
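hns3_handle_rx_copybreak() factors out the rx copybreak path: frags at or below ring->rx_copybreak are copied into a freshly allocated frag so the original DMA page can be flagged for immediate reuse. A userspace model of that decision, with malloc() standing in for napi_alloc_frag():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rx_buf { int reuse_flag; };

static void *receive_frag(struct rx_buf *b, void *data, size_t len,
			  size_t copybreak)
{
	if (len <= copybreak) {
		void *frag = malloc(len);	/* napi_alloc_frag() stand-in */

		if (!frag)
			return NULL;	/* caller counts frag_alloc_err */
		memcpy(frag, data, len);
		b->reuse_flag = 1;	/* page can go straight back to hw */
		return frag;
	}
	b->reuse_flag = 0;		/* page travels with the skb */
	return data;
}

int main(void)
{
	struct rx_buf b = { 0 };
	char pkt[64] = "small packet";
	void *frag = receive_frag(&b, pkt, sizeof(pkt), 256);

	printf("reuse=%d copied=%d\n", b.reuse_flag, frag != pkt);
	if (frag != pkt)
		free(frag);
	return 0;
}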
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
@@ -3545,6 +3602,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
int size = le16_to_cpu(desc->rx.size);
u32 truesize = hns3_buf_size(ring);
u32 frag_size = size - pull_len;
+ int ret = 0;
bool reused;
if (ring->page_pool) {
@@ -3579,27 +3637,9 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
desc_cb->page_offset = 0;
desc_cb->reuse_flag = 1;
} else if (frag_size <= ring->rx_copybreak) {
- void *frag = napi_alloc_frag(frag_size);
-
- if (unlikely(!frag)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.frag_alloc_err++;
- u64_stats_update_end(&ring->syncp);
-
- hns3_rl_err(ring_to_netdev(ring),
- "failed to allocate rx frag\n");
+ ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
+ if (ret)
goto out;
- }
-
- desc_cb->reuse_flag = 1;
- memcpy(frag, desc_cb->buf + frag_offset, frag_size);
- skb_add_rx_frag(skb, i, virt_to_page(frag),
- offset_in_page(frag), frag_size, frag_size);
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.frag_alloc++;
- u64_stats_update_end(&ring->syncp);
- return;
}
out:
@@ -3682,9 +3722,7 @@ static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
return false;
- u64_stats_update_begin(&ring->syncp);
- ring->stats.csum_complete++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, csum_complete);
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)csum);
@@ -3758,9 +3796,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
BIT(HNS3_RXD_OL3E_B) |
BIT(HNS3_RXD_OL4E_B)))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.l3l4_csum_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, l3l4_csum_err);
return;
}
@@ -3851,10 +3887,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
skb = ring->skb;
if (unlikely(!skb)) {
hns3_rl_err(netdev, "alloc rx skb fail\n");
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}
@@ -3885,9 +3918,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
if (ring->page_pool)
skb_mark_for_recycle(skb);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.seg_pkt_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, seg_pkt_cnt);
ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
__skb_put(skb, ring->pull_len);
@@ -4015,6 +4046,39 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
skb_set_hash(skb, rss_hash, rss_type);
}
+static void hns3_handle_rx_ts_info(struct net_device *netdev,
+ struct hns3_desc *desc, struct sk_buff *skb,
+ u32 bd_base_info)
+{
+ if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u32 nsec = le32_to_cpu(desc->ts_nsec);
+ u32 sec = le32_to_cpu(desc->ts_sec);
+
+ if (h->ae_algo->ops->get_rx_hwts)
+ h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
+ }
+}
+
+static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring,
+ struct hns3_desc *desc, struct sk_buff *skb,
+ u32 l234info)
+{
+ struct net_device *netdev = ring_to_netdev(ring);
+
+	/* Based on hw strategy, the offloaded tag is stored in
+	 * ot_vlan_tag when two VLAN tags are present, and in vlan_tag
+	 * when there is a single tag.
+	 */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+}
+
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
struct net_device *netdev = ring_to_netdev(ring);
@@ -4037,26 +4101,9 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ol_info = le32_to_cpu(desc->rx.ol_info);
csum = le16_to_cpu(desc->csum);
- if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
- struct hnae3_handle *h = hns3_get_handle(netdev);
- u32 nsec = le32_to_cpu(desc->ts_nsec);
- u32 sec = le32_to_cpu(desc->ts_sec);
-
- if (h->ae_algo->ops->get_rx_hwts)
- h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
- }
-
- /* Based on hw strategy, the tag offloaded will be stored at
- * ot_vlan_tag in two layer tag case, and stored at vlan_tag
- * in one layer tag case.
- */
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
- u16 vlan_tag;
+ hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info);
- if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- vlan_tag);
- }
+ hns3_handle_rx_vlan_tag(ring, desc, skb, l234info);
if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
BIT(HNS3_RXD_L2E_B))))) {
@@ -4079,9 +4126,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ret = hns3_set_gro_and_checksum(ring, skb, l234info,
bd_base_info, ol_info, csum);
if (unlikely(ret)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.rx_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, rx_err_cnt);
return ret;
}
@@ -4297,87 +4342,70 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
return rx_pkt_total;
}
-static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
- struct hnae3_ring_chain_node *head)
+static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hnae3_ring_chain_node **head,
+ bool is_tx)
{
+ u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX;
+ u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX;
+ struct hnae3_ring_chain_node *cur_chain = *head;
struct pci_dev *pdev = tqp_vector->handle->pdev;
- struct hnae3_ring_chain_node *cur_chain = head;
struct hnae3_ring_chain_node *chain;
- struct hns3_enet_ring *tx_ring;
- struct hns3_enet_ring *rx_ring;
-
- tx_ring = tqp_vector->tx_group.ring;
- if (tx_ring) {
- cur_chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
-
- cur_chain->next = NULL;
-
- while (tx_ring->next) {
- tx_ring = tx_ring->next;
-
- chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
- GFP_KERNEL);
- if (!chain)
- goto err_free_chain;
-
- cur_chain->next = chain;
- chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae3_set_field(chain->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S,
- HNAE3_RING_GL_TX);
-
- cur_chain = chain;
- }
- }
+ struct hns3_enet_ring *ring;
- rx_ring = tqp_vector->rx_group.ring;
- if (!tx_ring && rx_ring) {
- cur_chain->next = NULL;
- cur_chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring;
- rx_ring = rx_ring->next;
+ if (cur_chain) {
+ while (cur_chain->next)
+ cur_chain = cur_chain->next;
}
- while (rx_ring) {
+ while (ring) {
chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
if (!chain)
- goto err_free_chain;
-
- cur_chain->next = chain;
- chain->tqp_index = rx_ring->tqp->tqp_index;
+ return -ENOMEM;
+ if (cur_chain)
+ cur_chain->next = chain;
+ else
+ *head = chain;
+ chain->tqp_index = ring->tqp->tqp_index;
hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ bit_value);
+ hnae3_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, field_value);
cur_chain = chain;
- rx_ring = rx_ring->next;
+ ring = ring->next;
}
return 0;
+}
+
+static struct hnae3_ring_chain_node *
+hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ struct pci_dev *pdev = tqp_vector->handle->pdev;
+ struct hnae3_ring_chain_node *cur_chain = NULL;
+ struct hnae3_ring_chain_node *chain;
+
+ if (hns3_create_ring_chain(tqp_vector, &cur_chain, true))
+ goto err_free_chain;
+
+ if (hns3_create_ring_chain(tqp_vector, &cur_chain, false))
+ goto err_free_chain;
+
+ return cur_chain;
err_free_chain:
- cur_chain = head->next;
while (cur_chain) {
chain = cur_chain->next;
devm_kfree(&pdev->dev, cur_chain);
cur_chain = chain;
}
- head->next = NULL;
- return -ENOMEM;
+ return NULL;
}
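hns3_create_ring_chain() collapses the former duplicated TX/RX loops into one routine that appends a whole ring list to the chain, creating the head on first use. The list mechanics in a small standalone C program (names simplified):

#include <stdio.h>
#include <stdlib.h>

struct node { int tqp_index; struct node *next; };

static int append_chain(struct node **head, const int *idx, int n)
{
	struct node *cur = *head, *chain;
	int i;

	while (cur && cur->next)	/* seek to the current tail, if any */
		cur = cur->next;

	for (i = 0; i < n; i++) {
		chain = calloc(1, sizeof(*chain));
		if (!chain)
			return -1;	/* caller frees the partial chain */
		chain->tqp_index = idx[i];
		if (cur)
			cur->next = chain;
		else
			*head = chain;	/* create the head on first use */
		cur = chain;
	}
	return 0;
}

int main(void)
{
	struct node *head = NULL;
	int tx[] = { 0, 1 }, rx[] = { 2, 3 };

	if (append_chain(&head, tx, 2) || append_chain(&head, rx, 2))
		return 1;
	for (; head; head = head->next)
		printf("%d ", head->tqp_index);	/* prints: 0 1 2 3 */
	return 0;
}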
static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
@@ -4386,7 +4414,7 @@ static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
struct pci_dev *pdev = tqp_vector->handle->pdev;
struct hnae3_ring_chain_node *chain_tmp, *chain;
- chain = head->next;
+ chain = head;
while (chain) {
chain_tmp = chain->next;
@@ -4501,7 +4529,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
}
for (i = 0; i < priv->vector_num; i++) {
- struct hnae3_ring_chain_node vector_ring_chain;
+ struct hnae3_ring_chain_node *vector_ring_chain;
tqp_vector = &priv->tqp_vector[i];
@@ -4511,15 +4539,16 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector->tx_group.total_packets = 0;
tqp_vector->handle = h;
- ret = hns3_get_vector_ring_chain(tqp_vector,
- &vector_ring_chain);
- if (ret)
+ vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
+ if (!vector_ring_chain) {
+ ret = -ENOMEM;
goto map_ring_fail;
+ }
ret = h->ae_algo->ops->map_ring_to_vector(h,
- tqp_vector->vector_irq, &vector_ring_chain);
+ tqp_vector->vector_irq, vector_ring_chain);
- hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+ hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
if (ret)
goto map_ring_fail;
@@ -4618,7 +4647,7 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
- struct hnae3_ring_chain_node vector_ring_chain;
+ struct hnae3_ring_chain_node *vector_ring_chain;
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
int i;
@@ -4633,13 +4662,14 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
* chain between vector and ring, we should go on to deal with
* the remaining options.
*/
- if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain))
+ vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
+ if (!vector_ring_chain)
dev_warn(priv->dev, "failed to get ring chain\n");
h->ae_algo->ops->unmap_ring_from_vector(h,
- tqp_vector->vector_irq, &vector_ring_chain);
+ tqp_vector->vector_irq, vector_ring_chain);
- hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+ hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
hns3_clear_ring_group(&tqp_vector->rx_group);
hns3_clear_ring_group(&tqp_vector->tx_group);
@@ -4934,6 +4964,7 @@ static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
static int hns3_init_mac_addr(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
int ret = 0;
@@ -4944,8 +4975,9 @@ static int hns3_init_mac_addr(struct net_device *netdev)
/* Check if the MAC address is valid, if not get a random one */
if (!is_valid_ether_addr(mac_addr_temp)) {
eth_hw_addr_random(netdev);
- dev_warn(priv->dev, "using random MAC address %pM\n",
- netdev->dev_addr);
+ hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr);
+ dev_warn(priv->dev, "using random MAC address %s\n",
+ format_mac_addr);
} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
eth_hw_addr_set(netdev, mac_addr_temp);
ether_addr_copy(netdev->perm_addr, mac_addr_temp);
@@ -4997,8 +5029,10 @@ static void hns3_client_stop(struct hnae3_handle *handle)
static void hns3_info_show(struct hns3_nic_priv *priv)
{
struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
- dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
+ hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr);
+ dev_info(priv->dev, "MAC address: %s\n", format_mac_addr);
dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
@@ -5287,9 +5321,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
if (ret) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
/* if alloc new buffer fail, exit directly
* and reclear in up flow.
*/
@@ -5531,8 +5563,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
return 0;
}
-static int hns3_reset_notify(struct hnae3_handle *handle,
- enum hnae3_reset_notify_type type)
+int hns3_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type)
{
int ret = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 1715c98d906d..a05a0c7423ce 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -10,6 +10,9 @@
#include "hnae3.h"
+struct iphdr;
+struct ipv6hdr;
+
enum hns3_nic_state {
HNS3_NIC_STATE_TESTING,
HNS3_NIC_STATE_RESETTING,
@@ -621,6 +624,11 @@ static inline int ring_space(struct hns3_enet_ring *ring)
(begin - end)) - 1;
}
+static inline u32 hns3_tqp_read_reg(struct hns3_enet_ring *ring, u32 reg)
+{
+ return readl_relaxed(ring->tqp->io_base + reg);
+}
+
static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
return readl(base + reg);
@@ -655,6 +663,13 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define hns3_buf_size(_ring) ((_ring)->buf_size)
+#define hns3_ring_stats_update(ring, cnt) do { \
+ typeof(ring) (tmp) = (ring); \
+ u64_stats_update_begin(&(tmp)->syncp); \
+ ((tmp)->stats.cnt)++; \
+ u64_stats_update_end(&(tmp)->syncp); \
+} while (0)
+
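hns3_ring_stats_update() wraps its body in do { ... } while (0) so the multi-statement macro behaves as a single statement and composes safely with unbraced if/else (note the stray trailing backslash after "while (0)" is dropped above). A standalone demo of the same shape, relying on GNU typeof like the kernel does, with plain counters and no syncp seqcount:

#include <stdio.h>

struct ring { struct { unsigned long tx_busy; } stats; };

#define ring_stats_update(ring, cnt) do {	\
	typeof(ring) tmp = (ring);		\
	((tmp)->stats.cnt)++;			\
} while (0)

int main(void)
{
	struct ring r = { 0 };

	if (1)
		ring_stats_update(&r, tx_busy);	/* safe without braces */
	else
		ring_stats_update(&r, tx_busy);
	printf("tx_busy=%lu\n", r.stats.tx_busy);	/* 1 */
	return 0;
}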
static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
{
#if (PAGE_SIZE < 8192)
@@ -705,6 +720,8 @@ void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
u32 ql_value);
void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
+int hns3_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type);
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index c9b4568d7a8d..c06c39ece80d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -643,11 +643,13 @@ static u32 hns3_get_link(struct net_device *netdev)
}
static void hns3_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
- int queue_num = h->kinfo.num_tqps;
+ int rx_queue_index = h->kinfo.num_tqps;
if (hns3_nic_resetting(netdev)) {
netdev_err(netdev, "dev resetting!");
@@ -658,7 +660,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
param->rx_max_pending = HNS3_RING_MAX_PENDING;
param->tx_pending = priv->ring[0].desc_num;
- param->rx_pending = priv->ring[queue_num].desc_num;
+ param->rx_pending = priv->ring[rx_queue_index].desc_num;
+ kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size;
}
static void hns3_get_pauseparam(struct net_device *netdev,
@@ -1064,14 +1067,23 @@ static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
}
static int hns3_check_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param)
{
+#define RX_BUF_LEN_2K 2048
+#define RX_BUF_LEN_4K 4096
if (hns3_nic_resetting(ndev))
return -EBUSY;
if (param->rx_mini_pending || param->rx_jumbo_pending)
return -EINVAL;
+ if (kernel_param->rx_buf_len != RX_BUF_LEN_2K &&
+ kernel_param->rx_buf_len != RX_BUF_LEN_4K) {
+		netdev_err(ndev, "Rx buf len only supports 2048 and 4096\n");
+ return -EINVAL;
+ }
+
if (param->tx_pending > HNS3_RING_MAX_PENDING ||
param->tx_pending < HNS3_RING_MIN_PENDING ||
param->rx_pending > HNS3_RING_MAX_PENDING ||
@@ -1084,8 +1096,26 @@ static int hns3_check_ringparam(struct net_device *ndev,
return 0;
}
+static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ h->kinfo.rx_buf_len = rx_buf_len;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ h->kinfo.tqp[i]->buf_size = rx_buf_len;
+ priv->ring[i + h->kinfo.num_tqps].buf_size = rx_buf_len;
+ }
+
+ return 0;
+}
+
static int hns3_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
@@ -1094,9 +1124,10 @@ static int hns3_set_ringparam(struct net_device *ndev,
u32 old_tx_desc_num, new_tx_desc_num;
u32 old_rx_desc_num, new_rx_desc_num;
u16 queue_num = h->kinfo.num_tqps;
+ u32 old_rx_buf_len;
int ret, i;
- ret = hns3_check_ringparam(ndev, param);
+ ret = hns3_check_ringparam(ndev, param, kernel_param);
if (ret)
return ret;
@@ -1105,8 +1136,10 @@ static int hns3_set_ringparam(struct net_device *ndev,
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
old_tx_desc_num = priv->ring[0].desc_num;
old_rx_desc_num = priv->ring[queue_num].desc_num;
+ old_rx_buf_len = priv->ring[queue_num].buf_size;
if (old_tx_desc_num == new_tx_desc_num &&
- old_rx_desc_num == new_rx_desc_num)
+ old_rx_desc_num == new_rx_desc_num &&
+ kernel_param->rx_buf_len == old_rx_buf_len)
return 0;
tmp_rings = hns3_backup_ringparam(priv);
@@ -1117,19 +1150,22 @@ static int hns3_set_ringparam(struct net_device *ndev,
}
netdev_info(ndev,
- "Changing Tx/Rx ring depth from %u/%u to %u/%u\n",
+		    "Changing Tx/Rx ring depth from %u/%u to %u/%u, changing rx buffer len from %u to %u\n",
old_tx_desc_num, old_rx_desc_num,
- new_tx_desc_num, new_rx_desc_num);
+ new_tx_desc_num, new_rx_desc_num,
+ old_rx_buf_len, kernel_param->rx_buf_len);
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
+ hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len);
ret = hns3_init_all_ring(priv);
if (ret) {
- netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n",
+		netdev_err(ndev, "failed to set ringparam (%d), reverting to old values\n",
ret);
+ hns3_change_rx_buf_len(ndev, old_rx_buf_len);
hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
old_rx_desc_num);
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
@@ -1699,6 +1735,7 @@ static int hns3_get_tunable(struct net_device *netdev,
void *data)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
int ret = 0;
switch (tuna->id) {
@@ -1709,6 +1746,9 @@ static int hns3_get_tunable(struct net_device *netdev,
case ETHTOOL_RX_COPYBREAK:
*(u32 *)data = priv->rx_copybreak;
break;
+ case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ *(u32 *)data = h->kinfo.tx_spare_buf_size;
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -1717,11 +1757,43 @@ static int hns3_get_tunable(struct net_device *netdev,
return ret;
}
+static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
+ u32 data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int ret;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ h->kinfo.tx_spare_buf_size = data;
+
+ ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_UP_CLIENT);
+ if (ret)
+ hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+
+ return ret;
+}
+
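hns3_set_tx_spare_buf_size() applies the tunable by bouncing the client through DOWN/UNINIT/INIT/UP notifications, unwinding with UNINIT if the final UP fails. A sketch of that apply-by-restart pattern with a stubbed notify(); stage names are illustrative, not the hnae3 enum:

enum stage { DOWN, UNINIT, INIT, UP };

static int notify(enum stage s)
{
	(void)s;
	return 0;	/* stub: the driver calls hns3_reset_notify() here */
}

static int apply_setting(unsigned int *cur, unsigned int val)
{
	int ret;

	*cur = val;			/* stash the new value first */
	ret = notify(DOWN);
	if (ret)
		return ret;
	ret = notify(UNINIT);
	if (ret)
		return ret;
	ret = notify(INIT);
	if (ret)
		return ret;
	ret = notify(UP);
	if (ret)
		notify(UNINIT);		/* unwind a failed bring-up */
	return ret;
}

int main(void)
{
	unsigned int tx_spare_buf_size = 0;

	return apply_setting(&tx_spare_buf_size, 4096);
}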
static int hns3_set_tunable(struct net_device *netdev,
const struct ethtool_tunable *tuna,
const void *data)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ u32 old_tx_spare_buf_size, new_tx_spare_buf_size;
struct hnae3_handle *h = priv->ae_handle;
int i, ret = 0;
@@ -1740,6 +1812,26 @@ static int hns3_set_tunable(struct net_device *netdev,
priv->ring[i].rx_copybreak = priv->rx_copybreak;
break;
+ case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
+ new_tx_spare_buf_size = *(u32 *)data;
+ ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
+ if (ret) {
+ int ret1;
+
+ netdev_warn(netdev,
+				    "failed to change tx spare buf size, reverting to old value\n");
+ ret1 = hns3_set_tx_spare_buf_size(netdev,
+ old_tx_spare_buf_size);
+ if (ret1) {
+ netdev_err(netdev,
+					   "failed to revert to old tx spare buf size\n");
+ return ret1;
+ }
+
+ return ret;
+ }
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -1755,6 +1847,8 @@ static int hns3_set_tunable(struct net_device *netdev,
ETHTOOL_COALESCE_MAX_FRAMES | \
ETHTOOL_COALESCE_USE_CQE)
+#define HNS3_ETHTOOL_RING ETHTOOL_RING_USE_RX_BUF_LEN
+
static int hns3_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
{
@@ -1833,6 +1927,7 @@ static int hns3_get_link_ext_state(struct net_device *netdev,
static const struct ethtool_ops hns3vf_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
+ .supported_ring_params = HNS3_ETHTOOL_RING,
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
@@ -1864,6 +1959,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
static const struct ethtool_ops hns3_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
+ .supported_ring_params = HNS3_ETHTOOL_RING,
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
deleted file mode 100644
index d1bf5c4c0abb..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0+
-#
-# Makefile for the HISILICON network device drivers.
-#
-
-ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
-ccflags-y += -I $(srctree)/$(src)
-
-obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o hclge_devlink.o
-
-hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
deleted file mode 100644
index c5d5466810bb..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ /dev/null
@@ -1,591 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-// Copyright (c) 2016-2017 Hisilicon Limited.
-
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/dma-direction.h>
-#include "hclge_cmd.h"
-#include "hnae3.h"
-#include "hclge_main.h"
-
-#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
-
-static int hclge_ring_space(struct hclge_cmq_ring *ring)
-{
- int ntu = ring->next_to_use;
- int ntc = ring->next_to_clean;
- int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
-
- return ring->desc_num - used - 1;
-}
-
-static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
-{
- int ntu = ring->next_to_use;
- int ntc = ring->next_to_clean;
-
- if (ntu > ntc)
- return head >= ntc && head <= ntu;
-
- return head >= ntc || head <= ntu;
-}
-
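hclge_ring_space() and is_valid_csq_clean_head() above are classic circular-ring arithmetic: free space keeps one slot unused so full and empty are distinguishable, and a hardware head pointer is valid only if it lies between next_to_clean and next_to_use modulo the ring size. A tiny self-check of both rules:

#include <assert.h>

#define DESC_NUM 8

static int ring_space(int ntu, int ntc)
{
	int used = (ntu - ntc + DESC_NUM) % DESC_NUM;

	return DESC_NUM - used - 1;	/* one slot always stays free */
}

static int head_is_valid(int ntc, int ntu, int head)
{
	if (ntu > ntc)
		return head >= ntc && head <= ntu;
	return head >= ntc || head <= ntu;	/* wrapped ring */
}

int main(void)
{
	assert(ring_space(0, 0) == DESC_NUM - 1);	/* empty ring */
	assert(ring_space(5, 2) == DESC_NUM - 4);	/* 3 in flight */
	assert(head_is_valid(6, 2, 7));		/* valid span 6,7,0,1,2 */
	assert(!head_is_valid(2, 6, 0));	/* 0 outside [2,6] */
	return 0;
}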
-static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclge_desc);
-
- ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
- &ring->desc_dma_addr, GFP_KERNEL);
- if (!ring->desc)
- return -ENOMEM;
-
- return 0;
-}
-
-static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclge_desc);
-
- if (ring->desc) {
- dma_free_coherent(cmq_ring_to_dev(ring), size,
- ring->desc, ring->desc_dma_addr);
- ring->desc = NULL;
- }
-}
-
-static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
-{
- struct hclge_hw *hw = &hdev->hw;
- struct hclge_cmq_ring *ring =
- (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
- int ret;
-
- ring->ring_type = ring_type;
- ring->dev = hdev;
-
- ret = hclge_alloc_cmd_desc(ring);
- if (ret) {
- dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
- (ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
- return ret;
- }
-
- return 0;
-}
-
-void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
-{
- desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
- if (is_read)
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
- else
- desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
-}
-
-void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
- enum hclge_opcode_type opcode, bool is_read)
-{
- memset((void *)desc, 0, sizeof(struct hclge_desc));
- desc->opcode = cpu_to_le16(opcode);
- desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
-
- if (is_read)
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
-}
-
-static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
-{
- dma_addr_t dma = ring->desc_dma_addr;
- struct hclge_dev *hdev = ring->dev;
- struct hclge_hw *hw = &hdev->hw;
- u32 reg_val;
-
- if (ring->ring_type == HCLGE_TYPE_CSQ) {
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
- lower_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
- upper_32_bits(dma));
- reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
- reg_val &= HCLGE_NIC_SW_RST_RDY;
- reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
- hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
- } else {
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
- lower_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
- upper_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
- ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
- }
-}
-
-static void hclge_cmd_init_regs(struct hclge_hw *hw)
-{
- hclge_cmd_config_regs(&hw->cmq.csq);
- hclge_cmd_config_regs(&hw->cmq.crq);
-}
-
-static int hclge_cmd_csq_clean(struct hclge_hw *hw)
-{
- struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
- struct hclge_cmq_ring *csq = &hw->cmq.csq;
- u32 head;
- int clean;
-
- head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
- rmb(); /* Make sure head is ready before touch any data */
-
- if (!is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
- csq->next_to_use, csq->next_to_clean);
- dev_warn(&hdev->pdev->dev,
- "Disabling any further commands to IMP firmware\n");
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- dev_warn(&hdev->pdev->dev,
- "IMP firmware watchdog reset soon expected!\n");
- return -EIO;
- }
-
- clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
- csq->next_to_clean = head;
- return clean;
-}
-
-static int hclge_cmd_csq_done(struct hclge_hw *hw)
-{
- u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
- return head == hw->cmq.csq.next_to_use;
-}
-
-static bool hclge_is_special_opcode(u16 opcode)
-{
- /* these commands have several descriptors,
- * and use the first one to save opcode and return value
- */
- static const u16 spec_opcode[] = {
- HCLGE_OPC_STATS_64_BIT,
- HCLGE_OPC_STATS_32_BIT,
- HCLGE_OPC_STATS_MAC,
- HCLGE_OPC_STATS_MAC_ALL,
- HCLGE_OPC_QUERY_32_BIT_REG,
- HCLGE_OPC_QUERY_64_BIT_REG,
- HCLGE_QUERY_CLEAR_MPF_RAS_INT,
- HCLGE_QUERY_CLEAR_PF_RAS_INT,
- HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
- HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
- HCLGE_QUERY_ALL_ERR_INFO
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
- if (spec_opcode[i] == opcode)
- return true;
- }
-
- return false;
-}
-
-struct errcode {
- u32 imp_errcode;
- int common_errno;
-};
-
-static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc,
- int num)
-{
- struct hclge_desc *desc_to_use;
- int handle = 0;
-
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
-}
-
-static int hclge_cmd_convert_err_code(u16 desc_ret)
-{
- struct errcode hclge_cmd_errcode[] = {
- {HCLGE_CMD_EXEC_SUCCESS, 0},
- {HCLGE_CMD_NO_AUTH, -EPERM},
- {HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
- {HCLGE_CMD_QUEUE_FULL, -EXFULL},
- {HCLGE_CMD_NEXT_ERR, -ENOSR},
- {HCLGE_CMD_UNEXE_ERR, -ENOTBLK},
- {HCLGE_CMD_PARA_ERR, -EINVAL},
- {HCLGE_CMD_RESULT_ERR, -ERANGE},
- {HCLGE_CMD_TIMEOUT, -ETIME},
- {HCLGE_CMD_HILINK_ERR, -ENOLINK},
- {HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO},
- {HCLGE_CMD_INVALID, -EBADR},
- };
- u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode);
- u32 i;
-
- for (i = 0; i < errcode_count; i++)
- if (hclge_cmd_errcode[i].imp_errcode == desc_ret)
- return hclge_cmd_errcode[i].common_errno;
-
- return -EIO;
-}
-
-static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
- int num, int ntc)
-{
- u16 opcode, desc_ret;
- int handle;
-
- opcode = le16_to_cpu(desc[0].opcode);
- for (handle = 0; handle < num; handle++) {
- desc[handle] = hw->cmq.csq.desc[ntc];
- ntc++;
- if (ntc >= hw->cmq.csq.desc_num)
- ntc = 0;
- }
- if (likely(!hclge_is_special_opcode(opcode)))
- desc_ret = le16_to_cpu(desc[num - 1].retval);
- else
- desc_ret = le16_to_cpu(desc[0].retval);
-
- hw->cmq.last_status = desc_ret;
-
- return hclge_cmd_convert_err_code(desc_ret);
-}
-
-static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc,
- int num, int ntc)
-{
- struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
- bool is_completed = false;
- u32 timeout = 0;
- int handle, ret;
-
- /**
- * If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclge_cmd_csq_done(hw)) {
- is_completed = true;
- break;
- }
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (!is_completed)
- ret = -EBADE;
- else
- ret = hclge_cmd_check_retval(hw, desc, num, ntc);
-
- /* Clean the command send queue */
- handle = hclge_cmd_csq_clean(hw);
- if (handle < 0)
- ret = handle;
- else if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
- return ret;
-}
-
-/**
- * hclge_cmd_send - send command to command queue
- * @hw: pointer to the hw struct
- * @desc: prefilled descriptor for describing the command
- * @num : the number of descriptors to be sent
- *
- * This is the main send command for command queue, it
- * sends the queue, cleans the queue, etc
- **/
-int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
-{
- struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
- struct hclge_cmq_ring *csq = &hw->cmq.csq;
- int ret;
- int ntc;
-
- spin_lock_bh(&hw->cmq.csq.lock);
-
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- if (num > hclge_ring_space(&hw->cmq.csq)) {
- /* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
- * need update the SW HEAD pointer csq->next_to_clean
- */
- csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- /**
- * Record the location of desc in the ring for this time
- * which will be use for hardware to write back
- */
- ntc = hw->cmq.csq.next_to_use;
-
- hclge_cmd_copy_desc(hw, desc, num);
-
- /* Write to hardware */
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);
-
- ret = hclge_cmd_check_result(hw, desc, num, ntc);
-
- spin_unlock_bh(&hw->cmq.csq.lock);
-
- return ret;
-}
-
-static void hclge_set_default_capability(struct hclge_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-
- set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
- if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
- set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
- }
-}
-
-static const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = {
- {HCLGE_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
- {HCLGE_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
- {HCLGE_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
- {HCLGE_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
- {HCLGE_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
- {HCLGE_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
- {HCLGE_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
- {HCLGE_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
- {HCLGE_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
- {HCLGE_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
- {HCLGE_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
- {HCLGE_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
- {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
- {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
-};
-
-static void hclge_parse_capability(struct hclge_dev *hdev,
- struct hclge_query_version_cmd *cmd)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- u32 caps, i;
-
- caps = __le32_to_cpu(cmd->caps[0]);
- for (i = 0; i < ARRAY_SIZE(hclge_cmd_caps_bit_map0); i++)
- if (hnae3_get_bit(caps, hclge_cmd_caps_bit_map0[i].imp_bit))
- set_bit(hclge_cmd_caps_bit_map0[i].local_bit,
- ae_dev->caps);
-}
-
-static __le32 hclge_build_api_caps(void)
-{
- u32 api_caps = 0;
-
- hnae3_set_bit(api_caps, HCLGE_API_CAP_FLEX_RSS_TBL_B, 1);
-
- return cpu_to_le32(api_caps);
-}
-
-static enum hclge_cmd_status
-hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- struct hclge_query_version_cmd *resp;
- struct hclge_desc desc;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
- resp = (struct hclge_query_version_cmd *)desc.data;
- resp->api_caps = hclge_build_api_caps();
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- return ret;
-
- hdev->fw_version = le32_to_cpu(resp->firmware);
-
- ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
- HNAE3_PCI_REVISION_BIT_SIZE;
- ae_dev->dev_version |= hdev->pdev->revision;
-
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
- hclge_set_default_capability(hdev);
-
- hclge_parse_capability(hdev, resp);
-
- return ret;
-}
-
-int hclge_cmd_queue_init(struct hclge_dev *hdev)
-{
- int ret;
-
- /* Setup the lock for command queue */
- spin_lock_init(&hdev->hw.cmq.csq.lock);
- spin_lock_init(&hdev->hw.cmq.crq.lock);
-
- /* Setup the queue entries for use cmd queue */
- hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
- hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
-
- /* Setup Tx write back timeout */
- hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;
-
- /* Setup queue rings */
- ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CSQ ring setup error %d\n", ret);
- return ret;
- }
-
- ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CRQ ring setup error %d\n", ret);
- goto err_csq;
- }
-
- return 0;
-err_csq:
- hclge_free_cmd_desc(&hdev->hw.cmq.csq);
- return ret;
-}
-
-static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)
-{
- struct hclge_firmware_compat_cmd *req;
- struct hclge_desc desc;
- u32 compat = 0;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
-
- if (en) {
- req = (struct hclge_firmware_compat_cmd *)desc.data;
-
- hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
- hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
- if (hnae3_dev_phy_imp_supported(hdev))
- hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
- hnae3_set_bit(compat, HCLGE_MAC_STATS_EXT_EN_B, 1);
- hnae3_set_bit(compat, HCLGE_SYNC_RX_RING_HEAD_EN_B, 1);
-
- req->compat = cpu_to_le32(compat);
- }
-
- return hclge_cmd_send(&hdev->hw, &desc, 1);
-}
-
-int hclge_cmd_init(struct hclge_dev *hdev)
-{
- int ret;
-
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
-
- hdev->hw.cmq.csq.next_to_clean = 0;
- hdev->hw.cmq.csq.next_to_use = 0;
- hdev->hw.cmq.crq.next_to_clean = 0;
- hdev->hw.cmq.crq.next_to_use = 0;
-
- hclge_cmd_init_regs(&hdev->hw);
-
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
-
- /* Check if a new reset is pending, because a higher-level reset
- * may be triggered while a lower-level reset is being processed.
- */
- if ((hclge_is_reset_pending(hdev))) {
- dev_err(&hdev->pdev->dev,
- "failed to init cmd since reset %#lx pending\n",
- hdev->reset_pending);
- ret = -EBUSY;
- goto err_cmd_init;
- }
-
- /* get version and device capabilities */
- ret = hclge_cmd_query_version_and_capability(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to query version and capabilities, ret = %d\n",
- ret);
- goto err_cmd_init;
- }
-
- dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
- HNAE3_FW_VERSION_BYTE3_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
- HNAE3_FW_VERSION_BYTE2_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
- HNAE3_FW_VERSION_BYTE1_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
- HNAE3_FW_VERSION_BYTE0_SHIFT));
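
The version print above unpacks one 32-bit word by mask and shift, and the dev_version just before it is packed the same way (hardware id shifted left, PCI revision in the low bits). A sketch of the decoding, assuming for illustration that the HNAE3_FW_VERSION_BYTE* masks select consecutive 8-bit fields:

#include <linux/printk.h>
#include <linux/types.h>

/* Assumed layout: 0x01020304 prints as "1.2.3.4". */
static void show_fw_version(u32 fw_version)
{
        pr_info("firmware version %u.%u.%u.%u\n",
                (fw_version >> 24) & 0xff,      /* BYTE3 */
                (fw_version >> 16) & 0xff,      /* BYTE2 */
                (fw_version >> 8) & 0xff,       /* BYTE1 */
                fw_version & 0xff);             /* BYTE0 */
}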
-
- /* Ask the firmware to enable some features; the driver can work
- * without them.
- */
- ret = hclge_firmware_compat_config(hdev, true);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "Firmware compatible features not enabled(%d).\n",
- ret);
-
- return 0;
-
-err_cmd_init:
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
-
- return ret;
-}
-
-static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
-{
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
-}
-
-void hclge_cmd_uninit(struct hclge_dev *hdev)
-{
- hclge_firmware_compat_config(hdev, false);
-
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- /* Wait to ensure that the firmware completes any leftover
- * commands.
- */
- msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME);
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
- hclge_cmd_uninit_regs(&hdev->hw);
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- hclge_free_cmd_desc(&hdev->hw.cmq.csq);
- hclge_free_cmd_desc(&hdev->hw.cmq.crq);
-}
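
Ordering in the teardown above matters: new submissions are fenced off first, firmware gets time to drain, and the rings are detached from hardware under both locks before their memory is freed. A condensed sketch of that sequence, with hypothetical cmdq_* helpers standing in for the real ones:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#define CMDQ_DISABLED   0

struct cmdq {
        unsigned long state;
        spinlock_t csq_lock;
        spinlock_t crq_lock;
};

static void cmdq_zero_regs(struct cmdq *cq) { /* zero base/depth/head/tail */ }
static void cmdq_free_desc(struct cmdq *cq) { /* free the DMA rings */ }

static void cmdq_teardown(struct cmdq *cq)
{
        set_bit(CMDQ_DISABLED, &cq->state);     /* 1: fence off new commands */
        msleep(200);                            /* 2: let firmware drain */
        spin_lock_bh(&cq->csq_lock);            /* 3: exclude concurrent senders */
        spin_lock(&cq->crq_lock);
        cmdq_zero_regs(cq);                     /* 4: detach rings from hardware */
        spin_unlock(&cq->crq_lock);
        spin_unlock_bh(&cq->csq_lock);
        cmdq_free_desc(cq);                     /* 5: only now free the memory */
}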
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index d24e59028798..f9d89511eb32 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -7,323 +7,21 @@
#include <linux/io.h>
#include <linux/etherdevice.h>
#include "hnae3.h"
-
-#define HCLGE_CMDQ_TX_TIMEOUT 30000
-#define HCLGE_CMDQ_CLEAR_WAIT_TIME 200
-#define HCLGE_DESC_DATA_LEN 6
+#include "hclge_comm_cmd.h"
struct hclge_dev;
-struct hclge_desc {
- __le16 opcode;
#define HCLGE_CMDQ_RX_INVLD_B 0
#define HCLGE_CMDQ_RX_OUTVLD_B 1
- __le16 flag;
- __le16 retval;
- __le16 rsv;
- __le32 data[HCLGE_DESC_DATA_LEN];
-};
-
-struct hclge_cmq_ring {
- dma_addr_t desc_dma_addr;
- struct hclge_desc *desc;
- struct hclge_dev *dev;
- u32 head;
- u32 tail;
-
- u16 buf_size;
- u16 desc_num;
- int next_to_use;
- int next_to_clean;
- u8 ring_type; /* cmq ring type */
- spinlock_t lock; /* Command queue lock */
-};
-
-enum hclge_cmd_return_status {
- HCLGE_CMD_EXEC_SUCCESS = 0,
- HCLGE_CMD_NO_AUTH = 1,
- HCLGE_CMD_NOT_SUPPORTED = 2,
- HCLGE_CMD_QUEUE_FULL = 3,
- HCLGE_CMD_NEXT_ERR = 4,
- HCLGE_CMD_UNEXE_ERR = 5,
- HCLGE_CMD_PARA_ERR = 6,
- HCLGE_CMD_RESULT_ERR = 7,
- HCLGE_CMD_TIMEOUT = 8,
- HCLGE_CMD_HILINK_ERR = 9,
- HCLGE_CMD_QUEUE_ILLEGAL = 10,
- HCLGE_CMD_INVALID = 11,
-};
-
-enum hclge_cmd_status {
- HCLGE_STATUS_SUCCESS = 0,
- HCLGE_ERR_CSQ_FULL = -1,
- HCLGE_ERR_CSQ_TIMEOUT = -2,
- HCLGE_ERR_CSQ_ERROR = -3,
-};
-
struct hclge_misc_vector {
u8 __iomem *addr;
int vector_irq;
char name[HNAE3_INT_NAME_LEN];
};
-struct hclge_cmq {
- struct hclge_cmq_ring csq;
- struct hclge_cmq_ring crq;
- u16 tx_timeout;
- enum hclge_cmd_status last_status;
-};
-
-#define HCLGE_CMD_FLAG_IN BIT(0)
-#define HCLGE_CMD_FLAG_OUT BIT(1)
-#define HCLGE_CMD_FLAG_NEXT BIT(2)
-#define HCLGE_CMD_FLAG_WR BIT(3)
-#define HCLGE_CMD_FLAG_NO_INTR BIT(4)
-#define HCLGE_CMD_FLAG_ERR_INTR BIT(5)
-
-enum hclge_opcode_type {
- /* Generic commands */
- HCLGE_OPC_QUERY_FW_VER = 0x0001,
- HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
- HCLGE_OPC_GBL_RST_STATUS = 0x0021,
- HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022,
- HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
- HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
- HCLGE_OPC_GET_CFG_PARAM = 0x0025,
- HCLGE_OPC_PF_RST_DONE = 0x0026,
- HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
-
- HCLGE_OPC_STATS_64_BIT = 0x0030,
- HCLGE_OPC_STATS_32_BIT = 0x0031,
- HCLGE_OPC_STATS_MAC = 0x0032,
- HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
- HCLGE_OPC_STATS_MAC_ALL = 0x0034,
-
- HCLGE_OPC_QUERY_REG_NUM = 0x0040,
- HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
- HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
- HCLGE_OPC_DFX_BD_NUM = 0x0043,
- HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
- HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
- HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
- HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
- HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
- HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
- HCLGE_OPC_DFX_NCSI_REG = 0x004A,
- HCLGE_OPC_DFX_RTC_REG = 0x004B,
- HCLGE_OPC_DFX_PPP_REG = 0x004C,
- HCLGE_OPC_DFX_RCB_REG = 0x004D,
- HCLGE_OPC_DFX_TQP_REG = 0x004E,
- HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
-
- HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
-
- /* MAC command */
- HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
- HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
- HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
- HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
- HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
- HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
- HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
- HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
- HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
- HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
-
- /* PTP commands */
- HCLGE_OPC_PTP_INT_EN = 0x0501,
- HCLGE_OPC_PTP_MODE_CFG = 0x0507,
-
- /* PFC/Pause commands */
- HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
- HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
- HCLGE_OPC_CFG_MAC_PARA = 0x0703,
- HCLGE_OPC_CFG_PFC_PARA = 0x0704,
- HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
- HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
- HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
- HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
- HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
- HCLGE_OPC_QOS_MAP = 0x070A,
-
- /* ETS/scheduler commands */
- HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
- HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
- HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
- HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
- HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
- HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
- HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
- HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
- HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
- HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
- HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
- HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
- HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
- HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
- HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
- HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
- HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
- HCLGE_OPC_TM_NODES = 0x0816,
- HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
- HCLGE_OPC_QSET_DFX_STS = 0x0844,
- HCLGE_OPC_PRI_DFX_STS = 0x0845,
- HCLGE_OPC_PG_DFX_STS = 0x0846,
- HCLGE_OPC_PORT_DFX_STS = 0x0847,
- HCLGE_OPC_SCH_NQ_CNT = 0x0848,
- HCLGE_OPC_SCH_RQ_CNT = 0x0849,
- HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
- HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
- HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
-
- /* Packet buffer allocate commands */
- HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
- HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
- HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
- HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
- HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
- HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
-
- /* TQP management command */
- HCLGE_OPC_SET_TQP_MAP = 0x0A01,
-
- /* TQP commands */
- HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
- HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
- HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
- HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
- HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
- HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
- HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
- HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
- HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
- HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
- HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
-
- /* PPU commands */
- HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
-
- /* TSO command */
- HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
- HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
-
- /* RSS commands */
- HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
- HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
- HCLGE_OPC_RSS_TC_MODE = 0x0D08,
- HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
-
- /* Promiscuous mode command */
- HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
-
- /* Vlan offload commands */
- HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
- HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
-
- /* Interrupts commands */
- HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
- HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
-
- /* MAC commands */
- HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
- HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
- HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
- HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
- HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
- HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
- HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
-
- /* MAC VLAN commands */
- HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
-
- /* VLAN commands */
- HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
- HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
- HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
- HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103,
-
- /* Flow Director commands */
- HCLGE_OPC_FD_MODE_CTRL = 0x1200,
- HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
- HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
- HCLGE_OPC_FD_TCAM_OP = 0x1203,
- HCLGE_OPC_FD_AD_OP = 0x1204,
- HCLGE_OPC_FD_CNT_OP = 0x1205,
- HCLGE_OPC_FD_USER_DEF_OP = 0x1207,
-
- /* MDIO command */
- HCLGE_OPC_MDIO_CONFIG = 0x1900,
-
- /* QCN commands */
- HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
- HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
- HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
- HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
- HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
- HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
- HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
- HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
-
- /* Mailbox command */
- HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
-
- /* Led command */
- HCLGE_OPC_LED_STATUS_CFG = 0xB000,
-
- /* clear hardware resource command */
- HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B,
-
- /* NCL config command */
- HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
-
- /* IMP stats command */
- HCLGE_OPC_IMP_STATS_BD = 0x7012,
- HCLGE_OPC_IMP_STATS_INFO = 0x7013,
- HCLGE_OPC_IMP_COMPAT_CFG = 0x701A,
-
- /* SFP command */
- HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
- HCLGE_OPC_GET_SFP_EXIST = 0x7101,
- HCLGE_OPC_GET_SFP_INFO = 0x7104,
-
- /* Error INT commands */
- HCLGE_MAC_COMMON_INT_EN = 0x030E,
- HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
- HCLGE_SSU_ECC_INT_CMD = 0x0989,
- HCLGE_SSU_COMMON_INT_CMD = 0x098C,
- HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
- HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
- HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
- HCLGE_COMMON_ECC_INT_CFG = 0x1505,
- HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
- HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
- HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
- HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
- HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
- HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
- HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516,
- HCLGE_QUERY_ALL_ERR_INFO = 0x1517,
- HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
- HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
- HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
- HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
- HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
- HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
- HCLGE_IGU_COMMON_INT_EN = 0x1806,
- HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
- HCLGE_PPP_CMD0_INT_CMD = 0x2100,
- HCLGE_PPP_CMD1_INT_CMD = 0x2101,
- HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
- HCLGE_NCSI_INT_EN = 0x2401,
-
- /* PHY command */
- HCLGE_OPC_PHY_LINK_KSETTING = 0x7025,
- HCLGE_OPC_PHY_REG = 0x7026,
-
- /* Query link diagnosis info command */
- HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
-};
+#define hclge_cmd_setup_basic_desc(desc, opcode, is_read) \
+ hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read)
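
The macro above is a compatibility shim: PF call sites keep their old spelling while the implementation moves to the shared module. An unmodified caller such as this now expands to the hclge_comm_ variant:

        struct hclge_desc desc;

        /* expands to hclge_comm_cmd_setup_basic_desc(&desc, ..., true) */
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);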
#define HCLGE_TQP_REG_OFFSET 0x80000
#define HCLGE_TQP_REG_SIZE 0x200
@@ -391,38 +89,6 @@ struct hclge_rx_priv_buff_cmd {
u8 rsv[6];
};
-enum HCLGE_CAP_BITS {
- HCLGE_CAP_UDP_GSO_B,
- HCLGE_CAP_QB_B,
- HCLGE_CAP_FD_FORWARD_TC_B,
- HCLGE_CAP_PTP_B,
- HCLGE_CAP_INT_QL_B,
- HCLGE_CAP_HW_TX_CSUM_B,
- HCLGE_CAP_TX_PUSH_B,
- HCLGE_CAP_PHY_IMP_B,
- HCLGE_CAP_TQP_TXRX_INDEP_B,
- HCLGE_CAP_HW_PAD_B,
- HCLGE_CAP_STASH_B,
- HCLGE_CAP_UDP_TUNNEL_CSUM_B,
- HCLGE_CAP_RAS_IMP_B = 12,
- HCLGE_CAP_FEC_B = 13,
- HCLGE_CAP_PAUSE_B = 14,
- HCLGE_CAP_RXD_ADV_LAYOUT_B = 15,
- HCLGE_CAP_PORT_VLAN_BYPASS_B = 17,
-};
-
-enum HCLGE_API_CAP_BITS {
- HCLGE_API_CAP_FLEX_RSS_TBL_B,
-};
-
-#define HCLGE_QUERY_CAP_LENGTH 3
-struct hclge_query_version_cmd {
- __le32 firmware;
- __le32 hardware;
- __le32 api_caps;
- __le32 caps[HCLGE_QUERY_CAP_LENGTH]; /* capabilities of device */
-};
-
#define HCLGE_RX_PRIV_EN_B 15
#define HCLGE_TC_NUM_ONE_DESC 4
struct hclge_priv_wl {
@@ -571,38 +237,10 @@ struct hclge_vf_num_cmd {
};
#define HCLGE_RSS_DEFAULT_OUTPORT_B 4
-#define HCLGE_RSS_HASH_KEY_OFFSET_B 4
-#define HCLGE_RSS_HASH_KEY_NUM 16
-struct hclge_rss_config_cmd {
- u8 hash_config;
- u8 rsv[7];
- u8 hash_key[HCLGE_RSS_HASH_KEY_NUM];
-};
-
-struct hclge_rss_input_tuple_cmd {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
- u8 rsv[16];
-};
-#define HCLGE_RSS_CFG_TBL_SIZE 16
#define HCLGE_RSS_CFG_TBL_SIZE_H 4
-#define HCLGE_RSS_CFG_TBL_BW_H 2U
#define HCLGE_RSS_CFG_TBL_BW_L 8U
-struct hclge_rss_indirection_table_cmd {
- __le16 start_table_index;
- __le16 rss_set_bitmap;
- u8 rss_qid_h[HCLGE_RSS_CFG_TBL_SIZE_H];
- u8 rss_qid_l[HCLGE_RSS_CFG_TBL_SIZE];
-};
-
#define HCLGE_RSS_TC_OFFSET_S 0
#define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0)
#define HCLGE_RSS_TC_SIZE_MSB_B 11
@@ -610,10 +248,6 @@ struct hclge_rss_indirection_table_cmd {
#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3
#define HCLGE_RSS_TC_VALID_B 15
-struct hclge_rss_tc_mode_cmd {
- __le16 rss_tc_mode[HCLGE_MAX_TC_NUM];
- u8 rsv[8];
-};
#define HCLGE_LINK_STATUS_UP_B 0
#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B)
@@ -1015,16 +649,6 @@ struct hclge_common_lb_cmd {
#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */
-#define HCLGE_TYPE_CRQ 0
-#define HCLGE_TYPE_CSQ 1
-
-/* this bit indicates that the driver is ready for hardware reset */
-#define HCLGE_NIC_SW_RST_RDY_B 16
-#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B)
-
-#define HCLGE_NIC_CMQ_DESC_NUM 1024
-#define HCLGE_NIC_CMQ_DESC_NUM_S 3
-
#define HCLGE_LED_LOCATE_STATE_S 0
#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
@@ -1147,16 +771,6 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd {
u8 rsv[4];
};
-#define HCLGE_LINK_EVENT_REPORT_EN_B 0
-#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
-#define HCLGE_PHY_IMP_EN_B 2
-#define HCLGE_MAC_STATS_EXT_EN_B 3
-#define HCLGE_SYNC_RX_RING_HEAD_EN_B 4
-struct hclge_firmware_compat_cmd {
- __le32 compat;
- u8 rsv[20];
-};
-
#define HCLGE_SFP_INFO_CMD_NUM 6
#define HCLGE_SFP_INFO_BD0_LEN 20
#define HCLGE_SFP_INFO_BDX_LEN 24
@@ -1239,44 +853,10 @@ struct hclge_phy_reg_cmd {
u8 rsv1[18];
};
-/* capabilities bits map between imp firmware and local driver */
-struct hclge_caps_bit_map {
- u16 imp_bit;
- u16 local_bit;
-};
-
-int hclge_cmd_init(struct hclge_dev *hdev);
-static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
-{
- writel(value, base + reg);
-}
-
-#define hclge_write_dev(a, reg, value) \
- hclge_write_reg((a)->io_base, reg, value)
-#define hclge_read_dev(a, reg) \
- hclge_read_reg((a)->io_base, reg)
-
-static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg)
-{
- u8 __iomem *reg_addr = READ_ONCE(base);
-
- return readl(reg_addr + reg);
-}
-
-#define HCLGE_SEND_SYNC(flag) \
- ((flag) & HCLGE_CMD_FLAG_NO_INTR)
-
struct hclge_hw;
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num);
-void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
- enum hclge_opcode_type opcode, bool is_read);
-void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
-
-enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
- struct hclge_desc *desc);
-enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
- struct hclge_desc *desc);
-
-void hclge_cmd_uninit(struct hclge_dev *hdev);
-int hclge_cmd_queue_init(struct hclge_dev *hdev);
+enum hclge_comm_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
+ struct hclge_desc *desc);
+enum hclge_comm_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
+ struct hclge_desc *desc);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 375ebf105a9a..69b8673436ca 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -203,7 +203,7 @@ static int hclge_map_update(struct hclge_dev *hdev)
if (ret)
return ret;
- hclge_rss_indir_init_cfg(hdev);
+ hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
return hclge_rss_init_hw(hdev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 4e0a8c2f7c05..9b870e79c290 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -77,6 +77,10 @@ static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
.cmd = HCLGE_OPC_DFX_TQP_REG } },
};
+/* Make sure that len(name) + interval >= maxlen(item data) + 2.
+ * For example, if name = "pkt_num" (len: 7) and the item data is a u32
+ * printed with "%u" (maxlen: 10), the interval must be at least 5.
+ */
static void hclge_dbg_fill_content(char *content, u16 len,
const struct hclge_dbg_item *items,
const char **result, u16 size)
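
A minimal sketch of the column rule in the comment above: each column reserves strlen(name) + interval characters, so the widest printed value plus two spaces of separation always fits. The dbg_item and fill_row names are hypothetical simplifications of the driver's structures:

#include <linux/kernel.h>
#include <linux/string.h>

struct dbg_item {
        const char *name;
        u16 interval;   /* padding reserved after the name */
};

static void fill_row(char *row, size_t len, const struct dbg_item *items,
                     const char **vals, int n)
{
        size_t pos = 0;
        int i;

        /* left-justify each value in a strlen(name) + interval wide column */
        for (i = 0; i < n; i++)
                pos += scnprintf(row + pos, len - pos, "%-*s",
                                 (int)(strlen(items[i].name) + items[i].interval),
                                 vals[i]);
}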
@@ -99,7 +103,7 @@ static void hclge_dbg_fill_content(char *content, u16 len,
static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
{
if (id)
- sprintf(buf, "vf%u", id - 1);
+ sprintf(buf, "vf%u", id - 1U);
else
sprintf(buf, "pf");
@@ -146,7 +150,7 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
desc->data[0] = cpu_to_le32(index);
for (i = 1; i < bd_num; i++) {
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
desc++;
hclge_cmd_setup_basic_desc(desc, cmd, true);
}
@@ -258,12 +262,29 @@ hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return 0;
}
+static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
+ {HCLGE_MAC_TX_EN_B, "mac_trans_en"},
+ {HCLGE_MAC_RX_EN_B, "mac_rcv_en"},
+ {HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
+ {HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
+ {HCLGE_MAC_1588_TX_B, "1588_trans_en"},
+ {HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
+ {HCLGE_MAC_APP_LP_B, "mac_app_loop_en"},
+ {HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
+ {HCLGE_MAC_FCS_TX_B, "mac_fcs_tx_en"},
+ {HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
+ {HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
+ {HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
+ {HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
+ {HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
+};
+
static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
int len, int *pos)
{
struct hclge_config_mac_mode_cmd *req;
struct hclge_desc desc;
- u32 loop_en;
+ u32 loop_en, i, offset;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
@@ -278,39 +299,12 @@ static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_mac_mode_cmd *)desc.data;
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
- *pos += scnprintf(buf + *pos, len - *pos, "mac_trans_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_rcv_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
- *pos += scnprintf(buf + *pos, len - *pos, "pad_trans_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
- *pos += scnprintf(buf + *pos, len - *pos, "pad_rcv_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
- *pos += scnprintf(buf + *pos, len - *pos, "1588_trans_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
- *pos += scnprintf(buf + *pos, len - *pos, "1588_rcv_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_app_loop_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_line_loop_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_fcs_tx_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
- *pos += scnprintf(buf + *pos, len - *pos,
- "mac_rx_oversize_truncate_en: %#x\n",
- hnae3_get_bit(loop_en,
- HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_strip_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
- *pos += scnprintf(buf + *pos, len - *pos,
- "mac_tx_under_min_err_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
- *pos += scnprintf(buf + *pos, len - *pos,
- "mac_tx_oversize_truncate_en: %#x\n",
- hnae3_get_bit(loop_en,
- HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
+ offset = hclge_dbg_mac_en_status[i].offset;
+ *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
+ hclge_dbg_mac_en_status[i].message,
+ hnae3_get_bit(loop_en, offset));
+ }
return 0;
}
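
The loop above replaces fourteen hand-written scnprintf() calls with a table walk. The same shape works for any register whose bits map one-to-one onto status names; a generic sketch with hypothetical names:

#include <linux/kernel.h>

struct bit_name {
        u32 bit;
        const char *name;
};

static int dump_bits(char *buf, int len, u32 reg,
                     const struct bit_name *tbl, int n)
{
        int pos = 0;
        int i;

        /* one "name: 0x0/0x1" line per table entry */
        for (i = 0; i < n; i++)
                pos += scnprintf(buf + pos, len - pos, "%s: %#x\n",
                                 tbl[i].name, (reg >> tbl[i].bit) & 0x1);
        return pos;
}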
@@ -788,7 +782,6 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
-
if (!data_str)
return -ENOMEM;
@@ -1273,7 +1266,7 @@ static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret) {
@@ -1309,7 +1302,7 @@ static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret) {
@@ -1454,9 +1447,9 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
u32 *req;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
@@ -1614,8 +1607,19 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
+static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
+ {HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
+ {HCLGE_MISC_RESET_STS_REG, "reset interrupt source"},
+ {HCLGE_MISC_VECTOR_INT_STS, "reset interrupt status"},
+ {HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
+ {HCLGE_GLOBAL_RESET_REG, "hardware reset status"},
+ {HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
+ {HCLGE_FUN_RST_ING, "function reset status"}
+};
+
int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
{
+ u32 i, offset;
int pos = 0;
pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
@@ -1634,22 +1638,14 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
hdev->rst_stats.reset_cnt);
pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
hdev->rst_stats.reset_fail_cnt);
- pos += scnprintf(buf + pos, len - pos,
- "vector0 interrupt enable status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
- pos += scnprintf(buf + pos, len - pos, "reset interrupt source: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
- pos += scnprintf(buf + pos, len - pos, "reset interrupt status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
- pos += scnprintf(buf + pos, len - pos, "RAS interrupt status: 0x%x\n",
- hclge_read_dev(&hdev->hw,
- HCLGE_RAS_PF_OTHER_INT_STS_REG));
- pos += scnprintf(buf + pos, len - pos, "hardware reset status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
- pos += scnprintf(buf + pos, len - pos, "handshake status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
- pos += scnprintf(buf + pos, len - pos, "function reset status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
+ offset = hclge_dbg_rst_info[i].offset;
+ pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
+ hclge_dbg_rst_info[i].message,
+ hclge_read_dev(&hdev->hw, offset));
+ }
+
pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
hdev->state);
@@ -1771,7 +1767,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
- char *buf, int *len, int *pos)
+ char *buf, int len, int *pos)
{
#define HCLGE_CMD_DATA_NUM 6
@@ -1783,7 +1779,7 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
if (i == 0 && j == 0)
continue;
- *pos += scnprintf(buf + *pos, *len - *pos,
+ *pos += scnprintf(buf + *pos, len - *pos,
"0x%04x | 0x%08x\n", offset,
le32_to_cpu(desc[i].data[j]));
@@ -1821,7 +1817,7 @@ hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- hclge_ncl_config_data_print(desc, &index, buf, &len, &pos);
+ hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
}
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index c526591a7240..724052928b88 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -94,6 +94,11 @@ struct hclge_dbg_func {
char *buf, int len);
};
+struct hclge_dbg_status_dfx_info {
+ u32 offset;
+ char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
+};
+
static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
@@ -321,10 +326,10 @@ static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
{true, "IGU_RX_OUT_UDP0_PKT"},
{true, "IGU_RX_IN_UDP0_PKT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
+ {true, "IGU_MC_CAR_DROP_PKT_L"},
+ {true, "IGU_MC_CAR_DROP_PKT_H"},
+ {true, "IGU_BC_CAR_DROP_PKT_L"},
+ {true, "IGU_BC_CAR_DROP_PKT_H"},
{false, "Reserved"},
{true, "IGU_RX_OVERSIZE_PKT_L"},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 20e628c2bd44..42a9e73d8588 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1399,7 +1399,7 @@ static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure common error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
if (en) {
@@ -1498,7 +1498,7 @@ static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
/* configure PPP error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
@@ -1633,7 +1633,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
/* configure PPU error interrupts */
if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
if (en) {
desc[0].data[0] =
@@ -1718,7 +1718,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure SSU ecc error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
if (en) {
desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
@@ -1740,7 +1740,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure SSU common error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
if (en) {
@@ -1963,7 +1963,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
&ae_dev->hw_err_reset_req);
/* clear all main PF RAS errors */
- hclge_cmd_reuse_desc(&desc[0], false);
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
@@ -2036,7 +2036,7 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
}
/* clear all PF RAS errors */
- hclge_cmd_reuse_desc(&desc[0], false);
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
@@ -2087,8 +2087,8 @@ static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
true);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 3);
if (ret) {
@@ -2119,7 +2119,7 @@ static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
ret = hclge_cmd_query_error(hdev, &desc[0],
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
- HCLGE_CMD_FLAG_NEXT);
+ HCLGE_COMM_CMD_FLAG_NEXT);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
return ret;
@@ -2235,7 +2235,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
}
/* clear error status */
- hclge_cmd_reuse_desc(&desc[0], false);
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
if (ret) {
dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
@@ -2405,7 +2405,8 @@ static int hclge_clear_hw_msix_error(struct hclge_dev *hdev,
else
desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT);
- desc[0].flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
+ desc[0].flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
+ HCLGE_COMM_CMD_FLAG_IN);
return hclge_cmd_send(&hdev->hw, &desc[0], bd_num);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c2a58101144e..24f7afacae02 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -24,6 +24,7 @@
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
+#include "hclge_comm_cmd.h"
#define HCLGE_NAME "hclge"
@@ -90,20 +91,20 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
- HCLGE_NIC_CSQ_BASEADDR_H_REG,
- HCLGE_NIC_CSQ_DEPTH_REG,
- HCLGE_NIC_CSQ_TAIL_REG,
- HCLGE_NIC_CSQ_HEAD_REG,
- HCLGE_NIC_CRQ_BASEADDR_L_REG,
- HCLGE_NIC_CRQ_BASEADDR_H_REG,
- HCLGE_NIC_CRQ_DEPTH_REG,
- HCLGE_NIC_CRQ_TAIL_REG,
- HCLGE_NIC_CRQ_HEAD_REG,
- HCLGE_VECTOR0_CMDQ_SRC_REG,
- HCLGE_CMDQ_INTR_STS_REG,
- HCLGE_CMDQ_INTR_EN_REG,
- HCLGE_CMDQ_INTR_GEN_REG};
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_CMDQ_INTR_STS_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
HCLGE_PF_OTHER_INT_REG,
@@ -370,14 +371,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
-static const u8 hclge_hash_key[] = {
- 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
- 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
- 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
- 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
- 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
-};
-
static const u32 hclge_dfx_bd_offset_list[] = {
HCLGE_DFX_BIOS_BD_OFFSET,
HCLGE_DFX_SSU_0_BD_OFFSET,
@@ -478,6 +471,20 @@ static const struct key_info tuple_key_info[] = {
offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};
+/**
+ * hclge_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send routine for the command queue; it posts the
+ * descriptors to the queue and reclaims the completed ones.
+ */
+int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
+{
+ return hclge_comm_cmd_send(&hw->hw, desc, num);
+}
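
hclge_cmd_send() is now a thin forwarder into the shared command module, so the many existing call sites in this file keep compiling unchanged; only the inner &hw->hw indirection is new. Typical unchanged usage, assuming a struct hclge_dev *hdev in scope:

        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1); /* forwards to hclge_comm_cmd_send() */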
+
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -604,111 +611,6 @@ int hclge_mac_update_stats(struct hclge_dev *hdev)
return hclge_mac_update_stats_defective(hdev);
}
-static int hclge_tqps_update_stats(struct hnae3_handle *handle)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- struct hnae3_queue *queue;
- struct hclge_desc desc[1];
- struct hclge_tqp *tqp;
- int ret, i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- /* command : HCLGE_OPC_QUERY_RX_STATS */
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
- true);
-
- desc[0].data[0] = cpu_to_le32(tqp->index);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- ret, i);
- return ret;
- }
- tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[1]);
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- /* command : HCLGE_OPC_QUERY_TX_STATS */
- hclge_cmd_setup_basic_desc(&desc[0],
- HCLGE_OPC_QUERY_TX_STATS,
- true);
-
- desc[0].data[0] = cpu_to_le32(tqp->index);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- ret, i);
- return ret;
- }
- tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[1]);
- }
-
- return 0;
-}
-
-static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_tqp *tqp;
- u64 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
- }
-
- return buff;
-}
-
-static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
-
- /* each tqp has two queues: TX and RX */
- return kinfo->num_tqps * (2);
-}
-
-static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- u8 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
- struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
- tqp->index);
- buff = buff + ETH_GSTRING_LEN;
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
- struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
- tqp->index);
- buff = buff + ETH_GSTRING_LEN;
- }
-
- return buff;
-}
-
static int hclge_comm_get_count(struct hclge_dev *hdev,
const struct hclge_comm_stats_str strs[],
u32 size)
@@ -769,7 +671,7 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
handle = &hdev->vport[0].nic;
if (handle->client) {
- status = hclge_tqps_update_stats(handle);
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status) {
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
@@ -799,7 +701,7 @@ static void hclge_update_stats(struct hnae3_handle *handle,
"Update MAC stats fail, status = %d.\n",
status);
- status = hclge_tqps_update_stats(handle);
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status)
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
@@ -848,7 +750,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
} else if (stringset == ETH_SS_STATS) {
count = hclge_comm_get_count(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string)) +
- hclge_tqps_get_sset_count(handle, stringset);
+ hclge_comm_tqps_get_sset_count(handle);
}
return count;
@@ -866,7 +768,7 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
size = ARRAY_SIZE(g_mac_stats_string);
p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
size, p);
- p = hclge_tqps_get_strings(handle, p);
+ p = hclge_comm_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
@@ -900,7 +802,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
p = hclge_comm_get_stats(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
- p = hclge_tqps_get_stats(handle, p);
+ p = hclge_comm_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
@@ -1480,7 +1382,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
- ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
@@ -1520,7 +1422,7 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
if (!dev_specs->rss_ind_tbl_size)
dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
- dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
+ dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
if (!dev_specs->max_qset_num)
@@ -1567,7 +1469,7 @@ static int hclge_query_dev_specs(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
true);
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
@@ -1613,12 +1515,39 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
+static void hclge_init_tc_config(struct hclge_dev *hdev)
+{
+ unsigned int i;
+
+ if (hdev->tc_max > HNAE3_MAX_TC ||
+ hdev->tc_max < 1) {
+ dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
+ hdev->tc_max);
+ hdev->tc_max = 1;
+ }
+
+ /* Dev does not support DCB */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ hdev->tc_max = 1;
+ hdev->pfc_max = 0;
+ } else {
+ hdev->pfc_max = hdev->tc_max;
+ }
+
+ hdev->tm_info.num_tc = 1;
+
+ /* Non-contiguous TCs are currently not supported */
+ for (i = 0; i < hdev->tm_info.num_tc; i++)
+ hnae3_set_bit(hdev->hw_tc_map, i, 1);
+
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+}
+
static int hclge_configure(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
const struct cpumask *cpumask = cpu_online_mask;
struct hclge_cfg cfg;
- unsigned int i;
int node, ret;
ret = hclge_get_cfg(hdev, &cfg);
@@ -1662,29 +1591,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
- if ((hdev->tc_max > HNAE3_MAX_TC) ||
- (hdev->tc_max < 1)) {
- dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
- hdev->tc_max);
- hdev->tc_max = 1;
- }
-
- /* Dev does not support DCB */
- if (!hnae3_dev_dcb_supported(hdev)) {
- hdev->tc_max = 1;
- hdev->pfc_max = 0;
- } else {
- hdev->pfc_max = hdev->tc_max;
- }
-
- hdev->tm_info.num_tc = 1;
-
- /* Non-contiguous TCs are currently not supported */
- for (i = 0; i < hdev->tm_info.num_tc; i++)
- hnae3_set_bit(hdev->hw_tc_map, i, 1);
-
- hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
-
+ hclge_init_tc_config(hdev);
hclge_init_kdump_kernel_config(hdev);
/* Set the affinity based on numa node */
@@ -1736,11 +1643,11 @@ static int hclge_config_gro(struct hclge_dev *hdev)
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
- struct hclge_tqp *tqp;
+ struct hclge_comm_tqp *tqp;
int i;
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
- sizeof(struct hclge_tqp), GFP_KERNEL);
+ sizeof(struct hclge_comm_tqp), GFP_KERNEL);
if (!hdev->htqp)
return -ENOMEM;
@@ -1759,11 +1666,11 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
* HCLGE_TQP_MAX_SIZE_DEV_V2
*/
if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGE_TQP_REG_OFFSET +
i * HCLGE_TQP_REG_SIZE;
else
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGE_TQP_REG_OFFSET +
HCLGE_TQP_EXT_REG_OFFSET +
(i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
@@ -1864,8 +1771,8 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
kinfo = &nic->kinfo;
for (i = 0; i < vport->alloc_tqps; i++) {
- struct hclge_tqp *q =
- container_of(kinfo->tqp[i], struct hclge_tqp, q);
+ struct hclge_comm_tqp *q =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
bool is_pf;
int ret;
@@ -1885,7 +1792,7 @@ static int hclge_map_tqp(struct hclge_dev *hdev)
u16 i, num_vport;
num_vport = hdev->num_req_vfs + 1;
- for (i = 0; i < num_vport; i++) {
+ for (i = 0; i < num_vport; i++) {
int ret;
ret = hclge_map_tqp_to_vport(hdev, vport);
@@ -1907,7 +1814,7 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->pdev = hdev->pdev;
nic->ae_algo = &ae_algo;
nic->numa_node_mask = hdev->numa_node_mask;
- nic->kinfo.io_base = hdev->hw.io_base;
+ nic->kinfo.io_base = hdev->hw.hw.io_base;
ret = hclge_knic_setup(vport, num_tqps,
hdev->num_tx_desc, hdev->num_rx_desc);
@@ -2416,9 +2323,9 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
/* The first descriptor sets the NEXT bit to 1 */
if (i == 0)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
else
- desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
@@ -2461,9 +2368,9 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
/* The first descriptor sets the NEXT bit to 1 */
if (i == 0)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
else
- desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
@@ -2592,8 +2499,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
roce->rinfo.base_vector = hdev->num_nic_msi;
roce->rinfo.netdev = nic->kinfo.netdev;
- roce->rinfo.roce_io_base = hdev->hw.io_base;
- roce->rinfo.roce_mem_base = hdev->hw.mem_base;
+ roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
+ roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
@@ -2653,11 +2560,38 @@ static u8 hclge_check_speed_dup(u8 duplex, int speed)
return duplex;
}
+static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
+ {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
+ {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
+ {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
+ {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
+ {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
+ {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
+ {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
+ {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
+ {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
+};
+
+static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
+{
+ u16 i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
+ if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
+ *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
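
With the nine-way switch replaced by this lookup, an unsupported speed falls out as -EINVAL and a future speed becomes a one-line table entry. A usage sketch mirroring the call site below:

        u32 speed_fw;

        if (hclge_convert_to_fw_speed(HCLGE_MAC_SPEED_25G, &speed_fw))
                return -EINVAL; /* speed not in hclge_mac_speed_map_to_fw[] */
        /* speed_fw == HCLGE_FW_MAC_SPEED_25G here */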
+
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
u8 duplex)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
+ u32 speed_fw;
int ret;
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
@@ -2667,48 +2601,14 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
if (duplex)
hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
- switch (speed) {
- case HCLGE_MAC_SPEED_10M:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
- break;
- case HCLGE_MAC_SPEED_100M:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
- break;
- case HCLGE_MAC_SPEED_1G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
- break;
- case HCLGE_MAC_SPEED_10G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
- break;
- case HCLGE_MAC_SPEED_25G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
- break;
- case HCLGE_MAC_SPEED_40G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
- break;
- case HCLGE_MAC_SPEED_50G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
- break;
- case HCLGE_MAC_SPEED_100G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
- break;
- case HCLGE_MAC_SPEED_200G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
- break;
- default:
+ ret = hclge_convert_to_fw_speed(speed, &speed_fw);
+ if (ret) {
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
- return -EINVAL;
+ return ret;
}
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
+ speed_fw);
hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1);
@@ -2933,16 +2833,20 @@ static int hclge_mac_init(struct hclge_dev *hdev)
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_mbx_scheduled = jiffies;
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_rst_scheduled = jiffies;
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
}
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
@@ -3237,7 +3141,7 @@ static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
true);
@@ -3294,7 +3198,7 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
false);
@@ -3501,7 +3405,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
@@ -3509,7 +3413,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
@@ -3643,7 +3547,7 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
- vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
+ vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
hdev->vector_status[0] = 0;
hdev->num_msi_left -= 1;
@@ -3827,10 +3731,17 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
static void hclge_mailbox_service_task(struct hclge_dev *hdev)
{
if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
- test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
+ test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
return;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "mbx service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
+ smp_processor_id());
+
hclge_mbx_handler(hdev);
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
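
The warning added above is a scheduling-latency watchdog: the scheduling path stamps last_mbx_scheduled, and the task complains if it only ran after a deadline. A self-contained sketch of the pattern; TASK_SCHED_TIMEOUT and the function name are hypothetical:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/smp.h>

#define TASK_SCHED_TIMEOUT      msecs_to_jiffies(30000) /* hypothetical bound */

static void warn_if_scheduled_late(unsigned long scheduled_at)
{
        /* true once the deadline has already passed */
        if (time_is_before_jiffies(scheduled_at + TASK_SCHED_TIMEOUT))
                pr_warn("task ran %ums after being queued on cpu%u!\n",
                        jiffies_to_msecs(jiffies - scheduled_at),
                        smp_processor_id());
}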
@@ -3865,7 +3776,7 @@ static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
return;
}
msleep(HCLGE_PF_RESET_SYNC_TIME);
- hclge_cmd_reuse_desc(&desc, true);
+ hclge_comm_cmd_reuse_desc(&desc, true);
} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
@@ -4022,13 +3933,13 @@ static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
u32 reg_val;
- reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
if (enable)
- reg_val |= HCLGE_NIC_SW_RST_RDY;
+ reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
else
- reg_val &= ~HCLGE_NIC_SW_RST_RDY;
+ reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
+ hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
}
static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
@@ -4065,9 +3976,9 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* After performing PF reset, it is not necessary to do the
* mailbox handling or send any command to firmware, because
* any mailbox handling or command to firmware is only valid
- * after hclge_cmd_init is called.
+ * after hclge_comm_cmd_init is called.
*/
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
@@ -4480,6 +4391,13 @@ static void hclge_reset_service_task(struct hclge_dev *hdev)
if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
return;
+ if (time_is_before_jiffies(hdev->last_rst_scheduled +
+ HCLGE_RESET_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "reset service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
+ smp_processor_id());
+
down(&hdev->reset_sem);
set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
@@ -4614,11 +4532,11 @@ static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
/* need an extended offset to configure vectors >= 64 */
if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
- vector_info->io_addr = hdev->hw.io_base +
+ vector_info->io_addr = hdev->hw.hw.io_base +
HCLGE_VECTOR_REG_BASE +
(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
else
- vector_info->io_addr = hdev->hw.io_base +
+ vector_info->io_addr = hdev->hw.hw.io_base +
HCLGE_VECTOR_EXT_REG_BASE +
(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
HCLGE_VECTOR_REG_OFFSET_H +
@@ -4688,334 +4606,43 @@ static int hclge_put_vector(struct hnae3_handle *handle, int vector)
return 0;
}
-static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
-{
- return HCLGE_RSS_KEY_SIZE;
-}
-
-static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
- const u8 hfunc, const u8 *key)
-{
- struct hclge_rss_config_cmd *req;
- unsigned int key_offset = 0;
- struct hclge_desc desc;
- int key_counts;
- int key_size;
- int ret;
-
- key_counts = HCLGE_RSS_KEY_SIZE;
- req = (struct hclge_rss_config_cmd *)desc.data;
-
- while (key_counts) {
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
- false);
-
- req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
- req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
-
- key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
- memcpy(req->hash_key,
- key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
-
- key_counts -= key_size;
- key_offset++;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Configure RSS config fail, status = %d\n",
- ret);
- return ret;
- }
- }
- return 0;
-}
-
-static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
-{
- struct hclge_rss_indirection_table_cmd *req;
- struct hclge_desc desc;
- int rss_cfg_tbl_num;
- u8 rss_msb_oft;
- u8 rss_msb_val;
- int ret;
- u16 qid;
- int i;
- u32 j;
-
- req = (struct hclge_rss_indirection_table_cmd *)desc.data;
- rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
- HCLGE_RSS_CFG_TBL_SIZE;
-
- for (i = 0; i < rss_cfg_tbl_num; i++) {
- hclge_cmd_setup_basic_desc
- (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
-
- req->start_table_index =
- cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
- req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
- for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
- qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
- req->rss_qid_l[j] = qid & 0xff;
- rss_msb_oft =
- j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
- rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
- (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
- req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
- }
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Configure rss indir table fail,status = %d\n",
- ret);
- return ret;
- }
- }
- return 0;
-}
-
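The removed helper packed each 9-bit queue id into a low byte plus a shared msb bitmap; a sketch of that packing, assuming an 8-bit low part and one msb per entry (the literals stand in for HCLGE_RSS_CFG_TBL_BW_L/_H):

static void example_pack_rss_qid(u16 qid, unsigned int j, u8 *qid_l,
				 u8 *qid_h)
{
	qid_l[j] = qid & 0xff;			/* low 8 bits of the qid */
	/* bit 8 of the qid lands in bit (j % 8) of byte (j / 8) */
	qid_h[j / 8] |= ((qid >> 8) & 0x1) << (j % 8);
}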
-static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
- u16 *tc_size, u16 *tc_offset)
-{
- struct hclge_rss_tc_mode_cmd *req;
- struct hclge_desc desc;
- int ret;
- int i;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
- req = (struct hclge_rss_tc_mode_cmd *)desc.data;
-
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- u16 mode = 0;
-
- hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
- hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
- HCLGE_RSS_TC_SIZE_S, tc_size[i]);
- hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
- tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
- hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
- HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
-
- req->rss_tc_mode[i] = cpu_to_le16(mode);
- }
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Configure rss tc mode fail, status = %d\n", ret);
-
- return ret;
-}
-
-static void hclge_get_rss_type(struct hclge_vport *vport)
-{
- if (vport->rss_tuple_sets.ipv4_tcp_en ||
- vport->rss_tuple_sets.ipv4_udp_en ||
- vport->rss_tuple_sets.ipv4_sctp_en ||
- vport->rss_tuple_sets.ipv6_tcp_en ||
- vport->rss_tuple_sets.ipv6_udp_en ||
- vport->rss_tuple_sets.ipv6_sctp_en)
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
- else if (vport->rss_tuple_sets.ipv4_fragment_en ||
- vport->rss_tuple_sets.ipv6_fragment_en)
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
- else
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
-}
-
-static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
-{
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
-
- /* Get the tuple cfg from pf */
- req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
- hclge_get_rss_type(&hdev->vport[0]);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Configure rss input fail, status = %d\n", ret);
- return ret;
-}
-
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
u8 *key, u8 *hfunc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
- int i;
+ struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;
- /* Get hash algorithm */
- if (hfunc) {
- switch (vport->rss_algo) {
- case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
- *hfunc = ETH_RSS_HASH_TOP;
- break;
- case HCLGE_RSS_HASH_ALGO_SIMPLE:
- *hfunc = ETH_RSS_HASH_XOR;
- break;
- default:
- *hfunc = ETH_RSS_HASH_UNKNOWN;
- break;
- }
- }
-
- /* Get the RSS Key required by the user */
- if (key)
- memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
+ hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
- /* Get indirect table */
- if (indir)
- for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
- indir[i] = vport->rss_indirection_tbl[i];
+ hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
+ ae_dev->dev_specs.rss_ind_tbl_size);
return 0;
}
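A plausible sketch of the algorithm reporting that hclge_comm_get_rss_hash_info() now performs, based on the switch removed above (needs <linux/ethtool.h>; the raw algorithm values are assumptions):

static void example_report_hfunc(u8 rss_algo, u8 *hfunc)
{
	switch (rss_algo) {
	case 0:				/* Toeplitz */
		*hfunc = ETH_RSS_HASH_TOP;
		break;
	case 1:				/* simple XOR */
		*hfunc = ETH_RSS_HASH_XOR;
		break;
	default:
		*hfunc = ETH_RSS_HASH_UNKNOWN;
		break;
	}
}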
-static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
- u8 *hash_algo)
-{
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- return 0;
- case ETH_RSS_HASH_XOR:
- *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
- return 0;
- case ETH_RSS_HASH_NO_CHANGE:
- *hash_algo = vport->rss_algo;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- u8 hash_algo;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret, i;
- ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
+ ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
if (ret) {
dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
return ret;
}
- /* Set the RSS Hash Key if specified by the user */
- if (key) {
- ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
- if (ret)
- return ret;
-
- /* Update the shadow RSS key with user specified qids */
- memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
- } else {
- ret = hclge_set_rss_algo_key(hdev, hash_algo,
- vport->rss_hash_key);
- if (ret)
- return ret;
- }
- vport->rss_algo = hash_algo;
-
/* Update the shadow RSS table with user-specified qids */
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
- vport->rss_indirection_tbl[i] = indir[i];
+ rss_cfg->rss_indirection_tbl[i] = indir[i];
/* Update the hardware */
- return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
-}
-
-static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
-{
- u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
-
- if (nfc->data & RXH_L4_B_2_3)
- hash_sets |= HCLGE_D_PORT_BIT;
- else
- hash_sets &= ~HCLGE_D_PORT_BIT;
-
- if (nfc->data & RXH_IP_SRC)
- hash_sets |= HCLGE_S_IP_BIT;
- else
- hash_sets &= ~HCLGE_S_IP_BIT;
-
- if (nfc->data & RXH_IP_DST)
- hash_sets |= HCLGE_D_IP_BIT;
- else
- hash_sets &= ~HCLGE_D_IP_BIT;
-
- if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
- hash_sets |= HCLGE_V_TAG_BIT;
-
- return hash_sets;
-}
-
-static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
- struct ethtool_rxnfc *nfc,
- struct hclge_rss_input_tuple_cmd *req)
-{
- struct hclge_dev *hdev = vport->back;
- u8 tuple_sets;
-
- req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
-
- tuple_sets = hclge_get_rss_hash_bits(nfc);
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- req->ipv4_tcp_en = tuple_sets;
- break;
- case TCP_V6_FLOW:
- req->ipv6_tcp_en = tuple_sets;
- break;
- case UDP_V4_FLOW:
- req->ipv4_udp_en = tuple_sets;
- break;
- case UDP_V6_FLOW:
- req->ipv6_udp_en = tuple_sets;
- break;
- case SCTP_V4_FLOW:
- req->ipv4_sctp_en = tuple_sets;
- break;
- case SCTP_V6_FLOW:
- if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
- (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
- return -EINVAL;
-
- req->ipv6_sctp_en = tuple_sets;
- break;
- case IPV4_FLOW:
- req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- break;
- case IPV6_FLOW:
- req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
}
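The rewritten set path keeps the shadow-then-commit shape: update the driver copy first, then program the hardware in one call. A generic sketch with illustrative names:

static int example_shadow_then_commit(u16 *shadow, const u32 *indir,
				      u16 size, int (*commit)(const u16 *))
{
	u16 i;

	for (i = 0; i < size; i++)
		shadow[i] = indir[i];	/* update the driver copy first */

	return commit(shadow);		/* then push it to hardware */
}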
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
@@ -5023,92 +4650,20 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
int ret;
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
-
- ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to init rss tuple cmd, ret = %d\n", ret);
- return ret;
- }
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->rss_cfg, nfc);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Set rss tuple fail, status = %d\n", ret);
+ "failed to set rss tuple, ret = %d.\n", ret);
return ret;
}
- vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
- vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
- vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
- vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
- vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
- vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
- vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
- vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
- hclge_get_rss_type(vport);
- return 0;
-}
-
-static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
- u8 *tuple_sets)
-{
- switch (flow_type) {
- case TCP_V4_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
- break;
- case UDP_V4_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
- break;
- case TCP_V6_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
- break;
- case UDP_V6_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
- break;
- case SCTP_V4_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
- break;
- case SCTP_V6_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
- break;
- case IPV4_FLOW:
- case IPV6_FLOW:
- *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
- break;
- default:
- return -EINVAL;
- }
-
+ hclge_comm_get_rss_type(&vport->nic, &hdev->rss_cfg.rss_tuple_sets);
return 0;
}
-static u64 hclge_convert_rss_tuple(u8 tuple_sets)
-{
- u64 tuple_data = 0;
-
- if (tuple_sets & HCLGE_D_PORT_BIT)
- tuple_data |= RXH_L4_B_2_3;
- if (tuple_sets & HCLGE_S_PORT_BIT)
- tuple_data |= RXH_L4_B_0_1;
- if (tuple_sets & HCLGE_D_IP_BIT)
- tuple_data |= RXH_IP_DST;
- if (tuple_sets & HCLGE_S_IP_BIT)
- tuple_data |= RXH_IP_SRC;
-
- return tuple_data;
-}
-
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
struct ethtool_rxnfc *nfc)
{
@@ -5118,11 +4673,12 @@ static int hclge_get_rss_tuple(struct hnae3_handle *handle,
nfc->data = 0;
- ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
+ ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
+ &tuple_sets);
if (ret || !tuple_sets)
return ret;
- nfc->data = hclge_convert_rss_tuple(tuple_sets);
+ nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
return 0;
}
@@ -5175,78 +4731,35 @@ static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
tc_offset[i] = tc_info->tqp_offset[i];
}
- return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
+ tc_size);
}
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
- struct hclge_vport *vport = hdev->vport;
- u16 *rss_indir = vport[0].rss_indirection_tbl;
- u8 *key = vport[0].rss_hash_key;
- u8 hfunc = vport[0].rss_algo;
+ u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
+ u8 *key = hdev->rss_cfg.rss_hash_key;
+ u8 hfunc = hdev->rss_cfg.rss_algo;
int ret;
- ret = hclge_set_rss_indir_table(hdev, rss_indir);
+ ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_indir);
if (ret)
return ret;
- ret = hclge_set_rss_algo_key(hdev, hfunc, key);
+ ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
if (ret)
return ret;
- ret = hclge_set_rss_input_tuple(hdev);
+ ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic,
+ &hdev->hw.hw, true,
+ &hdev->rss_cfg);
if (ret)
return ret;
return hclge_init_rss_tc_mode(hdev);
}
-void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
-{
- struct hclge_vport *vport = &hdev->vport[0];
- int i;
-
- for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
- vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
-}
-
-static int hclge_rss_init_cfg(struct hclge_dev *hdev)
-{
- u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
- int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- struct hclge_vport *vport = &hdev->vport[0];
- u16 *rss_ind_tbl;
-
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
- rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
-
- vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
- vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv6_sctp_en =
- hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
- HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
- HCLGE_RSS_INPUT_TUPLE_SCTP;
- vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-
- vport->rss_algo = rss_algo;
-
- rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
- sizeof(*rss_ind_tbl), GFP_KERNEL);
- if (!rss_ind_tbl)
- return -ENOMEM;
-
- vport->rss_indirection_tbl = rss_ind_tbl;
- memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
-
- hclge_rss_indir_init_cfg(hdev);
-
- return 0;
-}
-
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
int vector_id, bool en,
struct hnae3_ring_chain_node *ring_chain)
@@ -5256,7 +4769,7 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
struct hclge_desc desc;
struct hclge_ctrl_vector_chain_cmd *req =
(struct hclge_ctrl_vector_chain_cmd *)desc.data;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
enum hclge_opcode_type op;
u16 tqp_type_and_id;
int i;
@@ -5886,9 +5399,9 @@ static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
int ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
@@ -6790,7 +6303,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
"Error: vf id (%u) should be less than %u\n",
- vf - 1, hdev->num_req_vfs);
+ vf - 1U, hdev->num_req_vfs);
return -EINVAL;
}
@@ -6800,7 +6313,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
if (ring >= tqps) {
dev_err(&hdev->pdev->dev,
"Error: queue id (%u) > max tqp num (%u)\n",
- ring, tqps - 1);
+ ring, tqps - 1U);
return -EINVAL;
}
@@ -7161,6 +6674,37 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
}
}
+static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
+ u16 location)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location == location)
+ return rule;
+ else if (rule->location > location)
+ return NULL;
+ }
+
+ return NULL;
+}
+
+static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ } else {
+ u64 vf_id;
+
+ fs->ring_cookie = rule->queue_id;
+ vf_id = rule->vf_id;
+ vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ fs->ring_cookie |= vf_id;
+ }
+}
+
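The new helper encodes the VF id into the upper bits of the ethtool ring cookie; the inverse, using the real masks from <linux/ethtool.h>, looks roughly like:

static void example_parse_ring_cookie(u64 cookie, u32 *queue, u8 *vf)
{
	*queue = cookie & ETHTOOL_RX_FLOW_SPEC_RING;
	*vf = (cookie & ETHTOOL_RX_FLOW_SPEC_RING_VF) >>
	      ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
}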
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd)
{
@@ -7168,7 +6712,6 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct hclge_fd_rule *rule = NULL;
struct hclge_dev *hdev = vport->back;
struct ethtool_rx_flow_spec *fs;
- struct hlist_node *node2;
if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP;
@@ -7177,14 +6720,9 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
spin_lock_bh(&hdev->fd_rule_lock);
- hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
- if (rule->location >= fs->location)
- break;
- }
-
- if (!rule || fs->location != rule->location) {
+ rule = hclge_get_fd_rule(hdev, fs->location);
+ if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -ENOENT;
}
@@ -7222,16 +6760,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
hclge_fd_get_ext_info(fs, rule);
- if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
- fs->ring_cookie = RX_CLS_FLOW_DISC;
- } else {
- u64 vf_id;
-
- fs->ring_cookie = rule->queue_id;
- vf_id = rule->vf_id;
- vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
- fs->ring_cookie |= vf_id;
- }
+ hclge_fd_get_ring_cookie(fs, rule);
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -7776,7 +7305,7 @@ static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
}
static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
@@ -7866,7 +7395,7 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
}
/* modify and write new config parameter */
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
req->switch_param = (req->switch_param & param_mask) | switch_param;
req->param_mask = param_mask;
@@ -7960,7 +7489,7 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
/* 3 Config mac work mode with loopback flag
* and its original configure parameters
*/
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
@@ -7968,16 +7497,13 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
-static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
- enum hnae3_loop loop_mode)
+static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
{
-#define HCLGE_COMMON_LB_RETRY_MS 10
-#define HCLGE_COMMON_LB_RETRY_NUM 100
-
struct hclge_common_lb_cmd *req;
struct hclge_desc desc;
- int ret, i = 0;
u8 loop_mode_b;
+ int ret;
req = (struct hclge_common_lb_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
@@ -7994,23 +7520,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
break;
default:
dev_err(&hdev->pdev->dev,
- "unsupported common loopback mode %d\n", loop_mode);
+ "unsupported loopback mode %d\n", loop_mode);
return -ENOTSUPP;
}
- if (en) {
+ req->mask = loop_mode_b;
+ if (en)
req->enable = loop_mode_b;
- req->mask = loop_mode_b;
- } else {
- req->mask = loop_mode_b;
- }
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
- "common loopback set fail, ret = %d\n", ret);
- return ret;
- }
+ "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
+ loop_mode, ret);
+
+ return ret;
+}
+
+static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
+{
+#define HCLGE_COMMON_LB_RETRY_MS 10
+#define HCLGE_COMMON_LB_RETRY_NUM 100
+
+ struct hclge_common_lb_cmd *req;
+ struct hclge_desc desc;
+ u32 i = 0;
+ int ret;
+
+ req = (struct hclge_common_lb_cmd *)desc.data;
do {
msleep(HCLGE_COMMON_LB_RETRY_MS);
@@ -8019,20 +7556,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "common loopback get, ret = %d\n", ret);
+ "failed to get loopback done status, ret = %d\n",
+ ret);
return ret;
}
} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
!(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
- dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
+ dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
return -EBUSY;
} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
- dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
+ dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
return -EIO;
}
- return ret;
+
+ return 0;
+}
+
+static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
+{
+ int ret;
+
+ ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
+ if (ret)
+ return ret;
+
+ return hclge_cfg_common_loopback_wait(hdev);
}
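The split above follows a send-then-poll shape: issue the command, then poll a completion flag under a bounded retry budget. A generic sketch (the callback and the limits are illustrative):

static int example_poll_done(struct hclge_dev *hdev,
			     int (*query)(struct hclge_dev *, bool *done))
{
	bool done = false;
	int i, ret;

	for (i = 0; i < 100 && !done; i++) {	/* bounded retry budget */
		msleep(10);			/* wait between polls */
		ret = query(hdev, &done);	/* read completion status */
		if (ret)
			return ret;
	}

	return done ? 0 : -EBUSY;
}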
static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
@@ -8213,22 +7764,6 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev)
HNAE3_LOOP_PARALLEL_SERDES);
}
-static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hnae3_knic_private_info *kinfo;
- struct hnae3_queue *queue;
- struct hclge_tqp *tqp;
- int i;
-
- kinfo = &vport->nic.kinfo;
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
- }
-}
-
static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT 100000
@@ -8270,7 +7805,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
hdev->hw.mac.link = 0;
/* reset tqp stats */
- hclge_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclge_mac_start_phy(hdev);
@@ -8308,7 +7843,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_mac_stop_phy(hdev);
/* reset tqp stats */
- hclge_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclge_update_link_status(hdev);
}
@@ -8511,14 +8046,14 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
if (is_mc) {
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
memcpy(desc[0].data,
req,
sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
hclge_cmd_setup_basic_desc(&desc[1],
HCLGE_OPC_MAC_VLAN_ADD,
true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2],
HCLGE_OPC_MAC_VLAN_ADD,
true);
@@ -8568,12 +8103,12 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
resp_code,
HCLGE_MAC_VLAN_ADD);
} else {
- hclge_cmd_reuse_desc(&mc_desc[0], false);
- mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_reuse_desc(&mc_desc[1], false);
- mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_reuse_desc(&mc_desc[2], false);
- mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
+ mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
+ mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
+ mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
memcpy(mc_desc[0].data, req,
sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
@@ -8743,6 +8278,7 @@ int hclge_update_mac_list(struct hclge_vport *vport,
enum HCLGE_MAC_ADDR_TYPE mac_type,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_node *mac_node;
struct list_head *list;
@@ -8767,9 +8303,10 @@ int hclge_update_mac_list(struct hclge_vport *vport,
/* if this address was never added, it is unnecessary to delete */
if (state == HCLGE_MAC_TO_DEL) {
spin_unlock_bh(&vport->mac_list_lock);
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "failed to delete address %pM from mac list\n",
- addr);
+ "failed to delete address %s from mac list\n",
+ format_mac_addr);
return -ENOENT;
}
@@ -8802,6 +8339,7 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle,
int hclge_add_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc;
@@ -8812,9 +8350,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
- addr, is_zero_ether_addr(addr),
+ "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
+ format_mac_addr, is_zero_ether_addr(addr),
is_broadcast_ether_addr(addr),
is_multicast_ether_addr(addr));
return -EINVAL;
@@ -8871,6 +8410,7 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle,
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
int ret;
@@ -8879,8 +8419,9 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
- dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
- addr);
+ hnae3_format_mac_addr(format_mac_addr, addr);
+ dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -8911,6 +8452,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
int hclge_add_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
@@ -8919,9 +8461,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "Add mc mac err! invalid mac:%pM.\n",
- addr);
+ "Add mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
memset(&req, 0, sizeof(req));
@@ -8973,16 +8516,18 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle,
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
struct hclge_desc desc[3];
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_dbg(&hdev->pdev->dev,
- "Remove mc mac err! invalid mac:%pM.\n",
- addr);
+ "Remove mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -9422,16 +8967,18 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
u8 *mac_addr)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
vport = hclge_get_vf_vport(hdev, vf);
if (!vport)
return -EINVAL;
+ hnae3_format_mac_addr(format_mac_addr, mac_addr);
if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
dev_info(&hdev->pdev->dev,
- "Specified MAC(=%pM) is same as before, no change committed!\n",
- mac_addr);
+ "Specified MAC(=%s) is same as before, no change committed!\n",
+ format_mac_addr);
return 0;
}
@@ -9439,13 +8986,13 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
dev_info(&hdev->pdev->dev,
- "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
- vf, mac_addr);
+ "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
+ vf, format_mac_addr);
return hclge_inform_reset_assert_to_vf(vport);
}
- dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
- vf, mac_addr);
+ dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
+ vf, format_mac_addr);
return 0;
}
@@ -9549,6 +9096,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
{
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
unsigned char *old_addr = NULL;
int ret;
@@ -9557,9 +9105,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
if (is_zero_ether_addr(new_addr) ||
is_broadcast_ether_addr(new_addr) ||
is_multicast_ether_addr(new_addr)) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
dev_err(&hdev->pdev->dev,
- "change uc mac err! invalid mac: %pM.\n",
- new_addr);
+ "change uc mac err! invalid mac: %s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -9577,9 +9126,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
spin_lock_bh(&vport->mac_list_lock);
ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
if (ret) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
dev_err(&hdev->pdev->dev,
- "failed to change the mac addr:%pM, ret = %d\n",
- new_addr, ret);
+ "failed to change the mac addr:%s, ret = %d\n",
+ format_mac_addr, ret);
spin_unlock_bh(&vport->mac_list_lock);
if (!is_first)
@@ -9677,20 +9227,20 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to get vlan filter config, ret = %d.\n", ret);
+ dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
+ vf_id, ret);
return ret;
}
/* modify and write new config parameter */
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
req->vlan_fe = filter_en ?
(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
- ret);
+ dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
+ vf_id, ret);
return ret;
}
@@ -9809,7 +9359,7 @@ static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
hclge_cmd_setup_basic_desc(&desc[1],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
vf_byte_off = vfid / 8;
vf_byte_val = 1 << (vfid % 8);
@@ -9936,6 +9486,32 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
return ret;
}
+static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
+ u16 vlan_id, bool is_kill)
+{
+ /* vlan 0 may be added twice when 8021q module is enabled */
+ if (!is_kill && !vlan_id &&
+ test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ return false;
+
+ if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Add port vlan failed, vport %u is already in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ if (is_kill &&
+ !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Delete port vlan failed, vport %u is not in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ return true;
+}
+
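The bitmap bookkeeping above relies on test_and_set_bit()/test_and_clear_bit() returning the previous bit value, so duplicate adds and spurious deletes are detected atomically; a minimal sketch:

static bool example_vlan_member_update(unsigned long *members, u16 vport_id,
				       bool is_kill)
{
	if (!is_kill)		/* true only if the vport was not yet set */
		return !test_and_set_bit(vport_id, members);

	/* true only if the vport really was a member */
	return test_and_clear_bit(vport_id, members);
}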
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
u16 vport_id, u16 vlan_id,
bool is_kill)
@@ -9957,26 +9533,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return ret;
}
- /* vlan 0 may be added twice when 8021q module is enabled */
- if (!is_kill && !vlan_id &&
- test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
return 0;
- if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
- dev_err(&hdev->pdev->dev,
- "Add port vlan failed, vport %u is already in vlan %u\n",
- vport_id, vlan_id);
- return -EINVAL;
- }
-
- if (is_kill &&
- !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
- dev_err(&hdev->pdev->dev,
- "Delete port vlan failed, vport %u is not in vlan %u\n",
- vport_id, vlan_id);
- return -EINVAL;
- }
-
for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
vport_num++;
@@ -10168,67 +9727,80 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
return status;
}
-static int hclge_init_vlan_config(struct hclge_dev *hdev)
+static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
-#define HCLGE_DEF_VLAN_TYPE 0x8100
-
- struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hclge_vport *vport;
int ret;
int i;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- /* for revision 0x21, vf vlan filter is per function */
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- ret = hclge_set_vlan_filter_ctrl(hdev,
- HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS,
- true,
- vport->vport_id);
- if (ret)
- return ret;
- vport->cur_vlan_fltr_en = true;
- }
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ true, 0);
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, true,
- 0);
- if (ret)
- return ret;
- } else {
+ /* for revision 0x21, vf vlan filter is per function */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS_V1_B,
- true, 0);
+ HCLGE_FILTER_FE_EGRESS, true,
+ vport->vport_id);
if (ret)
return ret;
+ vport->cur_vlan_fltr_en = true;
}
- hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, true, 0);
+}
- ret = hclge_set_vlan_protocol_type(hdev);
- if (ret)
- return ret;
+static int hclge_init_vlan_type(struct hclge_dev *hdev)
+{
+ hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- u16 vlan_tag;
- u8 qos;
+ return hclge_set_vlan_protocol_type(hdev);
+}
+static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
+{
+ struct hclge_port_base_vlan_config *cfg;
+ struct hclge_vport *vport;
+ int ret;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
- vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- qos = vport->port_base_vlan_cfg.vlan_info.qos;
+ cfg = &vport->port_base_vlan_cfg;
- ret = hclge_vlan_offload_cfg(vport,
- vport->port_base_vlan_cfg.state,
- vlan_tag, qos);
+ ret = hclge_vlan_offload_cfg(vport, cfg->state,
+ cfg->vlan_info.vlan_tag,
+ cfg->vlan_info.qos);
if (ret)
return ret;
}
+ return 0;
+}
+
+static int hclge_init_vlan_config(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ int ret;
+
+ ret = hclge_init_vlan_filter(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vlan_type(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vport_vlan_offload(hdev);
+ if (ret)
+ return ret;
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
@@ -10485,12 +10057,41 @@ static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
return false;
}
+static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
+ struct hclge_vlan_info *new_info,
+ struct hclge_vlan_info *old_info)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ /* add new VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
+ vport->vport_id, new_info->vlan_tag,
+ false);
+ if (ret)
+ return ret;
+
+ /* remove old VLAN tag */
+ if (old_info->vlan_tag == 0)
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ true, 0);
+ else
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ old_info->vlan_tag, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vport%u port base vlan %u, ret = %d.\n",
+ vport->vport_id, old_info->vlan_tag, ret);
+
+ return ret;
+}
+
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info)
{
struct hnae3_handle *nic = &vport->nic;
struct hclge_vlan_info *old_vlan_info;
- struct hclge_dev *hdev = vport->back;
int ret;
old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
@@ -10503,38 +10104,12 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
goto out;
- if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
- /* add new VLAN tag */
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(vlan_info->vlan_proto),
- vport->vport_id,
- vlan_info->vlan_tag,
- false);
- if (ret)
- return ret;
-
- /* remove old VLAN tag */
- if (old_vlan_info->vlan_tag == 0)
- ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
- true, 0);
- else
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(ETH_P_8021Q),
- vport->vport_id,
- old_vlan_info->vlan_tag,
- true);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to clear vport%u port base vlan %u, ret = %d.\n",
- vport->vport_id, old_vlan_info->vlan_tag, ret);
- return ret;
- }
-
- goto out;
- }
-
- ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
- old_vlan_info);
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
+ ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
+ old_vlan_info);
+ else
+ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+ old_vlan_info);
if (ret)
return ret;
@@ -10881,11 +10456,11 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
+ struct hclge_comm_tqp *tqp;
struct hnae3_queue *queue;
- struct hclge_tqp *tqp;
queue = handle->kinfo.tqp[queue_id];
- tqp = container_of(queue, struct hclge_tqp, q);
+ tqp = container_of(queue, struct hclge_comm_tqp, q);
return tqp->index;
}
@@ -11442,10 +11017,11 @@ static int hclge_dev_mem_map(struct hclge_dev *hdev)
if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
return 0;
- hw->mem_base = devm_ioremap_wc(&pdev->dev,
- pci_resource_start(pdev, HCLGE_MEM_BAR),
- pci_resource_len(pdev, HCLGE_MEM_BAR));
- if (!hw->mem_base) {
+ hw->hw.mem_base =
+ devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, HCLGE_MEM_BAR),
+ pci_resource_len(pdev, HCLGE_MEM_BAR));
+ if (!hw->hw.mem_base) {
dev_err(&pdev->dev, "failed to map device memory\n");
return -EFAULT;
}
@@ -11484,8 +11060,8 @@ static int hclge_pci_init(struct hclge_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
- hw->io_base = pcim_iomap(pdev, 2, 0);
- if (!hw->io_base) {
+ hw->hw.io_base = pcim_iomap(pdev, 2, 0);
+ if (!hw->hw.io_base) {
dev_err(&pdev->dev, "Can't map configuration register space\n");
ret = -ENOMEM;
goto err_clr_master;
@@ -11500,7 +11076,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
return 0;
err_unmap_io_base:
- pcim_iounmap(pdev, hdev->hw.io_base);
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
err_clr_master:
pci_clear_master(pdev);
pci_release_regions(pdev);
@@ -11514,10 +11090,10 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
- if (hdev->hw.mem_base)
- devm_iounmap(&pdev->dev, hdev->hw.mem_base);
+ if (hdev->hw.hw.mem_base)
+ devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
- pcim_iounmap(pdev, hdev->hw.io_base);
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_free_irq_vectors(pdev);
pci_clear_master(pdev);
pci_release_mem_regions(pdev);
@@ -11556,29 +11132,25 @@ static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
int retry_cnt = 0;
int ret;
-retry:
- down(&hdev->reset_sem);
- set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- hdev->reset_type = rst_type;
- ret = hclge_reset_prepare(hdev);
- if (ret || hdev->reset_pending) {
- dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
- ret);
- if (hdev->reset_pending ||
- retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
- dev_err(&hdev->pdev->dev,
- "reset_pending:0x%lx, retry_cnt:%d\n",
- hdev->reset_pending, retry_cnt);
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
- msleep(HCLGE_RESET_RETRY_WAIT_MS);
- goto retry;
- }
+ while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = rst_type;
+ ret = hclge_reset_prepare(hdev);
+ if (!ret && !hdev->reset_pending)
+ break;
+
+ dev_err(&hdev->pdev->dev,
+ "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
+ ret, hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGE_RESET_RETRY_WAIT_MS);
}
/* disable misc vector before reset is done */
hclge_enable_vector(&hdev->misc_vector, false);
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
if (hdev->reset_type == HNAE3_FLR_RESET)
hdev->rst_stats.flr_rst_cnt++;
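The refactor above replaces the old backwards "goto retry" with a bounded while loop; its generic shape, with assumed names, is:

static int example_bounded_retry(int (*prepare)(void), int max_retry,
				 unsigned int wait_ms)
{
	int cnt = 0, ret = -EBUSY;

	while (cnt++ < max_retry) {
		ret = prepare();	/* attempt the operation */
		if (!ret)
			break;		/* success: stop retrying */
		msleep(wait_ms);	/* back off before the next try */
	}

	return ret;
}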
@@ -11683,12 +11255,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_pci_uninit;
/* Firmware command queue initialize */
- ret = hclge_cmd_queue_init(hdev);
+ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
goto err_devlink_uninit;
/* Firmware command initialize */
- ret = hclge_cmd_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+ true, hdev->reset_pending);
if (ret)
goto err_cmd_uninit;
@@ -11776,7 +11349,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
- ret = hclge_rss_init_cfg(hdev);
+ ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
+ &hdev->rss_cfg);
if (ret) {
dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
goto err_mdiobus_unreg;
@@ -11861,11 +11435,11 @@ err_msi_irq_uninit:
err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
- hclge_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_devlink_uninit:
hclge_devlink_uninit(hdev);
err_pci_uninit:
- pcim_iounmap(pdev, hdev->hw.io_base);
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -12112,7 +11686,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_reset_umv_space(hdev);
}
- ret = hclge_cmd_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+ true, hdev->reset_pending);
if (ret) {
dev_err(&pdev->dev, "Cmd queue init failed\n");
return ret;
@@ -12252,7 +11827,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_config_nic_hw_error(hdev, false);
hclge_config_rocee_ras_interrupt(hdev, false);
- hclge_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
hclge_misc_irq_uninit(hdev);
hclge_devlink_uninit(hdev);
hclge_pci_uninit(hdev);
@@ -12288,19 +11863,43 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
*max_rss_size = hdev->pf_rss_size_max;
}
+static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+ struct hclge_dev *hdev = vport->back;
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
+ u16 tc_valid[HCLGE_MAX_TC_NUM];
+ u16 roundup_size;
+ unsigned int i;
+
+ roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
+ roundup_size = ilog2(roundup_size);
+ /* Set the RSS TC mode according to the new RSS size */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = vport->nic.kinfo.rss_size * i;
+ }
+
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
+ tc_size);
+}
+
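The helper above pads rss_size to a power of two and stores its log2: with rss_size = 24, roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so each enabled TC advertises a 2^5-queue region. A one-line sketch (needs <linux/log2.h>):

static u16 example_tc_size(u16 rss_size)
{
	/* log2 of the next power of two >= rss_size */
	return ilog2(roundup_pow_of_two(rss_size));
}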
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
bool rxfh_configured)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
- u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
struct hclge_dev *hdev = vport->back;
- u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
u16 cur_rss_size = kinfo->rss_size;
u16 cur_tqps = kinfo->num_tqps;
- u16 tc_valid[HCLGE_MAX_TC_NUM];
- u16 roundup_size;
u32 *rss_indir;
unsigned int i;
int ret;
@@ -12313,20 +11912,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
return ret;
}
- roundup_size = roundup_pow_of_two(kinfo->rss_size);
- roundup_size = ilog2(roundup_size);
- /* Set the RSS TC mode according to the new RSS size */
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- tc_valid[i] = 0;
-
- if (!(hdev->hw_tc_map & BIT(i)))
- continue;
-
- tc_valid[i] = 1;
- tc_size[i] = roundup_size;
- tc_offset[i] = kinfo->rss_size * i;
- }
- ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ ret = hclge_set_rss_tc_mode_cfg(handle);
if (ret)
return ret;
@@ -12508,7 +12094,7 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
true);
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
/* initialize the last command BD */
@@ -12552,7 +12138,7 @@ static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
hclge_cmd_setup_basic_desc(desc, cmd, true);
for (i = 0; i < bd_num - 1; i++) {
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
desc++;
hclge_cmd_setup_basic_desc(desc, cmd, true);
}
@@ -12985,7 +12571,7 @@ static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
/* bd0~bd4 need next flag */
if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
/* setup bd0, this bd contains offset and read length. */
@@ -13095,7 +12681,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.check_port_speed = hclge_check_port_speed,
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
- .get_rss_key_size = hclge_get_rss_key_size,
+ .get_rss_key_size = hclge_comm_get_rss_key_size,
.get_rss = hclge_get_rss,
.set_rss = hclge_set_rss,
.set_rss_tuple = hclge_set_rss_tuple,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ebba603483a0..adfb26e79262 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -13,6 +13,8 @@
#include "hclge_cmd.h"
#include "hclge_ptp.h"
#include "hnae3.h"
+#include "hclge_comm_rss.h"
+#include "hclge_comm_tqp_stats.h"
#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"
@@ -38,20 +40,7 @@
#define HCLGE_VECTOR_REG_OFFSET_H 0x1000
#define HCLGE_VECTOR_VF_OFFSET 0x100000
-#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000
-#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004
#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008
-#define HCLGE_NIC_CSQ_TAIL_REG 0x27010
-#define HCLGE_NIC_CSQ_HEAD_REG 0x27014
-#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018
-#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701C
-#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
-#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
-#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
-
-#define HCLGE_CMDQ_INTR_STS_REG 0x27104
-#define HCLGE_CMDQ_INTR_EN_REG 0x27108
-#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
/* bar registers for common func */
#define HCLGE_GRO_EN_REG 0x28000
@@ -93,22 +82,6 @@
#define HCLGE_TQP_INTR_RL_REG 0x20900
#define HCLGE_RSS_IND_TBL_SIZE 512
-#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
-#define HCLGE_RSS_KEY_SIZE 40
-#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
-#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
-#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
-#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
-
-#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
-#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
-#define HCLGE_D_PORT_BIT BIT(0)
-#define HCLGE_S_PORT_BIT BIT(1)
-#define HCLGE_D_IP_BIT BIT(2)
-#define HCLGE_S_IP_BIT BIT(3)
-#define HCLGE_V_TAG_BIT BIT(4)
-#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
- (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
@@ -228,7 +201,6 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_MBX_HANDLING,
HCLGE_STATE_ERR_SERVICE_SCHED,
HCLGE_STATE_STATISTICS_UPDATING,
- HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_LINK_UPDATING,
HCLGE_STATE_RST_FAIL,
HCLGE_STATE_FD_TBL_CHANGED,
@@ -294,31 +266,9 @@ struct hclge_mac {
};
struct hclge_hw {
- void __iomem *io_base;
- void __iomem *mem_base;
+ struct hclge_comm_hw hw;
struct hclge_mac mac;
int num_vec;
- struct hclge_cmq cmq;
-};
-
-/* TQP stats */
-struct hlcge_tqp_stats {
- /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
- u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
- /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
- u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
-};
-
-struct hclge_tqp {
- /* copy of device pointer from pci_dev,
- * used when performing DMA mapping
- */
- struct device *dev;
- struct hnae3_queue q;
- struct hlcge_tqp_stats tqp_stats;
- u16 index; /* Global index in a NIC controller */
-
- bool alloced;
};
enum hclge_fc_mode {
@@ -641,6 +591,11 @@ struct key_info {
#define MAX_FD_FILTER_NUM 4096
#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL
+#define hclge_read_dev(a, reg) \
+ hclge_comm_read_reg((a)->hw.io_base, reg)
+#define hclge_write_dev(a, reg, value) \
+ hclge_comm_write_reg((a)->hw.io_base, reg, value)
+
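Usage sketch for the rebased accessors: callers still pass &hdev->hw, and the macro hops through the embedded common struct, e.g.:

static u32 example_read_csq_depth(struct hclge_dev *hdev)
{
	/* expands to hclge_comm_read_reg(hdev->hw.hw.io_base, ...) */
	return hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
}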
enum HCLGE_FD_ACTIVE_RULE_TYPE {
HCLGE_FD_RULE_NONE,
HCLGE_FD_ARFS_ACTIVE,
@@ -920,7 +875,7 @@ struct hclge_dev {
bool cur_promisc;
int num_alloc_vfs; /* Actual number of VFs allocated */
- struct hclge_tqp *htqp;
+ struct hclge_comm_tqp *htqp;
struct hclge_vport *vport;
struct dentry *hclge_dbgfs;
@@ -955,6 +910,8 @@ struct hclge_dev {
u16 hclge_fd_rule_num;
unsigned long serv_processed_cnt;
unsigned long last_serv_processed;
+ unsigned long last_rst_scheduled;
+ unsigned long last_mbx_scheduled;
unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
u8 fd_en;
@@ -977,6 +934,7 @@ struct hclge_dev {
cpumask_t affinity_mask;
struct hclge_ptp *ptp;
struct devlink *devlink;
+ struct hclge_comm_rss_cfg rss_cfg;
};
/* VPort level vlan tag configuration for TX direction */
@@ -1003,17 +961,6 @@ struct hclge_rx_vtag_cfg {
bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */
};
-struct hclge_rss_tuple_cfg {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
-};
-
enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_ALIVE,
HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
@@ -1047,15 +994,6 @@ struct hclge_vf_info {
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
- u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
- /* User configured lookup table entries */
- u16 *rss_indirection_tbl;
- int rss_algo; /* User configured hash algorithm */
- /* User configured rss tuple sets */
- struct hclge_rss_tuple_cfg rss_tuple_sets;
-
- u16 alloc_rss_size;
-
u16 qs_offset;
u32 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
@@ -1093,6 +1031,11 @@ struct hclge_speed_bit_map {
u32 speed_bit;
};
+struct hclge_mac_speed_map {
+ u32 speed_drv; /* speed defined in driver */
+ u32 speed_fw; /* speed defined in firmware */
+};
+
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
@@ -1111,7 +1054,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
- struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
+ struct hclge_comm_tqp *tqp =
+ container_of(queue, struct hclge_comm_tqp, q);
return tqp->index;
}
@@ -1129,7 +1073,6 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
-void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 65d78ee4d65a..6799d16de34b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -4,6 +4,7 @@
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
+#include "hclge_comm_rss.h"
#define CREATE_TRACE_POINTS
#include "hclge_trace.h"
@@ -33,7 +34,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
{
struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
struct hclge_dev *hdev = vport->back;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
struct hclge_desc desc;
u16 resp;
@@ -90,7 +91,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
{
struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
struct hclge_dev *hdev = vport->back;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
struct hclge_desc desc;
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
@@ -181,7 +182,7 @@ static int hclge_get_ring_chain_from_mbx(
if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
req->msg.param[i].tqp_index,
- vport->nic.kinfo.rss_size - 1);
+ vport->nic.kinfo.rss_size - 1U);
return -EINVAL;
}
}
@@ -612,15 +613,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
{
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
+ struct hclge_comm_rss_cfg *rss_cfg;
u8 index;
index = mbx_req->msg.data[0];
+ rss_cfg = &hdev->rss_cfg;
/* Check the query index of rss_hash_key from VF, make sure it
* does not exceed the size of rss_hash_key.
*/
if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
- sizeof(vport[0].rss_hash_key)) {
+ sizeof(rss_cfg->rss_hash_key)) {
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
@@ -628,7 +631,7 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
}
memcpy(resp_msg->data,
- &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
+ &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
}
@@ -661,9 +664,9 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
- u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
+ u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
- return tail == hw->cmq.crq.next_to_use;
+ return tail == hw->hw.cmq.crq.next_to_use;
}
static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
@@ -694,7 +697,7 @@ static void hclge_handle_vf_tbl(struct hclge_vport *vport,
void hclge_mbx_handler(struct hclge_dev *hdev)
{
- struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
+ struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq;
struct hclge_respond_to_vf_msg resp_msg;
struct hclge_mbx_vf_to_pf_cmd *req;
struct hclge_vport *vport;
@@ -705,7 +708,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state)) {
dev_warn(&hdev->pdev->dev,
"command queue needs re-initializing\n");
return;
@@ -848,6 +852,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
resp_msg.status = ret;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "resp vport%u mbx(%u,%u) late\n",
+ req->mbx_src_vfid,
+ req->msg.code,
+ req->msg.subcode);
+
hclge_gen_resp_to_vf(vport, req, &resp_msg);
}
@@ -859,5 +871,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
+ hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ crq->next_to_use);
}
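The warning added to hclge_mbx_handler() uses time_is_before_jiffies() so the lateness test stays correct when jiffies wraps. A minimal userspace sketch of the underlying signed-subtraction idiom (deadline_passed() and the tick values are illustrative, not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool deadline_passed(uint32_t now, uint32_t deadline)
{
        /* signed difference keeps the comparison valid across wrap */
        return (int32_t)(deadline - now) < 0;
}

int main(void)
{
        uint32_t scheduled = 0xfffffff0u;       /* just before the counter wraps */
        uint32_t timeout = 0x40;                /* ticks allowed before "late" */

        printf("at 0x20: late=%d\n", deadline_passed(0x20, scheduled + timeout));
        printf("at 0x80: late=%d\n", deadline_passed(0x80, scheduled + timeout));
        return 0;
}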
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 1231c34f0949..63d2be4349e3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -47,7 +47,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
struct hclge_desc desc;
int ret;
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
@@ -85,7 +85,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
struct hclge_desc desc;
int ret;
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index fd0e20190b90..4200d0b6d931 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -4,6 +4,10 @@
#ifndef __HCLGE_MDIO_H
#define __HCLGE_MDIO_H
+#include "hnae3.h"
+
+struct hclge_dev;
+
int hclge_mac_mdio_config(struct hclge_dev *hdev);
int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
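hclge_mdio.h here (and hclge_ptp.h and hclge_tm.h below) now forward-declares struct hclge_dev instead of dragging in its full definition, which is enough because the header only passes pointers. A tiny compilable sketch of the pattern, with made-up names:

#include <stdio.h>

/* what the header provides: a forward declaration is enough,
 * since only pointers to struct bar cross this interface */
struct bar;
int foo_start(struct bar *owner);

/* the full definition lives in the owning module */
struct bar { int id; };

int foo_start(struct bar *owner)
{
        return owner->id;
}

int main(void)
{
        struct bar b = { 42 };

        printf("%d\n", foo_start(&b));
        return 0;
}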
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index befa9bcc2f2f..a40b1583f114 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -464,7 +464,7 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
}
spin_lock_init(&ptp->lock);
- ptp->io_base = hdev->hw.io_base + HCLGE_PTP_REG_OFFSET;
+ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
hdev->ptp = ptp;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index 7a9b77de632a..bbee74cd8404 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -8,6 +8,9 @@
#include <linux/net_tstamp.h>
#include <linux/types.h>
+struct hclge_dev;
+struct ifreq;
+
#define HCLGE_PTP_REG_OFFSET 0x29000
#define HCLGE_PTP_TX_TS_SEQID_REG 0x0
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 429652a8cde1..089f4444b7e3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -678,8 +678,8 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
hclge_tm_update_kinfo_rss_size(vport);
kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
vport->dwrr = 100; /* 100 percent as init */
- vport->alloc_rss_size = kinfo->rss_size;
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
+ hdev->rss_cfg.rss_size = kinfo->rss_size;
/* when enable mqprio, the tc_info has been updated. */
if (kinfo->tc_info.mqprio_active)
@@ -916,38 +916,63 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
return 0;
}
-static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
+ u16 i, k;
int ret;
- u32 i, k;
- if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
- /* Cfg qs -> pri mapping, one by one mapping */
- for (k = 0; k < hdev->num_alloc_vport; k++) {
- struct hnae3_knic_private_info *kinfo =
- &vport[k].nic.kinfo;
-
- for (i = 0; i < kinfo->tc_info.num_tc; i++) {
- ret = hclge_tm_qs_to_pri_map_cfg(
- hdev, vport[k].qs_offset + i, i);
- if (ret)
- return ret;
- }
+ /* Cfg qs -> pri mapping, one by one mapping */
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
+
+ for (i = 0; i < kinfo->tc_info.num_tc; i++) {
+ ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+ vport[k].qs_offset + i,
+ i);
+ if (ret)
+ return ret;
}
- } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
- /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
- for (k = 0; k < hdev->num_alloc_vport; k++)
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- ret = hclge_tm_qs_to_pri_map_cfg(
- hdev, vport[k].qs_offset + i, k);
- if (ret)
- return ret;
- }
- } else {
- return -EINVAL;
}
+ return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u16 i, k;
+ int ret;
+
+ /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
+ for (k = 0; k < hdev->num_alloc_vport; k++)
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+ vport[k].qs_offset + i,
+ k);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u32 i;
+
+ if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
+ ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
+ else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
+ ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
+ else
+ return -EINVAL;
+
+ if (ret)
+ return ret;
+
/* Cfg q -> qs mapping */
for (i = 0; i < hdev->num_alloc_vport; i++) {
ret = hclge_vport_q_to_qs_map(hdev, vport);
@@ -1274,6 +1299,27 @@ static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
return 0;
}
+static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u16 i;
+
+ ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_tm_qs_schd_mode_cfg(hdev,
+ vport[i].qs_offset + pri_id,
+ HCLGE_SCH_MODE_DWRR);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
@@ -1304,21 +1350,13 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
int ret;
- u8 i, k;
+ u8 i;
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
+ ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
if (ret)
return ret;
-
- for (k = 0; k < hdev->num_alloc_vport; k++) {
- ret = hclge_tm_qs_schd_mode_cfg(
- hdev, vport[k].qs_offset + i,
- HCLGE_SCH_MODE_DWRR);
- if (ret)
- return ret;
- }
}
} else {
for (i = 0; i < hdev->num_alloc_vport; i++) {
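The hclge_tm_pri_q_qs_cfg() rework above is a straightforward extract-and-dispatch refactor: one helper per scheduling mode, with the common q -> qs mapping left in the caller. A compilable miniature of that shape (all names invented for illustration):

#include <stdio.h>

enum sch_mode { SCH_TC_BASE, SCH_VNET_BASE };

static int cfg_tc_base(void)
{
        puts("qs -> pri, mapped one by one");
        return 0;
}

static int cfg_vnet_base(void)
{
        puts("8 qs -> 1 pri, one pri per vf");
        return 0;
}

static int cfg_pri_q_qs(enum sch_mode mode)
{
        int ret;

        if (mode == SCH_TC_BASE)
                ret = cfg_tc_base();
        else if (mode == SCH_VNET_BASE)
                ret = cfg_vnet_base();
        else
                return -1;      /* -EINVAL in the driver */

        if (ret)
                return ret;

        /* common q -> qs mapping would follow here */
        return 0;
}

int main(void)
{
        return cfg_pri_q_qs(SCH_TC_BASE);
}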
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 1db7f40b4525..619cc30a2dfc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -6,6 +6,12 @@
#include <linux/types.h>
+#include "hnae3.h"
+
+struct hclge_dev;
+struct hclge_vport;
+enum hclge_opcode_type;
+
/* MAC Pause */
#define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0)
#define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
deleted file mode 100644
index 51ff7d86ee90..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0+
-#
-# Makefile for the HISILICON network device drivers.
-#
-
-ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
-ccflags-y += -I $(srctree)/$(src)
-
-obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
-hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o hclgevf_devlink.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
deleted file mode 100644
index e605c2c5bcce..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ /dev/null
@@ -1,556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-// Copyright (c) 2016-2017 Hisilicon Limited.
-
-#include <linux/device.h>
-#include <linux/dma-direction.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include "hclgevf_cmd.h"
-#include "hclgevf_main.h"
-#include "hnae3.h"
-
-#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
-
-static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
-{
- int ntc = ring->next_to_clean;
- int ntu = ring->next_to_use;
- int used;
-
- used = (ntu - ntc + ring->desc_num) % ring->desc_num;
-
- return ring->desc_num - used - 1;
-}
-
-static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
- int head)
-{
- int ntu = ring->next_to_use;
- int ntc = ring->next_to_clean;
-
- if (ntu > ntc)
- return head >= ntc && head <= ntu;
-
- return head >= ntc || head <= ntu;
-}
-
-static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
-{
- struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
- struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- int clean;
- u32 head;
-
- head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
- rmb(); /* Make sure head is ready before touch any data */
-
- if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
- csq->next_to_use, csq->next_to_clean);
- dev_warn(&hdev->pdev->dev,
- "Disabling any further commands to IMP firmware\n");
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
- return -EIO;
- }
-
- clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
- csq->next_to_clean = head;
- return clean;
-}
-
-static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
-{
- u32 head;
-
- head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
-
- return head == hw->cmq.csq.next_to_use;
-}
-
-static bool hclgevf_is_special_opcode(u16 opcode)
-{
- const u16 spec_opcode[] = {0x30, 0x31, 0x32};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
- if (spec_opcode[i] == opcode)
- return true;
- }
-
- return false;
-}
-
-static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
-{
- struct hclgevf_dev *hdev = ring->dev;
- struct hclgevf_hw *hw = &hdev->hw;
- u32 reg_val;
-
- if (ring->flag == HCLGEVF_TYPE_CSQ) {
- reg_val = lower_32_bits(ring->desc_dma_addr);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
- reg_val = upper_32_bits(ring->desc_dma_addr);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
-
- reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
- reg_val &= HCLGEVF_NIC_SW_RST_RDY;
- reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
- } else {
- reg_val = lower_32_bits(ring->desc_dma_addr);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
- reg_val = upper_32_bits(ring->desc_dma_addr);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
-
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
- }
-}
-
-static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
-{
- hclgevf_cmd_config_regs(&hw->cmq.csq);
- hclgevf_cmd_config_regs(&hw->cmq.crq);
-}
-
-static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclgevf_desc);
-
- ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
- &ring->desc_dma_addr, GFP_KERNEL);
- if (!ring->desc)
- return -ENOMEM;
-
- return 0;
-}
-
-static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclgevf_desc);
-
- if (ring->desc) {
- dma_free_coherent(cmq_ring_to_dev(ring), size,
- ring->desc, ring->desc_dma_addr);
- ring->desc = NULL;
- }
-}
-
-static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
-{
- struct hclgevf_hw *hw = &hdev->hw;
- struct hclgevf_cmq_ring *ring =
- (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
- int ret;
-
- ring->dev = hdev;
- ring->flag = ring_type;
-
- /* allocate CSQ/CRQ descriptor */
- ret = hclgevf_alloc_cmd_desc(ring);
- if (ret)
- dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
- (ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
-
- return ret;
-}
-
-void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
- enum hclgevf_opcode_type opcode, bool is_read)
-{
- memset(desc, 0, sizeof(struct hclgevf_desc));
- desc->opcode = cpu_to_le16(opcode);
- desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
- HCLGEVF_CMD_FLAG_IN);
- if (is_read)
- desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
- else
- desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
-}
-
-struct vf_errcode {
- u32 imp_errcode;
- int common_errno;
-};
-
-static void hclgevf_cmd_copy_desc(struct hclgevf_hw *hw,
- struct hclgevf_desc *desc, int num)
-{
- struct hclgevf_desc *desc_to_use;
- int handle = 0;
-
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
-}
-
-static int hclgevf_cmd_convert_err_code(u16 desc_ret)
-{
- struct vf_errcode hclgevf_cmd_errcode[] = {
- {HCLGEVF_CMD_EXEC_SUCCESS, 0},
- {HCLGEVF_CMD_NO_AUTH, -EPERM},
- {HCLGEVF_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
- {HCLGEVF_CMD_QUEUE_FULL, -EXFULL},
- {HCLGEVF_CMD_NEXT_ERR, -ENOSR},
- {HCLGEVF_CMD_UNEXE_ERR, -ENOTBLK},
- {HCLGEVF_CMD_PARA_ERR, -EINVAL},
- {HCLGEVF_CMD_RESULT_ERR, -ERANGE},
- {HCLGEVF_CMD_TIMEOUT, -ETIME},
- {HCLGEVF_CMD_HILINK_ERR, -ENOLINK},
- {HCLGEVF_CMD_QUEUE_ILLEGAL, -ENXIO},
- {HCLGEVF_CMD_INVALID, -EBADR},
- };
- u32 errcode_count = ARRAY_SIZE(hclgevf_cmd_errcode);
- u32 i;
-
- for (i = 0; i < errcode_count; i++)
- if (hclgevf_cmd_errcode[i].imp_errcode == desc_ret)
- return hclgevf_cmd_errcode[i].common_errno;
-
- return -EIO;
-}
-
-static int hclgevf_cmd_check_retval(struct hclgevf_hw *hw,
- struct hclgevf_desc *desc, int num, int ntc)
-{
- u16 opcode, desc_ret;
- int handle;
-
- opcode = le16_to_cpu(desc[0].opcode);
- for (handle = 0; handle < num; handle++) {
- /* Get the result of hardware write back */
- desc[handle] = hw->cmq.csq.desc[ntc];
- ntc++;
- if (ntc == hw->cmq.csq.desc_num)
- ntc = 0;
- }
- if (likely(!hclgevf_is_special_opcode(opcode)))
- desc_ret = le16_to_cpu(desc[num - 1].retval);
- else
- desc_ret = le16_to_cpu(desc[0].retval);
- hw->cmq.last_status = desc_ret;
-
- return hclgevf_cmd_convert_err_code(desc_ret);
-}
-
-static int hclgevf_cmd_check_result(struct hclgevf_hw *hw,
- struct hclgevf_desc *desc, int num, int ntc)
-{
- struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
- bool is_completed = false;
- u32 timeout = 0;
- int handle, ret;
-
- /* If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclgevf_cmd_csq_done(hw)) {
- is_completed = true;
- break;
- }
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (!is_completed)
- ret = -EBADE;
- else
- ret = hclgevf_cmd_check_retval(hw, desc, num, ntc);
-
- /* Clean the command send queue */
- handle = hclgevf_cmd_csq_clean(hw);
- if (handle < 0)
- ret = handle;
- else if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
- return ret;
-}
-
-/* hclgevf_cmd_send - send command to command queue
- * @hw: pointer to the hw struct
- * @desc: prefilled descriptor for describing the command
- * @num : the number of descriptors to be sent
- *
- * This is the main send command for command queue, it
- * sends the queue, cleans the queue, etc
- */
-int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
-{
- struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
- struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- int ret;
- int ntc;
-
- spin_lock_bh(&hw->cmq.csq.lock);
-
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- if (num > hclgevf_ring_space(&hw->cmq.csq)) {
- /* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
- * need update the SW HEAD pointer csq->next_to_clean
- */
- csq->next_to_clean = hclgevf_read_dev(hw,
- HCLGEVF_NIC_CSQ_HEAD_REG);
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- /* Record the location of desc in the ring for this time
- * which will be used for hardware write back
- */
- ntc = hw->cmq.csq.next_to_use;
-
- hclgevf_cmd_copy_desc(hw, desc, num);
-
- /* Write to hardware */
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
- hw->cmq.csq.next_to_use);
-
- ret = hclgevf_cmd_check_result(hw, desc, num, ntc);
-
- spin_unlock_bh(&hw->cmq.csq.lock);
-
- return ret;
-}
-
-static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-
- set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
-}
-
-static const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = {
- {HCLGEVF_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
- {HCLGEVF_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
- {HCLGEVF_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
- {HCLGEVF_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
- {HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
- {HCLGEVF_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
-};
-
-static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
- struct hclgevf_query_version_cmd *cmd)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- u32 caps, i;
-
- caps = __le32_to_cpu(cmd->caps[0]);
- for (i = 0; i < ARRAY_SIZE(hclgevf_cmd_caps_bit_map0); i++)
- if (hnae3_get_bit(caps, hclgevf_cmd_caps_bit_map0[i].imp_bit))
- set_bit(hclgevf_cmd_caps_bit_map0[i].local_bit,
- ae_dev->caps);
-}
-
-static __le32 hclgevf_build_api_caps(void)
-{
- u32 api_caps = 0;
-
- hnae3_set_bit(api_caps, HCLGEVF_API_CAP_FLEX_RSS_TBL_B, 1);
-
- return cpu_to_le32(api_caps);
-}
-
-static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- struct hclgevf_query_version_cmd *resp;
- struct hclgevf_desc desc;
- int status;
-
- resp = (struct hclgevf_query_version_cmd *)desc.data;
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
- resp->api_caps = hclgevf_build_api_caps();
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status)
- return status;
-
- hdev->fw_version = le32_to_cpu(resp->firmware);
-
- ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
- HNAE3_PCI_REVISION_BIT_SIZE;
- ae_dev->dev_version |= hdev->pdev->revision;
-
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
- hclgevf_set_default_capability(hdev);
-
- hclgevf_parse_capability(hdev, resp);
-
- return status;
-}
-
-int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
-{
- int ret;
-
- /* Setup the lock for command queue */
- spin_lock_init(&hdev->hw.cmq.csq.lock);
- spin_lock_init(&hdev->hw.cmq.crq.lock);
-
- hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
- hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
- hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
-
- ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CSQ ring setup error %d\n", ret);
- return ret;
- }
-
- ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CRQ ring setup error %d\n", ret);
- goto err_csq;
- }
-
- return 0;
-err_csq:
- hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
- return ret;
-}
-
-static int hclgevf_firmware_compat_config(struct hclgevf_dev *hdev, bool en)
-{
- struct hclgevf_firmware_compat_cmd *req;
- struct hclgevf_desc desc;
- u32 compat = 0;
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_IMP_COMPAT_CFG, false);
-
- if (en) {
- req = (struct hclgevf_firmware_compat_cmd *)desc.data;
-
- hnae3_set_bit(compat, HCLGEVF_SYNC_RX_RING_HEAD_EN_B, 1);
-
- req->compat = cpu_to_le32(compat);
- }
-
- return hclgevf_cmd_send(&hdev->hw, &desc, 1);
-}
-
-int hclgevf_cmd_init(struct hclgevf_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- int ret;
-
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
-
- /* initialize the pointers of async rx queue of mailbox */
- hdev->arq.hdev = hdev;
- hdev->arq.head = 0;
- hdev->arq.tail = 0;
- atomic_set(&hdev->arq.count, 0);
- hdev->hw.cmq.csq.next_to_clean = 0;
- hdev->hw.cmq.csq.next_to_use = 0;
- hdev->hw.cmq.crq.next_to_clean = 0;
- hdev->hw.cmq.crq.next_to_use = 0;
-
- hclgevf_cmd_init_regs(&hdev->hw);
-
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
-
- /* Check if there is new reset pending, because the higher level
- * reset may happen when lower level reset is being processed.
- */
- if (hclgevf_is_reset_pending(hdev)) {
- ret = -EBUSY;
- goto err_cmd_init;
- }
-
- /* get version and device capabilities */
- ret = hclgevf_cmd_query_version_and_capability(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to query version and capabilities, ret = %d\n", ret);
- goto err_cmd_init;
- }
-
- dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
- HNAE3_FW_VERSION_BYTE3_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
- HNAE3_FW_VERSION_BYTE2_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
- HNAE3_FW_VERSION_BYTE1_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
- HNAE3_FW_VERSION_BYTE0_SHIFT));
-
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
- /* ask the firmware to enable some features, driver can work
- * without it.
- */
- ret = hclgevf_firmware_compat_config(hdev, true);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "Firmware compatible features not enabled(%d).\n",
- ret);
- }
-
- return 0;
-
-err_cmd_init:
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
-
- return ret;
-}
-
-static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
-{
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
-}
-
-void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
-{
- hclgevf_firmware_compat_config(hdev, false);
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
- /* wait to ensure that the firmware completes the possible left
- * over commands.
- */
- msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME);
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
- hclgevf_cmd_uninit_regs(&hdev->hw);
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
- hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
-}
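The deleted hclgevf_ring_space() and hclgevf_cmd_csq_done() above embody the usual one-slot-reserved ring arithmetic (head == tail means empty), which presumably carries over into the hclge_comm layer this series introduces. A standalone sketch of the math (DESC_NUM and the indices are example values):

#include <stdio.h>

#define DESC_NUM 8

static int ring_space(int ntc, int ntu)
{
        int used = (ntu - ntc + DESC_NUM) % DESC_NUM;

        return DESC_NUM - used - 1;     /* one slot kept free */
}

static int ring_empty(int head, int ntu)
{
        return head == ntu;             /* hw head caught up with sw tail */
}

int main(void)
{
        /* software has queued 3 descriptors past the cleaned index */
        printf("space=%d empty=%d\n", ring_space(2, 5), ring_empty(5, 5));
        return 0;
}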
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index edc9e154061a..537b887fa0a2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -6,9 +6,8 @@
#include <linux/io.h>
#include <linux/types.h>
#include "hnae3.h"
+#include "hclge_comm_cmd.h"
-#define HCLGEVF_CMDQ_TX_TIMEOUT 30000
-#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME 200
#define HCLGEVF_CMDQ_RX_INVLD_B 0
#define HCLGEVF_CMDQ_RX_OUTVLD_B 1
@@ -16,107 +15,6 @@ struct hclgevf_hw;
struct hclgevf_dev;
#define HCLGEVF_SYNC_RX_RING_HEAD_EN_B 4
-struct hclgevf_firmware_compat_cmd {
- __le32 compat;
- u8 rsv[20];
-};
-
-struct hclgevf_desc {
- __le16 opcode;
- __le16 flag;
- __le16 retval;
- __le16 rsv;
- __le32 data[6];
-};
-
-struct hclgevf_desc_cb {
- dma_addr_t dma;
- void *va;
- u32 length;
-};
-
-struct hclgevf_cmq_ring {
- dma_addr_t desc_dma_addr;
- struct hclgevf_desc *desc;
- struct hclgevf_desc_cb *desc_cb;
- struct hclgevf_dev *dev;
- u32 head;
- u32 tail;
-
- u16 buf_size;
- u16 desc_num;
- int next_to_use;
- int next_to_clean;
- u8 flag;
- spinlock_t lock; /* Command queue lock */
-};
-
-enum hclgevf_cmd_return_status {
- HCLGEVF_CMD_EXEC_SUCCESS = 0,
- HCLGEVF_CMD_NO_AUTH = 1,
- HCLGEVF_CMD_NOT_SUPPORTED = 2,
- HCLGEVF_CMD_QUEUE_FULL = 3,
- HCLGEVF_CMD_NEXT_ERR = 4,
- HCLGEVF_CMD_UNEXE_ERR = 5,
- HCLGEVF_CMD_PARA_ERR = 6,
- HCLGEVF_CMD_RESULT_ERR = 7,
- HCLGEVF_CMD_TIMEOUT = 8,
- HCLGEVF_CMD_HILINK_ERR = 9,
- HCLGEVF_CMD_QUEUE_ILLEGAL = 10,
- HCLGEVF_CMD_INVALID = 11,
-};
-
-enum hclgevf_cmd_status {
- HCLGEVF_STATUS_SUCCESS = 0,
- HCLGEVF_ERR_CSQ_FULL = -1,
- HCLGEVF_ERR_CSQ_TIMEOUT = -2,
- HCLGEVF_ERR_CSQ_ERROR = -3
-};
-
-struct hclgevf_cmq {
- struct hclgevf_cmq_ring csq;
- struct hclgevf_cmq_ring crq;
- u16 tx_timeout; /* Tx timeout */
- enum hclgevf_cmd_status last_status;
-};
-
-#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0
-#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1
-#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2
-#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3
-#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4
-#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5
-
-#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
-#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
-#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
-#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
-#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
-#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)
-
-enum hclgevf_opcode_type {
- /* Generic command */
- HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
- HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
- HCLGEVF_OPC_QUERY_DEV_SPECS = 0x0050,
-
- /* TQP command */
- HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
- HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
- HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
- /* GRO command */
- HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
- /* RSS cmd */
- HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
- HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
- HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
- HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
- /* Mailbox cmd */
- HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
-
- /* IMP stats command */
- HCLGEVF_OPC_IMP_COMPAT_CFG = 0x701A,
-};
#define HCLGEVF_TQP_REG_OFFSET 0x80000
#define HCLGEVF_TQP_REG_SIZE 0x200
@@ -156,34 +54,6 @@ struct hclgevf_ctrl_vector_chain {
u8 resv;
};
-enum HCLGEVF_CAP_BITS {
- HCLGEVF_CAP_UDP_GSO_B,
- HCLGEVF_CAP_QB_B,
- HCLGEVF_CAP_FD_FORWARD_TC_B,
- HCLGEVF_CAP_PTP_B,
- HCLGEVF_CAP_INT_QL_B,
- HCLGEVF_CAP_HW_TX_CSUM_B,
- HCLGEVF_CAP_TX_PUSH_B,
- HCLGEVF_CAP_PHY_IMP_B,
- HCLGEVF_CAP_TQP_TXRX_INDEP_B,
- HCLGEVF_CAP_HW_PAD_B,
- HCLGEVF_CAP_STASH_B,
- HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
- HCLGEVF_CAP_RXD_ADV_LAYOUT_B = 15,
-};
-
-enum HCLGEVF_API_CAP_BITS {
- HCLGEVF_API_CAP_FLEX_RSS_TBL_B,
-};
-
-#define HCLGEVF_QUERY_CAP_LENGTH 3
-struct hclgevf_query_version_cmd {
- __le32 firmware;
- __le32 hardware;
- __le32 api_caps;
- __le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
-};
-
#define HCLGEVF_MSIX_OFT_ROCEE_S 0
#define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S)
#define HCLGEVF_VEC_NUM_S 0
@@ -203,50 +73,6 @@ struct hclgevf_cfg_gro_status_cmd {
u8 rsv[23];
};
-#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
-#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
-#define HCLGEVF_RSS_HASH_KEY_NUM 16
-struct hclgevf_rss_config_cmd {
- u8 hash_config;
- u8 rsv[7];
- u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
-};
-
-struct hclgevf_rss_input_tuple_cmd {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
- u8 rsv[16];
-};
-
-#define HCLGEVF_RSS_CFG_TBL_SIZE 16
-
-struct hclgevf_rss_indirection_table_cmd {
- __le16 start_table_index;
- __le16 rss_set_bitmap;
- u8 rsv[4];
- u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
-};
-
-#define HCLGEVF_RSS_TC_OFFSET_S 0
-#define HCLGEVF_RSS_TC_OFFSET_M GENMASK(10, 0)
-#define HCLGEVF_RSS_TC_SIZE_MSB_B 11
-#define HCLGEVF_RSS_TC_SIZE_S 12
-#define HCLGEVF_RSS_TC_SIZE_M GENMASK(14, 12)
-#define HCLGEVF_RSS_TC_VALID_B 15
-#define HCLGEVF_MAX_TC_NUM 8
-#define HCLGEVF_RSS_TC_SIZE_MSB_OFFSET 3
-
-struct hclgevf_rss_tc_mode_cmd {
- __le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
- u8 rsv[8];
-};
-
#define HCLGEVF_LINK_STS_B 0
#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B)
struct hclgevf_link_status_cmd {
@@ -273,9 +99,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
u8 rsv[14];
};
-#define HCLGEVF_TYPE_CRQ 0
-#define HCLGEVF_TYPE_CSQ 1
-
/* this bit indicates that the driver is ready for hardware reset */
#define HCLGEVF_NIC_SW_RST_RDY_B 16
#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B)
@@ -285,6 +108,9 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
+#define hclgevf_cmd_setup_basic_desc(desc, opcode, is_read) \
+ hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read)
+
struct hclgevf_dev_specs_0_cmd {
__le32 rsv0;
__le32 mac_entry_num;
@@ -305,38 +131,6 @@ struct hclgevf_dev_specs_1_cmd {
u8 rsv1[18];
};
-/* capabilities bits map between imp firmware and local driver */
-struct hclgevf_caps_bit_map {
- u16 imp_bit;
- u16 local_bit;
-};
-
-static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
-{
- writel(value, base + reg);
-}
-
-static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
-{
- u8 __iomem *reg_addr = READ_ONCE(base);
-
- return readl(reg_addr + reg);
-}
-
-#define hclgevf_write_dev(a, reg, value) \
- hclgevf_write_reg((a)->io_base, reg, value)
-#define hclgevf_read_dev(a, reg) \
- hclgevf_read_reg((a)->io_base, reg)
-
-#define HCLGEVF_SEND_SYNC(flag) \
- ((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
-
-int hclgevf_cmd_init(struct hclgevf_dev *hdev);
-void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
-int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
-
-int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
-void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
- enum hclgevf_opcode_type opcode,
- bool is_read);
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num);
+void hclgevf_arq_init(struct hclgevf_dev *hdev);
#endif
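The return-status enum removed from this header was consumed by the (also deleted) table-driven hclgevf_cmd_convert_err_code(), which maps firmware codes to host errnos and collapses anything unknown to -EIO. A minimal userspace sketch of that idiom (firmware codes abbreviated):

#include <errno.h>
#include <stdio.h>

struct errcode_map {
        unsigned int fw_code;
        int host_errno;
};

static int fw_to_errno(unsigned int fw_code)
{
        static const struct errcode_map map[] = {
                { 0, 0 },               /* EXEC_SUCCESS */
                { 1, -EPERM },          /* NO_AUTH */
                { 2, -EOPNOTSUPP },     /* NOT_SUPPORTED */
                { 8, -ETIME },          /* TIMEOUT */
        };
        unsigned int i;

        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
                if (map[i].fw_code == fw_code)
                        return map[i].host_errno;

        return -EIO;                    /* unknown codes collapse to -EIO */
}

int main(void)
{
        printf("%d %d\n", fw_to_errno(2), fw_to_errno(42));
        return 0;
}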
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 41afaeea881b..21442a9bb996 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -9,6 +9,7 @@
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"
+#include "hclge_comm_rss.h"
#define HCLGEVF_NAME "hclgevf"
@@ -30,30 +31,22 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = {
{0, }
};
-static const u8 hclgevf_hash_key[] = {
- 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
- 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
- 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
- 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
- 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
-};
-
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGEVF_NIC_CSQ_BASEADDR_L_REG,
- HCLGEVF_NIC_CSQ_BASEADDR_H_REG,
- HCLGEVF_NIC_CSQ_DEPTH_REG,
- HCLGEVF_NIC_CSQ_TAIL_REG,
- HCLGEVF_NIC_CSQ_HEAD_REG,
- HCLGEVF_NIC_CRQ_BASEADDR_L_REG,
- HCLGEVF_NIC_CRQ_BASEADDR_H_REG,
- HCLGEVF_NIC_CRQ_DEPTH_REG,
- HCLGEVF_NIC_CRQ_TAIL_REG,
- HCLGEVF_NIC_CRQ_HEAD_REG,
- HCLGEVF_VECTOR0_CMDQ_SRC_REG,
- HCLGEVF_VECTOR0_CMDQ_STATE_REG,
- HCLGEVF_CMDQ_INTR_EN_REG,
- HCLGEVF_CMDQ_INTR_GEN_REG};
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
HCLGEVF_RST_ING,
@@ -92,109 +85,40 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
HCLGEVF_TQP_INTR_GL2_REG,
HCLGEVF_TQP_INTR_RL_REG};
-static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
+/* hclgevf_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num : the number of descriptors to be sent
+ *
+ * This is the main send command for command queue, it
+ * sends the queue, cleans the queue, etc
+ */
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
{
- if (!handle->client)
- return container_of(handle, struct hclgevf_dev, nic);
- else if (handle->client->type == HNAE3_CLIENT_ROCE)
- return container_of(handle, struct hclgevf_dev, roce);
- else
- return container_of(handle, struct hclgevf_dev, nic);
+ return hclge_comm_cmd_send(&hw->hw, desc, num);
}
-static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
+void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_desc desc;
- struct hclgevf_tqp *tqp;
- int status;
- int i;
+ struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
- hclgevf_cmd_setup_basic_desc(&desc,
- HCLGEVF_OPC_QUERY_RX_STATUS,
- true);
-
- desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- status, i);
- return status;
- }
- tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc.data[1]);
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
- true);
-
- desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- status, i);
- return status;
- }
- tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc.data[1]);
- }
-
- return 0;
+ spin_lock(&cmdq->crq.lock);
+ /* initialize the pointers of async rx queue of mailbox */
+ hdev->arq.hdev = hdev;
+ hdev->arq.head = 0;
+ hdev->arq.tail = 0;
+ atomic_set(&hdev->arq.count, 0);
+ spin_unlock(&cmdq->crq.lock);
}
-static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclgevf_tqp *tqp;
- u64 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
- }
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
- }
-
- return buff;
-}
-
-static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
-
- return kinfo->num_tqps * 2;
-}
-
-static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- u8 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
- struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
- tqp->index);
- buff += ETH_GSTRING_LEN;
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
- struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
- tqp->index);
- buff += ETH_GSTRING_LEN;
- }
-
- return buff;
+ if (!handle->client)
+ return container_of(handle, struct hclgevf_dev, nic);
+ else if (handle->client->type == HNAE3_CLIENT_ROCE)
+ return container_of(handle, struct hclgevf_dev, roce);
+ else
+ return container_of(handle, struct hclgevf_dev, nic);
}
static void hclgevf_update_stats(struct hnae3_handle *handle,
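The hunk above shows the central pattern of this refactor: struct hclgevf_hw embeds the common hw struct (hence the hdev->hw.hw spelling throughout), and hclgevf_cmd_send() shrinks to a one-line wrapper over hclge_comm_cmd_send(). A compilable miniature with invented names:

#include <stdio.h>

struct common_hw { void *io_base; };

struct vf_hw {
        struct common_hw hw;    /* common state embedded first */
        int vf_only_state;
};

static int common_cmd_send(struct common_hw *hw, int opcode)
{
        printf("common layer sends opcode 0x%x\n", opcode);
        return 0;
}

/* former VF entry point, now a thin wrapper over the common layer */
static int vf_cmd_send(struct vf_hw *hw, int opcode)
{
        return common_cmd_send(&hw->hw, opcode);
}

int main(void)
{
        struct vf_hw hw = { { 0 }, 0 };

        return vf_cmd_send(&hw, 0x0001);
}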
@@ -203,7 +127,7 @@ static void hclgevf_update_stats(struct hnae3_handle *handle,
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int status;
- status = hclgevf_tqps_update_stats(handle);
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status)
dev_err(&hdev->pdev->dev,
"VF update of TQPS stats fail, status = %d.\n",
@@ -215,7 +139,7 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
if (strset == ETH_SS_TEST)
return -EOPNOTSUPP;
else if (strset == ETH_SS_STATS)
- return hclgevf_tqps_get_sset_count(handle, strset);
+ return hclge_comm_tqps_get_sset_count(handle);
return 0;
}
@@ -226,12 +150,12 @@ static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
u8 *p = (char *)data;
if (strset == ETH_SS_STATS)
- p = hclgevf_tqps_get_strings(handle, p);
+ p = hclge_comm_tqps_get_strings(handle, p);
}
static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
- hclgevf_tqps_get_stats(handle, data);
+ hclge_comm_tqps_get_stats(handle, data);
}
static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
@@ -397,11 +321,11 @@ static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
- struct hclgevf_tqp *tqp;
+ struct hclge_comm_tqp *tqp;
int i;
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
- sizeof(struct hclgevf_tqp), GFP_KERNEL);
+ sizeof(struct hclge_comm_tqp), GFP_KERNEL);
if (!hdev->htqp)
return -ENOMEM;
@@ -420,11 +344,11 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
* HCLGEVF_TQP_MAX_SIZE_DEV_V2.
*/
if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGEVF_TQP_REG_OFFSET +
i * HCLGEVF_TQP_REG_SIZE;
else
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGEVF_TQP_REG_OFFSET +
HCLGEVF_TQP_EXT_REG_OFFSET +
(i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
@@ -448,7 +372,7 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
kinfo->num_tx_desc = hdev->num_tx_desc;
kinfo->num_rx_desc = hdev->num_rx_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
- for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
+ for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
if (hdev->hw_tc_map & BIT(i))
num_tc++;
@@ -539,7 +463,7 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
nic->pdev = hdev->pdev;
nic->numa_node_mask = hdev->numa_node_mask;
nic->flags |= HNAE3_SUPPORT_VF;
- nic->kinfo.io_base = hdev->hw.io_base;
+ nic->kinfo.io_base = hdev->hw.hw.io_base;
ret = hclgevf_knic_setup(hdev);
if (ret)
@@ -576,7 +500,7 @@ static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
vector->vector = pci_irq_vector(hdev->pdev, i);
- vector->io_addr = hdev->hw.io_base +
+ vector->io_addr = hdev->hw.hw.io_base +
HCLGEVF_VECTOR_REG_BASE +
(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
hdev->vector_status[i] = 0;
@@ -606,137 +530,11 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
return -EINVAL;
}
-static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
- const u8 hfunc, const u8 *key)
-{
- struct hclgevf_rss_config_cmd *req;
- unsigned int key_offset = 0;
- struct hclgevf_desc desc;
- int key_counts;
- int key_size;
- int ret;
-
- key_counts = HCLGEVF_RSS_KEY_SIZE;
- req = (struct hclgevf_rss_config_cmd *)desc.data;
-
- while (key_counts) {
- hclgevf_cmd_setup_basic_desc(&desc,
- HCLGEVF_OPC_RSS_GENERIC_CONFIG,
- false);
-
- req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
- req->hash_config |=
- (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
-
- key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
- memcpy(req->hash_key,
- key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
-
- key_counts -= key_size;
- key_offset++;
- ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Configure RSS config fail, status = %d\n",
- ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
-{
- return HCLGEVF_RSS_KEY_SIZE;
-}
-
-static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
-{
- const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
- struct hclgevf_rss_indirection_table_cmd *req;
- struct hclgevf_desc desc;
- int rss_cfg_tbl_num;
- int status;
- int i, j;
-
- req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
- rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
- HCLGEVF_RSS_CFG_TBL_SIZE;
-
- for (i = 0; i < rss_cfg_tbl_num; i++) {
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
- false);
- req->start_table_index =
- cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
- req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
- for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
- req->rss_result[j] =
- indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
-
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "VF failed(=%d) to set RSS indirection table\n",
- status);
- return status;
- }
- }
-
- return 0;
-}
-
-static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
-{
- struct hclgevf_rss_tc_mode_cmd *req;
- u16 tc_offset[HCLGEVF_MAX_TC_NUM];
- u16 tc_valid[HCLGEVF_MAX_TC_NUM];
- u16 tc_size[HCLGEVF_MAX_TC_NUM];
- struct hclgevf_desc desc;
- u16 roundup_size;
- unsigned int i;
- int status;
-
- req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
-
- roundup_size = roundup_pow_of_two(rss_size);
- roundup_size = ilog2(roundup_size);
-
- for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- tc_valid[i] = 1;
- tc_size[i] = roundup_size;
- tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
- }
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
- for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- u16 mode = 0;
-
- hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
- (tc_valid[i] & 0x1));
- hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
- HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
- hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
- tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET &
- 0x1);
- hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
- HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
-
- req->rss_tc_mode[i] = cpu_to_le16(mode);
- }
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status)
- dev_err(&hdev->pdev->dev,
- "VF failed(=%d) to set rss tc mode\n", status);
-
- return status;
-}
-
/* for revision 0x20, vf shared the same rss config with pf */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN 8
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
struct hclge_vf_to_pf_msg send_msg;
u16 msg_num, hash_key_index;
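The removed hclgevf_set_rss_tc_mode() above encoded each TC's queue span as ilog2(roundup_pow_of_two(rss_size)); the common-layer replacement keeps that math. A standalone sketch of the two helpers for 32-bit values (valid for v >= 1; names are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t roundup_pow_of_two32(uint32_t v)
{
        v--;
        v |= v >> 1; v |= v >> 2; v |= v >> 4;
        v |= v >> 8; v |= v >> 16;
        return v + 1;
}

static uint32_t ilog2_32(uint32_t v)
{
        uint32_t r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        uint32_t rss_size = 12; /* example queue count */
        uint32_t tc_size = ilog2_32(roundup_pow_of_two32(rss_size));

        printf("rss_size=%u -> tc_size field=%u (covers %u queues)\n",
               rss_size, tc_size, 1u << tc_size);
        return 0;
}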
@@ -744,7 +542,7 @@ static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
int ret;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
- msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
+ msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
HCLGEVF_RSS_MBX_RESP_LEN;
for (index = 0; index < msg_num; index++) {
send_msg.data[0] = index;
@@ -761,7 +559,7 @@ static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
if (index == msg_num - 1)
memcpy(&rss_cfg->rss_hash_key[hash_key_index],
&resp_msg[0],
- HCLGEVF_RSS_KEY_SIZE - hash_key_index);
+ HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
else
memcpy(&rss_cfg->rss_hash_key[hash_key_index],
&resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
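hclgevf_get_rss_hash_key() above pulls the 40-byte hash key over the mailbox in 8-byte responses: msg_num is a ceiling division and the final memcpy() may be shorter than a full chunk. A self-contained sketch of that chunking (buffer contents are dummies):

#include <stdio.h>
#include <string.h>

#define KEY_SIZE  40
#define CHUNK_LEN 8

int main(void)
{
        unsigned char key[KEY_SIZE];
        unsigned char resp[CHUNK_LEN] = "chunk!!";
        unsigned int msg_num = (KEY_SIZE + CHUNK_LEN - 1) / CHUNK_LEN;
        unsigned int index, off, len;

        for (index = 0; index < msg_num; index++) {
                off = index * CHUNK_LEN;
                /* final chunk may be shorter than CHUNK_LEN */
                len = (index == msg_num - 1) ? KEY_SIZE - off : CHUNK_LEN;
                memcpy(&key[off], resp, len);
        }
        printf("copied %u chunks into %u bytes\n", msg_num, KEY_SIZE);
        return 0;
}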
@@ -774,29 +572,11 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
u8 *hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- int i, ret;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int ret;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- /* Get hash algorithm */
- if (hfunc) {
- switch (rss_cfg->hash_algo) {
- case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
- *hfunc = ETH_RSS_HASH_TOP;
- break;
- case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
- *hfunc = ETH_RSS_HASH_XOR;
- break;
- default:
- *hfunc = ETH_RSS_HASH_UNKNOWN;
- break;
- }
- }
-
- /* Get the RSS Key required by the user */
- if (key)
- memcpy(key, rss_cfg->rss_hash_key,
- HCLGEVF_RSS_KEY_SIZE);
+ hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
} else {
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
@@ -805,67 +585,28 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
if (ret)
return ret;
memcpy(key, rss_cfg->rss_hash_key,
- HCLGEVF_RSS_KEY_SIZE);
+ HCLGE_COMM_RSS_KEY_SIZE);
}
}
- if (indir)
- for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
- indir[i] = rss_cfg->rss_indirection_tbl[i];
+ hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
+ hdev->ae_dev->dev_specs.rss_ind_tbl_size);
return 0;
}
-static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
- u8 *hash_algo)
-{
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
- return 0;
- case ETH_RSS_HASH_XOR:
- *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
- return 0;
- case ETH_RSS_HASH_NO_CHANGE:
- *hash_algo = hdev->rss_cfg.hash_algo;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- u8 hash_algo;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret, i;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
+ ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
+ hfunc);
if (ret)
return ret;
-
- /* Set the RSS Hash Key if specified by the user */
- if (key) {
- ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "invalid hfunc type %u\n", hfunc);
- return ret;
- }
-
- /* Update the shadow RSS key with user specified qids */
- memcpy(rss_cfg->rss_hash_key, key,
- HCLGEVF_RSS_KEY_SIZE);
- } else {
- ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
- rss_cfg->rss_hash_key);
- if (ret)
- return ret;
- }
- rss_cfg->hash_algo = hash_algo;
}
/* update the shadow RSS table with user specified qids */
@@ -873,179 +614,26 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
rss_cfg->rss_indirection_tbl[i] = indir[i];
/* update the hardware */
- return hclgevf_set_rss_indir_table(hdev);
-}
-
-static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
-{
- u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;
-
- if (nfc->data & RXH_L4_B_2_3)
- hash_sets |= HCLGEVF_D_PORT_BIT;
- else
- hash_sets &= ~HCLGEVF_D_PORT_BIT;
-
- if (nfc->data & RXH_IP_SRC)
- hash_sets |= HCLGEVF_S_IP_BIT;
- else
- hash_sets &= ~HCLGEVF_S_IP_BIT;
-
- if (nfc->data & RXH_IP_DST)
- hash_sets |= HCLGEVF_D_IP_BIT;
- else
- hash_sets &= ~HCLGEVF_D_IP_BIT;
-
- if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
- hash_sets |= HCLGEVF_V_TAG_BIT;
-
- return hash_sets;
-}
-
-static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc,
- struct hclgevf_rss_input_tuple_cmd *req)
-{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- u8 tuple_sets;
-
- req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
-
- tuple_sets = hclgevf_get_rss_hash_bits(nfc);
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- req->ipv4_tcp_en = tuple_sets;
- break;
- case TCP_V6_FLOW:
- req->ipv6_tcp_en = tuple_sets;
- break;
- case UDP_V4_FLOW:
- req->ipv4_udp_en = tuple_sets;
- break;
- case UDP_V6_FLOW:
- req->ipv6_udp_en = tuple_sets;
- break;
- case SCTP_V4_FLOW:
- req->ipv4_sctp_en = tuple_sets;
- break;
- case SCTP_V6_FLOW:
- if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
- (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
- return -EINVAL;
-
- req->ipv6_sctp_en = tuple_sets;
- break;
- case IPV4_FLOW:
- req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- break;
- case IPV6_FLOW:
- req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
}
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
struct ethtool_rxnfc *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- struct hclgevf_rss_input_tuple_cmd *req;
- struct hclgevf_desc desc;
int ret;
if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
- if (nfc->data &
- ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
-
- ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to init rss tuple cmd, ret = %d\n", ret);
- return ret;
- }
-
- ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->rss_cfg, nfc);
+ if (ret)
dev_err(&hdev->pdev->dev,
- "Set rss tuple fail, status = %d\n", ret);
- return ret;
- }
+ "failed to set rss tuple, ret = %d.\n", ret);
- rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
- rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
- rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
- rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
- rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
- rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
- rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
- rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
- return 0;
-}
-
-static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
- int flow_type, u8 *tuple_sets)
-{
- switch (flow_type) {
- case TCP_V4_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
- break;
- case UDP_V4_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
- break;
- case TCP_V6_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
- break;
- case UDP_V6_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
- break;
- case SCTP_V4_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
- break;
- case SCTP_V6_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
- break;
- case IPV4_FLOW:
- case IPV6_FLOW:
- *tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
-{
- u64 tuple_data = 0;
-
- if (tuple_sets & HCLGEVF_D_PORT_BIT)
- tuple_data |= RXH_L4_B_2_3;
- if (tuple_sets & HCLGEVF_S_PORT_BIT)
- tuple_data |= RXH_L4_B_0_1;
- if (tuple_sets & HCLGEVF_D_IP_BIT)
- tuple_data |= RXH_IP_DST;
- if (tuple_sets & HCLGEVF_S_IP_BIT)
- tuple_data |= RXH_IP_SRC;
-
- return tuple_data;
+ return ret;
}
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
@@ -1060,47 +648,20 @@ static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
nfc->data = 0;
- ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
- &tuple_sets);
+ ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
+ &tuple_sets);
if (ret || !tuple_sets)
return ret;
- nfc->data = hclgevf_convert_rss_tuple(tuple_sets);
+ nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
return 0;
}
-static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
- struct hclgevf_rss_cfg *rss_cfg)
-{
- struct hclgevf_rss_input_tuple_cmd *req;
- struct hclgevf_desc desc;
- int ret;
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
-
- req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
-
- req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
-
- ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Configure rss input fail, status = %d\n", ret);
- return ret;
-}
-
static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
return rss_cfg->rss_size;
}
@@ -1273,12 +834,11 @@ static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
u16 stream_id, bool enable)
{
struct hclgevf_cfg_com_tqp_queue_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_desc desc;
req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
- false);
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
req->stream_id = cpu_to_le16(stream_id);
if (enable)
@@ -1302,18 +862,6 @@ static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
return 0;
}
-static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclgevf_tqp *tqp;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
- memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
- }
-}
-
static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
struct hclge_vf_to_pf_msg send_msg;
@@ -1514,15 +1062,18 @@ static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
struct list_head *list,
enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclgevf_mac_addr_node *mac_node, *tmp;
int ret;
list_for_each_entry_safe(mac_node, tmp, list, node) {
ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
if (ret) {
+ hnae3_format_mac_addr(format_mac_addr,
+ mac_node->mac_addr);
dev_err(&hdev->pdev->dev,
- "failed to configure mac %pM, state = %d, ret = %d\n",
- mac_node->mac_addr, mac_node->state, ret);
+ "failed to configure mac %s, state = %d, ret = %d\n",
+ format_mac_addr, mac_node->state, ret);
return;
}
if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
@@ -1859,13 +1410,13 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
int ret;
if (hdev->reset_type == HNAE3_VF_RESET)
- ret = readl_poll_timeout(hdev->hw.io_base +
+ ret = readl_poll_timeout(hdev->hw.hw.io_base +
HCLGEVF_VF_RST_ING, val,
!(val & HCLGEVF_VF_RST_ING_BIT),
HCLGEVF_RESET_WAIT_US,
HCLGEVF_RESET_WAIT_TIMEOUT_US);
else
- ret = readl_poll_timeout(hdev->hw.io_base +
+ ret = readl_poll_timeout(hdev->hw.hw.io_base +
HCLGEVF_RST_ING, val,
!(val & HCLGEVF_RST_ING_BITS),
HCLGEVF_RESET_WAIT_US,
@@ -1891,13 +1442,13 @@ static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
u32 reg_val;
- reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+ reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
if (enable)
reg_val |= HCLGEVF_NIC_SW_RST_RDY;
else
reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+ hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
reg_val);
}
@@ -1948,7 +1499,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
hdev->rst_stats.vf_func_rst_cnt++;
}
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
/* inform hardware that preparatory work is done */
msleep(HCLGEVF_RESET_SYNC_TIME);
hclgevf_reset_handshake(hdev, true);
@@ -1977,9 +1528,9 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
- hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
+ hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG));
dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
- hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG));
+ hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG));
dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
@@ -2163,24 +1714,20 @@ static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
int retry_cnt = 0;
int ret;
-retry:
- down(&hdev->reset_sem);
- set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
- hdev->reset_type = rst_type;
- ret = hclgevf_reset_prepare(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
- ret);
- if (hdev->reset_pending ||
- retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
- dev_err(&hdev->pdev->dev,
- "reset_pending:0x%lx, retry_cnt:%d\n",
- hdev->reset_pending, retry_cnt);
- clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
- msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
- goto retry;
- }
+ while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
+ down(&hdev->reset_sem);
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = rst_type;
+ ret = hclgevf_reset_prepare(hdev);
+ if (!ret && !hdev->reset_pending)
+ break;
+
+ dev_err(&hdev->pdev->dev,
+ "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
+ ret, hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
}
/* disable misc vector before reset done */
@@ -2220,7 +1767,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
vector->vector_irq = pci_irq_vector(hdev->pdev,
HCLGEVF_MISC_VECTOR_NUM);
- vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
+ vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
/* vector status always valid for Vector 0 */
hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
@@ -2341,7 +1888,7 @@ static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
struct hclge_vf_to_pf_msg send_msg;
int ret;
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
return;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
@@ -2378,7 +1925,7 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
}
if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
- hclgevf_tqps_update_stats(handle);
+ hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
/* VF does not need to request link status when this bit is set, because
* PF will push its link status to VFs when link status changed.
@@ -2419,7 +1966,7 @@ static void hclgevf_service_task(struct work_struct *work)
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
- hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
+ hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
}
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
@@ -2429,14 +1976,14 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
/* fetch the events from their corresponding regs */
cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
- HCLGEVF_VECTOR0_CMDQ_STATE_REG);
+ HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
hdev->rst_stats.vf_rst_cnt++;
/* set up VF hardware reset status, its PF will clear
@@ -2496,8 +2043,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
break;
}
- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
- hclgevf_enable_vector(&hdev->misc_vector, true);
+ hclgevf_enable_vector(&hdev->misc_vector, true);
return IRQ_HANDLED;
}
@@ -2560,8 +2106,8 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
roce->rinfo.base_vector = hdev->roce_base_msix_offset;
roce->rinfo.netdev = nic->kinfo.netdev;
- roce->rinfo.roce_io_base = hdev->hw.io_base;
- roce->rinfo.roce_mem_base = hdev->hw.mem_base;
+ roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
+ roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
@@ -2573,13 +2119,13 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
static int hclgevf_config_gro(struct hclgevf_dev *hdev)
{
struct hclgevf_cfg_gro_status_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_desc desc;
int ret;
if (!hnae3_dev_gro_supported(hdev))
return 0;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
false);
req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
@@ -2593,71 +2139,37 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev)
return ret;
}
-static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
-{
- u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- struct hclgevf_rss_tuple_cfg *tuple_sets;
- u32 i;
-
- rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
- rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
- tuple_sets = &rss_cfg->rss_tuple_sets;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- u8 *rss_ind_tbl;
-
- rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
-
- rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
- sizeof(*rss_ind_tbl), GFP_KERNEL);
- if (!rss_ind_tbl)
- return -ENOMEM;
-
- rss_cfg->rss_indirection_tbl = rss_ind_tbl;
- memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
- HCLGEVF_RSS_KEY_SIZE);
-
- tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
- tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_sctp_en =
- hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
- HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
- HCLGEVF_RSS_INPUT_TUPLE_SCTP;
- tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- }
-
- /* Initialize RSS indirect table */
- for (i = 0; i < rss_ind_tbl_size; i++)
- rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
-
- return 0;
-}
-
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
int ret;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
- rss_cfg->rss_hash_key);
+ ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw,
+ rss_cfg->rss_algo,
+ rss_cfg->rss_hash_key);
if (ret)
return ret;
- ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
+ ret = hclge_comm_set_rss_input_tuple(&hdev->nic, &hdev->hw.hw,
+ false, rss_cfg);
if (ret)
return ret;
}
- ret = hclgevf_set_rss_indir_table(hdev);
+ ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
if (ret)
return ret;
- return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
+ hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map,
+ tc_offset, tc_valid, tc_size);
+
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
+ tc_valid, tc_size);
}
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
@@ -2711,7 +2223,7 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
- hclgevf_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev);
@@ -2729,7 +2241,7 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
if (hdev->reset_type != HNAE3_VF_RESET)
hclgevf_reset_tqp(handle);
- hclgevf_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclgevf_update_link_status(hdev, 0);
}
@@ -3043,11 +2555,11 @@ static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
return 0;
- hw->mem_base = devm_ioremap_wc(&pdev->dev,
- pci_resource_start(pdev,
- HCLGEVF_MEM_BAR),
- pci_resource_len(pdev, HCLGEVF_MEM_BAR));
- if (!hw->mem_base) {
+ hw->hw.mem_base =
+ devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, HCLGEVF_MEM_BAR),
+ pci_resource_len(pdev, HCLGEVF_MEM_BAR));
+ if (!hw->hw.mem_base) {
dev_err(&pdev->dev, "failed to map device memory\n");
return -EFAULT;
}
@@ -3081,9 +2593,8 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
- hw->hdev = hdev;
- hw->io_base = pci_iomap(pdev, 2, 0);
- if (!hw->io_base) {
+ hw->hw.io_base = pci_iomap(pdev, 2, 0);
+ if (!hw->hw.io_base) {
dev_err(&pdev->dev, "can't map configuration register space\n");
ret = -ENOMEM;
goto err_clr_master;
@@ -3096,7 +2607,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
return 0;
err_unmap_io_base:
- pci_iounmap(pdev, hdev->hw.io_base);
+ pci_iounmap(pdev, hdev->hw.hw.io_base);
err_clr_master:
pci_clear_master(pdev);
pci_release_regions(pdev);
@@ -3110,10 +2621,10 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
- if (hdev->hw.mem_base)
- devm_iounmap(&pdev->dev, hdev->hw.mem_base);
+ if (hdev->hw.hw.mem_base)
+ devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
- pci_iounmap(pdev, hdev->hw.io_base);
+ pci_iounmap(pdev, hdev->hw.hw.io_base);
pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -3122,10 +2633,10 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
struct hclgevf_query_res_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_desc desc;
int ret;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true);
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3179,13 +2690,13 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
ae_dev->dev_specs.max_non_tso_bd_num =
HCLGEVF_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
- ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
- struct hclgevf_desc *desc)
+ struct hclge_desc *desc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclgevf_dev_specs_0_cmd *req0;
@@ -3212,7 +2723,7 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
if (!dev_specs->rss_ind_tbl_size)
dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
- dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+ dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
if (!dev_specs->max_frm_size)
@@ -3221,7 +2732,7 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
{
- struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
+ struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
int ret;
int i;
@@ -3235,11 +2746,10 @@ static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
hclgevf_cmd_setup_basic_desc(&desc[i],
- HCLGEVF_OPC_QUERY_DEV_SPECS, true);
- desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
+ HCLGE_OPC_QUERY_DEV_SPECS, true);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
- hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
- true);
+ hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
if (ret)
@@ -3318,7 +2828,10 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
- ret = hclgevf_cmd_init(hdev);
+ hclgevf_arq_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->fw_version, false,
+ hdev->reset_pending);
if (ret) {
dev_err(&pdev->dev, "cmd failed %d\n", ret);
return ret;
@@ -3364,11 +2877,14 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
goto err_devlink_init;
- ret = hclgevf_cmd_queue_init(hdev);
+ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
goto err_cmd_queue_init;
- ret = hclgevf_cmd_init(hdev);
+ hclgevf_arq_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->fw_version, false,
+ hdev->reset_pending);
if (ret)
goto err_cmd_init;
@@ -3421,7 +2937,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
/* Initialize RSS for this VF */
- ret = hclgevf_rss_init_cfg(hdev);
+ ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev,
+ &hdev->rss_cfg);
if (ret) {
dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
goto err_config;
@@ -3468,7 +2985,7 @@ err_misc_irq_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
err_cmd_init:
- hclgevf_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_cmd_queue_init:
hclgevf_devlink_uninit(hdev);
err_devlink_init:
@@ -3492,7 +3009,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
hclgevf_uninit_msi(hdev);
}
- hclgevf_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
hclgevf_devlink_uninit(hdev);
hclgevf_pci_uninit(hdev);
hclgevf_uninit_mac_list(hdev);
@@ -3595,6 +3112,9 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
u16 cur_rss_size = kinfo->rss_size;
u16 cur_tqps = kinfo->num_tqps;
u32 *rss_indir;
@@ -3603,7 +3123,10 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
hclgevf_update_rss_size(handle, new_tqps_num);
- ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
+ hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map,
+ tc_offset, tc_valid, tc_size);
+ ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
+ tc_valid, tc_size);
if (ret)
return ret;
@@ -3704,7 +3227,7 @@ static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
}
static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
@@ -3862,7 +3385,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.update_stats = hclgevf_update_stats,
.get_strings = hclgevf_get_strings,
.get_sset_count = hclgevf_get_sset_count,
- .get_rss_key_size = hclgevf_get_rss_key_size,
+ .get_rss_key_size = hclge_comm_get_rss_key_size,
.get_rss = hclgevf_get_rss,
.set_rss = hclgevf_set_rss,
.get_rss_tuple = hclgevf_get_rss_tuple,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index f6f736c0091c..502ca1ce1a90 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -10,6 +10,8 @@
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
#include "hnae3.h"
+#include "hclge_comm_rss.h"
+#include "hclge_comm_tqp_stats.h"
#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
@@ -32,21 +34,6 @@
#define HCLGEVF_VECTOR_REG_OFFSET 0x4
#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
-/* bar registers for cmdq */
-#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
-#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
-#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
-#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
-#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
-#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
-#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701C
-#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
-#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
-#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
-
-#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108
-#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C
-
/* bar registers for common func */
#define HCLGEVF_GRO_EN_REG 0x28000
#define HCLGEVF_RXD_ADV_LAYOUT_EN_REG 0x28008
@@ -86,10 +73,6 @@
#define HCLGEVF_TQP_INTR_GL2_REG 0x20300
#define HCLGEVF_TQP_INTR_RL_REG 0x20900
-/* Vector0 interrupt CMDQ event source register(RW) */
-#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
-/* Vector0 interrupt CMDQ event status register(RO) */
-#define HCLGEVF_VECTOR0_CMDQ_STATE_REG 0x27104
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
/* RST register bits for RESET event */
@@ -112,27 +95,16 @@
#define HCLGEVF_WAIT_RESET_DONE 100
#define HCLGEVF_RSS_IND_TBL_SIZE 512
-#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
-#define HCLGEVF_RSS_KEY_SIZE 40
-#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0
-#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
-#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
-#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
-
-#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
-#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
-#define HCLGEVF_D_PORT_BIT BIT(0)
-#define HCLGEVF_S_PORT_BIT BIT(1)
-#define HCLGEVF_D_IP_BIT BIT(2)
-#define HCLGEVF_S_IP_BIT BIT(3)
-#define HCLGEVF_V_TAG_BIT BIT(4)
-#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
- (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
#define HCLGEVF_MAC_MAX_FRAME 9728
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
+#define hclgevf_read_dev(a, reg) \
+ hclge_comm_read_reg((a)->hw.io_base, reg)
+#define hclgevf_write_dev(a, reg, value) \
+ hclge_comm_write_reg((a)->hw.io_base, reg, value)
+
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
HCLGEVF_VECTOR0_EVENT_MBX,
@@ -154,7 +126,6 @@ enum hclgevf_states {
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
- HCLGEVF_STATE_CMD_DISABLE,
HCLGEVF_STATE_LINK_UPDATING,
HCLGEVF_STATE_PROMISC_CHANGED,
HCLGEVF_STATE_RST_FAIL,
@@ -173,29 +144,9 @@ struct hclgevf_mac {
};
struct hclgevf_hw {
- void __iomem *io_base;
- void __iomem *mem_base;
+ struct hclge_comm_hw hw;
int num_vec;
- struct hclgevf_cmq cmq;
struct hclgevf_mac mac;
- void *hdev; /* hchgevf device it is part of */
-};
-
-/* TQP stats */
-struct hlcgevf_tqp_stats {
- /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
- u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
- /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
- u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
-};
-
-struct hclgevf_tqp {
- struct device *dev; /* device for DMA mapping */
- struct hnae3_queue q;
- struct hlcgevf_tqp_stats tqp_stats;
- u16 index; /* global index in a NIC controller */
-
- bool alloced;
};
struct hclgevf_cfg {
@@ -208,27 +159,6 @@ struct hclgevf_cfg {
u32 numa_node_map;
};
-struct hclgevf_rss_tuple_cfg {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
-};
-
-struct hclgevf_rss_cfg {
- u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
- u32 hash_algo;
- u32 rss_size;
- u8 hw_tc_map;
- /* shadow table */
- u8 *rss_indirection_tbl;
- struct hclgevf_rss_tuple_cfg rss_tuple_sets;
-};
-
struct hclgevf_misc_vector {
u8 __iomem *addr;
int vector_irq;
@@ -273,7 +203,7 @@ struct hclgevf_dev {
struct hnae3_ae_dev *ae_dev;
struct hclgevf_hw hw;
struct hclgevf_misc_vector misc_vector;
- struct hclgevf_rss_cfg rss_cfg;
+ struct hclge_comm_rss_cfg rss_cfg;
unsigned long state;
unsigned long flr_state;
unsigned long default_reset_request;
@@ -324,7 +254,7 @@ struct hclgevf_dev {
struct delayed_work service_task;
- struct hclgevf_tqp *htqp;
+ struct hclge_comm_tqp *htqp;
struct hnae3_handle nic;
struct hnae3_handle roce;
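Much of this header churn comes from struct hclgevf_hw now embedding a shared struct hclge_comm_hw, which is why call sites switch from hdev->hw.io_base to hdev->hw.hw.io_base. A rough standalone sketch of the layering (field sets abbreviated, not the exact kernel definitions):

#include <stdio.h>

struct hclge_comm_hw {             /* fields common to PF and VF drivers */
	void *io_base;
	void *mem_base;
};

struct hclgevf_hw {
	struct hclge_comm_hw hw;   /* shared part; reached as hdev->hw.hw */
	int num_vec;               /* VF-specific fields stay alongside it */
};

int main(void)
{
	struct hclgevf_hw hw = { .hw = { .io_base = (void *)0x2000 } };

	/* Common helpers take &hw.hw, so one implementation can serve
	 * both the PF (hclge) and VF (hclgevf) drivers. */
	printf("io_base=%p\n", hw.hw.io_base);
	return 0;
}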
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index fdc66fae0960..d5e0a3f762f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -53,7 +53,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
}
while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state))
return -EIO;
usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2);
@@ -97,7 +98,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
u8 *resp_data, u16 resp_len)
{
struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_desc desc;
int status;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
@@ -114,7 +115,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
- trace_hclge_vf_mbx_send(hdev, req);
+ if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state))
+ trace_hclge_vf_mbx_send(hdev, req);
/* synchronous send */
if (need_resp) {
@@ -150,9 +152,9 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
{
- u32 tail = hclgevf_read_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG);
+ u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
- return tail == hw->cmq.crq.next_to_use;
+ return tail == hw->hw.cmq.crq.next_to_use;
}
static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
@@ -211,14 +213,15 @@ static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
{
struct hclge_mbx_pf_to_vf_cmd *req;
- struct hclgevf_cmq_ring *crq;
- struct hclgevf_desc *desc;
+ struct hclge_comm_cmq_ring *crq;
+ struct hclge_desc *desc;
u16 flag;
- crq = &hdev->hw.cmq.crq;
+ crq = &hdev->hw.hw.cmq.crq;
while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state)) {
dev_info(&hdev->pdev->dev, "vf crq need init\n");
return;
}
@@ -268,7 +271,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
+ hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
crq->next_to_use);
}
@@ -295,7 +298,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
/* process all the async queue messages */
while (tail != hdev->arq.head) {
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state)) {
dev_info(&hdev->pdev->dev,
"vf crq need init in async\n");
return;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index a85667078b72..93192f58ac88 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -547,7 +547,9 @@ static void hinic_get_drvinfo(struct net_device *netdev,
}
static void hinic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
@@ -580,7 +582,9 @@ static int check_ringparam_valid(struct hinic_dev *nic_dev,
}
static int hinic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
u16 new_sq_depth, new_rq_depth;
@@ -1205,8 +1209,6 @@ static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
return HINIC_RSS_INDIR_SIZE;
}
-#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0])))
-
#define HINIC_FUNC_STAT(_stat_item) { \
.name = #_stat_item, \
.size = sizeof_field(struct hinic_vport_stats, _stat_item), \
@@ -1374,7 +1376,7 @@ static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
break;
hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
- for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++, i++) {
p = (char *)&txq_stats +
hinic_tx_queue_stats[j].offset;
data[i] = (hinic_tx_queue_stats[j].size ==
@@ -1387,7 +1389,7 @@ static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
break;
hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
- for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++, i++) {
p = (char *)&rxq_stats +
hinic_rx_queue_stats[j].offset;
data[i] = (hinic_rx_queue_stats[j].size ==
@@ -1411,7 +1413,7 @@ static void hinic_get_ethtool_stats(struct net_device *netdev,
netif_err(nic_dev, drv, netdev,
"Failed to get vport stats from firmware\n");
- for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_function_stats); j++, i++) {
p = (char *)&vport_stats + hinic_function_stats[j].offset;
data[i] = (hinic_function_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
@@ -1420,8 +1422,8 @@ static void hinic_get_ethtool_stats(struct net_device *netdev,
port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
if (!port_stats) {
memset(&data[i], 0,
- ARRAY_LEN(hinic_port_stats) * sizeof(*data));
- i += ARRAY_LEN(hinic_port_stats);
+ ARRAY_SIZE(hinic_port_stats) * sizeof(*data));
+ i += ARRAY_SIZE(hinic_port_stats);
goto get_drv_stats;
}
@@ -1430,7 +1432,7 @@ static void hinic_get_ethtool_stats(struct net_device *netdev,
netif_err(nic_dev, drv, netdev,
"Failed to get port stats from firmware\n");
- for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_port_stats); j++, i++) {
p = (char *)port_stats + hinic_port_stats[j].offset;
data[i] = (hinic_port_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
@@ -1449,14 +1451,14 @@ static int hinic_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_TEST:
- return ARRAY_LEN(hinic_test_strings);
+ return ARRAY_SIZE(hinic_test_strings);
case ETH_SS_STATS:
q_num = nic_dev->num_qps;
- count = ARRAY_LEN(hinic_function_stats) +
- (ARRAY_LEN(hinic_tx_queue_stats) +
- ARRAY_LEN(hinic_rx_queue_stats)) * q_num;
+ count = ARRAY_SIZE(hinic_function_stats) +
+ (ARRAY_SIZE(hinic_tx_queue_stats) +
+ ARRAY_SIZE(hinic_rx_queue_stats)) * q_num;
- count += ARRAY_LEN(hinic_port_stats);
+ count += ARRAY_SIZE(hinic_port_stats);
return count;
default:
@@ -1476,27 +1478,27 @@ static void hinic_get_strings(struct net_device *netdev,
memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
return;
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) {
memcpy(p, hinic_function_stats[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) {
memcpy(p, hinic_port_stats[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) {
sprintf(p, hinic_tx_queue_stats[j].name, i);
p += ETH_GSTRING_LEN;
}
}
for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) {
sprintf(p, hinic_rx_queue_stats[j].name, i);
p += ETH_GSTRING_LEN;
}
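The hinic_ethtool.c hunks drop the driver-local ARRAY_LEN() macro in favor of the kernel's ARRAY_SIZE(). Minus the kernel version's compile-time type check, that macro reduces to the classic idiom below (standalone sketch):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	int stats[7];

	/* sizeof(arr) is the whole array, sizeof(arr[0]) one element;
	 * the quotient is the element count, computed at compile time. */
	printf("%zu entries\n", ARRAY_SIZE(stats));   /* prints 7 */
	return 0;
}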
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index 06586173add7..998717f02136 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -814,7 +814,6 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
{
struct hinic_hwif *hwif = attr->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t cell_ctxt_size;
chain->hwif = hwif;
chain->chain_type = attr->chain_type;
@@ -826,8 +825,8 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
sema_init(&chain->sem, 1);
- cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
- chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL);
+ chain->cell_ctxt = devm_kcalloc(&pdev->dev, chain->num_cells,
+ sizeof(*chain->cell_ctxt), GFP_KERNEL);
if (!chain->cell_ctxt)
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 307a6d4af993..a627237f694b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -796,11 +796,10 @@ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
struct hinic_cmdq_ctxt *cmdq_ctxts;
struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- size_t cmdq_ctxts_size;
int err;
- cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
- cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
+ cmdq_ctxts = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdq_ctxts), GFP_KERNEL);
if (!cmdq_ctxts)
return -ENOMEM;
@@ -884,7 +883,6 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
struct pci_dev *pdev = hwif->pdev;
struct hinic_hwdev *hwdev;
- size_t saved_wqs_size;
u16 max_wqe_size;
int err;
@@ -895,8 +893,8 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
if (!cmdqs->cmdq_buf_pool)
return -ENOMEM;
- saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
- cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
+ cmdqs->saved_wqs = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdqs->saved_wqs), GFP_KERNEL);
if (!cmdqs->saved_wqs) {
err = -ENOMEM;
goto err_saved_wqs;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 657a15447bd0..2127a48749a8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -162,7 +162,6 @@ static int init_msix(struct hinic_hwdev *hwdev)
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
int nr_irqs, num_aeqs, num_ceqs;
- size_t msix_entries_size;
int i, err;
num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
@@ -171,8 +170,8 @@ static int init_msix(struct hinic_hwdev *hwdev)
if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif))
nr_irqs = HINIC_HWIF_NUM_IRQS(hwif);
- msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries);
- hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size,
+ hwdev->msix_entries = devm_kcalloc(&pdev->dev, nr_irqs,
+ sizeof(*hwdev->msix_entries),
GFP_KERNEL);
if (!hwdev->msix_entries)
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index d3fc05a07fdb..045c47786a04 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -631,16 +631,15 @@ static int alloc_eq_pages(struct hinic_eq *eq)
struct hinic_hwif *hwif = eq->hwif;
struct pci_dev *pdev = hwif->pdev;
u32 init_val, addr, val;
- size_t addr_size;
int err, pg;
- addr_size = eq->num_pages * sizeof(*eq->dma_addr);
- eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
+ eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->dma_addr), GFP_KERNEL);
if (!eq->dma_addr)
return -ENOMEM;
- addr_size = eq->num_pages * sizeof(*eq->virt_addr);
- eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
+ eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->virt_addr), GFP_KERNEL);
if (!eq->virt_addr) {
err = -ENOMEM;
goto err_virt_addr_alloc;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index a6e43d686293..c4a0ba6e183a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -375,31 +375,30 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
{
struct hinic_hwif *hwif = func_to_io->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t qps_size, wq_size, db_size;
void *ci_addr_base;
int i, j, err;
- qps_size = num_qps * sizeof(*func_to_io->qps);
- func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
+ func_to_io->qps = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->qps), GFP_KERNEL);
if (!func_to_io->qps)
return -ENOMEM;
- wq_size = num_qps * sizeof(*func_to_io->sq_wq);
- func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
+ func_to_io->sq_wq = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->sq_wq), GFP_KERNEL);
if (!func_to_io->sq_wq) {
err = -ENOMEM;
goto err_sq_wq;
}
- wq_size = num_qps * sizeof(*func_to_io->rq_wq);
- func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
+ func_to_io->rq_wq = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->rq_wq), GFP_KERNEL);
if (!func_to_io->rq_wq) {
err = -ENOMEM;
goto err_rq_wq;
}
- db_size = num_qps * sizeof(*func_to_io->sq_db);
- func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
+ func_to_io->sq_db = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->sq_db), GFP_KERNEL);
if (!func_to_io->sq_db) {
err = -ENOMEM;
goto err_sq_db;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 7f0f1aa3cedd..2d9b06d7caad 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -193,20 +193,20 @@ static int alloc_page_arrays(struct hinic_wqs *wqs)
{
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t size;
- size = wqs->num_pages * sizeof(*wqs->page_paddr);
- wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_paddr), GFP_KERNEL);
if (!wqs->page_paddr)
return -ENOMEM;
- size = wqs->num_pages * sizeof(*wqs->page_vaddr);
- wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_vaddr), GFP_KERNEL);
if (!wqs->page_vaddr)
goto err_page_vaddr;
- size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
- wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->shadow_page_vaddr),
+ GFP_KERNEL);
if (!wqs->shadow_page_vaddr)
goto err_page_shadow_vaddr;
@@ -379,15 +379,14 @@ static int alloc_wqes_shadow(struct hinic_wq *wq)
{
struct hinic_hwif *hwif = wq->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t size;
- size = wq->num_q_pages * wq->max_wqe_size;
- wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ wq->max_wqe_size, GFP_KERNEL);
if (!wq->shadow_wqe)
return -ENOMEM;
- size = wq->num_q_pages * sizeof(wq->prod_idx);
- wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ sizeof(wq->prod_idx), GFP_KERNEL);
if (!wq->shadow_idx)
goto err_shadow_idx;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index f9a766b8ac43..05329292d940 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -144,13 +144,12 @@ static int create_txqs(struct hinic_dev *nic_dev)
{
int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t txq_size;
if (nic_dev->txqs)
return -EINVAL;
- txq_size = num_txqs * sizeof(*nic_dev->txqs);
- nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL);
+ nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs,
+ sizeof(*nic_dev->txqs), GFP_KERNEL);
if (!nic_dev->txqs)
return -ENOMEM;
@@ -241,13 +240,12 @@ static int create_rxqs(struct hinic_dev *nic_dev)
{
int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t rxq_size;
if (nic_dev->rxqs)
return -EINVAL;
- rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
- nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL);
+ nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs,
+ sizeof(*nic_dev->rxqs), GFP_KERNEL);
if (!nic_dev->rxqs)
return -ENOMEM;
@@ -1394,12 +1392,8 @@ static int hinic_probe(struct pci_dev *pdev,
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "Failed to set DMA mask\n");
- goto err_dma_mask;
- }
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ goto err_dma_mask;
}
err = nic_dev_init(pdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index fed3b6bc0d76..b33ed4d92b71 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -548,7 +548,7 @@ static int rx_request_irq(struct hinic_rxq *rxq)
goto err_req_irq;
cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
- err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
+ err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask);
if (err)
goto err_irq_affinity;
@@ -565,7 +565,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
{
struct hinic_rq *rq = rxq->rq;
- irq_set_affinity_hint(rq->irq, NULL);
+ irq_update_affinity_hint(rq->irq, NULL);
free_irq(rq->irq, rxq);
rx_del_napi(rxq);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index c5bdb0d374ef..8d59babbf476 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -4,6 +4,7 @@
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*/
+#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
@@ -862,7 +863,6 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_hwdev *hwdev = nic_dev->hwdev;
int err, irqname_len;
- size_t sges_size;
txq->netdev = netdev;
txq->sq = sq;
@@ -871,13 +871,13 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
txq->max_sges = HINIC_MAX_SQ_BUFDESCS;
- sges_size = txq->max_sges * sizeof(*txq->sges);
- txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->sges), GFP_KERNEL);
if (!txq->sges)
return -ENOMEM;
- sges_size = txq->max_sges * sizeof(*txq->free_sges);
- txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->free_sges), GFP_KERNEL);
if (!txq->free_sges) {
err = -ENOMEM;
goto err_alloc_free_sges;
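The hinic hunks above repeatedly convert devm_kzalloc(dev, n * size, ...) into devm_kcalloc(dev, n, size, ...). The point is that the kcalloc family checks the n * size multiplication for overflow, while an open-coded product silently wraps. A userspace analogue of the same contrast (calloc and malloc stand in for the devm_* helpers):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = 16, size = sizeof(long);

	long *a = calloc(n, size);    /* multiplication is overflow-checked */
	long *b = malloc(n * size);   /* caller must guarantee no overflow */

	printf("calloc=%p malloc=%p\n", (void *)a, (void *)b);
	free(a);
	free(b);
	return 0;
}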
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index b482f6f633bd..3ee89ae496d0 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1178,7 +1178,8 @@ found:
DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
for (i = 0; i < 6; i++)
- DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
+ DEB(DEB_PROBE,printk(" %2.2X", eth_addr[i]));
+ eth_hw_addr_set(dev, eth_addr);
DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index c612ef526d16..3e7d7c4bafdc 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -986,6 +986,7 @@ static int
ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct net_device *dev;
+ u8 addr[ETH_ALEN];
int i, ret = 0;
ether1_banner();
@@ -1015,7 +1016,8 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+ addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+ eth_hw_addr_set(dev, addr);
if (ether1_init_2(dev)) {
ret = -ENODEV;
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index 48e001881c75..0af70094aba3 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -147,6 +147,7 @@ lan_init_chip(struct parisc_device *dev)
struct net_device *netdevice;
struct i596_private *lp;
int retval = -ENOMEM;
+ u8 addr[ETH_ALEN];
int i;
if (!dev->irq) {
@@ -167,13 +168,14 @@ lan_init_chip(struct parisc_device *dev)
netdevice->base_addr = dev->hpa.start;
netdevice->irq = dev->irq;
- if (pdc_lan_station_id(netdevice->dev_addr, netdevice->base_addr)) {
+ if (pdc_lan_station_id(addr, netdevice->base_addr)) {
for (i = 0; i < 6; i++) {
- netdevice->dev_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
+ addr[i] = gsc_readb(LAN_PROM_ADDR + i);
}
printk(KERN_INFO
"%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
}
+ eth_hw_addr_set(netdevice, addr);
lp = netdev_priv(netdevice);
lp->options = dev->id.sversion == 0x72 ? OPT_SWAP_PORT : 0;
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 27937c5d7956..daec9ce04531 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -117,9 +117,10 @@ static int sni_82596_probe(struct platform_device *dev)
netdevice->dev_addr[5] = readb(eth_addr + 0x06);
iounmap(eth_addr);
- if (!netdevice->irq) {
+ if (netdevice->irq < 0) {
printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
__FILE__, netdevice->base_addr);
+ retval = netdevice->irq;
goto probe_failed;
}
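The i825xx conversions all follow one pattern: with netdev->dev_addr transitioning to const in this cycle, drivers read the MAC into a local buffer and install it once via eth_hw_addr_set() instead of writing dev_addr byte by byte. A standalone mock of the pattern (the struct and setter below are simplified stand-ins for the kernel ones):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct net_device { unsigned char dev_addr[ETH_ALEN]; };

static void eth_hw_addr_set(struct net_device *dev, const unsigned char *addr)
{
	memcpy(dev->dev_addr, addr, ETH_ALEN);   /* single sanctioned write */
}

int main(void)
{
	struct net_device dev;
	unsigned char addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)   /* gather bytes from "PROM" first */
		addr[i] = 0x10 + i;
	eth_hw_addr_set(&dev, addr);     /* then install them in one call */

	printf("%02x:...:%02x\n", dev.dev_addr[0], dev.dev_addr[5]);
	return 0;
}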
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 6b3fc8823c54..fbea9f7efe8c 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2137,8 +2137,11 @@ emac_ethtool_set_link_ksettings(struct net_device *ndev,
return 0;
}
-static void emac_ethtool_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *rp)
+static void
+emac_ethtool_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
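These get/set_ringparam changes track a 5.17 ethtool API update: both callbacks gained a struct kernel_ethtool_ringparam and a struct netlink_ext_ack parameter. Drivers with nothing to report through them, like the ones here, only extend their signatures. A standalone illustration with stub types (the real definitions live in the kernel headers):

#include <stdio.h>

struct net_device { int dummy; };
struct ethtool_ringparam { unsigned int rx_pending, tx_pending; };
struct kernel_ethtool_ringparam { int dummy; };   /* extra driver-filled fields */
struct netlink_ext_ack { int dummy; };            /* extended error reporting */

static void get_ringparam(struct net_device *ndev,
			  struct ethtool_ringparam *ring,
			  struct kernel_ethtool_ringparam *kernel_ring,
			  struct netlink_ext_ack *extack)
{
	(void)ndev; (void)kernel_ring; (void)extack;  /* unused, as in these drivers */
	ring->rx_pending = 256;
	ring->tx_pending = 256;
}

int main(void)
{
	struct net_device ndev;
	struct ethtool_ringparam ring;

	get_ringparam(&ndev, &ring, NULL, NULL);
	printf("rx=%u tx=%u\n", ring.rx_pending, ring.tx_pending);
	return 0;
}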
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 45ba40cf4d07..22fb0d109a68 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1890,6 +1890,7 @@ static struct attribute *veth_pool_attrs[] = {
&veth_size_attr,
NULL,
};
+ATTRIBUTE_GROUPS(veth_pool);
static const struct sysfs_ops veth_pool_ops = {
.show = veth_pool_show,
@@ -1899,7 +1900,7 @@ static const struct sysfs_ops veth_pool_ops = {
static struct kobj_type ktype_veth_pool = {
.release = NULL,
.sysfs_ops = &veth_pool_ops,
- .default_attrs = veth_pool_attrs,
+ .default_groups = veth_pool_groups,
};
static int ibmveth_resume(struct device *dev)
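The ibmveth change follows the tree-wide move from kobj_type.default_attrs to default_groups: ATTRIBUTE_GROUPS(veth_pool) generates veth_pool_groups from the existing veth_pool_attrs array. Roughly what the macro expands to, mocked up as standalone C (types simplified):

#include <stdio.h>

struct attribute { const char *name; };
struct attribute_group { struct attribute **attrs; };

static struct attribute size_attr = { "size" };
static struct attribute *veth_pool_attrs[] = { &size_attr, NULL };

/* Approximately what ATTRIBUTE_GROUPS(veth_pool) emits: */
static const struct attribute_group veth_pool_group = { veth_pool_attrs };
static const struct attribute_group *veth_pool_groups[] = {
	&veth_pool_group, NULL,
};

int main(void)
{
	printf("first attr: %s\n", veth_pool_groups[0]->attrs[0]->name);
	return 0;
}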
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0bb3911dd014..bda7a2a9d211 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -308,7 +308,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
if (adapter->fw_done_rc) {
dev_err(dev, "Couldn't map LTB, rc = %d\n",
adapter->fw_done_rc);
- rc = -1;
+ rc = -EIO;
goto out;
}
rc = 0;
@@ -540,13 +540,15 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
dma_addr_t stok;
+ int rc;
stok = dma_map_single(dev, &adapter->stats,
sizeof(struct ibmvnic_statistics),
DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, stok)) {
- dev_err(dev, "Couldn't map stats buffer\n");
- return -1;
+ rc = dma_mapping_error(dev, stok);
+ if (rc) {
+ dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
+ return rc;
}
adapter->stats_token = stok;
@@ -655,7 +657,7 @@ static int init_rx_pools(struct net_device *netdev)
u64 num_pools;
u64 pool_size; /* # of buffers in one pool */
u64 buff_size;
- int i, j;
+ int i, j, rc;
pool_size = adapter->req_rx_add_entries_per_subcrq;
num_pools = adapter->req_rx_queues;
@@ -674,7 +676,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!adapter->rx_pool) {
dev_err(dev, "Failed to allocate rx pools\n");
- return -1;
+ return -ENOMEM;
}
/* Set num_active_rx_pools early. If we fail below after partial
@@ -697,6 +699,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!rx_pool->free_map) {
dev_err(dev, "Couldn't alloc free_map %d\n", i);
+ rc = -ENOMEM;
goto out_release;
}
@@ -705,6 +708,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!rx_pool->rx_buff) {
dev_err(dev, "Couldn't alloc rx buffers\n");
+ rc = -ENOMEM;
goto out_release;
}
}
@@ -718,8 +722,9 @@ update_ltb:
dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
i, rx_pool->size, rx_pool->buff_size);
- if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
- rx_pool->size * rx_pool->buff_size))
+ rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+ rx_pool->size * rx_pool->buff_size);
+ if (rc)
goto out;
for (j = 0; j < rx_pool->size; ++j) {
@@ -756,7 +761,7 @@ out:
/* We failed to allocate one or more LTBs or map them on the VIOS.
* Hold onto the pools and any LTBs that we did allocate/map.
*/
- return -1;
+ return rc;
}
static void release_vpd_data(struct ibmvnic_adapter *adapter)
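A recurring ibmvnic theme in this commit is replacing bare return -1 with a real errno (-ENOMEM, -EIO, -ETIMEDOUT, ...). Besides being more informative in logs, -1 happens to equal -EPERM, so callers that decode the value would report the wrong cause. A minimal standalone example of the corrected shape:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int alloc_thing(size_t n)
{
	void *p = malloc(n);

	if (!p)
		return -ENOMEM;   /* was: return -1 (i.e. -EPERM) */
	free(p);
	return 0;
}

int main(void)
{
	int rc = alloc_thing(64);

	if (rc)
		printf("failed: %s\n", strerror(-rc));
	else
		printf("ok\n");
	return 0;
}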
@@ -817,13 +822,13 @@ static int init_one_tx_pool(struct net_device *netdev,
sizeof(struct ibmvnic_tx_buff),
GFP_KERNEL);
if (!tx_pool->tx_buff)
- return -1;
+ return -ENOMEM;
tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
if (!tx_pool->free_map) {
kfree(tx_pool->tx_buff);
tx_pool->tx_buff = NULL;
- return -1;
+ return -ENOMEM;
}
for (i = 0; i < pool_size; i++)
@@ -914,7 +919,7 @@ static int init_tx_pools(struct net_device *netdev)
adapter->tx_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
- return -1;
+ return -ENOMEM;
adapter->tso_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
@@ -924,7 +929,7 @@ static int init_tx_pools(struct net_device *netdev)
if (!adapter->tso_pool) {
kfree(adapter->tx_pool);
adapter->tx_pool = NULL;
- return -1;
+ return -ENOMEM;
}
/* Set num_active_tx_pools early. If we fail below after partial
@@ -1113,7 +1118,7 @@ static int ibmvnic_login(struct net_device *netdev)
retry = false;
if (retry_count > retries) {
netdev_warn(netdev, "Login attempts exceeded\n");
- return -1;
+ return -EACCES;
}
adapter->init_done_rc = 0;
@@ -1154,25 +1159,26 @@ static int ibmvnic_login(struct net_device *netdev)
timeout)) {
netdev_warn(netdev,
"Capabilities query timed out\n");
- return -1;
+ return -ETIMEDOUT;
}
rc = init_sub_crqs(adapter);
if (rc) {
netdev_warn(netdev,
"SCRQ initialization failed\n");
- return -1;
+ return rc;
}
rc = init_sub_crq_irqs(adapter);
if (rc) {
netdev_warn(netdev,
"SCRQ irq initialization failed\n");
- return -1;
+ return rc;
}
} else if (adapter->init_done_rc) {
- netdev_warn(netdev, "Adapter login failed\n");
- return -1;
+ netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
+ adapter->init_done_rc);
+ return -EIO;
}
} while (retry);
@@ -1231,7 +1237,7 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
netdev_err(netdev, "timeout setting link state\n");
- return -1;
+ return -ETIMEDOUT;
}
if (adapter->init_done_rc == PARTIALSUCCESS) {
@@ -2042,7 +2048,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_packets++;
tx_bytes += skb->len;
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
ret = NETDEV_TX_OK;
goto out;
@@ -2288,7 +2294,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
/* If someone else changed the adapter state
* when we dropped the rtnl, fail the reset
*/
- rc = -1;
+ rc = -EAGAIN;
goto out;
}
adapter->state = VNIC_CLOSED;
@@ -2330,10 +2336,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
}
rc = ibmvnic_reset_init(adapter, true);
- if (rc) {
- rc = IBMVNIC_INIT_FAILED;
+ if (rc)
goto out;
- }
/* If the adapter was in PROBE or DOWN state prior to the reset,
* exit here.
@@ -2598,6 +2602,7 @@ static void __ibmvnic_reset(struct work_struct *work)
struct ibmvnic_rwi *rwi;
unsigned long flags;
u32 reset_state;
+ int num_fails = 0;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
@@ -2651,11 +2656,23 @@ static void __ibmvnic_reset(struct work_struct *work)
rc = do_hard_reset(adapter, rwi, reset_state);
rtnl_unlock();
}
- if (rc) {
- /* give backing device time to settle down */
+ if (rc)
+ num_fails++;
+ else
+ num_fails = 0;
+
+ /* If auto-priority-failover is enabled we can get
+ * back to back failovers during resets, resulting
+ * in at least two failed resets (from high-priority
+ * backing device to low-priority one and then back)
+ * If resets continue to fail beyond that, give the
+ * adapter some time to settle down before retrying.
+ */
+ if (num_fails >= 3) {
netdev_dbg(adapter->netdev,
- "[S:%s] Hard reset failed, waiting 60 secs\n",
- adapter_state_to_string(adapter->state));
+ "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
+ adapter_state_to_string(adapter->state),
+ num_fails);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
@@ -3072,7 +3089,9 @@ static u32 ibmvnic_get_link(struct net_device *netdev)
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
@@ -3092,7 +3111,9 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
}
static int ibmvnic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int ret;
@@ -3759,7 +3780,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter)
allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
if (!allqueues)
- return -1;
+ return -ENOMEM;
for (i = 0; i < total_queues; i++) {
allqueues[i] = init_sub_crq_queue(adapter);
@@ -3828,7 +3849,7 @@ tx_failed:
for (i = 0; i < registered_queues; i++)
release_sub_crq_queue(adapter, allqueues[i], 1);
kfree(allqueues);
- return -1;
+ return -ENOMEM;
}
static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
@@ -3836,11 +3857,25 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
int max_entries;
+ int cap_reqs;
+
+ /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
+ * the PROMISC flag). Initialize this count upfront. When the tasklet
+ * receives a response to all of these, it will send the next protocol
+ * message (QUERY_IP_OFFLOAD).
+ */
+ if (!(adapter->netdev->flags & IFF_PROMISC) ||
+ adapter->promisc_supported)
+ cap_reqs = 7;
+ else
+ cap_reqs = 6;
if (!retry) {
/* Sub-CRQ entries are 32 byte long */
int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
+ atomic_set(&adapter->running_cap_crqs, cap_reqs);
+
if (adapter->min_tx_entries_per_subcrq > entries_page ||
adapter->min_rx_add_entries_per_subcrq > entries_page) {
dev_err(dev, "Fatal, invalid entries per sub-crq\n");
@@ -3901,44 +3936,45 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
adapter->opt_rx_comp_queues;
adapter->req_rx_add_queues = adapter->max_rx_add_queues;
+ } else {
+ atomic_add(cap_reqs, &adapter->running_cap_crqs);
}
-
memset(&crq, 0, sizeof(crq));
crq.request_capability.first = IBMVNIC_CRQ_CMD;
crq.request_capability.cmd = REQUEST_CAPABILITY;
crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_tx_entries_per_subcrq);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_MTU);
crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
if (adapter->netdev->flags & IFF_PROMISC) {
@@ -3946,16 +3982,21 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(1);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
} else {
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(0);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
+
+ /* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+ WARN_ON(cap_reqs != 0);
}
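The conversion above swaps per-send atomic_inc() calls for one upfront atomic_set() plus a local countdown, so a response arriving between two sends can no longer observe a transient zero and fire the next protocol stage early. A stripped-down sketch of the same pattern, with hypothetical foo_* helpers:

struct foo_adapter {
        atomic_t running_reqs;
};

static void foo_send_batch(struct foo_adapter *a, int expected)
{
        int i;

        /* Arm the count before the first send so that responses racing
         * in from the tasklet cannot see a transient zero and trigger
         * the next stage early.
         */
        atomic_set(&a->running_reqs, expected);
        for (i = 0; i < expected; i++)
                foo_send_one(a);                /* hypothetical helper */
}

static void foo_handle_rsp(struct foo_adapter *a)
{
        if (atomic_dec_and_test(&a->running_reqs))
                foo_send_next_stage(a);         /* hypothetical next step */
}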
static int pending_scrq(struct ibmvnic_adapter *adapter,
@@ -4187,7 +4228,7 @@ static int send_login(struct ibmvnic_adapter *adapter)
if (!adapter->tx_scrq || !adapter->rx_scrq) {
netdev_err(adapter->netdev,
"RX or TX queues are not allocated, device login failed\n");
- return -1;
+ return -ENOMEM;
}
release_login_buffer(adapter);
@@ -4307,7 +4348,7 @@ buf_map_failed:
kfree(login_buffer);
adapter->login_buf = NULL;
buf_alloc_failed:
- return -1;
+ return -ENOMEM;
}
static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
@@ -4349,118 +4390,132 @@ static void send_query_map(struct ibmvnic_adapter *adapter)
static void send_query_cap(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
+ int cap_reqs;
+
+ /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
+ * upfront. When the tasklet receives a response to all of these, it
+ * can send out the next protocol message (REQUEST_CAPABILITY).
+ */
+ cap_reqs = 25;
+
+ atomic_set(&adapter->running_cap_crqs, cap_reqs);
- atomic_set(&adapter->running_cap_crqs, 0);
memset(&crq, 0, sizeof(crq));
crq.query_capability.first = IBMVNIC_CRQ_CMD;
crq.query_capability.cmd = QUERY_CAPABILITY;
crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_MTU);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MTU);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
- atomic_inc(&adapter->running_cap_crqs);
+
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
+
+ /* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+ WARN_ON(cap_reqs != 0);
}
static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
@@ -4764,6 +4819,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
char *name;
atomic_dec(&adapter->running_cap_crqs);
+ netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
+ atomic_read(&adapter->running_cap_crqs));
switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
case REQ_TX_QUEUES:
req_value = &adapter->req_tx_queues;
@@ -4827,10 +4884,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
}
/* Done receiving requested capabilities, query IP offload support */
- if (atomic_read(&adapter->running_cap_crqs) == 0) {
- adapter->wait_capability = false;
+ if (atomic_read(&adapter->running_cap_crqs) == 0)
send_query_ip_offload(adapter);
- }
}
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
@@ -5128,10 +5183,8 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
}
out:
- if (atomic_read(&adapter->running_cap_crqs) == 0) {
- adapter->wait_capability = false;
+ if (atomic_read(&adapter->running_cap_crqs) == 0)
send_request_cap(adapter, 0);
- }
}
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
@@ -5427,33 +5480,21 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
struct ibmvnic_crq_queue *queue = &adapter->crq;
union ibmvnic_crq *crq;
unsigned long flags;
- bool done = false;
spin_lock_irqsave(&queue->lock, flags);
- while (!done) {
- /* Pull all the valid messages off the CRQ */
- while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
- /* This barrier makes sure ibmvnic_next_crq()'s
- * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
- * before ibmvnic_handle_crq()'s
- * switch(gen_crq->first) and switch(gen_crq->cmd).
- */
- dma_rmb();
- ibmvnic_handle_crq(crq, adapter);
- crq->generic.first = 0;
- }
- /* remain in tasklet until all
- * capabilities responses are received
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
*/
- if (!adapter->wait_capability)
- done = true;
+ dma_rmb();
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
}
- /* if capabilities CRQ's were sent in this tasklet, the following
- * tasklet must wait until all responses are received
- */
- if (atomic_read(&adapter->running_cap_crqs) != 0)
- adapter->wait_capability = true;
+
spin_unlock_irqrestore(&queue->lock, flags);
}
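With wait_capability gone, the tasklet drains the ring exactly once; the retained dma_rmb() is what keeps that single pass safe. A generic sketch of the barrier placement when consuming a device-written descriptor ring (all names hypothetical, not driver code):

struct foo_desc {
        u8 valid;               /* written last by the device */
        u8 cmd;
        __le16 len;
};

static void foo_consume_ring(struct foo_desc *ring, int n, int *head)
{
        while (READ_ONCE(ring[*head].valid)) {
                /* Pair with the device's write ordering: do not let the
                 * cmd/len loads be speculated ahead of the valid load.
                 */
                dma_rmb();
                foo_handle(&ring[*head]);       /* hypothetical handler */
                ring[*head].valid = 0;
                *head = (*head + 1) % n;
        }
}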
@@ -5628,7 +5669,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Initialization sequence timed out\n");
- return -1;
+ return -ETIMEDOUT;
}
if (adapter->init_done_rc) {
@@ -5639,7 +5680,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
if (adapter->from_passive_init) {
adapter->state = VNIC_OPEN;
adapter->from_passive_init = false;
- return -1;
+ return -EINVAL;
}
if (reset &&
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index b8e42f67d897..4a7a56ff74ce 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -18,8 +18,6 @@
#define IBMVNIC_NAME "ibmvnic"
#define IBMVNIC_DRIVER_VERSION "1.0.1"
#define IBMVNIC_INVALID_MAP -1
-#define IBMVNIC_STATS_TIMEOUT 1
-#define IBMVNIC_INIT_FAILED 2
#define IBMVNIC_OPEN_FAILED 3
/* basic structures plus 100 2k buffers */
@@ -921,7 +919,6 @@ struct ibmvnic_adapter {
int login_rsp_buf_sz;
atomic_t running_cap_crqs;
- bool wait_capability;
struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 0b274d8fa45b..3facb55b7161 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -327,6 +327,16 @@ config ICE_SWITCHDEV
If unsure, say N.
+config ICE_HWTS
+ bool "Support HW cross-timestamp on platforms with PTM support"
+ default y
+ depends on ICE && X86
+ help
+ Say Y to enable hardware supported cross-timestamping on platforms
+ with PCIe PTM support. The cross-timestamp is available through
+ the PTP clock driver precise cross-timestamp ioctl
+ (PTP_SYS_OFFSET_PRECISE).
+
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 0bf3d47bb90d..4a8013f20152 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2557,7 +2557,9 @@ static int e100_set_eeprom(struct net_device *netdev,
}
static void e100_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nic *nic = netdev_priv(netdev);
struct param_range *rfds = &nic->params.rfds;
@@ -2570,7 +2572,9 @@ static void e100_get_ringparam(struct net_device *netdev,
}
static int e100_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nic *nic = netdev_priv(netdev);
struct param_range *rfds = &nic->params.rfds;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 0a57172dfcbc..32803b0cf1e8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -539,7 +539,9 @@ static void e1000_get_drvinfo(struct net_device *netdev,
}
static void e1000_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -556,7 +558,9 @@ static void e1000_get_ringparam(struct net_device *netdev,
}
static int e1000_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 669060a2e6aa..3f5feb55cfba 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1953,7 +1953,8 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
static void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
- struct e1000_tx_buffer *buffer_info)
+ struct e1000_tx_buffer *buffer_info,
+ int budget)
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
@@ -1966,7 +1967,7 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
buffer_info->dma = 0;
}
if (buffer_info->skb) {
- dev_kfree_skb_any(buffer_info->skb);
+ napi_consume_skb(buffer_info->skb, budget);
buffer_info->skb = NULL;
}
buffer_info->time_stamp = 0;
@@ -1990,7 +1991,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
for (i = 0; i < tx_ring->count; i++) {
buffer_info = &tx_ring->buffer_info[i];
- e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
}
netdev_reset_queue(adapter->netdev);
@@ -2958,7 +2959,7 @@ dma_error:
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
- e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
}
return 0;
@@ -3856,7 +3857,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
}
}
- e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info,
+ 64);
tx_desc->upper.data = 0;
if (unlikely(++i == tx_ring->count))
@@ -4382,7 +4384,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
if (!skb) {
unsigned int frag_len = e1000_frag_len(adapter);
- skb = build_skb(data - E1000_HEADROOM, frag_len);
+ skb = napi_build_skb(data - E1000_HEADROOM, frag_len);
if (!skb) {
adapter->alloc_rx_buff_failed++;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 8515e00d1b40..b80ae9a82224 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -655,7 +655,9 @@ static void e1000_get_drvinfo(struct net_device *netdev,
}
static void e1000_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -666,7 +668,9 @@ static void e1000_get_ringparam(struct net_device *netdev,
}
static int e1000_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_ring *temp_tx = NULL, *temp_rx = NULL;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 44e2dc8328a2..635a95927e93 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3614,10 +3614,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return -EINVAL;
- /* flags reserved for future extensions - must be zero */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 0d37f011d0ce..d53369e30040 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -502,7 +502,9 @@ static void fm10k_set_msglevel(struct net_device *netdev, u32 data)
}
static void fm10k_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
@@ -517,7 +519,9 @@ static void fm10k_get_ringparam(struct net_device *netdev,
}
static int fm10k_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index 21eff0895a7a..f6d56867f857 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -143,7 +143,7 @@ s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id,
* @vlan: location of buffer to store VLAN
*
* This function pulls the MAC address back out of the attribute and will
- * place it in the array pointed by by mac_addr. It will return success
+ * place it in the array pointed by mac_addr. It will return success
 * if provided with valid pointers.
**/
s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan)
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 4d939af0a626..2e02cc68cd3f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -174,7 +174,6 @@ enum i40e_interrupt_policy {
struct i40e_lump_tracking {
u16 num_entries;
- u16 search_hint;
u16 list[0];
#define I40E_PILE_VALID_BIT 0x8000
#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
@@ -848,12 +847,12 @@ struct i40e_vsi {
struct rtnl_link_stats64 net_stats_offsets;
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
- u32 tx_restart;
- u32 tx_busy;
+ u64 tx_restart;
+ u64 tx_busy;
u64 tx_linearize;
u64 tx_force_wb;
- u32 rx_buf_failed;
- u32 rx_page_failed;
+ u64 rx_buf_failed;
+ u64 rx_page_failed;
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 593912b17609..7abef88801fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -769,21 +769,22 @@ static bool i40e_asq_done(struct i40e_hw *hw)
}
/**
- * i40e_asq_send_command - send command to Admin Queue
+ * i40e_asq_send_command_atomic - send command to Admin Queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
* @buff_size: size of buffer for indirect commands
* @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
*
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
-i40e_status i40e_asq_send_command(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details)
+i40e_status
+i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context)
{
i40e_status status = 0;
struct i40e_dma_mem *dma_buff = NULL;
@@ -910,7 +911,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- udelay(50);
+
+ if (is_atomic_context)
+ udelay(50);
+ else
+ usleep_range(40, 60);
+
total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
@@ -967,6 +973,15 @@ asq_send_command_error:
return status;
}
+i40e_status
+i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_asq_send_command_atomic(hw, desc, buff, buff_size,
+ cmd_details, false);
+}
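The new is_atomic_context flag selects between a busy-wait and a sleeping wait: callers that may hold a spinlock must use udelay(), while process context can yield the CPU with usleep_range(). A condensed sketch of that polling shape; the foo_* names and timeout are hypothetical:

#define FOO_CMD_TIMEOUT_US      250000          /* hypothetical */

static int foo_poll_done(struct foo_hw *hw, bool is_atomic_context)
{
        unsigned int total = 0;

        do {
                if (foo_cmd_done(hw))           /* hypothetical check */
                        return 0;

                if (is_atomic_context)
                        udelay(50);             /* may hold a spinlock */
                else
                        usleep_range(40, 60);   /* process context: sleep */

                total += 50;
        } while (total < FOO_CMD_TIMEOUT_US);

        return -ETIMEDOUT;
}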
+
/**
* i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 140b677f114d..60f9e0a6aaca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,8 +11,8 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0009
-#define I40E_FW_API_VERSION_MINOR_X710 0x0009
+#define I40E_FW_API_VERSION_MINOR_X722 0x000C
+#define I40E_FW_API_VERSION_MINOR_X710 0x000F
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index b4d3fed0d2f2..9ddeb015eb7e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -154,8 +154,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
return "I40E_ERR_INVALID_MAC_ADDR";
case I40E_ERR_DEVICE_NOT_SUPPORTED:
return "I40E_ERR_DEVICE_NOT_SUPPORTED";
- case I40E_ERR_MASTER_REQUESTS_PENDING:
- return "I40E_ERR_MASTER_REQUESTS_PENDING";
+ case I40E_ERR_PRIMARY_REQUESTS_PENDING:
+ return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
case I40E_ERR_INVALID_LINK_SETTINGS:
return "I40E_ERR_INVALID_LINK_SETTINGS";
case I40E_ERR_AUTONEG_NOT_COMPLETE:
@@ -2073,7 +2073,8 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
cmd->seid = cpu_to_le16(seid);
cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+ cmd_details, true);
return status;
}
@@ -2114,7 +2115,8 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
cmd->seid = cpu_to_le16(seid);
cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+ cmd_details, true);
return status;
}
@@ -4136,7 +4138,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
struct i40e_filter_control_settings *settings)
{
u32 fcoe_cntx_size, fcoe_filt_size;
- u32 pe_cntx_size, pe_filt_size;
u32 fcoe_fmax;
u32 val;
@@ -4180,8 +4181,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
case I40E_HASH_FILTER_SIZE_256K:
case I40E_HASH_FILTER_SIZE_512K:
case I40E_HASH_FILTER_SIZE_1M:
- pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
- pe_filt_size <<= (u32)settings->pe_filt_num;
break;
default:
return I40E_ERR_PARAM;
@@ -4198,8 +4197,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
case I40E_DMA_CNTX_SIZE_64K:
case I40E_DMA_CNTX_SIZE_128K:
case I40E_DMA_CNTX_SIZE_256K:
- pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
- pe_cntx_size <<= (u32)settings->pe_cntx_num;
break;
default:
return I40E_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 2c1b1da1220e..1e57cc8c47d7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -240,7 +240,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
(unsigned long int)vsi->net_stats_offsets.rx_compressed,
(unsigned long int)vsi->net_stats_offsets.tx_compressed);
dev_info(&pf->pdev->dev,
- " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
+ " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed);
rcu_read_lock();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 513ba6974355..091f36adbbe1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1916,7 +1916,9 @@ static void i40e_get_drvinfo(struct net_device *netdev,
}
static void i40e_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -1944,7 +1946,9 @@ static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index)
}
static int i40e_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
struct i40e_netdev_priv *np = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e118cf9265c7..f70c478dafdb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -99,6 +99,24 @@ MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq;
+static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
+ struct net_device *netdev, int delta)
+{
+ struct netdev_hw_addr *ha;
+
+ if (!f || !netdev)
+ return;
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (ether_addr_equal(ha->addr, f->macaddr)) {
+ ha->refcount += delta;
+ if (ha->refcount <= 0)
+ ha->refcount = 1;
+ break;
+ }
+ }
+}
+
/**
* i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
* @hw: pointer to the HW structure
@@ -178,10 +196,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
* @id: an owner id to stick on the items assigned
*
* Returns the base item index of the lump, or negative for error
- *
- * The search_hint trick and lack of advanced fit-finding only work
- * because we're highly likely to have all the same size lump requests.
- * Linear search time and any fragmentation should be minimal.
**/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
u16 needed, u16 id)
@@ -196,8 +210,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
return -EINVAL;
}
- /* start the linear search with an imperfect hint */
- i = pile->search_hint;
+ /* Allocate the last queue in the pile for the FDIR VSI queue
+ * so that it doesn't fragment the qp_pile
+ */
+ if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
+ if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
+ dev_err(&pf->pdev->dev,
+ "Cannot allocate queue %d for I40E_VSI_FDIR\n",
+ pile->num_entries - 1);
+ return -ENOMEM;
+ }
+ pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
+ return pile->num_entries - 1;
+ }
+
+ i = 0;
while (i < pile->num_entries) {
/* skip already allocated entries */
if (pile->list[i] & I40E_PILE_VALID_BIT) {
@@ -216,7 +243,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
for (j = 0; j < needed; j++)
pile->list[i+j] = id | I40E_PILE_VALID_BIT;
ret = i;
- pile->search_hint = i + j;
break;
}
@@ -239,7 +265,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
int valid_id = (id | I40E_PILE_VALID_BIT);
int count = 0;
- int i;
+ u16 i;
if (!pile || index >= pile->num_entries)
return -EINVAL;
@@ -251,8 +277,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
count++;
}
- if (count && index < pile->search_hint)
- pile->search_hint = index;
return count;
}
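Dropping search_hint leaves the lump allocator as a plain first-fit scan, with the new twist that one caller (the FDIR VSI) is pinned to the last slot so its single queue cannot split the free space. A toy version of the resulting allocator; VALID_BIT mirrors I40E_PILE_VALID_BIT, everything else is simplified:

#define VALID_BIT       0x8000

static int toy_pile_get(u16 *list, u16 n, u16 needed, u16 id, bool want_last)
{
        u16 i, j;

        if (want_last) {                        /* e.g. the FDIR queue */
                if (list[n - 1] & VALID_BIT)
                        return -ENOMEM;
                list[n - 1] = id | VALID_BIT;
                return n - 1;
        }

        for (i = 0; i + needed <= n; i++) {
                for (j = 0; j < needed; j++)
                        if (list[i + j] & VALID_BIT)
                                break;
                if (j == needed) {              /* found a free run */
                        for (j = 0; j < needed; j++)
                                list[i + j] = id | VALID_BIT;
                        return i;
                }
        }
        return -ENOMEM;
}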
@@ -754,9 +778,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct rtnl_link_stats64 *ns; /* netdev stats */
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
- u32 tx_restart, tx_busy;
+ u64 tx_restart, tx_busy;
struct i40e_ring *p;
- u32 rx_page, rx_buf;
+ u64 rx_page, rx_buf;
u64 bytes, packets;
unsigned int start;
u64 tx_linearize;
@@ -2036,6 +2060,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
hlist_for_each_entry_safe(new, h, from, hlist) {
/* We can simply free the wrapper structure */
hlist_del(&new->hlist);
+ netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
kfree(new);
}
}
@@ -2383,6 +2408,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
&tmp_add_list,
&tmp_del_list,
vlan_filters);
+
+ hlist_for_each_entry(new, &tmp_add_list, hlist)
+ netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
+
if (retval)
goto err_no_memory_locked;
@@ -2515,6 +2544,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (new->f->state == I40E_FILTER_NEW)
new->f->state = new->state;
hlist_del(&new->hlist);
+ netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
kfree(new);
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -3891,10 +3921,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
*
* get_cpu_mask returns a static constant mask with
* a permanent lifetime so it's ok to pass to
- * irq_set_affinity_hint without making a copy.
+ * irq_update_affinity_hint without making a copy.
*/
cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
vsi->irqs_ready = true;
@@ -3905,7 +3935,7 @@ free_queue_irqs:
vector--;
irq_num = pf->msix_entries[base + vector].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -4726,7 +4756,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
/* clear the affinity notifier in the IRQ descriptor */
irq_set_affinity_notifier(irq_num, NULL);
/* remove our suggested affinity mask for this IRQ */
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
free_irq(irq_num, vsi->q_vectors[i]);
@@ -8717,6 +8747,27 @@ int i40e_open(struct net_device *netdev)
}
/**
+ * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
+ * @vsi: vsi structure
+ *
+ * This updates netdev's number of tx/rx queues
+ *
+ * Returns status of setting tx/rx queues
+ **/
+static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
+{
+ int ret;
+
+ ret = netif_set_real_num_rx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (ret)
+ return ret;
+
+ return netif_set_real_num_tx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+}
+
+/**
* i40e_vsi_open -
* @vsi: the VSI to open
*
@@ -8752,13 +8803,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
goto err_setup_rx;
/* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(vsi->netdev,
- vsi->num_queue_pairs);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(vsi->netdev,
- vsi->num_queue_pairs);
+ err = i40e_netif_set_realnum_tx_rx_queues(vsi);
if (err)
goto err_set_queues;
@@ -10535,15 +10580,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
i40e_get_oem_version(&pf->hw);
- if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
- ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
- hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
- /* The following delay is necessary for 4.33 firmware and older
- * to recover after EMP reset. 200 ms should suffice but we
- * put here 300 ms to be sure that FW is ready to operate
- * after reset.
- */
- mdelay(300);
+ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
+ /* The following delay is necessary for firmware update. */
+ mdelay(1000);
}
/* re-verify the eeprom if we just had an EMP reset */
@@ -11753,7 +11792,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
return -ENOMEM;
pf->irq_pile->num_entries = vectors;
- pf->irq_pile->search_hint = 0;
/* track first vector for misc interrupts, ignore return */
(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
@@ -12556,7 +12594,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
goto sw_init_done;
}
pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
- pf->qp_pile->search_hint = 0;
pf->tx_timeout_recovery_level = 1;
@@ -14151,6 +14188,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
ret = i40e_config_netdev(vsi);
if (ret)
goto err_netdev;
+ ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
+ if (ret)
+ goto err_netdev;
ret = register_netdev(vsi->netdev);
if (ret)
goto err_netdev;
@@ -15451,8 +15491,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
- dev_info(&pdev->dev,
- "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
+ dev_dbg(&pdev->dev,
+ "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
hw->aq.api_maj_ver,
hw->aq.api_min_ver,
I40E_FW_API_VERSION_MAJOR,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index aaea297640e0..9241b6005ad3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -22,11 +22,15 @@ void i40e_adminq_init_ring_data(struct i40e_hw *hw);
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
-i40e_status i40e_asq_send_command(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details);
+i40e_status
+i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status
+i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 09b1d5aed1c9..61e5789d78db 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1205,10 +1205,6 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work);
- /* Reserved for future extensions. */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
pf->ptp_tx = false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 8d0588a27a05..1908eed4fa5e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -413,6 +413,9 @@
#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 77be0702d07c..db3714a65dc7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -18,7 +18,7 @@ enum i40e_status_code {
I40E_ERR_ADAPTER_STOPPED = -9,
I40E_ERR_INVALID_MAC_ADDR = -10,
I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
- I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_PRIMARY_REQUESTS_PENDING = -12,
I40E_ERR_INVALID_LINK_SETTINGS = -13,
I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
I40E_ERR_RESET_FAILED = -15,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 10a83e5385c7..66cc79500c10 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2204,7 +2204,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -2322,7 +2322,7 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
result = I40E_XDP_REDIR;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
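bpf_warn_invalid_xdp_action() now takes the netdev and the program so its one-line warning can identify which device and which prog returned the bogus verdict. A sketch of the call in a typical verdict switch; the ring plumbing is hypothetical:

struct foo_ring {
        struct net_device *netdev;
};

static int foo_run_xdp(struct foo_ring *rx_ring, struct xdp_buff *xdp,
                       struct bpf_prog *xdp_prog)
{
        u32 act = bpf_prog_run_xdp(xdp_prog, xdp);

        switch (act) {
        case XDP_PASS:
        case XDP_TX:
        case XDP_REDIRECT:
                return act;
        default:
                bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                return XDP_DROP;
        }
}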
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 2ea4deb8fc44..dfdb6e786461 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1377,6 +1377,32 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
}
/**
+ * i40e_sync_vfr_reset
+ * @hw: pointer to hw struct
+ * @vf_id: VF identifier
+ *
+ * Before triggering a hardware reset, make sure that no other process has
+ * reserved the hardware for any reset operation. This check is done by
+ * examining the status of the RSTAT1 register used to signal the reset.
+ **/
+static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
+ reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (reg)
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -EAGAIN;
+}
+
+/**
* i40e_trigger_vf_reset
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
@@ -1390,9 +1416,11 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
u32 reg, reg_idx, bit_idx;
+ bool vf_active;
+ u32 radq;
/* warn the VF */
- clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
/* Disable VF's configuration API during reset. The flag is re-enabled
* in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
@@ -1406,7 +1434,19 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
* just need to clean up, so don't hit the VFRTRIG register.
*/
if (!flr) {
- /* reset VF using VPGEN_VFRTRIG reg */
+ /* Sync VFR reset before trigger next one */
+ radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (vf_active && !radq)
+ /* wait for the VF driver to finish the reset */
+ if (i40e_sync_vfr_reset(hw, vf->vf_id))
+ dev_info(&pf->pdev->dev,
+ "Reset VF %d never finished\n",
+ vf->vf_id);
+
+ /* Reset VF using VPGEN_VFRTRIG reg. This also sets the
+ * reset-in-progress state in the RSTAT1 register.
+ */
reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
@@ -1877,17 +1917,19 @@ sriov_configure_out:
/***********************virtual channel routines******************/
/**
- * i40e_vc_send_msg_to_vf
+ * i40e_vc_send_msg_to_vf_ex
* @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
* @v_retval: virtual channel return value
* @msg: pointer to the msg buffer
* @msglen: msg length
+ * @is_quiet: true to suppress printing of unsuccessful return values, false otherwise
*
* send msg to VF
**/
-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
- u32 v_retval, u8 *msg, u16 msglen)
+static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen,
+ bool is_quiet)
{
struct i40e_pf *pf;
struct i40e_hw *hw;
@@ -1903,7 +1945,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
/* single place to detect unsuccessful return values */
- if (v_retval) {
+ if (v_retval && !is_quiet) {
vf->num_invalid_msgs++;
dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
vf->vf_id, v_opcode, v_retval);
@@ -1934,6 +1976,23 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
}
/**
+ * i40e_vc_send_msg_to_vf
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ **/
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
+{
+ return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
+ msg, msglen, false);
+}
+
+/**
* i40e_vc_send_resp_to_vf
* @vf: pointer to the VF info
* @opcode: operation code
@@ -2599,6 +2658,59 @@ error_param:
}
/**
+ * i40e_check_enough_queue - find a big enough block of free queues
+ * @vf: pointer to the VF info
+ * @needed: the number of items needed
+ *
+ * Returns the base item index of the queue, or negative for error
+ **/
+static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
+{
+ unsigned int i, cur_queues, more, pool_size;
+ struct i40e_lump_tracking *pile;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ cur_queues = vsi->alloc_queue_pairs;
+
+ /* if the currently allocated queues are already enough */
+ if (cur_queues >= needed)
+ return vsi->base_queue;
+
+ pile = pf->qp_pile;
+ if (cur_queues > 0) {
+ /* if some queues are already allocated, just check
+ * whether enough free queues are available directly
+ * behind the allocated block.
+ */
+ more = needed - cur_queues;
+ for (i = vsi->base_queue + cur_queues;
+ i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT)
+ break;
+
+ if (more-- == 1)
+ /* there is enough */
+ return vsi->base_queue;
+ }
+ }
+
+ pool_size = 0;
+ for (i = 0; i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT) {
+ pool_size = 0;
+ continue;
+ }
+ if (needed <= ++pool_size)
+ /* there is enough */
+ return i;
+ }
+
+ return -ENOMEM;
+}
+
+/**
* i40e_vc_request_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2632,6 +2744,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
req_pairs - cur_pairs,
pf->queues_left);
vfres->num_queue_pairs = pf->queues_left + cur_pairs;
+ } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d requested %d more queues, but there is not enough for it.\n",
+ vf->vf_id,
+ req_pairs - cur_pairs);
+ vfres->num_queue_pairs = cur_pairs;
} else {
/* successful request */
vf->num_req_queues = req_pairs;
@@ -2685,16 +2803,26 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
+#define I40E_MAX_MACVLAN_PER_HW 3072
+#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
+ (num_ports))
/* If the VF is not trusted restrict the number of MAC/VLAN it can program
* MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
*/
#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
#define I40E_VC_MAX_VLAN_PER_VF 16
+#define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \
+({ typeof(vf_num) vf_num_ = (vf_num); \
+ typeof(num_ports) num_ports_ = (num_ports); \
+ ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \
+ I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \
+ I40E_VC_MAX_MAC_ADDR_PER_VF; })
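Worked example of the budget macro above, under hypothetical numbers: on a 4-port device I40E_MAX_MACVLAN_PER_PF(4) = 3072 / 4 = 768; with 8 allocated VFs and I40E_VC_MAX_MAC_ADDR_PER_VF = 18, each trusted VF may program ((768 - 8 * 18) / 8) + 18 = 96 MAC/VLAN filters.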
/**
* i40e_check_vf_permission
* @vf: pointer to the VF info
* @al: MAC address list from virtchnl
+ * @is_quiet: set to true to suppress the opcode failure message, false otherwise
*
* Check that the given list of MAC addresses is allowed. Will return -EPERM
* if any address in the list is not valid. Checks the following conditions:
@@ -2709,13 +2837,16 @@ error_param:
* addresses might not be accurate.
**/
static inline int i40e_check_vf_permission(struct i40e_vf *vf,
- struct virtchnl_ether_addr_list *al)
+ struct virtchnl_ether_addr_list *al,
+ bool *is_quiet)
{
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
+ struct i40e_hw *hw = &pf->hw;
int mac2add_cnt = 0;
int i;
+ *is_quiet = false;
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
u8 *addr = al->list[i].addr;
@@ -2739,6 +2870,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ *is_quiet = true;
return -EPERM;
}
@@ -2752,12 +2884,26 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
* number of addresses. Check to make sure that the additions do not
* push us over the limit.
*/
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- (i40e_count_filters(vsi) + mac2add_cnt) >
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ if ((i40e_count_filters(vsi) + mac2add_cnt) >
I40E_VC_MAX_MAC_ADDR_PER_VF) {
- dev_err(&pf->pdev->dev,
- "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
- return -EPERM;
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ return -EPERM;
+ }
+ /* If this VF is trusted, it can use more resources than an
+ * untrusted one. However, to ensure that every trusted VF gets an
+ * appropriate number of resources, divide the whole pool of
+ * resources per port and then across all VFs.
+ */
+ } else {
+ if ((i40e_count_filters(vsi) + mac2add_cnt) >
+ I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
+ hw->num_ports)) {
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
+ return -EPERM;
+ }
}
return 0;
}
@@ -2775,6 +2921,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
+ bool is_quiet = false;
i40e_status ret = 0;
int i;
@@ -2791,7 +2938,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
- ret = i40e_check_vf_permission(vf, al);
+ ret = i40e_check_vf_permission(vf, al, &is_quiet);
if (ret) {
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
@@ -2829,8 +2976,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
- ret);
+ return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ ret, NULL, 0, is_quiet);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 49575a640a84..03c42fd0fea1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -19,6 +19,7 @@
#define I40E_MAX_VF_PROMISC_FLAGS 3
#define I40E_VF_STATE_WAIT_COUNT 20
+#define I40E_VFR_WAIT_COUNT 100
/* Various queue ctrls */
enum i40e_queue_ctrl {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index ea06e957393e..945b1bb9c6f4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -176,7 +176,7 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
goto out_failure;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index ea88f4597a07..bb962987f300 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -22,7 +22,6 @@
struct i40e_vsi;
struct xsk_buff_pool;
-struct zero_copy_allocator;
int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 3789269ce741..59806d1f7e79 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -55,7 +55,8 @@ enum iavf_vsi_state_t {
struct iavf_vsi {
struct iavf_adapter *back;
struct net_device *netdev;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)];
+ unsigned long active_svlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 seid;
u16 id;
DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
@@ -137,14 +138,24 @@ struct iavf_q_vector {
struct iavf_mac_filter {
struct list_head list;
u8 macaddr[ETH_ALEN];
- bool is_new_mac; /* filter is new, wait for PF decision */
- bool remove; /* filter needs to be removed */
- bool add; /* filter needs to be added */
+ struct {
+ u8 is_new_mac:1; /* filter is new, wait for PF decision */
+ u8 remove:1; /* filter needs to be removed */
+ u8 add:1; /* filter needs to be added */
+ u8 is_primary:1; /* filter is a default VF MAC */
+ u8 padding:4;
+ };
+};
+
+#define IAVF_VLAN(vid, tpid) ((struct iavf_vlan){ vid, tpid })
+struct iavf_vlan {
+ u16 vid;
+ u16 tpid;
};
struct iavf_vlan_filter {
struct list_head list;
- u16 vlan;
+ struct iavf_vlan vlan;
bool remove; /* filter needs to be removed */
bool add; /* filter needs to be added */
};
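IAVF_VLAN() is a compound-literal constructor for the new (vid, tpid) pair, which lets filters distinguish 802.1Q C-tags from 802.1ad S-tags. A small usage sketch; the match helper is hypothetical:

#include <linux/if_ether.h>     /* ETH_P_8021Q / ETH_P_8021AD */

static bool foo_is_ctag_filter(const struct iavf_vlan_filter *f, u16 vid)
{
        struct iavf_vlan want = IAVF_VLAN(vid, ETH_P_8021Q);

        /* a filter now matches on both the VLAN ID and the TPID */
        return f->vlan.vid == want.vid && f->vlan.tpid == want.tpid;
}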
@@ -177,6 +188,8 @@ enum iavf_state_t {
__IAVF_REMOVE, /* driver is being unloaded */
__IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
+ __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS,
+ __IAVF_INIT_CONFIG_ADAPTER,
__IAVF_INIT_SW, /* got resources, setting up structs */
__IAVF_INIT_FAILED, /* init failed, restarting procedure */
__IAVF_RESETTING, /* in reset */
@@ -274,38 +287,47 @@ struct iavf_adapter {
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
- u32 aq_required;
-#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
-#define IAVF_FLAG_AQ_DISABLE_QUEUES BIT(1)
-#define IAVF_FLAG_AQ_ADD_MAC_FILTER BIT(2)
-#define IAVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3)
-#define IAVF_FLAG_AQ_DEL_MAC_FILTER BIT(4)
-#define IAVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5)
-#define IAVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
-#define IAVF_FLAG_AQ_MAP_VECTORS BIT(7)
-#define IAVF_FLAG_AQ_HANDLE_RESET BIT(8)
-#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */
-#define IAVF_FLAG_AQ_GET_CONFIG BIT(10)
+ u64 aq_required;
+#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT_ULL(0)
+#define IAVF_FLAG_AQ_DISABLE_QUEUES BIT_ULL(1)
+#define IAVF_FLAG_AQ_ADD_MAC_FILTER BIT_ULL(2)
+#define IAVF_FLAG_AQ_ADD_VLAN_FILTER BIT_ULL(3)
+#define IAVF_FLAG_AQ_DEL_MAC_FILTER BIT_ULL(4)
+#define IAVF_FLAG_AQ_DEL_VLAN_FILTER BIT_ULL(5)
+#define IAVF_FLAG_AQ_CONFIGURE_QUEUES BIT_ULL(6)
+#define IAVF_FLAG_AQ_MAP_VECTORS BIT_ULL(7)
+#define IAVF_FLAG_AQ_HANDLE_RESET BIT_ULL(8)
+#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */
+#define IAVF_FLAG_AQ_GET_CONFIG BIT_ULL(10)
/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
-#define IAVF_FLAG_AQ_GET_HENA BIT(11)
-#define IAVF_FLAG_AQ_SET_HENA BIT(12)
-#define IAVF_FLAG_AQ_SET_RSS_KEY BIT(13)
-#define IAVF_FLAG_AQ_SET_RSS_LUT BIT(14)
-#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT(15)
-#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT(16)
-#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17)
-#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18)
-#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19)
-#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20)
-#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT(21)
-#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22)
-#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23)
-#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24)
-#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT(25)
-#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26)
-#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27)
-#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28)
-#define IAVF_FLAG_AQ_REQUEST_STATS BIT(29)
+#define IAVF_FLAG_AQ_GET_HENA BIT_ULL(11)
+#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
+#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
+#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
+#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15)
+#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16)
+#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17)
+#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18)
+#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
+#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
+#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
+#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT_ULL(22)
+#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT_ULL(23)
+#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT_ULL(24)
+#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT_ULL(25)
+#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT_ULL(26)
+#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT_ULL(27)
+#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT_ULL(28)
+#define IAVF_FLAG_AQ_REQUEST_STATS BIT_ULL(29)
+#define IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS BIT_ULL(30)
+#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING BIT_ULL(31)
+#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING BIT_ULL(32)
+#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING BIT_ULL(33)
+#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING BIT_ULL(34)
+#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION BIT_ULL(35)
+#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION BIT_ULL(36)
+#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION BIT_ULL(37)
+#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION BIT_ULL(38)
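The widening to u64/BIT_ULL is required once flag indices pass 31: BIT(n) expands to (1UL << (n)), which is undefined behaviour for n >= 32 on 32-bit targets and would be truncated when stored in a u32 in any case. A two-line illustration:

u64 aq_required = BIT_ULL(38);  /* bit 38 of a 64-bit word: well defined */
/* u32 old = BIT(38); */        /* broken: shifts past a 32-bit unsigned long */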
/* OS defined structs */
struct net_device *netdev;
@@ -345,6 +367,14 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_RSS_PF)))
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_VLAN)
+#define VLAN_V2_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_VLAN_V2)
+#define VLAN_V2_FILTERING_ALLOWED(_a) \
+ (VLAN_V2_ALLOWED((_a)) && \
+ ((_a)->vlan_v2_caps.filtering.filtering_support.outer || \
+ (_a)->vlan_v2_caps.filtering.filtering_support.inner))
+#define VLAN_FILTERING_ALLOWED(_a) \
+ (VLAN_ALLOWED((_a)) || VLAN_V2_FILTERING_ALLOWED((_a)))
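The capability macros compose: VLAN_FILTERING_ALLOWED() accepts either the legacy VLAN offload or a VLAN_V2 negotiation that advertises inner or outer filtering. A sketch of gating an ndo callback on it; the error path is hypothetical:

static int foo_vlan_rx_add_vid(struct iavf_adapter *adapter, u16 vid)
{
        if (!VLAN_FILTERING_ALLOWED(adapter))
                return -EIO;    /* hypothetical: PF never granted filtering */

        /* ...queue an IAVF_FLAG_AQ_ADD_VLAN_FILTER request... */
        return 0;
}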
#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
#define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
@@ -356,6 +386,7 @@ struct iavf_adapter {
struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1))
+ struct virtchnl_vlan_caps vlan_v2_caps;
u16 msg_enable;
struct iavf_eth_stats current_stats;
struct iavf_vsi vsi;
@@ -444,6 +475,7 @@ static inline void iavf_change_state(struct iavf_adapter *adapter,
int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
+int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
void iavf_schedule_reset(struct iavf_adapter *adapter);
void iavf_schedule_request_stats(struct iavf_adapter *adapter);
void iavf_reset(struct iavf_adapter *adapter);
@@ -462,6 +494,9 @@ int iavf_send_api_ver(struct iavf_adapter *adapter);
int iavf_verify_api_ver(struct iavf_adapter *adapter);
int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
int iavf_get_vf_config(struct iavf_adapter *adapter);
+int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
+int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
+void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
void iavf_configure_queues(struct iavf_adapter *adapter);
void iavf_deconfigure_queues(struct iavf_adapter *adapter);
@@ -497,6 +532,14 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
+void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
+void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
+void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
+void
+iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
+ netdev_features_t prev_features,
+ netdev_features_t features);
void iavf_add_fdir_filter(struct iavf_adapter *adapter);
void iavf_del_fdir_filter(struct iavf_adapter *adapter);
void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
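Note on the flag block above: bits 23 through 38 no longer fit in a 32-bit mask, which is why every constant is declared with BIT_ULL() and the adapter's aq_required field must be 64 bits wide. A minimal standalone sketch (not driver code; all demo_* names are hypothetical) of why the 64-bit literal matters:

#include <stdint.h>
#include <stdio.h>

#define DEMO_BIT_ULL(n)	(1ULL << (n))	/* 64-bit shift, unlike 1 << (n) */

int main(void)
{
	uint64_t aq_required = 0;

	/* bits at position 32 and above would overflow a 32-bit mask */
	aq_required |= DEMO_BIT_ULL(32);	/* e.g. DISABLE_CTAG_VLAN_STRIPPING */
	aq_required |= DEMO_BIT_ULL(38);	/* e.g. DISABLE_STAG_VLAN_INSERTION */

	printf("pending mask = 0x%llx\n", (unsigned long long)aq_required);
	return 0;
}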
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 9fa3fa99b4c2..cd4e6a22d0f9 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -551,15 +551,13 @@ init_adminq_exit:
**/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
- enum iavf_status ret_code = 0;
-
if (iavf_check_asq_alive(hw))
iavf_aq_queue_shutdown(hw, true);
iavf_shutdown_asq(hw);
iavf_shutdown_arq(hw);
- return ret_code;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 461f5237a2f8..3bb56714beb0 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -331,9 +331,16 @@ static int iavf_get_link_ksettings(struct net_device *netdev,
**/
static int iavf_get_sset_count(struct net_device *netdev, int sset)
{
+	/* Report the maximum number of queues, even if not every queue is
+ * currently configured. Since allocation of queues is in pairs,
+ * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set
+ * at device creation and never changes.
+ */
+
if (sset == ETH_SS_STATS)
return IAVF_STATS_LEN +
- (IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES);
+ (IAVF_QUEUE_STATS_LEN * 2 *
+ netdev->real_num_tx_queues);
else if (sset == ETH_SS_PRIV_FLAGS)
return IAVF_PRIV_FLAGS_STR_LEN;
else
@@ -360,17 +367,18 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
rcu_read_lock();
- for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) {
+	/* As num_active_queues describes both tx and rx queues, we can use
+ * it to iterate over rings' stats.
+ */
+ for (i = 0; i < adapter->num_active_queues; i++) {
struct iavf_ring *ring;
- /* Avoid accessing un-allocated queues */
- ring = (i < adapter->num_active_queues ?
- &adapter->tx_rings[i] : NULL);
+ /* Tx rings stats */
+ ring = &adapter->tx_rings[i];
iavf_add_queue_stats(&data, ring);
- /* Avoid accessing un-allocated queues */
- ring = (i < adapter->num_active_queues ?
- &adapter->rx_rings[i] : NULL);
+ /* Rx rings stats */
+ ring = &adapter->rx_rings[i];
iavf_add_queue_stats(&data, ring);
}
rcu_read_unlock();
@@ -407,10 +415,10 @@ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
iavf_add_stat_strings(&data, iavf_gstrings_stats);
- /* Queues are always allocated in pairs, so we just use num_tx_queues
- * for both Tx and Rx queues.
+ /* Queues are always allocated in pairs, so we just use
+ * real_num_tx_queues for both Tx and Rx queues.
*/
- for (i = 0; i < netdev->num_tx_queues; i++) {
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
"tx", i);
iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
@@ -583,12 +591,16 @@ static void iavf_get_drvinfo(struct net_device *netdev,
* iavf_get_ringparam - Get ring parameters
* @netdev: network interface device structure
* @ring: ethtool ringparam structure
+ * @kernel_ring: ethtool external ringparam structure
+ * @extack: netlink extended ACK report struct
*
* Returns current ring parameters. TX and RX rings are reported separately,
* but the number of rings is not reported.
**/
static void iavf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
@@ -602,12 +614,16 @@ static void iavf_get_ringparam(struct net_device *netdev,
* iavf_set_ringparam - Set ring parameters
* @netdev: network interface device structure
* @ring: ethtool ringparam structure
+ * @kernel_ring: ethtool external ringparam structure
+ * @extack: netlink extended ACK report struct
*
* Sets ring parameters. TX and RX rings are controlled separately, but the
* number of rings is not specified, so all rings get the same settings.
**/
static int iavf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
@@ -1923,7 +1939,7 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
* @key: hash key
* @hfunc: hash function to use
*
- * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
@@ -1932,19 +1948,21 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
- /* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ /* Only support toeplitz hash function */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!indir)
+
+ if (!key && !indir)
return 0;
if (key)
memcpy(adapter->rss_key, key, adapter->rss_key_size);
- /* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < adapter->rss_lut_size; i++)
- adapter->rss_lut[i] = (u8)(indir[i]);
+ if (indir) {
+ /* Each 32 bits pointed by 'indir' is stored with a lut entry */
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = (u8)(indir[i]);
+ }
return iavf_config_rss(adapter);
}
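The reworked iavf_set_rxfh() above treats the hash key and the indirection table as independent inputs instead of rejecting any request that carries a key. A condensed sketch of the same contract, using plain buffers in place of the adapter structure (demo_* names are hypothetical):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static int demo_set_rxfh(uint8_t *rss_key, size_t key_size,
			 uint8_t *rss_lut, size_t lut_size,
			 const uint8_t *key, const uint32_t *indir)
{
	size_t i;

	if (!key && !indir)
		return 0;	/* nothing requested, nothing to change */

	if (key)
		memcpy(rss_key, key, key_size);

	if (indir) {
		/* each 32-bit indir entry is stored as one LUT byte */
		for (i = 0; i < lut_size; i++)
			rss_lut[i] = (uint8_t)indir[i];
	}

	return 0;	/* the driver would now push the config to HW */
}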
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index cfdbf8c08d18..8125b9120615 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -463,14 +463,14 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "iavf-%s-TxRx-%d", basename, rx_int_idx++);
+ "iavf-%s-TxRx-%u", basename, rx_int_idx++);
tx_int_idx++;
} else if (q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "iavf-%s-rx-%d", basename, rx_int_idx++);
+ "iavf-%s-rx-%u", basename, rx_int_idx++);
} else if (q_vector->tx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "iavf-%s-tx-%d", basename, tx_int_idx++);
+ "iavf-%s-tx-%u", basename, tx_int_idx++);
} else {
/* skip this unused q_vector */
continue;
@@ -492,10 +492,10 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
/* Spread the IRQ affinity hints across online CPUs. Note that
* get_cpu_mask returns a mask with a permanent lifetime so
- * it's safe to use as a hint for irq_set_affinity_hint.
+ * it's safe to use as a hint for irq_update_affinity_hint.
*/
cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
return 0;
@@ -505,7 +505,7 @@ free_queue_irqs:
vector--;
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
return err;
@@ -557,7 +557,7 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
for (vector = 0; vector < q_vectors; vector++) {
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
}
@@ -646,14 +646,17 @@ static void iavf_configure_rx(struct iavf_adapter *adapter)
* mac_vlan_list_lock.
**/
static struct
-iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
+iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
+ struct iavf_vlan vlan)
{
struct iavf_vlan_filter *f;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (vlan == f->vlan)
+ if (f->vlan.vid == vlan.vid &&
+ f->vlan.tpid == vlan.tpid)
return f;
}
+
return NULL;
}
@@ -665,7 +668,8 @@ iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
* Returns ptr to the filter object or NULL when no memory available.
**/
static struct
-iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
+iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
+ struct iavf_vlan vlan)
{
struct iavf_vlan_filter *f = NULL;
@@ -694,7 +698,7 @@ clearout:
* @adapter: board private structure
* @vlan: VLAN tag
**/
-static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
+static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
struct iavf_vlan_filter *f;
@@ -720,8 +724,55 @@ static void iavf_restore_filters(struct iavf_adapter *adapter)
u16 vid;
/* re-add all VLAN filters */
- for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
- iavf_add_vlan(adapter, vid);
+ for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
+ iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
+
+ for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
+ iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
+}
+
+/**
+ * iavf_get_num_vlans_added - get number of VLANs added
+ * @adapter: board private structure
+ */
+static u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
+{
+ return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
+ bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
+}
+
+/**
+ * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
+ * @adapter: board private structure
+ *
+ * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
+ * do not impose a limit as that maintains current behavior; for
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
+ **/
+static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
+{
+ /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
+ * never been a limit on the VF driver side
+ */
+ if (VLAN_ALLOWED(adapter))
+ return VLAN_N_VID;
+ else if (VLAN_V2_ALLOWED(adapter))
+ return adapter->vlan_v2_caps.filtering.max_filters;
+
+ return 0;
+}
+
+/**
+ * iavf_max_vlans_added - check if maximum VLANs allowed already exist
+ * @adapter: board private structure
+ **/
+static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
+{
+ if (iavf_get_num_vlans_added(adapter) <
+ iavf_get_max_vlans_allowed(adapter))
+ return false;
+
+ return true;
}
/**
@@ -735,13 +786,23 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- if (!VLAN_ALLOWED(adapter))
+ if (!VLAN_FILTERING_ALLOWED(adapter))
+ return -EIO;
+
+ if (iavf_max_vlans_added(adapter)) {
+ netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
+ iavf_get_max_vlans_allowed(adapter));
return -EIO;
+ }
- if (iavf_add_vlan(adapter, vid) == NULL)
+ if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
return -ENOMEM;
- set_bit(vid, adapter->vsi.active_vlans);
+ if (proto == cpu_to_be16(ETH_P_8021Q))
+ set_bit(vid, adapter->vsi.active_cvlans);
+ else
+ set_bit(vid, adapter->vsi.active_svlans);
+
return 0;
}
@@ -756,8 +817,11 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- iavf_del_vlan(adapter, vid);
- clear_bit(vid, adapter->vsi.active_vlans);
+ iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
+ if (proto == cpu_to_be16(ETH_P_8021Q))
+ clear_bit(vid, adapter->vsi.active_cvlans);
+ else
+ clear_bit(vid, adapter->vsi.active_svlans);
return 0;
}
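With the hunks above, a VLAN filter is keyed by the (VID, TPID) pair rather than the VID alone, and membership is tracked in separate active_cvlans/active_svlans bitmaps. A small sketch of the key semantics (demo_* names are hypothetical; 0x8100 and 0x88A8 are the C-tag and S-tag ethertypes):

struct demo_vlan {
	unsigned short vid;	/* VLAN ID, 0..4095 */
	unsigned short tpid;	/* 0x8100 (CTAG) or 0x88A8 (STAG) */
};

#define DEMO_VLAN(v, t)	((struct demo_vlan){ (v), (t) })

/* the same VID with different TPIDs names two distinct filters */
static int demo_vlan_equal(struct demo_vlan a, struct demo_vlan b)
{
	return a.vid == b.vid && a.tpid == b.tpid;
}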
@@ -1152,6 +1216,86 @@ static void iavf_free_queues(struct iavf_adapter *adapter)
}
/**
+ * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
+ * @adapter: board private structure
+ *
+ * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
+ * stripped in certain descriptor fields. Instead of checking the offload
+ * capability bits in the hot path, cache the location the ring specific
+ * capability bits in the hot path, cache the location in the ring-specific
+ */
+void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_active_queues; i++) {
+ struct iavf_ring *tx_ring = &adapter->tx_rings[i];
+ struct iavf_ring *rx_ring = &adapter->rx_rings[i];
+
+ /* prevent multiple L2TAG bits being set after VFR */
+ tx_ring->flags &=
+ ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
+ IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
+ rx_ring->flags &=
+ ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
+ IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
+
+ if (VLAN_ALLOWED(adapter)) {
+ tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ } else if (VLAN_V2_ALLOWED(adapter)) {
+ struct virtchnl_vlan_supported_caps *stripping_support;
+ struct virtchnl_vlan_supported_caps *insertion_support;
+
+ stripping_support =
+ &adapter->vlan_v2_caps.offloads.stripping_support;
+ insertion_support =
+ &adapter->vlan_v2_caps.offloads.insertion_support;
+
+ if (stripping_support->outer) {
+ if (stripping_support->outer &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+ rx_ring->flags |=
+ IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ else if (stripping_support->outer &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
+ rx_ring->flags |=
+ IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
+ } else if (stripping_support->inner) {
+ if (stripping_support->inner &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+ rx_ring->flags |=
+ IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ else if (stripping_support->inner &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
+ rx_ring->flags |=
+ IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
+ }
+
+ if (insertion_support->outer) {
+ if (insertion_support->outer &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+ tx_ring->flags |=
+ IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ else if (insertion_support->outer &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
+ tx_ring->flags |=
+ IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
+ } else if (insertion_support->inner) {
+ if (insertion_support->inner &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+ tx_ring->flags |=
+ IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ else if (insertion_support->inner &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
+ tx_ring->flags |=
+ IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
+ }
+ }
+ }
+}
+
+/**
* iavf_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
@@ -1212,6 +1356,8 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter)
adapter->num_active_queues = num_active_queues;
+ iavf_set_queue_vlan_tag_loc(adapter);
+
return 0;
err_out:
@@ -1584,6 +1730,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
return iavf_send_vf_config_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
+ return iavf_send_vf_offload_vlan_v2_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
iavf_disable_queues(adapter);
return 0;
@@ -1717,6 +1865,39 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_del_adv_rss_cfg(adapter);
return 0;
}
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
+ iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
+ iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
+ iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
+ iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
+ iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
+ iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
+ iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
+ iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
+ return 0;
+ }
+
if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
iavf_request_stats(adapter);
return 0;
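The dispatch chain above issues at most one virtchnl request per watchdog pass; each branch returns immediately after queueing its request. A table-driven sketch of that pattern (demo_* names are hypothetical; the real driver uses the open-coded if blocks shown):

struct demo_vlan_aq_op {
	unsigned long long flag;	/* pending aq_required bit */
	unsigned short tpid;		/* 0x8100 = CTAG, 0x88A8 = STAG */
	int enable;			/* enable vs. disable request */
};

static int demo_process_one(unsigned long long *aq_required,
			    const struct demo_vlan_aq_op *ops, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (*aq_required & ops[i].flag) {
			/* a real driver would send the virtchnl op here */
			return 1;	/* one request per watchdog pass */
		}
	}
	return 0;	/* nothing pending */
}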
@@ -1726,6 +1907,91 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
}
/**
+ * iavf_set_vlan_offload_features - set VLAN offload configuration
+ * @adapter: board private structure
+ * @prev_features: previous features used for comparison
+ * @features: updated features used for configuration
+ *
+ * Set the aq_required bit(s) based on the requested features passed in to
+ * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
+ * the watchdog if any changes are requested to expedite the request via
+ * virtchnl.
+ **/
+void
+iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
+ netdev_features_t prev_features,
+ netdev_features_t features)
+{
+ bool enable_stripping = true, enable_insertion = true;
+ u16 vlan_ethertype = 0;
+ u64 aq_required = 0;
+
+ /* keep cases separate because one ethertype for offloads can be
+	 * enabled at the same time as another is disabled, so check for an
+ * enabled ethertype first, then check for disabled. Default to
+ * ETH_P_8021Q so an ethertype is specified if disabling insertion and
+ * stripping.
+ */
+ if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
+ vlan_ethertype = ETH_P_8021AD;
+ else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
+ vlan_ethertype = ETH_P_8021Q;
+ else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
+ vlan_ethertype = ETH_P_8021AD;
+ else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
+ vlan_ethertype = ETH_P_8021Q;
+ else
+ vlan_ethertype = ETH_P_8021Q;
+
+ if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
+ enable_stripping = false;
+ if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
+ enable_insertion = false;
+
+ if (VLAN_ALLOWED(adapter)) {
+ /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
+ * stripping via virtchnl. VLAN insertion can be toggled on the
+ * netdev, but it doesn't require a virtchnl message
+ */
+ if (enable_stripping)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+
+ } else if (VLAN_V2_ALLOWED(adapter)) {
+ switch (vlan_ethertype) {
+ case ETH_P_8021Q:
+ if (enable_stripping)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
+
+ if (enable_insertion)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
+ break;
+ case ETH_P_8021AD:
+ if (enable_stripping)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
+
+ if (enable_insertion)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
+ break;
+ }
+ }
+
+ if (aq_required) {
+ adapter->aq_required |= aq_required;
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ }
+}
+
+/**
* iavf_startup - first step of driver startup
* @adapter: board private structure
*
@@ -1827,6 +2093,59 @@ err:
}
/**
+ * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
+ * @adapter: board private structure
+ */
+int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
+{
+ int i, num_req_queues = adapter->num_req_queues;
+ struct iavf_vsi *vsi = &adapter->vsi;
+
+ for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+ if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
+ adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ }
+ if (!adapter->vsi_res) {
+ dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
+ return -ENODEV;
+ }
+
+ if (num_req_queues &&
+ num_req_queues > adapter->vsi_res->num_queue_pairs) {
+ /* Problem. The PF gave us fewer queues than what we had
+		 * negotiated in our request. Need a reset to see if we can
+ * get back to a working state.
+ */
+ dev_err(&adapter->pdev->dev,
+ "Requested %d queues, but PF only gave us %d.\n",
+ num_req_queues,
+ adapter->vsi_res->num_queue_pairs);
+ adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
+ iavf_schedule_reset(adapter);
+
+ return -EAGAIN;
+ }
+ adapter->num_req_queues = 0;
+ adapter->vsi.id = adapter->vsi_res->vsi_id;
+
+ adapter->vsi.back = adapter;
+ adapter->vsi.base_vector = 1;
+ adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
+ vsi->netdev = adapter->netdev;
+ vsi->qs_handle = adapter->vsi_res->qset_handle;
+ if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ adapter->rss_key_size = adapter->vf_res->rss_key_size;
+ adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
+ } else {
+ adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
+ adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
+ }
+
+ return 0;
+}
+
+/**
* iavf_init_get_resources - third step of driver startup
* @adapter: board private structure
*
@@ -1837,7 +2156,6 @@ err:
**/
static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
int err;
@@ -1855,7 +2173,7 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
err = iavf_get_vf_config(adapter);
if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
err = iavf_send_vf_config_msg(adapter);
- goto err;
+ goto err_alloc;
} else if (err == IAVF_ERR_PARAM) {
/* We only get ERR_PARAM if the device is in a very bad
* state or if we've been disabled for previous bad
@@ -1870,9 +2188,83 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
goto err_alloc;
}
- err = iavf_process_config(adapter);
+ err = iavf_parse_vf_resource_msg(adapter);
if (err)
goto err_alloc;
+
+ err = iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (err == -EOPNOTSUPP) {
+ /* underlying PF doesn't support VIRTCHNL_VF_OFFLOAD_VLAN_V2, so
+ * go directly to finishing initialization
+ */
+ iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
+ return;
+ } else if (err) {
+ dev_err(&pdev->dev, "Unable to send offload vlan v2 request (%d)\n",
+ err);
+ goto err_alloc;
+ }
+
+ /* underlying PF supports VIRTCHNL_VF_OFFLOAD_VLAN_V2, so update the
+ * state accordingly
+ */
+ iavf_change_state(adapter, __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);
+ return;
+
+err_alloc:
+ kfree(adapter->vf_res);
+ adapter->vf_res = NULL;
+err:
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
+ * iavf_init_get_offload_vlan_v2_caps - part of driver startup
+ * @adapter: board private structure
+ *
+ * Function processes __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS driver state if the
+ * VF negotiates VIRTCHNL_VF_OFFLOAD_VLAN_V2. If VIRTCHNL_VF_OFFLOAD_VLAN_V2 is
+ * not negotiated, then this state will never be entered.
+ **/
+static void iavf_init_get_offload_vlan_v2_caps(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ WARN_ON(adapter->state != __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);
+
+ memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
+
+ ret = iavf_get_vf_vlan_v2_caps(adapter);
+ if (ret) {
+ if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
+ iavf_send_vf_offload_vlan_v2_msg(adapter);
+ goto err;
+ }
+
+ iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
+ return;
+err:
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
+ * iavf_init_config_adapter - last part of driver startup
+ * @adapter: board private structure
+ *
+ * After all the supported capabilities are negotiated, then the
+ * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
+ */
+static void iavf_init_config_adapter(struct iavf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
+
+ if (iavf_process_config(adapter))
+ goto err;
+
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
@@ -1955,6 +2347,10 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
else
iavf_init_rss(adapter);
+ if (VLAN_V2_ALLOWED(adapter))
+ /* request initial VLAN offload settings */
+ iavf_set_vlan_offload_features(adapter, 0, netdev->features);
+
return;
err_mem:
iavf_free_rss(adapter);
@@ -1962,9 +2358,6 @@ err_register:
iavf_free_misc_irq(adapter);
err_sw_init:
iavf_reset_interrupt_capability(adapter);
-err_alloc:
- kfree(adapter->vf_res);
- adapter->vf_res = NULL;
err:
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
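The init path is now split across three states: __IAVF_INIT_GET_RESOURCES, the optional __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS, and __IAVF_INIT_CONFIG_ADAPTER, where the middle state is entered only when the PF negotiates VIRTCHNL_VF_OFFLOAD_VLAN_V2. A compact sketch of the transition logic (demo_* names are hypothetical):

enum demo_state {
	DEMO_GET_RESOURCES,
	DEMO_GET_OFFLOAD_VLAN_V2_CAPS,
	DEMO_CONFIG_ADAPTER,
};

static enum demo_state demo_next_state(enum demo_state cur, int vlan_v2)
{
	switch (cur) {
	case DEMO_GET_RESOURCES:
		/* skip the caps exchange when VLAN_V2 was not negotiated */
		return vlan_v2 ? DEMO_GET_OFFLOAD_VLAN_V2_CAPS
			       : DEMO_CONFIG_ADAPTER;
	case DEMO_GET_OFFLOAD_VLAN_V2_CAPS:
		return DEMO_CONFIG_ADAPTER;
	default:
		return cur;	/* terminal for this sketch */
	}
}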
@@ -2013,6 +2406,18 @@ static void iavf_watchdog_task(struct work_struct *work)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
+ case __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS:
+ iavf_init_get_offload_vlan_v2_caps(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(1));
+ return;
+ case __IAVF_INIT_CONFIG_ADAPTER:
+ iavf_init_config_adapter(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(1));
+ return;
case __IAVF_INIT_FAILED:
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
@@ -2046,6 +2451,7 @@ static void iavf_watchdog_task(struct work_struct *work)
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
@@ -2065,10 +2471,13 @@ static void iavf_watchdog_task(struct work_struct *work)
iavf_send_api_ver(adapter);
}
} else {
+ int ret = iavf_process_aq_command(adapter);
+
/* An error will be returned if no commands were
* processed; use this opportunity to update stats
+	 * if the error isn't -EOPNOTSUPP
*/
- if (iavf_process_aq_command(adapter) &&
+ if (ret && ret != -EOPNOTSUPP &&
adapter->state == __IAVF_RUNNING)
iavf_request_stats(adapter);
}
@@ -2076,16 +2485,14 @@ static void iavf_watchdog_task(struct work_struct *work)
iavf_detect_recover_hung(&adapter->vsi);
break;
case __IAVF_REMOVE:
- mutex_unlock(&adapter->crit_lock);
- return;
default:
+ mutex_unlock(&adapter->crit_lock);
return;
}
/* check for hw reset */
reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
if (!reg_val) {
- iavf_change_state(adapter, __IAVF_RESETTING);
adapter->flags |= IAVF_FLAG_RESET_PENDING;
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
@@ -2309,6 +2716,13 @@ continue_reset:
}
adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
+ /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
+	 * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here;
+	 * however, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
+ * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
+ * been successfully sent and negotiated
+ */
+ adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
spin_lock_bh(&adapter->mac_vlan_list_lock);
@@ -2709,8 +3123,11 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
total_max_rate += tx_rate;
num_qps += mqprio_qopt->qopt.count[i];
}
- if (num_qps > IAVF_MAX_REQ_QUEUES)
+ if (num_qps > adapter->num_active_queues) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot support requested number of queues\n");
return -EINVAL;
+ }
ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
return ret;
@@ -2911,7 +3328,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
match.mask->dst);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -2921,7 +3338,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
match.mask->src);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -2956,7 +3373,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
match.mask->vlan_id);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
@@ -2980,7 +3397,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
be32_to_cpu(match.mask->dst));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -2990,13 +3407,13 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
be32_to_cpu(match.mask->dst));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
if (match.key->dst) {
vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
@@ -3017,7 +3434,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
if (ipv6_addr_any(&match.mask->dst)) {
dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
IPV6_ADDR_ANY);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
/* src and dest IPv6 address should not be LOOPBACK
@@ -3027,7 +3444,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
ipv6_addr_loopback(&match.key->src)) {
dev_err(&adapter->pdev->dev,
"ipv6 addr should not be loopback\n");
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
if (!ipv6_addr_any(&match.mask->dst) ||
!ipv6_addr_any(&match.mask->src))
@@ -3052,7 +3469,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
be16_to_cpu(match.mask->src));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -3062,7 +3479,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
be16_to_cpu(match.mask->dst));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
if (match.key->dst) {
@@ -3429,6 +3846,8 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (CLIENT_ENABLED(adapter)) {
iavf_notify_client_l2_params(&adapter->vsi);
@@ -3440,6 +3859,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
/**
* iavf_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
@@ -3451,25 +3875,11 @@ static int iavf_set_features(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- /* Don't allow enabling VLAN features when adapter is not capable
- * of VLAN offload/filtering
- */
- if (!VLAN_ALLOWED(adapter)) {
- netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
- if (features & (NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_FILTER))
- return -EINVAL;
- } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- adapter->aq_required |=
- IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
- else
- adapter->aq_required |=
- IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
- }
+ /* trigger update on any VLAN feature change */
+ if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
+ (features & NETIF_VLAN_OFFLOAD_FEATURES))
+ iavf_set_vlan_offload_features(adapter, netdev->features,
+ features);
return 0;
}
@@ -3533,6 +3943,228 @@ out_err:
}
/**
+ * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can be toggled on/off
+ * @adapter: board private structure
+ *
+ * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
+ * were negotiated determine the VLAN features that can be toggled on and off.
+ **/
+static netdev_features_t
+iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
+{
+ netdev_features_t hw_features = 0;
+
+ if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
+ return hw_features;
+
+ /* Enable VLAN features if supported */
+ if (VLAN_ALLOWED(adapter)) {
+ hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
+ } else if (VLAN_V2_ALLOWED(adapter)) {
+ struct virtchnl_vlan_caps *vlan_v2_caps =
+ &adapter->vlan_v2_caps;
+ struct virtchnl_vlan_supported_caps *stripping_support =
+ &vlan_v2_caps->offloads.stripping_support;
+ struct virtchnl_vlan_supported_caps *insertion_support =
+ &vlan_v2_caps->offloads.insertion_support;
+
+ if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
+ stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
+ if (stripping_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100)
+ hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ if (stripping_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+ } else if (stripping_support->inner !=
+ VIRTCHNL_VLAN_UNSUPPORTED &&
+ stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
+ if (stripping_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100)
+ hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
+ insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
+ if (insertion_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100)
+ hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (insertion_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ } else if (insertion_support->inner &&
+ insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
+ if (insertion_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100)
+ hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ }
+ }
+
+ return hw_features;
+}
+
+/**
+ * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
+ * @adapter: board private structure
+ *
+ * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
+ * were negotiated determine the VLAN features that are enabled by default.
+ **/
+static netdev_features_t
+iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
+{
+ netdev_features_t features = 0;
+
+ if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
+ return features;
+
+ if (VLAN_ALLOWED(adapter)) {
+ features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
+ } else if (VLAN_V2_ALLOWED(adapter)) {
+ struct virtchnl_vlan_caps *vlan_v2_caps =
+ &adapter->vlan_v2_caps;
+ struct virtchnl_vlan_supported_caps *filtering_support =
+ &vlan_v2_caps->filtering.filtering_support;
+ struct virtchnl_vlan_supported_caps *stripping_support =
+ &vlan_v2_caps->offloads.stripping_support;
+ struct virtchnl_vlan_supported_caps *insertion_support =
+ &vlan_v2_caps->offloads.insertion_support;
+ u32 ethertype_init;
+
+ /* give priority to outer stripping and don't support both outer
+ * and inner stripping
+ */
+ ethertype_init = vlan_v2_caps->offloads.ethertype_init;
+ if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (stripping_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_RX;
+ else if (stripping_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ features |= NETIF_F_HW_VLAN_STAG_RX;
+ } else if (stripping_support->inner !=
+ VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (stripping_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ /* give priority to outer insertion and don't support both outer
+ * and inner insertion
+ */
+ if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (insertion_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ else if (insertion_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ features |= NETIF_F_HW_VLAN_STAG_TX;
+ } else if (insertion_support->inner !=
+ VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (insertion_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ }
+
+ /* give priority to outer filtering and don't bother if both
+ * outer and inner filtering are enabled
+ */
+ ethertype_init = vlan_v2_caps->filtering.ethertype_init;
+ if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (filtering_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (filtering_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ } else if (filtering_support->inner !=
+ VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (filtering_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (filtering_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ }
+ }
+
+ return features;
+}
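Both helpers above apply the same outer-over-inner precedence: if the PF reports any outer support, the inner capabilities are ignored, and a default-on offload additionally requires its ethertype to appear in ethertype_init. A reduced sketch of that rule for the default CTAG-stripping decision (demo_* names are hypothetical; the cap words are laid out like virtchnl_vlan_supported_caps):

static int demo_default_ctag_rx(unsigned int outer, unsigned int inner,
				unsigned int ethertype_init,
				unsigned int et_8100_bit)
{
	/* zero stands in for VIRTCHNL_VLAN_UNSUPPORTED in this sketch */
	if (outer)
		return (outer & et_8100_bit) &&
		       (ethertype_init & et_8100_bit);
	if (inner)
		return (inner & et_8100_bit) &&
		       (ethertype_init & et_8100_bit);
	return 0;
}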
+
+#define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
+ (!(((requested) & (feature_bit)) && \
+ !((allowed) & (feature_bit))))
+
+/**
+ * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
+ * @adapter: board private structure
+ * @requested_features: stack requested NETDEV features
+ **/
+static netdev_features_t
+iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
+ netdev_features_t requested_features)
+{
+ netdev_features_t allowed_features;
+
+ allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
+ iavf_get_netdev_vlan_features(adapter);
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_CTAG_TX))
+ requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_CTAG_RX))
+ requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_STAG_TX))
+ requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_STAG_RX))
+ requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_CTAG_FILTER))
+ requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_STAG_FILTER))
+ requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
+
+ if ((requested_features &
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
+ (requested_features &
+ (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
+ adapter->vlan_v2_caps.offloads.ethertype_match ==
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
+ netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
+ requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX);
+ }
+
+ return requested_features;
+}
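IAVF_NETDEV_VLAN_FEATURE_ALLOWED() reads as: the request passes unless the bit is asked for but not allowed. The same predicate written as a function, which makes the truth table easier to see (demo_* name is hypothetical):

static int demo_feature_allowed(unsigned long long requested,
				unsigned long long allowed,
				unsigned long long feature_bit)
{
	/* only requested-but-not-allowed is rejected */
	return !((requested & feature_bit) && !(allowed & feature_bit));
}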
+
+/**
* iavf_fix_features - fix up the netdev feature bits
* @netdev: our net device
* @features: desired feature bits
@@ -3544,13 +4176,7 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- if (adapter->vf_res &&
- !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
- features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
-
- return features;
+ return iavf_fix_netdev_vlan_features(adapter, features);
}
static const struct net_device_ops iavf_netdev_ops = {
@@ -3602,39 +4228,11 @@ static int iavf_check_reset_complete(struct iavf_hw *hw)
int iavf_process_config(struct iavf_adapter *adapter)
{
struct virtchnl_vf_resource *vfres = adapter->vf_res;
- int i, num_req_queues = adapter->num_req_queues;
+ netdev_features_t hw_vlan_features, vlan_features;
struct net_device *netdev = adapter->netdev;
- struct iavf_vsi *vsi = &adapter->vsi;
netdev_features_t hw_enc_features;
netdev_features_t hw_features;
- /* got VF config message back from PF, now we can parse it */
- for (i = 0; i < vfres->num_vsis; i++) {
- if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
- adapter->vsi_res = &vfres->vsi_res[i];
- }
- if (!adapter->vsi_res) {
- dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
- return -ENODEV;
- }
-
- if (num_req_queues &&
- num_req_queues > adapter->vsi_res->num_queue_pairs) {
- /* Problem. The PF gave us fewer queues than what we had
- * negotiated in our request. Need a reset to see if we can't
- * get back to a working state.
- */
- dev_err(&adapter->pdev->dev,
- "Requested %d queues, but PF only gave us %d.\n",
- num_req_queues,
- adapter->vsi_res->num_queue_pairs);
- adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
- adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
- iavf_schedule_reset(adapter);
- return -ENODEV;
- }
- adapter->num_req_queues = 0;
-
hw_enc_features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
@@ -3678,19 +4276,19 @@ int iavf_process_config(struct iavf_adapter *adapter)
*/
hw_features = hw_enc_features;
- /* Enable VLAN features if supported */
- if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
- hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX);
+ /* get HW VLAN features that can be toggled */
+ hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
+
/* Enable cloud filter if ADQ is supported */
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
hw_features |= NETIF_F_HW_TC;
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
hw_features |= NETIF_F_GSO_UDP_L4;
- netdev->hw_features |= hw_features;
+ netdev->hw_features |= hw_features | hw_vlan_features;
+ vlan_features = iavf_get_netdev_vlan_features(adapter);
- netdev->features |= hw_features;
+ netdev->features |= hw_features | vlan_features;
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -3715,21 +4313,6 @@ int iavf_process_config(struct iavf_adapter *adapter)
netdev->features &= ~NETIF_F_GSO;
}
- adapter->vsi.id = adapter->vsi_res->vsi_id;
-
- adapter->vsi.back = adapter;
- adapter->vsi.base_vector = 1;
- adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
- vsi->netdev = adapter->netdev;
- vsi->qs_handle = adapter->vsi_res->qset_handle;
- if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
- adapter->rss_key_size = vfres->rss_key_size;
- adapter->rss_lut_size = vfres->rss_lut_size;
- } else {
- adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
- adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
- }
-
return 0;
}
@@ -3999,6 +4582,7 @@ static void iavf_remove(struct pci_dev *pdev)
if (iavf_lock_timeout(&adapter->crit_lock, 5000))
dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
+ dev_info(&adapter->pdev->dev, "Removing device\n");
/* Shut down all the garbage mashers on the detention level */
iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 3525eab8e9f9..8cbe7ad1347c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -865,6 +865,9 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring,
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) &&
+ vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
napi_gro_receive(&q_vector->napi, skb);
}
@@ -1363,7 +1366,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
net_prefetch(va);
/* build an skb around the page buffer */
- skb = build_skb(va - IAVF_SKB_PAD, truesize);
+ skb = napi_build_skb(va - IAVF_SKB_PAD, truesize);
if (unlikely(!skb))
return NULL;
@@ -1468,7 +1471,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
struct iavf_rx_buffer *rx_buffer;
union iavf_rx_desc *rx_desc;
unsigned int size;
- u16 vlan_tag;
+ u16 vlan_tag = 0;
u8 rx_ptype;
u64 qword;
@@ -1551,9 +1554,13 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* populate checksum, VLAN, and protocol */
iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
- le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+ if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
+ rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
+ vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
+ if (rx_desc->wb.qword2.ext_status &
+ cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
+ rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
+ vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
iavf_receive_skb(rx_ring, skb, vlan_tag);
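On the Rx side above, the ring flag cached by iavf_set_queue_vlan_tag_loc() decides which descriptor word carries the stripped tag, so the hot path never re-reads the negotiated capabilities. A condensed sketch (demo_* names are hypothetical; the validity flags stand in for the descriptor status checks):

static unsigned short demo_rx_vlan_tag(unsigned short ring_flags,
				       unsigned short loc_l2tag1,
				       unsigned short loc_l2tag2_2,
				       int l2tag1_valid, unsigned short l2tag1,
				       int l2tag2_valid, unsigned short l2tag2_2)
{
	unsigned short vlan_tag = 0;

	if (l2tag1_valid && (ring_flags & loc_l2tag1))
		vlan_tag = l2tag1;
	if (l2tag2_valid && (ring_flags & loc_l2tag2_2))
		vlan_tag = l2tag2_2;

	return vlan_tag;	/* 0 means no tag for this packet */
}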
@@ -1766,7 +1773,7 @@ tx_only:
if (likely(napi_complete_done(napi, work_done)))
iavf_update_enable_itr(vsi, q_vector);
- return min(work_done, budget - 1);
+ return min_t(int, work_done, budget - 1);
}
/**
@@ -1781,46 +1788,29 @@ tx_only:
 * Returns an error code to indicate the frame should be dropped upon error,
 * otherwise returns 0 to indicate the flags have been set properly.
**/
-static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct iavf_ring *tx_ring,
- u32 *flags)
+static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct iavf_ring *tx_ring, u32 *flags)
{
- __be16 protocol = skb->protocol;
u32 tx_flags = 0;
- if (protocol == htons(ETH_P_8021Q) &&
- !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
- /* When HW VLAN acceleration is turned off by the user the
- * stack sets the protocol to 8021q so that the driver
- * can take any steps required to support the SW only
- * VLAN handling. In our case the driver doesn't need
- * to take any further steps so just set the protocol
- * to the encapsulated ethertype.
- */
- skb->protocol = vlan_get_protocol(skb);
- goto out;
- }
- /* if we have a HW VLAN tag being added, default to the HW one */
- if (skb_vlan_tag_present(skb)) {
- tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
- tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
- /* else if it is a SW VLAN, check the next protocol and store the tag */
- } else if (protocol == htons(ETH_P_8021Q)) {
- struct vlan_hdr *vhdr, _vhdr;
-
- vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
- if (!vhdr)
- return -EINVAL;
+ /* stack will only request hardware VLAN insertion offload for protocols
+ * that the driver supports and has enabled
+ */
+ if (!skb_vlan_tag_present(skb))
+ return;
- protocol = vhdr->h_vlan_encapsulated_proto;
- tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
- tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
+ tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
+ if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) {
+ tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
+ } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
+ } else {
+ dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n");
+ return;
}
-out:
*flags = tx_flags;
- return 0;
}
/**
@@ -2440,8 +2430,13 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
first->gso_segs = 1;
/* prepare the xmit flags */
- if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
- goto out_drop;
+ iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags);
+ if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
+ cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 <<
+ IAVF_TXD_CTX_QW1_CMD_SHIFT;
+ cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
+ IAVF_TX_FLAGS_VLAN_SHIFT;
+ }
/* obtain protocol of skb */
protocol = vlan_get_protocol(skb);
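The Tx counterpart: an outer tag is carried in the context descriptor (L2TAG2, selected via IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN above), while a single or inner tag rides in the data descriptor (L2TAG1). A minimal sketch of the placement decision (demo_* names are hypothetical):

static unsigned int demo_tx_vlan_loc(unsigned short ring_flags,
				     unsigned short loc_l2tag2,
				     unsigned short loc_l2tag1)
{
	if (ring_flags & loc_l2tag2)
		return 2;	/* program tag via the context descriptor */
	if (ring_flags & loc_l2tag1)
		return 1;	/* program tag via the data descriptor */
	return 0;	/* unsupported location: send the frame untagged */
}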
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index e5b9ba42dd00..2624bf6d009e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -243,19 +243,20 @@ static inline unsigned int iavf_txd_use_count(unsigned int size)
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IAVF_MIN_DESC_PENDING 4
-#define IAVF_TX_FLAGS_HW_VLAN BIT(1)
-#define IAVF_TX_FLAGS_SW_VLAN BIT(2)
-#define IAVF_TX_FLAGS_TSO BIT(3)
-#define IAVF_TX_FLAGS_IPV4 BIT(4)
-#define IAVF_TX_FLAGS_IPV6 BIT(5)
-#define IAVF_TX_FLAGS_FCCRC BIT(6)
-#define IAVF_TX_FLAGS_FSO BIT(7)
-#define IAVF_TX_FLAGS_FD_SB BIT(9)
-#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10)
-#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000
-#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
-#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29
-#define IAVF_TX_FLAGS_VLAN_SHIFT 16
+#define IAVF_TX_FLAGS_HW_VLAN BIT(1)
+#define IAVF_TX_FLAGS_SW_VLAN BIT(2)
+#define IAVF_TX_FLAGS_TSO BIT(3)
+#define IAVF_TX_FLAGS_IPV4 BIT(4)
+#define IAVF_TX_FLAGS_IPV6 BIT(5)
+#define IAVF_TX_FLAGS_FCCRC BIT(6)
+#define IAVF_TX_FLAGS_FSO BIT(7)
+#define IAVF_TX_FLAGS_FD_SB BIT(9)
+#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10)
+#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(11)
+#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define IAVF_TX_FLAGS_VLAN_SHIFT 16
struct iavf_tx_buffer {
struct iavf_tx_desc *next_to_watch;
@@ -362,6 +363,9 @@ struct iavf_ring {
u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
+#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
+#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4)
+#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5)
/* stats structs */
struct iavf_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index d60bf7c21200..5ee1d118fd30 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -137,6 +137,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP |
+ VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
VIRTCHNL_VF_OFFLOAD_ADQ |
@@ -155,6 +156,19 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
NULL, 0);
}
+int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
+{
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
+
+ if (!VLAN_V2_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
+ NULL, 0);
+}
+
/**
* iavf_validate_num_queues
* @adapter: adapter structure
@@ -235,6 +249,45 @@ out:
return err;
}
+int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
+{
+ struct iavf_hw *hw = &adapter->hw;
+ struct iavf_arq_event_info event;
+ enum virtchnl_ops op;
+ enum iavf_status err;
+ u16 len;
+
+ len = sizeof(struct virtchnl_vlan_caps);
+ event.buf_len = len;
+ event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
+ if (!event.msg_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ while (1) {
+ /* When the AQ is empty, iavf_clean_arq_element will return
+ * nonzero and this loop will terminate.
+ */
+ err = iavf_clean_arq_element(hw, &event, NULL);
+ if (err)
+ goto out_alloc;
+ op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+ if (op == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
+ break;
+ }
+
+ err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
+ if (err)
+ goto out_alloc;
+
+ memcpy(&adapter->vlan_v2_caps, event.msg_buf, min(event.msg_len, len));
+out_alloc:
+ kfree(event.msg_buf);
+out:
+ return err;
+}
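iavf_get_vf_vlan_v2_caps() above polls the admin receive queue until the VLAN_V2 caps response arrives, and the memcpy is bounded by min(event.msg_len, len) so a short PF reply cannot overrun the local struct. The wait loop in isolation (demo_* names are hypothetical):

static int demo_wait_for_op(int (*dequeue)(unsigned int *op_out),
			    unsigned int wanted_op)
{
	unsigned int op;
	int err;

	for (;;) {
		err = dequeue(&op);
		if (err)
			return err;	/* queue empty or dequeue failed */
		if (op == wanted_op)
			return 0;	/* expected response received */
	}
}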
+
/**
* iavf_configure_queues
* @adapter: adapter structure
@@ -589,7 +642,6 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter)
**/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
- struct virtchnl_vlan_filter_list *vvfl;
int len, i = 0, count = 0;
struct iavf_vlan_filter *f;
bool more = false;
@@ -607,48 +659,105 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
if (f->add)
count++;
}
- if (!count || !VLAN_ALLOWED(adapter)) {
+ if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
- adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
- len = sizeof(struct virtchnl_vlan_filter_list) +
- (count * sizeof(u16));
- if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
- count = (IAVF_MAX_AQ_BUF_SIZE -
- sizeof(struct virtchnl_vlan_filter_list)) /
- sizeof(u16);
- len = sizeof(struct virtchnl_vlan_filter_list) +
- (count * sizeof(u16));
- more = true;
- }
- vvfl = kzalloc(len, GFP_ATOMIC);
- if (!vvfl) {
+ if (VLAN_ALLOWED(adapter)) {
+ struct virtchnl_vlan_filter_list *vvfl;
+
+ adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
+
+ len = sizeof(*vvfl) + (count * sizeof(u16));
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
+ sizeof(u16);
+ len = sizeof(*vvfl) + (count * sizeof(u16));
+ more = true;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ return;
+ }
+
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->add) {
+ vvfl->vlan_id[i] = f->vlan.vid;
+ i++;
+ f->add = false;
+ if (i == count)
+ break;
+ }
+ }
+ if (!more)
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- return;
- }
- vvfl->vsi_id = adapter->vsi_res->vsi_id;
- vvfl->num_elements = count;
- list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->add) {
- vvfl->vlan_id[i] = f->vlan;
- i++;
- f->add = false;
- if (i == count)
- break;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ } else {
+ struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
+
+ adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;
+
+ len = sizeof(*vvfl_v2) + ((count - 1) *
+ sizeof(struct virtchnl_vlan_filter));
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) /
+ sizeof(struct virtchnl_vlan_filter);
+ len = sizeof(*vvfl_v2) +
+ ((count - 1) *
+ sizeof(struct virtchnl_vlan_filter));
+ more = true;
}
- }
- if (!more)
- adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
- spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ vvfl_v2 = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl_v2) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ return;
+ }
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
- kfree(vvfl);
+ vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
+ vvfl_v2->num_elements = count;
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->add) {
+ struct virtchnl_vlan_supported_caps *filtering_support =
+ &adapter->vlan_v2_caps.filtering.filtering_support;
+ struct virtchnl_vlan *vlan;
+
+				/* give priority to outer filtering if it's enabled */
+ if (filtering_support->outer)
+ vlan = &vvfl_v2->filters[i].outer;
+ else
+ vlan = &vvfl_v2->filters[i].inner;
+
+ vlan->tci = f->vlan.vid;
+ vlan->tpid = f->vlan.tpid;
+
+ i++;
+ f->add = false;
+ if (i == count)
+ break;
+ }
+ }
+
+ if (!more)
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
+ (u8 *)vvfl_v2, len);
+ kfree(vvfl_v2);
+ }
}
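Both branches above cap a single admin-queue message at IAVF_MAX_AQ_BUF_SIZE and set "more" so the leftover filters go out in a follow-up request. A minimal userspace sketch of that clamping arithmetic, assuming a 4096-byte cap and a simplified stand-in for virtchnl_vlan_filter_list:

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_AQ_BUF_SIZE 4096		/* assumed cap, for illustration */

	struct vlan_filter_list {		/* simplified stand-in */
		uint16_t vsi_id;
		uint16_t num_elements;
		uint16_t vlan_id[];		/* one u16 per VLAN ID */
	};

	int main(void)
	{
		size_t count = 4000;		/* filters marked for addition */
		size_t len = sizeof(struct vlan_filter_list) +
			     count * sizeof(uint16_t);
		int more = 0;

		if (len > MAX_AQ_BUF_SIZE) {
			count = (MAX_AQ_BUF_SIZE -
				 sizeof(struct vlan_filter_list)) /
				sizeof(uint16_t);
			len = sizeof(struct vlan_filter_list) +
			      count * sizeof(uint16_t);
			more = 1;	/* leftovers go in a later request */
		}
		printf("send %zu IDs in %zu bytes, more=%d\n", count, len, more);
		return 0;
	}

With these numbers the request is trimmed to 2046 IDs in exactly 4096 bytes, and the unsent filters keep f->add set for the next pass.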
/**
@@ -659,7 +768,6 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
**/
void iavf_del_vlans(struct iavf_adapter *adapter)
{
- struct virtchnl_vlan_filter_list *vvfl;
struct iavf_vlan_filter *f, *ftmp;
int len, i = 0, count = 0;
bool more = false;
@@ -680,56 +788,116 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
* filters marked for removal to enable bailing out before
* sending a virtchnl message
*/
- if (f->remove && !VLAN_ALLOWED(adapter)) {
+ if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
list_del(&f->list);
kfree(f);
} else if (f->remove) {
count++;
}
}
- if (!count) {
+ if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
- adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
- len = sizeof(struct virtchnl_vlan_filter_list) +
- (count * sizeof(u16));
- if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
- count = (IAVF_MAX_AQ_BUF_SIZE -
- sizeof(struct virtchnl_vlan_filter_list)) /
- sizeof(u16);
- len = sizeof(struct virtchnl_vlan_filter_list) +
- (count * sizeof(u16));
- more = true;
- }
- vvfl = kzalloc(len, GFP_ATOMIC);
- if (!vvfl) {
+ if (VLAN_ALLOWED(adapter)) {
+ struct virtchnl_vlan_filter_list *vvfl;
+
+ adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
+
+ len = sizeof(*vvfl) + (count * sizeof(u16));
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
+ count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
+ sizeof(u16);
+ len = sizeof(*vvfl) + (count * sizeof(u16));
+ more = true;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ return;
+ }
+
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->remove) {
+ vvfl->vlan_id[i] = f->vlan.vid;
+ i++;
+ list_del(&f->list);
+ kfree(f);
+ if (i == count)
+ break;
+ }
+ }
+
+ if (!more)
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- return;
- }
- vvfl->vsi_id = adapter->vsi_res->vsi_id;
- vvfl->num_elements = count;
- list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
- if (f->remove) {
- vvfl->vlan_id[i] = f->vlan;
- i++;
- list_del(&f->list);
- kfree(f);
- if (i == count)
- break;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ } else {
+ struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
+
+ adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;
+
+ len = sizeof(*vvfl_v2) +
+ ((count - 1) * sizeof(struct virtchnl_vlan_filter));
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
+			dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
+ count = (IAVF_MAX_AQ_BUF_SIZE -
+ sizeof(*vvfl_v2)) /
+ sizeof(struct virtchnl_vlan_filter);
+ len = sizeof(*vvfl_v2) +
+ ((count - 1) *
+ sizeof(struct virtchnl_vlan_filter));
+ more = true;
}
- }
- if (!more)
- adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
- spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ vvfl_v2 = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl_v2) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ return;
+ }
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
- kfree(vvfl);
+ vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
+ vvfl_v2->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->remove) {
+ struct virtchnl_vlan_supported_caps *filtering_support =
+ &adapter->vlan_v2_caps.filtering.filtering_support;
+ struct virtchnl_vlan *vlan;
+
+				/* prefer the outer filter when outer filtering is enabled */
+ if (filtering_support->outer)
+ vlan = &vvfl_v2->filters[i].outer;
+ else
+ vlan = &vvfl_v2->filters[i].inner;
+
+ vlan->tci = f->vlan.vid;
+ vlan->tpid = f->vlan.tpid;
+
+ list_del(&f->list);
+ kfree(f);
+ i++;
+ if (i == count)
+ break;
+ }
+ }
+
+ if (!more)
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
+ (u8 *)vvfl_v2, len);
+ kfree(vvfl_v2);
+ }
}
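In both v2 paths each filter lands in either the outer or the inner VLAN slot of the element, chosen from the negotiated filtering support. A small sketch of that placement rule, using simplified stand-in structs:

	#include <stdio.h>
	#include <stdint.h>

	struct vlan { uint16_t tci, tpid; };
	struct vlan_filter { struct vlan inner, outer; };	/* stand-in */

	int main(void)
	{
		struct vlan_filter f = { 0 };
		int outer_filtering = 1;	/* from the negotiated caps */
		/* prefer the outer header when the PF can filter on it */
		struct vlan *dst = outer_filtering ? &f.outer : &f.inner;

		dst->tci = 42;			/* VLAN ID */
		dst->tpid = 0x8100;		/* ETH_P_8021Q */
		printf("outer.tci=%u inner.tci=%u\n", f.outer.tci, f.inner.tci);
		return 0;
	}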
/**
@@ -762,15 +930,23 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
if (flags & FLAG_VF_MULTICAST_PROMISC) {
adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
- dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
+ dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
+ adapter->netdev->name);
}
if (!flags) {
- adapter->flags &= ~(IAVF_FLAG_PROMISC_ON |
- IAVF_FLAG_ALLMULTI_ON);
- adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC |
- IAVF_FLAG_AQ_RELEASE_ALLMULTI);
- dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
+ adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
+ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ }
+
+ if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
+ adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
+ dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
+ adapter->netdev->name);
+ }
}
adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
@@ -948,6 +1124,204 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
}
+/**
+ * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype
+ * @tpid: VLAN TPID (e.g. 0x8100 or 0x88a8)
+ */
+static u32 iavf_tpid_to_vc_ethertype(u16 tpid)
+{
+ switch (tpid) {
+ case ETH_P_8021Q:
+ return VIRTCHNL_VLAN_ETHERTYPE_8100;
+ case ETH_P_8021AD:
+ return VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ }
+
+ return 0;
+}
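A usage sketch for the mapping above; the VC_ETHERTYPE_* bit values here are illustrative stand-ins for the virtchnl definitions, and an unknown TPID maps to 0 so callers can reject it:

	#include <stdio.h>
	#include <stdint.h>

	#define ETH_P_8021Q		0x8100
	#define ETH_P_8021AD		0x88A8
	#define VC_ETHERTYPE_8100	0x1	/* illustrative bit values */
	#define VC_ETHERTYPE_88A8	0x2

	static uint32_t tpid_to_vc_ethertype(uint16_t tpid)
	{
		switch (tpid) {
		case ETH_P_8021Q:
			return VC_ETHERTYPE_8100;
		case ETH_P_8021AD:
			return VC_ETHERTYPE_88A8;
		}
		return 0;	/* unknown TPID; callers treat 0 as unsupported */
	}

	int main(void)
	{
		printf("0x8100 -> %u, 0x9100 -> %u\n",
		       tpid_to_vc_ethertype(0x8100),
		       tpid_to_vc_ethertype(0x9100));
		return 0;
	}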
+
+/**
+ * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message
+ * @adapter: adapter structure
+ * @msg: message structure used for updating offloads over virtchnl to update
+ * @tpid: VLAN TPID (e.g. 0x8100 or 0x88a8)
+ * @offload_op: opcode used to determine which support structure to check
+ */
+static int
+iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter,
+ struct virtchnl_vlan_setting *msg, u16 tpid,
+ enum virtchnl_ops offload_op)
+{
+ struct virtchnl_vlan_supported_caps *offload_support;
+ u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid);
+
+ /* reference the correct offload support structure */
+ switch (offload_op) {
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ offload_support =
+ &adapter->vlan_v2_caps.offloads.stripping_support;
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ offload_support =
+ &adapter->vlan_v2_caps.offloads.insertion_support;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n",
+ offload_op);
+ return -EINVAL;
+ }
+
+ /* make sure ethertype is supported */
+ if (offload_support->outer & vc_ethertype &&
+ offload_support->outer & VIRTCHNL_VLAN_TOGGLE) {
+ msg->outer_ethertype_setting = vc_ethertype;
+ } else if (offload_support->inner & vc_ethertype &&
+ offload_support->inner & VIRTCHNL_VLAN_TOGGLE) {
+ msg->inner_ethertype_setting = vc_ethertype;
+ } else {
+ dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n",
+ offload_op, tpid);
+ return -EINVAL;
+ }
+
+ return 0;
+}
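The support test above requires both the ethertype bit and the TOGGLE bit in the same mask before an offload may be switched at runtime, preferring outer over inner. A compact sketch of that check, with illustrative bit values:

	#include <stdio.h>
	#include <stdint.h>

	#define VC_ETHERTYPE_8100	0x1	/* illustrative bit values */
	#define VC_TOGGLE		0x8

	static int pick_setting(uint32_t outer, uint32_t inner, uint32_t want,
				uint32_t *outer_set, uint32_t *inner_set)
	{
		/* usable only where both the ethertype bit and the TOGGLE
		 * bit are present in the negotiated support mask
		 */
		if ((outer & want) && (outer & VC_TOGGLE))
			*outer_set = want;
		else if ((inner & want) && (inner & VC_TOGGLE))
			*inner_set = want;
		else
			return -1;	/* maps to -EINVAL in the driver */
		return 0;
	}

	int main(void)
	{
		uint32_t outer_set = 0, inner_set = 0;
		/* PF offers 0x8100 on the inner header only, with toggling */
		int rc = pick_setting(0x0, VC_ETHERTYPE_8100 | VC_TOGGLE,
				      VC_ETHERTYPE_8100, &outer_set, &inner_set);

		printf("rc=%d outer=0x%x inner=0x%x\n", rc, outer_set, inner_set);
		return 0;
	}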
+
+/**
+ * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID
+ * @offload_op: opcode used to determine which AQ required bit to clear
+ */
+static void
+iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
+ enum virtchnl_ops offload_op)
+{
+ switch (offload_op) {
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ if (tpid == ETH_P_8021Q)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
+ else if (tpid == ETH_P_8021AD)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ if (tpid == ETH_P_8021Q)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
+ else if (tpid == ETH_P_8021AD)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ if (tpid == ETH_P_8021Q)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
+ else if (tpid == ETH_P_8021AD)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ if (tpid == ETH_P_8021Q)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
+ else if (tpid == ETH_P_8021AD)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
+ offload_op);
+ }
+}
+
+/**
+ * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used for the command (e.g. 0x8100 or 0x88a8)
+ * @offload_op: offload_op used to make the request over virtchnl
+ */
+static void
+iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid,
+ enum virtchnl_ops offload_op)
+{
+ struct virtchnl_vlan_setting *msg;
+ int len = sizeof(*msg);
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n",
+ offload_op, adapter->current_op);
+ return;
+ }
+
+ adapter->current_op = offload_op;
+
+ msg = kzalloc(len, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ msg->vport_id = adapter->vsi_res->vsi_id;
+
+	/* always clear the pending bit so an unsupported request is
+	 * not retried endlessly
+	 */
+ iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op);
+
+ /* only send valid offload requests */
+ if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op))
+ iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len);
+ else
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+
+ kfree(msg);
+}
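Note the ordering in iavf_send_vlan_offload_v2: the aq_required bit is cleared before the request is validated, so an unsupported TPID cannot be re-queued by the watchdog forever. A toy sketch of the pattern, with a hypothetical flag bit:

	#include <stdio.h>
	#include <stdint.h>

	#define AQ_ENABLE_CTAG_STRIPPING 0x1	/* hypothetical flag bit */

	static int request_is_supported(void)
	{
		return 0;	/* pretend the PF rejected this TPID */
	}

	int main(void)
	{
		uint32_t aq_required = AQ_ENABLE_CTAG_STRIPPING;

		/* clear first: if the bit stayed set on an unsupported
		 * request, it would be re-issued endlessly
		 */
		aq_required &= ~AQ_ENABLE_CTAG_STRIPPING;

		if (request_is_supported())
			printf("send virtchnl message\n");
		else
			printf("drop request, aq_required=0x%x\n", aq_required);
		return 0;
	}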
+
+/**
+ * iavf_enable_vlan_stripping_v2 - enable VLAN stripping
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used to enable VLAN stripping
+ */
+void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
+{
+ iavf_send_vlan_offload_v2(adapter, tpid,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2);
+}
+
+/**
+ * iavf_disable_vlan_stripping_v2 - disable VLAN stripping
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used to disable VLAN stripping
+ */
+void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
+{
+ iavf_send_vlan_offload_v2(adapter, tpid,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2);
+}
+
+/**
+ * iavf_enable_vlan_insertion_v2 - enable VLAN insertion
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used to enable VLAN insertion
+ */
+void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
+{
+ iavf_send_vlan_offload_v2(adapter, tpid,
+ VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2);
+}
+
+/**
+ * iavf_disable_vlan_insertion_v2 - disable VLAN insertion
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used to disable VLAN insertion
+ */
+void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
+{
+ iavf_send_vlan_offload_v2(adapter, tpid,
+ VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
+}
+
#define IAVF_MAX_SPEED_STRLEN 13
/**
@@ -1017,7 +1391,7 @@ print_link_msg:
} else if (link_speed_mbps == SPEED_UNKNOWN) {
snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
} else {
- snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s",
+ snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
link_speed_mbps, "Mbps");
}
@@ -1522,7 +1896,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_print_link_message(adapter);
break;
case VIRTCHNL_EVENT_RESET_IMPENDING:
- dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
+ dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
adapter->flags |= IAVF_FLAG_RESET_PENDING;
dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
@@ -1751,6 +2125,26 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ iavf_parse_vf_resource_msg(adapter);
+
+ /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
+ * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish
+ * configuration
+ */
+ if (VLAN_V2_ALLOWED(adapter))
+ break;
+ /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2
+ * wasn't successfully negotiated with the PF
+ */
+ }
+ fallthrough;
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
+ if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
+ memcpy(&adapter->vlan_v2_caps, msg,
+ min_t(u16, msglen,
+ sizeof(adapter->vlan_v2_caps)));
+
iavf_process_config(adapter);
/* unlock crit_lock before acquiring rtnl_lock as other
@@ -1758,13 +2152,23 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
* crit_lock
*/
mutex_unlock(&adapter->crit_lock);
+ /* VLAN capabilities can change during VFR, so make sure to
+ * update the netdev features with the new capabilities
+ */
rtnl_lock();
- netdev_update_features(adapter->netdev);
+ netdev_update_features(netdev);
rtnl_unlock();
if (iavf_lock_timeout(&adapter->crit_lock, 10000))
dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
__FUNCTION__);
+ /* Request VLAN offload settings */
+ if (VLAN_V2_ALLOWED(adapter))
+ iavf_set_vlan_offload_features(adapter, 0,
+ netdev->features);
+
+ iavf_set_queue_vlan_tag_loc(adapter);
+
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
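The memcpy of the v2 capabilities above is bounded by min_t(u16, msglen, sizeof(adapter->vlan_v2_caps)), so a PF reply longer than the local struct cannot overrun it. A standalone sketch of the same guard:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	struct vlan_caps { uint32_t words[8]; };	/* 32-byte destination */

	int main(void)
	{
		struct vlan_caps dst = { 0 };
		uint8_t msg[64] = { 1, 2, 3 };		/* what the PF sent */
		uint16_t msglen = sizeof(msg);
		/* min_t(u16, msglen, sizeof(dst)) in kernel terms */
		size_t n = msglen < sizeof(dst) ? msglen : sizeof(dst);

		memcpy(&dst, msg, n);	/* never overruns dst */
		printf("copied %zu of %u bytes\n", n, (unsigned)msglen);
		return 0;
	}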
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b2db39ee5f85..4e16d185077d 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -112,7 +112,6 @@
#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
#define ICE_CHNL_START_TC 1
-#define ICE_CHNL_MAX_TC 16
#define ICE_MAX_RESET_WAIT 20
@@ -201,6 +200,7 @@ struct ice_channel {
struct ice_aqc_vsi_props info;
u64 max_tx_rate;
u64 min_tx_rate;
+ atomic_t num_sb_fltr;
struct ice_vsi *ch_vsi;
};
@@ -503,6 +503,7 @@ struct ice_pf {
struct pci_dev *pdev;
struct devlink_region *nvm_region;
+ struct devlink_region *sram_region;
struct devlink_region *devcaps_region;
/* devlink port data */
@@ -552,6 +553,7 @@ struct ice_pf {
spinlock_t aq_wait_lock;
struct hlist_head aq_wait_list;
wait_queue_head_t aq_wait_queue;
+ bool fw_emp_reset_disabled;
wait_queue_head_t reset_wait_queue;
@@ -576,6 +578,7 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded:1; /* has previous stats been loaded */
+ u8 rdma_mode;
u16 dcbx_cap;
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
@@ -789,6 +792,9 @@ static inline void ice_clear_sriov_cap(struct ice_pf *pf)
#define ICE_FD_STAT_PF_IDX(base_idx) \
((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
+#define ICE_FD_STAT_CH 1
+#define ICE_FD_CH_STAT_IDX(base_idx) \
+ (ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH)
/**
* ice_is_adq_active - any active ADQs
@@ -847,9 +853,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
-const char *ice_stat_str(enum ice_status stat_err);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
+void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
bool is_tun);
@@ -860,6 +866,7 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
u32 *rule_locs);
+void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx);
void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 4eef3488d86f..ad1dcfa5ff65 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -117,6 +117,8 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
#define ICE_AQC_CAPS_RDMA 0x0051
+#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
u8 major_ver;
@@ -1408,6 +1410,11 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
#define ICE_AQC_NVM_ACTIV_SEL_MASK ICE_M(0x7, 3)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
+#define ICE_AQC_NVM_RESET_LVL_M ICE_M(0x3, 0) /* Write reply only */
+#define ICE_AQC_NVM_POR_FLAG 0
+#define ICE_AQC_NVM_PERST_FLAG 1
+#define ICE_AQC_NVM_EMPR_FLAG 2
+#define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
__le16 module_typeid;
__le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 1efc635cc0f5..1a5ece3bce79 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -6,6 +6,18 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
+{
+ rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
+ return !!rx_ring->xdp_buf;
+}
+
+static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
+{
+ rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
+ return !!rx_ring->rx_buf;
+}
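The two helpers above allocate the per-descriptor buffer arrays and report success as a bool, letting the caller translate failure into -ENOMEM. A userspace sketch with calloc standing in for kcalloc:

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct rx_ring {
		size_t count;
		void **rx_buf;
	};

	static bool alloc_rx_buf(struct rx_ring *r)
	{
		r->rx_buf = calloc(r->count, sizeof(*r->rx_buf));
		return !!r->rx_buf;	/* false lets the caller return -ENOMEM */
	}

	int main(void)
	{
		struct rx_ring r = { .count = 512 };

		printf("alloc %s\n", alloc_rx_buf(&r) ? "ok" : "failed");
		free(r.rx_buf);
		return 0;
	}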
+
/**
* __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
* @qs_cfg: gathered variables needed for PF->VSI queues assignment
@@ -492,8 +504,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index, ring->q_vector->napi.napi_id);
+ kfree(ring->rx_buf);
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
+ if (!ice_alloc_rx_buf_zc(ring))
+ return -ENOMEM;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
@@ -508,6 +523,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
+ if (!ice_alloc_rx_buf(ring))
+ return -ENOMEM;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
@@ -759,7 +776,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
+ int status;
u16 pf_q;
u8 tc;
@@ -804,9 +821,9 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
ring->q_handle, 1, qg_buf, buf_len,
NULL);
if (status) {
- dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
- ice_stat_str(status));
- return -ENODEV;
+ dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ return status;
}
/* Add Tx Queue TEID into the VSI Tx ring from the
@@ -929,7 +946,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
struct ice_pf *pf = vsi->back;
struct ice_q_vector *q_vector;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
+ int status;
u32 val;
/* clear cause_ena bit for disabled queues */
@@ -953,18 +970,18 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
rel_vmvf_num, NULL);
/* if the disable queue command was exercised during an
- * active reset flow, ICE_ERR_RESET_ONGOING is returned.
+ * active reset flow, -EBUSY is returned.
* This is not an error as the reset operation disables
* queues at the hardware level anyway.
*/
- if (status == ICE_ERR_RESET_ONGOING) {
+ if (status == -EBUSY) {
dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
- } else if (status == ICE_ERR_DOES_NOT_EXIST) {
+ } else if (status == -ENOENT) {
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) {
- dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
- ice_stat_str(status));
- return -ENODEV;
+ dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
+ status);
+ return status;
}
return 0;
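Two error codes from the disable-queues path are deliberately treated as benign, since an active reset tears the queues down at the hardware level anyway. A sketch of that triage, with a stub that returns -EBUSY:

	#include <stdio.h>
	#include <errno.h>

	static int dis_queues(void)
	{
		return -EBUSY;		/* stub: a reset is already in flight */
	}

	int main(void)
	{
		int status = dis_queues();

		if (status == -EBUSY)
			printf("reset in progress, queues already disabled\n");
		else if (status == -ENOENT)
			printf("queues do not exist, nothing to do\n");
		else if (status)
			return 1;	/* only unexpected errors propagate */
		return 0;
	}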
diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
new file mode 100644
index 000000000000..57abd52386d0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_CGU_REGS_H_
+#define _ICE_CGU_REGS_H_
+
+#define NAC_CGU_DWORD9 0x24
+union nac_cgu_dword9 {
+ struct {
+ u32 time_ref_freq_sel : 3;
+ u32 clk_eref1_en : 1;
+ u32 clk_eref0_en : 1;
+ u32 time_ref_en : 1;
+ u32 time_sync_en : 1;
+ u32 one_pps_out_en : 1;
+ u32 clk_ref_synce_en : 1;
+ u32 clk_synce1_en : 1;
+ u32 clk_synce0_en : 1;
+ u32 net_clk_ref1_en : 1;
+ u32 net_clk_ref0_en : 1;
+ u32 clk_synce1_amp : 2;
+ u32 misc6 : 1;
+ u32 clk_synce0_amp : 2;
+ u32 one_pps_out_amp : 2;
+ u32 misc24 : 12;
+ } field;
+ u32 val;
+};
+
+#define NAC_CGU_DWORD19 0x4c
+union nac_cgu_dword19 {
+ struct {
+ u32 tspll_fbdiv_intgr : 8;
+ u32 fdpll_ulck_thr : 5;
+ u32 misc15 : 3;
+ u32 tspll_ndivratio : 4;
+ u32 tspll_iref_ndivratio : 3;
+ u32 misc19 : 1;
+ u32 japll_ndivratio : 4;
+ u32 japll_iref_ndivratio : 3;
+ u32 misc27 : 1;
+ } field;
+ u32 val;
+};
+
+#define NAC_CGU_DWORD22 0x58
+union nac_cgu_dword22 {
+ struct {
+ u32 fdpll_frac_div_out_nc : 2;
+ u32 fdpll_lock_int_for : 1;
+ u32 synce_hdov_int_for : 1;
+ u32 synce_lock_int_for : 1;
+ u32 fdpll_phlead_slip_nc : 1;
+ u32 fdpll_acc1_ovfl_nc : 1;
+ u32 fdpll_acc2_ovfl_nc : 1;
+ u32 synce_status_nc : 6;
+ u32 fdpll_acc1f_ovfl : 1;
+ u32 misc18 : 1;
+ u32 fdpllclk_div : 4;
+ u32 time1588clk_div : 4;
+ u32 synceclk_div : 4;
+ u32 synceclk_sel_div2 : 1;
+ u32 fdpllclk_sel_div2 : 1;
+ u32 time1588clk_sel_div2 : 1;
+ u32 misc3 : 1;
+ } field;
+ u32 val;
+};
+
+#define NAC_CGU_DWORD24 0x60
+union nac_cgu_dword24 {
+ struct {
+ u32 tspll_fbdiv_frac : 22;
+ u32 misc20 : 2;
+ u32 ts_pll_enable : 1;
+ u32 time_sync_tspll_align_sel : 1;
+ u32 ext_synce_sel : 1;
+ u32 ref1588_ck_div : 4;
+ u32 time_ref_sel : 1;
+ } field;
+ u32 val;
+};
+
+#define TSPLL_CNTR_BIST_SETTINGS 0x344
+union tspll_cntr_bist_settings {
+ struct {
+ u32 i_irefgen_settling_time_cntr_7_0 : 8;
+ u32 i_irefgen_settling_time_ro_standby_1_0 : 2;
+ u32 reserved195 : 5;
+ u32 i_plllock_sel_0 : 1;
+ u32 i_plllock_sel_1 : 1;
+ u32 i_plllock_cnt_6_0 : 7;
+ u32 i_plllock_cnt_10_7 : 4;
+ u32 reserved200 : 4;
+ } field;
+ u32 val;
+};
+
+#define TSPLL_RO_BWM_LF 0x370
+union tspll_ro_bwm_lf {
+ struct {
+ u32 bw_freqov_high_cri_7_0 : 8;
+ u32 bw_freqov_high_cri_9_8 : 2;
+ u32 biascaldone_cri : 1;
+ u32 plllock_gain_tran_cri : 1;
+ u32 plllock_true_lock_cri : 1;
+ u32 pllunlock_flag_cri : 1;
+ u32 afcerr_cri : 1;
+ u32 afcdone_cri : 1;
+ u32 feedfwrdgain_cal_cri_7_0 : 8;
+ u32 m2fbdivmod_cri_7_0 : 8;
+ } field;
+ u32 val;
+};
+
+#endif /* _ICE_CGU_REGS_H_ */
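These unions let CGU code read a register dword once, edit named fields, and write .val back in one shot. A sketch of the read-modify-write idiom using a shortened copy of one layout; bitfield ordering is compiler-defined, which is tolerable here only because kernel builds target a known ABI:

	#include <stdio.h>
	#include <stdint.h>

	union cgu_dword24 {			/* shortened copy of the layout */
		struct {
			uint32_t tspll_fbdiv_frac : 22;
			uint32_t misc20 : 2;
			uint32_t ts_pll_enable : 1;
			uint32_t rest : 7;	/* remaining fields folded */
		} field;
		uint32_t val;
	};

	int main(void)
	{
		union cgu_dword24 dw;

		dw.val = 0x00155000;		/* pretend this came from rd32() */
		dw.field.ts_pll_enable = 1;	/* flip one field in place */
		printf("write back 0x%08x\n", dw.val);	/* would go to wr32() */
		return 0;
	}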
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index b3066d0fea8b..408d15a5b0e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice_common.h"
-#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
@@ -16,10 +15,10 @@
* This function sets the MAC type of the adapter based on the
* vendor ID and device ID stored in the HW structure.
*/
-static enum ice_status ice_set_mac_type(struct ice_hw *hw)
+static int ice_set_mac_type(struct ice_hw *hw)
{
if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
- return ICE_ERR_DEVICE_NOT_SUPPORTED;
+ return -ENODEV;
switch (hw->device_id) {
case ICE_DEV_ID_E810C_BACKPLANE:
@@ -99,7 +98,7 @@ bool ice_is_e810t(struct ice_hw *hw)
* Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
* configuration, flow director filters, etc.).
*/
-enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
+int ice_clear_pf_cfg(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -123,21 +122,21 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
* ice_discover_dev_caps is expected to be called before this function is
* called.
*/
-static enum ice_status
+static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_read_resp *resp;
struct ice_aqc_manage_mac_read *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u16 flags;
u8 i;
cmd = &desc.params.mac_read;
if (buf_size < sizeof(*resp))
- return ICE_ERR_BUF_TOO_SHORT;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
@@ -150,7 +149,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
- return ICE_ERR_CFG;
+ return -EIO;
}
/* A single port can report up to two (LAN and WoL) addresses */
@@ -176,7 +175,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
*
* Returns the various PHY capabilities supported on the Port (0x0600)
*/
-enum ice_status
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *pcaps,
struct ice_sq_cd *cd)
@@ -184,18 +183,18 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
cmd = &desc.params.get_phy;
if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
!ice_fw_supports_report_dflt_cfg(hw))
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
@@ -252,7 +251,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
* returns error (ENOENT), then no cage present. If no cage present, then
* connection type is backplane or BASE-T.
*/
-static enum ice_status
+static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
struct ice_sq_cd *cd)
{
@@ -418,7 +417,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-enum ice_status
+int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -429,12 +428,12 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_fc_info *hw_fc_info;
bool tx_pause, rx_pause;
struct ice_aq_desc desc;
- enum ice_status status;
struct ice_hw *hw;
u16 cmd_flags;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
li_old = &pi->phy.link_info_old;
hw_media_type = &pi->phy.media_type;
@@ -556,7 +555,7 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
*
* Set MAC configuration (0x0603)
*/
-enum ice_status
+int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_cfg *cmd;
@@ -565,7 +564,7 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
cmd = &desc.params.set_mac_cfg;
if (max_frame_size == 0)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
@@ -580,17 +579,17 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the HW struct
*/
-static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
+static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
struct ice_switch_info *sw;
- enum ice_status status;
+ int status;
hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*hw->switch_info), GFP_KERNEL);
sw = hw->switch_info;
if (!sw)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
INIT_LIST_HEAD(&sw->vsi_list_map_head);
sw->prof_res_bm_init = 0;
@@ -666,17 +665,17 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
* ice_get_fw_log_cfg - get FW logging configuration
* @hw: pointer to the HW struct
*/
-static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
+static int ice_get_fw_log_cfg(struct ice_hw *hw)
{
struct ice_aq_desc desc;
- enum ice_status status;
__le16 *config;
+ int status;
u16 size;
size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
if (!config)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
@@ -738,15 +737,15 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
* messages from FW to SW. Interrupts are typically disabled during the device's
* initialization phase.
*/
-static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
+static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
struct ice_aqc_fw_logging *cmd;
- enum ice_status status = 0;
u16 i, chgs = 0, len = 0;
struct ice_aq_desc desc;
__le16 *data = NULL;
u8 actv_evnts = 0;
void *buf = NULL;
+ int status = 0;
if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
return 0;
@@ -790,7 +789,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
sizeof(*data),
GFP_KERNEL);
if (!data)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
val = i << ICE_AQC_FW_LOG_ID_S;
@@ -904,12 +903,12 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_init_hw(struct ice_hw *hw)
+int ice_init_hw(struct ice_hw *hw)
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
u16 mac_buf_len;
void *mac_buf;
+ int status;
/* Set MAC type based on DeviceID */
status = ice_set_mac_type(hw);
@@ -956,7 +955,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*hw->port_info), GFP_KERNEL);
if (!hw->port_info) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll_cqinit;
}
@@ -985,7 +984,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
if (!pcaps) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll_sched;
}
@@ -1006,7 +1005,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
/* need a valid SW entry point to build a Tx tree */
if (!hw->sw_entry_point_layer) {
ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_unroll_sched;
}
INIT_LIST_HEAD(&hw->agg_list);
@@ -1026,7 +1025,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
if (!mac_buf) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll_fltr_mgmt_struct;
}
@@ -1096,7 +1095,7 @@ void ice_deinit_hw(struct ice_hw *hw)
* ice_check_reset - Check to see if a global reset is complete
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_check_reset(struct ice_hw *hw)
+int ice_check_reset(struct ice_hw *hw)
{
u32 cnt, reg = 0, grst_timeout, uld_mask;
@@ -1116,7 +1115,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
if (cnt == grst_timeout) {
ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
}
#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
@@ -1143,7 +1142,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
reg);
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
}
return 0;
@@ -1156,7 +1155,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
* If a global reset has been triggered, this function checks
* for its completion and then issues the PF reset
*/
-static enum ice_status ice_pf_reset(struct ice_hw *hw)
+static int ice_pf_reset(struct ice_hw *hw)
{
u32 cnt, reg;
@@ -1169,7 +1168,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
(rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
/* poll on global reset currently in progress until done */
if (ice_check_reset(hw))
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
return 0;
}
@@ -1194,7 +1193,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
}
return 0;
@@ -1212,7 +1211,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
* This has to be cleared using ice_clear_pxe_mode again, once the AQ
* interface has been restored in the rebuild flow.
*/
-enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
+int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
u32 val = 0;
@@ -1228,7 +1227,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
val = GLGEN_RTRIG_GLOBR_M;
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
val |= rd32(hw, GLGEN_RTRIG);
@@ -1247,16 +1246,16 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
*
* Copies rxq context from dense structure to HW register space
*/
-static enum ice_status
+static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
u8 i;
if (!ice_rxq_ctx)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
if (rxq_index > QRX_CTRL_MAX_INDEX)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Copy each dword separately to HW */
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
@@ -1306,14 +1305,14 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* it to HW register space and enables the hardware to prefetch descriptors
* instead of only fetching them on demand
*/
-enum ice_status
+int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
if (!rlan_ctx)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
rlan_ctx->prefena = 1;
@@ -1369,9 +1368,8 @@ static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
- return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
- (struct ice_aq_desc *)desc,
- buf, buf_size, cd));
+ return ice_sq_send_cmd(hw, ice_get_sbq(hw),
+ (struct ice_aq_desc *)desc, buf, buf_size, cd);
}
/**
@@ -1453,17 +1451,17 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode)
* Retry sending the FW Admin Queue command, multiple times, to the FW Admin
* Queue if the EBUSY AQ error is returned.
*/
-static enum ice_status
+static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc_cpy;
- enum ice_status status;
bool is_cmd_for_retry;
u8 *buf_cpy = NULL;
u8 idx = 0;
u16 opcode;
+ int status;
opcode = le16_to_cpu(desc->opcode);
is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
@@ -1473,7 +1471,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (buf) {
buf_cpy = kzalloc(buf_size, GFP_KERNEL);
if (!buf_cpy)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
memcpy(&desc_cpy, desc, sizeof(desc_cpy));
@@ -1510,13 +1508,13 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
*
* Helper function to send FW Admin Queue commands to the FW Admin Queue.
*/
-enum ice_status
+int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
struct ice_aqc_req_res *cmd = &desc->params.res_owner;
bool lock_acquired = false;
- enum ice_status status;
+ int status;
/* When a package download is in process (i.e. when the firmware's
* Global Configuration Lock resource is held), only the Download
@@ -1555,11 +1553,11 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
*
* Get the firmware version (0x0001) from the admin queue commands
*/
-enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
+int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
struct ice_aqc_get_ver *resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
resp = &desc.params.get_ver;
@@ -1590,7 +1588,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
*
* Send the driver version (0x0002) to the firmware
*/
-enum ice_status
+int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd)
{
@@ -1601,7 +1599,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
cmd = &desc.params.driver_ver;
if (!dv)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
@@ -1627,7 +1625,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well (0x0003).
*/
-enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
+int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
struct ice_aqc_q_shutdown *cmd;
struct ice_aq_desc desc;
@@ -1654,12 +1652,12 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* Requests common resource using the admin queue commands (0x0008).
* When attempting to acquire the Global Config Lock, the driver can
* learn of three states:
- * 1) ICE_SUCCESS - acquired lock, and can perform download package
- * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
- * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
- * successfully downloaded the package; the driver does
- * not have to download the package and can continue
- * loading
+ * 1) 0 - acquired lock, and can perform download package
+ * 2) -EIO - did not get lock, driver should fail to load
+ * 3) -EALREADY - did not get lock, but another driver has
+ * successfully downloaded the package; the driver does
+ * not have to download the package and can continue
+ * loading
*
* Note that if the caller is in an acquire lock, perform action, release lock
* phase of operation, it is possible that the FW may detect a timeout and issue
@@ -1668,14 +1666,14 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* will likely get an error propagated back to it indicating the Download
* Package, Update Package or the Release Resource AQ commands timed out.
*/
-static enum ice_status
+static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
struct ice_sq_cd *cd)
{
struct ice_aqc_req_res *cmd_resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd_resp = &desc.params.res_owner;
@@ -1707,15 +1705,15 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
} else if (le16_to_cpu(cmd_resp->status) ==
ICE_AQ_RES_GLBL_IN_PROG) {
*timeout = le32_to_cpu(cmd_resp->timeout);
- return ICE_ERR_AQ_ERROR;
+ return -EIO;
} else if (le16_to_cpu(cmd_resp->status) ==
ICE_AQ_RES_GLBL_DONE) {
- return ICE_ERR_AQ_NO_WORK;
+ return -EALREADY;
}
/* invalid FW response, force a timeout immediately */
*timeout = 0;
- return ICE_ERR_AQ_ERROR;
+ return -EIO;
}
/* If the resource is held by some other driver, the command completes
@@ -1737,7 +1735,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
*
* release common resource using the admin queue commands (0x0009)
*/
-static enum ice_status
+static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
struct ice_sq_cd *cd)
{
@@ -1763,23 +1761,23 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
*
* This function will attempt to acquire the ownership of a resource.
*/
-enum ice_status
+int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
u32 delay = ICE_RES_POLLING_DELAY_MS;
u32 time_left = timeout;
- enum ice_status status;
+ int status;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
+ /* A return code of -EALREADY means that another driver has
* previously acquired the resource and performed any necessary updates;
* in this case the caller does not obtain the resource and has no
* further work to do.
*/
- if (status == ICE_ERR_AQ_NO_WORK)
+ if (status == -EALREADY)
goto ice_acquire_res_exit;
if (status)
@@ -1792,7 +1790,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
timeout = (timeout > delay) ? timeout - delay : 0;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- if (status == ICE_ERR_AQ_NO_WORK)
+ if (status == -EALREADY)
/* lock free, but no work to do */
break;
@@ -1800,15 +1798,15 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/* lock acquired */
break;
}
- if (status && status != ICE_ERR_AQ_NO_WORK)
+ if (status && status != -EALREADY)
ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
ice_acquire_res_exit:
- if (status == ICE_ERR_AQ_NO_WORK) {
+ if (status == -EALREADY) {
if (access == ICE_RES_WRITE)
ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
else
- ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
+ ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
}
return status;
}
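ice_acquire_res keeps polling while ice_aq_req_res returns -EIO (lock busy) and stops early on -EALREADY (another driver already did the work). A toy model of that control flow:

	#include <stdio.h>
	#include <errno.h>

	static int polls;

	static int aq_req_res(void)
	{
		/* -EIO models "lock busy, keep polling"; the lock frees
		 * up on the third attempt in this toy version
		 */
		return ++polls < 3 ? -EIO : 0;
	}

	int main(void)
	{
		int status = aq_req_res();

		if (status != -EALREADY)	/* -EALREADY: nothing left to do */
			while (status == -EIO && polls < 10)
				status = aq_req_res();	/* real code sleeps here */

		printf("status=%d after %d polls\n", status, polls);
		return 0;
	}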
@@ -1822,16 +1820,15 @@ ice_acquire_res_exit:
*/
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
- enum ice_status status;
u32 total_delay = 0;
+ int status;
status = ice_aq_release_res(hw, res, 0, NULL);
/* there are some rare cases when trying to release the resource
* results in an admin queue timeout, so handle them correctly
*/
- while ((status == ICE_ERR_AQ_TIMEOUT) &&
- (total_delay < hw->adminq.sq_cmd_timeout)) {
+ while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) {
mdelay(1);
status = ice_aq_release_res(hw, res, 0, NULL);
total_delay++;
@@ -1849,7 +1846,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
*
* Helper function to allocate/free resources using the admin queue commands
*/
-enum ice_status
+int
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
@@ -1860,10 +1857,10 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
cmd = &desc.params.sw_res_ctrl;
if (!buf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (buf_size < flex_array_size(buf, elem, num_entries))
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
@@ -1882,17 +1879,17 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
* @btm: allocate from bottom
* @res: pointer to array that will receive the resources
*/
-enum ice_status
+int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Prepare buffer to allocate resource. */
buf->num_elems = cpu_to_le16(num);
@@ -1920,16 +1917,16 @@ ice_alloc_res_exit:
* @num: number of resources
* @res: pointer to array that contains the resources to free
*/
-enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Prepare buffer to free resource. */
buf->num_elems = cpu_to_le16(num);
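Both allocators size a fixed header plus a trailing flexible array; struct_size() computes that with overflow checking. A plain-C sketch of the same sizing, assuming a simplified element type:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	struct res_elem { uint16_t e; };	/* hypothetical element */
	struct alloc_free_res {			/* simplified stand-in */
		uint16_t num_elems;
		struct res_elem elem[];		/* trailing flexible array */
	};

	int main(void)
	{
		uint16_t num = 4;
		/* struct_size(buf, elem, num) also guards against overflow */
		size_t len = sizeof(struct alloc_free_res) +
			     num * sizeof(struct res_elem);
		struct alloc_free_res *buf = calloc(1, len);

		if (!buf)
			return 1;		/* -ENOMEM in the driver */
		buf->num_elems = num;
		printf("buffer is %zu bytes for %u elements\n", len,
		       (unsigned)num);
		free(buf);
		return 0;
	}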
@@ -2071,6 +2068,18 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
prefix, caps->max_mtu);
break;
+ case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: pcie_reset_avoidance = %d\n", prefix,
+ caps->pcie_reset_avoidance);
+ break;
+ case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: reset_restrict_support = %d\n", prefix,
+ caps->reset_restrict_support);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
@@ -2180,6 +2189,18 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+ if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
+ info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
+ } else {
+ /* Unknown clock frequency, so assume a (probably incorrect)
+		 * default to avoid out-of-bounds lookups of frequency-related
+		 * information.
+ */
+ ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
+ info->clk_freq);
+ info->time_ref = ICE_TIME_REF_FREQ_25_000;
+ }
+
ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
func_p->common_cap.ieee_1588);
ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
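The hunk above checks the raw clk_freq against NUM_ICE_TIME_REF_FREQ before casting, falling back to a safe default rather than indexing frequency tables out of bounds. A minimal sketch, with hypothetical enum values:

	#include <stdio.h>

	enum time_ref_freq {			/* hypothetical frequencies */
		TIME_REF_FREQ_25_000,
		TIME_REF_FREQ_122_880,
		NUM_TIME_REF_FREQ
	};

	int main(void)
	{
		unsigned int clk_freq = 7;	/* raw field from the caps word */
		enum time_ref_freq time_ref;

		if (clk_freq < NUM_TIME_REF_FREQ)
			time_ref = (enum time_ref_freq)clk_freq;
		else
			time_ref = TIME_REF_FREQ_25_000; /* safe default */
		printf("time_ref=%d\n", (int)time_ref);
		return 0;
	}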
@@ -2486,19 +2507,19 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
* buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
* firmware could return) to avoid this.
*/
-enum ice_status
+int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aqc_list_caps *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_cap;
if (opc != ice_aqc_opc_list_func_caps &&
opc != ice_aqc_opc_list_dev_caps)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -2517,16 +2538,16 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
* Read the device capabilities and extract them into the dev_caps structure
* for later use.
*/
-enum ice_status
+int
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
- enum ice_status status;
u32 cap_count = 0;
void *cbuf;
+ int status;
cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!cbuf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Although the driver doesn't know the number of capabilities the
* device will return, we can simply send a 4KB buffer, the maximum
@@ -2551,16 +2572,16 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
* Read the function capabilities and extract them into the func_caps structure
* for later use.
*/
-static enum ice_status
+static int
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
- enum ice_status status;
u32 cap_count = 0;
void *cbuf;
+ int status;
cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!cbuf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Although the driver doesn't know the number of capabilities the
* device will return, we can simply send a 4KB buffer, the maximum
@@ -2650,9 +2671,9 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
* ice_get_caps - get info about the HW
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_get_caps(struct ice_hw *hw)
+int ice_get_caps(struct ice_hw *hw)
{
- enum ice_status status;
+ int status;
status = ice_discover_dev_caps(hw, &hw->dev_caps);
if (status)
@@ -2670,7 +2691,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
*
* This function is used to write MAC address to the NVM (0x0108).
*/
-enum ice_status
+int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
@@ -2692,7 +2713,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
*
* Tell the firmware that the driver is taking over from PXE (0x0110).
*/
-static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
+static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -2903,15 +2924,15 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
* mode as the PF may not have the privilege to set some of the PHY Config
* parameters. This status will be indicated by the command response (0x0601).
*/
-enum ice_status
+int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!cfg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Ensure that only valid bits of cfg->caps can be turned on. */
if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
@@ -2952,13 +2973,13 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
* ice_update_link_info - update status of the HW network link
* @pi: port info structure of the interested logical port
*/
-enum ice_status ice_update_link_info(struct ice_port_info *pi)
+int ice_update_link_info(struct ice_port_info *pi)
{
struct ice_link_status *li;
- enum ice_status status;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
li = &pi->phy.link_info;
@@ -2974,7 +2995,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
GFP_KERNEL);
if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
@@ -3070,7 +3091,7 @@ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
* @cfg: PHY configuration data to set FC mode
* @req_mode: FC mode to configure
*/
-enum ice_status
+int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fc_mode req_mode)
{
@@ -3078,7 +3099,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
u8 pause_mask = 0x0;
if (!pi || !cfg)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
switch (req_mode) {
case ICE_FC_FULL:
@@ -3117,23 +3138,23 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
*
* Set the requested flow control mode.
*/
-enum ice_status
+int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || !aq_failures)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
*aq_failures = 0;
hw = pi->hw;
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Get the current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
@@ -3258,22 +3279,22 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
* @cfg: PHY configuration data to set FEC mode
* @fec: FEC mode to configure
*/
-enum ice_status
+int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || !cfg)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
hw = pi->hw;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false,
(ice_fw_supports_report_dflt_cfg(hw) ?
@@ -3313,7 +3334,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
cfg->link_fec_opt |= pcaps->link_fec_options;
break;
default:
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
break;
}
@@ -3344,13 +3365,13 @@ out:
 * The variable link_up is invalid if status is non-zero. As a
* result of this call, link status reporting becomes enabled
*/
-enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
+int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
struct ice_phy_info *phy_info;
- enum ice_status status = 0;
+ int status = 0;
if (!pi || !link_up)
- return ICE_ERR_PARAM;
+ return -EINVAL;
phy_info = &pi->phy;
@@ -3375,7 +3396,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
*
* Sets up the link and restarts the Auto-Negotiation over the link.
*/
-enum ice_status
+int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd)
{
@@ -3405,7 +3426,7 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
*
* Set event mask (0x0613)
*/
-enum ice_status
+int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd)
{
@@ -3430,7 +3451,7 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
*
* Enable/disable loopback on a given port
*/
-enum ice_status
+int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_lb *cmd;
@@ -3453,7 +3474,7 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
*
* Set LED value for the given port (0x06e9)
*/
-enum ice_status
+int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd)
{
@@ -3488,17 +3509,17 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
*
* Read/Write SFF EEPROM (0x06EE)
*/
-enum ice_status
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd)
{
struct ice_aqc_sff_eeprom *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data || (mem_addr & 0xff00))
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
cmd = &desc.params.read_write_sff_param;
@@ -3527,23 +3548,23 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
*
* Internal function to get (0x0B05) or set (0x0B03) RSS look up table
*/
-static enum ice_status
+static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
struct ice_aqc_get_set_rss_lut *cmd_resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u8 *lut;
if (!params)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_handle = params->vsi_handle;
lut = params->lut;
if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
- return ICE_ERR_PARAM;
+ return -EINVAL;
lut_size = params->lut_size;
lut_type = params->lut_type;
@@ -3572,7 +3593,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
break;
default:
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ice_aq_get_set_rss_lut_exit;
}
@@ -3607,7 +3628,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
}
fallthrough;
default:
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ice_aq_get_set_rss_lut_exit;
}
@@ -3626,7 +3647,7 @@ ice_aq_get_set_rss_lut_exit:
*
* get the RSS lookup table, PF or VSI type
*/
-enum ice_status
+int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
return __ice_aq_get_set_rss_lut(hw, get_params, false);
@@ -3639,7 +3660,7 @@ ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_
*
* set the RSS lookup table, PF or VSI type
*/
-enum ice_status
+int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
return __ice_aq_get_set_rss_lut(hw, set_params, true);
@@ -3654,10 +3675,9 @@ ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_
*
* get (0x0B04) or set (0x0B02) the RSS key per VSI
*/
-static enum
-ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
- struct ice_aqc_get_set_rss_keys *key,
- bool set)
+static int
+__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ struct ice_aqc_get_set_rss_keys *key, bool set)
{
struct ice_aqc_get_set_rss_key *cmd_resp;
u16 key_size = sizeof(*key);
@@ -3688,12 +3708,12 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
*
* get the RSS key per VSI
*/
-enum ice_status
+int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *key)
{
if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
- return ICE_ERR_PARAM;
+ return -EINVAL;
return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
key, false);
@@ -3707,12 +3727,12 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
*
* set the RSS key per VSI
*/
-enum ice_status
+int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys)
{
if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
- return ICE_ERR_PARAM;
+ return -EINVAL;
return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
keys, true);
@@ -3739,7 +3759,7 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
* Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
* flow.
*/
-static enum ice_status
+static int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
struct ice_sq_cd *cd)
@@ -3754,10 +3774,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
if (!qg_list)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
for (i = 0, list = qg_list; i < num_qgrps; i++) {
sum_size += struct_size(list, txqs, list->num_txqs);
@@ -3766,7 +3786,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
}
if (buf_size != sum_size)
- return ICE_ERR_PARAM;
+ return -EINVAL;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -3787,7 +3807,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
*
* Disable LAN Tx queue (0x0C31)
*/
-static enum ice_status
+static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
@@ -3796,18 +3816,18 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
u16 i, sz = 0;
+ int status;
cmd = &desc.params.dis_txqs;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
/* qg_list can be NULL only in VM/VF reset flow */
if (!qg_list && !rst_src)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd->num_entries = num_qgrps;
@@ -3856,7 +3876,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
}
if (buf_size != sz)
- return ICE_ERR_PARAM;
+ return -EINVAL;
do_aq:
status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
@@ -3914,8 +3934,7 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
cmd->num_qset_grps = num_qset_grps;
- return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
- buf_size, cd));
+ return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}
/* End of FW Admin Queue command wrappers */
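The same conversion repeats through every wrapper above: each now hands back a standard errno, so the ice_status_to_errno() translation layer drops out, as the ice_aq_add_rdma_qsets hunk shows. A minimal caller sketch under that assumption (example_set_loopback is invented for illustration; only ice_aq_set_mac_loopback() is from this patch):

/* Hypothetical caller: the errno from the converted wrapper propagates
 * directly, with no ice_status_to_errno() step.
 */
static int example_set_loopback(struct ice_hw *hw, bool ena)
{
	int err;

	err = ice_aq_set_mac_loopback(hw, ena, NULL);
	if (err)	/* already -EINVAL, -EIO, ... */
		return err;

	return 0;
}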
@@ -4111,7 +4130,7 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @dest_ctx: pointer to memory for the packed structure
* @ce_info: a description of the structure to be transformed
*/
-enum ice_status
+int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info)
{
@@ -4141,7 +4160,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
break;
default:
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
}
}
@@ -4185,7 +4204,7 @@ ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
*
* This function adds one LAN queue
*/
-enum ice_status
+int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -4193,19 +4212,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_sched_node *parent;
struct ice_q_ctx *q_ctx;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
- return ICE_ERR_CFG;
+ return -EIO;
if (num_qgrps > 1 || buf->num_txqs > 1)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
hw = pi->hw;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&pi->sched_lock);
@@ -4213,7 +4232,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
if (!q_ctx) {
ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
q_handle);
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ena_txq_exit;
}
@@ -4221,7 +4240,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
ICE_SCHED_NODE_OWNER_LAN);
if (!parent) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ena_txq_exit;
}
@@ -4290,20 +4309,20 @@ ena_txq_exit:
*
* This function removes queues and their corresponding nodes in SW DB
*/
-enum ice_status
+int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handles, u16 *q_ids, u32 *q_teids,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item *qg_list;
struct ice_q_ctx *q_ctx;
+ int status = -ENOENT;
struct ice_hw *hw;
u16 i, buf_size;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
- return ICE_ERR_CFG;
+ return -EIO;
hw = pi->hw;
@@ -4315,13 +4334,13 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
if (rst_src)
return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
vmvf_num, NULL);
- return ICE_ERR_CFG;
+ return -EIO;
}
buf_size = struct_size(qg_list, q_id, 1);
qg_list = kzalloc(buf_size, GFP_KERNEL);
if (!qg_list)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
mutex_lock(&pi->sched_lock);
@@ -4368,18 +4387,18 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
*
* This function adds/updates the VSI queues per TC.
*/
-static enum ice_status
+static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *maxqs, u8 owner)
{
- enum ice_status status = 0;
+ int status = 0;
u8 i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
- return ICE_ERR_CFG;
+ return -EIO;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&pi->sched_lock);
@@ -4407,7 +4426,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
*
* This function adds/updates the VSI LAN queues per TC.
*/
-enum ice_status
+int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs)
{
@@ -4428,9 +4447,8 @@ int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_rdmaqs)
{
- return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
- max_rdmaqs,
- ICE_SCHED_NODE_OWNER_RDMA));
+ return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
+ ICE_SCHED_NODE_OWNER_RDMA);
}
/**
@@ -4451,7 +4469,6 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_aqc_add_rdma_qset_data *buf;
struct ice_sched_node *parent;
- enum ice_status status;
struct ice_hw *hw;
u16 i, buf_size;
int ret;
@@ -4502,12 +4519,10 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
for (i = 0; i < num_qsets; i++) {
node.node_teid = buf->rdma_qsets[i].qset_teid;
- status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
- &node);
- if (status) {
- ret = ice_status_to_errno(status);
+ ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
+ &node);
+ if (ret)
break;
- }
qset_teid[i] = le32_to_cpu(node.node_teid);
}
rdma_error_exit:
@@ -4528,8 +4543,8 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
struct ice_aqc_dis_txq_item *qg_list;
- enum ice_status status = 0;
struct ice_hw *hw;
+ int status = 0;
u16 qg_size;
int i;
@@ -4568,7 +4583,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
mutex_unlock(&pi->sched_lock);
kfree(qg_list);
- return ice_status_to_errno(status);
+ return status;
}
/**
@@ -4577,7 +4592,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
*
* Initializes required config data for VSI, FD, ACL, and RSS before replay.
*/
-static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
+static int ice_replay_pre_init(struct ice_hw *hw)
{
struct ice_switch_info *sw = hw->switch_info;
u8 i;
@@ -4588,7 +4603,7 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
* will allow adding rule entries back to the filt_rules list,
* which is the operational list.
*/
- for (i = 0; i < ICE_SW_LKUP_LAST; i++)
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
list_replace_init(&sw->recp_list[i].filt_rules,
&sw->recp_list[i].filt_replay_rules);
ice_sched_replay_agg_vsi_preinit(hw);
@@ -4604,12 +4619,12 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
* Restore all VSI configuration after reset. It is required to call this
* function with main VSI first.
*/
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Replay pre-initialization if there is any */
if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
@@ -4725,12 +4740,12 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
*
* This function queries HW element information
*/
-enum ice_status
+int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf)
{
u16 buf_size, num_elem_ret = 0;
- enum ice_status status;
+ int status;
buf_size = sizeof(*buf);
memset(buf, 0, buf_size);
@@ -4775,7 +4790,7 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
cmd->param_indx = idx;
cmd->param_val = cpu_to_le32(value);
- return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
@@ -4796,7 +4811,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
{
struct ice_aqc_driver_shared_params *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
return -EIO;
@@ -4810,7 +4825,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
- return ice_status_to_errno(status);
+ return status;
*value = le32_to_cpu(cmd->param_val);
@@ -4840,7 +4855,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
cmd->gpio_num = pin_idx;
cmd->gpio_val = value ? 1 : 0;
- return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
@@ -4860,7 +4875,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
{
struct ice_aqc_gpio *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
cmd = &desc.params.read_write_gpio;
@@ -4869,7 +4884,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
- return ice_status_to_errno(status);
+ return status;
*value = !!cmd->gpio_val;
return 0;
@@ -4903,13 +4918,13 @@ bool ice_fw_supports_link_override(struct ice_hw *hw)
*
* Gets the link default override for a port
*/
-enum ice_status
+int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi)
{
u16 i, tlv, tlv_len, tlv_start, buf, offset;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
@@ -4994,7 +5009,7 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
*
* Set the LLDP MIB. (0x0A08)
*/
-enum ice_status
+int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
@@ -5004,7 +5019,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
cmd = &desc.params.lldp_set_mib;
if (buf_size == 0 || !buf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
@@ -5044,7 +5059,7 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
* @vsi_num: absolute HW index for VSI
* @add: boolean for if adding or removing a filter
*/
-enum ice_status
+int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
struct ice_aqc_lldp_filter_ctrl *cmd;
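Taken together, the hunks above apply one fixed translation from the removed driver-private codes to kernel errnos; summarized as a sketch (this table is an editorial aid drawn from the hunks in this patch, not a helper in the driver):

/* Status translation applied throughout this patch:
 *
 *   ICE_ERR_PARAM, ICE_ERR_INVAL_SIZE           -> -EINVAL
 *   ICE_ERR_NO_MEMORY                           -> -ENOMEM
 *   ICE_ERR_NOT_READY, ICE_ERR_RESET_ONGOING    -> -EBUSY
 *   ICE_ERR_MAX_LIMIT, ICE_ERR_AQ_FULL          -> -ENOSPC
 *   ICE_ERR_DOES_NOT_EXIST                      -> -ENOENT
 *   ICE_ERR_NOT_SUPPORTED                       -> -EOPNOTSUPP
 *   ICE_ERR_AQ_NO_WORK                          -> -EALREADY
 *   ICE_ERR_CFG and the remaining admin-queue
 *   failures (ICE_ERR_AQ_ERROR, _EMPTY, _TIMEOUT,
 *   _FW_CRITICAL, ICE_ERR_FW_API_VER)           -> -EIO
 */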
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 65c1b3244264..1c57097ddf0b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -14,108 +14,108 @@
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
-enum ice_status ice_init_hw(struct ice_hw *hw);
+int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
-enum ice_status ice_check_reset(struct ice_hw *hw);
-enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
+int ice_check_reset(struct ice_hw *hw);
+int ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+int ice_create_all_ctrlq(struct ice_hw *hw);
+int ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
-enum ice_status
+int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
-enum ice_status
+int
ice_get_link_status(struct ice_port_info *pi, bool *link_up);
-enum ice_status ice_update_link_info(struct ice_port_info *pi);
-enum ice_status
+int ice_update_link_info(struct ice_port_info *pi);
+int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
-enum ice_status
+int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
-enum ice_status
+int
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
-enum ice_status
+int
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
bool ice_is_sbq_supported(struct ice_hw *hw);
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
-enum ice_status
+int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
-enum ice_status ice_get_caps(struct ice_hw *hw);
+int ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-enum ice_status
+int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
-enum ice_status
+int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
-enum ice_status
+int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params);
-enum ice_status
+int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
-enum ice_status
+int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
-enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
+int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-enum ice_status
+int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
-enum ice_status
+int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
-enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
+int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
-enum ice_status
+int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_e810(struct ice_hw *hw);
-enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
-enum ice_status
+int ice_clear_pf_cfg(struct ice_hw *hw);
+int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
bool ice_fw_supports_link_override(struct ice_hw *hw);
-enum ice_status
+int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
-enum ice_status
+int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
-enum ice_status
+int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fc_mode fc);
bool
@@ -125,27 +125,27 @@ void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
-enum ice_status
+int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec);
-enum ice_status
+int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
@@ -159,19 +159,19 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id);
-enum ice_status
+int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handle, u16 *q_ids, u32 *q_teids,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs);
-enum ice_status
+int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
+int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
@@ -184,7 +184,7 @@ void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
-enum ice_status
+int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
int
@@ -199,11 +199,11 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-enum ice_status
+int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
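With every prototype above now returning int, callers can pass the code straight to the rest of the kernel. A hedged sketch of an early-init path (example_init is invented; the three ice_* calls are declared above):

/* Hypothetical bring-up helper: errors from the converted API bubble up
 * unmodified toward the PCI probe path.
 */
static int example_init(struct ice_hw *hw)
{
	int err;

	err = ice_init_hw(hw);
	if (err)
		return err;

	err = ice_get_caps(hw);
	if (err)
		ice_deinit_hw(hw);

	return err;
}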
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 03bdb125be36..6bcfee295991 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -87,7 +87,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
@@ -96,7 +96,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
&cq->sq.desc_buf.pa,
GFP_KERNEL | __GFP_ZERO);
if (!cq->sq.desc_buf.va)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->sq.desc_buf.size = size;
cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
@@ -107,7 +107,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.desc_buf.va = NULL;
cq->sq.desc_buf.pa = 0;
cq->sq.desc_buf.size = 0;
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
return 0;
@@ -118,7 +118,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
@@ -127,7 +127,7 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
&cq->rq.desc_buf.pa,
GFP_KERNEL | __GFP_ZERO);
if (!cq->rq.desc_buf.va)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->rq.desc_buf.size = size;
return 0;
}
@@ -154,7 +154,7 @@ static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
int i;
@@ -165,7 +165,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
sizeof(cq->rq.desc_buf), GFP_KERNEL);
if (!cq->rq.dma_head)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
/* allocate the mapped buffers */
@@ -218,7 +218,7 @@ unwind_alloc_rq_bufs:
devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
cq->rq.dma_head = NULL;
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -226,7 +226,7 @@ unwind_alloc_rq_bufs:
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
int i;
@@ -235,7 +235,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
sizeof(cq->sq.desc_buf), GFP_KERNEL);
if (!cq->sq.dma_head)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
/* allocate the mapped buffers */
@@ -266,10 +266,10 @@ unwind_alloc_sq_bufs:
devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
cq->sq.dma_head = NULL;
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
-static enum ice_status
+static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
/* Clear Head and Tail */
@@ -283,7 +283,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
/* Check one register to verify that config was applied */
if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
- return ICE_ERR_AQ_ERROR;
+ return -EIO;
return 0;
}
@@ -295,8 +295,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
*
* Configure base address and length registers for the transmit queue
*/
-static enum ice_status
-ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
@@ -308,10 +307,9 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
*
* Configure base address and length registers for the receive (event queue)
*/
-static enum ice_status
-ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status status;
+ int status;
status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
if (status)
@@ -361,19 +359,19 @@ do { \
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not atomic-context safe
*/
-static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code;
+ int ret_code;
if (cq->sq.count > 0) {
/* queue already initialized */
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto init_ctrlq_exit;
}
/* verify input for valid configuration */
if (!cq->num_sq_entries || !cq->sq_buf_size) {
- ret_code = ICE_ERR_CFG;
+ ret_code = -EIO;
goto init_ctrlq_exit;
}
@@ -421,19 +419,19 @@ init_ctrlq_exit:
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not atomic-context safe
*/
-static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code;
+ int ret_code;
if (cq->rq.count > 0) {
/* queue already initialized */
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto init_ctrlq_exit;
}
/* verify input for valid configuration */
if (!cq->num_rq_entries || !cq->rq_buf_size) {
- ret_code = ICE_ERR_CFG;
+ ret_code = -EIO;
goto init_ctrlq_exit;
}
@@ -474,15 +472,14 @@ init_ctrlq_exit:
*
* The main shutdown routine for the Control Transmit Queue
*/
-static enum ice_status
-ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&cq->sq_lock);
if (!cq->sq.count) {
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto shutdown_sq_out;
}
@@ -541,15 +538,14 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
*
* The main shutdown routine for the Control Receive Queue
*/
-static enum ice_status
-ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&cq->rq_lock);
if (!cq->rq.count) {
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto shutdown_rq_out;
}
@@ -576,17 +572,17 @@ shutdown_rq_out:
* ice_init_check_adminq - Check version for Admin Queue to know if it's alive
* @hw: pointer to the hardware structure
*/
-static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
+static int ice_init_check_adminq(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->adminq;
- enum ice_status status;
+ int status;
status = ice_aq_get_fw_ver(hw, NULL);
if (status)
goto init_ctrlq_free_rq;
if (!ice_aq_ver_check(hw)) {
- status = ICE_ERR_FW_API_VER;
+ status = -EIO;
goto init_ctrlq_free_rq;
}
@@ -612,10 +608,10 @@ init_ctrlq_free_rq:
*
* NOTE: this function does not initialize the controlq locks
*/
-static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
struct ice_ctl_q_info *cq;
- enum ice_status ret_code;
+ int ret_code;
switch (q_type) {
case ICE_CTL_Q_ADMIN:
@@ -631,14 +627,14 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
cq = &hw->mailboxq;
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
cq->qtype = q_type;
/* verify input for valid configuration */
if (!cq->num_rq_entries || !cq->num_sq_entries ||
!cq->rq_buf_size || !cq->sq_buf_size) {
- return ICE_ERR_CFG;
+ return -EIO;
}
/* setup SQ command write back timeout */
@@ -751,10 +747,10 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
*
* NOTE: this function does not initialize the controlq locks.
*/
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
+int ice_init_all_ctrlq(struct ice_hw *hw)
{
- enum ice_status status;
u32 retry = 0;
+ int status;
/* Init FW admin queue */
do {
@@ -763,7 +759,7 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
return status;
status = ice_init_check_adminq(hw);
- if (status != ICE_ERR_AQ_FW_CRITICAL)
+ if (status != -EIO)
break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
@@ -814,7 +810,7 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
* driver needs to re-initialize control queues at run time it should call
* ice_init_all_ctrlq instead.
*/
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+int ice_create_all_ctrlq(struct ice_hw *hw)
{
ice_init_ctrlq_locks(&hw->adminq);
if (ice_is_sbq_supported(hw))
@@ -962,7 +958,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* This is the main send command routine for the ATQ. It runs the queue,
* cleans the queue, etc.
*/
-enum ice_status
+int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -970,27 +966,27 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_dma_mem *dma_buf = NULL;
struct ice_aq_desc *desc_on_ring;
bool cmd_completed = false;
- enum ice_status status = 0;
struct ice_sq_cd *details;
u32 total_delay = 0;
+ int status = 0;
u16 retval = 0;
u32 val = 0;
/* if reset is in progress return a soft error */
if (hw->reset_ongoing)
- return ICE_ERR_RESET_ONGOING;
+ return -EBUSY;
mutex_lock(&cq->sq_lock);
cq->sq_last_status = ICE_AQ_RC_OK;
if (!cq->sq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
- status = ICE_ERR_AQ_EMPTY;
+ status = -EIO;
goto sq_send_command_error;
}
if ((buf && !buf_size) || (!buf && buf_size)) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto sq_send_command_error;
}
@@ -998,7 +994,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (buf_size > cq->sq_buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
buf_size);
- status = ICE_ERR_INVAL_SIZE;
+ status = -EINVAL;
goto sq_send_command_error;
}
@@ -1011,7 +1007,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (val >= cq->num_sq_entries) {
ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
val);
- status = ICE_ERR_AQ_EMPTY;
+ status = -EIO;
goto sq_send_command_error;
}
@@ -1028,7 +1024,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
*/
if (ice_clean_sq(hw, cq) == 0) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
- status = ICE_ERR_AQ_FULL;
+ status = -ENOSPC;
goto sq_send_command_error;
}
@@ -1082,7 +1078,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (copy_size > buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
copy_size, buf_size);
- status = ICE_ERR_AQ_ERROR;
+ status = -EIO;
} else {
memcpy(buf, dma_buf->va, copy_size);
}
@@ -1098,7 +1094,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
cmd_completed = true;
if (!status && retval != ICE_AQ_RC_OK)
- status = ICE_ERR_AQ_ERROR;
+ status = -EIO;
cq->sq_last_status = (enum ice_aq_err)retval;
}
@@ -1116,10 +1112,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
- status = ICE_ERR_AQ_FW_CRITICAL;
+ status = -EIO;
} else {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
- status = ICE_ERR_AQ_TIMEOUT;
+ status = -EIO;
}
}
@@ -1154,15 +1150,15 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* the contents through e. It can also return how many events are
* left to process through 'pending'.
*/
-enum ice_status
+int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending)
{
u16 ntc = cq->rq.next_to_clean;
enum ice_aq_err rq_last_status;
- enum ice_status ret_code = 0;
struct ice_aq_desc *desc;
struct ice_dma_mem *bi;
+ int ret_code = 0;
u16 desc_idx;
u16 datalen;
u16 flags;
@@ -1176,7 +1172,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (!cq->rq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
- ret_code = ICE_ERR_AQ_EMPTY;
+ ret_code = -EIO;
goto clean_rq_elem_err;
}
@@ -1185,7 +1181,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
- ret_code = ICE_ERR_AQ_NO_WORK;
+ ret_code = -EALREADY;
goto clean_rq_elem_out;
}
@@ -1196,7 +1192,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & ICE_AQ_FLAG_ERR) {
- ret_code = ICE_ERR_AQ_ERROR;
+ ret_code = -EIO;
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
le16_to_cpu(desc->opcode), rq_last_status);
}
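As a usage sketch of the converted control-queue API: a receive-side drain loop keyed on the new return values might look like this (example_drain_arq is invented; the event buffer wiring is assumed to be prepared by the caller, as elsewhere in the driver):

/* Hedged sketch: drain pending admin receive-queue events.
 * e->msg_buf / e->buf_len are assumed to be set up by the caller.
 */
static void example_drain_arq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
			      struct ice_rq_event_info *e)
{
	u16 pending = 0;

	do {
		int err = ice_clean_rq_elem(hw, cq, e, &pending);

		if (err == -EALREADY)	/* was ICE_ERR_AQ_NO_WORK: queue empty */
			return;
		if (err)		/* -EIO: queue not ready or FW error */
			return;
		/* process *e here */
	} while (pending);
}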
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 241427cd9bc0..0b146a0d4205 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_common.h"
-#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_dcb.h"
@@ -19,19 +18,19 @@
*
* Requests the complete LLDP MIB (entire packet). (0x0A00)
*/
-static enum ice_status
+static int
ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
u16 buf_size, u16 *local_len, u16 *remote_len,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_get_mib *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.lldp_get_mib;
if (buf_size == 0 || !buf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
@@ -61,7 +60,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
* Enable or Disable posting of an event on ARQ when LLDP MIB
* associated with the interface changes (0x0A01)
*/
-static enum ice_status
+static int
ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd)
{
@@ -89,7 +88,7 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
*
* Stop or Shutdown the embedded LLDP Agent (0x0A05)
*/
-enum ice_status
+int
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd)
{
@@ -117,8 +116,7 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
*
* Start the embedded LLDP Agent on all ports. (0x0A06)
*/
-enum ice_status
-ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
+int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_start *cmd;
struct ice_aq_desc desc;
@@ -598,18 +596,17 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
*
* Parse DCB configuration from the LLDPDU
*/
-static enum ice_status
-ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+static int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
{
struct ice_lldp_org_tlv *tlv;
- enum ice_status ret = 0;
u16 offset = 0;
+ int ret = 0;
u16 typelen;
u16 type;
u16 len;
if (!lldpmib || !dcbcfg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* set to the start of LLDPDU */
lldpmib += ETH_HLEN;
@@ -649,17 +646,17 @@ ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
*
* Query DCB configuration from the firmware
*/
-enum ice_status
+int
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg)
{
- enum ice_status ret;
u8 *lldpmib;
+ int ret;
/* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
ICE_LLDPDU_SIZE, NULL, NULL, NULL);
@@ -684,17 +681,17 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
* @cd: pointer to command details structure or NULL
*
* Start/Stop the embedded dcbx Agent. In case that this wrapper function
- * returns ICE_SUCCESS, caller will need to check if FW returns back the same
+ * returns 0, caller will need to check if FW returns back the same
* value as stated in dcbx_agent_status, and react accordingly. (0x0A09)
*/
-enum ice_status
+int
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop_start_specific_agent *cmd;
- enum ice_status status;
struct ice_aq_desc desc;
u16 opcode;
+ int status;
cmd = &desc.params.lldp_agent_ctrl;
@@ -724,7 +721,7 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
*
* Get CEE DCBX mode operational configuration from firmware (0x0A07)
*/
-static enum ice_status
+static int
ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
struct ice_aqc_get_cee_dcb_cfg_resp *buff,
struct ice_sq_cd *cd)
@@ -749,7 +746,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
{
struct ice_aqc_set_query_pfc_mode *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
return -EINVAL;
@@ -762,7 +759,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
- return ice_status_to_errno(status);
+ return status;
/* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is
* disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has
@@ -903,14 +900,13 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
*
* Get IEEE or CEE mode DCB configuration from the Firmware
*/
-static enum ice_status
-ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
+static int ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
{
struct ice_dcbx_cfg *dcbx_cfg = NULL;
- enum ice_status ret;
+ int ret;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (dcbx_mode == ICE_DCBX_MODE_IEEE)
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
@@ -943,14 +939,14 @@ out:
*
* Get DCB configuration from the Firmware
*/
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+int ice_get_dcb_cfg(struct ice_port_info *pi)
{
struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg;
struct ice_dcbx_cfg *dcbx_cfg;
- enum ice_status ret;
+ int ret;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
if (!ret) {
@@ -974,13 +970,13 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
*
* Update DCB configuration from the Firmware
*/
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
{
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
- enum ice_status ret = 0;
+ int ret = 0;
if (!hw->func_caps.common_cap.dcb)
- return ICE_ERR_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
qos_cfg->is_sw_lldp = true;
@@ -996,7 +992,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
return ret;
qos_cfg->is_sw_lldp = false;
} else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) {
- return ICE_ERR_NOT_READY;
+ return -EBUSY;
}
/* Configure the LLDP MIB change event */
@@ -1016,19 +1012,19 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
*
* Configure (disable/enable) MIB
*/
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
{
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
- enum ice_status ret;
+ int ret;
if (!hw->func_caps.common_cap.dcb)
- return ICE_ERR_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
/* Get DCBX status */
qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
- return ICE_ERR_NOT_READY;
+ return -EBUSY;
ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
if (!ret)
@@ -1469,16 +1465,16 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
*
* Set DCB configuration to the Firmware
*/
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
+int ice_set_dcb_cfg(struct ice_port_info *pi)
{
u8 mib_type, *lldpmib = NULL;
struct ice_dcbx_cfg *dcbcfg;
- enum ice_status ret;
struct ice_hw *hw;
u16 miblen;
+ int ret;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
@@ -1487,7 +1483,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
/* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
@@ -1511,17 +1507,17 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
*
* query current port ETS configuration
*/
-static enum ice_status
+static int
ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_query_port_ets *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.port_ets;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
cmd->port_teid = pi->root->info.node_teid;
@@ -1537,18 +1533,18 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
*
* update the SW DB with the new TC changes
*/
-static enum ice_status
+static int
ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf)
{
struct ice_sched_node *node, *tc_node;
struct ice_aqc_txsched_elem_data elem;
- enum ice_status status = 0;
u32 teid1, teid2;
+ int status = 0;
u8 i, j;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* suspend the missing TC nodes */
for (i = 0; i < pi->root->num_children; i++) {
teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid);
@@ -1605,12 +1601,12 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
* query current port ETS configuration and update the
* SW DB with the TC changes
*/
-enum ice_status
+int
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
status = ice_aq_query_port_ets(pi, buf, buf_size, cd);
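The DCB conversion lets callers branch on well-known errnos instead of driver-private ones. A hedged bring-up sketch (example_setup_dcb and its fallback policy are illustrative only):

/* Hypothetical DCB bring-up: -EOPNOTSUPP ("no DCB capability") and -EBUSY
 * ("DCBX disabled in firmware") now come straight from ice_init_dcb().
 */
static int example_setup_dcb(struct ice_hw *hw)
{
	int err = ice_init_dcb(hw, true);

	if (err == -EOPNOTSUPP || err == -EBUSY)
		return 0;	/* fall back to running without firmware DCBX */

	return err;
}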
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index 9b6f87a889a6..d73348f279f7 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -138,28 +138,27 @@ struct ice_cee_app_prio {
} __packed;
int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
-enum ice_status
+int ice_get_dcb_cfg(struct ice_port_info *pi);
+int ice_set_dcb_cfg(struct ice_port_info *pi);
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
+int
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cmd_details);
#ifdef CONFIG_DCB
-enum ice_status
+int
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
-enum ice_status
+int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
+int
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd);
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
#else /* CONFIG_DCB */
-static inline enum ice_status
+static inline int
ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
bool __always_unused shutdown_lldp_agent,
bool __always_unused persist,
@@ -168,7 +167,7 @@ ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
return 0;
}
-static inline enum ice_status
+static inline int
ice_aq_start_lldp(struct ice_hw __always_unused *hw,
bool __always_unused persist,
struct ice_sq_cd __always_unused *cd)
@@ -176,7 +175,7 @@ ice_aq_start_lldp(struct ice_hw __always_unused *hw,
return 0;
}
-static inline enum ice_status
+static inline int
ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
bool __always_unused start_dcbx_agent,
bool *dcbx_agent_status,
@@ -187,7 +186,7 @@ ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
return 0;
}
-static inline enum ice_status
+static inline int
ice_cfg_lldp_mib_change(struct ice_hw __always_unused *hw,
bool __always_unused ena_mib)
{
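Because the !CONFIG_DCB stubs above keep returning 0, shared callers need no preprocessor guards. A small sketch under that assumption (example_stop_fw_lldp is invented):

/* Hypothetical shared caller: compiles and behaves the same with or
 * without CONFIG_DCB, since the stubs above report success.
 */
static int example_stop_fw_lldp(struct ice_hw *hw)
{
	return ice_aq_stop_lldp(hw, true, false, NULL);
}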
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a72e18320a22..b94d8daeaa58 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -528,7 +528,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
struct ice_aqc_port_ets_elem buf = { 0 };
struct device *dev = ice_pf_to_dev(pf);
struct ice_dcbx_cfg *err_cfg;
- enum ice_status ret;
+ int ret;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index b9bd9f9472f6..a230edb38466 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
+#include <linux/vmalloc.h>
+
#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
@@ -39,13 +41,13 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
+ int status;
status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
if (status)
/* We failed to locate the PBA, so just skip this entry */
- dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n",
- ice_stat_str(status));
+ dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n",
+ status);
}
static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
@@ -251,7 +253,6 @@ static int ice_devlink_info_get(struct devlink *devlink,
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct ice_info_ctx *ctx;
- enum ice_status status;
size_t i;
int err;
@@ -266,20 +267,19 @@ static int ice_devlink_info_get(struct devlink *devlink,
return -ENOMEM;
/* discover capabilities first */
- status = ice_discover_dev_caps(hw, &ctx->dev_caps);
- if (status) {
- dev_dbg(dev, "Failed to discover device capabilities, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_discover_dev_caps(hw, &ctx->dev_caps);
+ if (err) {
+ dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
- err = -EIO;
goto out_free_ctx;
}
if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
- status = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
- if (status) {
- dev_dbg(dev, "Unable to read inactive Option ROM version data, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
+ if (err) {
+ dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
@@ -287,10 +287,10 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
- status = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
- if (status) {
- dev_dbg(dev, "Unable to read inactive NVM version data, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
+ if (err) {
+ dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
/* disable display of pending NVM update */
ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
@@ -298,10 +298,10 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
- status = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
- if (status) {
- dev_dbg(dev, "Unable to read inactive Netlist version data, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
+ if (err) {
+ dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Netlist update */
ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
@@ -373,63 +373,225 @@ out_free_ctx:
}
/**
- * ice_devlink_flash_update - Update firmware stored in flash on the device
- * @devlink: pointer to devlink associated with device to update
- * @params: flash update parameters
+ * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
+ * @devlink: pointer to the devlink instance to reload
+ * @netns_change: if true, the network namespace is changing
+ * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE
+ * @limit: limits on what reload should do, such as not resetting
* @extack: netlink extended ACK structure
*
- * Perform a device flash update. The bulk of the update logic is contained
- * within the ice_flash_pldm_image function.
+ * Allow the user to activate new Embedded Management Processor firmware by
+ * issuing a device-specific EMP reset. Called in response to
+ * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
*
- * Returns: zero on success, or an error code on failure.
+ * Note that teardown and rebuild of the driver state happens automatically as
+ * part of an interrupt and watchdog task. This is because all physical
+ * functions on the device must be able to reset when an EMP reset occurs from
+ * any source.
*/
static int
-ice_devlink_flash_update(struct devlink *devlink,
- struct devlink_flash_update_params *params,
- struct netlink_ext_ack *extack)
+ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- u8 preservation;
+ u8 pending;
int err;
- if (!params->overwrite_mask) {
- /* preserve all settings and identifiers */
- preservation = ICE_AQC_NVM_PRESERVE_ALL;
- } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) {
- /* overwrite settings, but preserve the vital device identifiers */
- preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
- } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS |
- DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) {
- /* overwrite both settings and identifiers, preserve nothing */
- preservation = ICE_AQC_NVM_NO_PRESERVATION;
- } else {
- NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
- return -EOPNOTSUPP;
+ err = ice_get_pending_updates(pf, &pending, extack);
+ if (err)
+ return err;
+
+ /* pending is a bitmask of which flash banks have a pending update,
+ * including the main NVM bank, the Option ROM bank, and the netlist
+ * bank. If any of these bits are set, then there is a pending update
+ * waiting to be activated.
+ */
+ if (!pending) {
+ NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
+ return -ECANCELED;
}
- if (!hw->dev_caps.common_cap.nvm_unified_update) {
- NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
- return -EOPNOTSUPP;
+ if (pf->fw_emp_reset_disabled) {
+ NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
+ return -ECANCELED;
}
- err = ice_check_for_pending_update(pf, NULL, extack);
- if (err)
+ dev_dbg(dev, "Issuing device EMP reset to activate firmware\n");
+
+ err = ice_aq_nvm_update_empr(hw);
+ if (err) {
+ dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
return err;
+ }
- devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);
+ return 0;
+}
- return ice_flash_pldm_image(pf, params->fw, preservation, extack);
+/**
+ * ice_devlink_reload_empr_finish - Wait for EMP reset to finish
+ * @devlink: pointer to the devlink instance reloading
+ * @action: the action requested
+ * @limit: limits imposed by userspace, such as not resetting
+ * @actions_performed: on return, indicate what actions actually performed
+ * @extack: netlink extended ACK structure
+ *
+ * Wait for the driver to finish rebuilding after the EMP reset completes.
+ * This covers both the actual device reset and the driver's rebuild.
+ */
+static int
+ice_devlink_reload_empr_finish(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ int err;
+
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
+
+ err = ice_wait_for_reset(pf, 60 * HZ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
+ return err;
+ }
+
+ return 0;
}
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ /* The ice driver currently does not support driver reinit */
+ .reload_down = ice_devlink_reload_empr_start,
+ .reload_up = ice_devlink_reload_empr_finish,
.eswitch_mode_get = ice_eswitch_mode_get,
.eswitch_mode_set = ice_eswitch_mode_set,
.info_get = ice_devlink_info_get,
.flash_update = ice_devlink_flash_update,
};
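With the reload ops wired into ice_devlink_ops above, activating staged firmware becomes a standard devlink reload; from userspace that is an invocation along the lines of devlink dev reload pci/0000:3b:00.0 action fw_activate, where the PCI address is illustrative rather than taken from this patch.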
+static int
+ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false;
+
+ return 0;
+}
+
+static int
+ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool roce_ena = ctx->val.vbool;
+ int ret;
+
+ if (!roce_ena) {
+ ice_unplug_aux_dev(pf);
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ return 0;
+ }
+
+ pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ ret = ice_plug_aux_dev(pf);
+ if (ret)
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+
+ return ret;
+}
+
+static int
+ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
+ NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;
+
+ return 0;
+}
+
+static int
+ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool iw_ena = ctx->val.vbool;
+ int ret;
+
+ if (!iw_ena) {
+ ice_unplug_aux_dev(pf);
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ return 0;
+ }
+
+ pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
+ ret = ice_plug_aux_dev(pf);
+ if (ret)
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+
+ return ret;
+}
+
+static int
+ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
+ NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param ice_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_enable_roce_get,
+ ice_devlink_enable_roce_set,
+ ice_devlink_enable_roce_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_enable_iw_get,
+ ice_devlink_enable_iw_set,
+ ice_devlink_enable_iw_validate),
+};
+
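Both knobs are generic devlink parameters, toggled at runtime with the stock devlink CLI, e.g. something like: devlink dev param set pci/0000:af:00.0 name enable_roce value true cmode runtime. A driver-private parameter would follow the same shape; a minimal sketch, where the ID, name, and get/set callbacks are hypothetical:

/* Hypothetical driver-private parameter, sketched for illustration.
 * Private IDs must start above the generic ID range; the get/set
 * callbacks are assumed to be declared elsewhere.
 */
enum {
	ICE_DEVLINK_PARAM_ID_EXAMPLE = DEVLINK_PARAM_GENERIC_ID_MAX + 1,
};

static const struct devlink_param ice_example_params[] = {
	DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_EXAMPLE, "example_knob",
			     DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     ice_example_get, ice_example_set,
			     NULL /* no validate callback */),
};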
static void ice_devlink_free(void *devlink_ptr)
{
devlink_free((struct devlink *)devlink_ptr);
@@ -470,6 +632,7 @@ void ice_devlink_register(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
}
@@ -484,6 +647,36 @@ void ice_devlink_unregister(struct ice_pf *pf)
devlink_unregister(priv_to_devlink(pf));
}
+int ice_devlink_register_params(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value value;
+ int err;
+
+ err = devlink_params_register(devlink, ice_devlink_params,
+ ARRAY_SIZE(ice_devlink_params));
+ if (err)
+ return err;
+
+ value.vbool = false;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
+ value);
+
+ value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags) ? true : false;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ value);
+
+ return 0;
+}
+
+void ice_devlink_unregister_params(struct ice_pf *pf)
+{
+ devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params,
+ ARRAY_SIZE(ice_devlink_params));
+}
+
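The intended pairing is registration late in probe and unregistration early in remove; a sketch of the hypothetical call sites follows (the surrounding probe/remove code is outside this hunk, and the function names are illustrative):

/* Sketch only: error handling and surrounding probe logic elided. */
static int example_probe_tail(struct ice_pf *pf)
{
	int err;

	err = ice_devlink_register_params(pf);
	if (err)
		return err;

	/* ... remainder of probe ... */
	return 0;
}

static void example_remove_head(struct ice_pf *pf)
{
	ice_devlink_unregister_params(pf);
	/* ... remainder of remove ... */
}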
/**
* ice_devlink_create_pf_port - Create a devlink port for this PF
* @pf: the PF to create a devlink port for
@@ -597,16 +790,20 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf)
}
/**
- * ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents
+ * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
* @devlink: the devlink instance
* @ops: the devlink region being snapshotted
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
* This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
- * the shadow-ram devlink region. It captures a snapshot of the shadow ram
- * contents. This snapshot can later be viewed via the devlink-region
- * interface.
+ * the nvm-flash devlink region. It captures a snapshot of the full NVM flash
+ * contents, including both banks of flash. This snapshot can later be viewed
+ * via the devlink-region interface.
+ *
+ * It captures the flash using the FLASH_ONLY bit set when reading via
+ * firmware, so it does not read the current Shadow RAM contents. For that,
+ * use the shadow-ram region.
*
* @returns zero on success, and updates the data pointer. Returns a non-zero
* error code on failure.
@@ -618,9 +815,9 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
void *nvm_data;
u32 nvm_size;
+ int status;
nvm_size = hw->flash.flash_size;
nvm_data = vzalloc(nvm_size);
@@ -633,7 +830,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
status, hw->adminq.sq_last_status);
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
vfree(nvm_data);
- return -EIO;
+ return status;
}
status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
@@ -643,7 +840,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
ice_release_nvm(hw);
vfree(nvm_data);
- return -EIO;
+ return status;
}
ice_release_nvm(hw);
@@ -654,6 +851,66 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
}
/**
+ * ice_devlink_sram_snapshot - Capture a snapshot of the Shadow RAM contents
+ * @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
+ * the shadow-ram devlink region. It captures a snapshot of the Shadow RAM
+ * contents. This snapshot can later be viewed via the devlink-region
+ * interface.
+ *
+ * @returns zero on success, and updates the data pointer. Returns a non-zero
+ * error code on failure.
+ */
+static int
+ice_devlink_sram_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops __always_unused *ops,
+ struct netlink_ext_ack *extack, u8 **data)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ u8 *sram_data;
+ u32 sram_size;
+ int err;
+
+ sram_size = hw->flash.sr_words * 2u;
+ sram_data = vzalloc(sram_size);
+ if (!sram_data)
+ return -ENOMEM;
+
+ err = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (err) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ err, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ vfree(sram_data);
+ return err;
+ }
+
+ /* Read from the Shadow RAM, rather than directly from NVM */
+ err = ice_read_flat_nvm(hw, 0, &sram_size, sram_data, true);
+ if (err) {
+ dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
+ sram_size, err, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read Shadow RAM contents");
+ ice_release_nvm(hw);
+ vfree(sram_data);
+ return err;
+ }
+
+ ice_release_nvm(hw);
+
+ *data = sram_data;
+
+ return 0;
+}
+
+/**
* ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
* @devlink: the devlink instance
* @ops: the devlink region being snapshotted
@@ -675,8 +932,8 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink,
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
void *devcaps;
+ int status;
devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
if (!devcaps)
@@ -689,7 +946,7 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink,
status, hw->adminq.sq_last_status);
NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
vfree(devcaps);
- return -EIO;
+ return status;
}
*data = (u8 *)devcaps;
@@ -703,6 +960,12 @@ static const struct devlink_region_ops ice_nvm_region_ops = {
.snapshot = ice_devlink_nvm_snapshot,
};
+static const struct devlink_region_ops ice_sram_region_ops = {
+ .name = "shadow-ram",
+ .destructor = vfree,
+ .snapshot = ice_devlink_sram_snapshot,
+};
+
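All three region ops share one ownership contract for snapshot buffers; here is a minimal sketch with a hypothetical region, where the size macro and fill step are placeholders:

/* Minimal sketch of the devlink region snapshot contract, using a
 * hypothetical "example" region: the callback allocates the buffer,
 * fills it, and hands ownership to devlink through *data; devlink
 * later frees it via the region's .destructor (vfree here).
 */
static int
example_snapshot(struct devlink *devlink,
		 const struct devlink_region_ops *ops,
		 struct netlink_ext_ack *extack, u8 **data)
{
	u8 *buf = vzalloc(EXAMPLE_REGION_SIZE);	/* placeholder size */

	if (!buf)
		return -ENOMEM;

	/* ... fill buf from the device ... */

	*data = buf;	/* devlink now owns and eventually vfree()s buf */
	return 0;
}

A snapshot is then triggered with something like devlink region new pci/0000:af:00.0/shadow-ram and read back with devlink region dump.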
static const struct devlink_region_ops ice_devcaps_region_ops = {
.name = "device-caps",
.destructor = vfree,
@@ -720,7 +983,7 @@ void ice_devlink_init_regions(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
struct device *dev = ice_pf_to_dev(pf);
- u64 nvm_size;
+ u64 nvm_size, sram_size;
nvm_size = pf->hw.flash.flash_size;
pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
@@ -731,6 +994,15 @@ void ice_devlink_init_regions(struct ice_pf *pf)
pf->nvm_region = NULL;
}
+ sram_size = pf->hw.flash.sr_words * 2u;
+ pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops,
+ 1, sram_size);
+ if (IS_ERR(pf->sram_region)) {
+ dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
+ PTR_ERR(pf->sram_region));
+ pf->sram_region = NULL;
+ }
+
pf->devcaps_region = devlink_region_create(devlink,
&ice_devcaps_region_ops, 10,
ICE_AQ_MAX_BUF_LEN);
@@ -751,6 +1023,10 @@ void ice_devlink_destroy_regions(struct ice_pf *pf)
{
if (pf->nvm_region)
devlink_region_destroy(pf->nvm_region);
+
+ if (pf->sram_region)
+ devlink_region_destroy(pf->sram_region);
+
if (pf->devcaps_region)
devlink_region_destroy(pf->devcaps_region);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index b7f9551e4fc4..fe006d9946f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -8,6 +8,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev);
void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
+int ice_devlink_register_params(struct ice_pf *pf);
+void ice_devlink_unregister_params(struct ice_pf *pf);
int ice_devlink_create_pf_port(struct ice_pf *pf);
void ice_devlink_destroy_pf_port(struct ice_pf *pf);
int ice_devlink_create_vf_port(struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index d1d7389b0bff..864692b157b6 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -10,6 +10,100 @@
#include "ice_tc_lib.h"
/**
+ * ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC
+ * @pf: pointer to PF struct
+ * @vf: pointer to VF struct
+ * @mac: VF's MAC address
+ *
+ * This function adds an advanced rule that forwards packets with the
+ * VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue.
+ */
+int
+ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_adv_lkup_elem *list;
+ struct ice_hw *hw = &pf->hw;
+ const u16 lkups_cnt = 1;
+ int err;
+
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ return -ENOMEM;
+
+ list[0].type = ICE_MAC_OFOS;
+ ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
+ eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
+
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
+ rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
+ rule_info.rx = false;
+ rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
+ ctrl_vsi->rxq_map[vf->vf_id];
+ rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
+ rule_info.flags_info.act_valid = true;
+
+ err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
+ vf->repr->mac_rule);
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d",
+ vf->vf_id);
+ else
+ vf->repr->rule_added = true;
+
+ kfree(list);
+ return err;
+}
+
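In the lookup element above, h_u carries the value to match and m_u the mask; eth_broadcast_addr() sets an all-ones mask, meaning every bit of the source MAC must match. The same pattern keyed on the destination MAC instead would look like the sketch below (the helper name is hypothetical; the dst_addr field is assumed from the same header union):

/* Sketch only: same ICE_MAC_OFOS lookup-element pattern, but keyed on
 * the outer destination MAC.
 */
static void example_fill_dst_mac_lkup(struct ice_adv_lkup_elem *elem,
				      const u8 *mac)
{
	elem->type = ICE_MAC_OFOS;
	ether_addr_copy(elem->h_u.eth_hdr.dst_addr, mac);   /* match value */
	eth_broadcast_addr(elem->m_u.eth_hdr.dst_addr);     /* all 48 bits */
}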
+/**
+ * ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC
+ * @vf: pointer to VF struct
+ *
+ * This function replays VF's MAC rule after reset.
+ */
+void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
+{
+ int err;
+
+ if (!ice_is_switchdev_running(vf->pf))
+ return;
+
+ if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
+ err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
+ vf->hw_lan_addr.addr);
+ if (err) {
+ dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
+ vf->hw_lan_addr.addr, vf->vf_id, err);
+ return;
+ }
+ vf->num_mac++;
+
+ ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
+ }
+}
+
+/**
+ * ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC
+ * @vf: pointer to the VF struct
+ *
+ * Delete the advanced rule that was used to forward packets with the VF's MAC
+ * address (src MAC) to the corresponding switchdev ctrl VSI queue.
+ */
+void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf)
+{
+ if (!ice_is_switchdev_running(vf->pf))
+ return;
+
+ if (!vf->repr->rule_added)
+ return;
+
+ ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule);
+ vf->repr->rule_added = false;
+}
+
+/**
* ice_eswitch_setup_env - configure switchdev HW filters
* @pf: pointer to PF struct
*
@@ -21,7 +115,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
struct net_device *uplink_netdev = uplink_vsi->netdev;
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
- struct ice_port_info *pi = pf->hw.port_info;
bool rule_added = false;
ice_vsi_manage_vlan_stripping(ctrl_vsi, false);
@@ -42,29 +135,17 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
rule_added = true;
}
- if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX))
- goto err_def_tx;
-
if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_uplink;
if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_control;
- if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
- ICE_FLTR_TX,
- ICE_SINGLE_ACT_LB_ENABLE))
- goto err_update_action;
-
return 0;
-err_update_action:
- ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
- ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
-err_def_tx:
if (rule_added)
ice_clear_dflt_vsi(uplink_vsi->vsw);
err_def_rx:
@@ -167,21 +248,11 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
netif_keep_dst(vf->repr->netdev);
}
- kfree(ctrl_vsi->target_netdevs);
-
- ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
- sizeof(*ctrl_vsi->target_netdevs),
- GFP_KERNEL);
- if (!ctrl_vsi->target_netdevs)
- goto err;
-
ice_for_each_vf(pf, i) {
struct ice_repr *repr = pf->vf[i].repr;
struct ice_vsi *vsi = repr->src_vsi;
struct metadata_dst *dst;
- ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev;
-
dst = repr->dst;
dst->u.port_info.port_id = vsi->vsi_num;
dst->u.port_info.lower_dev = repr->netdev;
@@ -214,7 +285,6 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
int i;
- kfree(ctrl_vsi->target_netdevs);
ice_for_each_vf(pf, i) {
struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
struct ice_vf *vf = &pf->vf[i];
@@ -320,7 +390,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
- ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
ice_clear_dflt_vsi(uplink_vsi->vsw);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
@@ -375,24 +444,6 @@ static void ice_eswitch_napi_disable(struct ice_pf *pf)
}
/**
- * ice_eswitch_set_rxdid - configure rxdid on all Rx queues from VSI
- * @vsi: VSI to setup rxdid on
- * @rxdid: flex descriptor id
- */
-static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
-{
- struct ice_hw *hw = &vsi->back->hw;
- int i;
-
- ice_for_each_rxq(vsi, i) {
- struct ice_rx_ring *ring = vsi->rx_rings[i];
- u16 pf_q = vsi->rxq_map[ring->q_index];
-
- ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
- }
-}
-
-/**
* ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
* @pf: pointer to PF structure
*/
@@ -425,8 +476,6 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
ice_eswitch_napi_enable(pf);
- ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
-
return 0;
err_setup_reprs:
@@ -448,6 +497,7 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
ice_eswitch_napi_disable(pf);
ice_eswitch_release_env(pf);
+ ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
ice_eswitch_release_reprs(pf, ctrl_vsi);
ice_vsi_release(ctrl_vsi);
ice_repr_rem_from_all_vfs(pf);
@@ -497,34 +547,6 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
}
/**
- * ice_eswitch_get_target_netdev - return port representor netdev
- * @rx_ring: pointer to Rx ring
- * @rx_desc: pointer to Rx descriptor
- *
- * When working in switchdev mode context (when control VSI is used), this
- * function returns netdev of appropriate port representor. For non-switchdev
- * context, regular netdev associated with Rx ring is returned.
- */
-struct net_device *
-ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc)
-{
- struct ice_32b_rx_flex_desc_nic_2 *desc;
- struct ice_vsi *vsi = rx_ring->vsi;
- struct ice_vsi *control_vsi;
- u16 target_vsi_id;
-
- control_vsi = vsi->back->switchdev.control_vsi;
- if (vsi != control_vsi)
- return rx_ring->netdev;
-
- desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
- target_vsi_id = le16_to_cpu(desc->src_vsi);
-
- return vsi->target_netdevs[target_vsi_id];
-}
-
-/**
* ice_eswitch_mode_get - get current eswitch mode
* @devlink: pointer to devlink structure
* @mode: output parameter for current eswitch mode
@@ -648,7 +670,6 @@ int ice_eswitch_rebuild(struct ice_pf *pf)
return status;
ice_eswitch_napi_enable(pf);
- ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
ice_eswitch_start_all_tx_queues(pf);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 364cd2a79c37..bd58d9d2e565 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -20,10 +20,11 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
void ice_eswitch_update_repr(struct ice_vsi *vsi);
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
-
-struct net_device *
-ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc);
+int
+ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
+ const u8 *mac);
+void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf);
+void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf);
void ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off);
@@ -33,6 +34,15 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
static inline void ice_eswitch_release(struct ice_pf *pf) { }
static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
+static inline void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) { }
+static inline void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) { }
+
+static inline int
+ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
+ const u8 *mac)
+{
+ return -EOPNOTSUPP;
+}
static inline void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
@@ -67,13 +77,6 @@ static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
return false;
}
-static inline struct net_device *
-ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc)
-{
- return rx_ring->netdev;
-}
-
static inline netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 572519e402f4..e2e3ef7fba7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -270,9 +270,8 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
struct device *dev;
- int ret = 0;
+ int ret;
u8 *buf;
dev = ice_pf_to_dev(pf);
@@ -285,22 +284,18 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (!buf)
return -ENOMEM;
- status = ice_acquire_nvm(hw, ICE_RES_READ);
- if (status) {
- dev_err(dev, "ice_acquire_nvm failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret) {
+ dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
- status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
- false);
- if (status) {
- dev_err(dev, "ice_read_flat_nvm failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
+ false);
+ if (ret) {
+ dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto release;
}
@@ -342,14 +337,14 @@ static bool ice_active_vfs(struct ice_pf *pf)
static u64 ice_link_test(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- enum ice_status status;
bool link_up = false;
+ int status;
netdev_info(netdev, "link test\n");
status = ice_get_link_status(np->vsi->port_info, &link_up);
if (status) {
- netdev_err(netdev, "link query error, status = %s\n",
- ice_stat_str(status));
+ netdev_err(netdev, "link query error, status = %d\n",
+ status);
return 1;
}
@@ -1052,8 +1047,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
struct ice_port_info *pi;
- enum ice_status status;
- int err = 0;
+ int err;
pi = vsi->port_info;
@@ -1079,12 +1073,10 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
if (!caps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- caps, NULL);
- if (status) {
- err = -EAGAIN;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ caps, NULL);
+ if (err)
goto done;
- }
/* Set supported/configured FEC modes based on PHY capability */
if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC)
@@ -1203,7 +1195,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
- enum ice_status status;
+ int status;
/* Disable FW LLDP engine */
status = ice_cfg_lldp_mib_change(&pf->hw, false);
@@ -1232,8 +1224,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED;
pf->dcbx_cap |= DCB_CAP_DCBX_HOST;
} else {
- enum ice_status status;
bool dcbx_agent_status;
+ int status;
if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) {
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
@@ -1288,8 +1280,10 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
/* down and up VSI so that changes of Rx cfg are reflected. */
- ice_down(vsi);
- ice_up(vsi);
+ if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
+ ice_down(vsi);
+ ice_up(vsi);
+ }
}
/* don't allow modification of this flag when a single VF is in
* promiscuous mode because it's not supported
@@ -1938,8 +1932,7 @@ ice_get_link_ksettings(struct net_device *netdev,
struct ice_aqc_get_phy_caps_data *caps;
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
- enum ice_status status;
- int err = 0;
+ int err;
ethtool_link_ksettings_zero_link_mode(ks, supported);
ethtool_link_ksettings_zero_link_mode(ks, advertising);
@@ -1990,12 +1983,10 @@ ice_get_link_ksettings(struct net_device *netdev,
if (!caps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(vsi->port_info, false,
- ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
- if (status) {
- err = -EIO;
+ err = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
+ if (err)
goto done;
- }
/* Set the advertised flow control based on the PHY capability */
if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
@@ -2027,12 +2018,10 @@ ice_get_link_ksettings(struct net_device *netdev,
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- status = ice_aq_get_phy_caps(vsi->port_info, false,
- ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
- if (status) {
- err = -EIO;
+ err = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
+ if (err)
goto done;
- }
/* Set supported FEC modes based on PHY capability */
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
@@ -2210,11 +2199,10 @@ ice_set_link_ksettings(struct net_device *netdev,
struct ice_pf *pf = np->vsi->back;
struct ice_port_info *pi;
u8 autoneg_changed = 0;
- enum ice_status status;
u64 phy_type_high = 0;
u64 phy_type_low = 0;
- int err = 0;
bool linkup;
+ int err;
pi = np->vsi->port_info;
@@ -2234,15 +2222,13 @@ ice_set_link_ksettings(struct net_device *netdev,
/* Get the PHY capabilities based on media */
if (ice_fw_supports_report_dflt_cfg(pi->hw))
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
- phy_caps, NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ phy_caps, NULL);
else
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- phy_caps, NULL);
- if (status) {
- err = -EIO;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ phy_caps, NULL);
+ if (err)
goto done;
- }
/* save autoneg out of ksettings */
autoneg = copy_ks.base.autoneg;
@@ -2308,11 +2294,9 @@ ice_set_link_ksettings(struct net_device *netdev,
/* Call to get the current link speed */
pi->phy.get_link_info = true;
- status = ice_get_link_status(pi, &linkup);
- if (status) {
- err = -EIO;
+ err = ice_get_link_status(pi, &linkup);
+ if (err)
goto done;
- }
curr_link_speed = pi->phy.link_info.link_speed;
adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
@@ -2381,10 +2365,9 @@ ice_set_link_ksettings(struct net_device *netdev,
}
/* make the aq call */
- status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
- if (status) {
+ err = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
+ if (err) {
netdev_info(netdev, "Set phy config failed,\n");
- err = -EIO;
goto done;
}
@@ -2522,9 +2505,9 @@ static int
ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
u64 hashed_flds;
+ int status;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@@ -2550,9 +2533,9 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
if (status) {
- dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n",
- vsi->vsi_num, ice_stat_str(status));
- return -EINVAL;
+ dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
+ vsi->vsi_num, status);
+ return status;
}
return 0;
@@ -2686,7 +2669,9 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
}
static void
-ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2704,7 +2689,9 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
}
static int
-ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_tx_ring *xdp_rings = NULL;
@@ -2949,7 +2936,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_port_info *pi = np->vsi->port_info;
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_dcbx_cfg *dcbx_cfg;
- enum ice_status status;
+ int status;
/* Initialize pause params */
pause->rx_pause = 0;
@@ -2999,11 +2986,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_vsi *vsi = np->vsi;
struct ice_hw *hw = &pf->hw;
struct ice_port_info *pi;
- enum ice_status status;
u8 aq_failures;
bool link_up;
- int err = 0;
u32 is_an;
+ int err;
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
@@ -3029,11 +3015,11 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -ENOMEM;
/* Get current PHY config */
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
- NULL);
- if (status) {
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
+ NULL);
+ if (err) {
kfree(pcaps);
- return -EIO;
+ return err;
}
is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
@@ -3069,22 +3055,19 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -EINVAL;
/* Set the FC mode and only restart AN if link is up */
- status = ice_set_fc(pi, &aq_failures, link_up);
+ err = ice_set_fc(pi, &aq_failures, link_up);
if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
}
@@ -3924,16 +3907,16 @@ ice_get_module_info(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u8 sff8472_comp = 0;
u8 sff8472_swap = 0;
u8 sff8636_rev = 0;
u8 value = 0;
+ int status;
status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
0, &value, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
switch (value) {
case ICE_MODULE_TYPE_SFP:
@@ -3941,12 +3924,12 @@ ice_get_module_info(struct net_device *netdev,
ICE_MODULE_SFF_8472_COMP, 0x00, 0,
&sff8472_comp, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
&sff8472_swap, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
modinfo->type = ETH_MODULE_SFF_8079;
@@ -3966,7 +3949,7 @@ ice_get_module_info(struct net_device *netdev,
ICE_MODULE_REVISION_ADDR, 0x00, 0,
&sff8636_rev, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
/* Check revision compliance */
if (sff8636_rev > 0x02) {
/* Module is SFF-8636 compliant */
@@ -4001,11 +3984,11 @@ ice_get_module_eeprom(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
bool is_sfp = false;
unsigned int i, j;
u16 offset = 0;
u8 page = 0;
+ int status;
if (!ee || !ee->len || !data)
return -EINVAL;
@@ -4013,7 +3996,7 @@ ice_get_module_eeprom(struct net_device *netdev,
status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
NULL);
if (status)
- return -EIO;
+ return status;
if (value[0] == ICE_MODULE_TYPE_SFP)
is_sfp = true;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index b6e7f47c8c78..5d10c4f84a36 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -5,6 +5,7 @@
#include "ice.h"
#include "ice_lib.h"
+#include "ice_fdir.h"
#include "ice_flow.h"
static struct in6_addr full_ipv6_addr_mask = {
@@ -205,7 +206,7 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
fsp->ring_cookie = RX_CLS_FLOW_DISC;
else
- fsp->ring_cookie = rule->q_index;
+ fsp->ring_cookie = rule->orig_q_index;
idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
@@ -257,6 +258,80 @@ release_lock:
}
/**
+ * ice_fdir_remap_entries - update the FDir entries in profile
+ * @prof: FDir structure pointer
+ * @tun: tunneled or non-tunneled packet
+ * @idx: FDir entry index
+ */
+static void
+ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx)
+{
+ if (idx != prof->cnt && tun < ICE_FD_HW_SEG_MAX) {
+ int i;
+
+ for (i = idx; i < (prof->cnt - 1); i++) {
+ u64 old_entry_h;
+
+ old_entry_h = prof->entry_h[i + 1][tun];
+ prof->entry_h[i][tun] = old_entry_h;
+ prof->vsi_h[i] = prof->vsi_h[i + 1];
+ }
+
+ prof->entry_h[i][tun] = 0;
+ prof->vsi_h[i] = 0;
+ }
+}
+
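The remap is a plain left-shift compaction of the parallel entry_h/vsi_h arrays; a standalone sketch of the same idea on a single array, for illustration only:

/* Standalone sketch of the compaction ice_fdir_remap_entries() does:
 * after slot idx is cleared, shift the tail left one slot and zero the
 * last element.  E.g. {A, B, C, D} with idx 1 becomes {A, C, D, 0}.
 */
static void example_compact(u64 *arr, int cnt, int idx)
{
	int i;

	for (i = idx; i < cnt - 1; i++)
		arr[i] = arr[i + 1];

	arr[cnt - 1] = 0;
}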
+/**
+ * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules
+ * @hw: hardware structure containing filter list
+ * @vsi_idx: VSI handle
+ */
+void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
+{
+ int status, flow;
+
+ if (!hw->fdir_prof)
+ return;
+
+ for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ struct ice_fd_hw_prof *prof = hw->fdir_prof[flow];
+ int tun, i;
+
+ if (!prof || !prof->cnt)
+ continue;
+
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ u64 prof_id;
+
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+
+ for (i = 0; i < prof->cnt; i++) {
+ if (prof->vsi_h[i] != vsi_idx)
+ continue;
+
+ prof->entry_h[i][tun] = 0;
+ prof->vsi_h[i] = 0;
+ break;
+ }
+
+ /* after clearing FDir entries, remap the remaining entries */
+ ice_fdir_remap_entries(prof, tun, i);
+
+ /* find flow profile corresponding to prof_id and clear
+ * vsi_idx from bitmap.
+ */
+ status = ice_flow_rem_vsi_prof(hw, vsi_idx, prof_id);
+ if (status) {
+ dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n",
+ status);
+ }
+ }
+ prof->cnt--;
+ }
+}
+
+/**
* ice_fdir_get_hw_prof - return the ice_fd_hw_prof associated with a flow
* @hw: hardware structure containing the filter list
* @blk: hardware block
@@ -514,6 +589,28 @@ ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
}
/**
+ * ice_fdir_prof_vsi_idx - find or insert a vsi_idx in structure
+ * @prof: pointer to flow director HW profile
+ * @vsi_idx: vsi_idx to locate
+ *
+ * Return the index of vsi_idx in the vsi_h table. If vsi_idx is not
+ * found, append it to the table and return the new slot; e.g. with
+ * vsi_h = {3, 7} and cnt = 2, looking up 7 returns 1 while looking up
+ * 9 appends it and returns 2.
+ */
+static u16
+ice_fdir_prof_vsi_idx(struct ice_fd_hw_prof *prof, int vsi_idx)
+{
+ u16 idx = 0;
+
+ for (idx = 0; idx < prof->cnt; idx++)
+ if (prof->vsi_h[idx] == vsi_idx)
+ return idx;
+
+ if (idx == prof->cnt)
+ prof->vsi_h[prof->cnt++] = vsi_idx;
+ return idx;
+}
+
+/**
* ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
* @pf: pointer to the PF structure
* @seg: protocol header description pointer
@@ -530,11 +627,12 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
struct ice_flow_prof *prof = NULL;
struct ice_fd_hw_prof *hw_prof;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u64 entry1_h = 0;
u64 entry2_h = 0;
+ bool del_last;
u64 prof_id;
int err;
+ int idx;
main_vsi = ice_get_main_vsi(pf);
if (!main_vsi)
@@ -581,24 +679,20 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
* actions (NULL) and zero actions 0.
*/
prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- TNL_SEG_CNT(tun), &prof);
- if (status)
- return ice_status_to_errno(status);
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
- main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry1_h);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ TNL_SEG_CNT(tun), &prof);
+ if (err)
+ return err;
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
+ if (err)
goto err_prof;
- }
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
- ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry2_h);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
+ if (err)
goto err_entry;
- }
hw_prof->fdir_seg[tun] = seg;
hw_prof->entry_h[0][tun] = entry1_h;
@@ -608,8 +702,60 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
if (!hw_prof->cnt)
hw_prof->cnt = 2;
+ for (idx = 1; idx < ICE_CHNL_MAX_TC; idx++) {
+ u16 vsi_idx;
+ u16 vsi_h;
+
+ if (!ice_is_adq_active(pf) || !main_vsi->tc_map_vsi[idx])
+ continue;
+
+ entry1_h = 0;
+ vsi_h = main_vsi->tc_map_vsi[idx]->idx;
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ main_vsi->idx, vsi_h,
+ ICE_FLOW_PRIO_NORMAL, seg,
+ &entry1_h);
+ if (err) {
+ dev_err(dev, "Could not add Channel VSI %d to flow group\n",
+ idx);
+ goto err_unroll;
+ }
+
+ vsi_idx = ice_fdir_prof_vsi_idx(hw_prof,
+ main_vsi->tc_map_vsi[idx]->idx);
+ hw_prof->entry_h[vsi_idx][tun] = entry1_h;
+ }
+
return 0;
+err_unroll:
+ entry1_h = 0;
+ hw_prof->fdir_seg[tun] = NULL;
+
+ /* The variable del_last will be used to determine when to clean up
+ * the VSI group data. The VSI data is not needed if there are no
+ * segments.
+ */
+ del_last = true;
+ for (idx = 0; idx < ICE_FD_HW_SEG_MAX; idx++)
+ if (hw_prof->fdir_seg[idx]) {
+ del_last = false;
+ break;
+ }
+
+ for (idx = 0; idx < hw_prof->cnt; idx++) {
+ u16 vsi_num = ice_get_hw_vsi_num(hw, hw_prof->vsi_h[idx]);
+
+ if (!hw_prof->entry_h[idx][tun])
+ continue;
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
+ ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
+ hw_prof->entry_h[idx][tun] = 0;
+ if (del_last)
+ hw_prof->vsi_h[idx] = 0;
+ }
+ if (del_last)
+ hw_prof->cnt = 0;
err_entry:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
@@ -1174,6 +1320,31 @@ err_exit:
}
/**
+ * ice_update_per_q_fltr
+ * @vsi: ptr to VSI
+ * @q_index: queue index
+ * @inc: true to increment or false to decrement per queue filter count
+ *
+ * This function is used to keep track of per queue sideband filters
+ */
+static void ice_update_per_q_fltr(struct ice_vsi *vsi, u32 q_index, bool inc)
+{
+ struct ice_rx_ring *rx_ring;
+
+ if (!vsi->num_rxq || q_index >= vsi->num_rxq)
+ return;
+
+ rx_ring = vsi->rx_rings[q_index];
+ if (!rx_ring || !rx_ring->ch)
+ return;
+
+ if (inc)
+ atomic_inc(&rx_ring->ch->num_sb_fltr);
+ else
+ atomic_dec_if_positive(&rx_ring->ch->num_sb_fltr);
+}
+
+/**
* ice_fdir_write_fltr - send a flow director filter to the hardware
* @pf: PF data structure
* @input: filter structure
@@ -1190,7 +1361,6 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
struct ice_hw *hw = &pf->hw;
struct ice_fltr_desc desc;
struct ice_vsi *ctrl_vsi;
- enum ice_status status;
u8 *pkt, *frag_pkt;
bool has_frag;
int err;
@@ -1209,11 +1379,9 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
}
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ if (err)
goto err_free_all;
- }
err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
if (err)
goto err_free_all;
@@ -1223,12 +1391,10 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
if (has_frag) {
/* does not return error */
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
- is_tun);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
+ is_tun);
+ if (err)
goto err_frag;
- }
err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
if (err)
goto err_frag;
@@ -1324,13 +1490,32 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf)
}
/**
+ * ice_fdir_del_all_fltrs - Delete all flow director filters
+ * @vsi: the VSI being changed
+ *
+ * This function needs to be called while holding hw->fdir_fltr_lock
+ */
+void ice_fdir_del_all_fltrs(struct ice_vsi *vsi)
+{
+ struct ice_fdir_fltr *f_rule, *tmp;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+
+ list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
+ ice_fdir_write_all_fltr(pf, f_rule, false);
+ ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
+ list_del(&f_rule->fltr_node);
+ devm_kfree(ice_pf_to_dev(pf), f_rule);
+ }
+}
+
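The walk uses the _safe iterator precisely because each node is freed mid-loop; a self-contained sketch of the idiom (requires <linux/list.h> and <linux/slab.h>; the struct and function names are illustrative):

/* Sketch of the list_for_each_entry_safe() idiom used above: tmp holds
 * the next node so the current one can be unlinked and freed mid-walk.
 */
struct example_node {
	struct list_head link;
};

static void example_free_all(struct list_head *head)
{
	struct example_node *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, head, link) {
		list_del(&cur->link);
		kfree(cur);
	}
}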
+/**
* ice_vsi_manage_fdir - turn on/off flow director
* @vsi: the VSI being changed
* @ena: boolean value indicating if this is an enable or disable request
*/
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
{
- struct ice_fdir_fltr *f_rule, *tmp;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_fltr_ptype flow;
@@ -1344,13 +1529,8 @@ void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
mutex_lock(&hw->fdir_fltr_lock);
if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
goto release_lock;
- list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
- /* ignore return value */
- ice_fdir_write_all_fltr(pf, f_rule, false);
- ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
- list_del(&f_rule->fltr_node);
- devm_kfree(ice_hw_to_dev(hw), f_rule);
- }
+
+ ice_fdir_del_all_fltrs(vsi);
if (hw->fdir_prof)
for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
@@ -1401,18 +1581,25 @@ ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
{
struct ice_fdir_fltr *old_fltr;
struct ice_hw *hw = &pf->hw;
+ struct ice_vsi *vsi;
int err = -ENOENT;
/* Do not update filters during reset */
if (ice_is_reset_in_progress(pf->state))
return -EBUSY;
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EINVAL;
+
old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
if (old_fltr) {
err = ice_fdir_write_all_fltr(pf, old_fltr, false);
if (err)
return err;
ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
+ /* update sb-filters count, specific to ring->channel */
+ ice_update_per_q_fltr(vsi, old_fltr->orig_q_index, false);
if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
/* we just deleted the last filter of flow_type so we
* should also delete the HW filter info.
@@ -1424,6 +1611,8 @@ ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
if (!input)
return err;
ice_fdir_list_add_fltr(hw, input);
+ /* update sb-filters count, specific to ring->channel */
+ ice_update_per_q_fltr(vsi, input->orig_q_index, true);
ice_fdir_update_cntrs(hw, input->flow_type, true);
return 0;
}
@@ -1463,6 +1652,39 @@ int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
}
/**
+ * ice_update_ring_dest_vsi - update dest ring and dest VSI
+ * @vsi: pointer to target VSI
+ * @dest_vsi: ptr to dest VSI index
+ * @ring: ptr to dest ring
+ *
+ * This function updates the destination VSI and queue if the user-specified
+ * target queue falls within a channel's (aka ADQ) queue region.
+ */
+static void
+ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring)
+{
+ struct ice_channel *ch;
+
+ list_for_each_entry(ch, &vsi->ch_list, list) {
+ if (!ch->ch_vsi)
+ continue;
+
+ /* make sure to locate corresponding channel based on "queue"
+ * specified
+ */
+ if ((*ring < ch->base_q) ||
+ (*ring >= (ch->base_q + ch->num_rxq)))
+ continue;
+
+ /* update the dest_vsi based on channel */
+ *dest_vsi = ch->ch_vsi->idx;
+
+ /* update the "ring" to be correct based on channel */
+ *ring -= ch->base_q;
+ }
+}
+
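The rebase arithmetic itself is simple; a sketch under the same assumption that a channel covers queues [base_q, base_q + num_rxq), with an illustrative helper name:

/* Sketch of the per-channel rebase: returns true when the absolute
 * ring index lands in this channel's queue region, rewriting it to be
 * channel-relative.  E.g. ring 12, base_q 8, num_rxq 8 -> ring 4.
 */
static bool example_rebase_ring(u32 *ring, u16 base_q, u16 num_rxq)
{
	if (*ring < base_q || *ring >= base_q + (u32)num_rxq)
		return false;

	*ring -= base_q;
	return true;
}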
+/**
* ice_set_fdir_input_set - Set the input set for Flow Director
* @vsi: pointer to target VSI
* @fsp: pointer to ethtool Rx flow specification
@@ -1473,6 +1695,7 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
struct ice_fdir_fltr *input)
{
u16 dest_vsi, q_index = 0;
+ u16 orig_q_index = 0;
struct ice_pf *pf;
struct ice_hw *hw;
int flow_type;
@@ -1499,6 +1722,8 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
if (ring >= vsi->num_rxq)
return -EINVAL;
+ orig_q_index = ring;
+ ice_update_ring_dest_vsi(vsi, &dest_vsi, &ring);
dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
q_index = ring;
}
@@ -1507,6 +1732,11 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
input->q_index = q_index;
flow_type = fsp->flow_type & ~FLOW_EXT;
+ /* Record the original queue index as specified by the user.
+ * With a channel configuration, 'q_index' becomes relative
+ * to the TC (channel).
+ */
+ input->orig_q_index = orig_q_index;
input->dest_vsi = dest_vsi;
input->dest_ctl = dest_ctl;
input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
@@ -1694,6 +1924,8 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
remove_sw_rule:
ice_fdir_update_cntrs(hw, input->flow_type, false);
+ /* update sb-filters count, specific to ring->channel */
+ ice_update_per_q_fltr(vsi, input->orig_q_index, false);
list_del(&input->fltr_node);
release_lock:
mutex_unlock(&hw->fdir_fltr_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 4dca009bdd50..ae089d32ee9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -712,7 +712,7 @@ ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
* @hw: pointer to the hardware structure
* @cntr_id: returns counter index
*/
-enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
+int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
{
return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
@@ -723,7 +723,7 @@ enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
* @hw: pointer to the hardware structure
* @cntr_id: counter index to be freed
*/
-enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
+int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
{
return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
@@ -735,8 +735,7 @@ enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
* @cntr_id: returns counter index
* @num_fltr: number of filter entries to be allocated
*/
-enum ice_status
-ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
{
return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
@@ -749,8 +748,7 @@ ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
* @cntr_id: returns counter index
* @num_fltr: number of filter entries to be allocated
*/
-enum ice_status
-ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
{
return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
@@ -872,7 +870,7 @@ static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr)
* @frag: generate a fragment packet
* @tun: true implies generate a tunnel packet
*/
-enum ice_status
+int
ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
u8 *pkt, bool frag, bool tun)
{
@@ -919,15 +917,15 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
if (ice_fdir_pkt[idx].flow == flow)
break;
if (idx == ICE_FDIR_NUM_PKT)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!tun) {
memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
loc = pkt;
} else {
if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
if (!ice_fdir_pkt[idx].tun_pkt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
memcpy(pkt, ice_fdir_pkt[idx].tun_pkt,
ice_fdir_pkt[idx].tun_pkt_len);
ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET,
@@ -1111,7 +1109,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
if (input->flex_fltr)
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index da4163856f4c..1b9b84490689 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -182,6 +182,7 @@ struct ice_fdir_fltr {
/* filter control */
u16 q_index;
+ u16 orig_q_index;
u16 dest_vsi;
u8 dest_ctl;
u8 cnt_ena;
@@ -201,16 +202,14 @@ struct ice_fdir_base_pkt {
const u8 *tun_pkt;
};
-enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
-enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
-enum ice_status
-ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
-enum ice_status
-ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
+int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
+int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
void
ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
struct ice_fltr_desc *fdesc, bool add);
-enum ice_status
+int
ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
u8 *pkt, bool frag, bool tun);
int ice_get_fdir_cnt_all(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 6ad1c2559724..4deb2c9446ec 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -314,6 +314,78 @@ ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
}
/**
+ * ice_hw_ptype_ena - check if the PTYPE is enabled or not
+ * @hw: pointer to the HW structure
+ * @ptype: the hardware PTYPE
+ */
+bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
+{
+ return ptype < ICE_FLOW_PTYPE_MAX &&
+ test_bit(ptype, hw->hw_ptype);
+}
+
+/**
+ * ice_marker_ptype_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the Marker PType TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual Marker PType TCAM entries.
+ */
+static void *
+ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_marker_ptype_tcam_section *marker_ptype;
+
+ if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
+ return NULL;
+
+ if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ marker_ptype = section;
+ if (index >= le16_to_cpu(marker_ptype->count))
+ return NULL;
+
+ return marker_ptype->tcam + index;
+}
+
+/**
+ * ice_fill_hw_ptype - fill the enabled PTYPE bit information
+ * @hw: pointer to the HW structure
+ */
+static void ice_fill_hw_ptype(struct ice_hw *hw)
+{
+ struct ice_marker_ptype_tcam_entry *tcam;
+ struct ice_seg *seg = hw->seg;
+ struct ice_pkg_enum state;
+
+ bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
+ if (!seg)
+ return;
+
+ memset(&state, 0, sizeof(state));
+
+ do {
+ tcam = ice_pkg_enum_entry(seg, &state,
+ ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
+ ice_marker_ptype_tcam_handler);
+ if (tcam &&
+ le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
+ le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
+ set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype);
+
+ seg = NULL;
+ } while (tcam);
+}
+
+/**
* ice_boost_tcam_handler
* @sect_type: section type
* @section: pointer to section
@@ -358,7 +430,7 @@ ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
* if it is found. The ice_seg parameter must not be NULL since the first call
* to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
*/
-static enum ice_status
+static int
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
struct ice_boost_tcam_entry **entry)
{
@@ -368,7 +440,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
memset(&state, 0, sizeof(state));
if (!ice_seg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
do {
tcam = ice_pkg_enum_entry(ice_seg, &state,
@@ -383,7 +455,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
} while (tcam);
*entry = NULL;
- return ICE_ERR_CFG;
+ return -EIO;
}
/**
@@ -549,7 +621,7 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
* ------------------------------
* Result: key: b01 10 11 11 00 00
*/
-static enum ice_status
+static int
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
u8 *key_inv)
{
@@ -558,7 +630,7 @@ ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
- return ICE_ERR_CFG;
+ return -EIO;
*key = 0;
*key_inv = 0;
@@ -651,7 +723,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
* dc == NULL --> dc mask is all 0's (no don't care bits)
* nm == NULL --> nm mask is all 0's (no never match bits)
*/
-static enum ice_status
+static int
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
u16 len)
{
@@ -660,11 +732,11 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
/* size must be a multiple of 2 bytes. */
if (size % 2)
- return ICE_ERR_CFG;
+ return -EIO;
half_size = size / 2;
if (off + len > half_size)
- return ICE_ERR_CFG;
+ return -EIO;
/* Make sure at most one bit is set in the never match mask. Having more
* than one never match mask bit set will cause HW to consume excessive
@@ -672,13 +744,13 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
*/
#define ICE_NVR_MTCH_BITS_MAX 1
if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
- return ICE_ERR_CFG;
+ return -EIO;
for (i = 0; i < len; i++)
if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
dc ? dc[i] : 0, nm ? nm[i] : 0,
key + off + i, key + half_size + off + i))
- return ICE_ERR_CFG;
+ return -EIO;
return 0;
}
@@ -692,25 +764,25 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
* or writing of the package. When attempting to obtain write access, the
* caller must check for the following two return values:
*
- * ICE_SUCCESS - Means the caller has acquired the global config lock
- * and can perform writing of the package.
- * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
- * package or has found that no update was necessary; in
- * this case, the caller can just skip performing any
- * update of the package.
- */
-static enum ice_status
+ * 0 - Means the caller has acquired the global config lock
+ * and can perform writing of the package.
+ * -EALREADY - Indicates another driver has already written the
+ * package or has found that no update was necessary; in
+ * this case, the caller can just skip performing any
+ * update of the package.
+ */
+static int
ice_acquire_global_cfg_lock(struct ice_hw *hw,
enum ice_aq_res_access_type access)
{
- enum ice_status status;
+ int status;
status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
if (!status)
mutex_lock(&ice_global_cfg_lock_sw);
- else if (status == ICE_ERR_AQ_NO_WORK)
+ else if (status == -EALREADY)
ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
return status;
@@ -735,7 +807,7 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
*
* This function will request ownership of the change lock.
*/
-enum ice_status
+int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
@@ -765,14 +837,14 @@ void ice_release_change_lock(struct ice_hw *hw)
*
* Download Package (0x0C40)
*/
-static enum ice_status
+static int
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, bool last_buf, u32 *error_offset,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (error_offset)
*error_offset = 0;
@@ -787,7 +859,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == ICE_ERR_AQ_ERROR) {
+ if (status == -EIO) {
/* Read error from buffer only when the FW returned an error */
struct ice_aqc_download_pkg_resp *resp;
@@ -813,14 +885,14 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
*
* Update Package (0x0C42)
*/
-static enum ice_status
+static int
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
bool last_buf, u32 *error_offset, u32 *error_info,
struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (error_offset)
*error_offset = 0;
@@ -835,7 +907,7 @@ ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == ICE_ERR_AQ_ERROR) {
+ if (status == -EIO) {
/* Read error from buffer only when the FW returned an error */
struct ice_aqc_download_pkg_resp *resp;
@@ -892,11 +964,10 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
*
* Obtains change lock and updates package.
*/
-static enum ice_status
-ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- enum ice_status status;
u32 offset, info, i;
+ int status;
status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
if (status)
@@ -921,6 +992,22 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
return status;
}
+static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
+{
+ switch (aq_err) {
+ case ICE_AQ_RC_ENOSEC:
+ case ICE_AQ_RC_EBADSIG:
+ return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
+ case ICE_AQ_RC_ESVN:
+ return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
+ case ICE_AQ_RC_EBADMAN:
+ case ICE_AQ_RC_EBADBUF:
+ return ICE_DDP_PKG_LOAD_ERROR;
+ default:
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
/**
* ice_dwnld_cfg_bufs
* @hw: pointer to the hardware structure
@@ -931,15 +1018,17 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
* to the firmware. Metadata buffers are skipped, and the first metadata buffer
* found indicates that the rest of the buffers are all metadata buffers.
*/
-static enum ice_status
+static enum ice_ddp_state
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- enum ice_status status;
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
struct ice_buf_hdr *bh;
+ enum ice_aq_err err;
u32 offset, info, i;
+ int status;
if (!bufs || !count)
- return ICE_ERR_PARAM;
+ return ICE_DDP_PKG_ERR;
/* If the first buffer's first section has its metadata bit set
* then there are no buffers to be downloaded, and the operation is
@@ -947,20 +1036,13 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
*/
bh = (struct ice_buf_hdr *)bufs;
if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
- return 0;
-
- /* reset pkg_dwnld_status in case this function is called in the
- * reset/rebuild flow
- */
- hw->pkg_dwnld_status = ICE_AQ_RC_OK;
+ return ICE_DDP_PKG_SUCCESS;
status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
if (status) {
- if (status == ICE_ERR_AQ_NO_WORK)
- hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
- else
- hw->pkg_dwnld_status = hw->adminq.sq_last_status;
- return status;
+ if (status == -EALREADY)
+ return ICE_DDP_PKG_ALREADY_LOADED;
+ return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
}
for (i = 0; i < count; i++) {
@@ -986,11 +1068,11 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
&offset, &info, NULL);
/* Save AQ status from download package */
- hw->pkg_dwnld_status = hw->adminq.sq_last_status;
if (status) {
ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
status, offset, info);
-
+ err = hw->adminq.sq_last_status;
+ state = ice_map_aq_err_to_ddp_state(err);
break;
}
@@ -1000,7 +1082,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
ice_release_global_cfg_lock(hw);
- return status;
+ return state;
}
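
Illustrative caller shape (hedged; the full body of ice_download_pkg() sits outside the hunks shown here): the ICE segment's buffer table is handed to ice_dwnld_cfg_bufs() and the resulting ice_ddp_state propagates up to ice_init_pkg(). ice_find_buf_table() is assumed to return the segment's buffer table.

static enum ice_ddp_state example_download(struct ice_hw *hw,
					   struct ice_seg *ice_seg)
{
	struct ice_buf_table *tbl = ice_find_buf_table(ice_seg); /* assumed helper */

	return ice_dwnld_cfg_bufs(hw, tbl->buf_array,
				  le32_to_cpu(tbl->buf_count));
}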
/**
@@ -1012,7 +1094,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
*
* Get Package Info List (0x0C43)
*/
-static enum ice_status
+static int
ice_aq_get_pkg_info_list(struct ice_hw *hw,
struct ice_aqc_get_pkg_info_resp *pkg_info,
u16 buf_size, struct ice_sq_cd *cd)
@@ -1031,7 +1113,7 @@ ice_aq_get_pkg_info_list(struct ice_hw *hw,
*
* Handles the download of a complete package.
*/
-static enum ice_status
+static enum ice_ddp_state
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
@@ -1062,13 +1144,13 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
*
* Saves off the package details into the HW structure.
*/
-static enum ice_status
+static enum ice_ddp_state
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
struct ice_generic_seg_hdr *seg_hdr;
if (!pkg_hdr)
- return ICE_ERR_PARAM;
+ return ICE_DDP_PKG_ERR;
seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
if (seg_hdr) {
@@ -1082,7 +1164,7 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
ICE_SID_METADATA);
if (!meta) {
ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
}
hw->pkg_ver = meta->ver;
@@ -1104,10 +1186,10 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
seg_hdr->seg_id);
} else {
ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
}
- return 0;
+ return ICE_DDP_PKG_SUCCESS;
}
/**
@@ -1116,21 +1198,22 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
*
* Store details of the package currently loaded in HW into the HW structure.
*/
-static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
+static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
struct ice_aqc_get_pkg_info_resp *pkg_info;
- enum ice_status status;
u16 size;
u32 i;
size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
pkg_info = kzalloc(size, GFP_KERNEL);
if (!pkg_info)
- return ICE_ERR_NO_MEMORY;
+ return ICE_DDP_PKG_ERR;
- status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
- if (status)
+ if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
+ state = ICE_DDP_PKG_ERR;
goto init_pkg_free_alloc;
+ }
for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT 4
@@ -1165,7 +1248,7 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
init_pkg_free_alloc:
kfree(pkg_info);
- return status;
+ return state;
}
/**
@@ -1176,28 +1259,28 @@ init_pkg_free_alloc:
* Verifies various attributes of the package file, including length, format
* version, and the requirement of at least one segment.
*/
-static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
u32 seg_count;
u32 i;
if (len < struct_size(pkg, seg_offset, 1))
- return ICE_ERR_BUF_TOO_SHORT;
+ return ICE_DDP_PKG_INVALID_FILE;
if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
/* pkg must have at least one segment */
seg_count = le32_to_cpu(pkg->seg_count);
if (seg_count < 1)
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
/* make sure segment array fits in package length */
if (len < struct_size(pkg, seg_offset, seg_count))
- return ICE_ERR_BUF_TOO_SHORT;
+ return ICE_DDP_PKG_INVALID_FILE;
/* all segments must fit within length */
for (i = 0; i < seg_count; i++) {
@@ -1206,16 +1289,16 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
/* segment header must fit */
if (len < off + sizeof(*seg))
- return ICE_ERR_BUF_TOO_SHORT;
+ return ICE_DDP_PKG_INVALID_FILE;
seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
/* segment body must fit */
if (len < off + le32_to_cpu(seg->seg_size))
- return ICE_ERR_BUF_TOO_SHORT;
+ return ICE_DDP_PKG_INVALID_FILE;
}
- return 0;
+ return ICE_DDP_PKG_SUCCESS;
}
/**
@@ -1259,13 +1342,18 @@ static void ice_init_pkg_regs(struct ice_hw *hw)
* version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
* definitions.
*/
-static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
+static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
- if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
- pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
- return ICE_ERR_NOT_SUPPORTED;
+ if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
+ else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
- return 0;
+ return ICE_DDP_PKG_SUCCESS;
}
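
Worked examples of the new range check, as a standalone mimic. SUPP_MAJ/SUPP_MNR stand in for ICE_PKG_SUPP_VER_MAJ/ICE_PKG_SUPP_VER_MNR and are assumed to be 1.3 for illustration.

#include <assert.h>

#define SUPP_MAJ 1
#define SUPP_MNR 3

enum res { OK, TOO_HIGH, TOO_LOW };

static enum res chk(int maj, int mnr)
{
	if (maj > SUPP_MAJ || (maj == SUPP_MAJ && mnr > SUPP_MNR))
		return TOO_HIGH;
	if (maj < SUPP_MAJ || (maj == SUPP_MAJ && mnr < SUPP_MNR))
		return TOO_LOW;
	return OK;	/* exact match is still required for success */
}

int main(void)
{
	assert(chk(1, 3) == OK);
	assert(chk(1, 4) == TOO_HIGH);
	assert(chk(2, 0) == TOO_HIGH);
	assert(chk(1, 2) == TOO_LOW);
	assert(chk(0, 9) == TOO_LOW);
	return 0;
}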
/**
@@ -1276,20 +1364,20 @@ static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
*
* This function checks the package version compatibility with driver and NVM
*/
-static enum ice_status
+static enum ice_ddp_state
ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
struct ice_seg **seg)
{
struct ice_aqc_get_pkg_info_resp *pkg;
- enum ice_status status;
+ enum ice_ddp_state state;
u16 size;
u32 i;
/* Check package version compatibility */
- status = ice_chk_pkg_version(&hw->pkg_ver);
- if (status) {
+ state = ice_chk_pkg_version(&hw->pkg_ver);
+ if (state) {
ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
- return status;
+ return state;
}
/* find ICE segment in given package */
@@ -1297,18 +1385,19 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
ospkg);
if (!*seg) {
ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
- return ICE_ERR_CFG;
+ return ICE_DDP_PKG_INVALID_FILE;
}
/* Check if FW is compatible with the OS package */
size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
pkg = kzalloc(size, GFP_KERNEL);
if (!pkg)
- return ICE_ERR_NO_MEMORY;
+ return ICE_DDP_PKG_ERR;
- status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
- if (status)
+ if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
+ state = ICE_DDP_PKG_LOAD_ERROR;
goto fw_ddp_compat_free_alloc;
+ }
for (i = 0; i < le32_to_cpu(pkg->count); i++) {
/* loop till we find the NVM package */
@@ -1318,7 +1407,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
pkg->pkg_info[i].ver.major ||
(*seg)->hdr.seg_format_ver.minor >
pkg->pkg_info[i].ver.minor) {
- status = ICE_ERR_FW_DDP_MISMATCH;
+ state = ICE_DDP_PKG_FW_MISMATCH;
ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
}
/* done processing NVM package so break */
@@ -1326,7 +1415,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
}
fw_ddp_compat_free_alloc:
kfree(pkg);
- return status;
+ return state;
}
/**
@@ -1367,7 +1456,7 @@ ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
* and store the index number in struct ice_switch_info *switch_info
* in HW for following use.
*/
-static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
+static int ice_get_prof_index_max(struct ice_hw *hw)
{
u16 prof_index = 0, j, max_prof_index = 0;
struct ice_pkg_enum state;
@@ -1379,7 +1468,7 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
memset(&state, 0, sizeof(state));
if (!hw->seg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_seg = hw->seg;
@@ -1410,6 +1499,34 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
}
/**
+ * ice_get_ddp_pkg_state - get DDP pkg state after download
+ * @hw: pointer to the HW struct
+ * @already_loaded: indicates if pkg was already loaded onto the device
+ */
+static enum ice_ddp_state
+ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
+{
+ if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
+ hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
+ hw->pkg_ver.update == hw->active_pkg_ver.update &&
+ hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
+ !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
+ if (already_loaded)
+ return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
+ else
+ return ICE_DDP_PKG_SUCCESS;
+ } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
+ hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
+ } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
+ hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
+ } else {
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
+/**
* ice_init_pkg - initialize/download package
* @hw: pointer to the hardware structure
* @buf: pointer to the package buffer
@@ -1434,53 +1551,54 @@ static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
* ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
* case.
*/
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
+ bool already_loaded = false;
+ enum ice_ddp_state state;
struct ice_pkg_hdr *pkg;
- enum ice_status status;
struct ice_seg *seg;
if (!buf || !len)
- return ICE_ERR_PARAM;
+ return ICE_DDP_PKG_ERR;
pkg = (struct ice_pkg_hdr *)buf;
- status = ice_verify_pkg(pkg, len);
- if (status) {
+ state = ice_verify_pkg(pkg, len);
+ if (state) {
ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
- status);
- return status;
+ state);
+ return state;
}
/* initialize package info */
- status = ice_init_pkg_info(hw, pkg);
- if (status)
- return status;
+ state = ice_init_pkg_info(hw, pkg);
+ if (state)
+ return state;
/* before downloading the package, check package version for
* compatibility with driver
*/
- status = ice_chk_pkg_compat(hw, pkg, &seg);
- if (status)
- return status;
+ state = ice_chk_pkg_compat(hw, pkg, &seg);
+ if (state)
+ return state;
/* initialize package hints and then download package */
ice_init_pkg_hints(hw, seg);
- status = ice_download_pkg(hw, seg);
- if (status == ICE_ERR_AQ_NO_WORK) {
+ state = ice_download_pkg(hw, seg);
+ if (state == ICE_DDP_PKG_ALREADY_LOADED) {
ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
- status = 0;
+ already_loaded = true;
}
/* Get information on the package currently loaded in HW, then make sure
* the driver is compatible with this version.
*/
- if (!status) {
- status = ice_get_pkg_info(hw);
- if (!status)
- status = ice_chk_pkg_version(&hw->active_pkg_ver);
+ if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
+ state = ice_get_pkg_info(hw);
+ if (!state)
+ state = ice_get_ddp_pkg_state(hw, already_loaded);
}
- if (!status) {
+ if (ice_is_init_pkg_successful(state)) {
hw->seg = seg;
/* on successful package download update other required
* registers to support the package and fill HW tables
@@ -1488,13 +1606,14 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
*/
ice_init_pkg_regs(hw);
ice_fill_blk_tbls(hw);
+ ice_fill_hw_ptype(hw);
ice_get_prof_index_max(hw);
} else {
ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
- status);
+ state);
}
- return status;
+ return state;
}
/**
@@ -1520,18 +1639,19 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
* package buffer, as the new copy will be managed by this function and
* related routines.
*/
-enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
+enum ice_ddp_state
+ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
- enum ice_status status;
+ enum ice_ddp_state state;
u8 *buf_copy;
if (!buf || !len)
- return ICE_ERR_PARAM;
+ return ICE_DDP_PKG_ERR;
buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
- status = ice_init_pkg(hw, buf_copy, len);
- if (status) {
+ state = ice_init_pkg(hw, buf_copy, len);
+ if (!ice_is_init_pkg_successful(state)) {
/* Free the copy, since we failed to initialize the package */
devm_kfree(ice_hw_to_dev(hw), buf_copy);
} else {
@@ -1540,7 +1660,23 @@ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
hw->pkg_size = len;
}
- return status;
+ return state;
+}
+
+/**
+ * ice_is_init_pkg_successful - check if DDP init was successful
+ * @state: state of the DDP pkg after download
+ */
+bool ice_is_init_pkg_successful(enum ice_ddp_state state)
+{
+ switch (state) {
+ case ICE_DDP_PKG_SUCCESS:
+ case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
+ case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
+ return true;
+ default:
+ return false;
+ }
}
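
A hedged sketch of a probe-time consumer of the new return convention; ice_enter_safe_mode() is a hypothetical stand-in for the driver's fallback handling.

static int example_load_pkg(struct ice_pf *pf, const struct firmware *fw)
{
	struct ice_hw *hw = &pf->hw;
	enum ice_ddp_state state;

	state = ice_copy_and_init_pkg(hw, fw->data, fw->size);
	if (ice_is_init_pkg_successful(state))
		return 0;	/* package active, or a compatible one pre-loaded */

	ice_enter_safe_mode(pf);	/* hypothetical fallback path */
	return -EIO;
}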
/**
@@ -1644,7 +1780,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
* NOTE: The caller of the function is responsible for freeing the memory
* allocated for every list entry.
*/
-enum ice_status
+int
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
unsigned long *bm, struct list_head *fv_list)
{
@@ -1658,7 +1794,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
memset(&state, 0, sizeof(state));
if (!ids_cnt || !hw->seg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_seg = hw->seg;
do {
@@ -1702,7 +1838,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
}
} while (fv);
if (list_empty(fv_list))
- return ICE_ERR_CFG;
+ return -EIO;
return 0;
err:
@@ -1711,7 +1847,7 @@ err:
devm_kfree(ice_hw_to_dev(hw), fvl);
}
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -1779,7 +1915,7 @@ static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
* result in some wasted space in the buffer.
* Note: all package contents must be in Little Endian form.
*/
-static enum ice_status
+static int
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
struct ice_buf_hdr *buf;
@@ -1787,17 +1923,17 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
u16 data_end;
if (!bld)
- return ICE_ERR_PARAM;
+ return -EINVAL;
buf = (struct ice_buf_hdr *)&bld->buf;
/* already an active section, can't increase table size */
section_count = le16_to_cpu(buf->section_count);
if (section_count > 0)
- return ICE_ERR_CFG;
+ return -EIO;
if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
- return ICE_ERR_CFG;
+ return -EIO;
bld->reserved_section_table_entries += count;
data_end = le16_to_cpu(buf->data_end) +
@@ -1959,19 +2095,19 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
* creating a package buffer with the tunnel info and issuing an update package
* command.
*/
-static enum ice_status
+static int
ice_create_tunnel(struct ice_hw *hw, u16 index,
enum ice_tunnel_type type, u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
+ int status = -ENOSPC;
mutex_lock(&hw->tnl_lock);
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_create_tunnel_end;
}
@@ -2030,26 +2166,26 @@ ice_create_tunnel_end:
* targeting the specific updates requested and then performing an update
* package.
*/
-static enum ice_status
+static int
ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
+ int status = -ENOSPC;
mutex_lock(&hw->tnl_lock);
if (WARN_ON(!hw->tnl.tbl[index].valid ||
hw->tnl.tbl[index].type != type ||
hw->tnl.tbl[index].port != port)) {
- status = ICE_ERR_OUT_OF_RANGE;
+ status = -EIO;
goto ice_destroy_tunnel_end;
}
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_destroy_tunnel_end;
}
@@ -2097,7 +2233,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
enum ice_tunnel_type tnl_type;
- enum ice_status status;
+ int status;
u16 index;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
@@ -2105,8 +2241,8 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
if (status) {
- netdev_err(netdev, "Error adding UDP tunnel - %s\n",
- ice_stat_str(status));
+ netdev_err(netdev, "Error adding UDP tunnel - %d\n",
+ status);
return -EIO;
}
@@ -2121,15 +2257,15 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
enum ice_tunnel_type tnl_type;
- enum ice_status status;
+ int status;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
ntohs(ti->port));
if (status) {
- netdev_err(netdev, "Error removing UDP tunnel - %s\n",
- ice_stat_str(status));
+ netdev_err(netdev, "Error removing UDP tunnel - %d\n",
+ status);
return -EIO;
}
@@ -2145,17 +2281,17 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
* @prot: variable to receive the protocol ID
* @off: variable to receive the protocol offset
*/
-enum ice_status
+int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off)
{
struct ice_fv_word *fv_ext;
if (prof >= hw->blk[blk].es.count)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (fv_idx >= hw->blk[blk].es.fvw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
@@ -2178,11 +2314,11 @@ ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
* PTG ID that contains it through the PTG parameter, with the value of
* ICE_DEFAULT_PTG (0) meaning it is part the default PTG.
*/
-static enum ice_status
+static int
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
if (ptype >= ICE_XLT1_CNT || !ptg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
return 0;
@@ -2212,21 +2348,21 @@ static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
* This function will remove the ptype from the specific PTG, and move it to
* the default PTG (ICE_DEFAULT_PTG).
*/
-static enum ice_status
+static int
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
struct ice_ptg_ptype **ch;
struct ice_ptg_ptype *p;
if (ptype > ICE_XLT1_CNT - 1)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* Should not happen if .in_use is set, bad config */
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
- return ICE_ERR_CFG;
+ return -EIO;
/* find the ptype within this PTG, and bypass the link over it */
p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
@@ -2259,17 +2395,17 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
* a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
* default PTG.
*/
-static enum ice_status
+static int
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
- enum ice_status status;
u8 original_ptg;
+ int status;
if (ptype > ICE_XLT1_CNT - 1)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
if (status)
@@ -2404,11 +2540,11 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
* This function will lookup the VSI entry in the XLT2 list and return
* the VSI group its associated with.
*/
-static enum ice_status
+static int
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
{
if (!vsig || vsi >= ICE_MAX_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* As long as there's a default or valid VSIG associated with the input
* VSI, the function returns success. Any handling of VSIG will be
@@ -2473,7 +2609,7 @@ static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
* for, the list must match exactly, including the order in which the
* characteristics are listed.
*/
-static enum ice_status
+static int
ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
struct list_head *chs, u16 *vsig)
{
@@ -2487,7 +2623,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
return 0;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -2499,8 +2635,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
* The function will remove all VSIs associated with the input VSIG and move
* them to the DEFAULT_VSIG and mark the VSIG available.
*/
-static enum ice_status
-ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+static int ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
struct ice_vsig_prof *dtmp, *del;
struct ice_vsig_vsi *vsi_cur;
@@ -2508,10 +2643,10 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
idx = vsig & ICE_VSIG_IDX_M;
if (idx >= ICE_MAX_VSIGS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
@@ -2560,7 +2695,7 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
* The function will remove the input VSI from its VSI group and move it
* to the DEFAULT_VSIG.
*/
-static enum ice_status
+static int
ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
@@ -2569,10 +2704,10 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
idx = vsig & ICE_VSIG_IDX_M;
if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* entry already in default VSIG, don't have to remove */
if (idx == ICE_DEFAULT_VSIG)
@@ -2580,7 +2715,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
if (!(*vsi_head))
- return ICE_ERR_CFG;
+ return -EIO;
vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
vsi_cur = (*vsi_head);
@@ -2597,7 +2732,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
/* verify if VSI was removed from group list */
if (!vsi_cur)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
vsi_cur->vsig = ICE_DEFAULT_VSIG;
vsi_cur->changed = 1;
@@ -2618,24 +2753,24 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
* move the entry to the DEFAULT_VSIG, update the original VSIG and
* then move entry to the new VSIG.
*/
-static enum ice_status
+static int
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi *tmp;
- enum ice_status status;
u16 orig_vsig, idx;
+ int status;
idx = vsig & ICE_VSIG_IDX_M;
if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* if VSIG not in use and VSIG is not default type this VSIG
* doesn't exist.
*/
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
vsig != ICE_DEFAULT_VSIG)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
if (status)
@@ -2741,7 +2876,7 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
* @masks: masks for FV
* @prof_id: receives the profile ID
*/
-static enum ice_status
+static int
ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
{
@@ -2752,7 +2887,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
* field vector and mask. This will cause rule interference.
*/
if (blk == ICE_BLK_FD)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
for (i = 0; i < (u8)es->count; i++) {
u16 off = i * es->fvw;
@@ -2768,7 +2903,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
return 0;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -2821,14 +2956,14 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
* This function allocates a new entry in a Profile ID TCAM for a specific
* block.
*/
-static enum ice_status
+static int
ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
u16 *tcam_idx)
{
u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
}
@@ -2841,13 +2976,13 @@ ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
*
* This function frees an entry in a Profile ID TCAM for a specific block.
*/
-static enum ice_status
+static int
ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
{
u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
}
@@ -2861,15 +2996,14 @@ ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
* This function allocates a new profile ID, which also corresponds to a Field
* Vector (Extraction Sequence) entry.
*/
-static enum ice_status
-ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
+static int ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
{
- enum ice_status status;
u16 res_type;
u16 get_prof;
+ int status;
if (!ice_prof_id_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
if (!status)
@@ -2886,14 +3020,13 @@ ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
*
* This function frees a profile ID, which also corresponds to a Field Vector.
*/
-static enum ice_status
-ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+static int ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
u16 tmp_prof_id = (u16)prof_id;
u16 res_type;
if (!ice_prof_id_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
}
@@ -2904,11 +3037,10 @@ ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
* @blk: the block from which to free the profile ID
* @prof_id: the profile ID for which to increment the reference count
*/
-static enum ice_status
-ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+static int ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
if (prof_id > hw->blk[blk].es.count)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw->blk[blk].es.ref_count[prof_id]++;
@@ -3025,17 +3157,17 @@ static void ice_init_all_prof_masks(struct ice_hw *hw)
* @mask: the 16-bit mask
* @mask_idx: variable to receive the mask index
*/
-static enum ice_status
+static int
ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
u16 *mask_idx)
{
bool found_unused = false, found_copy = false;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
u16 unused_idx = 0, copy_idx = 0;
+ int status = -ENOSPC;
u16 i;
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->blk[blk].masks.lock);
@@ -3093,15 +3225,15 @@ err_ice_alloc_prof_mask:
* @blk: hardware block
* @mask_idx: index of mask
*/
-static enum ice_status
+static int
ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
{
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!(mask_idx >= hw->blk[blk].masks.first &&
mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
mutex_lock(&hw->blk[blk].masks.lock);
@@ -3135,14 +3267,14 @@ exit_ice_free_prof_mask:
* @blk: hardware block
* @prof_id: profile ID
*/
-static enum ice_status
+static int
ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
{
u32 mask_bm;
u16 i;
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
- return ICE_ERR_PARAM;
+ return -EINVAL;
mask_bm = hw->blk[blk].es.mask_ena[prof_id];
for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
@@ -3197,7 +3329,7 @@ static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
* @prof_id: profile ID
* @masks: masks
*/
-static enum ice_status
+static int
ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
u16 *masks)
{
@@ -3227,7 +3359,7 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
if (ena_mask & BIT(i))
ice_free_prof_mask(hw, blk, i);
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
}
/* enable the masks for this profile */
@@ -3269,11 +3401,11 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
* @blk: the block from which to free the profile ID
* @prof_id: the profile ID for which to decrement the reference count
*/
-static enum ice_status
+static int
ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
if (prof_id > hw->blk[blk].es.count)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (hw->blk[blk].es.ref_count[prof_id] > 0) {
if (!--hw->blk[blk].es.ref_count[prof_id]) {
@@ -3711,7 +3843,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
* ice_init_hw_tbls - init hardware table memory
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+int ice_init_hw_tbls(struct ice_hw *hw)
{
u8 i;
@@ -3830,7 +3962,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
err:
ice_free_hw_tbls(hw);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -3846,7 +3978,7 @@ err:
* @nm_msk: never match mask
* @key: output of profile ID key
*/
-static enum ice_status
+static int
ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
@@ -3902,7 +4034,7 @@ ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
* @dc_msk: don't care mask
* @nm_msk: never match mask
*/
-static enum ice_status
+static int
ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
@@ -3910,7 +4042,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
{
struct ice_prof_tcam_entry;
- enum ice_status status;
+ int status;
status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
@@ -3929,7 +4061,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
* @vsig: VSIG to query
* @refs: pointer to variable to receive the reference count
*/
-static enum ice_status
+static int
ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
@@ -3938,7 +4070,7 @@ ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
*refs = 0;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
while (ptr) {
@@ -3979,7 +4111,7 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
struct ice_buf_build *bld, struct list_head *chgs)
{
@@ -3999,7 +4131,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
sizeof(p->es[0]));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->prof_id);
@@ -4017,7 +4149,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
struct ice_buf_build *bld, struct list_head *chgs)
{
@@ -4033,7 +4165,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
struct_size(p, entry, 1));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
@@ -4053,7 +4185,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
struct list_head *chgs)
{
@@ -4069,7 +4201,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
struct_size(p, value, 1));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->ptype);
@@ -4085,7 +4217,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
struct list_head *chgs)
{
@@ -4104,7 +4236,7 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
struct_size(p, value, 1));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->vsi);
@@ -4124,18 +4256,18 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
* @blk: hardware block
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
struct list_head *chgs)
{
struct ice_buf_build *b;
struct ice_chs_chg *tmp;
- enum ice_status status;
u16 pkg_sects;
u16 xlt1 = 0;
u16 xlt2 = 0;
u16 tcam = 0;
u16 es = 0;
+ int status;
u16 sects;
/* count number of sections we need */
@@ -4167,7 +4299,7 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
/* Build update package buffer */
b = ice_pkg_buf_alloc(hw);
if (!b)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_pkg_buf_reserve_section(b, sects);
if (status)
@@ -4204,13 +4336,13 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
*/
pkg_sects = ice_pkg_buf_get_active_sections(b);
if (!pkg_sects || pkg_sects != sects) {
- status = ICE_ERR_INVAL_SIZE;
+ status = -EINVAL;
goto error_tmp;
}
/* update package */
status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
- if (status == ICE_ERR_AQ_ERROR)
+ if (status == -EIO)
ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
error_tmp:
@@ -4276,7 +4408,7 @@ static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
* @prof_id: profile ID
* @es: extraction sequence (length of array is determined by the block)
*/
-static enum ice_status
+static int
ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
{
DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
@@ -4308,7 +4440,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
es[i].off == ice_fd_pairs[j].off) {
- set_bit(j, pair_list);
+ __set_bit(j, pair_list);
pair_start[j] = i;
}
}
@@ -4331,7 +4463,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
/* check for room */
if (first_free + 1 < (s8)ice_fd_pairs[index].count)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* place in extraction sequence */
for (k = 0; k < ice_fd_pairs[index].count; k++) {
@@ -4341,7 +4473,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
ice_fd_pairs[index].off + (k * 2);
if (k > first_free)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* keep track of non-relevant fields */
mask_sel |= BIT(first_free - k);
@@ -4452,7 +4584,7 @@ ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
* @attr: array of attributes that will be considered
* @attr_cnt: number of elements in the attribute array
*/
-static enum ice_status
+static int
ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
const struct ice_ptype_attributes *attr, u16 attr_cnt)
{
@@ -4468,11 +4600,11 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
&prof->attr[prof->ptg_cnt]);
if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
}
if (!found)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
return 0;
}
@@ -4493,7 +4625,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* it will not be written until the first call to ice_add_flow that specifies
* the ID value used here.
*/
-enum ice_status
+int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks)
@@ -4501,9 +4633,9 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
- enum ice_status status;
u8 byte = 0;
u8 prof_id;
+ int status;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -4541,7 +4673,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
/* add profile info */
prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
if (!prof) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_prof;
}
@@ -4578,13 +4710,13 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
if (test_bit(ptg, ptgs_used))
continue;
- set_bit(ptg, ptgs_used);
+ __set_bit(ptg, ptgs_used);
/* Check to see there are any attributes for
* this PTYPE, and add them if found.
*/
status = ice_add_prof_attrib(prof, ptg, ptype,
attr, attr_cnt);
- if (status == ICE_ERR_MAX_LIMIT)
+ if (status == -ENOSPC)
break;
if (status) {
/* This is simply a PTYPE/PTG with no
@@ -4661,14 +4793,13 @@ ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
* @blk: hardware block
* @idx: the index to release
*/
-static enum ice_status
-ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
+static int ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
{
/* Masks to invoke a never match entry */
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
- enum ice_status status;
+ int status;
/* write the TCAM entry */
status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
@@ -4688,11 +4819,11 @@ ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
* @blk: hardware block
* @prof: pointer to profile structure to remove
*/
-static enum ice_status
+static int
ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
struct ice_vsig_prof *prof)
{
- enum ice_status status;
+ int status;
u16 i;
for (i = 0; i < prof->tcam_count; i++)
@@ -4701,7 +4832,7 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
status = ice_rel_tcam_idx(hw, blk,
prof->tcam[i].tcam_idx);
if (status)
- return ICE_ERR_HW_TABLE;
+ return -EIO;
}
return 0;
@@ -4714,14 +4845,14 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
* @vsig: the VSIG to remove
* @chg: the change list
*/
-static enum ice_status
+static int
ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *chg)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_vsi *vsi_cur;
struct ice_vsig_prof *d, *t;
- enum ice_status status;
+ int status;
/* remove TCAM entries */
list_for_each_entry_safe(d, t,
@@ -4748,7 +4879,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
p->type = ICE_VSIG_REM;
p->orig_vsig = vsig;
@@ -4771,13 +4902,13 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @hdl: profile handle indicating which profile to remove
* @chg: list to receive a record of changes
*/
-static enum ice_status
+static int
ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
struct list_head *chg)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_prof *p, *t;
- enum ice_status status;
+ int status;
list_for_each_entry_safe(p, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
@@ -4795,7 +4926,7 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
return status;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -4804,12 +4935,11 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
* @blk: hardware block
* @id: profile tracking ID
*/
-static enum ice_status
-ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
+static int ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_chs_chg *del, *tmp;
- enum ice_status status;
struct list_head chg;
+ int status;
u16 i;
INIT_LIST_HEAD(&chg);
@@ -4845,16 +4975,16 @@ err_ice_rem_flow_all:
* previously created through ice_add_prof. If any existing entries
* are associated with this profile, they will be removed as well.
*/
-enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
+int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *pmap;
- enum ice_status status;
+ int status;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
pmap = ice_search_prof_id(hw, blk, id);
if (!pmap) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_rem_prof;
}
@@ -4881,20 +5011,20 @@ err_ice_rem_prof:
* @hdl: profile handle
* @chg: change list
*/
-static enum ice_status
+static int
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
struct list_head *chg)
{
- enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_chs_chg *p;
+ int status = 0;
u16 i;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
if (!map) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_get_prof;
}
@@ -4904,7 +5034,7 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL);
if (!p) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_get_prof;
}
@@ -4936,7 +5066,7 @@ err_ice_get_prof:
*
* This routine makes a copy of the list of profiles in the specified VSIG.
*/
-static enum ice_status
+static int
ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *lst)
{
@@ -4964,7 +5094,7 @@ err_ice_get_profs_vsig:
devm_kfree(ice_hw_to_dev(hw), ent1);
}
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -4974,25 +5104,25 @@ err_ice_get_profs_vsig:
* @lst: the list to be added to
* @hdl: profile handle of entry to add
*/
-static enum ice_status
+static int
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
struct list_head *lst, u64 hdl)
{
- enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *p;
+ int status = 0;
u16 i;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
map = ice_search_prof_id(hw, blk, hdl);
if (!map) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_add_prof_to_lst;
}
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_prof_to_lst;
}
@@ -5021,17 +5151,17 @@ err_ice_add_prof_to_lst:
* @vsig: the VSIG to move the VSI to
* @chg: the change list
*/
-static enum ice_status
+static int
ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
struct list_head *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
u16 orig_vsig;
+ int status;
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
if (!status)
@@ -5081,13 +5211,13 @@ ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
*
* This function appends an enable or disable TCAM entry in the change log
*/
-static enum ice_status
+static int
ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
u16 vsig, struct ice_tcam_inf *tcam,
struct list_head *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
+ int status;
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
@@ -5120,7 +5250,7 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
/* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
tcam->ptg, vsig, 0, tcam->attr.flags,
@@ -5154,13 +5284,13 @@ err_ice_prof_tcam_ena_dis:
* @vsig: the VSIG for which to adjust profile priorities
* @chg: the change list
*/
-static enum ice_status
+static int
ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *chg)
{
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_vsig_prof *t;
- enum ice_status status;
+ int status;
u16 idx;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -5209,7 +5339,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
}
/* keep track of used ptgs */
- set_bit(t->tcam[i].ptg, ptgs_used);
+ __set_bit(t->tcam[i].ptg, ptgs_used);
}
}
@@ -5225,7 +5355,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @rev: true to add entries to the end of the list
* @chg: the change list
*/
-static enum ice_status
+static int
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
bool rev, struct list_head *chg)
{
@@ -5233,26 +5363,26 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
- enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
u16 vsig_idx, i;
+ int status = 0;
/* Error, if this VSIG already has this profile */
if (ice_has_prof_vsig(hw, blk, vsig, hdl))
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
/* new VSIG profile structure */
t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
if (!t)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
if (!map) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_add_prof_id_vsig;
}
@@ -5267,7 +5397,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
/* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_prof_id_vsig;
}
@@ -5337,21 +5467,21 @@ err_ice_add_prof_id_vsig:
* @hdl: the profile handle of the profile that will be added to the VSIG
* @chg: the change list
*/
-static enum ice_status
+static int
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
struct list_head *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
u16 new_vsig;
+ int status;
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
new_vsig = ice_vsig_alloc(hw, blk);
if (!new_vsig) {
- status = ICE_ERR_HW_TABLE;
+ status = -EIO;
goto err_ice_create_prof_id_vsig;
}
@@ -5387,18 +5517,18 @@ err_ice_create_prof_id_vsig:
* @new_vsig: return of new VSIG
* @chg: the change list
*/
-static enum ice_status
+static int
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
struct list_head *lst, u16 *new_vsig,
struct list_head *chg)
{
struct ice_vsig_prof *t;
- enum ice_status status;
+ int status;
u16 vsig;
vsig = ice_vsig_alloc(hw, blk);
if (!vsig)
- return ICE_ERR_HW_TABLE;
+ return -EIO;
status = ice_move_vsi(hw, blk, vsi, vsig, chg);
if (status)
@@ -5428,8 +5558,8 @@ static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
struct ice_vsig_prof *t;
- enum ice_status status;
struct list_head lst;
+ int status;
INIT_LIST_HEAD(&lst);
@@ -5459,14 +5589,14 @@ ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be enabled.
*/
-enum ice_status
+int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
struct ice_chs_chg *tmp, *del;
struct list_head union_lst;
- enum ice_status status;
struct list_head chg;
+ int status;
u16 vsig;
INIT_LIST_HEAD(&union_lst);
@@ -5492,7 +5622,7 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* scenario
*/
if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
- status = ICE_ERR_ALREADY_EXISTS;
+ status = -EEXIST;
goto err_ice_add_prof_id_flow;
}
@@ -5600,7 +5730,7 @@ err_ice_add_prof_id_flow:
* @lst: list to remove the profile from
* @hdl: the profile handle indicating the profile to remove
*/
-static enum ice_status
+static int
ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
{
struct ice_vsig_prof *ent, *tmp;
@@ -5612,7 +5742,7 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
return 0;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -5626,13 +5756,13 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be disabled.
*/
-enum ice_status
+int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
struct ice_chs_chg *tmp, *del;
struct list_head chg, copy;
- enum ice_status status;
+ int status;
u16 vsig;
INIT_LIST_HEAD(&copy);
@@ -5727,7 +5857,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
}
}
} else {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
}
/* update hardware tables */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index a2863f38fd1f..6cbc29bcb02f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -18,10 +18,67 @@
#define ICE_PKG_CNT 4
-enum ice_status
+enum ice_ddp_state {
+ /* Indicates that this call to ice_init_pkg
+ * successfully loaded the requested DDP package
+ */
+ ICE_DDP_PKG_SUCCESS = 0,
+
+ /* Generic error for already-loaded cases; it is mapped later to
+ * one of the three more specific states below
+ */
+ ICE_DDP_PKG_ALREADY_LOADED = -1,
+
+ /* Indicates that a DDP package of the same version has already been
+ * loaded onto the device by a previous call or by another PF
+ */
+ ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
+
+ /* The device has a DDP package that is not supported by the driver */
+ ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
+
+ /* The device has a compatible package
+ * (but different from the one requested) already loaded
+ */
+ ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
+
+ /* The firmware loaded on the device is not compatible with
+ * the DDP package loaded
+ */
+ ICE_DDP_PKG_FW_MISMATCH = -5,
+
+ /* The DDP package file is invalid */
+ ICE_DDP_PKG_INVALID_FILE = -6,
+
+ /* The version of the DDP package provided is higher than
+ * the driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
+
+ /* The version of the DDP package provided is lower than the
+ * driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
+
+ /* The signature of the DDP package file provided is invalid */
+ ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9,
+
+ /* The DDP package file security revision is too low and not
+ * supported by firmware
+ */
+ ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10,
+
+ /* An error occurred in firmware while loading the DDP package */
+ ICE_DDP_PKG_LOAD_ERROR = -11,
+
+ /* Other errors */
+ ICE_DDP_PKG_ERR = -12
+};
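+
A hypothetical companion helper (not part of this patch) showing how the enum lends itself to log-friendly strings, in the spirit of the netdev_err() changes earlier in the diff.

static const char *ice_ddp_state_str(enum ice_ddp_state state)
{
	switch (state) {
	case ICE_DDP_PKG_SUCCESS:
		return "success";
	case ICE_DDP_PKG_ALREADY_LOADED:
		return "already loaded";
	case ICE_DDP_PKG_FW_MISMATCH:
		return "firmware/package mismatch";
	case ICE_DDP_PKG_INVALID_FILE:
		return "invalid file";
	default:
		return "error";
	}
}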
+
+int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
-enum ice_status
+int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off);
void
@@ -29,7 +86,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
unsigned long *bm);
void
ice_init_prof_result_bm(struct ice_hw *hw);
-enum ice_status
+int
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
unsigned long *bm, struct list_head *fv_list);
bool
@@ -40,22 +97,26 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
-enum ice_status
+/* Rx parser PTYPE functions */
+bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
+
+/* XLT2/VSI group functions */
+int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks);
-enum ice_status
+int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status
+int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
-enum ice_status
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
+enum ice_ddp_state
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
+bool ice_is_init_pkg_successful(enum ice_ddp_state state);
+int ice_init_hw_tbls(struct ice_hw *hw);
void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
-enum ice_status
-ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
+int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
#endif /* _ICE_FLEX_PIPE_H_ */
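
As an editorial aside: the new ice_ddp_state values above distinguish "hard" failures from already-loaded cases that still leave the device usable, a decision the driver exposes through ice_is_init_pkg_successful(). A minimal standalone sketch of how a caller might classify these states; the classification rule here is an illustrative assumption, not the driver's exact policy:

/* Sketch only: enum values copied from the header above */
#include <stdbool.h>
#include <stdio.h>

enum ice_ddp_state {
	ICE_DDP_PKG_SUCCESS = 0,
	ICE_DDP_PKG_ALREADY_LOADED = -1,
	ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
	ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
	/* remaining states elided; see the full enum above */
	ICE_DDP_PKG_ERR = -12,
};

static bool ddp_state_is_usable(enum ice_ddp_state s)
{
	/* Assumption: a same-version or compatible package already on
	 * the device still counts as a working configuration.
	 */
	return s == ICE_DDP_PKG_SUCCESS ||
	       s == ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED ||
	       s == ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
}

int main(void)
{
	printf("state -4 usable: %d\n",
	       ddp_state_is_usable(ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED));
	return 0;
}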
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 0f572a36d021..fc087e0b5292 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -160,6 +160,7 @@ struct ice_meta_sect {
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
+#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_TXPARSER_BOOST_TCAM 66
@@ -201,6 +202,24 @@ enum ice_sect {
ICE_SECT_COUNT
};
+/* Packet Type (PTYPE) values */
+#define ICE_PTYPE_MAC_PAY 1
+#define ICE_PTYPE_IPV4_PAY 23
+#define ICE_PTYPE_IPV4_UDP_PAY 24
+#define ICE_PTYPE_IPV4_TCP_PAY 26
+#define ICE_PTYPE_IPV4_SCTP_PAY 27
+#define ICE_PTYPE_IPV6_PAY 89
+#define ICE_PTYPE_IPV6_UDP_PAY 90
+#define ICE_PTYPE_IPV6_TCP_PAY 92
+#define ICE_PTYPE_IPV6_SCTP_PAY 93
+#define ICE_MAC_IPV4_ESP 160
+#define ICE_MAC_IPV6_ESP 161
+#define ICE_MAC_IPV4_AH 162
+#define ICE_MAC_IPV6_AH 163
+#define ICE_MAC_IPV4_NAT_T_ESP 164
+#define ICE_MAC_IPV6_NAT_T_ESP 165
+#define ICE_MAC_IPV4_GTPU 329
+#define ICE_MAC_IPV6_GTPU 330
#define ICE_MAC_IPV4_GTPU_IPV4_FRAG 331
#define ICE_MAC_IPV4_GTPU_IPV4_PAY 332
#define ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY 333
@@ -221,6 +240,10 @@ enum ice_sect {
#define ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY 348
#define ICE_MAC_IPV6_GTPU_IPV6_TCP 349
#define ICE_MAC_IPV6_GTPU_IPV6_ICMPV6 350
+#define ICE_MAC_IPV4_PFCP_SESSION 352
+#define ICE_MAC_IPV6_PFCP_SESSION 354
+#define ICE_MAC_IPV4_L2TPV3 360
+#define ICE_MAC_IPV6_L2TPV3 361
/* Attributes that can modify PTYPE definitions.
*
@@ -329,6 +352,25 @@ struct ice_boost_tcam_section {
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
+/* package Marker Ptype TCAM entry */
+struct ice_marker_ptype_tcam_entry {
+#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
+ __le16 addr;
+ __le16 ptype;
+ u8 keys[20];
+};
+
+struct ice_marker_ptype_tcam_section {
+ __le16 count;
+ __le16 reserved;
+ struct ice_marker_ptype_tcam_entry tcam[];
+};
+
+#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \
+ sizeof(struct ice_marker_ptype_tcam_entry), \
+ sizeof(struct ice_marker_ptype_tcam_entry))
+
struct ice_xlt1_section {
__le16 count;
__le16 offset;
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index f160672448a0..beed4838dcbe 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -609,8 +609,6 @@ struct ice_flow_prof_params {
ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
ICE_FLOW_SEG_HDR_NAT_T_ESP)
-#define ICE_FLOW_SEG_HDRS_L2_MASK \
- (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
@@ -625,8 +623,7 @@ struct ice_flow_prof_params {
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
*/
-static enum ice_status
-ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
+static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
u8 i;
@@ -634,12 +631,12 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
/* Multiple L3 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
!is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Multiple L4 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
!is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
return 0;
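
The validation above relies on the usual power-of-two trick to ensure at most one L3 and one L4 header bit is set per segment. A standalone sketch of that check, with the mask values assumed purely for illustration (the driver ORs together individual ICE_FLOW_SEG_HDR_* bits):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* is_power_of_2() as in the kernel: exactly one bit set, zero excluded */
static bool is_power_of_2(uint32_t n)
{
	return n && !(n & (n - 1));
}

/* Hypothetical mask values for illustration only */
#define L3_MASK 0x00000070u
#define L4_MASK 0x00000380u

static int val_hdrs(uint32_t hdrs)
{
	if ((hdrs & L3_MASK) && !is_power_of_2(hdrs & L3_MASK))
		return -22; /* -EINVAL: more than one L3 header selected */
	if ((hdrs & L4_MASK) && !is_power_of_2(hdrs & L4_MASK))
		return -22; /* -EINVAL: more than one L4 header selected */
	return 0;
}

int main(void)
{
	printf("%d\n", val_hdrs(0x10 | 0x80)); /* one L3 + one L4: 0 */
	printf("%d\n", val_hdrs(0x10 | 0x20)); /* two L3 bits: -22 */
	return 0;
}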
@@ -700,8 +697,7 @@ static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
* This function identifies the packet types associated with the protocol
* headers being present in packet segments of the specified flow profile.
*/
-static enum ice_status
-ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
+static int ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
struct ice_flow_prof *prof;
u8 i;
@@ -898,7 +894,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
* field. It then allocates one or more extraction sequence entries for the
 * given field, and fills the entries with protocol ID and offset information.
*/
-static enum ice_status
+static int
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg, enum ice_flow_field fld, u64 match)
{
@@ -1035,7 +1031,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
prot_id = ICE_PROT_GRE_OF;
break;
default:
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
}
/* Each extraction sequence entry is a word in size, and extracts a
@@ -1073,7 +1069,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* does not exceed the block's capability
*/
if (params->es_cnt >= fv_words)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
@@ -1099,7 +1095,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* @params: information about the flow to be processed
* @seg: index of packet segment whose raw fields are to be extracted
*/
-static enum ice_status
+static int
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg)
{
@@ -1112,12 +1108,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
if (params->prof->segs[seg].raws_cnt >
ARRAY_SIZE(params->prof->segs[seg].raws))
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* Offsets within the segment headers are not supported */
hdrs_sz = ice_flow_calc_seg_sz(params, seg);
if (!hdrs_sz)
- return ICE_ERR_PARAM;
+ return -EINVAL;
fv_words = hw->blk[params->blk].es.fvw;
@@ -1150,7 +1146,7 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
*/
if (params->es_cnt >= hw->blk[params->blk].es.count ||
params->es_cnt >= ICE_MAX_FV_WORDS)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
@@ -1176,12 +1172,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
* This function iterates through all matched fields in the given segments, and
* creates an extraction sequence for the fields.
*/
-static enum ice_status
+static int
ice_flow_create_xtrct_seq(struct ice_hw *hw,
struct ice_flow_prof_params *params)
{
struct ice_flow_prof *prof = params->prof;
- enum ice_status status = 0;
+ int status = 0;
u8 i;
for (i = 0; i < prof->segs_cnt; i++) {
@@ -1210,10 +1206,10 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
*/
-static enum ice_status
+static int
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
- enum ice_status status;
+ int status;
status = ice_flow_proc_seg_hdrs(params);
if (status)
@@ -1229,7 +1225,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
status = 0;
break;
default:
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
}
return status;
@@ -1329,12 +1325,12 @@ ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
* @blk: classification stage
* @entry: flow entry to be removed
*/
-static enum ice_status
+static int
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
struct ice_flow_entry *entry)
{
if (!entry)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
list_del(&entry->l_entry);
@@ -1355,27 +1351,27 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
*
* Assumption: the caller has acquired the lock to the profile list
*/
-static enum ice_status
+static int
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
enum ice_flow_dir dir, u64 prof_id,
struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
{
struct ice_flow_prof_params *params;
- enum ice_status status;
+ int status;
u8 i;
if (!prof)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
GFP_KERNEL);
if (!params->prof) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto free_params;
}
@@ -1432,11 +1428,11 @@ free_params:
*
* Assumption: the caller has acquired the lock to the profile list
*/
-static enum ice_status
+static int
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof)
{
- enum ice_status status;
+ int status;
/* Remove all remaining flow entries before removing the flow profile */
if (!list_empty(&prof->entries)) {
@@ -1474,11 +1470,11 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+static int
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
- enum ice_status status = 0;
+ int status = 0;
if (!test_bit(vsi_handle, prof->vsis)) {
status = ice_add_prof_id_flow(hw, blk,
@@ -1505,11 +1501,11 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+static int
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
- enum ice_status status = 0;
+ int status = 0;
if (test_bit(vsi_handle, prof->vsis)) {
status = ice_rem_prof_id_flow(hw, blk,
@@ -1536,21 +1532,21 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @segs_cnt: number of packet segments provided
* @prof: stores the returned flow profile added
*/
-enum ice_status
+int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
{
- enum ice_status status;
+ int status;
if (segs_cnt > ICE_FLOW_SEG_MAX)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
if (!segs_cnt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!segs)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
status = ice_flow_val_hdrs(segs, segs_cnt);
if (status)
@@ -1574,17 +1570,16 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
* @blk: the block for which the flow profile is to be removed
* @prof_id: unique ID of the flow profile to be removed
*/
-enum ice_status
-ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
mutex_lock(&hw->fl_profs_locks[blk]);
prof = ice_flow_find_prof_id(hw, blk, prof_id);
if (!prof) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto out;
}
@@ -1608,34 +1603,34 @@ out:
* @data: pointer to a data buffer containing flow entry's match values/masks
* @entry_h: pointer to buffer that receives the new flow entry's handle
*/
-enum ice_status
+int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
void *data, u64 *entry_h)
{
struct ice_flow_entry *e = NULL;
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
/* No flow entry data is expected for RSS */
if (!entry_h || (!data && blk != ICE_BLK_RSS))
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->fl_profs_locks[blk]);
prof = ice_flow_find_prof_id(hw, blk, prof_id);
if (!prof) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
} else {
/* Allocate memory for the entry being added and associate
* the VSI to the found flow profile
*/
e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
if (!e)
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
else
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
}
@@ -1654,7 +1649,7 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
case ICE_BLK_RSS:
break;
default:
- status = ICE_ERR_NOT_IMPL;
+ status = -EOPNOTSUPP;
goto out;
}
@@ -1680,15 +1675,14 @@ out:
* @blk: classification stage
* @entry_h: handle to the flow entry to be removed
*/
-enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
- u64 entry_h)
+int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h)
{
struct ice_flow_entry *entry;
struct ice_flow_prof *prof;
- enum ice_status status = 0;
+ int status = 0;
if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
- return ICE_ERR_PARAM;
+ return -EINVAL;
entry = ICE_FLOW_ENTRY_PTR(entry_h);
@@ -1812,6 +1806,57 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
seg->raws_cnt++;
}
+/**
+ * ice_flow_rem_vsi_prof - remove VSI from flow profile
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof_id: unique ID to identify this flow profile
+ *
+ * This function removes the flow entries associated with the input
+ * VSI handle and disassociates the VSI from the flow profile.
+ */
+int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id)
+{
+ struct ice_flow_prof *prof;
+ int status = 0;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return -EINVAL;
+
+ /* find flow profile pointer with input package block and profile ID */
+ prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
+ if (!prof) {
+ ice_debug(hw, ICE_DBG_PKG, "Cannot find flow profile id=%llu\n",
+ prof_id);
+ return -ENOENT;
+ }
+
+ /* Remove all remaining flow entries before removing the flow profile */
+ if (!list_empty(&prof->entries)) {
+ struct ice_flow_entry *e, *t;
+
+ mutex_lock(&prof->entries_lock);
+ list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
+ if (e->vsi_handle != vsi_handle)
+ continue;
+
+ status = ice_flow_rem_entry_sync(hw, ICE_BLK_FD, e);
+ if (status)
+ break;
+ }
+ mutex_unlock(&prof->entries_lock);
+ }
+ if (status)
+ return status;
+
+ /* disassociate the flow profile from sw VSI handle */
+ status = ice_flow_disassoc_prof(hw, ICE_BLK_FD, prof, vsi_handle);
+ if (status)
+ ice_debug(hw, ICE_DBG_PKG, "ice_flow_disassoc_prof() failed with status=%d\n",
+ status);
+ return status;
+}
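
The entry-removal loop above uses the safe-iteration idiom so entries can be unlinked mid-walk. A self-contained sketch of the same idea with stub types (all names hypothetical); the successor is read before the node is freed, which is the guarantee list_for_each_entry_safe() provides for kernel lists:

#include <stdio.h>
#include <stdlib.h>

struct entry { int vsi; struct entry *next; };

/* Remove all entries matching one VSI from a singly linked list */
static void rem_vsi_entries(struct entry **head, int vsi)
{
	struct entry **pp = head;

	while (*pp) {
		struct entry *e = *pp;

		if (e->vsi == vsi) {
			*pp = e->next;	/* unlink before freeing */
			free(e);
		} else {
			pp = &e->next;
		}
	}
}

int main(void)
{
	struct entry *head = NULL;
	int vsis[] = { 1, 2, 1 };

	for (int i = 0; i < 3; i++) {
		struct entry *e = calloc(1, sizeof(*e));

		e->vsi = vsis[i];
		e->next = head;
		head = e;
	}
	rem_vsi_entries(&head, 1);
	printf("remaining head vsi: %d\n", head ? head->vsi : -1);
	return 0;
}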
+
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
@@ -1836,7 +1881,7 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
* header value to set flow field segment for further use in flow
* profile entry or removal.
*/
-static enum ice_status
+static int
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
u32 flow_hdr)
{
@@ -1853,15 +1898,15 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
- return ICE_ERR_PARAM;
+ return -EINVAL;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
if (val && !is_power_of_2(val))
- return ICE_ERR_CFG;
+ return -EIO;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
if (val && !is_power_of_2(val))
- return ICE_ERR_CFG;
+ return -EIO;
return 0;
}
@@ -1899,14 +1944,14 @@ void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
* the VSI from that profile. If the flow profile has no VSIs it will
* be removed.
*/
-enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *p, *t;
- enum ice_status status = 0;
+ int status = 0;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (list_empty(&hw->fl_profs[blk]))
return 0;
@@ -1966,7 +2011,7 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
struct ice_rss_cfg *r, *rss_cfg;
@@ -1981,7 +2026,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
GFP_KERNEL);
if (!rss_cfg)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
@@ -2022,21 +2067,21 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs, u8 segs_cnt)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *prof = NULL;
struct ice_flow_seg_info *segs;
- enum ice_status status;
+ int status;
if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
- return ICE_ERR_PARAM;
+ return -EINVAL;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
@@ -2128,15 +2173,15 @@ exit:
* the input fields to hash on, the flow type and use the VSI number to add
* a flow entry to the profile.
*/
-enum ice_status
+int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs)
{
- enum ice_status status;
+ int status;
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
@@ -2159,18 +2204,18 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs, u8 segs_cnt)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
@@ -2182,7 +2227,7 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (!prof) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto out;
}
@@ -2216,15 +2261,15 @@ out:
 * removed. Calls are made to underlying flow APIs which will in
* turn build or update buffers for RSS XLT1 section.
*/
-enum ice_status __maybe_unused
+int __maybe_unused
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs)
{
- enum ice_status status;
+ int status;
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->rss_locks);
status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
@@ -2279,20 +2324,19 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* message, convert it to ICE-compatible values, and configure RSS flow
* profiles.
*/
-enum ice_status
-ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
- enum ice_status status = 0;
+ int status = 0;
u64 hash_flds;
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Make sure no unsupported bits are specified */
if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
- return ICE_ERR_CFG;
+ return -EIO;
hash_flds = avf_hash;
@@ -2352,7 +2396,7 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
}
if (rss_hash == ICE_HASH_INVALID)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
ICE_FLOW_SEG_HDR_NONE);
@@ -2368,13 +2412,13 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
*/
-enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
- enum ice_status status = 0;
struct ice_rss_cfg *r;
+ int status = 0;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry) {
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 2a2d8c1536cb..84b6e4464a21 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -383,33 +383,31 @@ struct ice_rss_cfg {
u32 packet_hdr;
};
-enum ice_status
+int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof);
-enum ice_status
-ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
-enum ice_status
+int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
+int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi, enum ice_flow_priority prio,
void *data, u64 *entry_h);
-enum ice_status
-ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
+int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
u16 val_loc, u16 mask_loc);
+int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
-ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
-enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
-enum ice_status
+int
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index c2e78eaf4ccb..c29177c6bb9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -47,12 +47,69 @@ ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info,
}
/**
+ * ice_fltr_set_vlan_vsi_promisc
+ * @hw: pointer to the hardware structure
+ * @vsi: the VSI being configured
+ * @promisc_mask: mask of promiscuous config bits
+ *
+ * Set VSI with all associated VLANs to given promiscuous mode(s)
+ */
+int
+ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask)
+{
+ return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+}
+
+/**
+ * ice_fltr_clear_vlan_vsi_promisc
+ * @hw: pointer to the hardware structure
+ * @vsi: the VSI being configured
+ * @promisc_mask: mask of promiscuous config bits
+ *
+ * Clear VSI with all associated VLANs to given promiscuous mode(s)
+ */
+int
+ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask)
+{
+ return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+}
+
+/**
+ * ice_fltr_clear_vsi_promisc - clear specified promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to clear mode
+ * @promisc_mask: mask of promiscuous config bits to clear
+ * @vid: VLAN ID to clear VLAN promiscuous
+ */
+int
+ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+}
+
+/**
+ * ice_fltr_set_vsi_promisc - set given VSI to given promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @vid: VLAN ID to set VLAN promiscuous
+ */
+int
+ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+}
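
Taken together, these thin wrappers give callers a symmetric set/clear promiscuous API without touching ice_switch internals directly. A hedged sketch of a hypothetical VSI-level caller (the promisc_mask bits come from ice_switch.h in the real driver; here it is just an opaque value the caller already holds):

/* Illustrative only; not taken from the driver */
static int toggle_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
			      u8 promisc_mask, bool enable)
{
	if (enable)
		return ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_mask);

	return ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_mask);
}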
+
+/**
* ice_fltr_add_mac_list - add list of MAC filters
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-enum ice_status
-ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
+int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_mac(&vsi->back->hw, list);
}
@@ -62,8 +119,7 @@ ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-enum ice_status
-ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
+int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_mac(&vsi->back->hw, list);
}
@@ -73,8 +129,7 @@ ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
-ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_vlan(&vsi->back->hw, list);
}
@@ -84,7 +139,7 @@ ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
+static int
ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_vlan(&vsi->back->hw, list);
@@ -95,8 +150,7 @@ ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
-ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_eth_mac(&vsi->back->hw, list);
}
@@ -106,8 +160,7 @@ ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
-ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_eth_mac(&vsi->back->hw, list);
}
@@ -207,18 +260,17 @@ ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list,
* @action: action to be performed on filter match
* @mac_action: pointer to add or remove MAC function
*/
-static enum ice_status
+static int
ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action,
- enum ice_status (*mac_action)(struct ice_vsi *,
- struct list_head *))
+ int (*mac_action)(struct ice_vsi *, struct list_head *))
{
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
result = mac_action(vsi, &tmp_list);
@@ -233,21 +285,21 @@ ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
* @action: action to be performed on filter match
* @mac_action: pointer to add or remove MAC function
*/
-static enum ice_status
+static int
ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action,
- enum ice_status(*mac_action)
+				   int (*mac_action)
(struct ice_vsi *, struct list_head *))
{
u8 broadcast[ETH_ALEN];
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
eth_broadcast_addr(broadcast);
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) ||
ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
result = mac_action(vsi, &tmp_list);
@@ -262,17 +314,16 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
* @action: action to be performed on filter match
* @vlan_action: pointer to add or remove VLAN function
*/
-static enum ice_status
+static int
ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
enum ice_sw_fwd_act_type action,
- enum ice_status (*vlan_action)(struct ice_vsi *,
- struct list_head *))
+ int (*vlan_action)(struct ice_vsi *, struct list_head *))
{
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action))
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
result = vlan_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
@@ -287,17 +338,16 @@ ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
* @action: action to be performed on filter match
* @eth_action: pointer to add or remove ethertype function
*/
-static enum ice_status
+static int
ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action,
- enum ice_status (*eth_action)(struct ice_vsi *,
- struct list_head *))
+ int (*eth_action)(struct ice_vsi *, struct list_head *))
{
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action))
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
result = eth_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
@@ -310,8 +360,8 @@ ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
* @mac: MAC to add
* @action: action to be performed on filter match
*/
-enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list);
}
@@ -322,7 +372,7 @@ enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
* @mac: MAC to add
* @action: action to be performed on filter match
*/
-enum ice_status
+int
ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action)
{
@@ -336,8 +386,8 @@ ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
* @mac: filter MAC to remove
* @action: action to remove
*/
-enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list);
}
@@ -348,8 +398,8 @@ enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
* @vlan_id: VLAN ID to add
* @action: action to be performed on filter match
*/
-enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_vlan(vsi, vlan_id, action,
ice_fltr_add_vlan_list);
@@ -361,8 +411,8 @@ enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
* @vlan_id: filter VLAN to remove
* @action: action to remove
*/
-enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_vlan(vsi, vlan_id, action,
ice_fltr_remove_vlan_list);
@@ -375,8 +425,8 @@ enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
* @flag: direction of packet to be filtered, Tx or Rx
* @action: action to be performed on filter match
*/
-enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_add_eth_list);
@@ -389,89 +439,9 @@ enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
* @flag: direction of filter
* @action: action to remove
*/
-enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
- u16 flag, enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_remove_eth_list);
}
-
-/**
- * ice_fltr_update_rule_flags - update lan_en/lb_en flags
- * @hw: pointer to hw
- * @rule_id: id of rule being updated
- * @recipe_id: recipe id of rule
- * @act: current action field
- * @type: Rx or Tx
- * @src: source VSI
- * @new_flags: combinations of lb_en and lan_en
- */
-static enum ice_status
-ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id,
- u32 act, u16 type, u16 src, u32 new_flags)
-{
- struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status err;
- u32 flags_mask;
-
- s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
- act &= ~flags_mask;
- act |= (flags_mask & new_flags);
-
- s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id);
- s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
- s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
-
- if (type & ICE_FLTR_RX) {
- s_rule->pdata.lkup_tx_rx.src =
- cpu_to_le16(hw->port_info->lport);
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
-
- } else {
- s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(src);
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
- }
-
- err = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
- ice_aqc_opc_update_sw_rules, NULL);
-
- kfree(s_rule);
- return err;
-}
-
-/**
- * ice_fltr_build_action - build action for rule
- * @vsi_id: id of VSI which is use to build action
- */
-static u32 ice_fltr_build_action(u16 vsi_id)
-{
- return ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M) |
- ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
-}
-
-/**
- * ice_fltr_update_flags_dflt_rule - update flags on default rule
- * @vsi: pointer to VSI
- * @rule_id: id of rule
- * @direction: Tx or Rx
- * @new_flags: flags to update
- *
- * Function updates flags on default rule with ICE_SW_LKUP_DFLT.
- *
- * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and
- * ICE_SINGLE_ACT_LAN_ENABLE.
- */
-enum ice_status
-ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
- u32 new_flags)
-{
- u32 action = ice_fltr_build_action(vsi->vsi_num);
- struct ice_hw *hw = &vsi->back->hw;
-
- return ice_fltr_update_rule_flags(hw, rule_id, ICE_SW_LKUP_DFLT, action,
- direction, vsi->vsi_num, new_flags);
-}
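
The ice_fltr_prepare_* helpers above all follow the same shape: build a one-off temporary list, hand it to an add/remove callback that now returns a plain int, then free the list regardless of outcome. A self-contained sketch of that pattern with stub types (all names hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct filter { int id; struct filter *next; };
struct vsi { int idx; };

/* Stub action with the int-returning signature the driver now uses */
static int add_action(struct vsi *vsi, struct filter *list)
{
	printf("vsi %d: applying filter %d\n", vsi->idx, list->id);
	return 0;
}

static int prepare_filter(struct vsi *vsi, int id,
			  int (*action)(struct vsi *, struct filter *))
{
	struct filter *tmp = calloc(1, sizeof(*tmp));
	int result;

	if (!tmp)
		return -ENOMEM;	/* mirrors the -ENOMEM paths above */

	tmp->id = id;
	result = action(vsi, tmp);	/* add or remove, caller's choice */
	free(tmp);
	return result;
}

int main(void)
{
	struct vsi v = { .idx = 3 };

	return prepare_filter(&v, 42, add_action);
}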
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
index 8eec4febead1..3eb42479175f 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.h
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -5,38 +5,49 @@
#define _ICE_FLTR_H_
void ice_fltr_free_list(struct device *dev, struct list_head *h);
-enum ice_status
+int
+ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask);
+int
+ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask);
+int
+ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+int
+ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+int
ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
const u8 *mac, enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
-enum ice_status
-ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
-enum ice_status
+int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
+int
ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
-enum ice_status
-ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
+int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
-enum ice_status
+int
ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
void ice_fltr_remove_all(struct ice_vsi *vsi);
-enum ice_status
-ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
- u32 new_flags);
+
+int
+ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id,
+ u32 new_flags);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index f8601d5b0b19..665a344fb9c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -16,6 +16,18 @@ struct ice_fwu_priv {
/* Track which NVM banks to activate at the end of the update */
u8 activate_flags;
+
+ /* Track the firmware response of the required reset to complete the
+ * flash update.
+ *
+ * 0 - ICE_AQC_NVM_POR_FLAG - A full power on is required
+ * 1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required
+ * 2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required
+ */
+ u8 reset_level;
+
+ /* Track if EMP reset is available */
+ u8 emp_reset_available;
};
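
A standalone sketch of decoding the cached reset_level field into a user-facing action, using the 0/1/2 values listed in the comment above (the ICE_AQC_NVM_* names are taken from that comment; their literal encoding is assumed here for illustration):

#include <stdio.h>

/* Values as documented in the struct comment above; assumed literal */
#define ICE_AQC_NVM_POR_FLAG	0	/* full power-on reset */
#define ICE_AQC_NVM_PERST_FLAG	1	/* cold PCIe reset */
#define ICE_AQC_NVM_EMPR_FLAG	2	/* EMP reset */

static const char *reset_action(unsigned char reset_level)
{
	switch (reset_level) {
	case ICE_AQC_NVM_EMPR_FLAG:
		return "activate via devlink reload";
	case ICE_AQC_NVM_PERST_FLAG:
		return "reboot the system";
	case ICE_AQC_NVM_POR_FLAG:
	default:
		return "power cycle the system";
	}
}

int main(void)
{
	printf("%s\n", reset_action(ICE_AQC_NVM_EMPR_FLAG));
	return 0;
}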
/**
@@ -40,8 +52,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
struct device *dev = context->dev;
struct ice_pf *pf = priv->pf;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u8 *package_data;
+ int status;
dev_dbg(dev, "Sending PLDM record package data to firmware\n");
@@ -54,9 +66,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
kfree(package_data);
if (status) {
- dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "Failed to send record package data to firmware, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware");
return -EIO;
}
@@ -203,8 +214,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
struct device *dev = context->dev;
struct ice_pf *pf = priv->pf;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
size_t length;
+ int status;
switch (component->identifier) {
case NVM_COMP_ID_OROM:
@@ -240,9 +251,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
kfree(comp_tbl);
if (status) {
- dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "Failed to transfer component table to firmware, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware");
return -EIO;
}
@@ -259,6 +269,7 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
* @block_size: size of the block to write, up to 4k
* @block: pointer to block of data to write
* @last_cmd: whether this is the last command
+ * @reset_level: storage for reset level required
* @extack: netlink extended ACK structure
*
 * Write a block of data to a flash module, and wait for the completion
@@ -266,18 +277,24 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
*
* Note this function assumes the caller has acquired the NVM resource.
*
+ * On successful return, reset level indicates the device reset required to
+ * complete the update.
+ *
+ * 0 - ICE_AQC_NVM_POR_FLAG - A full power on is required
+ * 1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required
+ * 2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required
+ *
* Returns: zero on success, or a negative error code on failure.
*/
static int
ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
u16 block_size, u8 *block, bool last_cmd,
- struct netlink_ext_ack *extack)
+ u8 *reset_level, struct netlink_ext_ack *extack)
{
u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf);
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u32 completion_offset;
int err;
@@ -286,11 +303,11 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
block_size, module, offset);
- status = ice_aq_update_nvm(hw, module, offset, block_size, block,
- last_cmd, 0, NULL);
- if (status) {
- dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %s aq_err %s\n",
- module, block_size, offset, ice_stat_str(status),
+ err = ice_aq_update_nvm(hw, module, offset, block_size, block,
+ last_cmd, 0, NULL);
+ if (err) {
+ dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %d aq_err %s\n",
+ module, block_size, offset, err,
ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
return -EIO;
@@ -338,6 +355,24 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
return -EIO;
}
+ /* For the last command to write the NVM bank, newer versions of
+ * firmware indicate the required level of reset to complete
+ * activation of firmware. If the firmware supports this, cache the
+ * response for indicating to the user later. Otherwise, assume that
+ * a full power cycle is required.
+ */
+ if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) {
+ if (hw->dev_caps.common_cap.pcie_reset_avoidance) {
+ *reset_level = (event.desc.params.nvm.cmd_flags &
+ ICE_AQC_NVM_RESET_LVL_M);
+ dev_dbg(dev, "Firmware reported required reset level as %u\n",
+ *reset_level);
+ } else {
+ *reset_level = ICE_AQC_NVM_POR_FLAG;
+ dev_dbg(dev, "Firmware doesn't support indicating required reset level. Assuming a power cycle is required\n");
+ }
+ }
+
return 0;
}
@@ -348,6 +383,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
* @component: the name of the component being updated
* @image: buffer of image data to write to the NVM
* @length: length of the buffer
+ * @reset_level: storage for reset level required
* @extack: netlink extended ACK structure
*
* Loop over the data for a given NVM module and program it in 4 Kb
@@ -360,7 +396,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/
static int
ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
- const u8 *image, u32 length,
+ const u8 *image, u32 length, u8 *reset_level,
struct netlink_ext_ack *extack)
{
struct device *dev = ice_pf_to_dev(pf);
@@ -394,7 +430,8 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
memcpy(block, image + offset, block_size);
err = ice_write_one_nvm_block(pf, module, offset, block_size,
- block, last_cmd, extack);
+ block, last_cmd, reset_level,
+ extack);
if (err)
break;
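
The surrounding loop (mostly elided by the diff context) walks the image in at-most-4 KB chunks and flags the final write. A standalone sketch of that chunking logic, with the block-size limit and names assumed:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_MAX 4096u	/* assumed per-command write limit */

static int write_block(uint32_t off, uint32_t len, bool last)
{
	printf("write %4u bytes at offset %6u%s\n",
	       len, off, last ? " (last)" : "");
	return 0;
}

int main(void)
{
	uint32_t length = 10000, offset = 0;

	while (offset < length) {
		uint32_t block_size = length - offset;
		bool last_cmd;

		if (block_size > BLOCK_MAX)
			block_size = BLOCK_MAX;
		last_cmd = (offset + block_size == length);

		if (write_block(offset, block_size, last_cmd))
			break;
		offset += block_size;
	}
	return 0;
}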
@@ -445,7 +482,6 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
struct devlink *devlink;
- enum ice_status status;
int err;
dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module);
@@ -456,10 +492,10 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT);
- status = ice_aq_erase_nvm(hw, module, NULL);
- if (status) {
- dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n",
- component, module, ice_stat_str(status),
+ err = ice_aq_erase_nvm(hw, module, NULL);
+ if (err) {
+ dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n",
+ component, module, err,
ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module");
err = -EIO;
@@ -511,6 +547,7 @@ out_notify_devlink:
* ice_switch_flash_banks - Tell firmware to switch NVM banks
* @pf: Pointer to the PF data structure
* @activate_flags: flags used for the activation command
+ * @emp_reset_available: on return, indicates if EMP reset is available
* @extack: netlink extended ACK structure
*
* Notify firmware to activate the newly written flash banks, and wait for the
@@ -518,27 +555,43 @@ out_notify_devlink:
*
* Returns: zero on success or an error code on failure.
*/
-static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
- struct netlink_ext_ack *extack)
+static int
+ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
+ u8 *emp_reset_available, struct netlink_ext_ack *extack)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u16 completion_retval;
+ u8 response_flags;
int err;
memset(&event, 0, sizeof(event));
- status = ice_nvm_write_activate(hw, activate_flags);
- if (status) {
- dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_nvm_write_activate(hw, activate_flags, &response_flags);
+ if (err) {
+ dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks");
return -EIO;
}
+	/* Newer versions of firmware can indicate whether an EMP
+ * reset to reload firmware is available. For older firmware, EMP
+ * reset is always available.
+ */
+ if (emp_reset_available) {
+ if (hw->dev_caps.common_cap.reset_restrict_support) {
+ *emp_reset_available = response_flags & ICE_AQC_NVM_EMPR_ENA;
+ dev_dbg(dev, "Firmware indicated that EMP reset is %s\n",
+ *emp_reset_available ?
+ "available" : "not available");
+ } else {
+ *emp_reset_available = ICE_AQC_NVM_EMPR_ENA;
+ dev_dbg(dev, "Firmware does not support restricting EMP reset availability\n");
+ }
+ }
+
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30 * HZ,
&event);
if (err) {
@@ -579,6 +632,7 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
struct netlink_ext_ack *extack = priv->extack;
struct ice_pf *pf = priv->pf;
const char *name;
+ u8 *reset_level;
u16 module;
u8 flag;
int err;
@@ -587,16 +641,19 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
case NVM_COMP_ID_OROM:
module = ICE_SR_1ST_OROM_BANK_PTR;
flag = ICE_AQC_NVM_ACTIV_SEL_OROM;
+ reset_level = NULL;
name = "fw.undi";
break;
case NVM_COMP_ID_NVM:
module = ICE_SR_1ST_NVM_BANK_PTR;
flag = ICE_AQC_NVM_ACTIV_SEL_NVM;
+ reset_level = &priv->reset_level;
name = "fw.mgmt";
break;
case NVM_COMP_ID_NETLIST:
module = ICE_SR_NETLIST_BANK_PTR;
flag = ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ reset_level = NULL;
name = "fw.netlist";
break;
default:
@@ -616,7 +673,8 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
return err;
return ice_write_nvm_module(pf, module, name, component->component_data,
- component->component_size, extack);
+ component->component_size, reset_level,
+ extack);
}
/**
@@ -634,110 +692,75 @@ static int ice_finalize_update(struct pldmfw *context)
struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
struct netlink_ext_ack *extack = priv->extack;
struct ice_pf *pf = priv->pf;
+ struct devlink *devlink;
+ int err;
/* Finally, notify firmware to activate the written NVM banks */
- return ice_switch_flash_banks(pf, priv->activate_flags, extack);
-}
+ err = ice_switch_flash_banks(pf, priv->activate_flags,
+ &priv->emp_reset_available, extack);
+ if (err)
+ return err;
-static const struct pldmfw_ops ice_fwu_ops = {
- .match_record = &pldmfw_op_pci_match_record,
- .send_package_data = &ice_send_package_data,
- .send_component_table = &ice_send_component_table,
- .flash_component = &ice_flash_component,
- .finalize_update = &ice_finalize_update,
-};
+ devlink = priv_to_devlink(pf);
-/**
- * ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device
- * @pf: private device driver structure
- * @fw: firmware object pointing to the relevant firmware file
- * @preservation: preservation level to request from firmware
- * @extack: netlink extended ACK structure
- *
- * Parse the data for a given firmware file, verifying that it is a valid PLDM
- * formatted image that matches this device.
- *
- * Extract the device record Package Data and Component Tables and send them
- * to the firmware. Extract and write the flash data for each of the three
- * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
- * firmware once the data is written to the inactive banks.
- *
- * Returns: zero on success or a negative error code on failure.
- */
-int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- u8 preservation, struct netlink_ext_ack *extack)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- struct ice_fwu_priv priv;
- enum ice_status status;
- int err;
+ /* If the required reset is EMPR, but EMPR is disabled, report that
+ * a reboot is required instead.
+ */
+ if (priv->reset_level == ICE_AQC_NVM_EMPR_FLAG &&
+ !priv->emp_reset_available) {
+ dev_dbg(ice_pf_to_dev(pf), "Firmware indicated EMP reset as sufficient, but EMP reset is disabled\n");
+ priv->reset_level = ICE_AQC_NVM_PERST_FLAG;
+ }
- switch (preservation) {
- case ICE_AQC_NVM_PRESERVE_ALL:
- case ICE_AQC_NVM_PRESERVE_SELECTED:
- case ICE_AQC_NVM_NO_PRESERVATION:
- case ICE_AQC_NVM_FACTORY_DEFAULT:
+ switch (priv->reset_level) {
+ case ICE_AQC_NVM_EMPR_FLAG:
+ devlink_flash_update_status_notify(devlink,
+ "Activate new firmware by devlink reload",
+ NULL, 0, 0);
break;
+ case ICE_AQC_NVM_PERST_FLAG:
+ devlink_flash_update_status_notify(devlink,
+ "Activate new firmware by rebooting the system",
+ NULL, 0, 0);
+ break;
+ case ICE_AQC_NVM_POR_FLAG:
default:
- WARN(1, "Unexpected preservation level request %u", preservation);
- return -EINVAL;
- }
-
- memset(&priv, 0, sizeof(priv));
-
- priv.context.ops = &ice_fwu_ops;
- priv.context.dev = dev;
- priv.extack = extack;
- priv.pf = pf;
- priv.activate_flags = preservation;
-
- status = ice_acquire_nvm(hw, ICE_RES_WRITE);
- if (status) {
- dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
- return -EIO;
- }
-
- err = pldmfw_flash_image(&priv.context, fw);
- if (err == -ENOENT) {
- dev_err(dev, "Firmware image has no record matching this device\n");
- NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device");
- } else if (err) {
- /* Do not set a generic extended ACK message here. A more
- * specific message may already have been set by one of our
- * ops.
- */
- dev_err(dev, "Failed to flash PLDM image, err %d", err);
+ devlink_flash_update_status_notify(devlink,
+ "Activate new firmware by power cycling the system",
+ NULL, 0, 0);
+ break;
}
- ice_release_nvm(hw);
+ pf->fw_emp_reset_disabled = !priv->emp_reset_available;
- return err;
+ return 0;
}
+static const struct pldmfw_ops ice_fwu_ops = {
+ .match_record = &pldmfw_op_pci_match_record,
+ .send_package_data = &ice_send_package_data,
+ .send_component_table = &ice_send_component_table,
+ .flash_component = &ice_flash_component,
+ .finalize_update = &ice_finalize_update,
+};
+
/**
- * ice_check_for_pending_update - Check for a pending flash update
+ * ice_get_pending_updates - Check if the component has a pending update
* @pf: the PF driver structure
- * @component: if not NULL, the name of the component being updated
- * @extack: Netlink extended ACK structure
+ * @pending: on return, bitmap of updates pending
+ * @extack: Netlink extended ACK
*
- * Check whether the device already has a pending flash update. If such an
- * update is found, cancel it so that the requested update may proceed.
+ * Check if the device has any pending updates on any flash components.
*
- * Returns: zero on success, or a negative error code on failure.
+ * Returns: zero on success, or a negative error code on failure. On
+ * success, *pending is set to the bitmap of pending updates.
*/
-int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
- struct netlink_ext_ack *extack)
+int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
+ struct netlink_ext_ack *extack)
{
- struct devlink *devlink = priv_to_devlink(pf);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw_dev_caps *dev_caps;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
- u8 pending = 0;
int err;
dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL);
@@ -749,30 +772,60 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
* may have changed, e.g. if an update was previously completed and
* the system has not yet rebooted.
*/
- status = ice_discover_dev_caps(hw, dev_caps);
- if (status) {
+ err = ice_discover_dev_caps(hw, dev_caps);
+ if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities");
kfree(dev_caps);
- return -EIO;
+ return err;
}
+ *pending = 0;
+
if (dev_caps->common_cap.nvm_update_pending_nvm) {
dev_info(dev, "The fw.mgmt flash component has a pending update\n");
- pending |= ICE_AQC_NVM_ACTIV_SEL_NVM;
+ *pending |= ICE_AQC_NVM_ACTIV_SEL_NVM;
}
if (dev_caps->common_cap.nvm_update_pending_orom) {
dev_info(dev, "The fw.undi flash component has a pending update\n");
- pending |= ICE_AQC_NVM_ACTIV_SEL_OROM;
+ *pending |= ICE_AQC_NVM_ACTIV_SEL_OROM;
}
if (dev_caps->common_cap.nvm_update_pending_netlist) {
dev_info(dev, "The fw.netlist flash component has a pending update\n");
- pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ *pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST;
}
kfree(dev_caps);
+ return 0;
+}
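
A hedged sketch of a caller consuming the returned bitmap; the ICE_AQC_NVM_ACTIV_SEL_* bit names are the ones tested in the function above, while the wrapper itself is purely illustrative:

/* Illustrative caller; not taken from the driver */
static bool ice_nvm_update_pending(struct ice_pf *pf,
				   struct netlink_ext_ack *extack)
{
	u8 pending = 0;

	if (ice_get_pending_updates(pf, &pending, extack))
		return false;

	/* true if the fw.mgmt component has an update staged */
	return pending & ICE_AQC_NVM_ACTIV_SEL_NVM;
}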
+
+/**
+ * ice_cancel_pending_update - Cancel any pending update for a component
+ * @pf: the PF driver structure
+ * @component: if not NULL, the name of the component being updated
+ * @extack: Netlink extended ACK structure
+ *
+ * Cancel any pending update for the specified component. If component is
+ * NULL, all device updates will be canceled.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_cancel_pending_update(struct ice_pf *pf, const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ u8 pending;
+ int err;
+
+ err = ice_get_pending_updates(pf, &pending, extack);
+ if (err)
+ return err;
+
/* If the flash_update request is for a specific component, ignore all
* of the other components.
*/
@@ -798,17 +851,107 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
"Canceling previous pending update",
component, 0, 0);
- status = ice_acquire_nvm(hw, ICE_RES_WRITE);
- if (status) {
- dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (err) {
+ dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
- return -EIO;
+ return err;
}
pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV;
- err = ice_switch_flash_banks(pf, pending, extack);
+ err = ice_switch_flash_banks(pf, pending, NULL, extack);
+
+ ice_release_nvm(hw);
+
+ /* Since we've canceled the pending update, we no longer know if EMP
+ * reset is restricted.
+ */
+ pf->fw_emp_reset_disabled = false;
+
+ return err;
+}
+
+/**
+ * ice_devlink_flash_update - Write a firmware image to the device
+ * @devlink: pointer to devlink associated with the device to update
+ * @params: devlink flash update parameters
+ * @extack: netlink extended ACK structure
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid PLDM
+ * formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the firmware. Extract and write the flash data for each of the three
+ * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
+ * firmware once the data is written to the inactive banks.
+ *
+ * Returns: zero on success or a negative error code on failure.
+ */
+int ice_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_fwu_priv priv;
+ u8 preservation;
+ int err;
+
+ if (!params->overwrite_mask) {
+ /* preserve all settings and identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_ALL;
+ } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) {
+ /* overwrite settings, but preserve the vital device identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
+ } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS |
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) {
+ /* overwrite both settings and identifiers, preserve nothing */
+ preservation = ICE_AQC_NVM_NO_PRESERVATION;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!hw->dev_caps.common_cap.nvm_unified_update) {
+ NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&priv, 0, sizeof(priv));
+
+ priv.context.ops = &ice_fwu_ops;
+ priv.context.dev = dev;
+ priv.extack = extack;
+ priv.pf = pf;
+ priv.activate_flags = preservation;
+
+ devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);
+
+ err = ice_cancel_pending_update(pf, NULL, extack);
+ if (err)
+ return err;
+
+ err = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (err) {
+ dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
+ return err;
+ }
+
+ err = pldmfw_flash_image(&priv.context, params->fw);
+ if (err == -ENOENT) {
+ dev_err(dev, "Firmware image has no record matching this device\n");
+ NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device");
+ } else if (err) {
+ /* Do not set a generic extended ACK message here. A more
+ * specific message may already have been set by one of our
+ * ops.
+ */
+ dev_err(dev, "Failed to flash PLDM image, err %d", err);
+ }
ice_release_nvm(hw);
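ice_devlink_flash_update() has exactly the signature devlink core expects
for the .flash_update callback. A minimal sketch of the wiring, assuming
the ops table lives in ice_devlink.c (only .flash_update is confirmed by
this hunk; the capability flag shown is the one matching the three
overwrite cases handled above):

	static const struct devlink_ops ice_devlink_ops = {
		.supported_flash_update_params =
			DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
		.flash_update = ice_devlink_flash_update,
	};

From userspace, the overwrite_mask values correspond to the devlink CLI's
overwrite attributes; for example (PCI address and file name are
placeholders):

	devlink dev flash pci/0000:af:00.0 file ice/fw.pldm overwrite settings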
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
index c6390f6851ff..750574885716 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.h
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -4,9 +4,10 @@
#ifndef _ICE_FW_UPDATE_H_
#define _ICE_FW_UPDATE_H_
-int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- u8 preservation, struct netlink_ext_ack *extack);
-int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
- struct netlink_ext_ack *extack);
+int ice_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack);
+int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
+ struct netlink_ext_ack *extack);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index a49082485642..d16738a3d3a7 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -100,6 +100,7 @@
#define PF_SB_ATQT 0x0022FE00
#define PF_SB_ATQT_ATQT_S 0
#define PF_SB_ATQT_ATQT_M ICE_M(0x3FF, 0)
+#define PF_SB_REM_DEV_CTL 0x002300F0
#define PRTDCB_GENC 0x00083000
#define PRTDCB_GENC_PFCLDA_S 16
#define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16)
@@ -440,6 +441,10 @@
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define PRTRPB_RDPC 0x000AC260
+#define GLHH_ART_CTL 0x000A41D4
+#define GLHH_ART_CTL_ACTIVE_M BIT(0)
+#define GLHH_ART_TIME_H 0x000A41D8
+#define GLHH_ART_TIME_L 0x000A41DC
#define GLTSYN_AUX_IN_0(_i) (0x000889D8 + ((_i) * 4))
#define GLTSYN_AUX_IN_0_INT_ENA_M BIT(4)
#define GLTSYN_AUX_OUT_0(_i) (0x00088998 + ((_i) * 4))
@@ -452,6 +457,8 @@
#define GLTSYN_ENA_TSYN_ENA_M BIT(0)
#define GLTSYN_EVNT_H_0(_i) (0x00088970 + ((_i) * 4))
#define GLTSYN_EVNT_L_0(_i) (0x00088968 + ((_i) * 4))
+#define GLTSYN_HHTIME_H(_i) (0x00088900 + ((_i) * 4))
+#define GLTSYN_HHTIME_L(_i) (0x000888F8 + ((_i) * 4))
#define GLTSYN_INCVAL_H(_i) (0x00088920 + ((_i) * 4))
#define GLTSYN_INCVAL_L(_i) (0x00088918 + ((_i) * 4))
#define GLTSYN_SHADJ_H(_i) (0x00088910 + ((_i) * 4))
@@ -468,6 +475,8 @@
#define GLTSYN_TGT_L_0(_i) (0x00088928 + ((_i) * 4))
#define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4))
#define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4))
+#define PFHH_SEM 0x000A4200 /* Reset Source: PFR */
+#define PFHH_SEM_BUSY_M BIT(0)
#define PFTSYN_SEM 0x00088880
#define PFTSYN_SEM_BUSY_M BIT(0)
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index adcc9a251595..fc3580167e7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -288,7 +288,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
adev->id = pf->aux_idx;
adev->dev.release = ice_adev_release;
adev->dev.parent = &pf->pdev->dev;
- adev->name = IIDC_RDMA_ROCE_NAME;
+ adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";
ret = auxiliary_device_init(adev);
if (ret) {
@@ -335,6 +335,6 @@ int ice_init_rdma(struct ice_pf *pf)
dev_err(dev, "failed to reserve vectors for RDMA\n");
return ret;
}
-
+ pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
return ice_plug_aux_dev(pf);
}
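With this change the auxiliary device is named after the active RDMA
protocol, so a consumer binds to "ice.roce" or "ice.iwarp" (the auxiliary
bus matches on "<module>.<name>"). A hedged illustration of the consumer
side, assuming an irdma-style ID table:

	static const struct auxiliary_device_id irdma_auxiliary_id_table[] = {
		{ .name = "ice.iwarp" },
		{ .name = "ice.roce" },
		{},
	};
	MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table);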
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 09a3297cd63c..0c187cf04fcf 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -291,7 +291,7 @@ void ice_vsi_delete(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
+ int status;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
@@ -305,8 +305,8 @@ void ice_vsi_delete(struct ice_vsi *vsi)
status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
if (status)
- dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
+ vsi->vsi_num, status);
kfree(ctxt);
}
@@ -570,10 +570,16 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
u32 g_val, b_val;
- /* Flow Director filters are only allocated/assigned to the PF VSI which
- * passes the traffic. The CTRL VSI is only used to add/delete filters
- * so we don't allocate resources to it
+ /* Flow Director filters are only allocated/assigned to the PF VSI or
+ * CHNL VSI which passes the traffic. The CTRL VSI is only used to
+ * add/delete filters so resources are not allocated to it
*/
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EPERM;
+
+ if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
+ vsi->type == ICE_VSI_CHNL))
+ return -EPERM;
/* FD filters from guaranteed pool per VSI */
g_val = pf->hw.func_caps.fd_fltr_guar;
@@ -585,19 +591,56 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
if (!b_val)
return -EPERM;
- if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF))
- return -EPERM;
+ /* PF main VSI gets only 64 FD resources from guaranteed pool
+ * when ADQ is configured.
+ */
+#define ICE_PF_VSI_GFLTR 64
- if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
- return -EPERM;
+ /* determine FD filter resources per VSI from the shared (best-effort)
+ * and dedicated pools
+ */
+ if (vsi->type == ICE_VSI_PF) {
+ vsi->num_gfltr = g_val;
+ /* if MQPRIO is configured, main VSI doesn't get all FD
+ * resources from guaranteed pool. PF VSI gets 64 FD resources
+ */
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ if (g_val < ICE_PF_VSI_GFLTR)
+ return -EPERM;
+ /* allow bare minimum entries for PF VSI */
+ vsi->num_gfltr = ICE_PF_VSI_GFLTR;
+ }
- vsi->num_gfltr = g_val / pf->num_alloc_vsi;
+ /* each VSI gets same "best_effort" quota */
+ vsi->num_bfltr = b_val;
+ } else if (vsi->type == ICE_VSI_VF) {
+ vsi->num_gfltr = 0;
- /* each VSI gets same "best_effort" quota */
- vsi->num_bfltr = b_val;
+ /* each VSI gets same "best_effort" quota */
+ vsi->num_bfltr = b_val;
+ } else {
+ struct ice_vsi *main_vsi;
+ int numtc;
- if (vsi->type == ICE_VSI_VF) {
- vsi->num_gfltr = 0;
+ main_vsi = ice_get_main_vsi(pf);
+ if (!main_vsi)
+ return -EPERM;
+
+ if (!main_vsi->all_numtc)
+ return -EINVAL;
+
+ /* figure out ADQ numtc */
+ numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;
+
+ /* only one TC configured but still asking for channel
+ * resources is an invalid config
+ */
+ if (numtc < ICE_CHNL_START_TC)
+ return -EPERM;
+
+ g_val -= ICE_PF_VSI_GFLTR;
+ /* channel VSIs get an equal share from the guaranteed pool */
+ vsi->num_gfltr = g_val / numtc;
/* each VSI gets same "best_effort" quota */
vsi->num_bfltr = b_val;
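As a worked example of the channel branch, assuming ICE_CHNL_START_TC is 1
(its definition is outside this hunk): with fd_fltr_guar = 512 and an ADQ
configuration where all_numtc = 4, numtc = 4 - 1 = 3, the PF VSI keeps
ICE_PF_VSI_GFLTR = 64 guaranteed filters and each channel VSI gets
(512 - 64) / 3 = 149 filters from the guaranteed pool, plus the same
best-effort quota b_val as every other VSI.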
@@ -709,15 +752,15 @@ bool ice_is_aux_ena(struct ice_pf *pf)
static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
+ int status;
if (ice_is_safe_mode(pf))
return;
status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
if (status)
- dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
}
/**
@@ -942,7 +985,7 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
u16 dflt_q, report_q, val;
if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
- vsi->type != ICE_VSI_VF)
+ vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
return;
val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
@@ -1545,8 +1588,8 @@ ice_vsi_cfg_rss_exit:
static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
@@ -1557,8 +1600,8 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
if (status)
- dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
}
/**
@@ -1577,8 +1620,8 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
struct device *dev;
+ int status;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
@@ -1590,57 +1633,57 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for IPv6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for sctp4 with input set IP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for sctp6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
}
/**
@@ -1749,22 +1792,21 @@ ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
- int err = 0;
+ int err;
dev = ice_pf_to_dev(pf);
- status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
- if (!status) {
+ err = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
+ if (!err) {
vsi->num_vlan--;
- } else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n",
- vid, vsi->vsi_num, ice_stat_str(status));
+ } else if (err == -ENOENT) {
+ dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, error: %d\n",
+ vid, vsi->vsi_num, err);
+ err = 0;
} else {
- dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n",
- vid, vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
+ dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
+ vid, vsi->vsi_num, err);
}
return err;
@@ -2105,8 +2147,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
+ int ret;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
@@ -2124,12 +2165,10 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -2148,8 +2187,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
+ int ret;
/* do not allow modifying VLAN stripping when a port VLAN is configured
* on this VSI
@@ -2177,12 +2215,10 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n",
- ena, ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
+ ena, ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -2324,10 +2360,9 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
- netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n",
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
- ice_stat_str(status),
- ice_aq_str(pf->hw.adminq.sq_last_status));
+ status, ice_aq_str(pf->hw.adminq.sq_last_status));
goto err_out;
}
@@ -2405,11 +2440,11 @@ clear_reg_idx:
*/
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
- enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
- enum ice_sw_fwd_act_type act);
+ int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
+ enum ice_sw_fwd_act_type act);
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
dev = ice_pf_to_dev(pf);
eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
@@ -2428,9 +2463,9 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
}
if (status)
- dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
+ dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
create ? "adding" : "removing", tx ? "TX" : "RX",
- vsi->vsi_num, ice_stat_str(status));
+ vsi->vsi_num, status);
}
/**
@@ -2450,7 +2485,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
struct ice_port_info *port_info;
struct ice_pf *pf = vsi->back;
u32 agg_node_id_start = 0;
- enum ice_status status;
+ int status;
/* create (as needed) scheduler aggregator node and move VSI into
* corresponding aggregator node
@@ -2576,7 +2611,6 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
- enum ice_status status;
struct ice_vsi *vsi;
int ret, i;
@@ -2725,11 +2759,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
}
dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
- if (status) {
- dev_err(dev, "VSI %d failed lan queue config, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (ret) {
+ dev_err(dev, "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
goto unroll_clear_rings;
}
@@ -3036,8 +3070,8 @@ void ice_napi_del(struct ice_vsi *vsi)
*/
int ice_vsi_release(struct ice_vsi *vsi)
{
- enum ice_status err;
struct ice_pf *pf;
+ int err;
if (!vsi->back)
return -ENODEV;
@@ -3273,7 +3307,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
int prev_num_q_vectors = 0;
struct ice_vf *vf = NULL;
enum ice_vsi_type vtype;
- enum ice_status status;
struct ice_pf *pf;
int ret, i;
@@ -3421,14 +3454,14 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
/* If MQPRIO is set, means channel code path, hence for main
* VSI's, use TC as 1
*/
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
else
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
- vsi->tc_cfg.ena_tc, max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
- if (status) {
- dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
if (init_vsi) {
ret = -EIO;
goto err_vectors;
@@ -3663,7 +3696,6 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctx;
- enum ice_status status;
struct device *dev;
int i, ret = 0;
u8 num_tc = 0;
@@ -3705,25 +3737,22 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
/* must indicate which sections of the VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
- status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
- if (status) {
+ ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
+ if (ret) {
dev_info(dev, "Failed VSI Update\n");
- ret = -EIO;
goto out;
}
if (vsi->type == ICE_VSI_PF &&
test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1,
- max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
else
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
- vsi->tc_cfg.ena_tc, max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
- if (status) {
- dev_err(dev, "VSI %d failed TC config, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- ret = -EIO;
+ if (ret) {
+ dev_err(dev, "VSI %d failed TC config, error %d\n",
+ vsi->vsi_num, ret);
goto out;
}
ice_vsi_update_q_map(vsi, ctx);
@@ -3776,39 +3805,6 @@ void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
}
/**
- * ice_status_to_errno - convert from enum ice_status to Linux errno
- * @err: ice_status value to convert
- */
-int ice_status_to_errno(enum ice_status err)
-{
- switch (err) {
- case ICE_SUCCESS:
- return 0;
- case ICE_ERR_DOES_NOT_EXIST:
- return -ENOENT;
- case ICE_ERR_OUT_OF_RANGE:
- case ICE_ERR_AQ_ERROR:
- case ICE_ERR_AQ_TIMEOUT:
- case ICE_ERR_AQ_EMPTY:
- case ICE_ERR_AQ_FW_CRITICAL:
- return -EIO;
- case ICE_ERR_PARAM:
- case ICE_ERR_INVAL_SIZE:
- return -EINVAL;
- case ICE_ERR_NO_MEMORY:
- return -ENOMEM;
- case ICE_ERR_MAX_LIMIT:
- return -EAGAIN;
- case ICE_ERR_RESET_ONGOING:
- return -EBUSY;
- case ICE_ERR_AQ_FULL:
- return -ENOSPC;
- default:
- return -EINVAL;
- }
-}
-
-/**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
* @sw: switch to check if its default forwarding VSI is free
*
@@ -3849,8 +3845,8 @@ bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
*/
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
{
- enum ice_status status;
struct device *dev;
+ int status;
if (!sw || !vsi)
return -EINVAL;
@@ -3873,9 +3869,9 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- return -EIO;
+ dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
+ vsi->vsi_num, status);
+ return status;
}
sw->dflt_vsi = vsi;
@@ -3895,8 +3891,8 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
int ice_clear_dflt_vsi(struct ice_sw *sw)
{
struct ice_vsi *dflt_vsi;
- enum ice_status status;
struct device *dev;
+ int status;
if (!sw)
return -EINVAL;
@@ -3912,8 +3908,8 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n",
- dflt_vsi->vsi_num, ice_stat_str(status));
+ dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
+ dflt_vsi->vsi_num, status);
return -EIO;
}
@@ -3987,8 +3983,8 @@ int ice_get_link_speed_kbps(struct ice_vsi *vsi)
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
int speed;
dev = ice_pf_to_dev(pf);
@@ -4014,7 +4010,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
min_tx_rate, ice_vsi_type_str(vsi->type),
vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
@@ -4026,7 +4022,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
if (status) {
dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
ice_vsi_type_str(vsi->type), vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
@@ -4048,8 +4044,8 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
int speed;
dev = ice_pf_to_dev(pf);
@@ -4075,7 +4071,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
max_tx_rate, ice_vsi_type_str(vsi->type),
vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
@@ -4087,7 +4083,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
if (status) {
dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
ice_vsi_type_str(vsi->type), vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
@@ -4107,7 +4103,7 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
struct device *dev = ice_pf_to_dev(vsi->back);
struct ice_port_info *pi = vsi->port_info;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
@@ -4119,16 +4115,16 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
* a success code. Return an error if FW returns an error code other
* than ICE_AQ_RC_EMODE
*/
- if (status == ICE_ERR_AQ_ERROR) {
+ if (status == -EIO) {
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
- dev_warn(dev, "can't set link to %s, err %s aq_err %s. not fatal, continuing\n",
- (ena ? "ON" : "OFF"), ice_stat_str(status),
+ dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
+ (ena ? "ON" : "OFF"), status,
ice_aq_str(hw->adminq.sq_last_status));
} else if (status) {
- dev_err(dev, "can't set link to %s, err %s aq_err %s\n",
- (ena ? "ON" : "OFF"), ice_stat_str(status),
+ dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
+ (ena ? "ON" : "OFF"), status,
ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
+ return status;
}
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 6c803407b67d..b2ed189527d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -103,15 +103,11 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
-
-int ice_status_to_errno(enum ice_status err);
-
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
-enum ice_status
-ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
+int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_aux_ena(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 73c61cdb036f..30814435f779 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -155,7 +155,6 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
*/
static int ice_init_mac_fltr(struct ice_pf *pf)
{
- enum ice_status status;
struct ice_vsi *vsi;
u8 *perm_addr;
@@ -164,11 +163,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf)
return -EINVAL;
perm_addr = vsi->port_info->mac.perm_addr;
- status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
- if (status)
- return -EIO;
-
- return 0;
+ return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}
/**
@@ -237,36 +232,43 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
}
/**
- * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
+ * ice_set_promisc - Enable promiscuous mode for a given PF
* @vsi: the VSI being configured
* @promisc_m: mask of promiscuous config bits
- * @set_promisc: enable or disable promisc flag request
*
*/
-static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
+static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
- struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status = 0;
+ int status;
if (vsi->type != ICE_VSI_PF)
return 0;
- if (vsi->num_vlan > 1) {
- status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
- set_promisc);
- } else {
- if (set_promisc)
- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
- else
- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
- }
+ if (vsi->num_vlan > 1)
+ status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
+ else
+ status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+ return status;
+}
- if (status)
- return -EIO;
+/**
+ * ice_clear_promisc - Disable promiscuous mode for a given PF
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+ *
+ */
+static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
+{
+ int status;
- return 0;
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+ if (vsi->num_vlan > 1)
+ status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
+ else
+ status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+ return status;
}
/**
@@ -282,10 +284,9 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
bool promisc_forced_on = false;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status = 0;
u32 changed_flags = 0;
u8 promisc_m;
- int err = 0;
+ int err;
if (!vsi->netdev)
return -EINVAL;
@@ -315,25 +316,23 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Remove MAC addresses in the unsync list */
- status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
+ err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
- if (status) {
+ if (err) {
netdev_err(netdev, "Failed to delete MAC filters\n");
/* if we failed because of alloc failures, just bail */
- if (status == ICE_ERR_NO_MEMORY) {
- err = -ENOMEM;
+ if (err == -ENOMEM)
goto out;
- }
}
/* Add MAC addresses in the sync list */
- status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
+ err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
ice_fltr_free_list(dev, &vsi->tmp_sync_list);
/* If the filter was added successfully or already exists, do not go into
* the 'if' condition below and report it as an error. Instead, continue
* processing the rest of the function.
*/
- if (status && status != ICE_ERR_ALREADY_EXISTS) {
+ if (err && err != -EEXIST) {
netdev_err(netdev, "Failed to add MAC filters\n");
/* If there is no more space for new umac filters, VSI
* should go into promiscuous mode. There should be some
@@ -346,10 +345,10 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
vsi->vsi_num);
} else {
- err = -EIO;
goto out;
}
}
+ err = 0;
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
if (vsi->current_netdev_flags & IFF_ALLMULTI) {
@@ -358,7 +357,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
else
promisc_m = ICE_MCAST_PROMISC_BITS;
- err = ice_cfg_promisc(vsi, promisc_m, true);
+ err = ice_set_promisc(vsi, promisc_m);
if (err) {
netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
vsi->vsi_num);
@@ -372,7 +371,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
else
promisc_m = ICE_MCAST_PROMISC_BITS;
- err = ice_cfg_promisc(vsi, promisc_m, false);
+ err = ice_clear_promisc(vsi, promisc_m);
if (err) {
netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
vsi->vsi_num);
@@ -396,6 +395,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
~IFF_PROMISC;
goto out_promisc;
}
+ err = 0;
ice_cfg_vlan_pruning(vsi, false);
}
} else {
@@ -472,6 +472,25 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
}
/**
+ * ice_clear_sw_switch_recipes - clear switch recipes
+ * @pf: board private structure
+ *
+ * Mark switch recipes as not created in sw structures. There are cases where
+ * rules (especially advanced rules) need to be restored, either re-read from
+ * hardware or added again, for example after a reset. The 'recp_created' flag
+ * prevents that and needs to be cleared upfront.
+ */
+static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
+{
+ struct ice_sw_recipe *recp;
+ u8 i;
+
+ recp = pf->hw.switch_info->recp_list;
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
+ recp[i].recp_created = false;
+}
+
+/**
* ice_prepare_for_reset - prep for reset
* @pf: board private structure
* @reset_type: reset type requested
@@ -501,6 +520,11 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_for_each_vf(pf, i)
ice_set_vf_state_qs_dis(&pf->vf[i]);
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ if (reset_type != ICE_RESET_PFR)
+ ice_clear_sw_switch_recipes(pf);
+ }
+
/* release ADQ specific HW and SW resources */
vsi = ice_get_main_vsi(pf);
if (!vsi)
@@ -539,7 +563,7 @@ skip:
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_release(pf);
+ ice_ptp_prepare_for_reset(pf);
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@@ -695,12 +719,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
struct ice_aqc_get_phy_caps_data *caps;
const char *an_advertised;
- enum ice_status status;
const char *fec_req;
const char *speed;
const char *fec;
const char *fc;
const char *an;
+ int status;
if (!vsi)
return;
@@ -1020,10 +1044,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_phy_info *phy_info;
- enum ice_status status;
struct ice_vsi *vsi;
u16 old_link_speed;
bool old_link;
+ int status;
phy_info = &pi->phy;
phy_info->link_info_old = phy_info->link_info;
@@ -1036,8 +1060,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
*/
status = ice_update_link_info(pi);
if (status)
- dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
- pi->lport, ice_stat_str(status),
+ dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
+ pi->lport, status,
ice_aq_str(pi->hw->adminq.sq_last_status));
ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
@@ -1063,6 +1087,9 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
+ if (!ice_is_e810(&pf->hw))
+ ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
ice_dcb_rebuild(pf);
@@ -1407,15 +1434,15 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
return 0;
do {
- enum ice_status ret;
u16 opcode;
+ int ret;
ret = ice_clean_rq_elem(hw, cq, &event, &pending);
- if (ret == ICE_ERR_AQ_NO_WORK)
+ if (ret == -EALREADY)
break;
if (ret) {
- dev_err(dev, "%s Receive Queue event error %s\n", qtype,
- ice_stat_str(ret));
+ dev_err(dev, "%s Receive Queue event error %d\n", qtype,
+ ret);
break;
}
@@ -1866,19 +1893,17 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi)
{
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_pf *pf = pi->hw->back;
- enum ice_status status;
- int err = 0;
+ int err;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
- NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
+ pcaps, NULL);
- if (status) {
+ if (err) {
dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
- err = -EIO;
goto out;
}
@@ -1977,8 +2002,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi)
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_phy_info *phy = &pi->phy;
struct ice_pf *pf = pi->hw->back;
- enum ice_status status;
- int err = 0;
+ int err;
if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
return -EIO;
@@ -1988,14 +2012,13 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi)
return -ENOMEM;
if (ice_fw_supports_report_dflt_cfg(pi->hw))
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
- pcaps, NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ pcaps, NULL);
else
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- pcaps, NULL);
- if (status) {
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ pcaps, NULL);
+ if (err) {
dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
- err = -EIO;
goto err_out;
}
@@ -2049,8 +2072,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)
struct ice_aqc_set_phy_cfg_data *cfg;
struct ice_phy_info *phy = &pi->phy;
struct ice_pf *pf = vsi->back;
- enum ice_status status;
- int err = 0;
+ int err;
/* Ensure we have media as we cannot configure a medialess port */
if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
@@ -2070,12 +2092,11 @@ static int ice_configure_phy(struct ice_vsi *vsi)
return -ENOMEM;
/* Get current PHY config */
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
- NULL);
- if (status) {
- dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
+ NULL);
+ if (err) {
+ dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
+ vsi->vsi_num, err);
goto done;
}
@@ -2089,15 +2110,14 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Use PHY topology as baseline for configuration */
memset(pcaps, 0, sizeof(*pcaps));
if (ice_fw_supports_report_dflt_cfg(pi->hw))
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
- pcaps, NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ pcaps, NULL);
else
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- pcaps, NULL);
- if (status) {
- dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ pcaps, NULL);
+ if (err) {
+ dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
+ vsi->vsi_num, err);
goto done;
}
@@ -2150,12 +2170,10 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Enable link and link update */
cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
- status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
- if (status) {
- dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
- }
+ err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
+ if (err)
+ dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
+ vsi->vsi_num, err);
kfree(cfg);
done:
@@ -2549,9 +2567,9 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
.vsi_map_offset = vsi->alloc_txq,
.mapping_mode = ICE_VSI_MAP_CONTIG
};
- enum ice_status status;
struct device *dev;
int i, v_idx;
+ int status;
dev = ice_pf_to_dev(pf);
vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2605,8 +2623,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
- ice_stat_str(status));
+ dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
+ status);
goto clear_xdp_rings;
}
@@ -3520,7 +3538,7 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi;
- int status = 0;
+ int status;
if (ice_is_reset_in_progress(pf->state))
return -EBUSY;
@@ -3533,10 +3551,8 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
INIT_LIST_HEAD(&vsi->ch_list);
status = ice_cfg_netdev(vsi);
- if (status) {
- status = -ENODEV;
+ if (status)
goto unroll_vsi_setup;
- }
/* netdev has to be configured before setting frame size */
ice_vsi_cfg_frame_size(vsi);
@@ -3560,14 +3576,13 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
if (status) {
dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
vsi->vsi_num, status);
- status = -EINVAL;
goto unroll_napi_add;
}
status = ice_init_mac_fltr(pf);
if (status)
goto free_cpu_rx_map;
- return status;
+ return 0;
free_cpu_rx_map:
ice_free_cpu_rx_rmap(vsi);
@@ -4023,8 +4038,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!vsi)
return;
@@ -4054,9 +4069,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
@@ -4069,108 +4083,80 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
/**
* ice_log_pkg_init - log result of DDP package load
* @hw: pointer to hardware info
- * @status: status of package load
+ * @state: state of package load
*/
-static void
-ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
+static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
{
- struct ice_pf *pf = (struct ice_pf *)hw->back;
- struct device *dev = ice_pf_to_dev(pf);
+ struct ice_pf *pf = hw->back;
+ struct device *dev;
- switch (*status) {
- case ICE_SUCCESS:
- /* The package download AdminQ command returned success because
- * this download succeeded or ICE_ERR_AQ_NO_WORK since there is
- * already a package loaded on the device.
- */
- if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
- hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
- hw->pkg_ver.update == hw->active_pkg_ver.update &&
- hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
- !memcmp(hw->pkg_name, hw->active_pkg_name,
- sizeof(hw->pkg_name))) {
- if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
- dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- hw->active_pkg_ver.update,
- hw->active_pkg_ver.draft);
- else
- dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- hw->active_pkg_ver.update,
- hw->active_pkg_ver.draft);
- } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
- hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
- dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
- *status = ICE_ERR_NOT_SUPPORTED;
- } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
- dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- hw->active_pkg_ver.update,
- hw->active_pkg_ver.draft,
- hw->pkg_name,
- hw->pkg_ver.major,
- hw->pkg_ver.minor,
- hw->pkg_ver.update,
- hw->pkg_ver.draft);
- } else {
- dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
- *status = ICE_ERR_NOT_SUPPORTED;
- }
+ dev = ice_pf_to_dev(pf);
+
+ switch (state) {
+ case ICE_DDP_PKG_SUCCESS:
+ dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft);
+ break;
+ case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
+ dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft);
+ break;
+ case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
+ dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
break;
- case ICE_ERR_FW_DDP_MISMATCH:
+ case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
+ dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft,
+ hw->pkg_name,
+ hw->pkg_ver.major,
+ hw->pkg_ver.minor,
+ hw->pkg_ver.update,
+ hw->pkg_ver.draft);
+ break;
+ case ICE_DDP_PKG_FW_MISMATCH:
dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
break;
- case ICE_ERR_BUF_TOO_SHORT:
- case ICE_ERR_CFG:
+ case ICE_DDP_PKG_INVALID_FILE:
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
break;
- case ICE_ERR_NOT_SUPPORTED:
- /* Package File version not supported */
- if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
- (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
- dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
- else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
- (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
- dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
- ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
+ dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
break;
- case ICE_ERR_AQ_ERROR:
- switch (hw->pkg_dwnld_status) {
- case ICE_AQ_RC_ENOSEC:
- case ICE_AQ_RC_EBADSIG:
- dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
- return;
- case ICE_AQ_RC_ESVN:
- dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
- return;
- case ICE_AQ_RC_EBADMAN:
- case ICE_AQ_RC_EBADBUF:
- dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
- /* poll for reset to complete */
- if (ice_check_reset(hw))
- dev_err(dev, "Error resetting device. Please reload the driver\n");
- return;
- default:
- break;
- }
- fallthrough;
+ case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
+ dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ break;
+ case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
+ dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
+ break;
+ case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
+ dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
+ break;
+ case ICE_DDP_PKG_LOAD_ERROR:
+ dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
+ /* poll for reset to complete */
+ if (ice_check_reset(hw))
+ dev_err(dev, "Error resetting device. Please reload the driver\n");
+ break;
+ case ICE_DDP_PKG_ERR:
default:
- dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
- *status);
+ dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
break;
}
}
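ice_load_pkg() below keys safe-mode entry off ice_is_init_pkg_successful()
rather than a zero return, since several ice_ddp_state values describe a
package that is already usable. A plausible sketch of that helper, under
the assumption that exactly the three "loaded and usable" states count as
success:

	static bool ice_is_init_pkg_successful(enum ice_ddp_state state)
	{
		switch (state) {
		case ICE_DDP_PKG_SUCCESS:
		case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
		case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
			return true;
		default:
			return false;
		}
	}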
@@ -4186,24 +4172,24 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
static void
ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
{
- enum ice_status status = ICE_ERR_PARAM;
+ enum ice_ddp_state state = ICE_DDP_PKG_ERR;
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
/* Load DDP Package */
if (firmware && !hw->pkg_copy) {
- status = ice_copy_and_init_pkg(hw, firmware->data,
- firmware->size);
- ice_log_pkg_init(hw, &status);
+ state = ice_copy_and_init_pkg(hw, firmware->data,
+ firmware->size);
+ ice_log_pkg_init(hw, state);
} else if (!firmware && hw->pkg_copy) {
/* Reload package during rebuild after CORER/GLOBR reset */
- status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
- ice_log_pkg_init(hw, &status);
+ state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
+ ice_log_pkg_init(hw, state);
} else {
dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
}
- if (status) {
+ if (!ice_is_init_pkg_successful(state)) {
/* Safe Mode */
clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
return;
@@ -4234,9 +4220,9 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
* ice_send_version - update firmware with driver version
* @pf: PF struct
*
- * Returns ICE_SUCCESS on success, else error code
+ * Returns 0 on success, else error code
*/
-static enum ice_status ice_send_version(struct ice_pf *pf)
+static int ice_send_version(struct ice_pf *pf)
{
struct ice_driver_ver dv;
@@ -4526,7 +4512,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
* true
*/
if (ice_is_safe_mode(pf)) {
- dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
/* we already got function/device capabilities but these don't
* reflect what the driver needs to do in safe mode. Instead of
* adding conditional logic everywhere to ignore these
@@ -4721,6 +4706,10 @@ probe_done:
if (err)
goto err_netdev_reg;
+ err = ice_devlink_register_params(pf);
+ if (err)
+ goto err_netdev_reg;
+
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
if (ice_is_aux_ena(pf)) {
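ice_devlink_register_params() registers the driver's devlink runtime
parameters. A hedged sketch of what such a registration typically looks
like (the getter/setter names are assumptions for illustration, tied to
the RDMA protocol selection added in ice_init_rdma() above):

	static const struct devlink_param ice_devlink_params[] = {
		DEVLINK_PARAM_GENERIC(ENABLE_ROCE,
				      BIT(DEVLINK_PARAM_CMODE_RUNTIME),
				      ice_devlink_enable_roce_get,
				      ice_devlink_enable_roce_set, NULL),
		DEVLINK_PARAM_GENERIC(ENABLE_IWARP,
				      BIT(DEVLINK_PARAM_CMODE_RUNTIME),
				      ice_devlink_enable_iw_get,
				      ice_devlink_enable_iw_set, NULL),
	};

Registration would then call devlink_params_register(priv_to_devlink(pf),
ice_devlink_params, ARRAY_SIZE(ice_devlink_params)), unwound via
devlink_params_unregister() on the err_devlink_reg_param path below.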
@@ -4728,7 +4717,7 @@ probe_done:
if (pf->aux_idx < 0) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
err = -ENOMEM;
- goto err_netdev_reg;
+ goto err_devlink_reg_param;
}
err = ice_init_rdma(pf);
@@ -4747,6 +4736,8 @@ probe_done:
err_init_aux_unroll:
pf->adev = NULL;
ida_free(&ice_aux_ida, pf->aux_idx);
+err_devlink_reg_param:
+ ice_devlink_unregister_params(pf);
err_netdev_reg:
err_send_version_unroll:
ice_vsi_release_all(pf);
@@ -4803,9 +4794,9 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u8 mac_addr[ETH_ALEN];
struct ice_vsi *vsi;
+ int status;
u8 flags;
if (!pf->wol_ena)
@@ -4827,9 +4818,8 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
if (status)
- dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
}
/**
@@ -4861,6 +4851,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_unplug_aux_dev(pf);
if (pf->aux_idx >= 0)
ida_free(&ice_aux_ida, pf->aux_idx);
+ ice_devlink_unregister_params(pf);
set_bit(ICE_DOWN, pf->state);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
@@ -5367,11 +5358,10 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct sockaddr *addr = pi;
- enum ice_status status;
u8 old_mac[ETH_ALEN];
u8 flags = 0;
- int err = 0;
u8 *mac;
+ int err;
mac = (u8 *)addr->sa_data;
@@ -5403,22 +5393,22 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
netif_addr_unlock_bh(netdev);
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
- status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
- if (status && status != ICE_ERR_DOES_NOT_EXIST) {
+ err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
+ if (err && err != -ENOENT) {
err = -EADDRNOTAVAIL;
goto err_update_filters;
}
/* Add filter for new MAC. If filter exists, return success */
- status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_ALREADY_EXISTS)
+ err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
+ if (err == -EEXIST)
/* Although this MAC filter is already present in hardware it's
* possible in some cases (e.g. bonding) that dev_addr was
* modified outside of the driver and needs to be restored back
* to this value.
*/
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
- else if (status)
+ else if (err)
/* error if the new filter addition failed */
err = -EADDRNOTAVAIL;
@@ -5437,10 +5427,10 @@ err_update_filters:
/* write new MAC address to the firmware */
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
- status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
- if (status) {
- netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
- mac, ice_stat_str(status));
+ err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
+ if (err) {
+ netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
+ mac, err);
}
return 0;
}
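The conversion above works because ice_fltr_remove_mac() and ice_fltr_add_mac() now return standard errnos, so the two benign outcomes can be filtered by value. A condensed sketch of the tolerant filter swap, using the names from the hunk above:

	/* Condensed sketch: swap old_mac for mac, tolerating the two benign
	 * cases -- the old filter may already be gone (-ENOENT) and the new
	 * filter may already exist (-EEXIST).
	 */
	err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
	if (err && err != -ENOENT)
		return -EADDRNOTAVAIL;

	err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
	if (err && err != -EEXIST)
		return -EADDRNOTAVAIL;

	return 0;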
@@ -5482,8 +5472,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- enum ice_status status;
u16 q_handle;
+ int status;
u8 tc;
/* Validate maxrate requested is within permitted range */
@@ -5503,13 +5493,11 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
else
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
- if (status) {
- netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
- ice_stat_str(status));
- return -EIO;
- }
+ if (status)
+ netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
+ status);
- return 0;
+ return status;
}
/**
@@ -5879,6 +5867,8 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
+ if (!ice_is_e810(&pf->hw))
+ ice_ptp_link_change(pf, pf->hw.pf_id, true);
}
/* clear this now, and the first stats read will be used as baseline */
@@ -6269,15 +6259,18 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
+ *
+ * Caller of this function is expected to set the vsi->state ICE_VSI_DOWN bit
*/
int ice_down(struct ice_vsi *vsi)
{
int i, tx_err, rx_err, link_err = 0;
- /* Caller of this function is expected to set the
- * vsi->state ICE_DOWN bit
- */
+ WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
+
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (!ice_is_e810(&vsi->back->hw))
+ ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
@@ -6543,7 +6536,6 @@ static void ice_vsi_release_all(struct ice_pf *pf)
static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
struct device *dev = ice_pf_to_dev(pf);
- enum ice_status status;
int i, err;
ice_for_each_vsi(pf, i) {
@@ -6561,12 +6553,11 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
}
/* replay filters for the VSI */
- status = ice_replay_vsi(&pf->hw, vsi->idx);
- if (status) {
- dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
- ice_stat_str(status), vsi->idx,
- ice_vsi_type_str(type));
- return -EIO;
+ err = ice_replay_vsi(&pf->hw, vsi->idx);
+ if (err) {
+ dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
+ err, vsi->idx, ice_vsi_type_str(type));
+ return err;
}
/* Re-map HW VSI number, using VSI handle that has been
@@ -6629,7 +6620,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status ret;
int err;
if (test_bit(ICE_DOWN, pf->state))
@@ -6637,10 +6627,17 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
- ret = ice_init_all_ctrlq(hw);
- if (ret) {
- dev_err(dev, "control queues init failed %s\n",
- ice_stat_str(ret));
+ if (reset_type == ICE_RESET_EMPR) {
+ /* If an EMP reset has occurred, any previously pending flash
+ * update will have completed. We no longer know whether or
+ * not the NVM update EMP reset is restricted.
+ */
+ pf->fw_emp_reset_disabled = false;
+ }
+
+ err = ice_init_all_ctrlq(hw);
+ if (err) {
+ dev_err(dev, "control queues init failed %d\n", err);
goto err_init_ctrlq;
}
@@ -6654,10 +6651,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_load_pkg(NULL, pf);
}
- ret = ice_clear_pf_cfg(hw);
- if (ret) {
- dev_err(dev, "clear PF configuration failed %s\n",
- ice_stat_str(ret));
+ err = ice_clear_pf_cfg(hw);
+ if (err) {
+ dev_err(dev, "clear PF configuration failed %d\n", err);
goto err_init_ctrlq;
}
@@ -6669,21 +6665,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_clear_pxe_mode(hw);
- ret = ice_init_nvm(hw);
- if (ret) {
- dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
+ err = ice_init_nvm(hw);
+ if (err) {
+ dev_err(dev, "ice_init_nvm failed %d\n", err);
goto err_init_ctrlq;
}
- ret = ice_get_caps(hw);
- if (ret) {
- dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
+ err = ice_get_caps(hw);
+ if (err) {
+ dev_err(dev, "ice_get_caps failed %d\n", err);
goto err_init_ctrlq;
}
- ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
- if (ret) {
- dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
+ err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
+ if (err) {
+ dev_err(dev, "set_mac_cfg failed %d\n", err);
goto err_init_ctrlq;
}
@@ -6721,7 +6717,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
* fail.
*/
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_init(pf);
+ ice_ptp_reset(pf);
/* rebuild PF VSI */
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
@@ -6730,6 +6726,10 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
+ /* configure PTP timestamping after VSI rebuild */
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_cfg_timestamp(pf, false);
+
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
if (err) {
dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
@@ -6766,10 +6766,10 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_update_pf_netdev_link(pf);
/* tell the firmware we are up */
- ret = ice_send_version(pf);
- if (ret) {
- dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
- ice_stat_str(ret));
+ err = ice_send_version(pf);
+ if (err) {
+ dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
+ err);
goto err_vsi_rebuild;
}
@@ -6950,78 +6950,6 @@ const char *ice_aq_str(enum ice_aq_err aq_err)
}
/**
- * ice_stat_str - convert status err code to a string
- * @stat_err: the status error code to convert
- */
-const char *ice_stat_str(enum ice_status stat_err)
-{
- switch (stat_err) {
- case ICE_SUCCESS:
- return "OK";
- case ICE_ERR_PARAM:
- return "ICE_ERR_PARAM";
- case ICE_ERR_NOT_IMPL:
- return "ICE_ERR_NOT_IMPL";
- case ICE_ERR_NOT_READY:
- return "ICE_ERR_NOT_READY";
- case ICE_ERR_NOT_SUPPORTED:
- return "ICE_ERR_NOT_SUPPORTED";
- case ICE_ERR_BAD_PTR:
- return "ICE_ERR_BAD_PTR";
- case ICE_ERR_INVAL_SIZE:
- return "ICE_ERR_INVAL_SIZE";
- case ICE_ERR_DEVICE_NOT_SUPPORTED:
- return "ICE_ERR_DEVICE_NOT_SUPPORTED";
- case ICE_ERR_RESET_FAILED:
- return "ICE_ERR_RESET_FAILED";
- case ICE_ERR_FW_API_VER:
- return "ICE_ERR_FW_API_VER";
- case ICE_ERR_NO_MEMORY:
- return "ICE_ERR_NO_MEMORY";
- case ICE_ERR_CFG:
- return "ICE_ERR_CFG";
- case ICE_ERR_OUT_OF_RANGE:
- return "ICE_ERR_OUT_OF_RANGE";
- case ICE_ERR_ALREADY_EXISTS:
- return "ICE_ERR_ALREADY_EXISTS";
- case ICE_ERR_NVM:
- return "ICE_ERR_NVM";
- case ICE_ERR_NVM_CHECKSUM:
- return "ICE_ERR_NVM_CHECKSUM";
- case ICE_ERR_BUF_TOO_SHORT:
- return "ICE_ERR_BUF_TOO_SHORT";
- case ICE_ERR_NVM_BLANK_MODE:
- return "ICE_ERR_NVM_BLANK_MODE";
- case ICE_ERR_IN_USE:
- return "ICE_ERR_IN_USE";
- case ICE_ERR_MAX_LIMIT:
- return "ICE_ERR_MAX_LIMIT";
- case ICE_ERR_RESET_ONGOING:
- return "ICE_ERR_RESET_ONGOING";
- case ICE_ERR_HW_TABLE:
- return "ICE_ERR_HW_TABLE";
- case ICE_ERR_DOES_NOT_EXIST:
- return "ICE_ERR_DOES_NOT_EXIST";
- case ICE_ERR_FW_DDP_MISMATCH:
- return "ICE_ERR_FW_DDP_MISMATCH";
- case ICE_ERR_AQ_ERROR:
- return "ICE_ERR_AQ_ERROR";
- case ICE_ERR_AQ_TIMEOUT:
- return "ICE_ERR_AQ_TIMEOUT";
- case ICE_ERR_AQ_FULL:
- return "ICE_ERR_AQ_FULL";
- case ICE_ERR_AQ_NO_WORK:
- return "ICE_ERR_AQ_NO_WORK";
- case ICE_ERR_AQ_EMPTY:
- return "ICE_ERR_AQ_EMPTY";
- case ICE_ERR_AQ_FW_CRITICAL:
- return "ICE_ERR_AQ_FW_CRITICAL";
- }
-
- return "ICE_ERR_UNKNOWN";
-}
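With ice_stat_str() removed, log messages print plain errnos instead of status strings. The substitutions visible throughout this diff follow a consistent mapping; a non-exhaustive summary drawn from the surrounding hunks:

/* Errno mapping used by this series, as seen in the hunks in this
 * diff (not an exhaustive table):
 *   ICE_ERR_PARAM, ICE_ERR_INVAL_SIZE           -> -EINVAL
 *   ICE_ERR_NO_MEMORY                           -> -ENOMEM
 *   ICE_ERR_DOES_NOT_EXIST                      -> -ENOENT
 *   ICE_ERR_ALREADY_EXISTS                      -> -EEXIST
 *   ICE_ERR_NVM, ICE_ERR_CFG, ICE_ERR_AQ_ERROR  -> -EIO
 */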
-
-/**
* ice_set_rss_lut - Set RSS LUT
* @vsi: Pointer to VSI structure
* @lut: Lookup table
@@ -7033,7 +6961,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
{
struct ice_aq_get_set_rss_lut_params params = {};
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!lut)
return -EINVAL;
@@ -7044,14 +6972,11 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
params.lut = lut;
status = ice_aq_set_rss_lut(hw, &params);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7064,20 +6989,17 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
{
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!seed)
return -EINVAL;
status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7092,7 +7014,7 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
{
struct ice_aq_get_set_rss_lut_params params = {};
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!lut)
return -EINVAL;
@@ -7103,14 +7025,11 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
params.lut = lut;
status = ice_aq_get_rss_lut(hw, &params);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7123,20 +7042,17 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
{
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!seed)
return -EINVAL;
status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7177,8 +7093,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
struct ice_aqc_vsi_props *vsi_props;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
+ int ret;
vsi_props = &vsi->info;
@@ -7196,12 +7111,10 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
- bmode, ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
+ bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
/* Update sw flags for book keeping */
@@ -7233,7 +7146,6 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
struct ice_pf *pf = np->vsi->back;
struct nlattr *attr, *br_spec;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
struct ice_sw *pf_sw;
int rem, v, err = 0;
@@ -7267,14 +7179,14 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
/* Update the unicast switch filter rules for the corresponding
* switch of the netdev
*/
- status = ice_update_sw_rule_bridge_mode(hw);
- if (status) {
- netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
- mode, ice_stat_str(status),
+ err = ice_update_sw_rule_bridge_mode(hw);
+ if (err) {
+ netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
+ mode, err,
ice_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
- return -EIO;
+ return err;
}
pf_sw->bridge_mode = mode;
@@ -7552,6 +7464,67 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
}
/**
+ * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
+ * @pf: ptr to PF device
+ * @vsi: ptr to VSI
+ */
+static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ bool added = false;
+ struct ice_hw *hw;
+ int flow;
+
+ if (!(vsi->num_gfltr || vsi->num_bfltr))
+ return -EINVAL;
+
+ hw = &pf->hw;
+ for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ struct ice_fd_hw_prof *prof;
+ int tun, status;
+ u64 entry_h;
+
+ if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
+ hw->fdir_prof[flow]->cnt))
+ continue;
+
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ enum ice_flow_priority prio;
+ u64 prof_id;
+
+ /* add this VSI to FDir profile for this flow */
+ prio = ICE_FLOW_PRIO_NORMAL;
+ prof = hw->fdir_prof[flow];
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ prof->vsi_h[0], vsi->idx,
+ prio, prof->fdir_seg[tun],
+ &entry_h);
+ if (status) {
+ dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
+ vsi->idx, flow);
+ continue;
+ }
+
+ prof->entry_h[prof->cnt][tun] = entry_h;
+ }
+
+ /* store VSI for filter replay and delete */
+ prof->vsi_h[prof->cnt] = vsi->idx;
+ prof->cnt++;
+
+ added = true;
+ dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
+ flow);
+ }
+
+ if (!added)
+ dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
+
+ return 0;
+}
+
+/**
* ice_add_channel - add a channel by adding VSI
* @pf: ptr to PF device
* @sw_id: underlying HW switching element ID
@@ -7575,6 +7548,8 @@ static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
return -EINVAL;
}
+ ice_add_vsi_to_fdir(pf, vsi);
+
ch->sw_id = sw_id;
ch->vsi_num = vsi->vsi_num;
ch->info.mapping_flags = vsi->info.mapping_flags;
@@ -7875,6 +7850,15 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
if (rem_fltr)
ice_rem_all_chnl_fltrs(pf);
+ /* remove ntuple filters since queue configuration is being changed */
+ if (vsi->netdev->features & NETIF_F_NTUPLE) {
+ struct ice_hw *hw = &pf->hw;
+
+ mutex_lock(&hw->fdir_fltr_lock);
+ ice_fdir_del_all_fltrs(vsi);
+ mutex_unlock(&hw->fdir_fltr_lock);
+ }
+
/* perform cleanup for channels if they exist */
list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
struct ice_vsi *ch_vsi;
@@ -7905,6 +7889,9 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
}
}
+ /* Release FD resources for the channel VSI */
+ ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
+
/* clear the VSI from scheduler tree */
ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
@@ -8449,7 +8436,6 @@ int ice_open_internal(struct net_device *netdev)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_port_info *pi;
- enum ice_status status;
int err;
if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
@@ -8460,11 +8446,10 @@ int ice_open_internal(struct net_device *netdev)
netif_carrier_off(netdev);
pi = vsi->port_info;
- status = ice_update_link_info(pi);
- if (status) {
- netdev_err(netdev, "Failed to get link info, error %s\n",
- ice_stat_str(status));
- return -EIO;
+ err = ice_update_link_info(pi);
+ if (err) {
+ netdev_err(netdev, "Failed to get link info, error %d\n", err);
+ return err;
}
ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index fee37a5844cf..4eb0599714f4 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
+#include <linux/vmalloc.h>
+
#include "ice_common.h"
/**
@@ -16,7 +18,7 @@
*
* Read the NVM using the admin queue commands (0x0701)
*/
-static enum ice_status
+static int
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd)
@@ -27,7 +29,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
cmd = &desc.params.nvm;
if (offset > ICE_AQC_NVM_MAX_OFFSET)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
@@ -60,21 +62,21 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
* Returns a status code on failure. Note that the data pointer may be
* partially updated if some reads succeed before a failure.
*/
-enum ice_status
+int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram)
{
- enum ice_status status;
u32 inlen = *length;
u32 bytes_read = 0;
bool last_cmd;
+ int status;
*length = 0;
/* Verify the length of the read if this is for the Shadow RAM */
if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) {
ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n");
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
do {
@@ -119,7 +121,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
*
* Update the NVM using the admin queue commands (0x0703)
*/
-enum ice_status
+int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd)
@@ -131,7 +133,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
@@ -158,8 +160,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
*
* Erase the NVM sector using the admin queue commands (0x0702)
*/
-enum ice_status
-ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
+int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
@@ -184,12 +185,11 @@ ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
*
* Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
*/
-static enum ice_status
-ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
+static int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
u32 bytes = sizeof(u16);
- enum ice_status status;
__le16 data_local;
+ int status;
/* Note that ice_read_flat_nvm takes into account the 4Kb AdminQ and
* Shadow RAM sector restrictions necessary when reading from the NVM.
@@ -210,8 +210,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
*
* This function will request NVM ownership.
*/
-enum ice_status
-ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
+int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
if (hw->flash.blank_nvm_mode)
return 0;
@@ -318,18 +317,18 @@ static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select ban
* hw->flash.banks data being setup by ice_determine_active_flash_banks()
* during initialization.
*/
-static enum ice_status
+static int
ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
u32 offset, u8 *data, u32 length)
{
- enum ice_status status;
+ int status;
u32 start;
start = ice_get_flash_bank_offset(hw, bank, module);
if (!start) {
ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n",
module);
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
status = ice_acquire_nvm(hw, ICE_RES_READ);
@@ -353,11 +352,11 @@ ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
* Read the specified word from the active NVM module. This includes the CSS
* header at the start of the NVM module.
*/
-static enum ice_status
+static int
ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16),
(__force u8 *)&data_local, sizeof(u16));
@@ -377,7 +376,7 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
* Read the specified word from the copy of the Shadow RAM found in the
* specified NVM module.
*/
-static enum ice_status
+static int
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
@@ -392,11 +391,11 @@ ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u
*
* Read a word from the specified netlist bank.
*/
-static enum ice_status
+static int
ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16),
(__force u8 *)&data_local, sizeof(u16));
@@ -414,9 +413,9 @@ ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (!status) {
@@ -438,13 +437,13 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
* Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value.
*/
-enum ice_status
+int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
- enum ice_status status;
u16 pfa_len, pfa_ptr;
u16 next_tlv;
+ int status;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
if (status) {
@@ -482,7 +481,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
*module_tlv_len = tlv_len;
return 0;
}
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
}
/* Check next TLV, i.e. current TLV pointer + length + 2 words
* (for current TLV's type and length)
@@ -490,7 +489,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
next_tlv = next_tlv + tlv_len + 2;
}
/* Module does not exist */
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
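ice_get_pfa_module_tlv() walks the Preserved Field Area as a sequence of word-addressed (type, length, value) records; each header is two words, so the next record begins at the current offset plus length plus 2. A generic sketch of that walk, where read_word() is a hypothetical stand-in for ice_read_sr_word():

/* Hypothetical TLV walk mirroring the loop above; read_word() stands
 * in for ice_read_sr_word(), and pfa_ptr/pfa_len come from the PFA
 * pointer and length words.
 */
static int find_pfa_tlv(u16 pfa_ptr, u16 pfa_len, u16 wanted, u16 *tlv_out)
{
	u16 tlv = pfa_ptr + 1;		/* first TLV follows the length word */

	while (tlv < pfa_ptr + pfa_len) {
		u16 type, len;

		read_word(tlv, &type);		/* word 0: module type */
		read_word(tlv + 1, &len);	/* word 1: length in words */
		if (type == wanted) {
			*tlv_out = tlv;
			return 0;
		}
		tlv += len + 2;			/* value plus two header words */
	}

	return -ENOENT;				/* module does not exist */
}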
/**
@@ -501,12 +500,11 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
*
* Reads the part number string from the NVM.
*/
-enum ice_status
-ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
+int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
{
u16 pba_tlv, pba_tlv_len;
- enum ice_status status;
u16 pba_word, pba_size;
+ int status;
u16 i;
status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
@@ -525,7 +523,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
if (pba_tlv_len < pba_size) {
ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
}
/* Subtract one to get PBA word count (PBA Size word is included in
@@ -534,7 +532,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
for (i = 0; i < pba_size; i++) {
@@ -561,11 +559,11 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
* Read the NVM EETRACK ID and map version of the main NVM image bank, filling
* in the NVM info structure.
*/
-static enum ice_status
+static int
ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm)
{
u16 eetrack_lo, eetrack_hi, ver;
- enum ice_status status;
+ int status;
status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) {
@@ -601,7 +599,7 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv
* inactive NVM bank. Used to access version data for a pending update that
* has not yet been activated.
*/
-enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
+int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
{
return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm);
}
@@ -615,49 +613,73 @@ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info
* Searches through the Option ROM flash contents to locate the CIVD data for
* the image.
*/
-static enum ice_status
+static int
ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_orom_civd_info *civd)
{
- struct ice_orom_civd_info tmp;
- enum ice_status status;
+ u8 *orom_data;
+ int status;
u32 offset;
/* The CIVD section is located in the Option ROM aligned to 512 bytes.
* The first 4 bytes must contain the ASCII characters "$CIV".
* A simple modulo 256 sum of all of the bytes of the structure must
* equal 0.
+ *
+ * The exact location is unknown and varies between images but is
+ * usually somewhere in the middle of the bank. We need to scan the
+ * Option ROM bank to locate it.
+ *
+ * It's significantly faster to read the entire Option ROM up front
+ * using the maximum page size than to read each possible location
+ * with a separate firmware command.
*/
+ orom_data = vzalloc(hw->flash.banks.orom_size);
+ if (!orom_data)
+ return -ENOMEM;
+
+ status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
+ orom_data, hw->flash.banks.orom_size);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
+ return status;
+ }
+
+ /* Scan the memory buffer to locate the CIVD data section */
for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) {
+ struct ice_orom_civd_info *tmp;
u8 sum = 0, i;
- status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
- offset, (u8 *)&tmp, sizeof(tmp));
- if (status) {
- ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM CIVD data\n");
- return status;
- }
+ tmp = (struct ice_orom_civd_info *)&orom_data[offset];
/* Skip forward until we find a matching signature */
- if (memcmp("$CIV", tmp.signature, sizeof(tmp.signature)) != 0)
+ if (memcmp("$CIV", tmp->signature, sizeof(tmp->signature)) != 0)
continue;
+ ice_debug(hw, ICE_DBG_NVM, "Found CIVD section at offset %u\n",
+ offset);
+
/* Verify that the simple checksum is zero */
- for (i = 0; i < sizeof(tmp); i++)
+ for (i = 0; i < sizeof(*tmp); i++)
/* cppcheck-suppress objectIndex */
- sum += ((u8 *)&tmp)[i];
+ sum += ((u8 *)tmp)[i];
if (sum) {
ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
sum);
- return ICE_ERR_NVM;
+ goto err_invalid_checksum;
}
- *civd = tmp;
+ *civd = *tmp;
+ vfree(orom_data);
return 0;
}
- return ICE_ERR_NVM;
+ ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n");
+
+err_invalid_checksum:
+ vfree(orom_data);
+ return -EIO;
}
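The rewritten scan relies on the two invariants spelled out in the comment above: a "$CIV" signature on a 512-byte boundary and a byte-wise sum of zero modulo 256 over the whole structure. A standalone sketch of that validity check (kernel types assumed; civd_len stands in for sizeof(struct ice_orom_civd_info)):

/* Minimal sketch of the CIVD validity check used by the scan above:
 * the four-byte signature must match and the modulo-256 sum of every
 * byte in the structure must be zero.
 */
static bool civd_valid(const u8 *buf, size_t civd_len)
{
	u8 sum = 0;
	size_t i;

	if (memcmp(buf, "$CIV", 4))
		return false;

	for (i = 0; i < civd_len; i++)
		sum += buf[i];		/* u8 arithmetic is already mod 256 */

	return sum == 0;
}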
/**
@@ -669,12 +691,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
* Read Option ROM version and security revision from the Option ROM flash
* section.
*/
-static enum ice_status
+static int
ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom)
{
struct ice_orom_civd_info civd;
- enum ice_status status;
u32 combo_ver;
+ int status;
status = ice_get_orom_civd_data(hw, bank, &civd);
if (status) {
@@ -700,7 +722,7 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o
* section of flash. Used to access version data for a pending update that has
* not yet been activated.
*/
-enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
+int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
{
return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom);
}
@@ -715,13 +737,13 @@ enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_inf
* Topology section to find the Netlist ID block and extract the relevant
* information into the netlist version structure.
*/
-static enum ice_status
+static int
ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_netlist_info *netlist)
{
u16 module_id, length, node_count, i;
- enum ice_status status;
u16 *id_blk;
+ int status;
status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id);
if (status)
@@ -730,7 +752,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) {
ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n",
ICE_NETLIST_LINK_TOPO_MOD_ID, module_id);
- return ICE_ERR_NVM;
+ return -EIO;
}
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length);
@@ -741,7 +763,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (length < ICE_NETLIST_ID_BLK_SIZE) {
ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n",
ICE_NETLIST_ID_BLK_SIZE, length);
- return ICE_ERR_NVM;
+ return -EIO;
}
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count);
@@ -751,7 +773,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
id_blk = kcalloc(ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
if (!id_blk)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Read out the entire Netlist ID Block at once. */
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR,
@@ -791,7 +813,7 @@ exit_error:
* extract version data of a pending flash update in order to display the
* version data.
*/
-enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
+int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
{
return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist);
}
@@ -804,10 +826,10 @@ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netli
* the actual size is smaller. Use bisection to determine the accessible size
* of flash memory.
*/
-static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
+static int ice_discover_flash_size(struct ice_hw *hw)
{
u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
@@ -819,7 +841,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
u8 data;
status = ice_read_flat_nvm(hw, offset, &len, &data, false);
- if (status == ICE_ERR_AQ_ERROR &&
+ if (status == -EIO &&
hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
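ice_discover_flash_size() treats a read rejected by firmware with ICE_AQ_RC_EINVAL as "offset beyond the flash" and narrows [min_size, max_size) by bisection. A schematic version, where probe() is a hypothetical stand-in for the one-byte ice_read_flat_nvm() read above:

	/* Schematic bisection over the flash size; probe(offset) stands in
	 * for a one-byte ice_read_flat_nvm() read and fails when firmware
	 * rejects the offset as out of range.
	 */
	u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;

	while ((max_size - min_size) > 1) {
		u32 offset = (max_size + min_size) / 2;

		if (probe(offset))
			max_size = offset;	/* offset is beyond the flash */
		else
			min_size = offset;	/* offset is readable */
	}
	/* the accessible flash size is max_size bytes */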
@@ -859,10 +881,9 @@ err_read_flat_nvm:
* sector size by using the highest bit. The reported pointer value will be in
* bytes, intended for flat NVM reads.
*/
-static enum ice_status
-ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
+static int ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
{
- enum ice_status status;
+ int status;
u16 value;
status = ice_read_sr_word(hw, offset, &value);
@@ -891,10 +912,9 @@ ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
* Each area size word is specified in 4KB sector units. This function reports
* the size in bytes, intended for flat NVM reads.
*/
-static enum ice_status
-ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
+static int ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
{
- enum ice_status status;
+ int status;
u16 value;
status = ice_read_sr_word(hw, offset, &value);
@@ -917,12 +937,11 @@ ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
* structure for later use in order to calculate the correct offset to read
* from the active module.
*/
-static enum ice_status
-ice_determine_active_flash_banks(struct ice_hw *hw)
+static int ice_determine_active_flash_banks(struct ice_hw *hw)
{
struct ice_bank_info *banks = &hw->flash.banks;
- enum ice_status status;
u16 ctrl_word;
+ int status;
status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word);
if (status) {
@@ -933,7 +952,7 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
/* Check that the control word indicates validity */
if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
- return ICE_ERR_CFG;
+ return -EIO;
}
if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK))
@@ -997,12 +1016,12 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
* This function reads and populates NVM settings such as Shadow RAM size,
* max_timeout, and blank_nvm_mode
*/
-enum ice_status ice_init_nvm(struct ice_hw *hw)
+int ice_init_nvm(struct ice_hw *hw)
{
struct ice_flash_info *flash = &hw->flash;
- enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
+ int status;
/* The SR size is stored regardless of the NVM programming mode
* as the blank mode may be used in the factory line.
@@ -1021,7 +1040,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
/* Blank programming mode */
flash->blank_nvm_mode = true;
ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
- return ICE_ERR_NVM_BLANK_MODE;
+ return -EIO;
}
status = ice_discover_flash_size(hw);
@@ -1060,11 +1079,11 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
*
* Verify NVM PFA checksum validity (0x0706)
*/
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
+int ice_nvm_validate_checksum(struct ice_hw *hw)
{
struct ice_aqc_nvm_checksum *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
@@ -1080,7 +1099,7 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
if (!status)
if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
- status = ICE_ERR_NVM_CHECKSUM;
+ status = -EIO;
return status;
}
@@ -1088,22 +1107,35 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
/**
* ice_nvm_write_activate
* @hw: pointer to the HW struct
- * @cmd_flags: NVM activate admin command bits (banks to be validated)
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
*
* Update the control word with the required banks' validity bits
 * and dump the Shadow RAM to flash (0x0707)
+ *
+ * cmd_flags controls which banks to activate, and the preservation level to
+ * use when activating the NVM bank.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware indicating certain status,
+ * such as whether EMP reset is enabled.
*/
-enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
+int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
+ int err;
cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
cmd->cmd_flags = cmd_flags;
- return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!err && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return err;
}
/**
@@ -1113,7 +1145,7 @@ enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
* Update empr (0x0709). This command allows SW to
* request an EMPR to activate new FW.
*/
-enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
+int ice_aq_nvm_update_empr(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -1136,7 +1168,7 @@ enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
* as part of the NVM update as the first cmd in the flow.
*/
-enum ice_status
+int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd)
{
@@ -1144,7 +1176,7 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
struct ice_aq_desc desc;
if (length != 0 && !data)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.pkg_data;
@@ -1173,17 +1205,17 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
* the TransferFlag is set to End or StartAndEnd.
*/
-enum ice_status
+int
ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 transfer_flag, u8 *comp_response,
u8 *comp_response_code, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_pass_comp_tbl *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data || !comp_response || !comp_response_code)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.pass_comp_tbl;
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index c6f05f43d593..856d1ad4398b 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -12,38 +12,34 @@ struct ice_orom_civd_info {
__le16 combo_name[32]; /* Unicode string representing the Combo Image version */
} __packed;
-enum ice_status
-ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
+int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
-enum ice_status
+int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
-enum ice_status
+int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type);
-enum ice_status
-ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
-enum ice_status
-ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
-enum ice_status
+int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
+int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
+int
ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist);
-enum ice_status
-ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
-enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
-enum ice_status
+int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
+int ice_init_nvm(struct ice_hw *hw);
+int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
+int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
-enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags);
-enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw);
-enum ice_status
+int ice_nvm_validate_checksum(struct ice_hw *hw);
+int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags);
+int ice_aq_nvm_update_empr(struct ice_hw *hw);
+int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 transfer_flag, u8 *comp_response,
u8 *comp_response_code, struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index bf7247c6f58e..ae291d442539 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -6,6 +6,8 @@
#define E810_OUT_PROP_DELAY_NS 1
+#define UNKNOWN_INCVAL_E822 0x100000000ULL
+
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
/* name idx func chan */
{ "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
@@ -281,6 +283,8 @@ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
else
val &= ~PFINT_OICR_TSYN_TX_M;
wr32(&pf->hw, PFINT_OICR_ENA, val);
+
+ pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
}
/**
@@ -303,6 +307,9 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
continue;
vsi->rx_rings[i]->ptp_rx = on;
}
+
+ pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
}
/**
@@ -313,18 +320,10 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
* This function will configure timestamping during PTP initialization
* and deinitialization
*/
-static void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
+void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
{
ice_set_tx_tstamp(pf, ena);
ice_set_rx_tstamp(pf, ena);
-
- if (ena) {
- pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
- pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
- } else {
- pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
- pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
- }
}
/**
@@ -682,6 +681,406 @@ static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
}
/**
+ * ice_base_incval - Get base timer increment value
+ * @pf: Board private structure
+ *
+ * Look up the base timer increment value for this device. The base increment
+ * value is used to define the nominal clock tick rate. This increment value
+ * is programmed during device initialization. It is also used as the basis
+ * for calculating adjustments using scaled_ppm.
+ */
+static u64 ice_base_incval(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u64 incval;
+
+ if (ice_is_e810(hw))
+ incval = ICE_PTP_NOMINAL_INCVAL_E810;
+ else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
+ incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
+ else
+ incval = UNKNOWN_INCVAL_E822;
+
+ dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
+ incval);
+
+ return incval;
+}
+
+/**
+ * ice_ptp_reset_ts_memory_quad - Reset timestamp memory for one quad
+ * @pf: The PF private data structure
+ * @quad: The quad (0-4)
+ */
+static void ice_ptp_reset_ts_memory_quad(struct ice_pf *pf, int quad)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
+ ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
+}
+
+/**
+ * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
+ * @port: PTP port for which Tx FIFO is checked
+ */
+static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
+{
+ int quad = port->port_num / ICE_PORTS_PER_QUAD;
+ int offs = port->port_num % ICE_PORTS_PER_QUAD;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ u32 val, phy_sts;
+ int err;
+
+ pf = ptp_port_to_pf(port);
+ hw = &pf->hw;
+
+ if (port->tx_fifo_busy_cnt == FIFO_OK)
+ return 0;
+
+ /* need to read FIFO state */
+ if (offs == 0 || offs == 1)
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS,
+ &val);
+ else
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS,
+ &val);
+
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
+ port->port_num, err);
+ return err;
+ }
+
+ if (offs & 0x1)
+ phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S;
+ else
+ phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S;
+
+ if (phy_sts & FIFO_EMPTY) {
+ port->tx_fifo_busy_cnt = FIFO_OK;
+ return 0;
+ }
+
+ port->tx_fifo_busy_cnt++;
+
+ dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
+ port->tx_fifo_busy_cnt, port->port_num);
+
+ if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
+ dev_dbg(ice_pf_to_dev(pf),
+ "Port %d Tx FIFO still not empty; resetting quad %d\n",
+ port->port_num, quad);
+ ice_ptp_reset_ts_memory_quad(pf, quad);
+ port->tx_fifo_busy_cnt = FIFO_OK;
+ return 0;
+ }
+
+ return -EAGAIN;
+}
+
+/**
+ * ice_ptp_check_tx_offset_valid - Check if the Tx PHY offset is valid
+ * @port: the PTP port to check
+ *
+ * Checks whether the Tx offset for the PHY associated with this port is
+ * valid. Returns 0 if the offset is valid, and a non-zero error code if it is
+ * not.
+ */
+static int ice_ptp_check_tx_offset_valid(struct ice_ptp_port *port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(port);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+ int err;
+
+ err = ice_ptp_check_tx_fifo(port);
+ if (err)
+ return err;
+
+ err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_TX_OV_STATUS,
+ &val);
+ if (err) {
+ dev_err(dev, "Failed to read TX_OV_STATUS for port %d, err %d\n",
+ port->port_num, err);
+ return -EAGAIN;
+ }
+
+ if (!(val & P_REG_TX_OV_STATUS_OV_M))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_check_rx_offset_valid - Check if the Rx PHY offset is valid
+ * @port: the PTP port to check
+ *
+ * Checks whether the Rx offset for the PHY associated with this port is
+ * valid. Returns 0 if the offset is valid, and a non-zero error code if it is
+ * not.
+ */
+static int ice_ptp_check_rx_offset_valid(struct ice_ptp_port *port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(port);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int err;
+ u32 val;
+
+ err = ice_read_phy_reg_e822(hw, port->port_num, P_REG_RX_OV_STATUS,
+ &val);
+ if (err) {
+ dev_err(dev, "Failed to read RX_OV_STATUS for port %d, err %d\n",
+ port->port_num, err);
+ return err;
+ }
+
+ if (!(val & P_REG_RX_OV_STATUS_OV_M))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_check_offset_valid - Check port offset valid bit
+ * @port: Port for which offset valid bit is checked
+ *
+ * Returns 0 if both the Tx and Rx offsets are valid, and -EAGAIN if either
+ * offset is not ready.
+ */
+static int ice_ptp_check_offset_valid(struct ice_ptp_port *port)
+{
+ int tx_err, rx_err;
+
+ /* always check both Tx and Rx offset validity */
+ tx_err = ice_ptp_check_tx_offset_valid(port);
+ rx_err = ice_ptp_check_rx_offset_valid(port);
+
+ if (tx_err || rx_err)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_wait_for_offset_valid - Check for valid Tx and Rx offsets
+ * @work: Pointer to the kthread_work structure for this task
+ *
+ * Check whether both the Tx and Rx offsets are valid for enabling the vernier
+ * calibration.
+ *
+ * Once we have valid offsets from hardware, update the total Tx and Rx
+ * offsets, and exit bypass mode. This enables more precise timestamps using
+ * the extra data measured during the vernier calibration process.
+ */
+static void ice_ptp_wait_for_offset_valid(struct kthread_work *work)
+{
+ struct ice_ptp_port *port;
+ int err;
+ struct device *dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+
+ port = container_of(work, struct ice_ptp_port, ov_work.work);
+ pf = ptp_port_to_pf(port);
+ hw = &pf->hw;
+ dev = ice_pf_to_dev(pf);
+
+ if (ice_ptp_check_offset_valid(port)) {
+ /* Offsets not ready yet, try again later */
+ kthread_queue_delayed_work(pf->ptp.kworker,
+ &port->ov_work,
+ msecs_to_jiffies(100));
+ return;
+ }
+
+ /* Offsets are valid, so it is safe to exit bypass mode */
+ err = ice_phy_exit_bypass_e822(hw, port->port_num);
+ if (err) {
+ dev_warn(dev, "Failed to exit bypass mode for PHY port %u, err %d\n",
+ port->port_num, err);
+ return;
+ }
+}
+
+/**
+ * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
+ * @ptp_port: PTP port to stop
+ */
+static int
+ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(ptp_port);
+ u8 port = ptp_port->port_num;
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (ice_is_e810(hw))
+ return 0;
+
+ mutex_lock(&ptp_port->ps_lock);
+
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+
+ err = ice_stop_phy_timer_e822(hw, port, true);
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
+ port, err);
+
+ mutex_unlock(&ptp_port->ps_lock);
+
+ return err;
+}
+
+/**
+ * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
+ * @ptp_port: PTP port for which the PHY start is set
+ *
+ * Start the PHY timestamping block, and initiate Vernier timestamping
+ * calibration. If timestamping cannot be calibrated (such as if link is down)
+ * then disable the timestamping block instead.
+ */
+static int
+ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(ptp_port);
+ u8 port = ptp_port->port_num;
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (ice_is_e810(hw))
+ return 0;
+
+ if (!ptp_port->link_up)
+ return ice_ptp_port_phy_stop(ptp_port);
+
+ mutex_lock(&ptp_port->ps_lock);
+
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+
+ /* temporarily disable Tx timestamps while calibrating PHY offset */
+ ptp_port->tx.calibrating = true;
+ ptp_port->tx_fifo_busy_cnt = 0;
+
+ /* Start the PHY timer in bypass mode */
+ err = ice_start_phy_timer_e822(hw, port, true);
+ if (err)
+ goto out_unlock;
+
+ /* Enable Tx timestamps right away */
+ ptp_port->tx.calibrating = false;
+
+ kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);
+
+out_unlock:
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
+ port, err);
+
+ mutex_unlock(&ptp_port->ps_lock);
+
+ return err;
+}
+
+/**
+ * ice_ptp_link_change - Set or clear port registers for timestamping
+ * @pf: Board private structure
+ * @port: Port for which the PHY start is set
+ * @linkup: Link is up or down
+ */
+int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+{
+ struct ice_ptp_port *ptp_port;
+
+ if (!test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ return 0;
+
+ if (port >= ICE_NUM_EXTERNAL_PORTS)
+ return -EINVAL;
+
+ ptp_port = &pf->ptp.port;
+ if (ptp_port->port_num != port)
+ return -EINVAL;
+
+ /* Update cached link state for this port immediately */
+ ptp_port->link_up = linkup;
+
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ /* PTP is not setup */
+ return -EAGAIN;
+
+ return ice_ptp_port_phy_restart(ptp_port);
+}
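The call pattern for this helper is established elsewhere in this patch (see the ice_up_complete and ice_down hunks earlier in this diff): only non-E810 parts drive the per-port PHY restart on link transitions.

	/* Usage as established in ice_up_complete() / ice_down() above: */
	if (!ice_is_e810(&pf->hw))
		ice_ptp_link_change(pf, pf->hw.pf_id, true);	/* link came up */

	if (!ice_is_e810(&vsi->back->hw))
		ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);	/* link went down */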
+
+/**
+ * ice_ptp_reset_ts_memory - Reset timestamp memory for all quads
+ * @pf: The PF private data structure
+ */
+static void ice_ptp_reset_ts_memory(struct ice_pf *pf)
+{
+ int quad;
+
+ quad = pf->hw.port_info->lport / ICE_PORTS_PER_QUAD;
+ ice_ptp_reset_ts_memory_quad(pf, quad);
+}
+
+/**
+ * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
+ * @pf: PF private structure
+ * @ena: bool value to enable or disable interrupt
+ * @threshold: Minimum number of packets at which intr is triggered
+ *
+ * Utility function to enable or disable Tx timestamp interrupt and threshold
+ */
+static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
+{
+ struct ice_hw *hw = &pf->hw;
+ int err = 0;
+ int quad;
+ u32 val;
+
+ ice_ptp_reset_ts_memory(pf);
+
+ for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ &val);
+ if (err)
+ break;
+
+ if (ena) {
+ val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
+ val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) &
+ Q_REG_TX_MEM_GBL_CFG_INTR_THR_M);
+ } else {
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ }
+
+ err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ val);
+ if (err)
+ break;
+ }
+
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
+ err);
+ return err;
+}
+
+/**
+ * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
+ * @pf: Board private structure
+ */
+static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
+{
+ ice_ptp_port_phy_restart(&pf->ptp.port);
+}
+
+/**
* ice_ptp_adjfine - Adjust clock increment rate
* @info: the driver's PTP info structure
* @scaled_ppm: Parts per million with 16-bit fractional field
@@ -698,14 +1097,14 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
int neg_adj = 0;
int err;
- incval = ICE_PTP_NOMINAL_INCVAL_E810;
+ incval = ice_base_incval(pf);
if (scaled_ppm < 0) {
neg_adj = 1;
scaled_ppm = -scaled_ppm;
}
- while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) {
+ while ((u64)scaled_ppm > div64_u64(U64_MAX, incval)) {
/* handle overflow by scaling down the scaled_ppm and
* the divisor, losing some precision
*/
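scaled_ppm carries a 16-bit fractional field, so one unit is 2^-16 ppm; the loop above halves scaled_ppm (and, in the full function, the divisor) until incval * scaled_ppm can no longer overflow a u64. A sketch of the adjustment arithmetic that typically follows, assuming the standard adjfine convention rather than the patch's exact code:

	/* diff = incval * scaled_ppm / (10^6 * 2^16): scale the base
	 * increment by the requested rate change, drop the 16 fractional
	 * bits, then divide by one million to turn ppm into an absolute
	 * delta. The guard loop above makes the multiply safe.
	 */
	u64 freq = (incval * (u64)scaled_ppm) >> 16;
	u64 diff = div_u64(freq, 1000000);

	incval = neg_adj ? incval - diff : incval + diff;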
@@ -905,7 +1304,10 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
NSEC_PER_SEC) * NSEC_PER_SEC + phase;
- start_time -= E810_OUT_PROP_DELAY_NS;
+ if (ice_is_e810(hw))
+ start_time -= E810_OUT_PROP_DELAY_NS;
+ else
+ start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw));
/* 2. Write TARGET time */
wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
@@ -1088,6 +1490,12 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
struct ice_hw *hw = &pf->hw;
int err;
+ /* For Vernier mode, we need to recalibrate after a new settime.
+ * Start by disabling the timestamp block
+ */
+ if (pf->ptp.port.link_up)
+ ice_ptp_port_phy_stop(&pf->ptp.port);
+
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
goto exit;
@@ -1104,6 +1512,10 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* Reenable periodic outputs */
ice_ptp_enable_all_clkout(pf);
+
+ /* Recalibrate and re-enable timestamp block */
+ if (pf->ptp.port.link_up)
+ ice_ptp_port_phy_restart(&pf->ptp.port);
exit:
if (err) {
dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
@@ -1177,6 +1589,101 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return 0;
}
+#ifdef CONFIG_ICE_HWTS
+/**
+ * ice_ptp_get_syncdevicetime - Get the cross time stamp info
+ * @device: Current device time
+ * @system: System counter value read synchronously with device time
+ * @ctx: Context provided by timekeeping code
+ *
+ * Read device and system (ART) clock simultaneously and return the corrected
+ * clock values in ns.
+ */
+static int
+ice_ptp_get_syncdevicetime(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct ice_pf *pf = (struct ice_pf *)ctx;
+ struct ice_hw *hw = &pf->hw;
+ u32 hh_lock, hh_art_ctl;
+ int i;
+
+ /* Get the HW lock */
+ hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+ if (hh_lock & PFHH_SEM_BUSY_M) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
+ return -EFAULT;
+ }
+
+ /* Start the ART and device clock sync sequence */
+ hh_art_ctl = rd32(hw, GLHH_ART_CTL);
+ hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
+ wr32(hw, GLHH_ART_CTL, hh_art_ctl);
+
+#define MAX_HH_LOCK_TRIES 100
+
+ for (i = 0; i < MAX_HH_LOCK_TRIES; i++) {
+ /* Wait for sync to complete */
+ hh_art_ctl = rd32(hw, GLHH_ART_CTL);
+ if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
+ udelay(1);
+ continue;
+ } else {
+ u32 hh_ts_lo, hh_ts_hi, tmr_idx;
+ u64 hh_ts;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ /* Read ART time */
+ hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
+ hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
+ hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
+ *system = convert_art_ns_to_tsc(hh_ts);
+ /* Read Device source clock time */
+ hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
+ hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
+ hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
+ *device = ns_to_ktime(hh_ts);
+ break;
+ }
+ }
+ /* Release HW lock */
+ hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+ hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
+ wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
+
+ if (i == MAX_HH_LOCK_TRIES)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp
+ * @info: the driver's PTP info structure
+ * @cts: The memory to fill the cross timestamp info
+ *
+ * Capture a cross timestamp between the ART and the device PTP hardware
+ * clock. Fill the cross timestamp information and report it back to the
+ * caller.
+ *
+ * This is only valid for E822 devices which have support for generating the
+ * cross timestamp via PCIe PTM.
+ *
+ * In order to correctly correlate the ART timestamp back to the TSC time, the
+ * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
+ */
+static int
+ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+
+ return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
+ pf, NULL, cts);
+}
+#endif /* CONFIG_ICE_HWTS */
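With getcrosststamp wired up, userspace can request a synchronized pair of device and system timestamps through the PTP character device. A hedged userspace sketch using the standard PTP_SYS_OFFSET_PRECISE ioctl; the /dev/ptp0 path is illustrative and must be the clock exposed by this PF:

/* Userspace sketch: fetch one PTM-based cross timestamp. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise cts = { 0 };
	int fd = open("/dev/ptp0", O_RDWR);	/* illustrative path */

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &cts))
		return 1;

	printf("device %lld.%09u system %lld.%09u\n",
	       (long long)cts.device.sec, cts.device.nsec,
	       (long long)cts.sys_realtime.sec, cts.sys_realtime.nsec);
	return 0;
}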
+
/**
* ice_ptp_get_ts_config - ioctl interface to read the timestamping config
* @pf: Board private structure
@@ -1205,10 +1712,6 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
static int
ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
- /* Reserved for future extensions. */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
ice_set_tx_tstamp(pf, false);
@@ -1238,7 +1741,6 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
- config->rx_filter = HWTSTAMP_FILTER_ALL;
ice_set_rx_tstamp(pf, true);
break;
default:
@@ -1270,8 +1772,8 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
if (err)
return err;
- /* Save these settings for future reference */
- pf->ptp.tstamp_config = config;
+ /* Return the actual configuration set */
+ config = pf->ptp.tstamp_config;
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
@@ -1403,6 +1905,26 @@ static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
}
/**
+ * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
+ * @pf: Board private structure
+ * @info: PTP info to fill
+ *
+ * Assign functions to the PTP capabilities structure for E822 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E822
+ * devices.
+ */
+static void
+ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+#ifdef CONFIG_ICE_HWTS
+ if (boot_cpu_has(X86_FEATURE_ART) &&
+ boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
+ info->getcrosststamp = ice_ptp_getcrosststamp_e822;
+#endif /* CONFIG_ICE_HWTS */
+}
+
+/**
* ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
* @pf: Board private structure
* @info: PTP info to fill
@@ -1441,7 +1963,10 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->gettimex64 = ice_ptp_gettimex64;
info->settime64 = ice_ptp_settime64;
- ice_ptp_set_funcs_e810(pf, info);
+ if (ice_is_e810(&pf->hw))
+ ice_ptp_set_funcs_e810(pf, info);
+ else
+ ice_ptp_set_funcs_e822(pf, info);
}
/**
@@ -1540,19 +2065,16 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
if (err)
continue;
- /* Check if the timestamp is valid */
- if (!(raw_tstamp & ICE_PTP_TS_VALID))
+ /* Check if the timestamp is invalid or stale */
+ if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
+ raw_tstamp == tx->tstamps[idx].cached_tstamp)
continue;
- /* clear the timestamp register, so that it won't show valid
- * again when re-used.
- */
- ice_clear_phy_tstamp(hw, tx->quad, phy_idx);
-
/* The timestamp is valid, so we'll go ahead and clear this
* index and then send the timestamp up to the stack.
*/
spin_lock(&tx->lock);
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
@@ -1591,7 +2113,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
u8 idx;
/* Check if this tracker is initialized */
- if (!tx->init)
+ if (!tx->init || tx->calibrating)
return -1;
spin_lock(&tx->lock);
@@ -1707,13 +2229,34 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
kfree(tx->tstamps);
tx->tstamps = NULL;
- kfree(tx->in_use);
+ bitmap_free(tx->in_use);
tx->in_use = NULL;
tx->len = 0;
}
/**
+ * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
+ * the timestamp block is shared for all ports in the same quad. To avoid
+ * ports using the same timestamp index, logically break the block of
+ * registers into chunks based on the port number.
+ */
+static int
+ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+{
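+ /* Give each port within a quad its own slice of the quad's timestamp
+ * indices so that ports sharing a quad never pick the same index
+ */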
+ tx->quad = port / ICE_PORTS_PER_QUAD;
+ tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
+ tx->len = INDEX_PER_PORT;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
* ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
@@ -1784,6 +2327,130 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
+ * ice_ptp_reset - Initialize PTP hardware clock support after reset
+ * @pf: Board private structure
+ */
+void ice_ptp_reset(struct ice_pf *pf)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ struct ice_hw *hw = &pf->hw;
+ struct timespec64 ts;
+ int err, itr = 1;
+ u64 time_diff;
+
+ if (test_bit(ICE_PFR_REQ, pf->state))
+ goto pfr;
+
+ if (!hw->func_caps.ts_func_info.src_tmr_owned)
+ goto reset_ts;
+
+ err = ice_ptp_init_phc(hw);
+ if (err)
+ goto err;
+
+ /* Acquire the global hardware lock */
+ if (!ice_ptp_lock(hw)) {
+ err = -EBUSY;
+ goto err;
+ }
+
+ /* Write the increment time value to PHY and LAN */
+ err = ice_ptp_write_incval(hw, ice_base_incval(pf));
+ if (err) {
+ ice_ptp_unlock(hw);
+ goto err;
+ }
+
+ /* Write the initial Time value to PHY and LAN using the cached PHC
+ * time before the reset and time difference between stopping and
+ * starting the clock.
+ */
+ if (ptp->cached_phc_time) {
+ time_diff = ktime_get_real_ns() - ptp->reset_time;
+ ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
+ } else {
+ ts = ktime_to_timespec64(ktime_get_real());
+ }
+ err = ice_ptp_write_init(pf, &ts);
+ if (err) {
+ ice_ptp_unlock(hw);
+ goto err;
+ }
+
+ /* Release the global hardware lock */
+ ice_ptp_unlock(hw);
+
+ if (!ice_is_e810(hw)) {
+ /* Enable quad interrupts */
+ err = ice_ptp_tx_ena_intr(pf, true, itr);
+ if (err)
+ goto err;
+ }
+
+reset_ts:
+ /* Restart the PHY timestamping block */
+ ice_ptp_reset_phy_timestamping(pf);
+
+pfr:
+ /* Init Tx structures */
+ if (ice_is_e810(&pf->hw)) {
+ err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
+ } else {
+ kthread_init_delayed_work(&ptp->port.ov_work,
+ ice_ptp_wait_for_offset_valid);
+ err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
+ ptp->port.port_num);
+ }
+ if (err)
+ goto err;
+
+ set_bit(ICE_FLAG_PTP, pf->flags);
+
+ /* Start periodic work going */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
+
+ dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
+ return;
+
+err:
+ dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
+}
+
+/**
+ * ice_ptp_prepare_for_reset - Prepare PTP for reset
+ * @pf: Board private structure
+ */
+void ice_ptp_prepare_for_reset(struct ice_pf *pf)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ u8 src_tmr;
+
+ clear_bit(ICE_FLAG_PTP, pf->flags);
+
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_cfg_timestamp(pf, false);
+
+ kthread_cancel_delayed_work_sync(&ptp->work);
+ kthread_cancel_work_sync(&ptp->extts_work);
+
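+ /* A PF reset leaves the source timer running, so skip the remaining
+ * teardown in that case
+ */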
+ if (test_bit(ICE_PFR_REQ, pf->state))
+ return;
+
+ ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+
+ /* Disable periodic outputs */
+ ice_ptp_disable_all_clkout(pf);
+
+ src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
+
+ /* Disable source clock */
+ wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Acquire PHC and system timer to restore after reset */
+ ptp->reset_time = ktime_get_real_ns();
+}
+
+/**
* ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
* @pf: Board private structure
*
@@ -1793,27 +2460,16 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
*/
static int ice_ptp_init_owner(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- u8 src_idx;
- int err;
-
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
+ int err, itr = 1;
- /* Clear some HW residue and enable source clock */
- src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
-
- /* Enable source clocks */
- wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
-
- /* Enable PHY time sync */
- err = ice_ptp_init_phy_e810(hw);
- if (err)
- goto err_exit;
-
- /* Clear event status indications for auxiliary pins */
- (void)rd32(hw, GLTSYN_STAT(src_idx));
+ err = ice_ptp_init_phc(hw);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
+ err);
+ return err;
+ }
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
@@ -1822,7 +2478,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
}
/* Write the increment time value to PHY and LAN */
- err = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+ err = ice_ptp_write_incval(hw, ice_base_incval(pf));
if (err) {
ice_ptp_unlock(hw);
goto err_exit;
@@ -1839,6 +2495,13 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Release the global hardware lock */
ice_ptp_unlock(hw);
+ if (!ice_is_e810(hw)) {
+ /* Enable quad interrupts */
+ err = ice_ptp_tx_ena_intr(pf, true, itr);
+ if (err)
+ goto err_exit;
+ }
+
/* Ensure we have a clock device */
err = ice_ptp_create_clock(pf);
if (err)
@@ -1852,72 +2515,106 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
err_clk:
pf->ptp.clock = NULL;
err_exit:
- dev_err(dev, "PTP failed to register clock, err %d\n", err);
-
return err;
}
/**
- * ice_ptp_init - Initialize the PTP support after device probe or reset
+ * ice_ptp_init_work - Initialize PTP work threads
* @pf: Board private structure
+ * @ptp: PF PTP structure
+ */
+static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
+{
+ struct kthread_worker *kworker;
+
+ /* Initialize work functions */
+ kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
+ kthread_init_work(&ptp->extts_work, ice_ptp_extts_work);
+
+ /* Allocate a kworker for handling work required for the ports
+ * connected to the PTP hardware clock.
+ */
+ kworker = kthread_create_worker(0, "ice-ptp-%s",
+ dev_name(ice_pf_to_dev(pf)));
+ if (IS_ERR(kworker))
+ return PTR_ERR(kworker);
+
+ ptp->kworker = kworker;
+
+ /* Start periodic work going */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_init_port - Initialize PTP port structure
+ * @pf: Board private structure
+ * @ptp_port: PTP port structure
+ */
+static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
+{
+ mutex_init(&ptp_port->ps_lock);
+
+ if (ice_is_e810(&pf->hw))
+ return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
+
+ kthread_init_delayed_work(&ptp_port->ov_work,
+ ice_ptp_wait_for_offset_valid);
+ return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);
+}
+
+/**
+ * ice_ptp_init - Initialize PTP hardware clock support
+ * @pf: Board private structure
+ *
+ * Set up the device for interacting with the PTP hardware clock for all
+ * functions, both the function that owns the clock hardware, and the
+ * functions connected to the clock hardware.
*
- * This function sets device up for PTP support. The first time it is run, it
- * will create a clock device. It does not create a clock device if one
- * already exists. It also reconfigures the device after a reset.
+ * The clock owner will allocate and register a ptp_clock with the
+ * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
+ * items used for asynchronous work such as Tx timestamps and periodic work.
*/
void ice_ptp_init(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct kthread_worker *kworker;
+ struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
int err;
- /* PTP is currently only supported on E810 devices */
- if (!ice_is_e810(hw))
- return;
-
- /* Check if this PF owns the source timer */
+ /* If this function owns the clock hardware, it must allocate and
+ * configure the PTP clock device to represent it.
+ */
if (hw->func_caps.ts_func_info.src_tmr_owned) {
err = ice_ptp_init_owner(pf);
if (err)
- return;
+ goto err;
}
- /* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
-
- /* Initialize the PTP port Tx timestamp tracker */
- ice_ptp_init_tx_e810(pf, &pf->ptp.port.tx);
-
- /* Initialize work functions */
- kthread_init_delayed_work(&pf->ptp.work, ice_ptp_periodic_work);
- kthread_init_work(&pf->ptp.extts_work, ice_ptp_extts_work);
+ ptp->port.port_num = hw->pf_id;
+ err = ice_ptp_init_port(pf, &ptp->port);
+ if (err)
+ goto err;
- /* Allocate a kworker for handling work required for the ports
- * connected to the PTP hardware clock.
- */
- kworker = kthread_create_worker(0, "ice-ptp-%s", dev_name(dev));
- if (IS_ERR(kworker)) {
- err = PTR_ERR(kworker);
- goto err_kworker;
- }
- pf->ptp.kworker = kworker;
+ /* Start the PHY timestamping block */
+ ice_ptp_reset_phy_timestamping(pf);
set_bit(ICE_FLAG_PTP, pf->flags);
+ err = ice_ptp_init_work(pf, ptp);
+ if (err)
+ goto err;
- /* Start periodic work going */
- kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
-
- dev_info(dev, "PTP init successful\n");
+ dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
return;
-err_kworker:
+err:
/* If we registered a PTP clock, release it */
if (pf->ptp.clock) {
- ptp_clock_unregister(pf->ptp.clock);
+ ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- dev_err(dev, "PTP failed %d\n", err);
+ clear_bit(ICE_FLAG_PTP, pf->flags);
+ dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
/**
@@ -1941,6 +2638,8 @@ void ice_ptp_release(struct ice_pf *pf)
kthread_cancel_delayed_work_sync(&pf->ptp.work);
+ ice_ptp_port_phy_stop(&pf->ptp.port);
+ mutex_destroy(&pf->ptp.port.ps_lock);
if (pf->ptp.kworker) {
kthread_destroy_worker(pf->ptp.kworker);
pf->ptp.kworker = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index f71ad317d6c8..afd048d69959 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -55,15 +55,21 @@ struct ice_perout_channel {
* struct ice_tx_tstamp - Tracking for a single Tx timestamp
* @skb: pointer to the SKB for this timestamp request
* @start: jiffies when the timestamp was first requested
+ * @cached_tstamp: last read timestamp
*
* This structure tracks a single timestamp request. The SKB pointer is
* provided when initiating a request. The start time is used to ensure that
* we discard old requests that were not fulfilled within a 2 second time
* window.
+ * Timestamp values in the PHY are read only and do not get cleared except at
+ * hardware reset or when a new timestamp value is captured. The cached_tstamp
+ * field is used to detect the case where a new timestamp has not yet been
+ * captured, ensuring that we avoid sending stale timestamp data to the stack.
*/
struct ice_tx_tstamp {
struct sk_buff *skb;
unsigned long start;
+ u64 cached_tstamp;
};
/**
@@ -76,6 +82,8 @@ struct ice_tx_tstamp {
* @quad_offset: offset into timestamp block of the quad to get the real index
* @len: length of the tstamps and in_use fields.
* @init: if true, the tracker is initialized;
+ * @calibrating: if true, the PHY is calibrating the Tx offset. During this
+ * window, timestamps are temporarily disabled.
*/
struct ice_ptp_tx {
struct kthread_work work;
@@ -86,6 +94,7 @@ struct ice_ptp_tx {
u8 quad_offset;
u8 len;
u8 init;
+ u8 calibrating;
};
/* Quad and port information for initializing timestamp blocks */
@@ -95,15 +104,24 @@ struct ice_ptp_tx {
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
*
- * This structure contains PTP data related to the external ports. Currently
- * it is used for tracking the Tx timestamps of a port. In the future this
- * structure will also hold information for the E822 port initialization
- * logic.
+ * This structure contains data indicating whether a single external port is
+ * ready for PTP functionality. It is used to track the port initialization
+ * and determine when the port's PHY offset is valid.
*
* @tx: Tx timestamp tracking for this port
+ * @ov_work: delayed work task for tracking when PHY offset is valid
+ * @ps_lock: mutex used to protect the overall PTP PHY start procedure
+ * @link_up: indicates whether the link is up
+ * @tx_fifo_busy_cnt: number of times the Tx FIFO was busy
+ * @port_num: the port number this structure represents
*/
struct ice_ptp_port {
struct ice_ptp_tx tx;
+ struct kthread_delayed_work ov_work;
+ struct mutex ps_lock; /* protects overall PTP PHY start procedure */
+ bool link_up;
+ u8 tx_fifo_busy_cnt;
+ u8 port_num;
};
#define GLTSYN_TGT_H_IDX_MAX 4
@@ -121,6 +139,7 @@ struct ice_ptp_port {
* @info: structure defining PTP hardware capabilities
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
+ * @reset_time: kernel time after clock stop on reset
*/
struct ice_ptp {
struct ice_ptp_port port;
@@ -134,6 +153,7 @@ struct ice_ptp {
struct ptp_clock_info info;
struct ptp_clock *clock;
struct hwtstamp_config tstamp_config;
+ u64 reset_time;
};
#define __ptp_port_to_ptp(p) \
@@ -146,9 +166,15 @@ struct ice_ptp {
#define ptp_info_to_pf(i) \
container_of(__ptp_info_to_ptp((i)), struct ice_pf, ptp)
+#define PFTSYN_SEM_BYTES 4
#define PTP_SHARED_CLK_IDX_VALID BIT(31)
+#define TS_CMD_MASK 0xF
+#define SYNC_EXEC_CMD 0x3
#define ICE_PTP_TS_VALID BIT(0)
+#define FIFO_EMPTY BIT(2)
+#define FIFO_OK 0xFF
+#define ICE_PTP_FIFO_NUM_CHECKS 5
/* Per-channel register definitions */
#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
#define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8))
@@ -169,11 +195,13 @@ struct ice_ptp {
#define N_PER_OUT_E810T 3
#define N_PER_OUT_E810T_NO_SMA 2
#define N_EXT_TS_E810_NO_SMA 2
+#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
struct ice_pf;
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
int ice_get_ptp_clock_index(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
@@ -182,8 +210,11 @@ void ice_ptp_process_ts(struct ice_pf *pf);
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
+void ice_ptp_reset(struct ice_pf *pf);
+void ice_ptp_prepare_for_reset(struct ice_pf *pf);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
+int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
@@ -195,6 +226,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
return -EOPNOTSUPP;
}
+static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { }
static inline int ice_get_ptp_clock_index(struct ice_pf *pf)
{
return -1;
@@ -210,7 +242,11 @@ static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
static inline void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
+static inline void ice_ptp_reset(struct ice_pf *pf) { }
+static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
+static inline int ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+{ return 0; }
#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
#endif /* _ICE_PTP_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
new file mode 100644
index 000000000000..4109aa3b2fcd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_PTP_CONSTS_H_
+#define _ICE_PTP_CONSTS_H_
+
+/* Constant definitions related to the hardware clock used for PTP 1588
+ * features and functionality.
+ */
+/* Constants defined for the PTP 1588 clock hardware. */
+
+/* struct ice_time_ref_info_e822
+ *
+ * E822 hardware can use different sources as the reference for the PTP
+ * hardware clock. Each clock has different characteristics such as a slightly
+ * different frequency, etc.
+ *
+ * This lookup table defines several constants that depend on the current time
+ * reference. See the struct ice_time_ref_info_e822 for information about the
+ * meaning of each constant.
+ */
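+/* Note: nominal_incval encodes the clock period as fixed-point nanoseconds
+ * with 32 fractional bits, i.e. approximately (10^9 << 32) / pll_freq.
+ */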
+const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
+ /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+ {
+ /* pll_freq */
+ 823437500, /* 823.4375 MHz PLL */
+ /* nominal_incval */
+ 0x136e44fabULL,
+ /* pps_delay */
+ 11,
+ },
+
+ /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ {
+ /* pll_freq */
+ 783360000, /* 783.36 MHz */
+ /* nominal_incval */
+ 0x146cc2177ULL,
+ /* pps_delay */
+ 12,
+ },
+
+ /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ {
+ /* pll_freq */
+ 796875000, /* 796.875 MHz */
+ /* nominal_incval */
+ 0x141414141ULL,
+ /* pps_delay */
+ 12,
+ },
+
+ /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ {
+ /* pll_freq */
+ 816000000, /* 816 MHz */
+ /* nominal_incval */
+ 0x139b9b9baULL,
+ /* pps_delay */
+ 12,
+ },
+
+ /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ {
+ /* pll_freq */
+ 830078125, /* 830.078125 MHz */
+ /* nominal_incval */
+ 0x134679aceULL,
+ /* pps_delay */
+ 11,
+ },
+
+ /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ {
+ /* pll_freq */
+ 783360000, /* 783.36 MHz */
+ /* nominal_incval */
+ 0x146cc2177ULL,
+ /* pps_delay */
+ 12,
+ },
+};
+
+const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
+ /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+ {
+ /* refclk_pre_div */
+ 1,
+ /* feedback_div */
+ 197,
+ /* frac_n_div */
+ 2621440,
+ /* post_pll_div */
+ 6,
+ },
+
+ /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 223,
+ /* frac_n_div */
+ 524288,
+ /* post_pll_div */
+ 7,
+ },
+
+ /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 223,
+ /* frac_n_div */
+ 524288,
+ /* post_pll_div */
+ 7,
+ },
+
+ /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 159,
+ /* frac_n_div */
+ 1572864,
+ /* post_pll_div */
+ 6,
+ },
+
+ /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 159,
+ /* frac_n_div */
+ 1572864,
+ /* post_pll_div */
+ 6,
+ },
+
+ /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ {
+ /* refclk_pre_div */
+ 10,
+ /* feedback_div */
+ 223,
+ /* frac_n_div */
+ 524288,
+ /* post_pll_div */
+ 7,
+ },
+};
+
+/* struct ice_vernier_info_e822
+ *
+ * E822 hardware calibrates the delay of the timestamp indication from the
+ * actual packet transmission or reception during the initialization of the
+ * PHY. To do this, the hardware mechanism uses some conversions between the
+ * various clocks within the PHY block. This table defines constants used to
+ * calculate the correct conversion ratios in the PHY registers.
+ *
+ * Many of the values relate to the PAR/PCS clock conversion registers. For
+ * these registers, a value of 0 means that the associated register is not
+ * used by this link speed, and that the register should be cleared by writing
+ * 0. Other values specify the clock frequency in Hz.
+ */
+const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
+ /* ICE_PTP_LNK_SPD_1G */
+ {
+ /* tx_par_clk */
+ 31250000, /* 31.25 MHz */
+ /* rx_par_clk */
+ 31250000, /* 31.25 MHz */
+ /* tx_pcs_clk */
+ 125000000, /* 125 MHz */
+ /* rx_pcs_clk */
+ 125000000, /* 125 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 0, /* unused */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* tx_fixed_delay */
+ 25140,
+ /* pmd_adj_divisor */
+ 10000000,
+ /* rx_fixed_delay */
+ 17372,
+ },
+ /* ICE_PTP_LNK_SPD_10G */
+ {
+ /* tx_par_clk */
+ 257812500, /* 257.8125 MHz */
+ /* rx_par_clk */
+ 257812500, /* 257.8125 MHz */
+ /* tx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* rx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 0, /* unused */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* tx_fixed_delay */
+ 6938,
+ /* pmd_adj_divisor */
+ 82500000,
+ /* rx_fixed_delay */
+ 6212,
+ },
+ /* ICE_PTP_LNK_SPD_25G */
+ {
+ /* tx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* rx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 0, /* unused */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* tx_fixed_delay */
+ 2778,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 2491,
+ },
+ /* ICE_PTP_LNK_SPD_25G_RS */
+ {
+ /* tx_par_clk */
+ 0, /* unused */
+ /* rx_par_clk */
+ 0, /* unused */
+ /* tx_pcs_clk */
+ 0, /* unused */
+ /* rx_pcs_clk */
+ 0, /* unused */
+ /* tx_desk_rsgb_par */
+ 161132812, /* 161.1328125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_par */
+ 161132812, /* 161.1328125 MHz Reed Solomon gearbox */
+ /* tx_desk_rsgb_pcs */
+ 97656250, /* 97.65625 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_pcs */
+ 97656250, /* 97.65625 MHz Reed Solomon gearbox */
+ /* tx_fixed_delay */
+ 3928,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 29535,
+ },
+ /* ICE_PTP_LNK_SPD_40G */
+ {
+ /* tx_par_clk */
+ 257812500,
+ /* rx_par_clk */
+ 257812500,
+ /* tx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* rx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 156250000, /* 156.25 MHz deskew clock */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 156250000, /* 156.25 MHz deskew clock */
+ /* tx_fixed_delay */
+ 5666,
+ /* pmd_adj_divisor */
+ 82500000,
+ /* rx_fixed_delay */
+ 4244,
+ },
+ /* ICE_PTP_LNK_SPD_50G */
+ {
+ /* tx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* rx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 195312500, /* 195.3125 MHz deskew clock */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 195312500, /* 195.3125 MHz deskew clock */
+ /* tx_fixed_delay */
+ 2778,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 2868,
+ },
+ /* ICE_PTP_LNK_SPD_50G_RS */
+ {
+ /* tx_par_clk */
+ 0, /* unused */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 0, /* unused */
+ /* rx_pcs_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_desk_rsgb_par */
+ 322265625, /* 322.265625 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_par */
+ 322265625, /* 322.265625 MHz Reed Solomon gearbox */
+ /* tx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* tx_fixed_delay */
+ 2095,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 14524,
+ },
+ /* ICE_PTP_LNK_SPD_100G_RS */
+ {
+ /* tx_par_clk */
+ 0, /* unused */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 0, /* unused */
+ /* rx_pcs_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_desk_rsgb_par */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_par */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* tx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* tx_fixed_delay */
+ 1620,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 7775,
+ },
+};
+
+#endif /* _ICE_PTP_CONSTS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 29f947c0cd2e..ec8450f034e6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -3,6 +3,8 @@
#include "ice_common.h"
#include "ice_ptp_hw.h"
+#include "ice_ptp_consts.h"
+#include "ice_cgu_regs.h"
/* Low level functions for interacting with and managing the device clock used
* for the Precision Time Protocol.
@@ -29,6 +31,15 @@
*
* For E810 devices, the increment frequency is 812.5 MHz
*
+ * For E822 devices the clock can be derived from different sources, and the
+ * increment has an effective frequency of one of the following:
+ * - 823.4375 MHz
+ * - 783.36 MHz
+ * - 796.875 MHz
+ * - 816 MHz
+ * - 830.078125 MHz
+ * - 783.36 MHz
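+ *   (the 122.88 MHz and 245.76 MHz TIME_REF sources share the same PLL
+ *   rate, hence 783.36 MHz appears twice)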
+ *
* The hardware captures timestamps in the PHY for incoming packets, and for
* outgoing packets on request. To support this, the PHY maintains a timer
* that matches the lower 64 bits of the global source timer.
@@ -37,6 +48,24 @@
* shadow registers are used to prepare the desired initial values. A special
* sync command is issued to trigger copying from the shadow registers into
* the appropriate source and PHY registers simultaneously.
+ *
+ * The driver supports devices which have different PHYs with subtly different
+ * mechanisms to program and control the timers. We divide the devices into
+ * families named after the first major device, E810 and similar devices, and
+ * E822 and similar devices.
+ *
+ * - E822 based devices have additional support for fine grained Vernier
+ * calibration which requires significant setup
+ * - The layout of timestamp data in the PHY register blocks is different
+ * - The way timer synchronization commands are issued is different.
+ *
+ * To support this, very low level functions have an e810 or e822 suffix
+ * indicating what type of device they work on. Higher level abstractions for
+ * tasks that can be done on both devices do not have the suffix and will
+ * correctly look up the appropriate low level function when running.
+ *
+ * Functions which only make sense on a single device family may not have
+ * a suitable generic implementation.
*/
/**
@@ -51,6 +80,2447 @@ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
return hw->func_caps.ts_func_info.tmr_index_assoc;
}
+/**
+ * ice_ptp_read_src_incval - Read source timer increment value
+ * @hw: pointer to HW struct
+ *
+ * Read the increment value of the source timer and return it.
+ */
+static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
+{
+ u32 lo, hi;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+
+ return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
+}
+
+/**
+ * ice_ptp_src_cmd - Prepare source timer for a timer command
+ * @hw: pointer to HW structure
+ * @cmd: Timer command
+ *
+ * Prepare the source timer for an upcoming timer sync command.
+ */
+static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ cmd_val = tmr_idx << SEL_CPK_SRC;
+
+ switch (cmd) {
+ case INIT_TIME:
+ cmd_val |= GLTSYN_CMD_INIT_TIME;
+ break;
+ case INIT_INCVAL:
+ cmd_val |= GLTSYN_CMD_INIT_INCVAL;
+ break;
+ case ADJ_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_TIME;
+ break;
+ case ADJ_TIME_AT_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
+ break;
+ case READ_TIME:
+ cmd_val |= GLTSYN_CMD_READ_TIME;
+ break;
+ }
+
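+ /* The prepared command executes once GLTSYN_CMD_SYNC is written */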
+ wr32(hw, GLTSYN_CMD, cmd_val);
+}
+
+/**
+ * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
+ * @hw: pointer to HW struct
+ *
+ * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
+ * write immediately. This triggers the hardware to begin executing all of the
+ * source and PHY timer commands synchronously.
+ */
+static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
+{
+ wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
+ ice_flush(hw);
+}
+
+/* E822 family functions
+ *
+ * The following functions operate on the E822 family of devices.
+ */
+
+/**
+ * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
+ * @msg: the PHY message buffer to fill in
+ * @port: the port to access
+ * @offset: the register offset
+ */
+static void
+ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
+{
+ int phy_port, phy, quadtype;
+
+ phy_port = port % ICE_PORTS_PER_PHY;
+ phy = port / ICE_PORTS_PER_PHY;
+ quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
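+ /* Ports map to PHYs in groups of ICE_PORTS_PER_PHY; successive quads
+ * within a PHY alternate between the two quad register block types
+ */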
+
+ if (quadtype == 0) {
+ msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
+ msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
+ } else {
+ msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
+ msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
+ }
+
+ if (phy == 0)
+ msg->dest_dev = rmn_0;
+ else if (phy == 1)
+ msg->dest_dev = rmn_1;
+ else
+ msg->dest_dev = rmn_2;
+}
+
+/**
+ * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 64bit register
+ *
+ * Checks if the provided low address is one of the known 64bit PHY values
+ * represented as two 32bit registers. If it is, return the appropriate high
+ * register offset to use.
+ */
+static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case P_REG_PAR_PCS_TX_OFFSET_L:
+ *high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
+ return true;
+ case P_REG_PAR_PCS_RX_OFFSET_L:
+ *high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
+ return true;
+ case P_REG_PAR_TX_TIME_L:
+ *high_addr = P_REG_PAR_TX_TIME_U;
+ return true;
+ case P_REG_PAR_RX_TIME_L:
+ *high_addr = P_REG_PAR_RX_TIME_U;
+ return true;
+ case P_REG_TOTAL_TX_OFFSET_L:
+ *high_addr = P_REG_TOTAL_TX_OFFSET_U;
+ return true;
+ case P_REG_TOTAL_RX_OFFSET_L:
+ *high_addr = P_REG_TOTAL_RX_OFFSET_U;
+ return true;
+ case P_REG_UIX66_10G_40G_L:
+ *high_addr = P_REG_UIX66_10G_40G_U;
+ return true;
+ case P_REG_UIX66_25G_100G_L:
+ *high_addr = P_REG_UIX66_25G_100G_U;
+ return true;
+ case P_REG_TX_CAPTURE_L:
+ *high_addr = P_REG_TX_CAPTURE_U;
+ return true;
+ case P_REG_RX_CAPTURE_L:
+ *high_addr = P_REG_RX_CAPTURE_U;
+ return true;
+ case P_REG_TX_TIMER_INC_PRE_L:
+ *high_addr = P_REG_TX_TIMER_INC_PRE_U;
+ return true;
+ case P_REG_RX_TIMER_INC_PRE_L:
+ *high_addr = P_REG_RX_TIMER_INC_PRE_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 40bit value
+ *
+ * Checks if the provided low address is one of the known 40bit PHY values
+ * split into two registers with the lower 8 bits in the low register and the
+ * upper 32 bits in the high register. If it is, return the appropriate high
+ * register offset to use.
+ */
+static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case P_REG_TIMETUS_L:
+ *high_addr = P_REG_TIMETUS_U;
+ return true;
+ case P_REG_PAR_RX_TUS_L:
+ *high_addr = P_REG_PAR_RX_TUS_U;
+ return true;
+ case P_REG_PAR_TX_TUS_L:
+ *high_addr = P_REG_PAR_TX_TUS_U;
+ return true;
+ case P_REG_PCS_RX_TUS_L:
+ *high_addr = P_REG_PCS_RX_TUS_U;
+ return true;
+ case P_REG_PCS_TX_TUS_L:
+ *high_addr = P_REG_PCS_TX_TUS_U;
+ return true;
+ case P_REG_DESK_PAR_RX_TUS_L:
+ *high_addr = P_REG_DESK_PAR_RX_TUS_U;
+ return true;
+ case P_REG_DESK_PAR_TX_TUS_L:
+ *high_addr = P_REG_DESK_PAR_TX_TUS_U;
+ return true;
+ case P_REG_DESK_PCS_RX_TUS_L:
+ *high_addr = P_REG_DESK_PCS_RX_TUS_U;
+ return true;
+ case P_REG_DESK_PCS_TX_TUS_L:
+ *high_addr = P_REG_DESK_PCS_TX_TUS_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_read_phy_reg_e822 - Read a PHY register
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @offset: PHY register offset to read
+ * @val: on return, the contents read from the PHY
+ *
+ * Read a PHY register for the given port over the device sideband queue.
+ */
+int
+ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ ice_fill_phy_msg_e822(&msg, port, offset);
+ msg.opcode = ice_sbq_msg_rd;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ *val = msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ *
+ * Reads the two registers associated with a 64bit value and returns it in the
+ * val pointer. The offset always specifies the lower register offset to use.
+ * The high offset is looked up. This function only operates on registers
+ * known to be two parts of a 64bit value.
+ */
+static int
+ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
+{
+ u32 low, high;
+ u16 high_addr;
+ int err;
+
+ /* Only operate on registers known to be split into two 32bit
+ * registers.
+ */
+ if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
+ low_addr);
+ return -EINVAL;
+ }
+
+ err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ *val = (u64)high << 32 | low;
+
+ return 0;
+}
+
+/**
+ * ice_write_phy_reg_e822 - Write a PHY register
+ * @hw: pointer to the HW struct
+ * @port: PHY port to write to
+ * @offset: PHY register offset to write
+ * @val: The value to write to the register
+ *
+ * Write a PHY register for the given port over the device sideband queue.
+ */
+int
+ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ ice_fill_phy_msg_e822(&msg, port, offset);
+ msg.opcode = ice_sbq_msg_wr;
+ msg.data = val;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ *
+ * Write the provided 40b value to the two associated registers by splitting
+ * it up into two chunks, the lower 8 bits and the upper 32 bits.
+ */
+static int
+ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+{
+ u32 low, high;
+ u16 high_addr;
+ int err;
+
+ /* Only operate on registers known to be split into a lower 8 bit
+ * register and an upper 32 bit register.
+ */
+ if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
+ low_addr);
+ return -EINVAL;
+ }
+
+ low = (u32)(val & P_REG_40B_LOW_M);
+ high = (u32)(val >> P_REG_40B_HIGH_S);
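+ /* e.g. with the 8/32 bit split, val = 0x123456789A yields low = 0x9A
+ * and high = 0x12345678
+ */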
+
+ err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: the contents of the 64bit value to write to PHY
+ *
+ * Write the 64bit value to the two associated 32bit PHY registers. The offset
+ * is always specified as the lower register, and the high address is looked
+ * up. This function only operates on registers known to be two parts of
+ * a 64bit value.
+ */
+static int
+ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+{
+ u32 low, high;
+ u16 high_addr;
+ int err;
+
+ /* Only operate on registers known to be split into two 32bit
+ * registers.
+ */
+ if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
+ low_addr);
+ return -EINVAL;
+ }
+
+ low = lower_32_bits(val);
+ high = upper_32_bits(val);
+
+ err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fill_quad_msg_e822 - Fill message data for quad register access
+ * @msg: the PHY message buffer to fill in
+ * @quad: the quad to access
+ * @offset: the register offset
+ *
+ * Fill a message buffer for accessing a register in a quad shared between
+ * multiple PHYs.
+ */
+static void
+ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+{
+ u32 addr;
+
+ msg->dest_dev = rmn_0;
+
+ if ((quad % ICE_NUM_QUAD_TYPE) == 0)
+ addr = Q_0_BASE + offset;
+ else
+ addr = Q_1_BASE + offset;
+
+ msg->msg_addr_low = lower_16_bits(addr);
+ msg->msg_addr_high = upper_16_bits(addr);
+}
+
+/**
+ * ice_read_quad_reg_e822 - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @quad: quad to read from
+ * @offset: quad register offset to read
+ * @val: on return, the contents read from the quad
+ *
+ * Read a quad register over the device sideband queue. Quad registers are
+ * shared between multiple PHYs.
+ */
+int
+ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ if (quad >= ICE_MAX_QUAD)
+ return -EINVAL;
+
+ ice_fill_quad_msg_e822(&msg, quad, offset);
+ msg.opcode = ice_sbq_msg_rd;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ *val = msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_quad_reg_e822 - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @quad: quad to write to
+ * @offset: quad register offset to write
+ * @val: The value to write to the register
+ *
+ * Write a quad register over the device sideband queue. Quad registers are
+ * shared between multiple PHYs.
+ */
+int
+ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ if (quad >= ICE_MAX_QUAD)
+ return -EINVAL;
+
+ ice_fill_quad_msg_e822(&msg, quad, offset);
+ msg.opcode = ice_sbq_msg_wr;
+ msg.data = val;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
+ * @hw: pointer to the HW struct
+ * @quad: the quad to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the two associated registers in the
+ * quad memory block that is shared between the internal PHYs of the E822
+ * family of devices.
+ */
+static int
+ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
+{
+ u16 lo_addr, hi_addr;
+ u32 lo, hi;
+ int err;
+
+ lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
+ hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
+
+ err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ /* For E822 based internal PHYs, the timestamp is reported with the
+ * lower 8 bits in the low register, and the upper 32 bits in the high
+ * register.
+ */
+ *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+
+ return 0;
+}
+
+/**
+ * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
+ * @hw: pointer to the HW struct
+ * @quad: the quad to read from
+ * @idx: the timestamp index to reset
+ *
+ * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
+ * shared between the internal PHYs on the E822 devices.
+ */
+static int
+ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
+{
+ u16 lo_addr, hi_addr;
+ int err;
+
+ lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
+ hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
+
+ err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_read_cgu_reg_e822 - Read a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to read
+ * @val: storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ */
+static int
+ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input cgu_msg;
+ int err;
+
+ cgu_msg.opcode = ice_sbq_msg_rd;
+ cgu_msg.dest_dev = cgu;
+ cgu_msg.msg_addr_low = addr;
+ cgu_msg.msg_addr_high = 0x0;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ *val = cgu_msg.data;
+
+ return err;
+}
+
+/**
+ * ice_write_cgu_reg_e822 - Write a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to write
+ * @val: value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ */
+static int
+ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input cgu_msg;
+ int err;
+
+ cgu_msg.opcode = ice_sbq_msg_wr;
+ cgu_msg.dest_dev = cgu;
+ cgu_msg.msg_addr_low = addr;
+ cgu_msg.msg_addr_high = 0x0;
+ cgu_msg.data = val;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ return err;
+}
+
+/**
+ * ice_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Convert the specified TIME_REF clock frequency to a string.
+ */
+static const char *ice_clk_freq_str(u8 clk_freq)
+{
+ switch ((enum ice_time_ref_freq)clk_freq) {
+ case ICE_TIME_REF_FREQ_25_000:
+ return "25 MHz";
+ case ICE_TIME_REF_FREQ_122_880:
+ return "122.88 MHz";
+ case ICE_TIME_REF_FREQ_125_000:
+ return "125 MHz";
+ case ICE_TIME_REF_FREQ_153_600:
+ return "153.6 MHz";
+ case ICE_TIME_REF_FREQ_156_250:
+ return "156.25 MHz";
+ case ICE_TIME_REF_FREQ_245_760:
+ return "245.76 MHz";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Convert the specified clock source to its string name.
+ */
+static const char *ice_clk_src_str(u8 clk_src)
+{
+ switch ((enum ice_clk_src)clk_src) {
+ case ICE_CLK_SRC_TCX0:
+ return "TCX0";
+ case ICE_CLK_SRC_TIME_REF:
+ return "TIME_REF";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
+ * @hw: pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCX0)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ */
+static int
+ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ union tspll_ro_bwm_lf bwm_lf;
+ union nac_cgu_dword19 dw19;
+ union nac_cgu_dword22 dw22;
+ union nac_cgu_dword24 dw24;
+ union nac_cgu_dword9 dw9;
+ int err;
+
+ if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return -EINVAL;
+ }
+
+ if (clk_src == ICE_CLK_SRC_TCX0 &&
+ clk_freq != ICE_TIME_REF_FREQ_25_000) {
+ dev_warn(ice_hw_to_dev(hw),
+ "TCX0 only supports 25 MHz frequency\n");
+ return -EINVAL;
+ }
+
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.field.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.field.time_ref_sel),
+ ice_clk_freq_str(dw9.field.time_ref_freq_sel),
+ bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (dw24.field.ts_pll_enable) {
+ dw24.field.ts_pll_enable = 0;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ dw9.field.time_ref_freq_sel = clk_freq;
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL feedback divisor */
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
+ if (err)
+ return err;
+
+ dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
+ dw19.field.tspll_ndivratio = 1;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL post divisor */
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
+ if (err)
+ return err;
+
+ dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
+ dw22.field.time1588clk_sel_div2 = 0;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL pre divisor and clock source */
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
+ dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
+ dw24.field.time_ref_sel = clk_src;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Finally, enable the PLL */
+ dw24.field.ts_pll_enable = 1;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Wait to verify if the PLL locks */
+ usleep_range(1000, 5000);
+
+ err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ if (!bwm_lf.field.plllock_true_lock_cri) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.field.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.field.time_ref_sel),
+ ice_clk_freq_str(dw9.field.time_ref_freq_sel),
+ bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ return 0;
+}
+
+/**
+ * ice_init_cgu_e822 - Initialize CGU with settings from firmware
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E822 device.
+ */
+static int ice_init_cgu_e822(struct ice_hw *hw)
+{
+ struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+ union tspll_cntr_bist_settings cntr_bist;
+ int err;
+
+ err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ &cntr_bist.val);
+ if (err)
+ return err;
+
+ /* Disable sticky lock detection so the reported lock error is accurate */
+ cntr_bist.field.i_plllock_sel_0 = 0;
+ cntr_bist.field.i_plllock_sel_1 = 0;
+
+ err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ cntr_bist.val);
+ if (err)
+ return err;
+
+ /* Configure the CGU PLL using the parameters from the function
+ * capabilities.
+ */
+ err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
+ (enum ice_clk_src)ts_info->clk_src);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
+ * @hw: pointer to the HW struct
+ *
+ * Set the window length used for the vernier port calibration process.
+ */
+static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
+{
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ int err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
+ PTP_VERNIER_WL);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform PHC initialization steps specific to E822 devices.
+ */
+static int ice_ptp_init_phc_e822(struct ice_hw *hw)
+{
+ int err;
+ u32 regval;
+
+ /* Enable reading switch and PHY registers over the sideband queue */
+#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
+#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
+ regval = rd32(hw, PF_SB_REM_DEV_CTL);
+ regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
+ PF_SB_REM_DEV_CTL_PHY0);
+ wr32(hw, PF_SB_REM_DEV_CTL, regval);
+
+ /* Initialize the Clock Generation Unit */
+ err = ice_init_cgu_e822(hw);
+ if (err)
+ return err;
+
+ /* Set window length for all the ports */
+ return ice_ptp_set_vernier_wl(hw);
+}
+
+/**
+ * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @time: Time to initialize the PHY port clocks to
+ *
+ * Program the PHY port registers with a new initial time value. The port
+ * clock will be initialized once the driver issues an INIT_TIME sync
+ * command. The time value is the upper 32 bits of the PHY timer, usually in
+ * units of nominal nanoseconds.
+ */
+static int
+ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
+{
+ u64 phy_time;
+ u8 port;
+ int err;
+
+ /* The time represents the upper 32 bits of the PHY timer, so we need
+ * to shift to account for this when programming.
+ */
+ phy_time = (u64)time << 32;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ /* Tx case */
+ err = ice_write_64b_phy_reg_e822(hw, port,
+ P_REG_TX_TIMER_INC_PRE_L,
+ phy_time);
+ if (err)
+ goto exit_err;
+
+ /* Rx case */
+ err = ice_write_64b_phy_reg_e822(hw, port,
+ P_REG_RX_TIMER_INC_PRE_L,
+ phy_time);
+ if (err)
+ goto exit_err;
+ }
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
+ port, err);
+
+ return err;
+}
+
+/**
+ * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
+ * @hw: pointer to HW struct
+ * @port: Port number to be programmed
+ * @time: time in cycles to adjust the port Tx and Rx clocks
+ *
+ * Program the port for an atomic adjustment by writing the Tx and Rx timer
+ * registers. The atomic adjustment won't be completed until the driver issues
+ * an ADJ_TIME command.
+ *
+ * Note that time is not in units of nanoseconds. It is in clock time
+ * including the lower sub-nanosecond portion of the port timer.
+ *
+ * Negative adjustments are supported using 2s complement arithmetic.
+ */
+int
+ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
+{
+ u32 l_time, u_time;
+ int err;
+
+ l_time = lower_32_bits(time);
+ u_time = upper_32_bits(time);
+
+ /* Tx case */
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ /* Rx case */
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
+ port, err);
+ return err;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment in nanoseconds
+ *
+ * Prepare the PHY ports for an atomic time adjustment by programming the PHY
+ * Tx and Rx port registers. The actual adjustment is completed by issuing an
+ * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
+ */
+static int
+ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
+{
+ s64 cycles;
+ u8 port;
+
+ /* The port clock supports adjustment of the sub-nanosecond portion of
+ * the clock. We shift the provided adjustment in nanoseconds to
+ * calculate the appropriate adjustment to program into the PHY ports.
+ */
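+ /* For instance (illustrative), adj = -5 yields
+ * cycles = -(5ULL << 32), which is 0xFFFFFFFB00000000 when viewed as
+ * a two's complement clock-time value.
+ */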
+ if (adj > 0)
+ cycles = (s64)adj << 32;
+ else
+ cycles = -(((s64)-adj) << 32);
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ int err;
+
+ err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for a new increment value
+ * @hw: pointer to HW struct
+ * @incval: new increment value to prepare
+ *
+ * Prepare each of the PHY ports for a new increment value by programming the
+ * port's TIMETUS registers. The new increment value will be updated after
+ * issuing an INIT_INCVAL command.
+ */
+static int
+ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
+{
+ int err;
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
+ incval);
+ if (err)
+ goto exit_err;
+ }
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
+ port, err);
+
+ return err;
+}
+
+/**
+ * ice_ptp_read_port_capture - Read a port's local time capture
+ * @hw: pointer to HW struct
+ * @port: Port number to read
+ * @tx_ts: on return, the Tx port time capture
+ * @rx_ts: on return, the Rx port time capture
+ *
+ * Read the port's Tx and Rx local time capture values.
+ *
+ * Note this has no equivalent for the E810 devices.
+ */
+static int
+ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
+{
+ int err;
+
+ /* Tx case */
+ err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
+ (unsigned long long)*tx_ts);
+
+ /* Rx case */
+ err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
+ (unsigned long long)*rx_ts);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: the port to send the command to
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the requested port for an upcoming timer sync command.
+ *
+ * Note there is no equivalent of this operation on E810, as that device
+ * always handles all external PHYs internally.
+ */
+static int
+ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val, val;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ cmd_val = tmr_idx << SEL_PHY_SRC;
+ switch (cmd) {
+ case INIT_TIME:
+ cmd_val |= PHY_CMD_INIT_TIME;
+ break;
+ case INIT_INCVAL:
+ cmd_val |= PHY_CMD_INIT_INCVAL;
+ break;
+ case ADJ_TIME:
+ cmd_val |= PHY_CMD_ADJ_TIME;
+ break;
+ case READ_TIME:
+ cmd_val |= PHY_CMD_READ_TIME;
+ break;
+ case ADJ_TIME_AT_TIME:
+ cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
+ break;
+ }
+
+ /* Tx case */
+ /* Read, modify, write */
+ err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Modify necessary bits only and perform write */
+ val &= ~TS_CMD_MASK;
+ val |= cmd_val;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Rx case */
+ /* Read, modify, write */
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Modify necessary bits only and perform write */
+ val &= ~TS_CMD_MASK;
+ val |= cmd_val;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
+ * @hw: pointer to the HW struct
+ * @cmd: timer command to prepare
+ *
+ * Prepare all ports connected to this device for an upcoming timer sync
+ * command.
+ */
+static int
+ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ int err;
+
+ err = ice_ptp_one_port_cmd(hw, port, cmd);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* E822 Vernier calibration functions
+ *
+ * The following functions are used as part of the vernier calibration of
+ * a port. This calibration increases the precision of the timestamps on the
+ * port.
+ */
+
+/**
+ * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
+ * @hw: pointer to HW struct
+ * @port: the port to read from
+ * @link_out: if non-NULL, holds link speed on success
+ * @fec_out: if non-NULL, holds FEC algorithm on success
+ *
+ * Read the serdes data for the PHY port and extract the link speed and FEC
+ * algorithm.
+ */
+static int
+ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
+ enum ice_ptp_link_spd *link_out,
+ enum ice_ptp_fec_mode *fec_out)
+{
+ enum ice_ptp_link_spd link;
+ enum ice_ptp_fec_mode fec;
+ u32 serdes;
+ int err;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
+ return err;
+ }
+
+ /* Determine the FEC algorithm */
+ fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
+
+ serdes &= P_REG_LINK_SPEED_SERDES_M;
+
+ /* Determine the link speed */
+ if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
+ switch (serdes) {
+ case ICE_PTP_SERDES_25G:
+ link = ICE_PTP_LNK_SPD_25G_RS;
+ break;
+ case ICE_PTP_SERDES_50G:
+ link = ICE_PTP_LNK_SPD_50G_RS;
+ break;
+ case ICE_PTP_SERDES_100G:
+ link = ICE_PTP_LNK_SPD_100G_RS;
+ break;
+ default:
+ return -EIO;
+ }
+ } else {
+ switch (serdes) {
+ case ICE_PTP_SERDES_1G:
+ link = ICE_PTP_LNK_SPD_1G;
+ break;
+ case ICE_PTP_SERDES_10G:
+ link = ICE_PTP_LNK_SPD_10G;
+ break;
+ case ICE_PTP_SERDES_25G:
+ link = ICE_PTP_LNK_SPD_25G;
+ break;
+ case ICE_PTP_SERDES_40G:
+ link = ICE_PTP_LNK_SPD_40G;
+ break;
+ case ICE_PTP_SERDES_50G:
+ link = ICE_PTP_LNK_SPD_50G;
+ break;
+ default:
+ return -EIO;
+ }
+ }
+
+ if (link_out)
+ *link_out = link;
+ if (fec_out)
+ *fec_out = fec;
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
+ * @hw: pointer to HW struct
+ * @port: the port to configure the quad for
+ */
+static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ int err;
+ u32 val;
+ u8 quad;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
+ err);
+ return;
+ }
+
+ quad = port / ICE_PORTS_PER_QUAD;
+
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
+ err);
+ return;
+ }
+
+ if (link_spd >= ICE_PTP_LNK_SPD_40G)
+ val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
+ else
+ val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
+
+ err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
+ err);
+ return;
+ }
+}
+
+/**
+ * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
+ * @hw: pointer to the HW structure
+ * @port: the port to configure
+ *
+ * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
+ * hardware clock time units (TUs). That is, determine the number of TUs per
+ * serdes unit interval, and program the UIX registers with this conversion.
+ *
+ * This conversion is used as part of the calibration process when determining
+ * the additional error of a timestamp vs the real time of transmission or
+ * receipt of the packet.
+ *
+ * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
+ * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
+ *
+ * To calculate the conversion ratio, we use the following facts:
+ *
+ * a) the clock frequency in Hz (cycles per second)
+ * b) the number of TUs per cycle (the increment value of the clock)
+ * c) 1 second per 1 billion nanoseconds
+ * d) the duration of 66 UIs in nanoseconds
+ *
+ * Given these facts, we can use the following table to work out what ratios
+ * to multiply in order to get the number of TUs per 66 UIs:
+ *
+ * cycles | 1 second | incval (TUs) | nanoseconds
+ * -------+--------------+--------------+-------------
+ * second | 1 billion ns | cycle | 66 UIs
+ *
+ * To perform the multiplication using integers without too much loss of
+ * precision, we can use the following equation:
+ *
+ * (freq * incval * LINE_UI) / (100 * 1 billion)
+ *
+ * where LINE_UI is the duration of 6600 UIs in nanoseconds. We scale up to
+ * 6600 UIs instead of 66 in order to avoid fractional nanosecond durations
+ * (66 UIs at 10G/40G is 6.4 ns).
+ *
+ * The increment value has a maximum expected range of about 34 bits, while
+ * the frequency value is about 29 bits. Multiplying these values shouldn't
+ * overflow the 64 bits. However, we must then further multiply them again by
+ * the Serdes unit interval duration. To avoid overflow here, we split the
+ * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
+ * a divide by 390,625,000. This does lose some precision, but avoids
+ * miscalculation due to arithmetic overflow.
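+ *
+ * As a rough illustration (nominal values, not read from hardware): the
+ * increment value is normally chosen so that freq * incval is about
+ * 1e9 * 2^32 TUs per second. Then (tu_per_sec >> 8) * 640 / 390,625,000
+ * is about 2.75e10 TUs, which is 6.4 ns * 2^32, the duration of 6600 UIs
+ * at the 10G/40G serdes rate.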
+ */
+static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, uix;
+ int err;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second divided by 256 */
+ tu_per_sec = (cur_freq * clk_incval) >> 8;
+
+#define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
+#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
+
+ /* Program the 10Gb/40Gb conversion ratio */
+ uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
+
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
+ uix);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Program the 25Gb/100Gb conversion ratio */
+ uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
+
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
+ uix);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ *
+ * Configure the number of TUs for the PAR and PCS clocks used as part of the
+ * timestamp calibration process. This depends on the link speed, as the PHY
+ * uses different markers depending on the speed.
+ *
+ * 1Gb/10Gb/25Gb:
+ * - Tx/Rx PAR/PCS markers
+ *
+ * 25Gb RS:
+ * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
+ *
+ * 40Gb/50Gb:
+ * - Tx/Rx PAR/PCS markers
+ * - Rx Deskew PAR/PCS markers
+ *
+ * 50G RS and 100G RS:
+ * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
+ * - Rx Deskew PAR/PCS markers
+ * - Tx PAR/PCS markers
+ *
+ * To calculate the conversion, we use the PHC clock frequency (cycles per
+ * second), the increment value (TUs per cycle), and the related PHY clock
+ * frequency to calculate the TUs per unit of the PHY link clock. The
+ * following table shows how the units convert:
+ *
+ * cycles | TUs | second
+ * -------+-------+--------
+ * second | cycle | cycles
+ *
+ * For each conversion register, look up the appropriate frequency from the
+ * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
+ * this to the appropriate register, preparing hardware to perform timestamp
+ * calibration to calculate the total Tx or Rx offset to adjust the timestamp
+ * in order to calibrate for the internal PHY delays.
+ *
+ * Note that the increment value ranges up to ~34 bits, and the clock
+ * frequency is ~29 bits, so multiplying them together should fit within the
+ * 64 bit arithmetic.
+ */
+static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per cycle of the PHC clock */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* For each PHY conversion register, look up the appropriate link
+ * speed frequency and determine the TUs per that clock's cycle time.
+ * Split this into a high and low value and then program the
+ * appropriate register. If that link speed does not use the
+ * associated register, write zeros to clear it instead.
+ */
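+ /* Illustrative: with a hypothetical 156.25 MHz PAR clock and the
+ * nominal ~4.3e18 TUs per second, each cycle spans about 2.75e10 TUs,
+ * i.e. 6.4 ns * 2^32.
+ */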
+
+ /* P_REG_PAR_TX_TUS */
+ if (e822_vernier[link_spd].tx_par_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_par_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_PAR_RX_TUS */
+ if (e822_vernier[link_spd].rx_par_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_par_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_PCS_TX_TUS */
+ if (e822_vernier[link_spd].tx_pcs_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_pcs_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_PCS_RX_TUS */
+ if (e822_vernier[link_spd].rx_pcs_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_pcs_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PAR_TX_TUS */
+ if (e822_vernier[link_spd].tx_desk_rsgb_par)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_par);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PAR_RX_TUS */
+ if (e822_vernier[link_spd].rx_desk_rsgb_par)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_par);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PCS_TX_TUS */
+ if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_pcs);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PCS_RX_TUS */
+ if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_pcs);
+ else
+ phy_tus = 0;
+
+ return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
+ phy_tus);
+}
+
+/**
+ * ice_calc_fixed_tx_offset_e822 - Calculate the fixed Tx offset for a port
+ * @hw: pointer to the HW struct
+ * @link_spd: the Link speed to calculate for
+ *
+ * Calculate the fixed offset due to known static latency data.
+ */
+static u64
+ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* Calculate number of TUs to add for the fixed Tx latency. Since the
+ * latency measurement is in 1/100th of a nanosecond, we need to
+ * multiply by tu_per_sec and then divide by 1e11. This calculation
+ * overflows 64 bit integer arithmetic, so break it up into two
+ * divisions by 1e4 first then by 1e7.
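+ *
+ * Illustrative example: a hypothetical fixed delay of 10000 (100 ns in
+ * 1/100 ns units) with the nominal ~4.3e18 TUs per second gives
+ * (tu_per_sec / 1e4) * 10000 / 1e7, about 4.3e11 TUs, which is
+ * 100 ns * 2^32.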
+ */
+ fixed_offset = div_u64(tu_per_sec, 10000);
+ fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
+ fixed_offset = div_u64(fixed_offset, 10000000);
+
+ return fixed_offset;
+}
+
+/**
+ * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
+ * adjust Tx timestamps by. This is calculated by combining some known static
+ * latency along with the Vernier offset computations done by hardware.
+ *
+ * This function must be called only after the offset registers are valid,
+ * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
+ * has measured the offset.
+ *
+ * To avoid overflow, when calculating the offset based on the known static
+ * latency values, we use measurements in 1/100th of a nanosecond, and divide
+ * the TUs per second up front. This avoids overflow while allowing
+ * calculation of the adjustment using integer arithmetic.
+ */
+static int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset, val;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
+
+ /* Read the first Vernier offset from the PHY register and add it to
+ * the total offset.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_1G ||
+ link_spd == ICE_PTP_LNK_SPD_10G ||
+ link_spd == ICE_PTP_LNK_SPD_25G ||
+ link_spd == ICE_PTP_LNK_SPD_25G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_40G ||
+ link_spd == ICE_PTP_LNK_SPD_50G) {
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_PCS_TX_OFFSET_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+ }
+
+ /* For Tx, we only need to use the second Vernier offset for
+ * multi-lane link speeds with RS-FEC. The lanes will always be
+ * aligned.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_TX_TIME_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+ }
+
+ /* Now that the total offset has been calculated, program it to the
+ * PHY and indicate that the Tx offset is ready. After this,
+ * timestamps will be enabled.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_fixed_tx_offset_e822 - Configure Tx offset for bypass mode
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Calculate and program the fixed Tx offset, and indicate that the offset is
+ * ready. This can be used when operating in bypass mode.
+ */
+static int
+ice_phy_cfg_fixed_tx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
+
+ /* Program the fixed Tx offset into the P_REG_TOTAL_TX_OFFSET_L
+ * register, then indicate that the Tx offset is ready. After this,
+ * timestamps will be enabled.
+ *
+ * Note that this skips including the more precise offsets generated
+ * by the Vernier calibration.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to adjust for
+ * @link_spd: the current link speed of the PHY
+ * @fec_mode: the current FEC mode of the PHY
+ * @pmd_adj: on return, the amount to adjust the Rx total offset by
+ *
+ * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
+ * This varies by link speed and FEC mode. The value calculated accounts for
+ * various delays caused when receiving a packet.
+ */
+static int
+ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
+ enum ice_ptp_link_spd link_spd,
+ enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
+ u8 pmd_align;
+ u32 val;
+ int err;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
+ err);
+ return err;
+ }
+
+ pmd_align = (u8)val;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* The PMD alignment adjustment measurement depends on the link speed,
+ * and whether FEC is enabled. For each link speed, the alignment
+ * adjustment is calculated by dividing a value by the length of
+ * a Time Unit in nanoseconds.
+ *
+ * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
+ * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
+ * 10G w/FEC: align * 0.1 * 32/33
+ * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
+ * 25G w/FEC: align * 0.4 * 32/33
+ * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
+ * 40G w/FEC: align * 0.1 * 32/33
+ * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
+ * 50G w/FEC: align * 0.8 * 32/33
+ *
+ * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
+ *
+ * To allow for calculating this value using integer arithmetic, we
+ * instead start with the number of TUs per second, (inverse of the
+ * length of a Time Unit in nanoseconds), multiply by a value based
+ * on the PMD alignment register, and then divide by the right value
+ * calculated based on the table above. To avoid integer overflow this
+ * division is broken up into a step of dividing by 125 first.
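+ *
+ * Illustrative: at 10G with pmd_align = 33 and an assumed divisor of
+ * 82,500,000, adj = (tu_per_sec / 125) * 33 / 82500000, roughly
+ * 1.37e10 TUs, matching the expected 33 * 0.1 * 32/33 = 3.2 ns.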
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_1G) {
+ if (pmd_align == 4)
+ mult = 10;
+ else
+ mult = (pmd_align + 6) % 10;
+ } else if (link_spd == ICE_PTP_LNK_SPD_10G ||
+ link_spd == ICE_PTP_LNK_SPD_25G ||
+ link_spd == ICE_PTP_LNK_SPD_40G ||
+ link_spd == ICE_PTP_LNK_SPD_50G) {
+ /* If Clause 74 FEC, always calculate PMD adjust */
+ if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
+ mult = pmd_align;
+ else
+ mult = 0;
+ } else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+ if (pmd_align < 17)
+ mult = pmd_align + 40;
+ else
+ mult = pmd_align;
+ } else {
+ ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
+ link_spd);
+ mult = 0;
+ }
+
+ /* In some cases, there's no need to adjust for the PMD alignment */
+ if (!mult) {
+ *pmd_adj = 0;
+ return 0;
+ }
+
+ /* Calculate the adjustment by multiplying TUs per second by the
+ * appropriate multiplier and divisor. To avoid overflow, we first
+ * divide by 125, and then handle remaining divisor based on the link
+ * speed pmd_adj_divisor value.
+ */
+ adj = div_u64(tu_per_sec, 125);
+ adj *= mult;
+ adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+ /* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
+ * cycle count is necessary.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
+ u64 cycle_adj;
+ u8 rx_cycle;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
+ &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
+ err);
+ return err;
+ }
+
+ rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
+ if (rx_cycle) {
+ mult = (4 - rx_cycle) * 40;
+
+ cycle_adj = div_u64(tu_per_sec, 125);
+ cycle_adj *= mult;
+ cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+ adj += cycle_adj;
+ }
+ } else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
+ u64 cycle_adj;
+ u8 rx_cycle;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
+ &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
+ err);
+ return err;
+ }
+
+ rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
+ if (rx_cycle) {
+ mult = rx_cycle * 40;
+
+ cycle_adj = div_u64(tu_per_sec, 125);
+ cycle_adj *= mult;
+ cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+ adj += cycle_adj;
+ }
+ }
+
+ /* Return the calculated adjustment */
+ *pmd_adj = adj;
+
+ return 0;
+}
+
+/**
+ * ice_calc_fixed_rx_offset_e822 - Calculate the fixed Rx offset for a port
+ * @hw: pointer to HW struct
+ * @link_spd: The Link speed to calculate for
+ *
+ * Determine the fixed Rx latency for a given link speed.
+ */
+static u64
+ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* Calculate number of TUs to add for the fixed Rx latency. Since the
+ * latency measurement is in 1/100th of a nanosecond, we need to
+ * multiply by tu_per_sec and then divide by 1e11. This calculation
+ * overflows 64 bit integer arithmetic, so break it up into two
+ * divisions by 1e4 first then by 1e7.
+ */
+ fixed_offset = div_u64(tu_per_sec, 10000);
+ fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
+ fixed_offset = div_u64(fixed_offset, 10000000);
+
+ return fixed_offset;
+}
+
+/**
+ * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
+ * adjust Rx timestamps by. This combines calculations from the Vernier offset
+ * measurements taken in hardware with some data about known fixed delay as
+ * well as adjusting for multi-lane alignment delay.
+ *
+ * This function must be called only after the offset registers are valid,
+ * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
+ * has measured the offset.
+ *
+ * To avoid overflow, when calculating the offset based on the known static
+ * latency values, we use measurements in 1/100th of a nanosecond, and divide
+ * the TUs per second up front. This avoids overflow while allowing
+ * calculation of the adjustment using integer arithmetic.
+ */
+static int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset, pmd, val;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
+
+ /* Read the first Vernier offset from the PHY register and add it to
+ * the total offset.
+ */
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_PCS_RX_OFFSET_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+
+ /* For Rx, all multi-lane link speeds include a second Vernier
+ * calibration, because the lanes might not be aligned.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_40G ||
+ link_spd == ICE_PTP_LNK_SPD_50G ||
+ link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_RX_TIME_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+ }
+
+ /* In addition, Rx must account for the PMD alignment */
+ err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
+ if (err)
+ return err;
+
+ /* For RS-FEC, this adjustment adds delay, but for other modes, it
+ * subtracts delay.
+ */
+ if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
+ total_offset += pmd;
+ else
+ total_offset -= pmd;
+
+ /* Now that the total offset has been calculated, program it to the
+ * PHY and indicate that the Rx offset is ready. After this,
+ * timestamps will be enabled.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_fixed_rx_offset_e822 - Configure fixed Rx offset for bypass mode
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Calculate and program the fixed Rx offset, and indicate that the offset is
+ * ready. This can be used when operating in bypass mode.
+ */
+static int
+ice_phy_cfg_fixed_rx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
+
+ /* Program the fixed Rx offset into the P_REG_TOTAL_RX_OFFSET_L
+ * register, then indicate that the Rx offset is ready. After this,
+ * timestamps will be enabled.
+ *
+ * Note that this skips including the more precise offsets generated
+ * by Vernier calibration.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @phy_time: on return, the 64bit PHY timer value
+ * @phc_time: on return, the lower 64bits of PHC time
+ *
+ * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
+ * timer values.
+ */
+static int
+ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
+ u64 *phc_time)
+{
+ u64 tx_time, rx_time;
+ u32 zo, lo;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ /* Prepare the PHC timer for a READ_TIME capture command */
+ ice_ptp_src_cmd(hw, READ_TIME);
+
+ /* Prepare the PHY timer for a READ_TIME capture command */
+ err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
+ if (err)
+ return err;
+
+ /* Issue the sync to start the READ_TIME capture */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Read the captured PHC time from the shadow time registers */
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ *phc_time = (u64)lo << 32 | zo;
+
+ /* Read the captured PHY time from the PHY shadow registers */
+ err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
+ if (err)
+ return err;
+
+ /* If the PHY Tx and Rx timers don't match, log a warning message.
+ * Note that this should not happen in normal circumstances since the
+ * driver always programs them together.
+ */
+ if (tx_time != rx_time)
+ dev_warn(ice_hw_to_dev(hw),
+ "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
+ port, (unsigned long long)tx_time,
+ (unsigned long long)rx_time);
+
+ *phy_time = tx_time;
+
+ return 0;
+}
+
+/**
+ * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to synchronize
+ *
+ * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
+ * This is done by issuing a READ_TIME command which triggers a simultaneous
+ * read of the PHY timer and PHC timer. Then we use the difference to
+ * calculate an appropriate 2s complement addition to add to the PHY timer in
+ * order to ensure it reads the same value as the primary PHC timer.
+ */
+static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
+{
+ u64 phc_time, phy_time, difference;
+ int err;
+
+ if (!ice_ptp_lock(hw)) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ /* Calculate the amount required to add to the port time in order for
+ * it to match the PHC time.
+ *
+ * Note that the port adjustment is done using 2s complement
+ * arithmetic. This is convenient since it means that we can simply
+ * calculate the difference between the PHC time and the port time,
+ * and it will be interpreted correctly.
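+ *
+ * For instance, if the port timer leads the PHC, the unsigned
+ * subtraction wraps to the equivalent negative two's complement value,
+ * and the atomic adjustment moves the port timer backwards.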
+ */
+ difference = phc_time - phy_time;
+
+ err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
+ if (err)
+ goto err_unlock;
+
+ err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
+ if (err)
+ goto err_unlock;
+
+ /* Issue the sync to activate the time adjustment */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Re-capture the timer values to flush the command registers and
+ * verify that the time was properly adjusted.
+ */
+ err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ dev_info(ice_hw_to_dev(hw),
+ "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
+ port, (unsigned long long)phy_time,
+ (unsigned long long)phc_time);
+
+ ice_ptp_unlock(hw);
+
+ return 0;
+
+err_unlock:
+ ice_ptp_unlock(hw);
+ return err;
+}
+
+/**
+ * ice_stop_phy_timer_e822 - Stop the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to stop
+ * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
+ *
+ * Stop the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ */
+int
+ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
+{
+ int err;
+ u32 val;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
+ if (err)
+ return err;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ if (err)
+ return err;
+
+ val &= ~P_REG_PS_START_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val &= ~P_REG_PS_ENA_CLK_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ if (soft_reset) {
+ val |= P_REG_PS_SFT_RESET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_start_phy_timer_e822 - Start the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to start
+ * @bypass: if true, start the PHY in bypass mode
+ *
+ * Start the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ *
+ * Bypass mode enables timestamps immediately without waiting for Vernier
+ * calibration to complete. Hardware will still continue taking Vernier
+ * measurements on Tx or Rx of packets, but they will not be applied to
+ * timestamps. Use ice_phy_exit_bypass_e822 to exit bypass mode once hardware
+ * has completed offset calculation.
+ */
+int
+ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass)
+{
+ u32 lo, hi, val;
+ u64 incval;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ err = ice_stop_phy_timer_e822(hw, port, false);
+ if (err)
+ return err;
+
+ ice_phy_cfg_lane_e822(hw, port);
+
+ err = ice_phy_cfg_uix_e822(hw, port);
+ if (err)
+ return err;
+
+ err = ice_phy_cfg_parpcs_e822(hw, port);
+ if (err)
+ return err;
+
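+ /* Mirror the source timer's increment value into the port's TIMETUS
+ * register so the PHY timer ticks at the same rate as the PHC.
+ */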
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ incval = (u64)hi << 32 | lo;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
+ if (err)
+ return err;
+
+ err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
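+ /* Assert the port soft reset, set the START bit, then deassert the
+ * reset so the port timer starts from a clean state.
+ */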
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ if (err)
+ return err;
+
+ val |= P_REG_PS_SFT_RESET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val |= P_REG_PS_START_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val &= ~P_REG_PS_SFT_RESET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ val |= P_REG_PS_ENA_CLK_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val |= P_REG_PS_LOAD_OFFSET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ err = ice_sync_phy_timer_e822(hw, port);
+ if (err)
+ return err;
+
+ if (bypass) {
+ val |= P_REG_PS_BYPASS_MODE_M;
+ /* Enter BYPASS mode, enabling timestamps immediately. */
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ /* Program the fixed Tx offset */
+ err = ice_phy_cfg_fixed_tx_offset_e822(hw, port);
+ if (err)
+ return err;
+
+ /* Program the fixed Rx offset */
+ err = ice_phy_cfg_fixed_rx_offset_e822(hw, port);
+ if (err)
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_phy_exit_bypass_e822 - Exit bypass mode, after vernier calculations
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * After hardware finishes vernier calculations for the Tx and Rx offset, this
+ * function can be used to exit bypass mode by updating the total Tx and Rx
+ * offsets, and then disabling bypass. This will enable hardware to include
+ * the more precise offset calibrations, increasing precision of the generated
+ * timestamps.
+ *
+ * This cannot be done until hardware has measured the offsets, which requires
+ * waiting until at least one packet has been sent and received by the device.
+ */
+int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port)
+{
+ int err;
+ u32 val;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ if (!(val & P_REG_TX_OV_STATUS_OV_M)) {
+ ice_debug(hw, ICE_DBG_PTP, "Tx offset is not yet valid for port %u\n",
+ port);
+ return -EBUSY;
+ }
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ if (!(val & P_REG_TX_OV_STATUS_OV_M)) {
+ ice_debug(hw, ICE_DBG_PTP, "Rx offset is not yet valid for port %u\n",
+ port);
+ return -EBUSY;
+ }
+
+ err = ice_phy_cfg_tx_offset_e822(hw, port);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to program total Tx offset for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ err = ice_phy_cfg_rx_offset_e822(hw, port);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to program total Rx offset for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ /* Exit bypass mode now that the offset has been updated */
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read P_REG_PS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ if (!(val & P_REG_PS_BYPASS_MODE_M))
+ ice_debug(hw, ICE_DBG_PTP, "Port %u not in bypass mode\n",
+ port);
+
+ val &= ~P_REG_PS_BYPASS_MODE_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to disable bypass for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ dev_info(ice_hw_to_dev(hw), "Exiting bypass mode on PHY port %u\n",
+ port);
+
+ return 0;
+}
+
/* E810 functions
*
* The following functions operate on the E810 series devices which use
@@ -68,18 +2538,18 @@ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
- int status;
+ int err;
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_rd;
msg.dest_dev = rmn_0;
- status = ice_sbq_rw_reg(hw, &msg);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
- status);
- return status;
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
}
*val = msg.data;
@@ -98,7 +2568,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input msg = {0};
- int status;
+ int err;
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
@@ -106,11 +2576,11 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.dest_dev = rmn_0;
msg.data = val;
- status = ice_sbq_rw_reg(hw, &msg);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
- status);
- return status;
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -130,23 +2600,23 @@ static int
ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
{
u32 lo_addr, hi_addr, lo, hi;
- int status;
+ int err;
lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
- status = ice_read_phy_reg_e810(hw, lo_addr, &lo);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_read_phy_reg_e810(hw, lo_addr, &lo);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
+ err);
+ return err;
}
- status = ice_read_phy_reg_e810(hw, hi_addr, &hi);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_read_phy_reg_e810(hw, hi_addr, &hi);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
+ err);
+ return err;
}
/* For E810 devices, the timestamp is reported with the lower 32 bits
@@ -169,23 +2639,23 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
{
u32 lo_addr, hi_addr;
- int status;
+ int err;
lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
- status = ice_write_phy_reg_e810(hw, lo_addr, 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, lo_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, hi_addr, 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, hi_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -200,17 +2670,32 @@ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
*/
int ice_ptp_init_phy_e810(struct ice_hw *hw)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
- GLTSYN_ENA_TSYN_ENA_M);
- if (status)
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
+ GLTSYN_ENA_TSYN_ENA_M);
+ if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
- status);
+ err);
- return status;
+ return err;
+}
+
+/**
+ * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform E810-specific PTP hardware clock initialization steps.
+ */
+static int ice_ptp_init_phc_e810(struct ice_hw *hw)
+{
+ /* Ensure synchronization delay is zero */
+ wr32(hw, GLTSYN_SYNC_DLAY, 0);
+
+ /* Initialize the PHY */
+ return ice_ptp_init_phy_e810(hw);
}
/**
@@ -227,22 +2712,22 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw)
*/
static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -263,26 +2748,26 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
*/
static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Adjustments are represented as signed 2's complement values in
* nanoseconds. Sub-nanosecond adjustment is not supported.
*/
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -300,25 +2785,25 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
{
u32 high, low;
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
low = lower_32_bits(incval);
high = upper_32_bits(incval);
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -335,7 +2820,7 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val, val;
- int status;
+ int err;
switch (cmd) {
case INIT_TIME:
@@ -356,20 +2841,20 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
}
/* Read, modify, write */
- status = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, status %d\n", status);
- return status;
+ err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
+ return err;
}
/* Modify necessary bits only and perform write */
val &= ~TS_CMD_MASK_E810;
val |= cmd_val;
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, status %d\n", status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
+ return err;
}
return 0;
@@ -377,12 +2862,9 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
/* Device agnostic functions
*
- * The following functions implement useful behavior to hide the differences
- * between E810 and other devices. They call the device-specific
- * implementations where necessary.
- *
- * Currently, the driver only supports E810, but future work will enable
- * support for E822-based devices.
+ * The following functions implement shared behavior common to both E822 and
+ * E810 devices, possibly calling a device specific implementation where
+ * necessary.
*/
/**
@@ -433,42 +2915,6 @@ void ice_ptp_unlock(struct ice_hw *hw)
}
/**
- * ice_ptp_src_cmd - Prepare source timer for a timer command
- * @hw: pointer to HW structure
- * @cmd: Timer command
- *
- * Prepare the source timer for an upcoming timer sync command.
- */
-static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
-{
- u32 cmd_val;
- u8 tmr_idx;
-
- tmr_idx = ice_get_ptp_src_clock_index(hw);
- cmd_val = tmr_idx << SEL_CPK_SRC;
-
- switch (cmd) {
- case INIT_TIME:
- cmd_val |= GLTSYN_CMD_INIT_TIME;
- break;
- case INIT_INCVAL:
- cmd_val |= GLTSYN_CMD_INIT_INCVAL;
- break;
- case ADJ_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_TIME;
- break;
- case ADJ_TIME_AT_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
- break;
- case READ_TIME:
- cmd_val |= GLTSYN_CMD_READ_TIME;
- break;
- }
-
- wr32(hw, GLTSYN_CMD, cmd_val);
-}
-
-/**
* ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
* @hw: pointer to HW struct
* @cmd: the command to issue
@@ -480,23 +2926,26 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
- int status;
+ int err;
/* First, prepare the source timer */
ice_ptp_src_cmd(hw, cmd);
/* Next, prepare the ports */
- status = ice_ptp_port_cmd_e810(hw, cmd);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, status %d\n",
- cmd, status);
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_port_cmd_e810(hw, cmd);
+ else
+ err = ice_ptp_port_cmd_e822(hw, cmd);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
+ cmd, err);
+ return err;
}
- /* Write the sync command register to drive both source and PHY timer commands
- * synchronously
+ /* Write the sync command register to drive both source and PHY timer
+ * commands synchronously
*/
- wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
+ ice_ptp_exec_tmr_cmd(hw);
return 0;
}
@@ -516,8 +2965,8 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
int ice_ptp_init_time(struct ice_hw *hw, u64 time)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
@@ -528,9 +2977,12 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- status = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
- if (status)
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
+ else
+ err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
+ if (err)
+ return err;
return ice_ptp_tmr_cmd(hw, INIT_TIME);
}
@@ -551,8 +3003,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
*/
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
@@ -560,9 +3012,12 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- status = ice_ptp_prep_phy_incval_e810(hw, incval);
- if (status)
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_prep_phy_incval_e810(hw, incval);
+ else
+ err = ice_ptp_prep_phy_incval_e822(hw, incval);
+ if (err)
+ return err;
return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
}
@@ -576,16 +3031,16 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
*/
int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
{
- int status;
+ int err;
if (!ice_ptp_lock(hw))
return -EBUSY;
- status = ice_ptp_write_incval(hw, incval);
+ err = ice_ptp_write_incval(hw, incval);
ice_ptp_unlock(hw);
- return status;
+ return err;
}
/**
@@ -603,8 +3058,8 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
*/
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
@@ -616,9 +3071,12 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- status = ice_ptp_prep_phy_adj_e810(hw, adj);
- if (status)
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_prep_phy_adj_e810(hw, adj);
+ else
+ err = ice_ptp_prep_phy_adj_e822(hw, adj);
+ if (err)
+ return err;
return ice_ptp_tmr_cmd(hw, ADJ_TIME);
}
@@ -630,11 +3088,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
* @idx: the timestamp index to read
* @tstamp: on return, the 40bit timestamp value
*
- * Read a 40bit timestamp value out of the timestamp block.
+ * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
+ * the block is the quad to read from. For E810 devices, the block is the
+ * logical port to read from.
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
+ if (ice_is_e810(hw))
+ return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
+ else
+ return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
}
/**
@@ -643,11 +3106,16 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
* @block: the block to read from
* @idx: the timestamp index to reset
*
- * Clear a timestamp, resetting its valid bit, from the timestamp block.
+ * Clear a timestamp, resetting its valid bit, from the timestamp block. For
+ * E822 devices, the block is the quad to clear from. For E810 devices, the
+ * block is the logical port to clear from.
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- return ice_clear_phy_tstamp_e810(hw, block, idx);
+ if (ice_is_e810(hw))
+ return ice_clear_phy_tstamp_e810(hw, block, idx);
+ else
+ return ice_clear_phy_tstamp_e822(hw, block, idx);
}
/* E810T SMA functions
@@ -800,3 +3268,25 @@ bool ice_is_pca9575_present(struct ice_hw *hw)
return !status && handle;
}
+
+/**
+ * ice_ptp_init_phc - Initialize PTP hardware clock
+ * @hw: pointer to the HW struct
+ *
+ * Perform the steps required to initialize the PTP hardware clock.
+ */
+int ice_ptp_init_phc(struct ice_hw *hw)
+{
+ u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Enable source clocks */
+ wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Clear event err indications for auxiliary pins */
+ (void)rd32(hw, GLTSYN_STAT(src_idx));
+
+ if (ice_is_e810(hw))
+ return ice_ptp_init_phc_e810(hw);
+ else
+ return ice_ptp_init_phc_e822(hw);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index b2984b5c22c1..519e75462e67 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -12,6 +12,112 @@ enum ice_ptp_tmr_cmd {
READ_TIME
};
+enum ice_ptp_serdes {
+ ICE_PTP_SERDES_1G,
+ ICE_PTP_SERDES_10G,
+ ICE_PTP_SERDES_25G,
+ ICE_PTP_SERDES_40G,
+ ICE_PTP_SERDES_50G,
+ ICE_PTP_SERDES_100G
+};
+
+enum ice_ptp_link_spd {
+ ICE_PTP_LNK_SPD_1G,
+ ICE_PTP_LNK_SPD_10G,
+ ICE_PTP_LNK_SPD_25G,
+ ICE_PTP_LNK_SPD_25G_RS,
+ ICE_PTP_LNK_SPD_40G,
+ ICE_PTP_LNK_SPD_50G,
+ ICE_PTP_LNK_SPD_50G_RS,
+ ICE_PTP_LNK_SPD_100G_RS,
+ NUM_ICE_PTP_LNK_SPD /* Must be last */
+};
+
+enum ice_ptp_fec_mode {
+ ICE_PTP_FEC_MODE_NONE,
+ ICE_PTP_FEC_MODE_CLAUSE74,
+ ICE_PTP_FEC_MODE_RS_FEC
+};
+
+/**
+ * struct ice_time_ref_info_e822
+ * @pll_freq: Frequency of PLL that drives timer ticks in Hz
+ * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
+ * @pps_delay: propagation delay of the PPS output signal
+ *
+ * Characteristic information for the various TIME_REF sources possible in the
+ * E822 devices
+ */
+struct ice_time_ref_info_e822 {
+ u64 pll_freq;
+ u64 nominal_incval;
+ u8 pps_delay;
+};
+
+/**
+ * struct ice_vernier_info_e822
+ * @tx_par_clk: Frequency used to calculate P_REG_PAR_TX_TUS
+ * @rx_par_clk: Frequency used to calculate P_REG_PAR_RX_TUS
+ * @tx_pcs_clk: Frequency used to calculate P_REG_PCS_TX_TUS
+ * @rx_pcs_clk: Frequency used to calculate P_REG_PCS_RX_TUS
+ * @tx_desk_rsgb_par: Frequency used to calculate P_REG_DESK_PAR_TX_TUS
+ * @rx_desk_rsgb_par: Frequency used to calculate P_REG_DESK_PAR_RX_TUS
+ * @tx_desk_rsgb_pcs: Frequency used to calculate P_REG_DESK_PCS_TX_TUS
+ * @rx_desk_rsgb_pcs: Frequency used to calculate P_REG_DESK_PCS_RX_TUS
+ * @tx_fixed_delay: Fixed Tx latency measured in 1/100th nanoseconds
+ * @pmd_adj_divisor: Divisor used to calculate PMD alignment adjustment
+ * @rx_fixed_delay: Fixed Rx latency measured in 1/100th nanoseconds
+ *
+ * Table of constants used as part of the Vernier calibration of the Tx
+ * and Rx timestamps. This includes frequency values used to compute TUs per
+ * PAR/PCS clock cycle, and static delay values measured during hardware
+ * design.
+ *
+ * Note that some values are not used for all link speeds, and the
+ * P_REG_DESK_PAR* registers may represent different clock markers at
+ * different link speeds, either the deskew marker for multi-lane link speeds
+ * or the Reed Solomon gearbox marker for RS-FEC.
+ */
+struct ice_vernier_info_e822 {
+ u32 tx_par_clk;
+ u32 rx_par_clk;
+ u32 tx_pcs_clk;
+ u32 rx_pcs_clk;
+ u32 tx_desk_rsgb_par;
+ u32 rx_desk_rsgb_par;
+ u32 tx_desk_rsgb_pcs;
+ u32 rx_desk_rsgb_pcs;
+ u32 tx_fixed_delay;
+ u32 pmd_adj_divisor;
+ u32 rx_fixed_delay;
+};
+
+/**
+ * struct ice_cgu_pll_params_e822
+ * @refclk_pre_div: Reference clock pre-divisor
+ * @feedback_div: Feedback divisor
+ * @frac_n_div: Fractional divisor
+ * @post_pll_div: Post PLL divisor
+ *
+ * Clock Generation Unit parameters used to program the PLL based on the
+ * selected TIME_REF frequency.
+ */
+struct ice_cgu_pll_params_e822 {
+ u32 refclk_pre_div;
+ u32 feedback_div;
+ u32 frac_n_div;
+ u32 post_pll_div;
+};
+
+extern const struct
+ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+
+/* Table of constants related to possible TIME_REF sources */
+extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ];
+
+/* Table of constants for Vernier calibration on E822 */
+extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD];
+
/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
* the E810 devices. Based on a PLL with an 812.5 MHz frequency.
*/
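
A quick cross-check of that comment: at 812.5 MHz one tick is 10^9 / (812.5 * 10^6) = 16/13 ~= 1.230769 ns. Encoded as a 32-bit binary fraction, floor((16/13) * 2^32) = 5286113595 = 0x13B13B13B, which is the value the E810 nominal-increment define (not visible in this hunk) would be expected to carry.
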
@@ -27,6 +133,59 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj);
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
+int ice_ptp_init_phc(struct ice_hw *hw);
+
+/* E822 family functions */
+int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val);
+int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val);
+int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
+int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
+int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time);
+
+/**
+ * ice_e822_time_ref - Get the current TIME_REF from capabilities
+ * @hw: pointer to the HW structure
+ *
+ * Returns the current TIME_REF from the capabilities structure.
+ */
+static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw)
+{
+ return hw->func_caps.ts_func_info.time_ref;
+}
+
+/**
+ * ice_set_e822_time_ref - Set new TIME_REF
+ * @hw: pointer to the HW structure
+ * @time_ref: new TIME_REF to set
+ *
+ * Update the TIME_REF in the capabilities structure in response to some
+ * change, such as an update to the CGU registers.
+ */
+static inline void
+ice_set_e822_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
+{
+ hw->func_caps.ts_func_info.time_ref = time_ref;
+}
+
+static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref)
+{
+ return e822_time_ref[time_ref].pll_freq;
+}
+
+static inline u64 ice_e822_nominal_incval(enum ice_time_ref_freq time_ref)
+{
+ return e822_time_ref[time_ref].nominal_incval;
+}
+
+static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref)
+{
+ return e822_time_ref[time_ref].pps_delay;
+}
+
+/* E822 Vernier calibration functions */
+int ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset);
+int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass);
+int ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port);
/* E810 family functions */
int ice_ptp_init_phy_e810(struct ice_hw *hw);
@@ -36,19 +195,194 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
+#define ICE_PTP_CLOCK_INDEX_0 0x00
+#define ICE_PTP_CLOCK_INDEX_1 0x01
+
/* PHY timer commands */
#define SEL_CPK_SRC 8
+#define SEL_PHY_SRC 3
/* Time Sync command Definitions */
#define GLTSYN_CMD_INIT_TIME BIT(0)
#define GLTSYN_CMD_INIT_INCVAL BIT(1)
+#define GLTSYN_CMD_INIT_TIME_INCVAL (BIT(0) | BIT(1))
#define GLTSYN_CMD_ADJ_TIME BIT(2)
#define GLTSYN_CMD_ADJ_INIT_TIME (BIT(2) | BIT(3))
#define GLTSYN_CMD_READ_TIME BIT(7)
+/* PHY port Time Sync command definitions */
+#define PHY_CMD_INIT_TIME BIT(0)
+#define PHY_CMD_INIT_INCVAL BIT(1)
+#define PHY_CMD_ADJ_TIME (BIT(0) | BIT(1))
+#define PHY_CMD_ADJ_TIME_AT_TIME (BIT(0) | BIT(2))
+#define PHY_CMD_READ_TIME (BIT(0) | BIT(1) | BIT(2))
+
#define TS_CMD_MASK_E810 0xFF
+#define TS_CMD_MASK 0xF
#define SYNC_EXEC_CMD 0x3
+/* Macros to derive port low and high addresses on both quads */
+#define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF)
+#define P_Q0_H(a, p) ((((a) + (0x2000 * (p)))) >> 16)
+#define P_Q1_L(a, p) ((((a) - (0x2000 * ((p) - ICE_PORTS_PER_QUAD)))) & 0xFFFF)
+#define P_Q1_H(a, p) ((((a) - (0x2000 * ((p) - ICE_PORTS_PER_QUAD)))) >> 16)
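+/* Worked example of the macros above (illustrative, assumed usage): for
+ * the port-status register of port 1 in quad 0, a = P_0_BASE + P_REG_PS =
+ * 0x80000 + 0x408 = 0x80408, so the per-port address is 0x80408 +
+ * 0x2000 * 1 = 0x82408, which splits into P_Q0_L() = 0x2408 and
+ * P_Q0_H() = 0x8.
+ */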
+
+/* PHY QUAD register base addresses */
+#define Q_0_BASE 0x94000
+#define Q_1_BASE 0x114000
+
+/* Timestamp memory reset registers */
+#define Q_REG_TS_CTRL 0x618
+#define Q_REG_TS_CTRL_S 0
+#define Q_REG_TS_CTRL_M BIT(0)
+
+/* Timestamp availability status registers */
+#define Q_REG_TX_MEMORY_STATUS_L 0xCF0
+#define Q_REG_TX_MEMORY_STATUS_U 0xCF4
+
+/* Tx FIFO status registers */
+#define Q_REG_FIFO23_STATUS 0xCF8
+#define Q_REG_FIFO01_STATUS 0xCFC
+#define Q_REG_FIFO02_S 0
+#define Q_REG_FIFO02_M ICE_M(0x3FF, 0)
+#define Q_REG_FIFO13_S 10
+#define Q_REG_FIFO13_M ICE_M(0x3FF, 10)
+
+/* Interrupt control Config registers */
+#define Q_REG_TX_MEM_GBL_CFG 0xC08
+#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_S 0
+#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M BIT(0)
+#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_S 1
+#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_M ICE_M(0xFF, 1)
+#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_S 9
+#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_M ICE_M(0x3F, 9)
+#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_S 15
+#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M BIT(15)
+
+/* Tx Timestamp data registers */
+#define Q_REG_TX_MEMORY_BANK_START 0xA00
+
+/* PHY port register base addresses */
+#define P_0_BASE 0x80000
+#define P_4_BASE 0x106000
+
+/* Timestamp init registers */
+#define P_REG_RX_TIMER_INC_PRE_L 0x46C
+#define P_REG_RX_TIMER_INC_PRE_U 0x470
+#define P_REG_TX_TIMER_INC_PRE_L 0x44C
+#define P_REG_TX_TIMER_INC_PRE_U 0x450
+
+/* Timestamp match and adjust target registers */
+#define P_REG_RX_TIMER_CNT_ADJ_L 0x474
+#define P_REG_RX_TIMER_CNT_ADJ_U 0x478
+#define P_REG_TX_TIMER_CNT_ADJ_L 0x454
+#define P_REG_TX_TIMER_CNT_ADJ_U 0x458
+
+/* Timestamp capture registers */
+#define P_REG_RX_CAPTURE_L 0x4D8
+#define P_REG_RX_CAPTURE_U 0x4DC
+#define P_REG_TX_CAPTURE_L 0x4B4
+#define P_REG_TX_CAPTURE_U 0x4B8
+
+/* Timestamp PHY incval registers */
+#define P_REG_TIMETUS_L 0x410
+#define P_REG_TIMETUS_U 0x414
+
+#define P_REG_40B_LOW_M 0xFF
+#define P_REG_40B_HIGH_S 8
+
+/* PHY window length registers */
+#define P_REG_WL 0x40C
+
+#define PTP_VERNIER_WL 0x111ed
+
+/* PHY start registers */
+#define P_REG_PS 0x408
+#define P_REG_PS_START_S 0
+#define P_REG_PS_START_M BIT(0)
+#define P_REG_PS_BYPASS_MODE_S 1
+#define P_REG_PS_BYPASS_MODE_M BIT(1)
+#define P_REG_PS_ENA_CLK_S 2
+#define P_REG_PS_ENA_CLK_M BIT(2)
+#define P_REG_PS_LOAD_OFFSET_S 3
+#define P_REG_PS_LOAD_OFFSET_M BIT(3)
+#define P_REG_PS_SFT_RESET_S 11
+#define P_REG_PS_SFT_RESET_M BIT(11)
+
+/* PHY offset valid registers */
+#define P_REG_TX_OV_STATUS 0x4D4
+#define P_REG_TX_OV_STATUS_OV_S 0
+#define P_REG_TX_OV_STATUS_OV_M BIT(0)
+#define P_REG_RX_OV_STATUS 0x4F8
+#define P_REG_RX_OV_STATUS_OV_S 0
+#define P_REG_RX_OV_STATUS_OV_M BIT(0)
+
+/* PHY offset ready registers */
+#define P_REG_TX_OR 0x45C
+#define P_REG_RX_OR 0x47C
+
+/* PHY total offset registers */
+#define P_REG_TOTAL_RX_OFFSET_L 0x460
+#define P_REG_TOTAL_RX_OFFSET_U 0x464
+#define P_REG_TOTAL_TX_OFFSET_L 0x440
+#define P_REG_TOTAL_TX_OFFSET_U 0x444
+
+/* Timestamp PAR/PCS registers */
+#define P_REG_UIX66_10G_40G_L 0x480
+#define P_REG_UIX66_10G_40G_U 0x484
+#define P_REG_UIX66_25G_100G_L 0x488
+#define P_REG_UIX66_25G_100G_U 0x48C
+#define P_REG_DESK_PAR_RX_TUS_L 0x490
+#define P_REG_DESK_PAR_RX_TUS_U 0x494
+#define P_REG_DESK_PAR_TX_TUS_L 0x498
+#define P_REG_DESK_PAR_TX_TUS_U 0x49C
+#define P_REG_DESK_PCS_RX_TUS_L 0x4A0
+#define P_REG_DESK_PCS_RX_TUS_U 0x4A4
+#define P_REG_DESK_PCS_TX_TUS_L 0x4A8
+#define P_REG_DESK_PCS_TX_TUS_U 0x4AC
+#define P_REG_PAR_RX_TUS_L 0x420
+#define P_REG_PAR_RX_TUS_U 0x424
+#define P_REG_PAR_TX_TUS_L 0x428
+#define P_REG_PAR_TX_TUS_U 0x42C
+#define P_REG_PCS_RX_TUS_L 0x430
+#define P_REG_PCS_RX_TUS_U 0x434
+#define P_REG_PCS_TX_TUS_L 0x438
+#define P_REG_PCS_TX_TUS_U 0x43C
+#define P_REG_PAR_RX_TIME_L 0x4F0
+#define P_REG_PAR_RX_TIME_U 0x4F4
+#define P_REG_PAR_TX_TIME_L 0x4CC
+#define P_REG_PAR_TX_TIME_U 0x4D0
+#define P_REG_PAR_PCS_RX_OFFSET_L 0x4E8
+#define P_REG_PAR_PCS_RX_OFFSET_U 0x4EC
+#define P_REG_PAR_PCS_TX_OFFSET_L 0x4C4
+#define P_REG_PAR_PCS_TX_OFFSET_U 0x4C8
+#define P_REG_LINK_SPEED 0x4FC
+#define P_REG_LINK_SPEED_SERDES_S 0
+#define P_REG_LINK_SPEED_SERDES_M ICE_M(0x7, 0)
+#define P_REG_LINK_SPEED_FEC_MODE_S 3
+#define P_REG_LINK_SPEED_FEC_MODE_M ICE_M(0x3, 3)
+#define P_REG_LINK_SPEED_FEC_MODE(reg) \
+ (((reg) & P_REG_LINK_SPEED_FEC_MODE_M) >> \
+ P_REG_LINK_SPEED_FEC_MODE_S)
+
+/* PHY timestamp related registers */
+#define P_REG_PMD_ALIGNMENT 0x0FC
+#define P_REG_RX_80_TO_160_CNT 0x6FC
+#define P_REG_RX_80_TO_160_CNT_RXCYC_S 0
+#define P_REG_RX_80_TO_160_CNT_RXCYC_M BIT(0)
+#define P_REG_RX_40_TO_160_CNT 0x8FC
+#define P_REG_RX_40_TO_160_CNT_RXCYC_S 0
+#define P_REG_RX_40_TO_160_CNT_RXCYC_M ICE_M(0x3, 0)
+
+/* Rx FIFO status registers */
+#define P_REG_RX_OV_FS 0x4F8
+#define P_REG_RX_OV_FS_FIFO_STATUS_S 2
+#define P_REG_RX_OV_FS_FIFO_STATUS_M ICE_M(0x3FF, 2)
+
+/* Timestamp command registers */
+#define P_REG_TX_TMR_CMD 0x448
+#define P_REG_RX_TMR_CMD 0x468
+
/* E810 timesync enable register */
#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
@@ -68,9 +402,20 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
/* Timestamp block macros */
#define TS_LOW_M 0xFFFFFFFF
+#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
+#define TS_PHY_LOW_M 0xFF
+#define TS_PHY_HIGH_M 0xFFFFFFFF
+#define TS_PHY_HIGH_S 8
+
#define BYTES_PER_IDX_ADDR_L_U 8
+#define BYTES_PER_IDX_ADDR_L 4
+
+/* Internal PHY timestamp address */
+#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
+#define TS_H(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U + \
+ BYTES_PER_IDX_ADDR_L))
/* External PHY timestamp address */
#define TS_EXT(a, port, idx) ((a) + (0x1000 * (port)) + \
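
Alongside those masks, a minimal sketch (assumed helper names, not code from this patch) of how each family's 40-bit timestamp is reassembled from a low/high register pair:

/* E810 external PHY: the low register supplies bits 31:0 of the 40-bit
 * value and the high register the top 8 bits.
 */
static inline u64 ice_ts40_e810_sketch(u32 lo, u32 hi)
{
	return ((u64)(hi & TS_HIGH_M) << TS_HIGH_S) | (lo & TS_LOW_M);
}

/* E822 internal PHY: the split flips -- the low register supplies only
 * bits 7:0 and the high register bits 39:8.
 */
static inline u64 ice_ts40_e822_sketch(u32 lo, u32 hi)
{
	return ((u64)(hi & TS_PHY_HIGH_M) << TS_PHY_HIGH_S) |
	       (lo & TS_PHY_LOW_M);
}
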
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index af8e6ef5f571..dcc310e29300 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -244,6 +244,14 @@ static int ice_repr_add(struct ice_vf *vf)
if (!repr)
return -ENOMEM;
+#ifdef CONFIG_ICE_SWITCHDEV
+ repr->mac_rule = kzalloc(sizeof(*repr->mac_rule), GFP_KERNEL);
+ if (!repr->mac_rule) {
+ err = -ENOMEM;
+ goto err_alloc_rule;
+ }
+#endif
+
repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
if (!repr->netdev) {
err = -ENOMEM;
@@ -287,6 +295,11 @@ err_alloc_q_vector:
free_netdev(repr->netdev);
repr->netdev = NULL;
err_alloc:
+#ifdef CONFIG_ICE_SWITCHDEV
+ kfree(repr->mac_rule);
+ repr->mac_rule = NULL;
+err_alloc_rule:
+#endif
kfree(repr);
vf->repr = NULL;
return err;
@@ -304,6 +317,10 @@ static void ice_repr_rem(struct ice_vf *vf)
unregister_netdev(vf->repr->netdev);
free_netdev(vf->repr->netdev);
vf->repr->netdev = NULL;
+#ifdef CONFIG_ICE_SWITCHDEV
+ kfree(vf->repr->mac_rule);
+ vf->repr->mac_rule = NULL;
+#endif
kfree(vf->repr);
vf->repr = NULL;
}
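
The new error path keeps the usual reverse-order unwind: each label undoes exactly the allocations that preceded its goto. A self-contained toy version of the ordering (hypothetical names, for illustration only; the real function also allocates a q_vector, elided here):

/* Toy model of ice_repr_add()'s unwind ordering */
struct toy_rule {
	u16 rule_id;
};

struct toy_repr {
	struct toy_rule *mac_rule;
	struct net_device *netdev;
};

static int toy_repr_add(struct toy_repr *repr)
{
	int err;

	repr->mac_rule = kzalloc(sizeof(*repr->mac_rule), GFP_KERNEL);
	if (!repr->mac_rule)
		return -ENOMEM;		/* first allocation: nothing to undo */

	repr->netdev = alloc_etherdev(0);
	if (!repr->netdev) {
		err = -ENOMEM;
		goto err_alloc;		/* must free mac_rule */
	}

	return 0;

err_alloc:
	kfree(repr->mac_rule);
	repr->mac_rule = NULL;
	return err;
}
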
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 806de22933c6..0c77ff050d15 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -13,6 +13,11 @@ struct ice_repr {
struct ice_q_vector *q_vector;
struct net_device *netdev;
struct metadata_dst *dst;
+#ifdef CONFIG_ICE_SWITCHDEV
+ /* info about slow path MAC rule */
+ struct ice_rule_query_data *mac_rule;
+ u8 rule_added;
+#endif
};
int ice_repr_add_for_all_vfs(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index ce3c7bded4cb..7947223536e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -11,7 +11,7 @@
* This function inserts the root node of the scheduling tree topology
* to the SW DB.
*/
-static enum ice_status
+static int
ice_sched_add_root_node(struct ice_port_info *pi,
struct ice_aqc_txsched_elem_data *info)
{
@@ -19,20 +19,20 @@ ice_sched_add_root_node(struct ice_port_info *pi,
struct ice_hw *hw;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
if (!root)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* coverity[suspicious_sizeof] */
root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
sizeof(*root), GFP_KERNEL);
if (!root->children) {
devm_kfree(ice_hw_to_dev(hw), root);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
memcpy(&root->info, info, sizeof(*info));
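
The remainder of this file repeats the same mechanical conversion; the status codes collate to standard errnos as follows (summary drawn from the hunks below):

/* ICE_ERR_PARAM          -> -EINVAL
 * ICE_ERR_NO_MEMORY      -> -ENOMEM
 * ICE_ERR_CFG            -> -EIO
 * ICE_ERR_IN_USE         -> -EBUSY
 * ICE_ERR_MAX_LIMIT      -> -ENOSPC
 * ICE_ERR_AQ_FULL        -> -ENOSPC
 * ICE_ERR_DOES_NOT_EXIST -> -ENOENT
 */
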
@@ -96,14 +96,14 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
*
* This function sends a scheduling elements cmd (cmd_opc)
*/
-static enum ice_status
+static int
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
u16 elems_req, void *buf, u16 buf_size,
u16 *elems_resp, struct ice_sq_cd *cd)
{
struct ice_aqc_sched_elem_cmd *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.sched_elem_cmd;
ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
@@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*
* Query scheduling elements (0x0404)
*/
-enum ice_status
+int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
@@ -145,18 +145,18 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* This function inserts a scheduler node to the SW DB.
*/
-enum ice_status
+int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info)
{
struct ice_aqc_txsched_elem_data elem;
struct ice_sched_node *parent;
struct ice_sched_node *node;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
@@ -166,7 +166,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
if (!parent) {
ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
le32_to_cpu(info->parent_teid));
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* query the current node information from FW before adding it
@@ -178,7 +178,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
if (!node)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
if (hw->max_children[layer]) {
/* coverity[suspicious_sizeof] */
node->children = devm_kcalloc(ice_hw_to_dev(hw),
@@ -186,7 +186,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
sizeof(*node), GFP_KERNEL);
if (!node->children) {
devm_kfree(ice_hw_to_dev(hw), node);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
}
@@ -209,7 +209,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
*
* Delete scheduling elements (0x040F)
*/
-static enum ice_status
+static int
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_delete_elem *buf, u16 buf_size,
u16 *grps_del, struct ice_sq_cd *cd)
@@ -228,19 +228,19 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* This function removes nodes from HW
*/
-static enum ice_status
+static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
u16 num_nodes, u32 *node_teids)
{
struct ice_aqc_delete_elem *buf;
u16 i, num_groups_removed = 0;
- enum ice_status status;
u16 buf_size;
+ int status;
buf_size = struct_size(buf, teid, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes);
@@ -369,14 +369,14 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
*
* Get default scheduler topology (0x400)
*/
-static enum ice_status
+static int
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
struct ice_aqc_get_topo_elem *buf, u16 buf_size,
u8 *num_branches, struct ice_sq_cd *cd)
{
struct ice_aqc_get_topo *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_topo;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
@@ -399,7 +399,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
*
* Add scheduling elements (0x0401)
*/
-static enum ice_status
+static int
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_add_elem *buf, u16 buf_size,
u16 *grps_added, struct ice_sq_cd *cd)
@@ -420,7 +420,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* Configure scheduling elements (0x0403)
*/
-static enum ice_status
+static int
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_cfgd, struct ice_sq_cd *cd)
@@ -441,7 +441,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* Move scheduling elements (0x0408)
*/
-static enum ice_status
+static int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd)
@@ -462,7 +462,7 @@ ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* Suspend scheduling elements (0x0409)
*/
-static enum ice_status
+static int
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
@@ -482,7 +482,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
*
* Resume scheduling elements (0x040A)
*/
-static enum ice_status
+static int
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
@@ -500,7 +500,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
*
* Query scheduler resource allocation (0x0412)
*/
-static enum ice_status
+static int
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
struct ice_aqc_query_txsched_res_resp *buf,
struct ice_sq_cd *cd)
@@ -520,18 +520,18 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
*
* This function suspends or resumes HW nodes
*/
-static enum ice_status
+static int
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend)
{
u16 i, buf_size, num_elem_ret = 0;
- enum ice_status status;
__le32 *buf;
+ int status;
buf_size = sizeof(*buf) * num_nodes;
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < num_nodes; i++)
buf[i] = cpu_to_le32(node_teids[i]);
@@ -558,7 +558,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
* @tc: TC number
* @new_numqs: number of queues
*/
-static enum ice_status
+static int
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
@@ -566,7 +566,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* allocate LAN queue contexts */
if (!vsi_ctx->lan_q_ctx[tc]) {
vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
@@ -574,7 +574,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
sizeof(*q_ctx),
GFP_KERNEL);
if (!vsi_ctx->lan_q_ctx[tc])
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
return 0;
}
@@ -585,7 +585,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
@@ -602,7 +602,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
* @tc: TC number
* @new_numqs: number of queues
*/
-static enum ice_status
+static int
ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
@@ -610,7 +610,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* allocate RDMA queue contexts */
if (!vsi_ctx->rdma_q_ctx[tc]) {
vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
@@ -618,7 +618,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
sizeof(*q_ctx),
GFP_KERNEL);
if (!vsi_ctx->rdma_q_ctx[tc])
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
return 0;
}
@@ -629,7 +629,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
@@ -651,14 +651,14 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
*
* RL profile function to add, query, or remove profile(s)
*/
-static enum ice_status
+static int
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.rl_profile;
@@ -682,7 +682,7 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
*
* Add RL profile (0x0410)
*/
-static enum ice_status
+static int
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_added, struct ice_sq_cd *cd)
@@ -702,7 +702,7 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
*
* Remove RL profile (0x0415)
*/
-static enum ice_status
+static int
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_removed, struct ice_sq_cd *cd)
@@ -721,24 +721,24 @@ ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
* its associated parameters from HW DB, and locally. The caller needs to
* hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_del_rl_profile(struct ice_hw *hw,
struct ice_aqc_rl_profile_info *rl_info)
{
struct ice_aqc_rl_profile_elem *buf;
u16 num_profiles_removed;
- enum ice_status status;
u16 num_profiles = 1;
+ int status;
if (rl_info->prof_id_ref != 0)
- return ICE_ERR_IN_USE;
+ return -EBUSY;
/* Safe to remove profile ID */
buf = &rl_info->profile;
status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
&num_profiles_removed, NULL);
if (status || num_profiles_removed != num_profiles)
- return ICE_ERR_CFG;
+ return -EIO;
/* Delete stale entry now */
list_del(&rl_info->list_entry);
@@ -763,7 +763,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
&pi->rl_prof_list[ln], list_entry) {
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
rl_prof_elem->prof_id_ref = 0;
status = ice_sched_del_rl_profile(hw, rl_prof_elem);
@@ -875,7 +875,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
*
* This function adds nodes to HW as well as to SW DB for a given layer
*/
-static enum ice_status
+static int
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer, u16 num_nodes,
u16 *num_nodes_added, u32 *first_node_teid)
@@ -883,15 +883,15 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *prev, *new_node;
struct ice_aqc_add_elem *buf;
u16 i, num_groups_added = 0;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
size_t buf_size;
+ int status = 0;
u32 teid;
buf_size = struct_size(buf, generic, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes);
@@ -918,7 +918,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
- return ICE_ERR_CFG;
+ return -EIO;
}
*num_nodes_added = num_nodes;
@@ -974,7 +974,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
*
* Add nodes into specific HW layer.
*/
-static enum ice_status
+static int
ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -989,7 +989,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
return 0;
if (!parent || layer < pi->hw->sw_entry_point_layer)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* max children per node per layer */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
@@ -998,8 +998,8 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
if ((parent->num_children + num_nodes) > max_child_nodes) {
/* Fail if the parent is a TC node */
if (parent == tc_node)
- return ICE_ERR_CFG;
- return ICE_ERR_MAX_LIMIT;
+ return -EIO;
+ return -ENOSPC;
}
return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
@@ -1018,7 +1018,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
*
* This function adds nodes to a given layer.
*/
-static enum ice_status
+static int
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -1027,7 +1027,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
{
u32 *first_teid_ptr = first_node_teid;
u16 new_num_nodes = num_nodes;
- enum ice_status status = 0;
+ int status = 0;
*num_nodes_added = 0;
while (*num_nodes_added < num_nodes) {
@@ -1045,14 +1045,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
if (*num_nodes_added > num_nodes) {
ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
*num_nodes_added);
- status = ICE_ERR_CFG;
+ status = -EIO;
break;
}
/* break if all the nodes are added successfully */
if (!status && (*num_nodes_added == num_nodes))
break;
/* break if the error is not max limit */
- if (status && status != ICE_ERR_MAX_LIMIT)
+ if (status && status != -ENOSPC)
break;
/* Exceeded the max children */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
@@ -1152,7 +1152,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
}
if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
- enum ice_status status;
+ int status;
/* remove the default leaf node */
status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
@@ -1198,23 +1198,23 @@ static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
* resources, default topology created by firmware and storing the information
* in SW DB.
*/
-enum ice_status ice_sched_init_port(struct ice_port_info *pi)
+int ice_sched_init_port(struct ice_port_info *pi)
{
struct ice_aqc_get_topo_elem *buf;
- enum ice_status status;
struct ice_hw *hw;
u8 num_branches;
u16 num_elems;
+ int status;
u8 i, j;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
/* Query the Default Topology from FW */
buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Query default scheduling tree topology */
status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
@@ -1226,7 +1226,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
num_branches);
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto err_init_port;
}
@@ -1237,7 +1237,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
num_elems);
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto err_init_port;
}
@@ -1300,11 +1300,11 @@ err_init_port:
*
* query FW for allocated scheduler resources and store in HW struct
*/
-enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
+int ice_sched_query_res_alloc(struct ice_hw *hw)
{
struct ice_aqc_query_txsched_res_resp *buf;
- enum ice_status status = 0;
__le16 max_sibl;
+ int status = 0;
u16 i;
if (hw->layer_info)
@@ -1312,7 +1312,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
if (status)
@@ -1341,7 +1341,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
sizeof(*hw->layer_info)),
GFP_KERNEL);
if (!hw->layer_info) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto sched_query_out;
}
@@ -1614,31 +1614,31 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
* This function adds the VSI child nodes to the tree. It gets called for
* LAN and RDMA separately.
*/
-static enum ice_status
+static int
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes,
u8 owner)
{
struct ice_sched_node *parent, *node;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, qgl, vsil;
+ int status;
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
num_nodes[i],
&first_node_teid,
&num_added);
if (status || num_nodes[i] != num_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -1717,18 +1717,18 @@ ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
* This function adds the VSI supported nodes into the Tx tree including the
* VSI, its parent and intermediate nodes in the layers below
*/
-static enum ice_status
+static int
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *parent = tc_node;
- enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, vsil;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
@@ -1737,7 +1737,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
&first_node_teid,
&num_added);
if (status || num_nodes[i] != num_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -1749,7 +1749,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
parent = parent->children[0];
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
if (i == vsil)
parent->vsi_handle = vsi_handle;
@@ -1766,7 +1766,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
*
* This function adds a new VSI into the scheduler tree
*/
-static enum ice_status
+static int
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
@@ -1774,7 +1774,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* calculate number of supported nodes needed for this VSI */
ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
@@ -1794,7 +1794,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
*
* This function updates the VSI child nodes based on the number of queues
*/
-static enum ice_status
+static int
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u16 new_numqs, u8 owner)
{
@@ -1802,21 +1802,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 prev_numqs;
+ int status = 0;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
- return ICE_ERR_CFG;
+ return -EIO;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (owner == ICE_SCHED_NODE_OWNER_LAN)
prev_numqs = vsi_ctx->sched.max_lanq[tc];
@@ -1869,22 +1869,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
* enabled and VSI is in suspended state then resume the VSI back. If TC is
* disabled then suspend the VSI if it is not already.
*/
-enum ice_status
+int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable)
{
struct ice_sched_node *vsi_node, *tc_node;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ int status = 0;
ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
/* suspend the VSI if TC is not enabled */
@@ -1908,7 +1908,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
- return ICE_ERR_CFG;
+ return -EIO;
vsi_ctx->sched.vsi_node[tc] = vsi_node;
vsi_node->in_use = true;
@@ -1993,11 +1993,11 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
* This function removes the VSI and its LAN or RDMA children nodes from the
* scheduler tree.
*/
-static enum ice_status
+static int
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_vsi_ctx *vsi_ctx;
+ int status = -EINVAL;
u8 i;
ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
@@ -2022,7 +2022,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (ice_sched_is_leaf_node_present(vsi_node)) {
ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
- status = ICE_ERR_IN_USE;
+ status = -EBUSY;
goto exit_sched_rm_vsi_cfg;
}
while (j < vsi_node->num_children) {
@@ -2065,7 +2065,7 @@ exit_sched_rm_vsi_cfg:
* This function clears the VSI and its LAN child nodes from the scheduler tree
* for all TCs.
*/
-enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
+int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
@@ -2078,7 +2078,7 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
* This function clears the VSI and its RDMA child nodes from the scheduler tree
* for all TCs.
*/
-enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
+int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
}
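
One payoff of returning plain errnos is that callers can feed scheduler failures straight into standard kernel error plumbing. A hypothetical caller sketch (names assumed, not from the patch):

	int err = ice_rm_vsi_lan_cfg(pi, vsi_handle);

	if (err)
		dev_err(ice_hw_to_dev(pi->hw),
			"failed to remove VSI %u LAN cfg: %pe\n",
			vsi_handle, ERR_PTR(err));
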
@@ -2188,36 +2188,36 @@ ice_sched_update_parent(struct ice_sched_node *new_parent,
*
* This function moves the child nodes to a given parent.
*/
-static enum ice_status
+static int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
struct ice_aqc_move_elem *buf;
struct ice_sched_node *node;
- enum ice_status status = 0;
u16 i, grps_movd = 0;
struct ice_hw *hw;
+ int status = 0;
u16 buf_len;
hw = pi->hw;
if (!parent || !num_items)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Does parent have enough space */
if (parent->num_children + num_items >
hw->max_children[parent->tx_sched_layer])
- return ICE_ERR_AQ_FULL;
+ return -ENOSPC;
buf_len = struct_size(buf, teid, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < num_items; i++) {
node = ice_sched_find_node_by_teid(pi->root, list[i]);
if (!node) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto move_err_exit;
}
@@ -2228,7 +2228,7 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
&grps_movd, NULL);
if (status && grps_movd != 1) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto move_err_exit;
}
@@ -2251,28 +2251,28 @@ move_err_exit:
* This function moves a VSI to an aggregator node or its subtree.
* Intermediate nodes may be created if required.
*/
-static enum ice_status
+static int
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
u8 tc)
{
struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u32 first_node_teid, vsi_teid;
- enum ice_status status;
u16 num_nodes_added;
u8 aggl, vsil, i;
+ int status;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* Is this VSI already part of given aggregator? */
if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
@@ -2302,7 +2302,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
&first_node_teid,
&num_nodes_added);
if (status || num_nodes[i] != num_nodes_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -2314,7 +2314,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
parent = parent->children[0];
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
}
move_nodes:
@@ -2333,14 +2333,14 @@ move_nodes:
* aggregator VSI info based on the passed-in boolean parameter rm_vsi_info. The
* caller holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
struct ice_sched_agg_info *agg_info, u8 tc,
bool rm_vsi_info)
{
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_vsi_info *tmp;
- enum ice_status status = 0;
+ int status = 0;
list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
list_entry) {
@@ -2397,7 +2397,7 @@ ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
* This function removes the aggregator node and intermediate nodes if any
* from the given TC
*/
-static enum ice_status
+static int
ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
struct ice_sched_node *tc_node, *agg_node;
@@ -2405,15 +2405,15 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* Can't remove the aggregator node if it has children */
if (ice_sched_is_agg_inuse(pi, agg_node))
- return ICE_ERR_IN_USE;
+ return -EBUSY;
/* need to remove the whole subtree if aggregator node is the
* only child.
@@ -2422,7 +2422,7 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
struct ice_sched_node *parent = agg_node->parent;
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
if (parent->num_children > 1)
break;
@@ -2445,11 +2445,11 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
* the aggregator configuration completely for requested TC. The caller needs
* to hold the scheduler lock.
*/
-static enum ice_status
+static int
ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
u8 tc, bool rm_vsi_info)
{
- enum ice_status status = 0;
+ int status = 0;
/* If nothing to remove - return success */
if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
@@ -2478,7 +2478,7 @@ exit_rm_agg_cfg_tc:
* Save aggregator TC bitmap. This function needs to be called with scheduler
* lock held.
*/
-static enum ice_status
+static int
ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
unsigned long *tc_bitmap)
{
@@ -2486,7 +2486,7 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
agg_info = ice_get_agg_info(pi->hw, agg_id);
if (!agg_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS);
return 0;
@@ -2501,20 +2501,20 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
* This function creates an aggregator node and intermediate nodes if required
* for the given TC
*/
-static enum ice_status
+static int
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
struct ice_sched_node *parent, *agg_node, *tc_node;
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u32 first_node_teid;
u16 num_nodes_added;
+ int status = 0;
u8 i, aggl;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
/* Does Agg node already exist ? */
@@ -2549,14 +2549,14 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
parent = tc_node;
for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
num_nodes[i],
&first_node_teid,
&num_nodes_added);
if (status || num_nodes[i] != num_nodes_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -2591,13 +2591,13 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
* resources and remove aggregator ID.
* This function needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
enum ice_agg_type agg_type, unsigned long *tc_bitmap)
{
struct ice_sched_agg_info *agg_info;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
agg_info = ice_get_agg_info(hw, agg_id);
@@ -2606,7 +2606,7 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info),
GFP_KERNEL);
if (!agg_info)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
agg_info->agg_id = agg_id;
agg_info->agg_type = agg_type;
@@ -2653,19 +2653,17 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
*
* This function configures aggregator node(s).
*/
-enum ice_status
+int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
u8 tc_bitmap)
{
unsigned long bitmap = tc_bitmap;
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
- status = ice_sched_cfg_agg(pi, agg_id, agg_type,
- (unsigned long *)&bitmap);
+ status = ice_sched_cfg_agg(pi, agg_id, agg_type, &bitmap);
if (!status)
- status = ice_save_agg_tc_bitmap(pi, agg_id,
- (unsigned long *)&bitmap);
+ status = ice_save_agg_tc_bitmap(pi, agg_id, &bitmap);
mutex_unlock(&pi->sched_lock);
return status;
}
@@ -2724,7 +2722,7 @@ ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
* Save VSI to aggregator TC bitmap. This function needs to be called with
* scheduler lock held.
*/
-static enum ice_status
+static int
ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
unsigned long *tc_bitmap)
{
@@ -2733,11 +2731,11 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
agg_info = ice_get_agg_info(pi->hw, agg_id);
if (!agg_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* check if entry already exists */
agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
if (!agg_vsi_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS);
return 0;
@@ -2754,21 +2752,21 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
* already associated to the aggregator node then no operation is performed on
* the tree. This function needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
u16 vsi_handle, unsigned long *tc_bitmap)
{
struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
struct ice_sched_agg_info *agg_info, *old_agg_info;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
agg_info = ice_get_agg_info(hw, agg_id);
if (!agg_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* If the VSI is already part of another aggregator then update
* its VSI info list
*/
@@ -2790,7 +2788,7 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*agg_vsi_info), GFP_KERNEL);
if (!agg_vsi_info)
- return ICE_ERR_PARAM;
+ return -ENOMEM;
/* add VSI ID into the aggregator list */
agg_vsi_info->vsi_handle = vsi_handle;
@@ -2851,14 +2849,14 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
* returns success or error on config sched element failure. The caller
* needs to hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_aqc_txsched_elem_data *info)
{
struct ice_aqc_txsched_elem_data buf;
- enum ice_status status;
u16 elem_cfgd = 0;
u16 num_elems = 1;
+ int status;
buf = *info;
/* Parent TEID is reserved field in this aq call */
@@ -2874,7 +2872,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
&elem_cfgd, NULL);
if (status || elem_cfgd != num_elems) {
ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
- return ICE_ERR_CFG;
+ return -EIO;
}
/* Config success case */
@@ -2893,7 +2891,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
*
* This function configures node element's BW allocation.
*/
-static enum ice_status
+static int
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
enum ice_rl_type rl_type, u16 bw_alloc)
{
@@ -2909,7 +2907,7 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
} else {
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* Configure element */
@@ -2925,12 +2923,12 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
*
* Move or associate VSI to a new or default aggregator node.
*/
-enum ice_status
+int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
u8 tc_bitmap)
{
unsigned long bitmap = tc_bitmap;
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
@@ -3098,12 +3096,12 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
*
* This function converts the BW to profile structure format.
*/
-static enum ice_status
+static int
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
struct ice_aqc_rl_profile_elem *profile)
{
- enum ice_status status = ICE_ERR_PARAM;
s64 bytes_per_sec, ts_rate, mv_tmp;
+ int status = -EINVAL;
bool found = false;
s32 encode = 0;
s64 mv = 0;
@@ -3150,7 +3148,7 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
profile->rl_encode = cpu_to_le16(encode);
status = 0;
} else {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
}
return status;
@@ -3176,9 +3174,9 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
struct ice_aqc_rl_profile_info *rl_prof_elem;
u16 profiles_added = 0, num_profiles = 1;
struct ice_aqc_rl_profile_elem *buf;
- enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
+ int status;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
return NULL;
@@ -3249,7 +3247,7 @@ exit_add_rl_prof:
*
* This function configures node element's BW limit.
*/
-static enum ice_status
+static int
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
enum ice_rl_type rl_type, u16 rl_prof_id)
{
@@ -3268,7 +3266,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
* hence only one of them may be set for any given element
*/
if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
- return ICE_ERR_CFG;
+ return -EIO;
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
break;
@@ -3291,7 +3289,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
(le16_to_cpu(data->eir_bw.bw_profile_idx) !=
ICE_SCHED_DFLT_RL_PROF_ID))
- return ICE_ERR_CFG;
+ return -EIO;
/* EIR BW is set to default, disable it */
data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
/* Okay to enable shared BW now */
@@ -3300,7 +3298,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
break;
default:
/* Unknown rate limit type */
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* Configure element */
@@ -3420,15 +3418,15 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
* 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
* scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
u16 profile_id)
{
struct ice_aqc_rl_profile_info *rl_prof_elem;
- enum ice_status status = 0;
+ int status = 0;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
@@ -3441,11 +3439,11 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
/* Remove old profile ID from database */
status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
- if (status && status != ICE_ERR_IN_USE)
+ if (status && status != -EBUSY)
ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
break;
}
- if (status == ICE_ERR_IN_USE)
+ if (status == -EBUSY)
status = 0;
return status;
}
@@ -3461,16 +3459,16 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
* type CIR, EIR, or SRL to default. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
struct ice_sched_node *node,
enum ice_rl_type rl_type, u8 layer_num)
{
- enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
u16 rl_prof_id;
u16 old_id;
+ int status;
hw = pi->hw;
switch (rl_type) {
@@ -3488,7 +3486,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* Save existing RL prof ID for later clean up */
old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
@@ -3518,7 +3516,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
* them may be set for any given element. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
struct ice_sched_node *node,
u8 layer_num, enum ice_rl_type rl_type, u32 bw)
@@ -3562,14 +3560,14 @@ ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
* node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
* ID from local database. The caller needs to hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
struct ice_aqc_rl_profile_info *rl_prof_info;
- enum ice_status status = ICE_ERR_PARAM;
struct ice_hw *hw = pi->hw;
u16 old_id, rl_prof_id;
+ int status = -EINVAL;
rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
if (!rl_prof_info)
@@ -3608,31 +3606,31 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
* It updates node's BW limit parameters like BW RL profile ID of type CIR,
* EIR, or SRL. The caller needs to hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw)
{
struct ice_sched_node *cfg_node = node;
- enum ice_status status;
+ int status;
struct ice_hw *hw;
u8 layer_num;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
/* Remove unused RL profile IDs from HW and SW DB */
ice_sched_rm_unused_rl_prof(pi);
layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer);
if (layer_num >= hw->num_tx_sched_layers)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (rl_type == ICE_SHARED_BW) {
/* SRL node may be different */
cfg_node = ice_sched_get_srl_node(node, layer_num);
if (!cfg_node)
- return ICE_ERR_CFG;
+ return -EIO;
}
/* EIR BW and Shared BW profiles are mutually exclusive and
* hence only one of them may be set for any given element
@@ -3657,7 +3655,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
* type CIR, EIR, or SRL to default. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
struct ice_sched_node *node,
enum ice_rl_type rl_type)
@@ -3675,7 +3673,7 @@ ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
* behalf of the requested node (first argument). This function needs to be
* called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
{
/* SRL profiles are not available on all layers. Check if the
@@ -3690,7 +3688,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
(node->parent && node->parent->num_children == 1)))
return 0;
- return ICE_ERR_CFG;
+ return -EIO;
}
/**
@@ -3701,7 +3699,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
*
* Save BW information of queue type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
{
switch (rl_type) {
@@ -3715,7 +3713,7 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
return 0;
}
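As context for the save/replay pair converted above: ice_sched_save_q_bw() stashes the requested limit in q_ctx->bw_t_info, and ice_sched_replay_q_bw() (further down in this file) re-applies it after a reset. A minimal sketch of that flow; the queue lookup, locking, and rate value are assumed here, not taken from this patch:

	/* sketch only: q_ctx comes from ice_get_lan_q_ctx(), and the
	 * scheduler lock is held by the caller as the comments require
	 */
	int err = ice_sched_save_q_bw(q_ctx, ICE_SHARED_BW, bw_kbps);

	if (err)		/* -EINVAL for an unknown rl_type */
		return err;
	/* ... later, after reset processing, the saved value is replayed: */
	err = ice_sched_replay_q_bw(pi, q_ctx);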
@@ -3731,16 +3729,16 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
*
* This function sets BW limit of queue scheduling node.
*/
-static enum ice_status
+static int
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *node;
struct ice_q_ctx *q_ctx;
+ int status = -EINVAL;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&pi->sched_lock);
q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
if (!q_ctx)
@@ -3762,7 +3760,7 @@ ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer);
if (sel_layer >= pi->hw->num_tx_sched_layers) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto exit_q_bw_lmt;
}
status = ice_sched_validate_srl_node(node, sel_layer);
@@ -3794,7 +3792,7 @@ exit_q_bw_lmt:
*
* This function configures BW limit of queue scheduling node.
*/
-enum ice_status
+int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
@@ -3812,7 +3810,7 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
*
* This function configures BW default limit of queue scheduling node.
*/
-enum ice_status
+int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type)
{
@@ -3880,13 +3878,13 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
* This function sets BW limit of VSI or Aggregator scheduling node
* based on TC information from passed in argument BW.
*/
-static enum ice_status
+int
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *node;
+ int status = -EINVAL;
if (!pi)
return status;
@@ -3921,7 +3919,7 @@ exit_set_node_bw_lmt_per_tc:
* This function configures BW limit of VSI scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
@@ -3948,7 +3946,7 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* This function configures default BW limit of VSI scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type)
{
@@ -3976,13 +3974,13 @@ ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* burst size value is used for future rate limit calls. It doesn't change the
* existing or previously created RL profiles.
*/
-enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
+int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
{
u16 burst_size_to_prog;
if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
bytes > ICE_MAX_BURST_SIZE_ALLOWED)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (ice_round_to_num(bytes, 64) <=
ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
/* 64 byte granularity case */
@@ -4017,13 +4015,13 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
* This function configures node element's priority value. It
* needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
u8 priority)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
- enum ice_status status;
+ int status;
buf = node->info;
data = &buf.data;
@@ -4044,12 +4042,12 @@ ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
* This function restores node's BW from bw_t_info. The caller needs
* to hold the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_bw_type_info *bw_t_info)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status status = ICE_ERR_PARAM;
+ int status = -EINVAL;
u16 bw_alloc;
if (!node)
@@ -4137,7 +4135,7 @@ void ice_sched_replay_agg(struct ice_hw *hw)
if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap,
ICE_MAX_TRAFFIC_CLASS)) {
DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
- enum ice_status status;
+ int status;
bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
ice_sched_get_ena_tc_bitmap(pi,
@@ -4191,18 +4189,17 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
* their node bandwidth information. This function needs to be called with
* scheduler lock held.
*/
-static enum ice_status
-ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_port_info *pi = hw->port_info;
struct ice_sched_agg_info *agg_info;
- enum ice_status status;
+ int status;
bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
if (!agg_info)
return 0; /* Not present in list - default Agg case */
@@ -4233,10 +4230,10 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
* This function replays association of VSI to aggregator type nodes, and
* node bandwidth information.
*/
-enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
status = ice_sched_replay_vsi_agg(hw, vsi_handle);
@@ -4252,14 +4249,13 @@ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
* This function replays queue type node bandwidth. This function needs to be
* called with scheduler lock held.
*/
-enum ice_status
-ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
+int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
{
struct ice_sched_node *q_node;
/* Following also checks the presence of node in tree */
q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
if (!q_node)
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
}
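With ice_sched.c converted, callers get standard errnos straight from the scheduler API and no longer need a translation step from ice_status values. A hedged sketch of a caller under the new convention; the pi/vsi plumbing, the max_tx_rate value, and the ICE_MAX_BW rl_type selection are assumptions, not part of this patch:

	int err;

	/* returns 0 or a negative errno such as -EINVAL directly */
	err = ice_cfg_q_bw_lmt(pi, vsi_handle, tc, q_handle,
			       ICE_MAX_BW, max_tx_rate);
	if (err)
		dev_err(ice_hw_to_dev(pi->hw),
			"failed to set queue BW limit, err %d\n", err);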
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 6bddcbecaf5e..4f91577fed56 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -65,12 +65,12 @@ struct ice_sched_agg_info {
};
/* FW AQ command calls */
-enum ice_status
+int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
-enum ice_status ice_sched_init_port(struct ice_port_info *pi);
-enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+int ice_sched_init_port(struct ice_port_info *pi);
+int ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_get_psm_clk_freq(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi);
@@ -79,7 +79,7 @@ void ice_sched_clear_agg(struct ice_hw *hw);
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
-enum ice_status
+int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info);
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
@@ -87,35 +87,38 @@ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner);
-enum ice_status
+int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
-enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
-enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
+int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
/* Tx scheduler rate limiter functions */
-enum ice_status
+int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id,
enum ice_agg_type agg_type, u8 tc_bitmap);
-enum ice_status
+int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
u8 tc_bitmap);
-enum ice_status
+int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
-enum ice_status
+int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type);
-enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+int
+ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc,
+ enum ice_rl_type rl_type, u32 bw);
+int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
-enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
-ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
+int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
+int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
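The header also picks up a prototype for ice_sched_set_node_bw_lmt_per_tc(), which the ice_sched.c hunk above switches from static to global so code outside ice_sched.c can program a per-TC limit on a VSI or aggregator node. A minimal sketch of a call, assuming ICE_AGG_TYPE_VSI from enum ice_agg_type and a rate expressed in Kbps:

	/* sketch: cap TC 0 of a VSI node at 100 Mbps (rate in Kbps) */
	int err = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
						   ICE_AGG_TYPE_VSI, 0,
						   ICE_MAX_BW, 100000);
	if (err)	/* -EINVAL when pi is NULL or the node lookup fails */
		return err;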
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index aa11d07793d4..52c6bac41bf7 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -18,7 +18,7 @@
* queue and asynchronously sending message via
* ice_sq_send_cmd() function
*/
-enum ice_status
+int
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
@@ -228,7 +228,7 @@ ice_mbx_traverse(struct ice_hw *hw,
* sent per VF and marks the VF as malicious if it exceeds
* the permissible number of messages to send.
*/
-static enum ice_status
+static int
ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
enum ice_mbx_snapshot_state *new_state,
bool *is_malvf)
@@ -236,7 +236,7 @@ ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
if (vf_id >= snap->mbx_vf.vfcntr_len)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* increment the message count in the VF array */
snap->mbx_vf.vf_cntr[vf_id]++;
@@ -297,7 +297,7 @@ static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
* Detect: If pending message count exceeds watermark traverse
* the static snapshot and look for a malicious VF.
*/
-enum ice_status
+int
ice_mbx_vf_state_handler(struct ice_hw *hw,
struct ice_mbx_data *mbx_data, u16 vf_id,
bool *is_malvf)
@@ -306,10 +306,10 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
struct ice_mbx_snap_buffer_data *snap_buf;
struct ice_ctl_q_info *cq = &hw->mailboxq;
enum ice_mbx_snapshot_state new_state;
- enum ice_status status = 0;
+ int status = 0;
if (!is_malvf || !mbx_data)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
/* When entering the mailbox state machine assume that the VF
* is not malicious until detected.
@@ -320,7 +320,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
* interrupt is not less than the defined AVF message threshold.
*/
if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
/* The watermark value should not be less than the threshold limit
* set for the number of asynchronous messages a VF can send to mailbox
@@ -329,7 +329,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
*/
if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
snap_buf = &snap->mbx_buf;
@@ -383,7 +383,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
default:
new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
- status = ICE_ERR_CFG;
+ status = -EIO;
}
snap_buf->state = new_state;
@@ -405,20 +405,20 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
* the input vf_id against the bitmap to verify if the VF has been
* detected in any previous mailbox iterations.
*/
-enum ice_status
+int
ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id, bool *report_malvf)
{
if (!all_malvfs || !report_malvf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
*report_malvf = false;
if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
if (vf_id >= bitmap_len)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* Set the VF ID bit in the bitmap; report it only if not already set */
if (!test_and_set_bit(vf_id, all_malvfs))
@@ -441,19 +441,19 @@ ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
* that the new VF loaded is not considered malicious before going
* through the overflow detection algorithm.
*/
-enum ice_status
+int
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id)
{
if (!snap || !all_malvfs)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (bitmap_len < snap->mbx_vf.vfcntr_len)
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
/* Ensure VF ID value is not larger than bitmap or VF counter length */
if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
clear_bit(vf_id, all_malvfs);
@@ -482,7 +482,7 @@ ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
* called to ensure that the vf_count can be compared against the number
* of VFs supported as defined in the functional capabilities of the device.
*/
-enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
{
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
@@ -491,13 +491,13 @@ enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
* the functional capabilities of the PF.
*/
if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
sizeof(*snap->mbx_vf.vf_cntr),
GFP_KERNEL);
if (!snap->mbx_vf.vf_cntr)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Setting the VF counter length to the number of allocated
* VFs for given PF's functional capabilities.
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 161dc55d9e9c..68686a3fd7e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -14,24 +14,24 @@
#define ICE_ASYNC_VF_MSG_THRESHOLD 63
#ifdef CONFIG_PCI_IOV
-enum ice_status
+int
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd);
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
-enum ice_status
+int
ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
u16 vf_id, bool *is_mal_vf);
-enum ice_status
+int
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id);
-enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
+int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
void ice_mbx_deinit_snapshot(struct ice_hw *hw);
-enum ice_status
+int
ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id, bool *report_malvf);
#else /* CONFIG_PCI_IOV */
-static inline enum ice_status
+static inline int
ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
u16 __always_unused vfid, u32 __always_unused v_opcode,
u32 __always_unused v_retval, u8 __always_unused *msg,
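The validation converted in ice_mbx_vf_state_handler() above bounds its inputs with ICE_ASYNC_VF_MSG_THRESHOLD (63): the mailbox must hold more than the threshold, and the watermark must sit between the threshold and the mailbox size. A hedged sketch of a caller filling ice_mbx_data; only the max_num_msgs_mbx and async_watermark_val fields come from the hunks, the values themselves are assumptions:

	struct ice_mbx_data mbx_data = {};
	bool is_malvf = false;
	int err;

	mbx_data.max_num_msgs_mbx = 1024;	/* must exceed the threshold of 63 */
	mbx_data.async_watermark_val = 64;	/* threshold <= watermark <= mailbox size */

	err = ice_mbx_vf_state_handler(hw, &mbx_data, vf_id, &is_malvf);
	if (err)	/* -EINVAL on bad bounds, -EIO on an invalid state */
		return err;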
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
deleted file mode 100644
index dbf66057371d..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
-
-#ifndef _ICE_STATUS_H_
-#define _ICE_STATUS_H_
-
-/* Error Codes */
-enum ice_status {
- ICE_SUCCESS = 0,
-
- /* Generic codes : Range -1..-49 */
- ICE_ERR_PARAM = -1,
- ICE_ERR_NOT_IMPL = -2,
- ICE_ERR_NOT_READY = -3,
- ICE_ERR_NOT_SUPPORTED = -4,
- ICE_ERR_BAD_PTR = -5,
- ICE_ERR_INVAL_SIZE = -6,
- ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
- ICE_ERR_RESET_FAILED = -9,
- ICE_ERR_FW_API_VER = -10,
- ICE_ERR_NO_MEMORY = -11,
- ICE_ERR_CFG = -12,
- ICE_ERR_OUT_OF_RANGE = -13,
- ICE_ERR_ALREADY_EXISTS = -14,
- ICE_ERR_DOES_NOT_EXIST = -15,
- ICE_ERR_IN_USE = -16,
- ICE_ERR_MAX_LIMIT = -17,
- ICE_ERR_RESET_ONGOING = -18,
- ICE_ERR_HW_TABLE = -19,
- ICE_ERR_FW_DDP_MISMATCH = -20,
-
- ICE_ERR_NVM = -50,
- ICE_ERR_NVM_CHECKSUM = -51,
- ICE_ERR_BUF_TOO_SHORT = -52,
- ICE_ERR_NVM_BLANK_MODE = -53,
- ICE_ERR_AQ_ERROR = -100,
- ICE_ERR_AQ_TIMEOUT = -101,
- ICE_ERR_AQ_FULL = -102,
- ICE_ERR_AQ_NO_WORK = -103,
- ICE_ERR_AQ_EMPTY = -104,
- ICE_ERR_AQ_FW_CRITICAL = -105,
-};
-
-#endif /* _ICE_STATUS_H_ */
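With ice_status.h deleted, the replacements throughout this series follow a consistent mapping from the removed codes to standard errnos. The summary below is distilled from the hunks in this diff:

	/* ICE_ERR_PARAM / ICE_ERR_BAD_PTR / ICE_ERR_INVAL_SIZE -> -EINVAL
	 * ICE_ERR_NO_MEMORY                                    -> -ENOMEM
	 * ICE_ERR_DOES_NOT_EXIST                                -> -ENOENT
	 * ICE_ERR_ALREADY_EXISTS                                -> -EEXIST
	 * ICE_ERR_NOT_IMPL                                      -> -EOPNOTSUPP
	 * ICE_ERR_CFG / ICE_ERR_OUT_OF_RANGE                    -> -EIO
	 * ICE_ERR_MAX_LIMIT                                     -> -ENOSPC
	 */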
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 183d93033890..11ae0bee3590 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -528,7 +528,7 @@ static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
*/
-enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
+int ice_init_def_sw_recp(struct ice_hw *hw)
{
struct ice_sw_recipe *recps;
u8 i;
@@ -536,7 +536,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
sizeof(*recps), GFP_KERNEL);
if (!recps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
recps[i].root_rid = i;
@@ -576,14 +576,14 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
* in response buffer. The caller of this function should use *num_elems while
* parsing the response buffer.
*/
-static enum ice_status
+static int
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
u16 buf_size, u16 *req_desc, u16 *num_elems,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
cmd = &desc.params.get_sw_conf;
@@ -606,14 +606,14 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
*
* Add a VSI context to the hardware (0x0210)
*/
-static enum ice_status
+static int
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *res;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
res = &desc.params.add_update_free_vsi_res;
@@ -650,14 +650,14 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Free VSI context info from hardware (0x0213)
*/
-static enum ice_status
+static int
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.add_update_free_vsi_res;
@@ -685,14 +685,14 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware (0x0211)
*/
-static enum ice_status
+static int
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.add_update_free_vsi_res;
@@ -832,15 +832,15 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw)
* If this function gets called after reset for existing VSIs then update
* with the new HW VSI number in the corresponding VSI handle list entry.
*/
-enum ice_status
+int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_vsi_ctx *tmp_vsi_ctx;
- enum ice_status status;
+ int status;
if (vsi_handle >= ICE_MAX_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
status = ice_aq_add_vsi(hw, vsi_ctx, cd);
if (status)
return status;
@@ -851,7 +851,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
sizeof(*tmp_vsi_ctx), GFP_KERNEL);
if (!tmp_vsi_ctx) {
ice_aq_free_vsi(hw, vsi_ctx, false, cd);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
*tmp_vsi_ctx = *vsi_ctx;
ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
@@ -873,14 +873,14 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
*
* Free VSI context info from hardware as well as from VSI handle list
*/
-enum ice_status
+int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
if (!status)
@@ -897,12 +897,12 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware
*/
-enum ice_status
+int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
return ice_aq_update_vsi(hw, vsi_ctx, cd);
}
@@ -927,7 +927,7 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
else
ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
- return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL));
+ return ice_update_vsi(hw, vsi_handle, ctx, NULL);
}
/**
@@ -939,20 +939,20 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
*
* allocates or free a VSI list resource
*/
-static enum ice_status
+static int
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
struct ice_aqc_res_elem *vsi_ele;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(sw_buf, elem, 1);
sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
if (!sw_buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1);
if (lkup_type == ICE_SW_LKUP_MAC ||
@@ -966,7 +966,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
sw_buf->res_type =
cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
} else {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ice_aq_alloc_free_vsi_list_exit;
}
@@ -998,17 +998,17 @@ ice_aq_alloc_free_vsi_list_exit:
*
* Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
*/
-enum ice_status
+int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (opc != ice_aqc_opc_add_sw_rules &&
opc != ice_aqc_opc_update_sw_rules &&
opc != ice_aqc_opc_remove_sw_rules)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
@@ -1018,7 +1018,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
if (opc != ice_aqc_opc_add_sw_rules &&
hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
return status;
}
@@ -1032,7 +1032,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
*
* Add(0x0290)
*/
-static enum ice_status
+static int
ice_aq_add_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 num_recipes, struct ice_sq_cd *cd)
@@ -1069,18 +1069,18 @@ ice_aq_add_recipe(struct ice_hw *hw,
* The caller must supply enough space in s_recipe_list to hold all possible
* recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
*/
-static enum ice_status
+static int
ice_aq_get_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_recipe *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
u16 buf_size;
+ int status;
if (*num_recipes != ICE_MAX_NUM_RECIPES)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.add_get_recipe;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
@@ -1104,7 +1104,7 @@ ice_aq_get_recipe(struct ice_hw *hw,
* @cd: pointer to command details structure or NULL
* Recipe to profile association (0x0291)
*/
-static enum ice_status
+static int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
@@ -1130,13 +1130,13 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @cd: pointer to command details structure or NULL
* Associate profile ID with given recipe (0x0293)
*/
-static enum ice_status
+static int
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.recipe_to_profile;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
@@ -1154,16 +1154,16 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @hw: pointer to the hardware structure
* @rid: recipe ID returned as response to AQ call
*/
-static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
+static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(sw_buf, elem, 1);
sw_buf = kzalloc(buf_len, GFP_KERNEL);
if (!sw_buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1);
sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
@@ -1230,7 +1230,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
* bookkeeping so that we have a current list of all the recipes that are
* programmed in the firmware.
*/
-static enum ice_status
+static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
bool *refresh_required)
{
@@ -1238,16 +1238,16 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
struct ice_aqc_recipe_data_elem *tmp;
u16 num_recps = ICE_MAX_NUM_RECIPES;
struct ice_prot_lkup_ext *lkup_exts;
- enum ice_status status;
u8 fv_word_idx = 0;
u16 sub_recps;
+ int status;
bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
/* we need a buffer big enough to accommodate all the recipes */
tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
tmp[0].recipe_indx = rid;
status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
@@ -1284,7 +1284,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
GFP_KERNEL);
if (!rg_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll;
}
@@ -1364,7 +1364,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
GFP_KERNEL);
if (!recps[rid].root_buf) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll;
}
@@ -1407,19 +1407,19 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
+int ice_get_initial_sw_cfg(struct ice_hw *hw)
{
struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
- enum ice_status status;
u16 req_desc = 0;
u16 num_elems;
+ int status;
u16 i;
rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
GFP_KERNEL);
if (!rbuf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Multiple calls to ice_aq_get_sw_cfg may be required
* to get all the switch configuration information. The need
@@ -1670,7 +1670,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
* Create a large action to hold software marker and update the switch rule
* entry pointed by m_ent with newly created large action
*/
-static enum ice_status
+static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
u16 sw_marker, u16 l_id)
{
@@ -1681,14 +1681,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* 3. GENERIC VALUE action to hold the software marker ID
*/
const u16 num_lg_acts = 3;
- enum ice_status status;
u16 lg_act_size;
u16 rules_size;
+ int status;
u32 act;
u16 id;
if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Create two back-to-back switch rules and submit them to the HW using
* one memory buffer:
@@ -1699,7 +1699,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
if (!lg_act)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
@@ -1808,19 +1808,19 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* Call AQ command to add a new switch rule or update existing switch rule
* using the given VSI list ID
*/
-static enum ice_status
+static int
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
enum ice_sw_lkup_type lkup_type)
{
struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
u16 s_rule_size;
u16 rule_type;
+ int status;
int i;
if (!num_vsi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (lkup_type == ICE_SW_LKUP_MAC ||
lkup_type == ICE_SW_LKUP_MAC_VLAN ||
@@ -1834,15 +1834,15 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
else
- return ICE_ERR_PARAM;
+ return -EINVAL;
s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < num_vsi; i++) {
if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto exit;
}
/* AQ call requires hw_vsi_id(s) */
@@ -1869,11 +1869,11 @@ exit:
* @vsi_list_id: stores the ID of the VSI list to be created
* @lkup_type: switch rule filter's lookup type
*/
-static enum ice_status
+static int
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
- enum ice_status status;
+ int status;
status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
ice_aqc_opc_alloc_res);
@@ -1895,7 +1895,7 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* to the corresponding filter management list to track this switch rule
* and VSI mapping
*/
-static enum ice_status
+static int
ice_create_pkt_fwd_rule(struct ice_hw *hw,
struct ice_fltr_list_entry *f_entry)
{
@@ -1903,16 +1903,16 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
struct ice_aqc_sw_rules_elem *s_rule;
enum ice_sw_lkup_type l_type;
struct ice_sw_recipe *recp;
- enum ice_status status;
+ int status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
GFP_KERNEL);
if (!fm_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_create_pkt_fwd_rule_exit;
}
@@ -1959,16 +1959,16 @@ ice_create_pkt_fwd_rule_exit:
* Call AQ command to update a previously created switch rule with a
* VSI list ID
*/
-static enum ice_status
+static int
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
+ int status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
@@ -1988,13 +1988,13 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
*
* Updates unicast switch filter rules based on VEB/VEPA mode
*/
-enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
+int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *fm_entry;
- enum ice_status status = 0;
struct list_head *rule_head;
struct mutex *rule_lock; /* Lock to protect filter rule list */
+ int status = 0;
rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
@@ -2044,24 +2044,24 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
*/
-static enum ice_status
+static int
ice_add_update_vsi_list(struct ice_hw *hw,
struct ice_fltr_mgmt_list_entry *m_entry,
struct ice_fltr_info *cur_fltr,
struct ice_fltr_info *new_fltr)
{
- enum ice_status status = 0;
u16 vsi_list_id = 0;
+ int status = 0;
if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
(cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
/* Only one entry existed in the mapping and it was not already
@@ -2073,7 +2073,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
vsi_handle_arr[0] = cur_fltr->vsi_handle;
vsi_handle_arr[1] = new_fltr->vsi_handle;
@@ -2101,7 +2101,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
vsi_list_id);
if (!m_entry->vsi_list_info)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* If this entry was large action then the large action needs
* to be updated to point to FWD to VSI list
@@ -2116,7 +2116,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
enum ice_adminq_opc opcode;
if (!m_entry->vsi_list_info)
- return ICE_ERR_CFG;
+ return -EIO;
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
@@ -2209,7 +2209,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
*
* Adds or updates the rule lists for a given recipe
*/
-static enum ice_status
+static int
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
struct ice_fltr_list_entry *f_entry)
{
@@ -2217,10 +2217,10 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
struct ice_fltr_info *new_fltr, *cur_fltr;
struct ice_fltr_mgmt_list_entry *m_entry;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@@ -2255,18 +2255,18 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
* The VSI list should be emptied before this function is called to remove the
* VSI list.
*/
-static enum ice_status
+static int
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
enum ice_sw_lkup_type lkup_type)
{
struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
u16 s_rule_size;
+ int status;
s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
@@ -2288,21 +2288,21 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
* @fm_list: filter management entry for which the VSI list management needs to
* be done
*/
-static enum ice_status
+static int
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_mgmt_list_entry *fm_list)
{
enum ice_sw_lkup_type lkup_type;
- enum ice_status status = 0;
u16 vsi_list_id;
+ int status = 0;
if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
fm_list->vsi_count == 0)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* A rule with the VSI being removed does not exist */
if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
lkup_type = fm_list->fltr_info.lkup_type;
vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
@@ -2324,7 +2324,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
ICE_MAX_VSI);
if (!ice_is_vsi_valid(hw, rem_vsi_handle))
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* Make sure VSI list is empty before removing it below */
status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
@@ -2375,19 +2375,19 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
* @recp_id: recipe ID for which the rule needs to removed
* @f_entry: rule entry containing filter information
*/
-static enum ice_status
+static int
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
struct ice_fltr_list_entry *f_entry)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *list_elem;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
bool remove_rule = false;
u16 vsi_handle;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@@ -2395,14 +2395,14 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
mutex_lock(rule_lock);
list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
if (!list_elem) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto exit;
}
if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
remove_rule = true;
} else if (!list_elem->vsi_list_info) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto exit;
} else if (list_elem->vsi_list_info->ref_cnt > 1) {
/* a ref_cnt > 1 indicates that the vsi_list is being
@@ -2435,7 +2435,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
GFP_KERNEL);
if (!s_rule) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto exit;
}
@@ -2590,7 +2590,7 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
* check for duplicates in this case, removing duplicates from a given
* list should be taken care of in the caller of this function.
*/
-enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
+int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
@@ -2598,12 +2598,12 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
u16 total_elem_left, s_rule_size;
struct ice_switch_info *sw;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
u16 num_unicast = 0;
+ int status = 0;
u8 elem_sent;
if (!m_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
s_rule = NULL;
sw = hw->switch_info;
@@ -2616,23 +2616,23 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
m_list_itr->fltr_info.flag = ICE_FLTR_TX;
vsi_handle = m_list_itr->fltr_info.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* update the src in case it is VSI num */
if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
m_list_itr->fltr_info.src = hw_vsi_id;
if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
is_zero_ether_addr(add))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
/* Don't overwrite the unicast address */
mutex_lock(rule_lock);
if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
&m_list_itr->fltr_info)) {
mutex_unlock(rule_lock);
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
}
mutex_unlock(rule_lock);
num_unicast++;
@@ -2660,7 +2660,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
GFP_KERNEL);
if (!s_rule) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_add_mac_exit;
}
@@ -2710,7 +2710,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*fm_entry), GFP_KERNEL);
if (!fm_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_add_mac_exit;
}
fm_entry->fltr_info = *f_info;
@@ -2737,7 +2737,7 @@ ice_add_mac_exit:
* @hw: pointer to the hardware structure
* @f_entry: filter entry containing one VLAN information
*/
-static enum ice_status
+static int
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
struct ice_switch_info *sw = hw->switch_info;
@@ -2746,10 +2746,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
enum ice_sw_lkup_type lkup_type;
u16 vsi_list_id = 0, vsi_handle;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@@ -2757,10 +2757,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* VLAN ID should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (new_fltr->src_id != ICE_SRC_ID_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
lkup_type = new_fltr->lkup_type;
@@ -2799,7 +2799,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
new_fltr);
if (!v_list_itr) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto exit;
}
/* reuse VSI list for new rule and increment ref_cnt */
@@ -2835,7 +2835,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
if (v_list_itr->vsi_count > 1 &&
v_list_itr->vsi_list_info->ref_cnt > 1) {
ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
- status = ICE_ERR_CFG;
+ status = -EIO;
goto exit;
}
@@ -2845,7 +2845,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* A rule already exists with the new VSI being added */
if (cur_handle == vsi_handle) {
- status = ICE_ERR_ALREADY_EXISTS;
+ status = -EEXIST;
goto exit;
}
@@ -2890,16 +2890,16 @@ exit:
* @hw: pointer to the hardware structure
* @v_list: list of VLAN entries and forwarding information
*/
-enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
+int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr;
if (!v_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry(v_list_itr, v_list, list_entry) {
if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
- return ICE_ERR_PARAM;
+ return -EINVAL;
v_list_itr->fltr_info.flag = ICE_FLTR_TX;
v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
if (v_list_itr->status)
@@ -2917,13 +2917,12 @@ enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
* the filter list with the necessary fields (including flags to
* indicate Tx or Rx rules).
*/
-enum ice_status
-ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
struct ice_fltr_list_entry *em_list_itr;
if (!em_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry(em_list_itr, em_list, list_entry) {
enum ice_sw_lkup_type l_type =
@@ -2931,7 +2930,7 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
l_type != ICE_SW_LKUP_ETHERTYPE)
- return ICE_ERR_PARAM;
+ return -EINVAL;
em_list_itr->status = ice_add_rule_internal(hw, l_type,
em_list_itr);
@@ -2946,13 +2945,12 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
* @hw: pointer to the hardware structure
* @em_list: list of ethertype or ethertype MAC entries
*/
-enum ice_status
-ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
struct ice_fltr_list_entry *em_list_itr, *tmp;
if (!em_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
enum ice_sw_lkup_type l_type =
@@ -2960,7 +2958,7 @@ ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
l_type != ICE_SW_LKUP_ETHERTYPE)
- return ICE_ERR_PARAM;
+ return -EINVAL;
em_list_itr->status = ice_remove_rule_internal(hw, l_type,
em_list_itr);
@@ -3020,18 +3018,17 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
* add filter rule to set/unset given VSI as default VSI for the switch
* (represented by swid)
*/
-enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
+int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
{
struct ice_aqc_sw_rules_elem *s_rule;
struct ice_fltr_info f_info;
enum ice_adminq_opc opcode;
- enum ice_status status;
u16 s_rule_size;
u16 hw_vsi_id;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
@@ -3039,7 +3036,7 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
memset(&f_info, 0, sizeof(f_info));
@@ -3137,18 +3134,18 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
* This function removes either a MAC filter rule or a specific VSI from a
* VSI list for a multicast MAC address.
*
- * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
- * ice_add_mac. Caller should be aware that this call will only work if all
- * the entries passed into m_list were added previously. It will not attempt to
- * do a partial remove of entries that were found.
+ * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
+ * be aware that this call will only work if all the entries passed into m_list
+ * were added previously. It will not attempt to do a partial remove of entries
+ * that were found.
*/
-enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
+int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
struct mutex *rule_lock; /* Lock to protect filter rule list */
if (!m_list)
- return ICE_ERR_PARAM;
+ return -EINVAL;
rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
@@ -3157,11 +3154,11 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
u16 vsi_handle;
if (l_type != ICE_SW_LKUP_MAC)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_handle = list_itr->fltr_info.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_itr->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
@@ -3174,7 +3171,7 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
&list_itr->fltr_info)) {
mutex_unlock(rule_lock);
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
mutex_unlock(rule_lock);
}
@@ -3192,19 +3189,18 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
* @hw: pointer to the hardware structure
* @v_list: list of VLAN entries and forwarding information
*/
-enum ice_status
-ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
+int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr, *tmp;
if (!v_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
if (l_type != ICE_SW_LKUP_VLAN)
- return ICE_ERR_PARAM;
+ return -EINVAL;
v_list_itr->status = ice_remove_rule_internal(hw,
ICE_SW_LKUP_VLAN,
v_list_itr);
@@ -3242,7 +3238,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
* fltr_info.fwd_id fields. These are set such that later logic can
* extract which VSI to remove the fltr from, and pass on that information.
*/
-static enum ice_status
+static int
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct list_head *vsi_list_head,
struct ice_fltr_info *fi)
@@ -3254,7 +3250,7 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
*/
tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
tmp->fltr_info = *fi;
@@ -3285,17 +3281,17 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
* Note that this means all entries in vsi_list_head must be explicitly
* deallocated by the caller when done with list.
*/
-static enum ice_status
+static int
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct list_head *lkup_list_head,
struct list_head *vsi_list_head)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
- enum ice_status status = 0;
+ int status = 0;
/* check to make sure VSI ID is valid and within boundary */
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
@@ -3349,9 +3345,8 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
* @recp_id: recipe ID for which the rule needs to removed
* @v_list: list of promisc entries
*/
-static enum ice_status
-ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
- struct list_head *v_list)
+static int
+ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr, *tmp;
@@ -3371,7 +3366,7 @@ ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
* @promisc_mask: mask of promiscuous config bits to clear
* @vid: VLAN ID to clear VLAN promiscuous
*/
-enum ice_status
+int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid)
{
@@ -3381,11 +3376,11 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
struct ice_fltr_mgmt_list_entry *itr;
struct list_head *rule_head;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
+ int status = 0;
u8 recipe_id;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
@@ -3444,20 +3439,20 @@ free_fltr_list:
* @promisc_mask: mask of promiscuous config bits
* @vid: VLAN ID to set VLAN promiscuous
*/
-enum ice_status
+int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
struct ice_fltr_list_entry f_list_entry;
struct ice_fltr_info new_fltr;
- enum ice_status status = 0;
bool is_tx_fltr;
+ int status = 0;
u16 hw_vsi_id;
int pkt_type;
u8 recipe_id;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
memset(&new_fltr, 0, sizeof(new_fltr));
@@ -3558,7 +3553,7 @@ set_promisc_exit:
*
* Configure VSI with all associated VLANs to given promiscuous mode(s)
*/
-enum ice_status
+int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc)
{
@@ -3567,8 +3562,8 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
struct list_head vsi_list_head;
struct list_head *vlan_head;
struct mutex *vlan_lock; /* Lock to protect filter rule list */
- enum ice_status status;
u16 vlan_id;
+ int status;
INIT_LIST_HEAD(&vsi_list_head);
vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
@@ -3616,7 +3611,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
struct list_head *rule_head;
struct ice_fltr_list_entry *tmp;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status;
+ int status;
INIT_LIST_HEAD(&remove_list_head);
rule_lock = &sw->recp_list[lkup].filt_rule_lock;
@@ -3681,19 +3676,19 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
* @num_items: number of entries requested for FD resource type
* @counter_id: counter index returned by AQ call
*/
-enum ice_status
+int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
/* Allocate resource */
buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
@@ -3719,19 +3714,19 @@ exit:
* @num_items: number of entries to be freed for FD resource type
* @counter_id: counter ID resource which needs to be freed
*/
-enum ice_status
+int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
/* Free resource */
buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
@@ -3941,7 +3936,7 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
* and start grouping them in 4-word groups. Each group makes up one
* recipe.
*/
-static enum ice_status
+static int
ice_create_first_fit_recp_def(struct ice_hw *hw,
struct ice_prot_lkup_ext *lkup_exts,
struct list_head *rg_list,
@@ -3965,7 +3960,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw,
sizeof(*entry),
GFP_KERNEL);
if (!entry)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
list_add(&entry->l_entry, rg_list);
grp = &entry->r_group;
(*recp_cnt)++;
@@ -3991,7 +3986,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw,
* Helper function to fill in the field vector indices for protocol-offset
* pairs. These indexes are then ultimately programmed into a recipe.
*/
-static enum ice_status
+static int
ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
struct list_head *rg_list)
{
@@ -4033,7 +4028,7 @@ ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
* invalid pair
*/
if (!found)
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
}
@@ -4075,10 +4070,8 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
u16 bit;
- bitmap_zero(possible_idx, ICE_MAX_FV_WORDS);
bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
- bitmap_zero(free_idx, ICE_MAX_FV_WORDS);
bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
@@ -4115,7 +4108,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
* @rm: recipe management list entry
* @profiles: bitmap of profiles that will be associated.
*/
-static enum ice_status
+static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
unsigned long *profiles)
{
@@ -4123,11 +4116,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
struct ice_aqc_recipe_data_elem *tmp;
struct ice_aqc_recipe_data_elem *buf;
struct ice_recp_grp_entry *entry;
- enum ice_status status;
u16 free_res_idx;
u16 recipe_count;
u8 chain_idx;
u8 recps = 0;
+ int status;
/* When more than one recipe are required, another recipe is needed to
* chain them together. Matching a tunnel metadata ID takes up one of
@@ -4143,22 +4136,22 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
if (rm->n_grp_count > 1) {
if (rm->n_grp_count > free_res_idx)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
rm->n_grp_count++;
}
if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
GFP_KERNEL);
if (!buf) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_mem;
}
@@ -4218,7 +4211,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
*/
if (chain_idx >= ICE_MAX_FV_WORDS) {
ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
- status = ICE_ERR_MAX_LIMIT;
+ status = -ENOSPC;
goto err_unroll;
}
@@ -4249,7 +4242,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
sizeof(buf[0].recipe_bitmap));
} else {
- status = ICE_ERR_BAD_PTR;
+ status = -EINVAL;
goto err_unroll;
}
/* Applicable only for ROOT_RECIPE, set the fwd_priority for
@@ -4285,7 +4278,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
sizeof(*last_chain_entry),
GFP_KERNEL);
if (!last_chain_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll;
}
last_chain_entry->rid = rid;
@@ -4320,7 +4313,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
sizeof(buf[recps].recipe_bitmap));
} else {
- status = ICE_ERR_BAD_PTR;
+ status = -EINVAL;
goto err_unroll;
}
buf[recps].content.act_ctrl_fwd_priority = rm->priority;
@@ -4354,7 +4347,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
}
if (!idx_found) {
- status = ICE_ERR_OUT_OF_RANGE;
+ status = -EIO;
goto err_unroll;
}
@@ -4407,12 +4400,12 @@ err_mem:
* @rm: recipe management list entry
* @lkup_exts: lookup elements
*/
-static enum ice_status
+static int
ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
struct ice_prot_lkup_ext *lkup_exts)
{
- enum ice_status status;
u8 recp_count = 0;
+ int status;
rm->n_grp_count = 0;
@@ -4442,21 +4435,21 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
* @bm: bitmap of field vectors to consider
* @fv_list: pointer to a list that holds the returned field vectors
*/
-static enum ice_status
+static int
ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
unsigned long *bm, struct list_head *fv_list)
{
- enum ice_status status;
u8 *prot_ids;
+ int status;
u16 i;
prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
if (!prot_ids)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < lkups_cnt; i++)
if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto free_mem;
}
@@ -4493,7 +4486,7 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
* @rinfo: other information regarding the rule e.g. priority and action info
* @lkup_exts: lookup word structure
*/
-static enum ice_status
+static int
ice_add_special_words(struct ice_adv_rule_info *rinfo,
struct ice_prot_lkup_ext *lkup_exts)
{
@@ -4510,7 +4503,7 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo,
lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
lkup_exts->field_mask[word] = mask;
} else {
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
}
}
@@ -4561,7 +4554,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
* @rinfo: other information regarding the rule e.g. priority and action info
* @rid: return the recipe ID of the recipe created
*/
-static enum ice_status
+static int
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
{
@@ -4572,16 +4565,16 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_sw_fv_list_entry *fvit;
struct ice_recp_grp_entry *r_tmp;
struct ice_sw_fv_list_entry *tmp;
- enum ice_status status = 0;
struct ice_sw_recipe *rm;
+ int status = 0;
u8 i;
if (!lkups_cnt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
if (!lkup_exts)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Determine the number of words to be matched and if it exceeds a
* recipe's restrictions
@@ -4590,20 +4583,20 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 count;
if (lkups[i].type >= ICE_PROTOCOL_LAST) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_free_lkup_exts;
}
count = ice_fill_valid_words(&lkups[i], lkup_exts);
if (!count) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_free_lkup_exts;
}
}
rm = kzalloc(sizeof(*rm), GFP_KERNEL);
if (!rm) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_free_lkup_exts;
}
@@ -4849,7 +4842,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* @pkt_len: packet length of dummy packet
* @offsets: offset info for the dummy packet
*/
-static enum ice_status
+static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
struct ice_aqc_sw_rules_elem *s_rule,
const u8 *dummy_pkt, u16 pkt_len,
@@ -4883,7 +4876,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
}
/* this should never happen in a correct calling sequence */
if (!found)
- return ICE_ERR_PARAM;
+ return -EINVAL;
switch (lkups[i].type) {
case ICE_MAC_OFOS:
@@ -4920,12 +4913,12 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
len = sizeof(struct ice_udp_tnl_hdr);
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* the length should be a word multiple */
if (len % ICE_BYTES_PER_WORD)
- return ICE_ERR_CFG;
+ return -EIO;
/* We have the offset to the header start, the length, the
* caller's header values and mask. Use this information to
@@ -4955,7 +4948,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* @pkt: dummy packet to fill in
* @offsets: offset info for the dummy packet
*/
-static enum ice_status
+static int
ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
{
@@ -4964,11 +4957,11 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
switch (tun_type) {
case ICE_SW_TUN_VXLAN:
if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
- return ICE_ERR_CFG;
+ return -EIO;
break;
case ICE_SW_TUN_GENEVE:
if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
- return ICE_ERR_CFG;
+ return -EIO;
break;
default:
/* Nothing needs to be done for this tunnel type */
@@ -4989,7 +4982,7 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
}
}
- return ICE_ERR_CFG;
+ return -EIO;
}
/**
@@ -5054,25 +5047,25 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
*/
-static enum ice_status
+static int
ice_adv_add_update_vsi_list(struct ice_hw *hw,
struct ice_adv_fltr_mgmt_list_entry *m_entry,
struct ice_adv_rule_info *cur_fltr,
struct ice_adv_rule_info *new_fltr)
{
- enum ice_status status;
u16 vsi_list_id = 0;
+ int status;
if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
(cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
/* Only one entry existed in the mapping and it was not already
@@ -5085,7 +5078,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
new_fltr->sw_act.fwd_id.hw_vsi_id)
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
@@ -5118,7 +5111,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle = new_fltr->sw_act.vsi_handle;
if (!m_entry->vsi_list_info)
- return ICE_ERR_CFG;
+ return -EIO;
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
@@ -5160,7 +5153,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
* rinfo describes other information related to this rule such as forwarding
* IDs, priority of this rule, etc.
*/
-enum ice_status
+int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
struct ice_rule_query_data *added_entry)
@@ -5171,10 +5164,10 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_aqc_sw_rules_elem *s_rule = NULL;
struct list_head *rule_head;
struct ice_switch_info *sw;
- enum ice_status status;
const u8 *pkt = NULL;
u16 word_cnt;
u32 act = 0;
+ int status;
u8 q_rgn;
/* Initialize profile to result index bitmap */
@@ -5184,7 +5177,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (!lkups_cnt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* get # of words we need to match */
word_cnt = 0;
@@ -5198,13 +5191,13 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* make sure that we can locate a dummy packet */
ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
&pkt_offsets);
if (!pkt) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto err_ice_add_adv_rule;
}
@@ -5212,11 +5205,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
- return ICE_ERR_CFG;
+ return -EIO;
vsi_handle = rinfo->sw_act.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
rinfo->sw_act.fwd_id.hw_vsi_id =
@@ -5250,7 +5243,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
if (!rinfo->flags_info.act_valid) {
act |= ICE_SINGLE_ACT_LAN_ENABLE;
act |= ICE_SINGLE_ACT_LB_ENABLE;
@@ -5284,7 +5277,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
ICE_SINGLE_ACT_VALID_BIT;
break;
default:
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_ice_add_adv_rule;
}
@@ -5329,14 +5322,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
sizeof(struct ice_adv_fltr_mgmt_list_entry),
GFP_KERNEL);
if (!adv_fltr) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_adv_rule;
}
adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
lkups_cnt * sizeof(*lkups), GFP_KERNEL);
if (!adv_fltr->lkups) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_adv_rule;
}
@@ -5379,12 +5372,12 @@ err_ice_add_adv_rule:
* Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
* It is required to pass valid VSI handle.
*/
-static enum ice_status
+static int
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
struct list_head *list_head)
{
struct ice_fltr_mgmt_list_entry *itr;
- enum ice_status status = 0;
+ int status = 0;
u16 hw_vsi_id;
if (list_empty(list_head))
@@ -5433,22 +5426,22 @@ end:
* @fm_list: filter management entry for which the VSI list management needs to
* be done
*/
-static enum ice_status
+static int
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
struct ice_vsi_list_map_info *vsi_list_info;
enum ice_sw_lkup_type lkup_type;
- enum ice_status status;
u16 vsi_list_id;
+ int status;
if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
fm_list->vsi_count == 0)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* A rule with the VSI being removed does not exist */
if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
lkup_type = ICE_SW_LKUP_LAST;
vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
@@ -5468,7 +5461,7 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
ICE_MAX_VSI);
if (!ice_is_vsi_valid(hw, rem_vsi_handle))
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* Make sure VSI list is empty before removing it below */
status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
@@ -5532,27 +5525,27 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
* header. rinfo describes other information related to this rule such as
* forwarding IDs, priority of this rule, etc.
*/
-static enum ice_status
+static int
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
struct ice_adv_fltr_mgmt_list_entry *list_elem;
struct ice_prot_lkup_ext lkup_exts;
- enum ice_status status = 0;
bool remove_rule = false;
struct mutex *rule_lock; /* Lock to protect filter rule list */
u16 i, rid, vsi_handle;
+ int status = 0;
memset(&lkup_exts, 0, sizeof(lkup_exts));
for (i = 0; i < lkups_cnt; i++) {
u16 count;
if (lkups[i].type >= ICE_PROTOCOL_LAST)
- return ICE_ERR_CFG;
+ return -EIO;
count = ice_fill_valid_words(&lkups[i], &lkup_exts);
if (!count)
- return ICE_ERR_CFG;
+ return -EIO;
}
/* Create any special protocol/offset pairs, such as looking at tunnel
@@ -5565,7 +5558,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
/* If we did not find a recipe that matches the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
- return ICE_ERR_PARAM;
+ return -EINVAL;
rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
@@ -5597,7 +5590,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
s_rule->pdata.lkup_tx_rx.act = 0;
s_rule->pdata.lkup_tx_rx.index =
cpu_to_le16(list_elem->rule_info.fltr_rule_id);
@@ -5605,7 +5598,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
rule_buf_sz, 1,
ice_aqc_opc_remove_sw_rules, NULL);
- if (!status || status == ICE_ERR_DOES_NOT_EXIST) {
+ if (!status || status == -ENOENT) {
struct ice_switch_info *sw = hw->switch_info;
mutex_lock(rule_lock);
@@ -5630,7 +5623,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* the remove_entry parameter. This function will remove rule for a given
* vsi_handle with a given rule_id which is passed as parameter in remove_entry
*/
-enum ice_status
+int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
struct ice_rule_query_data *remove_entry)
{
@@ -5641,7 +5634,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
sw = hw->switch_info;
if (!sw->recp_list[remove_entry->rid].recp_created)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_head = &sw->recp_list[remove_entry->rid].filt_rules;
list_for_each_entry(list_itr, list_head, list_entry) {
if (list_itr->rule_info.fltr_rule_id ==
@@ -5653,7 +5646,92 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
}
}
/* either list is empty or unable to find rule */
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
+}
+
+/**
+ * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
+ * given VSI handle
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
+ *
+ * Remove all the advanced rules for a given VSI. If removing any rule
+ * fails, return immediately with that error code; otherwise return
+ * success.
+ */
+int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
+ struct ice_vsi_list_map_info *map_info;
+ struct ice_adv_rule_info rinfo;
+ struct list_head *list_head;
+ struct ice_switch_info *sw;
+ int status;
+ u8 rid;
+
+ sw = hw->switch_info;
+ for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
+ if (!sw->recp_list[rid].recp_created)
+ continue;
+ if (!sw->recp_list[rid].adv_rule)
+ continue;
+
+ list_head = &sw->recp_list[rid].filt_rules;
+ list_for_each_entry_safe(list_itr, tmp_entry, list_head,
+ list_entry) {
+ rinfo = list_itr->rule_info;
+
+ if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
+ map_info = list_itr->vsi_list_info;
+ if (!map_info)
+ continue;
+
+ if (!test_bit(vsi_handle, map_info->vsi_map))
+ continue;
+ } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
+ continue;
+ }
+
+ rinfo.sw_act.vsi_handle = vsi_handle;
+ status = ice_rem_adv_rule(hw, list_itr->lkups,
+ list_itr->lkups_cnt, &rinfo);
+ if (status)
+ return status;
+ }
+ }
+ return 0;
+}
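
As a usage illustration only (this caller is hypothetical and not part of the patch, assuming the usual ice driver headers), a teardown path could invoke the helper above like so:

	/* Hypothetical caller: drop a VSI's advanced rules during
	 * teardown and log (but tolerate) a failure.
	 */
	static void example_release_vsi_rules(struct ice_hw *hw, u16 vsi_handle)
	{
		int err = ice_rem_adv_rule_for_vsi(hw, vsi_handle);

		if (err)
			dev_dbg(ice_hw_to_dev(hw),
				"failed to remove advanced rules for VSI %u, err %d\n",
				vsi_handle, err);
	}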
+
+/**
+ * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: driver VSI handle
+ * @list_head: list for which filters need to be replayed
+ *
+ * Replay the advanced rule for the given VSI.
+ */
+static int
+ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
+ struct list_head *list_head)
+{
+ struct ice_rule_query_data added_entry = { 0 };
+ struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
+ int status = 0;
+
+ if (list_empty(list_head))
+ return status;
+ list_for_each_entry(adv_fltr, list_head, list_entry) {
+ struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
+ u16 lk_cnt = adv_fltr->lkups_cnt;
+
+ if (vsi_handle != rinfo->sw_act.vsi_handle)
+ continue;
+ status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
+ &added_entry);
+ if (status)
+ break;
+ }
+ return status;
}
/**
@@ -5663,17 +5741,20 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
*
* Replays filters for requested VSI via vsi_handle.
*/
-enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_switch_info *sw = hw->switch_info;
- enum ice_status status = 0;
+ int status;
u8 i;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
struct list_head *head;
head = &sw->recp_list[i].filt_replay_rules;
- status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
+ if (!sw->recp_list[i].adv_rule)
+ status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
+ else
+ status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
if (status)
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index d8a38906f16f..d8334beaaa8a 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -256,7 +256,7 @@ struct ice_vsi_list_map_info {
struct ice_fltr_list_entry {
struct list_head list_entry;
- enum ice_status status;
+ int status;
struct ice_fltr_info fltr_info;
};
@@ -302,75 +302,69 @@ enum ice_promisc_flags {
};
/* VSI related commands */
-enum ice_status
+int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
void ice_clear_all_vsi_ctx(struct ice_hw *hw);
/* Switch config */
-enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
+int ice_get_initial_sw_cfg(struct ice_hw *hw);
-enum ice_status
+int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id);
-enum ice_status
+int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id);
/* Switch/bridge related commands */
-enum ice_status
+int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
struct ice_rule_query_data *added_entry);
-enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
-enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
-enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
-enum ice_status
-ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
-enum ice_status
-ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
-int
-ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
+int ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
+int ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
+int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
+int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
+int ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle);
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle);
+int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+int ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
-ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
-enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
/* Promisc/defport setup for VSIs */
-enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
-enum ice_status
+int ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
+int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
-enum ice_status
+int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
-enum ice_status
+int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
-enum ice_status
-ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
+int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
struct ice_rule_query_data *remove_entry);
-enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
+int ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
+int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */
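
Taken together, the hunks in this header and in ice_switch.c apply one fixed substitution from the legacy enum ice_status codes to standard errnos. The sketch below summarizes that mapping as seen in the hunks above; the legacy constants are deleted by this series, so stand-in names are declared here purely for illustration:

	#include <linux/errno.h>

	/* Stand-ins for the removed enum ice_status values, used only to
	 * document the substitution pattern applied by this patch.
	 */
	enum old_ice_status {
		OLD_ICE_ERR_PARAM, OLD_ICE_ERR_BAD_PTR, OLD_ICE_ERR_NO_MEMORY,
		OLD_ICE_ERR_MAX_LIMIT, OLD_ICE_ERR_CFG, OLD_ICE_ERR_OUT_OF_RANGE,
		OLD_ICE_ERR_DOES_NOT_EXIST, OLD_ICE_ERR_ALREADY_EXISTS,
		OLD_ICE_ERR_NOT_IMPL,
	};

	static int old_ice_status_to_errno(enum old_ice_status s)
	{
		switch (s) {
		case OLD_ICE_ERR_PARAM:
		case OLD_ICE_ERR_BAD_PTR:
			return -EINVAL;		/* bad argument or pointer */
		case OLD_ICE_ERR_NO_MEMORY:
			return -ENOMEM;		/* allocation failure */
		case OLD_ICE_ERR_MAX_LIMIT:
			return -ENOSPC;		/* resource limit reached */
		case OLD_ICE_ERR_CFG:
		case OLD_ICE_ERR_OUT_OF_RANGE:
			return -EIO;		/* internal/configuration error */
		case OLD_ICE_ERR_DOES_NOT_EXIST:
			return -ENOENT;		/* entry not found */
		case OLD_ICE_ERR_ALREADY_EXISTS:
			return -EEXIST;		/* duplicate entry */
		case OLD_ICE_ERR_NOT_IMPL:
			return -EOPNOTSUPP;	/* action not implemented */
		}
		return -EIO;
	}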
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 25cca5c4ae57..e8aab664270a 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -398,9 +398,8 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
struct ice_hw *hw = &vsi->back->hw;
struct ice_adv_lkup_elem *list;
u32 flags = fltr->flags;
- enum ice_status status;
int lkups_cnt;
- int ret = 0;
+ int ret;
int i;
if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
@@ -449,14 +448,13 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = fltr->cookie;
- status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
- if (status == ICE_ERR_ALREADY_EXISTS) {
+ ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
+ if (ret == -EEXIST) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
- } else if (status) {
+ } else if (ret) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
- ret = -EIO;
goto exit;
}
@@ -1162,7 +1160,7 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_rem.vsi_handle = fltr->dest_id;
err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
if (err) {
- if (err == ICE_ERR_DOES_NOT_EXIST) {
+ if (err == -ENOENT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
return -ENOENT;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index bc3ba19dc88f..3e38695f1c9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -3,8 +3,9 @@
/* The driver transmit and receive code */
-#include <linux/prefetch.h>
#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/xdp.h>
@@ -219,6 +220,10 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
+ /* get the bql data ready */
+ if (!ice_ring_is_xdp(tx_ring))
+ netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
+
tx_buf = &tx_ring->tx_buf[i];
tx_desc = ICE_TX_DESC(tx_ring, i);
i -= tx_ring->count;
@@ -232,6 +237,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
if (!eop_desc)
break;
+ /* follow the guidelines of other drivers */
+ prefetchw(&tx_buf->skb->users);
+
smp_rmb(); /* prevent any other reads prior to eop_desc */
ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
@@ -304,8 +312,10 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
- netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
- total_bytes);
+ if (ice_ring_is_xdp(tx_ring))
+ return !!budget;
+
+ netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
@@ -314,11 +324,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
* sees the new next_to_clean.
*/
smp_mb();
- if (__netif_subqueue_stopped(tx_ring->netdev,
- tx_ring->q_index) &&
+ if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
!test_bit(ICE_VSI_DOWN, vsi->state)) {
- netif_wake_subqueue(tx_ring->netdev,
- tx_ring->q_index);
+ netif_tx_wake_queue(txring_txq(tx_ring));
++tx_ring->tx_stats.restart_q;
}
}
@@ -419,7 +427,10 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
}
rx_skip_free:
- memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
+ if (rx_ring->xsk_pool)
+ memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
+ else
+ memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
/* Zero out the descriptor ring */
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -446,8 +457,13 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
rx_ring->xdp_prog = NULL;
- devm_kfree(rx_ring->dev, rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
+ if (rx_ring->xsk_pool) {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+ } else {
+ kfree(rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+ }
if (rx_ring->desc) {
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -475,8 +491,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_buf);
rx_ring->rx_buf =
- devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
- GFP_KERNEL);
+ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
if (!rx_ring->rx_buf)
return -ENOMEM;
@@ -505,7 +520,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
return 0;
err:
- devm_kfree(dev, rx_ring->rx_buf);
+ kfree(rx_ring->rx_buf);
rx_ring->rx_buf = NULL;
return -ENOMEM;
}
@@ -561,7 +576,7 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
goto out_failure;
return ICE_XDP_REDIR;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -933,7 +948,7 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -1517,7 +1532,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
*/
static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
- netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
+ netif_tx_stop_queue(txring_txq(tx_ring));
/* Memory barrier before checking head and tail */
smp_mb();
@@ -1525,8 +1540,8 @@ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
if (likely(ICE_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
- /* A reprieve! - use start_subqueue because it doesn't call schedule */
- netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_tx_start_queue(txring_txq(tx_ring));
++tx_ring->tx_stats.restart_q;
return 0;
}
@@ -1568,6 +1583,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
struct sk_buff *skb;
skb_frag_t *frag;
dma_addr_t dma;
+ bool kick;
td_tag = off->td_l2tag1;
td_cmd = off->td_cmd;
@@ -1649,9 +1665,6 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
tx_buf = &tx_ring->tx_buf[i];
}
- /* record bytecount for BQL */
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
-
/* record SW timestamp if HW timestamp is not available */
skb_tx_timestamp(first->skb);
@@ -1680,7 +1693,10 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
+ kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
+ netdev_xmit_more());
+ if (kick)
+ /* notify HW of packet */
writel(i, tx_ring->tail);
return;
@@ -2265,6 +2281,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
return NETDEV_TX_BUSY;
}
+ /* prefetch for bql data which is infrequently used */
+ netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));
+
offload.tx_ring = tx_ring;
/* record the location of the first descriptor for this packet */
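
The ice_txrx.c hunks above convert the Tx path to the batched BQL API: bytes are accounted with __netdev_tx_sent_queue() at transmit time (its return value says whether the tail doorbell must be rung), completed with netdev_tx_completed_queue() in the clean routine, and the BQL cachelines are prefetched on both paths. A minimal sketch of the transmit side, with a hypothetical helper name not taken from the driver:

	#include <linux/netdevice.h>
	#include <linux/io.h>

	/* Hypothetical helper: ring the tail doorbell only when BQL says
	 * it is needed, as the ice_tx_map() hunk above now does.
	 */
	static void example_xmit_tail(struct netdev_queue *txq, unsigned int bytes,
				      void __iomem *tail, u32 next_to_use)
	{
		/* __netdev_tx_sent_queue() accounts @bytes for BQL and
		 * returns true when the doorbell must be rung, i.e. the
		 * queue was stopped or no further frames are pending
		 * (xmit_more false).
		 */
		if (__netdev_tx_sent_queue(txq, bytes, netdev_xmit_more()))
			writel(next_to_use, tail);
	}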
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index c56dd1749903..b7b3bd4816f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -24,7 +24,6 @@
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
-#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
/* Attempt to maximize the headroom available for incoming frames. We use a 2K
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 1dd7e84f41f8..0e87b98e0966 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */
+#include <linux/filter.h>
+
#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
#include "ice_lib.h"
@@ -187,8 +189,7 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
ice_rx_hash(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, ice_eswitch_get_target_netdev
- (rx_ring, rx_desc));
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 9e0c2923c62e..546145dd1f02 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -6,8 +6,8 @@
#define ICE_BYTES_PER_WORD 2
#define ICE_BYTES_PER_DWORD 4
+#define ICE_CHNL_MAX_TC 16
-#include "ice_status.h"
#include "ice_hw_autogen.h"
#include "ice_osdep.h"
#include "ice_controlq.h"
@@ -230,8 +230,8 @@ enum ice_fd_hw_seg {
ICE_FD_HW_SEG_MAX,
};
-/* 2 VSI = 1 ICE_VSI_PF + 1 ICE_VSI_CTRL */
-#define ICE_MAX_FDIR_VSI_PER_FILTER 2
+/* 1 ICE_VSI_PF + 1 ICE_VSI_CTRL + ICE_CHNL_MAX_TC */
+#define ICE_MAX_FDIR_VSI_PER_FILTER (2 + ICE_CHNL_MAX_TC)
struct ice_fd_hw_prof {
struct ice_flow_seg_info *fdir_seg[ICE_FD_HW_SEG_MAX];
@@ -279,6 +279,10 @@ struct ice_hw_common_caps {
#define ICE_NVM_PENDING_NETLIST BIT(2)
bool nvm_unified_update;
#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance;
+ /* Post update reset restriction */
+ bool reset_restrict_support;
};
/* IEEE 1588 TIME_SYNC specific info */
@@ -295,9 +299,30 @@ struct ice_hw_common_caps {
#define ICE_TS_TMR_IDX_ASSOC_S 24
#define ICE_TS_TMR_IDX_ASSOC_M BIT(24)
+/* TIME_REF clock rate specification */
+enum ice_time_ref_freq {
+ ICE_TIME_REF_FREQ_25_000 = 0,
+ ICE_TIME_REF_FREQ_122_880 = 1,
+ ICE_TIME_REF_FREQ_125_000 = 2,
+ ICE_TIME_REF_FREQ_153_600 = 3,
+ ICE_TIME_REF_FREQ_156_250 = 4,
+ ICE_TIME_REF_FREQ_245_760 = 5,
+
+ NUM_ICE_TIME_REF_FREQ
+};
+
+/* Clock source specification */
+enum ice_clk_src {
+ ICE_CLK_SRC_TCX0 = 0, /* Temperature compensated oscillator */
+ ICE_CLK_SRC_TIME_REF = 1, /* Use TIME_REF reference clock */
+
+ NUM_ICE_CLK_SRC
+};
+
struct ice_ts_func_info {
/* Function specific info */
- u32 clk_freq;
+ enum ice_time_ref_freq time_ref;
+ u8 clk_freq;
u8 clk_src;
u8 tmr_index_assoc;
u8 ena;
@@ -873,8 +898,6 @@ struct ice_hw {
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
- enum ice_aq_err pkg_dwnld_status;
-
/* Driver's package ver - (from the Ice Metadata section) */
struct ice_pkg_ver pkg_ver;
u8 pkg_name[ICE_PKG_NAME_SIZE];
@@ -919,6 +942,7 @@ struct ice_hw {
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
+ DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX);
u16 io_expander_handle;
};
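
The new ice_time_ref_freq values encode the supported TIME_REF rates in their names, from 25.000 MHz up to 245.760 MHz. Purely as an illustration (no such table exists in the patch), the enum could index a rate lookup:

	/* Illustration only; rates inferred from the enum names above. */
	static const u32 example_time_ref_khz[NUM_ICE_TIME_REF_FREQ] = {
		[ICE_TIME_REF_FREQ_25_000]  =  25000,	/* 25.000 MHz */
		[ICE_TIME_REF_FREQ_122_880] = 122880,	/* 122.880 MHz */
		[ICE_TIME_REF_FREQ_125_000] = 125000,	/* 125.000 MHz */
		[ICE_TIME_REF_FREQ_153_600] = 153600,	/* 153.600 MHz */
		[ICE_TIME_REF_FREQ_156_250] = 156250,	/* 156.250 MHz */
		[ICE_TIME_REF_FREQ_245_760] = 245760,	/* 245.760 MHz */
	};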
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index eee180d8c024..d64df81d4893 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -47,197 +47,6 @@ struct virtchnl_fdir_fltr_conf {
u32 flow_id;
};
-static enum virtchnl_proto_hdr_type vc_pattern_ether[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_TCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_SCTP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_TCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_SCTP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_GTPU_IP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_GTPU_IP,
- VIRTCHNL_PROTO_HDR_GTPU_EH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_L2TPV3,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_L2TPV3,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_AH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_AH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_PFCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_PFCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-struct virtchnl_fdir_pattern_match_item {
- enum virtchnl_proto_hdr_type *list;
- u64 input_set;
- u64 *meta;
-};
-
-static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = {
- {vc_pattern_ipv4, 0, NULL},
- {vc_pattern_ipv4_tcp, 0, NULL},
- {vc_pattern_ipv4_udp, 0, NULL},
- {vc_pattern_ipv4_sctp, 0, NULL},
- {vc_pattern_ipv6, 0, NULL},
- {vc_pattern_ipv6_tcp, 0, NULL},
- {vc_pattern_ipv6_udp, 0, NULL},
- {vc_pattern_ipv6_sctp, 0, NULL},
-};
-
-static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = {
- {vc_pattern_ipv4, 0, NULL},
- {vc_pattern_ipv4_tcp, 0, NULL},
- {vc_pattern_ipv4_udp, 0, NULL},
- {vc_pattern_ipv4_sctp, 0, NULL},
- {vc_pattern_ipv6, 0, NULL},
- {vc_pattern_ipv6_tcp, 0, NULL},
- {vc_pattern_ipv6_udp, 0, NULL},
- {vc_pattern_ipv6_sctp, 0, NULL},
- {vc_pattern_ether, 0, NULL},
- {vc_pattern_ipv4_gtpu, 0, NULL},
- {vc_pattern_ipv4_gtpu_eh, 0, NULL},
- {vc_pattern_ipv4_l2tpv3, 0, NULL},
- {vc_pattern_ipv6_l2tpv3, 0, NULL},
- {vc_pattern_ipv4_esp, 0, NULL},
- {vc_pattern_ipv6_esp, 0, NULL},
- {vc_pattern_ipv4_ah, 0, NULL},
- {vc_pattern_ipv6_ah, 0, NULL},
- {vc_pattern_ipv4_nat_t_esp, 0, NULL},
- {vc_pattern_ipv6_nat_t_esp, 0, NULL},
- {vc_pattern_ipv4_pfcp, 0, NULL},
- {vc_pattern_ipv6_pfcp, 0, NULL},
-};
-
struct virtchnl_fdir_inset_map {
enum virtchnl_proto_hdr_field field;
enum ice_flow_field fld;
@@ -751,7 +560,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
struct ice_flow_seg_info *old_seg;
struct ice_flow_prof *prof = NULL;
struct ice_fd_hw_prof *vf_prof;
- enum ice_status status;
struct device *dev;
struct ice_pf *pf;
struct ice_hw *hw;
@@ -794,29 +602,26 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
tun ? ICE_FLTR_PTYPE_MAX : 0);
- status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- tun + 1, &prof);
- ret = ice_status_to_errno(status);
+ ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ tun + 1, &prof);
if (ret) {
dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
flow, vf->vf_id);
goto err_exit;
}
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
- vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry1_h);
- ret = ice_status_to_errno(status);
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
if (ret) {
dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
flow, vf->vf_id);
goto err_prof;
}
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
- ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry2_h);
- ret = ice_status_to_errno(status);
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
if (ret) {
dev_dbg(dev,
"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
@@ -911,83 +716,6 @@ err_exit:
}
/**
- * ice_vc_fdir_match_pattern
- * @fltr: virtual channel add cmd buffer
- * @type: virtual channel protocol filter header type
- *
- * Matching the header type by comparing fltr and type's value.
- *
- * Return: true on success, and false on error.
- */
-static bool
-ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr,
- enum virtchnl_proto_hdr_type *type)
-{
- struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
- int i = 0;
-
- while ((i < proto->count) &&
- (*type == proto->proto_hdr[i].type) &&
- (*type != VIRTCHNL_PROTO_HDR_NONE)) {
- type++;
- i++;
- }
-
- return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE));
-}
-
-/**
- * ice_vc_fdir_get_pattern - get while list pattern
- * @vf: pointer to the VF info
- * @len: filter list length
- *
- * Return: pointer to allowed filter list
- */
-static const struct virtchnl_fdir_pattern_match_item *
-ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len)
-{
- const struct virtchnl_fdir_pattern_match_item *item;
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
-
- hw = &pf->hw;
- if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
- sizeof(hw->active_pkg_name))) {
- item = vc_fdir_pattern_comms;
- *len = ARRAY_SIZE(vc_fdir_pattern_comms);
- } else {
- item = vc_fdir_pattern_os;
- *len = ARRAY_SIZE(vc_fdir_pattern_os);
- }
-
- return item;
-}
-
-/**
- * ice_vc_fdir_search_pattern
- * @vf: pointer to the VF info
- * @fltr: virtual channel add cmd buffer
- *
- * Search for matched pattern from supported pattern list
- *
- * Return: 0 on success, and other on error.
- */
-static int
-ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr)
-{
- const struct virtchnl_fdir_pattern_match_item *pattern;
- int len, i;
-
- pattern = ice_vc_fdir_get_pattern(vf, &len);
-
- for (i = 0; i < len; i++)
- if (ice_vc_fdir_match_pattern(fltr, pattern[i].list))
- return 0;
-
- return -EINVAL;
-}
-
-/**
* ice_vc_fdir_parse_pattern
* @vf: pointer to the VF info
* @fltr: virtual channel add cmd buffer
@@ -1299,11 +1027,11 @@ static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_fdir_fltr_conf *conf)
{
+ struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
int ret;
- ret = ice_vc_fdir_search_pattern(vf, fltr);
- if (ret)
- return ret;
+ if (!ice_vc_validate_pattern(vf, proto))
+ return -EINVAL;
ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
if (ret)
@@ -1467,7 +1195,6 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
struct ice_fdir_fltr *input = &conf->input;
struct ice_vsi *vsi, *ctrl_vsi;
struct ice_fltr_desc desc;
- enum ice_status status;
struct device *dev;
struct ice_pf *pf;
struct ice_hw *hw;
@@ -1497,8 +1224,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
return -ENOMEM;
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
- ret = ice_status_to_errno(status);
+ ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
if (ret) {
dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
vf->vf_id, input->flow_type);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 6427e7ec93de..39b80124d282 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -9,6 +9,7 @@
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
+#include "ice_flex_pipe.h"
#define FIELD_SELECTOR(proto_hdr_field) \
BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
@@ -18,18 +19,7 @@ struct ice_vc_hdr_match_type {
u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
};
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = {
- {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
- {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
- {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
- {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
-};
-
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = {
+static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
{VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
{VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
{VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
@@ -67,83 +57,7 @@ struct ice_vc_hash_field_match_type {
};
static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = {
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- ICE_FLOW_HASH_IPV4},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- ICE_FLOW_HASH_IPV6},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- ICE_FLOW_HASH_TCP_PORT},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- ICE_FLOW_HASH_UDP_PORT},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- ICE_FLOW_HASH_SCTP_PORT},
-};
-
-static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
+ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
@@ -289,37 +203,6 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
}
/**
- * ice_err_to_virt_err - translate errors for VF return code
- * @ice_err: error return code
- */
-static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
-{
- switch (ice_err) {
- case ICE_SUCCESS:
- return VIRTCHNL_STATUS_SUCCESS;
- case ICE_ERR_BAD_PTR:
- case ICE_ERR_INVAL_SIZE:
- case ICE_ERR_DEVICE_NOT_SUPPORTED:
- case ICE_ERR_PARAM:
- case ICE_ERR_CFG:
- return VIRTCHNL_STATUS_ERR_PARAM;
- case ICE_ERR_NO_MEMORY:
- return VIRTCHNL_STATUS_ERR_NO_MEMORY;
- case ICE_ERR_NOT_READY:
- case ICE_ERR_RESET_FAILED:
- case ICE_ERR_FW_API_VER:
- case ICE_ERR_AQ_ERROR:
- case ICE_ERR_AQ_TIMEOUT:
- case ICE_ERR_AQ_FULL:
- case ICE_ERR_AQ_NO_WORK:
- case ICE_ERR_AQ_EMPTY:
- return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
- default:
- return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- }
-}
-
-/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -770,8 +653,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
struct ice_hw *hw = &vsi->back->hw;
struct ice_aqc_vsi_props *info;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
+ int ret;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
@@ -794,12 +676,10 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
ICE_AQ_VSI_PROP_SW_VALID);
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -968,8 +848,8 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
struct device *dev = ice_pf_to_dev(vf->pf);
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- enum ice_status status;
u8 broadcast[ETH_ALEN];
+ int status;
if (ice_is_eswitch_mode_switchdev(vf->pf))
return 0;
@@ -977,9 +857,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
eth_broadcast_addr(broadcast);
status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
if (status) {
- dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
- vf->vf_id, ice_stat_str(status));
- return ice_status_to_errno(status);
+ dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
+ vf->vf_id, status);
+ return status;
}
vf->num_mac++;
@@ -988,10 +868,10 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
if (status) {
- dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
+ dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
&vf->hw_lan_addr.addr[0], vf->vf_id,
- ice_stat_str(status));
- return ice_status_to_errno(status);
+ status);
+ return status;
}
vf->num_mac++;
@@ -1341,45 +1221,50 @@ static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
ice_flush(hw);
}
-/**
- * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
- * @vf: pointer to the VF info
- * @vsi: the VSI being configured
- * @promisc_m: mask of promiscuous config bits
- * @rm_promisc: promisc flag request from the VF to remove or add filter
- *
- * This function configures VF VSI promiscuous mode, based on the VF requests,
- * for Unicast, Multicast and VLAN
- */
-static enum ice_status
-ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
- bool rm_promisc)
+static int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
{
- struct ice_pf *pf = vf->pf;
- enum ice_status status = 0;
- struct ice_hw *hw;
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
- hw = &pf->hw;
- if (vsi->num_vlan) {
- status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
- rm_promisc);
- } else if (vf->port_vlan_info) {
- if (rm_promisc)
- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info);
- else
- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info);
- } else {
- if (rm_promisc)
- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
- else
- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
+ if (vf->port_vlan_info)
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_info & VLAN_VID_MASK);
+ else if (vsi->num_vlan > 1)
+ status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -EEXIST) {
+ dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+static int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
+
+ if (vf->port_vlan_info)
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_info & VLAN_VID_MASK);
+ else if (vsi->num_vlan > 1)
+ status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -ENOENT) {
+ dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
}
- return status;
+ return 0;
}
static void ice_vf_clear_counters(struct ice_vf *vf)
@@ -1415,8 +1300,8 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
if (!vsi->agg_node)
return;
@@ -1743,10 +1628,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
else
promisc_m = ICE_UCAST_PROMISC_BITS;
- if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
+ if (ice_vf_clear_vsi_promisc(vf, vsi, promisc_m))
dev_err(dev, "disabling promiscuous mode failed\n");
}
+ ice_eswitch_del_vf_mac_rule(vf);
+
ice_vf_fdir_exit(vf);
ice_vf_fdir_init(vf);
/* clean VF control VSI when resetting VF since it should be setup
@@ -1765,6 +1652,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
ice_vf_post_vsi_rebuild(vf);
vsi = ice_get_vf_vsi(vf);
ice_eswitch_update_repr(vsi);
+ ice_eswitch_replay_vf_mac_rule(vf);
/* if the VF has been reset allow it to come up again */
if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
@@ -1846,7 +1734,6 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
u8 broadcast[ETH_ALEN];
- enum ice_status status;
struct ice_vsi *vsi;
struct device *dev;
int err;
@@ -1866,11 +1753,10 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
}
eth_broadcast_addr(broadcast);
- status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
- vf->vf_id, ice_stat_str(status));
- err = ice_status_to_errno(status);
+ err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
+ vf->vf_id, err);
goto release_vsi;
}
@@ -2116,7 +2002,6 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
struct device *dev = ice_pf_to_dev(pf);
- enum ice_status status;
int err;
err = ice_check_sriov_allowed(pf);
@@ -2136,9 +2021,9 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
return -EBUSY;
}
- status = ice_mbx_init_snapshot(&pf->hw, num_vfs);
- if (status)
- return ice_status_to_errno(status);
+ err = ice_mbx_init_snapshot(&pf->hw, num_vfs);
+ if (err)
+ return err;
err = ice_pci_sriov_ena(pf, num_vfs);
if (err) {
@@ -2272,9 +2157,9 @@ int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
- enum ice_status aq_ret;
struct device *dev;
struct ice_pf *pf;
+ int aq_ret;
if (!vf)
return -EINVAL;
@@ -2306,8 +2191,8 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
- dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
- vf->vf_id, ice_stat_str(aq_ret),
+ dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
+ vf->vf_id, aq_ret,
ice_aq_str(pf->hw.mailboxq.sq_last_status));
return -EIO;
}
@@ -2556,6 +2441,100 @@ static bool ice_vc_isvalid_ring_len(u16 ring_len)
}
/**
+ * ice_vc_validate_pattern
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ *
+ * Validate whether the given pattern is supported.
+ *
+ * Return: true on success, false on error.
+ */
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
+{
+ bool is_ipv4 = false;
+ bool is_ipv6 = false;
+ bool is_udp = false;
+ u16 ptype = -1;
+ int i = 0;
+
+ while (i < proto->count &&
+ proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
+ switch (proto->proto_hdr[i].type) {
+ case VIRTCHNL_PROTO_HDR_ETH:
+ ptype = ICE_PTYPE_MAC_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV4:
+ ptype = ICE_PTYPE_IPV4_PAY;
+ is_ipv4 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV6:
+ ptype = ICE_PTYPE_IPV6_PAY;
+ is_ipv6 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_UDP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_UDP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_UDP_PAY;
+ is_udp = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_TCP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_TCP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_TCP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_SCTP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_SCTP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_SCTP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_IP:
+ case VIRTCHNL_PROTO_HDR_GTPU_EH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_GTPU;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_GTPU;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_L2TPV3:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_L2TPV3;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_L2TPV3;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_ESP:
+ if (is_ipv4)
+ ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
+ ICE_MAC_IPV4_ESP;
+ else if (is_ipv6)
+ ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
+ ICE_MAC_IPV6_ESP;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_AH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_AH;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_AH;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_PFCP:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_PFCP_SESSION;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_PFCP_SESSION;
+ goto out;
+ default:
+ break;
+ }
+ i++;
+ }
+
+out:
+ return ice_hw_ptype_ena(&vf->pf->hw, ptype);
+}
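ice_vc_validate_pattern() above folds the header list into a single packet type: L3 headers record IPv4/IPv6 context, and later L4/tunnel headers refine the type before it is checked against the ptypes the active DDP package enabled. A reduced, runnable sketch of the same single-pass walk; the enums and names here are illustrative, not the driver's:

    #include <stdbool.h>
    #include <stdio.h>

    enum hdr { HDR_NONE, HDR_ETH, HDR_IPV4, HDR_IPV6, HDR_UDP, HDR_TCP };
    enum ptype { PT_UNKNOWN, PT_MAC, PT_IPV4, PT_IPV6, PT_IPV4_UDP,
                 PT_IPV6_UDP, PT_IPV4_TCP, PT_IPV6_TCP };

    /* later headers refine the packet type using the L3 context
     * recorded by earlier ones */
    static enum ptype resolve(const enum hdr *h, int n)
    {
        enum ptype pt = PT_UNKNOWN;
        bool v4 = false, v6 = false;

        for (int i = 0; i < n && h[i] != HDR_NONE; i++) {
            switch (h[i]) {
            case HDR_ETH:  pt = PT_MAC; break;
            case HDR_IPV4: pt = PT_IPV4; v4 = true; break;
            case HDR_IPV6: pt = PT_IPV6; v6 = true; break;
            case HDR_UDP:  pt = v4 ? PT_IPV4_UDP : v6 ? PT_IPV6_UDP : pt; break;
            case HDR_TCP:  pt = v4 ? PT_IPV4_TCP : v6 ? PT_IPV6_TCP : pt; break;
            default: break;
            }
        }
        return pt;
    }

    int main(void)
    {
        enum hdr flow[] = { HDR_ETH, HDR_IPV4, HDR_UDP };

        printf("%d\n", resolve(flow, 3));  /* PT_IPV4_UDP */
        return 0;
    }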
+
+/**
* ice_vc_parse_rss_cfg - parses hash fields and headers from
* a specific virtchnl RSS cfg
* @hw: pointer to the hardware
@@ -2578,18 +2557,10 @@ ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
const struct ice_vc_hdr_match_type *hdr_list;
int i, hf_list_len, hdr_list_len;
- if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
- sizeof(hw->active_pkg_name))) {
- hf_list = ice_vc_hash_field_list_comms;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms);
- hdr_list = ice_vc_hdr_list_comms;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms);
- } else {
- hf_list = ice_vc_hash_field_list_os;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os);
- hdr_list = ice_vc_hdr_list_os;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os);
- }
+ hf_list = ice_vc_hash_field_list;
+ hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
+ hdr_list = ice_vc_hdr_list;
+ hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
struct virtchnl_proto_hdr *proto_hdr =
@@ -2691,10 +2662,15 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
goto error_param;
}
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
struct ice_vsi_ctx *ctx;
- enum ice_status status;
u8 lut_type, hash_type;
+ int status;
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
@@ -2723,9 +2699,8 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
if (status) {
- dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
} else {
vsi->info.q_opt_rss = ctx->info.q_opt_rss;
@@ -2750,19 +2725,18 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
vsi->vsi_num, v_ret);
}
} else {
- enum ice_status status;
+ int status;
status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
addl_hdrs);
- /* We just ignore ICE_ERR_DOES_NOT_EXIST, because
- * if two configurations share the same profile remove
- * one of them actually removes both, since the
- * profile is deleted.
+ /* We just ignore -ENOENT, because if two configurations
+ * share the same profile, removing one of them actually
+ * removes both, since the profile is deleted.
*/
- if (status && status != ICE_ERR_DOES_NOT_EXIST) {
+ if (status && status != -ENOENT) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n",
- vf->vf_id, ice_stat_str(status));
+ dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
+ vf->vf_id, status);
}
}
}
@@ -2920,7 +2894,6 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
struct ice_pf *pf = np->vsi->back;
struct ice_vsi_ctx *ctx;
struct ice_vsi *vf_vsi;
- enum ice_status status;
struct device *dev;
struct ice_vf *vf;
int ret;
@@ -2970,12 +2943,10 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
}
- status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
- if (status) {
- dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n",
- ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
- ice_stat_str(status));
- ret = -EIO;
+ ret = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
+ ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, ret);
goto out;
}
@@ -3021,10 +2992,10 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- enum ice_status mcast_status = 0, ucast_status = 0;
bool rm_promisc, alluni = false, allmulti = false;
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
+ int mcast_err = 0, ucast_err = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
struct device *dev;
@@ -3106,24 +3077,21 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
ucast_m = ICE_UCAST_PROMISC_BITS;
}
- ucast_status = ice_vf_set_vsi_promisc(vf, vsi, ucast_m,
- !alluni);
- if (ucast_status) {
- dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n",
- alluni ? "en" : "dis", vf->vf_id);
- v_ret = ice_err_to_virt_err(ucast_status);
- }
+ if (alluni)
+ ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
+ else
+ ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
- mcast_status = ice_vf_set_vsi_promisc(vf, vsi, mcast_m,
- !allmulti);
- if (mcast_status) {
- dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n",
- allmulti ? "en" : "dis", vf->vf_id);
- v_ret = ice_err_to_virt_err(mcast_status);
- }
+ if (allmulti)
+ mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
+ else
+ mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
+
+ if (ucast_err || mcast_err)
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
- if (!mcast_status) {
+ if (!mcast_err) {
if (allmulti &&
!test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
@@ -3133,7 +3101,7 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
vf->vf_id);
}
- if (!ucast_status) {
+ if (!ucast_err) {
if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
vf->vf_id);
@@ -3813,8 +3781,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
{
struct device *dev = ice_pf_to_dev(vf->pf);
u8 *mac_addr = vc_ether_addr->addr;
- enum ice_status status;
- int ret = 0;
+ int ret;
/* device MAC already added */
if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
@@ -3825,18 +3792,17 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
return -EPERM;
}
- status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_ALREADY_EXISTS) {
+ ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
+ if (ret == -EEXIST) {
dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
vf->vf_id);
/* don't return since we might need to update
* the primary MAC in ice_vfhw_mac_add() below
*/
- ret = -EEXIST;
- } else if (status) {
- dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
- mac_addr, vf->vf_id, ice_stat_str(status));
- return -EIO;
+ } else if (ret) {
+ dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
+ mac_addr, vf->vf_id, ret);
+ return ret;
} else {
vf->num_mac++;
}
@@ -3913,20 +3879,20 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
{
struct device *dev = ice_pf_to_dev(vf->pf);
u8 *mac_addr = vc_ether_addr->addr;
- enum ice_status status;
+ int status;
if (!ice_can_vf_change_mac(vf) &&
ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
return 0;
status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_DOES_NOT_EXIST) {
+ if (status == -ENOENT) {
dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
vf->vf_id);
return -ENOENT;
} else if (status) {
- dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
- mac_addr, vf->vf_id, ice_stat_str(status));
+ dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
+ mac_addr, vf->vf_id, status);
return -EIO;
}
@@ -4533,6 +4499,7 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
for (i = 0; i < al->num_elements; i++) {
u8 *mac_addr = al->list[i].addr;
+ int result;
if (!is_unicast_ether_addr(mac_addr) ||
ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
@@ -4544,6 +4511,13 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
goto handle_mac_exit;
}
+ result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
+ if (result) {
+ dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d, error %d\n",
+ mac_addr, vf->vf_id, result);
+ goto handle_mac_exit;
+ }
+
ice_vfhw_mac_add(vf, &al->list[i]);
vf->num_mac++;
break;
@@ -5289,9 +5263,9 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
s16 vf_id = le16_to_cpu(event->desc.retval);
struct device *dev = ice_pf_to_dev(pf);
struct ice_mbx_data mbxdata;
- enum ice_status status;
bool malvf = false;
struct ice_vf *vf;
+ int status;
if (ice_validate_vf_id(pf, vf_id))
return false;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 7e28ecbbe7af..752487a1bdd6 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -203,6 +203,8 @@ void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index bb9a80847298..2388837d6d6c 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -12,6 +12,11 @@
#include "ice_txrx_lib.h"
#include "ice_lib.h"
+static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
+{
+ return &rx_ring->xdp_buf[idx];
+}
+
/**
* ice_qp_reset_stats - Resets all stats for rings of given index
* @vsi: VSI that contains rings of interest
@@ -372,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
dma_addr_t dma;
rx_desc = ICE_RX_DESC(rx_ring, ntu);
- xdp = &rx_ring->xdp_buf[ntu];
+ xdp = ice_xdp_buf(rx_ring, ntu);
nb_buffs = min_t(u16, count, rx_ring->count - ntu);
nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
@@ -390,14 +395,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
}
ntu += nb_buffs;
- if (ntu == rx_ring->count) {
- rx_desc = ICE_RX_DESC(rx_ring, 0);
- xdp = rx_ring->xdp_buf;
+ if (ntu == rx_ring->count)
ntu = 0;
- }
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.status_error0 = 0;
ice_release_rx_desc(rx_ring, ntu);
return count == nb_buffs;
@@ -419,19 +419,18 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
- * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
+ * @xdp: Pointer to XDP buffer
*
* This function allocates a new skb from a zero-copy Rx buffer.
*
* Returns the skb on success, NULL on failure.
*/
static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
- struct xdp_buff *xdp = *xdp_arr;
+ unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int datasize = xdp->data_end - xdp->data;
- unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
struct sk_buff *skb;
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -445,7 +444,6 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
skb_metadata_set(skb, metasize);
xsk_buff_free(xdp);
- *xdp_arr = NULL;
return skb;
}
@@ -483,7 +481,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
goto out_failure;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -507,7 +505,6 @@ out_failure:
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
@@ -522,7 +519,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
- struct xdp_buff **xdp;
+ struct xdp_buff *xdp;
struct sk_buff *skb;
u16 stat_err_bits;
u16 vlan_tag = 0;
@@ -540,31 +537,35 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
*/
dma_rmb();
+ xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
+
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
- if (!size)
- break;
+ if (!size) {
+ xdp->data = NULL;
+ xdp->data_end = NULL;
+ xdp->data_hard_start = NULL;
+ xdp->data_meta = NULL;
+ goto construct_skb;
+ }
- xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
- xsk_buff_set_size(*xdp, size);
- xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
+ xsk_buff_set_size(xdp, size);
+ xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
- xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
+ xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
if (xdp_res) {
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
xdp_xmit |= xdp_res;
else
- xsk_buff_free(*xdp);
+ xsk_buff_free(xdp);
- *xdp = NULL;
total_rx_bytes += size;
total_rx_packets++;
- cleaned_count++;
ice_bump_ntc(rx_ring);
continue;
}
-
+construct_skb:
/* XDP_PASS path */
skb = ice_construct_skb_zc(rx_ring, xdp);
if (!skb) {
@@ -572,7 +573,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
break;
}
- cleaned_count++;
ice_bump_ntc(rx_ring);
if (eth_skb_pad(skb)) {
@@ -594,8 +594,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
ice_receive_skb(rx_ring, skb, vlan_tag);
}
- if (cleaned_count >= ICE_RX_BUF_WRITE)
- failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
+ failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -811,15 +810,14 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
*/
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- u16 i;
-
- for (i = 0; i < rx_ring->count; i++) {
- struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
+ u16 count_mask = rx_ring->count - 1;
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
- if (!xdp)
- continue;
+ for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
- *xdp = NULL;
+ xsk_buff_free(xdp);
}
}
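The rewritten ice_xsk_clean_rx_ring() above frees only the buffers between next_to_clean and next_to_use, relying on the descriptor count being a power of two so the index wraps with a mask instead of a compare-and-reset. A runnable sketch of that masked walk; the ring size and indices are illustrative:

    #include <stdio.h>

    #define RING_COUNT 8  /* must be a power of two for the mask trick */

    int main(void)
    {
        unsigned int mask = RING_COUNT - 1;
        unsigned int ntc = 6, ntu = 2;  /* wrapped span: slots 6, 7, 0, 1 in flight */

        for (; ntc != ntu; ntc = (ntc + 1) & mask)
            printf("free slot %u\n", ntc);
        return 0;
    }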
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 9265901455cd..b9b9d35494d2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -792,7 +792,6 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
**/
s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
{
- s32 ret_val = 0;
struct e1000_nvm_info *nvm = &hw->nvm;
nvm->ops.acquire = igb_acquire_nvm_i210;
@@ -813,7 +812,7 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
nvm->ops.validate = NULL;
nvm->ops.update = NULL;
}
- return ret_val;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index fb1029352c3e..51a2dcaf553d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -864,7 +864,9 @@ static void igb_get_drvinfo(struct net_device *netdev,
}
static void igb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -875,7 +877,9 @@ static void igb_get_ringparam(struct net_device *netdev,
}
static int igb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct igb_ring *temp_ring;
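This signature change repeats across every driver in the series: the ethtool {get,set}_ringparam callbacks gain a struct kernel_ethtool_ringparam for kernel-only ring attributes and a struct netlink_ext_ack for netlink error reporting, and drivers that use neither simply ignore the new arguments. A kernel-context sketch of the resulting callback shape; foo_adapter and the FOO_* limits are placeholders, not any real driver's types:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    /* placeholder driver types and limits -- illustrative only */
    #define FOO_MAX_RXD	4096
    #define FOO_MAX_TXD	4096

    struct foo_adapter {
        u16 rx_ring_count;
        u16 tx_ring_count;
    };

    static void foo_get_ringparam(struct net_device *netdev,
                                  struct ethtool_ringparam *ring,
                                  struct kernel_ethtool_ringparam *kernel_ring,
                                  struct netlink_ext_ack *extack)
    {
        struct foo_adapter *adapter = netdev_priv(netdev);

        ring->rx_max_pending = FOO_MAX_RXD;
        ring->tx_max_pending = FOO_MAX_TXD;
        ring->rx_pending = adapter->rx_ring_count;
        ring->tx_pending = adapter->tx_ring_count;
        /* kernel_ring and extack are simply unused by drivers like these */
    }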
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fd54d3ef890b..38ba92022cd4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2927,7 +2927,7 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
/* Avoid transmit queue timeout since we share it with the slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
__netif_tx_unlock(nq);
@@ -2961,7 +2961,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
__netif_tx_lock(nq, cpu);
/* Avoid transmit queue timeout since we share it with the slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
@@ -6739,12 +6739,119 @@ void igb_update_stats(struct igb_adapter *adapter)
}
}
-static void igb_tsync_interrupt(struct igb_adapter *adapter)
+static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
+{
+ int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt);
+ struct e1000_hw *hw = &adapter->hw;
+ struct timespec64 ts;
+ u32 tsauxc;
+
+ if (pin < 0 || pin >= IGB_N_PEROUT)
+ return;
+
+ spin_lock(&adapter->tmreg_lock);
+
+ if (hw->mac.type == e1000_82580 ||
+ hw->mac.type == e1000_i354 ||
+ hw->mac.type == e1000_i350) {
+ s64 ns = timespec64_to_ns(&adapter->perout[pin].period);
+ u32 systiml, systimh, level_mask, level, rem;
+ u64 systim, now;
+
+ /* read systim registers in sequence */
+ rd32(E1000_SYSTIMR);
+ systiml = rd32(E1000_SYSTIML);
+ systimh = rd32(E1000_SYSTIMH);
+ systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml);
+ now = timecounter_cyc2time(&adapter->tc, systim);
+
+ if (pin < 2) {
+ level_mask = (tsintr_tt == 1) ? 0x80000 : 0x40000;
+ level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0;
+ } else {
+ level_mask = (tsintr_tt == 1) ? 0x80 : 0x40;
+ level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0;
+ }
+
+ div_u64_rem(now, ns, &rem);
+ systim = systim + (ns - rem);
+
+ /* synchronize pin level with rising/falling edges */
+ div_u64_rem(now, ns << 1, &rem);
+ if (rem < ns) {
+ /* first half of period */
+ if (level == 0) {
+ /* output is already low, skip this period */
+ systim += ns;
+ pr_notice("igb: periodic output on %s missed falling edge\n",
+ adapter->sdp_config[pin].name);
+ }
+ } else {
+ /* second half of period */
+ if (level == 1) {
+ /* output is already high, skip this period */
+ systim += ns;
+ pr_notice("igb: periodic output on %s missed rising edge\n",
+ adapter->sdp_config[pin].name);
+ }
+ }
+
+ /* for this chip family, tv_sec holds the upper part of the raw
+ * SYSTIM value rather than actual seconds
+ */
+ ts.tv_nsec = (u32)systim;
+ ts.tv_sec = ((u32)(systim >> 32)) & 0xFF;
+ } else {
+ ts = timespec64_add(adapter->perout[pin].start,
+ adapter->perout[pin].period);
+ }
+
+ /* u32 conversion of tv_sec is safe until y2106 */
+ wr32((tsintr_tt == 1) ? E1000_TRGTTIML1 : E1000_TRGTTIML0, ts.tv_nsec);
+ wr32((tsintr_tt == 1) ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)ts.tv_sec);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsauxc |= TSAUXC_EN_TT0;
+ wr32(E1000_TSAUXC, tsauxc);
+ adapter->perout[pin].start = ts;
+
+ spin_unlock(&adapter->tmreg_lock);
+}
+
+static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
{
+ int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt);
+ int auxstmpl = (tsintr_tt == 1) ? E1000_AUXSTMPL1 : E1000_AUXSTMPL0;
+ int auxstmph = (tsintr_tt == 1) ? E1000_AUXSTMPH1 : E1000_AUXSTMPH0;
struct e1000_hw *hw = &adapter->hw;
struct ptp_clock_event event;
struct timespec64 ts;
- u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
+
+ if (pin < 0 || pin >= IGB_N_EXTTS)
+ return;
+
+ if (hw->mac.type == e1000_82580 ||
+ hw->mac.type == e1000_i354 ||
+ hw->mac.type == e1000_i350) {
+ s64 ns = rd32(auxstmpl);
+
+ ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32;
+ ts = ns_to_timespec64(ns);
+ } else {
+ ts.tv_nsec = rd32(auxstmpl);
+ ts.tv_sec = rd32(auxstmph);
+ }
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = tsintr_tt;
+ event.timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+ ptp_clock_event(adapter->ptp_clock, &event);
+}
+
+static void igb_tsync_interrupt(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ack = 0, tsicr = rd32(E1000_TSICR);
+ struct ptp_clock_event event;
if (tsicr & TSINTR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
@@ -6760,51 +6867,22 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter)
}
if (tsicr & TSINTR_TT0) {
- spin_lock(&adapter->tmreg_lock);
- ts = timespec64_add(adapter->perout[0].start,
- adapter->perout[0].period);
- /* u32 conversion of tv_sec is safe until y2106 */
- wr32(E1000_TRGTTIML0, ts.tv_nsec);
- wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
- tsauxc = rd32(E1000_TSAUXC);
- tsauxc |= TSAUXC_EN_TT0;
- wr32(E1000_TSAUXC, tsauxc);
- adapter->perout[0].start = ts;
- spin_unlock(&adapter->tmreg_lock);
+ igb_perout(adapter, 0);
ack |= TSINTR_TT0;
}
if (tsicr & TSINTR_TT1) {
- spin_lock(&adapter->tmreg_lock);
- ts = timespec64_add(adapter->perout[1].start,
- adapter->perout[1].period);
- wr32(E1000_TRGTTIML1, ts.tv_nsec);
- wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
- tsauxc = rd32(E1000_TSAUXC);
- tsauxc |= TSAUXC_EN_TT1;
- wr32(E1000_TSAUXC, tsauxc);
- adapter->perout[1].start = ts;
- spin_unlock(&adapter->tmreg_lock);
+ igb_perout(adapter, 1);
ack |= TSINTR_TT1;
}
if (tsicr & TSINTR_AUTT0) {
- nsec = rd32(E1000_AUXSTMPL0);
- sec = rd32(E1000_AUXSTMPH0);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
- event.timestamp = sec * 1000000000ULL + nsec;
- ptp_clock_event(adapter->ptp_clock, &event);
+ igb_extts(adapter, 0);
ack |= TSINTR_AUTT0;
}
if (tsicr & TSINTR_AUTT1) {
- nsec = rd32(E1000_AUXSTMPL1);
- sec = rd32(E1000_AUXSTMPH1);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 1;
- event.timestamp = sec * 1000000000ULL + nsec;
- ptp_clock_event(adapter->ptp_clock, &event);
+ igb_extts(adapter, 1);
ack |= TSINTR_AUTT1;
}
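The extracted igb_perout() aligns the next target time to the period boundary after "now" with div_u64_rem(), then takes the remainder over a double period to learn which half-cycle the pin is in; if the output already sits at the level the coming edge would produce, one period is skipped. A runnable userspace sketch of just that arithmetic, with illustrative values (ns here is the toggle interval, i.e. half the requested period, matching the ns >> 1 in the enable path):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t ns = 500;    /* toggle interval (half-period), in ns */
        uint64_t now = 1730;  /* current time, in ns */
        uint64_t rem = now % ns;               /* div_u64_rem(now, ns, &rem) */
        uint64_t target = now + (ns - rem);    /* next boundary: 2000 */
        int level = 0;                         /* pin level right now */

        /* remainder over a double period tells us which half we are in;
         * skip one period if the pin already sits at the coming level */
        rem = now % (2 * ns);
        if (rem < ns ? level == 0 : level == 1)
            target += ns;

        printf("next toggle at %" PRIu64 " ns\n", target);
        return 0;
    }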
@@ -7648,6 +7726,20 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
struct vf_mac_filter *entry = NULL;
int ret = 0;
+ if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
+ !vf_data->trusted) {
+ dev_warn(&pdev->dev,
+ "VF %d requested MAC filter but is administratively denied\n",
+ vf);
+ return -EINVAL;
+ }
+ if (!is_valid_ether_addr(addr)) {
+ dev_warn(&pdev->dev,
+ "VF %d attempted to set invalid MAC filter\n",
+ vf);
+ return -EINVAL;
+ }
+
switch (info) {
case E1000_VF_MAC_FILTER_CLR:
/* remove all unicast MAC filters related to the current VF */
@@ -7661,20 +7753,6 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
}
break;
case E1000_VF_MAC_FILTER_ADD:
- if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
- !vf_data->trusted) {
- dev_warn(&pdev->dev,
- "VF %d requested MAC filter but is administratively denied\n",
- vf);
- return -EINVAL;
- }
- if (!is_valid_ether_addr(addr)) {
- dev_warn(&pdev->dev,
- "VF %d attempted to set invalid MAC filter\n",
- vf);
- return -EINVAL;
- }
-
/* try to find empty slot in the list */
list_for_each(pos, &adapter->vf_macs.l) {
entry = list_entry(pos, struct vf_mac_filter, l);
@@ -8367,7 +8445,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -8422,7 +8500,7 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
result = IGB_XDP_REDIR;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -9254,7 +9332,7 @@ static int __maybe_unused igb_suspend(struct device *dev)
return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
-static int __maybe_unused igb_resume(struct device *dev)
+static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -9297,17 +9375,24 @@ static int __maybe_unused igb_resume(struct device *dev)
wr32(E1000_WUS, ~0);
- rtnl_lock();
+ if (!rpm)
+ rtnl_lock();
if (!err && netif_running(netdev))
err = __igb_open(netdev, true);
if (!err)
netif_device_attach(netdev);
- rtnl_unlock();
+ if (!rpm)
+ rtnl_unlock();
return err;
}
+static int __maybe_unused igb_resume(struct device *dev)
+{
+ return __igb_resume(dev, false);
+}
+
static int __maybe_unused igb_runtime_idle(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
@@ -9326,7 +9411,7 @@ static int __maybe_unused igb_runtime_suspend(struct device *dev)
static int __maybe_unused igb_runtime_resume(struct device *dev)
{
- return igb_resume(dev);
+ return __igb_resume(dev, true);
}
static void igb_shutdown(struct pci_dev *pdev)
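The __igb_resume() split exists because runtime-PM resume can be entered with RTNL already held by the caller, so taking it again would deadlock; only the system-resume wrapper acquires the lock itself. A minimal userspace sketch of the pattern, with a pthread mutex standing in for RTNL (names illustrative, not driver code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

    /* one shared worker, plus a flag saying whether the calling context
     * already holds the lock and so must not take it again */
    static int resume_common(bool rpm)
    {
        if (!rpm)
            pthread_mutex_lock(&rtnl);
        /* ... re-enable the device, reattach the netdev ... */
        if (!rpm)
            pthread_mutex_unlock(&rtnl);
        return 0;
    }

    static int system_resume(void)  { return resume_common(false); }
    static int runtime_resume(void) { return resume_common(true); }

    int main(void)
    {
        printf("%d %d\n", system_resume(), runtime_resume());
        return 0;
    }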
@@ -9442,7 +9527,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igb_resume routine.
+ * resembles the first-half of the __igb_resume routine.
**/
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
@@ -9482,7 +9567,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
* it's OK to resume normal operation. Implementation resembles the
- * second-half of the igb_resume routine.
+ * second-half of the __igb_resume routine.
*/
static void igb_io_resume(struct pci_dev *pdev)
{
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 0011b15e678c..6580fcddb4be 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -69,6 +69,7 @@
#define IGB_NBITS_82580 40
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+static void igb_ptp_sdp_init(struct igb_adapter *adapter);
/* SYSTIM read access for the 82576 */
static u64 igb_ptp_read_82576(const struct cyclecounter *cc)
@@ -507,6 +508,158 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq)
wr32(E1000_CTRL_EXT, ctrl_ext);
}
+static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct igb_adapter *igb =
+ container_of(ptp, struct igb_adapter, ptp_caps);
+ u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, systiml,
+ systimh, level_mask, level, rem;
+ struct e1000_hw *hw = &igb->hw;
+ struct timespec64 ts, start;
+ unsigned long flags;
+ u64 systim, now;
+ int pin = -1;
+ s64 ns;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ if (on) {
+ pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+ if (rq->extts.index == 1) {
+ tsauxc_mask = TSAUXC_EN_TS1;
+ tsim_mask = TSINTR_AUTT1;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TS0;
+ tsim_mask = TSINTR_AUTT0;
+ }
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsim = rd32(E1000_TSIM);
+ if (on) {
+ igb_pin_extts(igb, rq->extts.index, pin);
+ tsauxc |= tsauxc_mask;
+ tsim |= tsim_mask;
+ } else {
+ tsauxc &= ~tsauxc_mask;
+ tsim &= ~tsim_mask;
+ }
+ wr32(E1000_TSAUXC, tsauxc);
+ wr32(E1000_TSIM, tsim);
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+ return 0;
+
+ case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
+ if (on) {
+ pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ ns = timespec64_to_ns(&ts);
+ ns = ns >> 1;
+ if (on && ns < 8LL)
+ return -EINVAL;
+ ts = ns_to_timespec64(ns);
+ if (rq->perout.index == 1) {
+ tsauxc_mask = TSAUXC_EN_TT1;
+ tsim_mask = TSINTR_TT1;
+ trgttiml = E1000_TRGTTIML1;
+ trgttimh = E1000_TRGTTIMH1;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TT0;
+ tsim_mask = TSINTR_TT0;
+ trgttiml = E1000_TRGTTIML0;
+ trgttimh = E1000_TRGTTIMH0;
+ }
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsim = rd32(E1000_TSIM);
+ if (rq->perout.index == 1) {
+ tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1);
+ tsim &= ~TSINTR_TT1;
+ } else {
+ tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0);
+ tsim &= ~TSINTR_TT0;
+ }
+ if (on) {
+ int i = rq->perout.index;
+
+ /* read systim registers in sequence */
+ rd32(E1000_SYSTIMR);
+ systiml = rd32(E1000_SYSTIML);
+ systimh = rd32(E1000_SYSTIMH);
+ systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml);
+ now = timecounter_cyc2time(&igb->tc, systim);
+
+ if (pin < 2) {
+ level_mask = (i == 1) ? 0x80000 : 0x40000;
+ level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0;
+ } else {
+ level_mask = (i == 1) ? 0x80 : 0x40;
+ level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0;
+ }
+
+ div_u64_rem(now, ns, &rem);
+ systim = systim + (ns - rem);
+
+ /* synchronize pin level with rising/falling edges */
+ div_u64_rem(now, ns << 1, &rem);
+ if (rem < ns) {
+ /* first half of period */
+ if (level == 0) {
+ /* output is already low, skip this period */
+ systim += ns;
+ }
+ } else {
+ /* second half of period */
+ if (level == 1) {
+ /* output is already high, skip this period */
+ systim += ns;
+ }
+ }
+
+ start = ns_to_timespec64(systim + (ns - rem));
+ igb_pin_perout(igb, i, pin, 0);
+ igb->perout[i].start.tv_sec = start.tv_sec;
+ igb->perout[i].start.tv_nsec = start.tv_nsec;
+ igb->perout[i].period.tv_sec = ts.tv_sec;
+ igb->perout[i].period.tv_nsec = ts.tv_nsec;
+
+ wr32(trgttiml, (u32)systim);
+ wr32(trgttimh, ((u32)(systim >> 32)) & 0xFF);
+ tsauxc |= tsauxc_mask;
+ tsim |= tsim_mask;
+ }
+ wr32(E1000_TSAUXC, tsauxc);
+ wr32(E1000_TSIM, tsim);
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+ return 0;
+
+ case PTP_CLK_REQ_PPS:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
@@ -1015,10 +1168,6 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
bool is_l2 = false;
u32 regval;
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
@@ -1192,7 +1341,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- int i;
switch (hw->mac.type) {
case e1000_82576:
@@ -1215,16 +1363,21 @@ void igb_ptp_init(struct igb_adapter *adapter)
case e1000_82580:
case e1000_i354:
case e1000_i350:
+ igb_ptp_sdp_init(adapter);
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 62499999;
- adapter->ptp_caps.n_ext_ts = 0;
+ adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
+ adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
+ adapter->ptp_caps.n_pins = IGB_N_SDP;
adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
- adapter->ptp_caps.enable = igb_ptp_feature_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable_82580;
+ adapter->ptp_caps.verify = igb_ptp_verify_pin;
adapter->cc.read = igb_ptp_read_82580;
adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
adapter->cc.mult = 1;
@@ -1233,13 +1386,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
break;
case e1000_i210:
case e1000_i211:
- for (i = 0; i < IGB_N_SDP; i++) {
- struct ptp_pin_desc *ppd = &adapter->sdp_config[i];
-
- snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
- ppd->index = i;
- ppd->func = PTP_PF_NONE;
- }
+ igb_ptp_sdp_init(adapter);
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 62499999;
@@ -1285,6 +1432,23 @@ void igb_ptp_init(struct igb_adapter *adapter)
}
/**
+ * igb_ptp_sdp_init - utility function which inits the SDP config structs
+ * @adapter: Board private structure.
+ **/
+static void igb_ptp_sdp_init(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < IGB_N_SDP; i++) {
+ struct ptp_pin_desc *ppd = &adapter->sdp_config[i];
+
+ snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
+ ppd->index = i;
+ ppd->func = PTP_PF_NONE;
+ }
+}
+
+/**
* igb_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
*
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 06e5bd646a0e..9d4322b74163 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -175,7 +175,9 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
}
static void igbvf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *tx_ring = adapter->tx_ring;
@@ -188,7 +190,9 @@ static void igbvf_get_ringparam(struct net_device *netdev,
}
static int igbvf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 74ccd622251a..b78407289741 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1520,7 +1520,7 @@ static void igbvf_reset(struct igbvf_adapter *adapter)
/* Allow time for pending master requests to run */
if (mac->ops.reset_hw(hw))
- dev_warn(&adapter->pdev->dev, "PF still resetting\n");
+ dev_info(&adapter->pdev->dev, "PF still resetting\n");
mac->ops.init_hw(hw);
@@ -2859,6 +2859,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_hw_init:
+ netif_napi_del(&adapter->rx_ring->napi);
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
err_sw_init:
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index c7fe61509d5b..5c66b97c0cfa 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -85,9 +85,6 @@
#define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8)
-/* Physical Func Reset Done Indication */
-#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
@@ -584,7 +581,6 @@
#define IGC_GEN_POLL_TIMEOUT 1920
/* PHY Control Register */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
@@ -605,9 +601,6 @@
#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
-/* Bit definitions for valid PHY IDs. I = Integrated E = External */
-#define I225_I_PHY_ID 0x67C9DC00
-
/* MDI Control */
#define IGC_MDIC_DATA_MASK 0x0000FFFF
#define IGC_MDIC_REG_MASK 0x001F0000
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index e0a76ac1bbbc..8cc077b712ad 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -567,8 +567,11 @@ static int igc_ethtool_set_eeprom(struct net_device *netdev,
return ret_val;
}
-static void igc_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static void
+igc_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -578,8 +581,11 @@ static void igc_ethtool_get_ringparam(struct net_device *netdev,
ring->tx_pending = adapter->tx_ring_count;
}
-static int igc_ethtool_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static int
+igc_ethtool_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 587db7483f25..b1e72ec5f131 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -55,7 +55,6 @@ enum igc_mac_type {
enum igc_phy_type {
igc_phy_unknown = 0,
- igc_phy_none,
igc_phy_i225,
};
@@ -68,8 +67,6 @@ enum igc_media_type {
enum igc_nvm_type {
igc_nvm_unknown = 0,
igc_nvm_eeprom_spi,
- igc_nvm_flash_hw,
- igc_nvm_invm,
};
struct igc_info {
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index b2ef9fde97b3..66ea566488d1 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -473,13 +473,11 @@ s32 igc_init_nvm_params_i225(struct igc_hw *hw)
/* NVM Function Pointers */
if (igc_get_flash_presence_i225(hw)) {
- hw->nvm.type = igc_nvm_flash_hw;
nvm->ops.read = igc_read_nvm_srrd_i225;
nvm->ops.write = igc_write_nvm_srwr_i225;
nvm->ops.validate = igc_validate_nvm_checksum_i225;
nvm->ops.update = igc_update_nvm_checksum_i225;
} else {
- hw->nvm.type = igc_nvm_invm;
nvm->ops.read = igc_read_nvm_eerd;
nvm->ops.write = NULL;
nvm->ops.validate = NULL;
@@ -636,7 +634,7 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
ltrv = rd32(IGC_LTRMAXV);
if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
- (scale_min << IGC_LTRMAXV_SCALE_SHIFT);
+ (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
wr32(IGC_LTRMAXV, ltrv);
}
}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 8e448288ee26..2f17f36e94fd 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1718,24 +1718,26 @@ static void igc_add_rx_frag(struct igc_ring *rx_ring,
static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
struct igc_rx_buffer *rx_buffer,
- union igc_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
+ unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(va);
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(va - IGC_SKB_PAD, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, IGC_SKB_PAD);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, size);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
igc_rx_buffer_flip(rx_buffer, truesize);
return skb;
@@ -1746,6 +1748,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct xdp_buff *xdp,
ktime_t timestamp)
{
+ unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
void *va = xdp->data;
@@ -1753,10 +1756,11 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(va);
+ net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+ IGC_RX_HDR_LEN + metasize);
if (unlikely(!skb))
return NULL;
@@ -1769,7 +1773,13 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
+ ALIGN(headlen + metasize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
/* update all of the pointers */
size -= headlen;
@@ -2231,7 +2241,7 @@ static int __igc_xdp_run_prog(struct igc_adapter *adapter,
return IGC_XDP_REDIRECT;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(adapter->netdev, prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -2354,7 +2364,8 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
if (!skb) {
xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
- igc_rx_offset(rx_ring) + pkt_offset, size, false);
+ igc_rx_offset(rx_ring) + pkt_offset,
+ size, true);
skb = igc_xdp_run_prog(adapter, &xdp);
}
@@ -2378,7 +2389,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
} else if (skb)
igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
+ skb = igc_build_skb(rx_ring, rx_buffer, &xdp);
else
skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
timestamp);
@@ -2448,8 +2459,10 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
- if (metasize)
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
return skb;
}
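Both igc skb-construction paths now carry XDP metadata: it lives immediately before xdp->data, gets copied together with the payload, and is then recorded via skb_metadata_set() while __skb_pull() advances the data pointer past it so the stack sees the packet start where it expects. A runnable sketch of that buffer layout, with plain memory standing in for the skb:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[64] = "METApacket-bytes";  /* 4 bytes of metadata, then payload */
        char *data_meta = buf;
        char *data = buf + 4;               /* so metasize == 4 */
        size_t metasize = (size_t)(data - data_meta);

        char skb[64] = {0};
        /* copy metadata and payload together ... */
        memcpy(skb, data_meta, strlen(buf));
        /* ... then "pull" past the metadata, like __skb_pull(skb, metasize) */
        char *skb_data = skb + metasize;

        printf("meta=%.4s data=%s\n", skb, skb_data);  /* meta=META data=packet-bytes */
        return 0;
    }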
@@ -5467,6 +5480,9 @@ static irqreturn_t igc_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ if (icr & IGC_ICR_TS)
+ igc_tsync_interrupt(adapter);
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -5510,6 +5526,9 @@ static irqreturn_t igc_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ if (icr & IGC_ICR_TS)
+ igc_tsync_interrupt(adapter);
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 30568e3544cd..0d6e3215e98f 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -560,10 +560,6 @@ static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter)
static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
struct hwtstamp_config *config)
{
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
igc_ptp_disable_tx_timestamp(adapter);
@@ -768,7 +764,20 @@ int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
*/
static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
{
- return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false;
+ if (!IS_ENABLED(CONFIG_X86_TSC))
+ return false;
+
+ /* FIXME: it was noticed that enabling support for PCIe PTM in
+ * some i225-V models could cause lockups when bringing the
+ * interface up/down. There should be no downsides to
+ * disabling crosstimestamping support for i225-V, as it
+ * doesn't have any PTP support. That way we gain some time
+ * while root causing the issue.
+ */
+ if (adapter->pdev->device == IGC_DEV_ID_I225_V)
+ return false;
+
+ return pcie_ptm_enabled(adapter->pdev);
}
static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index a8cf5374be47..aeeb34e64610 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
+#include <linux/if_vlan.h>
#include <net/xdp_sock_drv.h>
#include "igc.h"
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index 582099a5ad41..46efcfab7234 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -464,7 +464,9 @@ ixgb_get_drvinfo(struct net_device *netdev,
static void
ixgb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_desc_ring *txdr = &adapter->tx_ring;
@@ -478,7 +480,9 @@ ixgb_get_ringparam(struct net_device *netdev,
static int
ixgb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_desc_ring *txdr = &adapter->tx_ring;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 8362822316a9..f70967c32116 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1118,7 +1118,9 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
}
static void ixgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
@@ -1131,7 +1133,9 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
}
static int ixgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0f9f022260d7..89b467006291 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2170,7 +2170,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -2235,7 +2235,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
result = IXGBE_XDP_REDIR;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -3247,8 +3247,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
/* If Flow Director is enabled, set interrupt affinity */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
/* assign the mask for this irq */
- irq_set_affinity_hint(entry->vector,
- &q_vector->affinity_mask);
+ irq_update_affinity_hint(entry->vector,
+ &q_vector->affinity_mask);
}
}
@@ -3264,8 +3264,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
free_queue_irqs:
while (vector) {
vector--;
- irq_set_affinity_hint(adapter->msix_entries[vector].vector,
- NULL);
+ irq_update_affinity_hint(adapter->msix_entries[vector].vector,
+ NULL);
free_irq(adapter->msix_entries[vector].vector,
adapter->q_vector[vector]);
}
@@ -3398,7 +3398,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
continue;
/* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(entry->vector, NULL);
+ irq_update_affinity_hint(entry->vector, NULL);
free_irq(entry->vector, q_vector);
}
@@ -5531,6 +5531,10 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
if (!speed && hw->mac.ops.get_link_capabilities) {
ret = hw->mac.ops.get_link_capabilities(hw, &speed,
&autoneg);
+ /* remove NBASE-T speeds from default autonegotiation
+ * to accommodate broken network switches in the field
+ * which cannot cope with advertised NBASE-T speeds
+ */
speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
IXGBE_LINK_SPEED_2_5GB_FULL);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 23ddfd79fc8b..336426a67ac1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -992,10 +992,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
bool is_l2 = false;
u32 regval;
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index a82533f21d36..bba3feaf3318 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -35,8 +35,6 @@ int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
struct xsk_buff_pool *pool,
u16 qid);
-void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
-
bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 9724ffb16518..e4b50c7781ff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -3405,6 +3405,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
/* flush pending Tx transactions */
ixgbe_clear_tx_pending(hw);
+ /* set MDIO speed before talking to the PHY in case it's the 1st time */
+ ixgbe_set_mdio_speed(hw);
+
/* PHY ops must be identified and initialized prior to reset */
status = hw->phy.ops.init(hw);
if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index db2bc58dfcfd..b3fd8e5cd85b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -131,7 +131,7 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
goto out_failure;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 6bace746eaac..5f08779c0e4e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -281,6 +281,10 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ERR_INVALID_MAC_ADDR -1
#define IXGBE_ERR_RESET_FAILED -2
#define IXGBE_ERR_INVALID_ARGUMENT -3
+#define IXGBE_ERR_CONFIG -4
+#define IXGBE_ERR_MBX -5
+#define IXGBE_ERR_TIMEOUT -6
+#define IXGBE_ERR_PARAM -7
/* Transmit Config masks */
#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 8380f905e708..3b41f83c8dff 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -225,7 +225,9 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
}
static void ixgbevf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -236,7 +238,9 @@ static void ixgbevf_get_ringparam(struct net_device *netdev,
}
static int ixgbevf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
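[Editor's note: the ethtool get/set_ringparam callbacks gained two parameters
in 5.17 -- a kernel-only struct for ring fields with no legacy-ioctl
equivalent, and a netlink extack for error reporting. The same signature
change lands in mv643xx_eth and mvneta below. A hypothetical minimal
implementation of the new hook, with made-up values, sketch only:]

	static void example_get_ringparam(struct net_device *netdev,
					  struct ethtool_ringparam *ring,
					  struct kernel_ethtool_ringparam *kernel_ring,
					  struct netlink_ext_ack *extack)
	{
		ring->rx_max_pending = 4096;	/* device limit (made up) */
		ring->rx_pending = 512;		/* current ring size (made up) */
	}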
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index e3e4676af9e4..e763cee0695e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -40,16 +40,16 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
spin_lock_bh(&adapter->mbx_lock);
- ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+ ret = ixgbevf_write_mbx(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
if (ret)
goto out;
- ret = hw->mbx.ops.read_posted(hw, msgbuf, 2);
+ ret = ixgbevf_poll_mbx(hw, msgbuf, 2);
if (ret)
goto out;
ret = (int)msgbuf[1];
- if (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK && ret >= 0)
+ if (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE && ret >= 0)
ret = -1;
out:
@@ -77,11 +77,11 @@ static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
spin_lock_bh(&adapter->mbx_lock);
- err = hw->mbx.ops.write_posted(hw, msgbuf, 2);
+ err = ixgbevf_write_mbx(hw, msgbuf, 2);
if (err)
goto out;
- err = hw->mbx.ops.read_posted(hw, msgbuf, 2);
+ err = ixgbevf_poll_mbx(hw, msgbuf, 2);
if (err)
goto out;
@@ -623,6 +623,7 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
break;
default:
return;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index a0e325774819..e257390a4f6a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -430,6 +430,7 @@ extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
@@ -491,4 +492,8 @@ void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
#define hw_dbg(hw, format, arg...) \
netdev_dbg(ixgbevf_hw_to_netdev(hw), format, ## arg)
+
+s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size);
+s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size);
+
#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d81811ab4ec4..0015fcf1df2b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -944,7 +944,7 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
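[Editor's note: napi_build_skb() is the NAPI-context variant of build_skb();
it takes sk_buff heads from the per-CPU NAPI cache rather than the slab
allocator, which is safe here because this path only runs from the driver's
NAPI poll routine.]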
@@ -1070,7 +1070,7 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
goto out_failure;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -2266,6 +2266,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
static const int api[] = {
+ ixgbe_mbox_api_15,
ixgbe_mbox_api_14,
ixgbe_mbox_api_13,
ixgbe_mbox_api_12,
@@ -2284,6 +2285,12 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
idx++;
}
+ if (hw->api_version >= ixgbe_mbox_api_15) {
+ hw->mbx.ops.init_params(hw);
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ sizeof(struct ixgbe_mbx_operations));
+ }
+
spin_unlock_bh(&adapter->mbx_lock);
}
@@ -2627,6 +2634,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
if (adapter->xdp_prog &&
hw->mac.max_tx_queues == rss)
rss = rss > 3 ? 2 : 1;
@@ -4565,7 +4573,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
- memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
sizeof(struct ixgbe_mbx_operations));
/* setup the private structure */
@@ -4625,6 +4633,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 6bc1953263b9..a55dd978f7ca 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -15,16 +15,15 @@ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
+ if (!countdown || !mbx->ops.check_for_msg)
+ return IXGBE_ERR_CONFIG;
+
while (countdown && mbx->ops.check_for_msg(hw)) {
countdown--;
udelay(mbx->udelay);
}
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-
- return countdown ? 0 : IXGBE_ERR_MBX;
+ return countdown ? 0 : IXGBE_ERR_TIMEOUT;
}
/**
@@ -38,87 +37,82 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
+ if (!countdown || !mbx->ops.check_for_ack)
+ return IXGBE_ERR_CONFIG;
+
while (countdown && mbx->ops.check_for_ack(hw)) {
countdown--;
udelay(mbx->udelay);
}
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-
- return countdown ? 0 : IXGBE_ERR_MBX;
+ return countdown ? 0 : IXGBE_ERR_TIMEOUT;
}
/**
- * ixgbevf_read_posted_mbx - Wait for message notification and receive message
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
+ * ixgbevf_read_mailbox_vf - read VF's mailbox register
+ * @hw: pointer to the HW structure
*
- * returns 0 if it successfully received a message notification and
- * copied it into the receive buffer.
+ * This function is used to read the mailbox register dedicated to the VF
+ * without losing the read-to-clear status bits.
**/
-static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+static u32 ixgbevf_read_mailbox_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ u32 vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
- if (!mbx->ops.read)
- goto out;
+ vf_mailbox |= hw->mbx.vf_mailbox;
+ hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
- ret_val = ixgbevf_poll_for_msg(hw);
-
- /* if ack received read message, otherwise we timed out */
- if (!ret_val)
- ret_val = mbx->ops.read(hw, msg, size);
-out:
- return ret_val;
+ return vf_mailbox;
}
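[Editor's note: the VFMAILBOX status bits are read-to-clear, so a plain
register read would silently discard any bit the caller was not looking for.
The helper above therefore accumulates the R2C bits into the
hw->mbx.vf_mailbox software copy, and the clear_* helpers that follow consume
the bits from that copy rather than from the register.]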
/**
- * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
+ * ixgbevf_clear_msg_vf - clear PF status bit
+ * @hw: pointer to the HW structure
*
- * returns 0 if it successfully copied message into the buffer and
- * received an ack to that message within delay * timeout period
+ * This function is used to clear the PFSTS bit in the VFMAILBOX register
**/
-static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+static void ixgbevf_clear_msg_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
- /* exit if either we can't write or there isn't a defined timeout */
- if (!mbx->ops.write || !mbx->timeout)
- goto out;
+ if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) {
+ hw->mbx.stats.reqs++;
+ hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS;
+ }
+}
- /* send msg */
- ret_val = mbx->ops.write(hw, msg, size);
+/**
+ * ixgbevf_clear_ack_vf - clear PF ACK bit
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to clear the PFACK bit in the VFMAILBOX register
+ **/
+static void ixgbevf_clear_ack_vf(struct ixgbe_hw *hw)
+{
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
- /* if msg sent wait until we receive an ack */
- if (!ret_val)
- ret_val = ixgbevf_poll_for_ack(hw);
-out:
- return ret_val;
+ if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) {
+ hw->mbx.stats.acks++;
+ hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK;
+ }
}
/**
- * ixgbevf_read_v2p_mailbox - read v2p mailbox
- * @hw: pointer to the HW structure
+ * ixgbevf_clear_rst_vf - clear PF reset bit
+ * @hw: pointer to the HW structure
*
- * This function is used to read the v2p mailbox without losing the read to
- * clear status bits.
+ * This function is used to clear the reset indication (RSTI) and reset
+ * done (RSTD) bits in the VFMAILBOX register once the shared resources
+ * have been reset and the reset sequence has completed.
**/
-static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
+static void ixgbevf_clear_rst_vf(struct ixgbe_hw *hw)
{
- u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
-
- v2p_mailbox |= hw->mbx.v2p_mailbox;
- hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
- return v2p_mailbox;
+ if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) {
+ hw->mbx.stats.rsts++;
+ hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI |
+ IXGBE_VFMAILBOX_RSTD);
+ }
}
/**
@@ -131,14 +125,12 @@ static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
**/
static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
{
- u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
s32 ret_val = IXGBE_ERR_MBX;
- if (v2p_mailbox & mask)
+ if (vf_mailbox & mask)
ret_val = 0;
- hw->mbx.v2p_mailbox &= ~mask;
-
return ret_val;
}
@@ -172,6 +164,7 @@ static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
ret_val = 0;
+ ixgbevf_clear_ack_vf(hw);
hw->mbx.stats.acks++;
}
@@ -191,6 +184,7 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
IXGBE_VFMAILBOX_RSTI))) {
ret_val = 0;
+ ixgbevf_clear_rst_vf(hw);
hw->mbx.stats.rsts++;
}
@@ -205,19 +199,59 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
**/
static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
{
- s32 ret_val = IXGBE_ERR_MBX;
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+ int countdown = mbx->timeout;
+ u32 vf_mailbox;
- /* Take ownership of the buffer */
- IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+ if (!mbx->timeout)
+ return ret_val;
- /* reserve mailbox for VF use */
- if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
- ret_val = 0;
+ while (countdown--) {
+ /* Reserve mailbox for VF use */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_VFU;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* Verify that VF is the owner of the lock */
+ if (ixgbevf_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) {
+ ret_val = 0;
+ break;
+ }
+
+ /* Wait a bit before trying again */
+ udelay(mbx->udelay);
+ }
+
+ if (ret_val)
+ ret_val = IXGBE_ERR_TIMEOUT;
return ret_val;
}
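[Editor's note: where the legacy code made a single attempt to grab VFU, the
lock is now retried for mbx->timeout iterations with mbx->udelay between
tries, and a successful ixgbevf_obtain_mbx_lock_vf() must now be paired with
mbx.ops.release() -- see ixgbevf_write_mbx_vf() further down, which releases
the lock on every exit path.]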
/**
+ * ixgbevf_release_mbx_lock_vf - release mailbox lock
+ * @hw: pointer to the HW structure
+ **/
+static void ixgbevf_release_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ u32 vf_mailbox;
+
+ /* Return ownership of the buffer */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox &= ~IXGBE_VFMAILBOX_VFU;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+}
+
+/**
+ * ixgbevf_release_mbx_lock_vf_legacy - release mailbox lock
+ * @hw: pointer to the HW structure
+ **/
+static void ixgbevf_release_mbx_lock_vf_legacy(struct ixgbe_hw *__always_unused hw)
+{
+}
+
+/**
* ixgbevf_write_mbx_vf - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
@@ -227,6 +261,50 @@ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
**/
static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
+ u32 vf_mailbox;
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent PF/VF race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbevf_clear_msg_vf(hw);
+ ixgbevf_clear_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* interrupt the PF to tell it a message has been sent */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* if msg sent wait until we receive an ack */
+ ret_val = ixgbevf_poll_for_ack(hw);
+
+out_no_write:
+ hw->mbx.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx_vf_legacy - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer
+ **/
+static s32 ixgbevf_write_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
s32 ret_val;
u16 i;
@@ -237,7 +315,9 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
/* flush msg and acks as we are overwriting the message buffer */
ixgbevf_check_for_msg_vf(hw);
+ ixgbevf_clear_msg_vf(hw);
ixgbevf_check_for_ack_vf(hw);
+ ixgbevf_clear_ack_vf(hw);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
@@ -263,6 +343,42 @@ out_no_write:
**/
static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
+ u32 vf_mailbox;
+ s32 ret_val;
+ u16 i;
+
+ /* check if there is a message from PF */
+ ret_val = ixgbevf_check_for_msg_vf(hw);
+ if (ret_val)
+ return ret_val;
+
+ ixgbevf_clear_msg_vf(hw);
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_ACK;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_mbx_vf_legacy - Reads a message from the inbox intended for VF
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read message from buffer
+ **/
+static s32 ixgbevf_read_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
s32 ret_val = 0;
u16 i;
@@ -298,7 +414,7 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
/* start mailbox with a valid timeout so that communication can begin;
* the reset_hw call will refresh the timeout value
*/
- mbx->timeout = 0;
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
mbx->size = IXGBE_VFMAILBOX_SIZE;
@@ -312,12 +428,79 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
return 0;
}
+/**
+ * ixgbevf_poll_mbx - Wait for message and read it from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read message from buffer
+ **/
+s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ if (!mbx->ops.read || !mbx->ops.check_for_msg || !mbx->timeout)
+ return ret_val;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ ret_val = ixgbevf_poll_for_msg(hw);
+	/* if a message was received, read it, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx - Write a message to the mailbox and wait for ACK
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer and
+ * received an ACK to that message within specified period
+ **/
+s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+	/* exit if we can't write or release the mailbox,
+	 * or if there is no timeout defined
+	 */
+ if (!mbx->ops.write || !mbx->ops.check_for_ack || !mbx->ops.release ||
+ !mbx->timeout)
+ return ret_val;
+
+ if (size > mbx->size)
+ ret_val = IXGBE_ERR_PARAM;
+ else
+ ret_val = mbx->ops.write(hw, msg, size);
+
+ return ret_val;
+}
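[Editor's note: the new exported pair replaces the old write_posted/
read_posted mailbox ops. The intended round-trip pattern is the one vf.c
adopts later in this diff; shown here as a sketch with a hypothetical name:]

	static s32 example_msg_round_trip(struct ixgbe_hw *hw, u32 *msg,
					  u32 *retmsg, u16 size)
	{
		/* send the request and wait for the PF to ACK it */
		s32 err = ixgbevf_write_mbx(hw, msg, size);

		if (err)
			return err;

		/* wait for the reply and copy it out of the mailbox */
		return ixgbevf_poll_mbx(hw, retmsg, size);
	}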
+
const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
.init_params = ixgbevf_init_mbx_params_vf,
+ .release = ixgbevf_release_mbx_lock_vf,
.read = ixgbevf_read_mbx_vf,
.write = ixgbevf_write_mbx_vf,
- .read_posted = ixgbevf_read_posted_mbx,
- .write_posted = ixgbevf_write_posted_mbx,
+ .check_for_msg = ixgbevf_check_for_msg_vf,
+ .check_for_ack = ixgbevf_check_for_ack_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
+
+const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .release = ixgbevf_release_mbx_lock_vf_legacy,
+ .read = ixgbevf_read_mbx_vf_legacy,
+ .write = ixgbevf_write_mbx_vf_legacy,
.check_for_msg = ixgbevf_check_for_msg_vf,
.check_for_ack = ixgbevf_check_for_ack_vf,
.check_for_rst = ixgbevf_check_for_rst_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 853796c8ef0e..7346ccf014a5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -7,7 +7,6 @@
#include "vf.h"
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX -100
#define IXGBE_VFMAILBOX 0x002FC
#define IXGBE_VFMBMEM 0x00200
@@ -39,14 +38,17 @@
/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is IXGBE_PF_*.
- * Message ACK's are the value or'd with 0xF0000000
+ * Message results are the value or'd with 0xF0000000
*/
-/* Messages below or'd with this are the ACK */
-#define IXGBE_VT_MSGTYPE_ACK 0x80000000
-/* Messages below or'd with this are the NACK */
-#define IXGBE_VT_MSGTYPE_NACK 0x40000000
-/* Indicates that VF is still clear to send requests */
-#define IXGBE_VT_MSGTYPE_CTS 0x20000000
+#define IXGBE_VT_MSGTYPE_SUCCESS 0x80000000 /* Messages or'd with this
+ * have succeeded
+ */
+#define IXGBE_VT_MSGTYPE_FAILURE 0x40000000 /* Messages or'd with this
+ * have failed
+ */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests
+ */
#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
@@ -63,6 +65,7 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
+ ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
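[Editor's note: a PF reply echoes the request type OR'd with exactly one of
the result bits above. The idiomatic decode, as used throughout the vf.c
hunks below, is:]

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE))
		return -EPERM;		/* PF refused the request */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS))
		return IXGBE_ERR_MBX;	/* mailbox error of some sort */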
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index d459f5c8e98f..61d8970c6d1d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -13,13 +13,12 @@
static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
u32 *retmsg, u16 size)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 retval = mbx->ops.write_posted(hw, msg, size);
+ s32 retval = ixgbevf_write_mbx(hw, msg, size);
if (retval)
return retval;
- return mbx->ops.read_posted(hw, retmsg, size);
+ return ixgbevf_poll_mbx(hw, retmsg, size);
}
/**
@@ -75,6 +74,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
/* reset the api version */
hw->api_version = ixgbe_mbox_api_10;
+ hw->mbx.ops.init_params(hw);
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
+ sizeof(struct ixgbe_mbx_operations));
IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
IXGBE_WRITE_FLUSH(hw);
@@ -92,7 +94,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
msgbuf[0] = IXGBE_VF_RESET;
- mbx->ops.write_posted(hw, msgbuf, 1);
+ ixgbevf_write_mbx(hw, msgbuf, 1);
mdelay(10);
@@ -100,7 +102,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
* also set up the mc_filter_type which is piggybacked
* on the mac address in word 3
*/
- ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
+ ret_val = ixgbevf_poll_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
if (ret_val)
return ret_val;
@@ -108,11 +110,11 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
* to indicate that no MAC address has yet been assigned for
* the VF.
*/
- if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
- msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
+ msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_INVALID_MAC_ADDR;
- if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
ether_addr_copy(hw->mac.perm_addr, addr);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
@@ -269,7 +271,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
if (!ret_val) {
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
return -ENOMEM;
}
@@ -311,6 +313,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
* is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -323,12 +326,12 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
msgbuf[0] = IXGBE_VF_GET_RETA;
- err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
+ err = ixgbevf_write_mbx(hw, msgbuf, 1);
if (err)
return err;
- err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
+ err = ixgbevf_poll_mbx(hw, msgbuf, dwords + 1);
if (err)
return err;
@@ -336,14 +339,14 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* If the operation has been refused by a PF return -EPERM */
- if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE))
return -EPERM;
/* If we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such.
*/
- if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
/* ixgbevf doesn't support more than 2 queues at the moment */
@@ -379,6 +382,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
* or if the operation is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -390,12 +394,12 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
}
msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
- err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
+ err = ixgbevf_write_mbx(hw, msgbuf, 1);
if (err)
return err;
- err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
+ err = ixgbevf_poll_mbx(hw, msgbuf, 11);
if (err)
return err;
@@ -403,14 +407,14 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* If the operation has been refused by a PF return -EPERM */
- if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE))
return -EPERM;
/* If we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such.
*/
- if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
@@ -442,7 +446,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
/* if the set request failed, the address was rejected; use "perm_addr" */
if (!ret_val &&
- (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
return IXGBE_ERR_MBX;
}
@@ -545,8 +549,9 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
return -EOPNOTSUPP;
fallthrough;
- case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
break;
default:
return -EOPNOTSUPP;
@@ -561,7 +566,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
return err;
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
return -EPERM;
return 0;
@@ -606,7 +611,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
- if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS))
err = IXGBE_ERR_INVALID_ARGUMENT;
mbx_err:
@@ -705,12 +710,15 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
- if (mbx->ops.read(hw, &in_msg, 1))
+ if (mbx->ops.read(hw, &in_msg, 1)) {
+ if (hw->api_version >= ixgbe_mbox_api_15)
+ mac->get_link_status = false;
goto out;
+ }
if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
/* if msg is not CTS and indicates failure, we must have lost CTS status */
- if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+ if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
ret_val = -1;
goto out;
}
@@ -816,7 +824,7 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
if (ret_val)
return ret_val;
if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
- (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
+ (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_MBX;
return 0;
@@ -863,7 +871,8 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* Store value and return 0 on success */
- if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+ if (msg[0] == (IXGBE_VF_API_NEGOTIATE |
+ IXGBE_VT_MSGTYPE_SUCCESS)) {
hw->api_version = api;
return 0;
}
@@ -901,6 +910,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
break;
default:
return 0;
@@ -918,7 +928,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
* some sort of mailbox error so we should treat it
* as such
*/
- if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
+ if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
/* record and validate values from message */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 1d8209df4162..54158dac8707 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -73,10 +73,9 @@ struct ixgbe_mac_info {
struct ixgbe_mbx_operations {
s32 (*init_params)(struct ixgbe_hw *hw);
+ void (*release)(struct ixgbe_hw *hw);
s32 (*read)(struct ixgbe_hw *, u32 *, u16);
s32 (*write)(struct ixgbe_hw *, u32 *, u16);
- s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
- s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
s32 (*check_for_msg)(struct ixgbe_hw *);
s32 (*check_for_ack)(struct ixgbe_hw *);
s32 (*check_for_rst)(struct ixgbe_hw *);
@@ -96,7 +95,7 @@ struct ixgbe_mbx_info {
struct ixgbe_mbx_stats stats;
u32 timeout;
u32 udelay;
- u32 v2p_mailbox;
+ u32 vf_mailbox;
u16 size;
};
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 072391c494ce..9b6fa27b7daf 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -65,8 +65,8 @@
/* use 2 static channels for TX/RX */
#define LTQ_ETOP_TX_CHANNEL 1
#define LTQ_ETOP_RX_CHANNEL 6
-#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL)
-#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL)
+#define IS_TX(x) ((x) == LTQ_ETOP_TX_CHANNEL)
+#define IS_RX(x) ((x) == LTQ_ETOP_RX_CHANNEL)
#define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x))
#define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y))
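[Editor's note: the added parentheses are an operator-precedence guard.
Without them, IS_TX(x & 1) would expand to (x & 1 == LTQ_ETOP_TX_CHANNEL),
which C parses as x & (1 == LTQ_ETOP_TX_CHANNEL); wrapping the argument as
(x) keeps the macro correct for any argument expression.]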
@@ -111,9 +111,9 @@ ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
if (!ch->skb[ch->dma.desc])
return -ENOMEM;
- ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev,
- ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
- DMA_FROM_DEVICE);
+ ch->dma.desc_base[ch->dma.desc].addr =
+ dma_map_single(&priv->pdev->dev, ch->skb[ch->dma.desc]->data,
+ MAX_DMA_DATA_LEN, DMA_FROM_DEVICE);
ch->dma.desc_base[ch->dma.desc].addr =
CPHYSADDR(ch->skb[ch->dma.desc]->data);
ch->dma.desc_base[ch->dma.desc].ctl =
@@ -135,7 +135,7 @@ ltq_etop_hw_receive(struct ltq_etop_chan *ch)
spin_lock_irqsave(&priv->lock, flags);
if (ltq_etop_alloc_skb(ch)) {
netdev_err(ch->netdev,
- "failed to allocate new rx buffer, stopping DMA\n");
+ "failed to allocate new rx buffer, stopping DMA\n");
ltq_dma_close(&ch->dma);
}
ch->dma.desc++;
@@ -185,7 +185,7 @@ ltq_etop_poll_tx(struct napi_struct *napi, int budget)
dev_kfree_skb_any(ch->skb[ch->tx_free]);
ch->skb[ch->tx_free] = NULL;
memset(&ch->dma.desc_base[ch->tx_free], 0,
- sizeof(struct ltq_dma_desc));
+ sizeof(struct ltq_dma_desc));
ch->tx_free++;
ch->tx_free %= LTQ_DESC_NUM;
}
@@ -218,6 +218,7 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
free_irq(ch->dma.irq, priv);
if (IS_RX(ch->idx)) {
int desc;
+
for (desc = 0; desc < LTQ_DESC_NUM; desc++)
dev_kfree_skb_any(ch->skb[desc]);
}
@@ -246,18 +247,18 @@ ltq_etop_hw_init(struct net_device *dev)
switch (priv->pldata->mii_mode) {
case PHY_INTERFACE_MODE_RMII:
- ltq_etop_w32_mask(ETOP_MII_MASK,
- ETOP_MII_REVERSE, LTQ_ETOP_CFG);
+ ltq_etop_w32_mask(ETOP_MII_MASK, ETOP_MII_REVERSE,
+ LTQ_ETOP_CFG);
break;
case PHY_INTERFACE_MODE_MII:
- ltq_etop_w32_mask(ETOP_MII_MASK,
- ETOP_MII_NORMAL, LTQ_ETOP_CFG);
+ ltq_etop_w32_mask(ETOP_MII_MASK, ETOP_MII_NORMAL,
+ LTQ_ETOP_CFG);
break;
default:
netdev_err(dev, "unknown mii mode %d\n",
- priv->pldata->mii_mode);
+ priv->pldata->mii_mode);
return -ENOTSUPP;
}
@@ -270,7 +271,8 @@ ltq_etop_hw_init(struct net_device *dev)
int irq = LTQ_DMA_CH0_INT + i;
struct ltq_etop_chan *ch = &priv->ch[i];
- ch->idx = ch->dma.nr = i;
+ ch->dma.nr = i;
+ ch->idx = ch->dma.nr;
ch->dma.dev = &priv->pdev->dev;
if (IS_TX(i)) {
@@ -305,9 +307,9 @@ ltq_etop_hw_init(struct net_device *dev)
static void
ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "Lantiq ETOP", sizeof(info->driver));
- strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, "Lantiq ETOP", sizeof(info->driver));
+ strscpy(info->bus_info, "internal", sizeof(info->bus_info));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops ltq_etop_ethtool_ops = {
@@ -399,7 +401,7 @@ ltq_etop_mdio_init(struct net_device *dev)
priv->mii_bus->write = ltq_etop_mdio_wr;
priv->mii_bus->name = "ltq_mii";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- priv->pdev->name, priv->pdev->id);
+ priv->pdev->name, priv->pdev->id);
if (mdiobus_register(priv->mii_bus)) {
err = -ENXIO;
goto err_out_free_mdiobus;
@@ -496,8 +498,9 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
netif_trans_update(dev);
spin_lock_irqsave(&priv->lock, flags);
- desc->addr = ((unsigned int) dma_map_single(&priv->pdev->dev, skb->data, len,
+ desc->addr = ((unsigned int)dma_map_single(&priv->pdev->dev, skb->data, len,
DMA_TO_DEVICE)) - byte_offset;
+ /* Make sure the address is written before we give it to HW */
wmb();
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
@@ -539,7 +542,7 @@ ltq_etop_set_mac_address(struct net_device *dev, void *p)
spin_lock_irqsave(&priv->lock, flags);
ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0);
ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16,
- LTQ_ETOP_MAC_DA1);
+ LTQ_ETOP_MAC_DA1);
spin_unlock_irqrestore(&priv->lock, flags);
}
return ret;
@@ -652,15 +655,15 @@ ltq_etop_probe(struct platform_device *pdev)
}
res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
+ resource_size(res), dev_name(&pdev->dev));
if (!res) {
dev_err(&pdev->dev, "failed to request etop resource\n");
err = -EBUSY;
goto err_out;
}
- ltq_etop_membase = devm_ioremap(&pdev->dev,
- res->start, resource_size(res));
+ ltq_etop_membase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (!ltq_etop_membase) {
dev_err(&pdev->dev, "failed to remap etop engine %d\n",
pdev->id);
@@ -687,22 +690,22 @@ ltq_etop_probe(struct platform_device *pdev)
err = device_property_read_u32(&pdev->dev, "lantiq,tx-burst-length", &priv->tx_burst_len);
if (err < 0) {
dev_err(&pdev->dev, "unable to read tx-burst-length property\n");
- return err;
+ goto err_free;
}
err = device_property_read_u32(&pdev->dev, "lantiq,rx-burst-length", &priv->rx_burst_len);
if (err < 0) {
dev_err(&pdev->dev, "unable to read rx-burst-length property\n");
- return err;
+ goto err_free;
}
for (i = 0; i < MAX_DMA_CHAN; i++) {
if (IS_TX(i))
netif_napi_add(dev, &priv->ch[i].napi,
- ltq_etop_poll_tx, 8);
+ ltq_etop_poll_tx, 8);
else if (IS_RX(i))
netif_napi_add(dev, &priv->ch[i].napi,
- ltq_etop_poll_rx, 32);
+ ltq_etop_poll_rx, 32);
priv->ch[i].netdev = dev;
}
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 0da09ea81980..41d11137cde0 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -27,6 +27,9 @@
#define XRX200_DMA_TX 1
#define XRX200_DMA_BURST_LEN 8
+#define XRX200_DMA_PACKET_COMPLETE 0
+#define XRX200_DMA_PACKET_IN_PROGRESS 1
+
/* cpu port mac */
#define PMAC_RX_IPG 0x0024
#define PMAC_RX_IPG_MASK 0xf
@@ -60,7 +63,14 @@ struct xrx200_chan {
struct napi_struct napi;
struct ltq_dma_channel dma;
- struct sk_buff *skb[LTQ_DESC_NUM];
+
+ union {
+ struct sk_buff *skb[LTQ_DESC_NUM];
+ void *rx_buff[LTQ_DESC_NUM];
+ };
+
+ struct sk_buff *skb_head;
+ struct sk_buff *skb_tail;
struct xrx200_priv *priv;
};
@@ -71,6 +81,9 @@ struct xrx200_priv {
struct xrx200_chan chan_tx;
struct xrx200_chan chan_rx;
+ u16 rx_buf_size;
+ u16 rx_skb_size;
+
struct net_device *net_dev;
struct device *dev;
@@ -97,6 +110,22 @@ static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
xrx200_pmac_w32(priv, val, offset);
}
+static int xrx200_max_frame_len(int mtu)
+{
+ return VLAN_ETH_HLEN + mtu;
+}
+
+static int xrx200_buffer_size(int mtu)
+{
+ return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
+}
+
+static int xrx200_skb_size(u16 buf_size)
+{
+ return SKB_DATA_ALIGN(buf_size + NET_SKB_PAD + NET_IP_ALIGN) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
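[Editor's note: worked example for the default 1500-byte MTU, assuming
NET_SKB_PAD = 64, NET_IP_ALIGN = 2 and 32-byte cachelines, which are typical
for these MIPS SoCs:

	xrx200_max_frame_len(1500) = 18 + 1500          = 1518
	xrx200_buffer_size(1500)   = round_up(1518, 32) = 1536
	xrx200_skb_size(1536)      = SKB_DATA_ALIGN(1536 + 64 + 2)
	                             + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

so the DMA buffer stays burst-aligned for the hardware while the frag
allocation also leaves room for skb headroom and the shared info.]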
+
/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
@@ -109,8 +138,7 @@ static void xrx200_flush_dma(struct xrx200_chan *ch)
break;
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
- ETH_FCS_LEN);
+ ch->priv->rx_buf_size;
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
}
@@ -156,35 +184,34 @@ static int xrx200_close(struct net_device *net_dev)
return 0;
}
-static int xrx200_alloc_skb(struct xrx200_chan *ch)
+static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int size))
{
- int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
- struct sk_buff *skb = ch->skb[ch->dma.desc];
+ void *buf = ch->rx_buff[ch->dma.desc];
+ struct xrx200_priv *priv = ch->priv;
dma_addr_t mapping;
int ret = 0;
- ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
- len);
- if (!ch->skb[ch->dma.desc]) {
+ ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
+ if (!ch->rx_buff[ch->dma.desc]) {
ret = -ENOMEM;
goto skip;
}
- mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
- len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
- dev_kfree_skb_any(ch->skb[ch->dma.desc]);
- ch->skb[ch->dma.desc] = skb;
+ mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, mapping))) {
+ skb_free_frag(ch->rx_buff[ch->dma.desc]);
+ ch->rx_buff[ch->dma.desc] = buf;
ret = -ENOMEM;
goto skip;
}
- ch->dma.desc_base[ch->dma.desc].addr = mapping;
+ ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN;
/* Make sure the address is written before we give it to HW */
wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
- LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;
return ret;
}
@@ -193,12 +220,14 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
{
struct xrx200_priv *priv = ch->priv;
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
- struct sk_buff *skb = ch->skb[ch->dma.desc];
- int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
+ void *buf = ch->rx_buff[ch->dma.desc];
+ u32 ctl = desc->ctl;
+ int len = (ctl & LTQ_DMA_SIZE_MASK);
struct net_device *net_dev = priv->net_dev;
+ struct sk_buff *skb;
int ret;
- ret = xrx200_alloc_skb(ch);
+ ret = xrx200_alloc_buf(ch, napi_alloc_frag);
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
@@ -209,13 +238,39 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
return ret;
}
+	skb = build_skb(buf, priv->rx_skb_size);
+	if (!skb) {
+		skb_free_frag(buf);
+		net_dev->stats.rx_dropped++;
+		return -ENOMEM;
+	}
+
+	skb_reserve(skb, NET_SKB_PAD);
skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, net_dev);
- netif_receive_skb(skb);
- net_dev->stats.rx_packets++;
- net_dev->stats.rx_bytes += len - ETH_FCS_LEN;
- return 0;
+ /* add buffers to skb via skb->frag_list */
+ if (ctl & LTQ_DMA_SOP) {
+ ch->skb_head = skb;
+ ch->skb_tail = skb;
+ skb_reserve(skb, NET_IP_ALIGN);
+ } else if (ch->skb_head) {
+ if (ch->skb_head == ch->skb_tail)
+ skb_shinfo(ch->skb_tail)->frag_list = skb;
+ else
+ ch->skb_tail->next = skb;
+ ch->skb_tail = skb;
+ ch->skb_head->len += skb->len;
+ ch->skb_head->data_len += skb->len;
+ ch->skb_head->truesize += skb->truesize;
+ }
+
+ if (ctl & LTQ_DMA_EOP) {
+ ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
+ netif_receive_skb(ch->skb_head);
+ net_dev->stats.rx_packets++;
+ net_dev->stats.rx_bytes += ch->skb_head->len;
+ ch->skb_head = NULL;
+ ch->skb_tail = NULL;
+ ret = XRX200_DMA_PACKET_COMPLETE;
+ } else {
+ ret = XRX200_DMA_PACKET_IN_PROGRESS;
+ }
+
+ return ret;
}
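[Editor's note: a frame larger than one DMA buffer now arrives as a chain:
the SOP buffer becomes skb_head, the first following buffer is linked via
skb_shinfo(head)->frag_list, later ones via tail->next, and only the EOP
buffer triggers delivery. len, data_len and truesize on the head are kept in
sync so the stack sees one coherent non-linear skb.]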
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
@@ -230,7 +285,9 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
ret = xrx200_hw_receive(ch);
- if (ret)
+ if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
+ continue;
+ if (ret != XRX200_DMA_PACKET_COMPLETE)
return ret;
rx++;
} else {
@@ -351,11 +408,13 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
struct xrx200_chan *ch_rx = &priv->chan_rx;
int old_mtu = net_dev->mtu;
bool running = false;
- struct sk_buff *skb;
+ void *buff;
int curr_desc;
int ret = 0;
net_dev->mtu = new_mtu;
+ priv->rx_buf_size = xrx200_buffer_size(new_mtu);
+ priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
if (new_mtu <= old_mtu)
return ret;
@@ -371,13 +430,15 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
ch_rx->dma.desc++) {
- skb = ch_rx->skb[ch_rx->dma.desc];
- ret = xrx200_alloc_skb(ch_rx);
+ buff = ch_rx->rx_buff[ch_rx->dma.desc];
+ ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
if (ret) {
net_dev->mtu = old_mtu;
+ priv->rx_buf_size = xrx200_buffer_size(old_mtu);
+ priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
break;
}
- dev_kfree_skb_any(skb);
+ skb_free_frag(buff);
}
ch_rx->dma.desc = curr_desc;
@@ -430,7 +491,7 @@ static int xrx200_dma_init(struct xrx200_priv *priv)
ltq_dma_alloc_rx(&ch_rx->dma);
for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
ch_rx->dma.desc++) {
- ret = xrx200_alloc_skb(ch_rx);
+ ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
if (ret)
goto rx_free;
}
@@ -465,7 +526,7 @@ rx_ring_free:
/* free the allocated RX ring */
for (i = 0; i < LTQ_DESC_NUM; i++) {
if (priv->chan_rx.skb[i])
- dev_kfree_skb_any(priv->chan_rx.skb[i]);
+ skb_free_frag(priv->chan_rx.rx_buff[i]);
}
rx_free:
@@ -482,7 +543,7 @@ static void xrx200_hw_cleanup(struct xrx200_priv *priv)
/* free the allocated RX ring */
for (i = 0; i < LTQ_DESC_NUM; i++)
- dev_kfree_skb_any(priv->chan_rx.skb[i]);
+ skb_free_frag(priv->chan_rx.rx_buff[i]);
}
static int xrx200_probe(struct platform_device *pdev)
@@ -505,7 +566,9 @@ static int xrx200_probe(struct platform_device *pdev)
net_dev->netdev_ops = &xrx200_netdev_ops;
SET_NETDEV_DEV(net_dev, dev);
net_dev->min_mtu = ETH_ZLEN;
- net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
+ priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
+ priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
/* load the memory ranges */
priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
@@ -550,8 +613,10 @@ static int xrx200_probe(struct platform_device *pdev)
PMAC_HD_CTL);
/* setup NAPI */
- netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
- netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
+ netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx,
+ NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping,
+ NAPI_POLL_WEIGHT);
platform_set_drvdata(pdev, priv);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index bb14fa2241a3..105247582684 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1638,7 +1638,9 @@ static int mv643xx_eth_set_coalesce(struct net_device *dev,
}
static void
-mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
+mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
+ struct kernel_ethtool_ringparam *kernel_er,
+ struct netlink_ext_ack *extack)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
@@ -1650,7 +1652,9 @@ mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
}
static int
-mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
+mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
+ struct kernel_ethtool_ringparam *kernel_er,
+ struct netlink_ext_ack *extack)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
@@ -3197,7 +3201,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->hw_features = dev->features;
dev->priv_flags |= IFF_UNICAST_FLT;
- dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
+ netif_set_gso_max_segs(dev, MV643XX_MAX_TSO_SEGS);
/* MTU range: 64 - 9500 */
dev->min_mtu = 64;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5a7bdca22a63..83c8908f0cc7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -38,6 +38,7 @@
#include <net/ipv6.h>
#include <net/tso.h>
#include <net/page_pool.h>
+#include <net/pkt_cls.h>
#include <linux/bpf_trace.h>
/* Registers */
@@ -247,12 +248,39 @@
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
+#define MVNETA_TXQ_CMD1_REG 0x3e00
+#define MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 BIT(3)
+#define MVNETA_TXQ_CMD1_BW_LIM_EN BIT(0)
+#define MVNETA_REFILL_NUM_CLK_REG 0x3e08
+#define MVNETA_REFILL_MAX_NUM_CLK 0x0000ffff
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
+#define MVNETA_TXQ_BUCKET_REFILL_REG(q) (0x3e20 + ((q) << 2))
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK 0x3ff00000
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT 20
+#define MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX 0x0007ffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+/* The values of the bucket refill base period and refill period are taken
+ * from the reference manual, and add up to a base resolution of 10Kbps.
+ * This allows us to cover all rate-limit values from 10Kbps up to 5Gbps.
+ */
+
+/* Base period for the rate limit algorithm */
+#define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS 100
+
+/* Number of Base Periods to wait between each bucket refill */
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD 1000
+
+/* The base resolution for rate limiting, in bps. Any max_rate value should be
+ * a multiple of that value.
+ */
+#define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \
+ (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \
+ MVNETA_TXQ_BUCKET_REFILL_PERIOD))
+
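[Editor's note: plugging in the constants confirms the stated resolution:
NSEC_PER_SEC / (100 ns * 1000) = 10^9 / 10^5 = 10000, i.e. 10Kbps per token
unit, and the 19-bit refill value (max 0x7ffff = 524287) therefore spans
524287 * 10Kbps, roughly 5.2Gbps.]
+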
#define MVNETA_LPI_CTRL_0 0x2cc0
#define MVNETA_LPI_CTRL_1 0x2cc4
#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
@@ -492,13 +520,13 @@ struct mvneta_port {
u8 mcast_count[256];
u16 tx_ring_size;
u16 rx_ring_size;
- u8 prio_tc_map[8];
phy_interface_t phy_interface;
struct device_node *dn;
unsigned int tx_csum_limit;
struct phylink *phylink;
struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
struct phy *comphy;
struct mvneta_bm *bm_priv;
@@ -2212,7 +2240,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(pp->dev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(pp->dev, prog, act);
@@ -3819,58 +3847,31 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
return 0;
}
-static void mvneta_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ return container_of(pcs, struct mvneta_port, phylink_pcs);
+}
+static int mvneta_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
/* We only support QSGMII, SGMII, 802.3z and RGMII modes.
* When in 802.3z mode, we must have AN enabled:
* "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1."
*/
if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg)) {
- linkmode_zero(supported);
- return;
- }
-
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
-
- /* Asymmetric pause is unsupported */
- phylink_set(mask, Pause);
-
- /* Half-duplex at speeds higher than 100Mbit is unsupported */
- if (state->interface != PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
-
- if (state->interface == PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
-
- if (!phy_interface_mode_is_8023z(state->interface)) {
- /* 10M and 100M are only supported in non-802.3z mode */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- }
+ !phylink_test(state->advertising, Autoneg))
+ return -EINVAL;
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ return 0;
}
-static void mvneta_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct mvneta_port *pp = netdev_priv(ndev);
+ struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
u32 gmac_stat;
gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
@@ -3888,17 +3889,71 @@ static void mvneta_mac_pcs_get_state(struct phylink_config *config,
state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
- state->pause = 0;
if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
state->pause |= MLO_PAUSE_RX;
if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
state->pause |= MLO_PAUSE_TX;
}
-static void mvneta_mac_an_restart(struct phylink_config *config)
+static int mvneta_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode, phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct mvneta_port *pp = netdev_priv(ndev);
+ struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
+ u32 mask, val, an, old_an, changed;
+
+ mask = MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_INBAND_RESTART_AN |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_FLOW_CTRL_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN;
+
+ if (phylink_autoneg_inband(mode)) {
+ mask |= MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+ val = MVNETA_GMAC_INBAND_AN_ENABLE;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII mode receives the speed and duplex from PHY */
+ val |= MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN;
+ } else {
+ /* 802.3z mode has fixed speed and duplex */
+ val |= MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+			/* The FLOW_CTRL_EN bit selects whether the GMAC
+			 * pause mode is controlled automatically by the
+			 * hardware or manually via CONFIG_FLOW_CTRL.
+			 */
+ if (permit_pause_to_mac)
+ val |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
+
+ /* Update the advertisement bits */
+ mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+ if (phylink_test(advertising, Pause))
+ val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+ }
+ } else {
+ /* Phy or fixed speed - disable in-band AN modes */
+ val = 0;
+ }
+
+ old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ an = (an & ~mask) | val;
+ changed = old_an ^ an;
+ if (changed)
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an);
+
+ /* We are only interested in the advertisement bits changing */
+ return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL);
+}
+
+static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
@@ -3907,6 +3962,47 @@ static void mvneta_mac_an_restart(struct phylink_config *config)
gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
}
+static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
+ .pcs_validate = mvneta_pcs_validate,
+ .pcs_get_state = mvneta_pcs_get_state,
+ .pcs_config = mvneta_pcs_config,
+ .pcs_an_restart = mvneta_pcs_an_restart,
+};
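[Editor's note: the old mac_pcs_get_state/mac_an_restart MAC hooks are
remodelled as a phylink_pcs embedded directly in struct mvneta_port, so
mvneta_pcs_to_port() can recover the port with container_of() and no
back-pointer needs to be stored alongside the PCS.]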
+
+static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val;
+
+ if (pp->phy_interface != interface ||
+ phylink_autoneg_inband(mode)) {
+ /* Force the link down when changing the interface or if in
+ * in-band mode. According to Armada 370 documentation, we
+ * can only change the port mode and in-band enable when the
+ * link is down.
+ */
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+ val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ }
+
+ if (pp->phy_interface != interface)
+ WARN_ON(phy_power_off(pp->comphy));
+
+ /* Enable the 1ms clock */
+ if (phylink_autoneg_inband(mode)) {
+ unsigned long rate = clk_get_rate(pp->clk);
+
+ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER,
+ MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000));
+ }
+
+ return 0;
+}
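[Editor's note: phylink calls these hooks in a fixed order during a major
reconfiguration -- mac_prepare(), then mac_config(), then mac_finish() --
which is what lets the driver force the link down and power off the comphy
before the mode change above, and re-enable the serdes and lift the
forced-down bit only after all registers are rewritten in mac_finish()
below.]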
+
static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -3915,20 +4011,11 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
- u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
- u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
MVNETA_GMAC2_PORT_RESET);
new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
- new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
- new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_INBAND_RESTART_AN |
- MVNETA_GMAC_AN_SPEED_EN |
- MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
- MVNETA_GMAC_AN_FLOW_CTRL_EN |
- MVNETA_GMAC_AN_DUPLEX_EN);
/* Even though it might look weird, when we're configured in
* SGMII or QSGMII mode, the RGMII bit needs to be set.
@@ -3940,9 +4027,6 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
phy_interface_mode_is_8023z(state->interface))
new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
- if (phylink_test(state->advertising, Pause))
- new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
-
if (!phylink_autoneg_inband(mode)) {
/* Phy or fixed speed - nothing to do, leave the
* configured speed, duplex and flow control as-is.
@@ -3950,66 +4034,23 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
/* SGMII mode receives the state from the PHY */
new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
- new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
- new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
- MVNETA_GMAC_FORCE_LINK_PASS |
- MVNETA_GMAC_CONFIG_MII_SPEED |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX)) |
- MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_AN_SPEED_EN |
- MVNETA_GMAC_AN_DUPLEX_EN;
} else {
/* 802.3z negotiation - only 1000base-X */
new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
- new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
- new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
- MVNETA_GMAC_FORCE_LINK_PASS |
- MVNETA_GMAC_CONFIG_MII_SPEED)) |
- MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
- /* The MAC only supports FD mode */
- MVNETA_GMAC_CONFIG_FULL_DUPLEX;
-
- if (state->pause & MLO_PAUSE_AN && state->an_enabled)
- new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
- }
-
- /* Armada 370 documentation says we can only change the port mode
- * and in-band enable when the link is down, so force it down
- * while making these changes. We also do this for GMAC_CTRL2
- */
- if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
- (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
- (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
- (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
- MVNETA_GMAC_FORCE_LINK_DOWN);
}
-
/* When at 2.5G, the link partner can send frames with shortened
* preambles.
*/
if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
- if (pp->phy_interface != state->interface) {
- if (pp->comphy)
- WARN_ON(phy_power_off(pp->comphy));
- WARN_ON(mvneta_config_interface(pp, state->interface));
- }
-
if (new_ctrl0 != gmac_ctrl0)
mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
if (new_ctrl2 != gmac_ctrl2)
mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
if (new_ctrl4 != gmac_ctrl4)
mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
- if (new_clk != gmac_clk)
- mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
- if (new_an != gmac_an)
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
@@ -4018,6 +4059,36 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
}
}
+static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val, clk;
+
+ /* Disable 1ms clock if not in in-band mode */
+ if (!phylink_autoneg_inband(mode)) {
+ clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+ clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk);
+ }
+
+ if (pp->phy_interface != interface)
+ /* Enable the Serdes PHY */
+ WARN_ON(mvneta_config_interface(pp, interface));
+
+ /* Allow the link to come up if in in-band mode, otherwise the
+ * link is forced via mac_link_down()/mac_link_up()
+ */
+ if (phylink_autoneg_inband(mode)) {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ }
+
+ return 0;
+}
+
static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
{
u32 lpi_ctl1;
@@ -4104,10 +4175,10 @@ static void mvneta_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops mvneta_phylink_ops = {
- .validate = mvneta_validate,
- .mac_pcs_get_state = mvneta_mac_pcs_get_state,
- .mac_an_restart = mvneta_mac_an_restart,
+ .validate = phylink_generic_validate,
+ .mac_prepare = mvneta_mac_prepare,
.mac_config = mvneta_mac_config,
+ .mac_finish = mvneta_mac_finish,
.mac_link_down = mvneta_mac_link_down,
.mac_link_up = mvneta_mac_link_up,
};
@@ -4539,8 +4610,11 @@ static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
}
-static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static void
+mvneta_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvneta_port *pp = netdev_priv(netdev);
@@ -4550,8 +4624,11 @@ static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
ring->tx_pending = pp->tx_ring_size;
}
-static int mvneta_ethtool_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+static int
+mvneta_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvneta_port *pp = netdev_priv(dev);
@@ -4919,43 +4996,144 @@ static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
}
-static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
+static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
{
- u32 val = 0;
- int i;
+ u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);
- for (i = 0; i < rxq_number; i++)
- val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);
+ val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7);
+ val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);
mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
}
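mvneta_map_vlan_prio_to_rxq() above does a read-modify-write on a single 3-bit field instead of rebuilding the whole priority map as the removed mvneta_setup_rx_prio_map() did. A minimal sketch of the same bit manipulation, assuming the register packs one 3-bit rxq number per VLAN priority at bit offset pri * 3 (the actual layout is whatever MVNETA_VLAN_PRIO_RXQ_MAP encodes):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: 3 bits per priority, priority N at bits [3N+2:3N] */
    #define PRIO_RXQ_MAP(pri, rxq)  ((uint32_t)(rxq) << ((pri) * 3))

    static uint32_t map_vlan_prio(uint32_t reg, unsigned int pri, unsigned int rxq)
    {
        reg &= ~PRIO_RXQ_MAP(pri, 0x7);    /* clear the 3-bit field */
        reg |= PRIO_RXQ_MAP(pri, rxq);     /* set the new queue */
        return reg;
    }

    int main(void)
    {
        uint32_t reg = 0;

        reg = map_vlan_prio(reg, 5, 3);    /* VLAN priority 5 -> rxq 3 */
        printf("reg = 0x%08x\n", reg);     /* prints 0x00018000 */
        return 0;
    }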
+static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp)
+{
+ unsigned long core_clk_rate;
+ u32 refill_cycles;
+ u32 val;
+
+ core_clk_rate = clk_get_rate(pp->clk);
+ if (!core_clk_rate)
+ return -EINVAL;
+
+ refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS /
+ (NSEC_PER_SEC / core_clk_rate);
+
+ if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK)
+ return -EINVAL;
+
+ /* Enable bw limit algorithm version 3 */
+ val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
+ val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
+ mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
+
+ /* Set the base refill rate */
+ mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles);
+
+ return 0;
+}
+
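refill_cycles above converts a fixed refill period in nanoseconds into core-clock cycles by dividing by the clock period, NSEC_PER_SEC / core_clk_rate. A runnable sketch with illustrative numbers; both constants here are assumptions, not the driver's actual values:

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000UL

    int main(void)
    {
        unsigned long rate = 250000000UL;      /* assumed 250 MHz core clock */
        unsigned long base_period_ns = 10000;  /* assumed refill base period */

        /* clock period = NSEC_PER_SEC / rate = 4 ns per cycle here */
        unsigned long refill_cycles = base_period_ns / (NSEC_PER_SEC / rate);

        printf("refill_cycles = %lu\n", refill_cycles);  /* prints 2500 */
        return 0;
    }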
+static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp)
+{
+ u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
+
+ val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
+ mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
+}
+
+static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue,
+ u64 min_rate, u64 max_rate)
+{
+ u32 refill_val, rem;
+ u32 val = 0;
+
+ /* Convert from Bps to bps */
+ max_rate *= 8;
+
+ if (min_rate)
+ return -EINVAL;
+
+ refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION,
+ &rem);
+
+ if (rem || !refill_val ||
+ refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX)
+ return -EINVAL;
+
+ val = refill_val;
+ val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD <<
+ MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT);
+
+ mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val);
+
+ return 0;
+}
+
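mvneta_setup_queue_rates() accepts only a max rate that, once converted from bytes/s (as mqprio passes it) to bits/s, is a non-zero exact multiple of the hardware resolution. A sketch of that check with the resolution as a stand-in value (the real one is MVNETA_TXQ_RATE_LIMIT_RESOLUTION):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in resolution in bit/s; see MVNETA_TXQ_RATE_LIMIT_RESOLUTION */
    #define RATE_RESOLUTION 10000ULL

    static int rate_to_refill(uint64_t max_rate_bytes, uint64_t *refill)
    {
        uint64_t bps = max_rate_bytes * 8;  /* Bps -> bps */

        if (bps % RATE_RESOLUTION)          /* must divide exactly */
            return -1;
        *refill = bps / RATE_RESOLUTION;
        return *refill ? 0 : -1;            /* a zero refill is also invalid */
    }

    int main(void)
    {
        uint64_t refill;

        /* 12.5 MB/s == 100 Mbit/s -> refill value of 10000 */
        if (!rate_to_refill(12500000, &refill))
            printf("refill = %llu\n", (unsigned long long)refill);
        return 0;
    }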
static int mvneta_setup_mqprio(struct net_device *dev,
- struct tc_mqprio_qopt *qopt)
+ struct tc_mqprio_qopt_offload *mqprio)
{
struct mvneta_port *pp = netdev_priv(dev);
+ int rxq, txq, tc, ret;
u8 num_tc;
- int i;
- qopt->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- num_tc = qopt->num_tc;
+ if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
+ return 0;
+
+ num_tc = mqprio->qopt.num_tc;
if (num_tc > rxq_number)
return -EINVAL;
+ mvneta_clear_rx_prio_map(pp);
+
if (!num_tc) {
- mvneta_clear_rx_prio_map(pp);
+ mvneta_disable_per_queue_rate_limit(pp);
netdev_reset_tc(dev);
return 0;
}
- memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map));
+ netdev_set_num_tc(dev, mqprio->qopt.num_tc);
+
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+ netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
+ mqprio->qopt.offset[tc]);
+
+ for (rxq = mqprio->qopt.offset[tc];
+ rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
+ rxq++) {
+ if (rxq >= rxq_number)
+ return -EINVAL;
- mvneta_setup_rx_prio_map(pp);
+ mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
+ }
+ }
- netdev_set_num_tc(dev, qopt->num_tc);
- for (i = 0; i < qopt->num_tc; i++)
- netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]);
+ if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
+ mvneta_disable_per_queue_rate_limit(pp);
+ return 0;
+ }
+
+ if (mqprio->qopt.num_tc > txq_number)
+ return -EINVAL;
+
+ ret = mvneta_enable_per_queue_rate_limit(pp);
+ if (ret)
+ return ret;
+
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+ for (txq = mqprio->qopt.offset[tc];
+ txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
+ txq++) {
+ if (txq >= txq_number)
+ return -EINVAL;
+
+ ret = mvneta_setup_queue_rates(pp, txq,
+ mqprio->min_rate[tc],
+ mqprio->max_rate[tc]);
+ if (ret)
+ return ret;
+ }
+ }
return 0;
}
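From user space this path is exercised through the iproute2 mqprio qdisc; something along the lines of "tc qdisc add dev eth0 root mqprio num_tc 2 map 0 1 queues 1@0 1@1 hw 1 shaper bw_rlimit max_rate 100Mbit 200Mbit" should reach the TC_MQPRIO_SHAPER_BW_RATE branch above. The exact option spelling follows the tc-mqprio man page; the command here is illustrative, not taken from this patch.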
@@ -5166,6 +5344,9 @@ static int mvneta_probe(struct platform_device *pdev)
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
+ pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000FD | MAC_2500FD;
+
phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
pp->phylink_config.supported_interfaces);
@@ -5237,6 +5418,9 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_clk;
}
+ pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
+ phylink_set_pcs(phylink, &pp->phylink_pcs);
+
/* Alloc per-cpu port structure */
pp->ports = alloc_percpu(struct mvneta_pcpu_port);
if (!pp->ports) {
@@ -5355,7 +5539,7 @@ static int mvneta_probe(struct platform_device *pdev)
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
+ netif_set_gso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
/* MTU range: 68 - 9676 */
dev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index cf8acabb90ac..ad73a488fc5f 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1239,7 +1239,8 @@ struct mvpp2_port {
phy_interface_t phy_interface;
struct phylink *phylink;
struct phylink_config phylink_config;
- struct phylink_pcs phylink_pcs;
+ struct phylink_pcs pcs_gmac;
+ struct phylink_pcs pcs_xlg;
struct phy *comphy;
struct mvpp2_bm_pool *pool_long;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6da8a595026b..7cdbf8b8bbf6 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1488,6 +1488,7 @@ static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
static bool mvpp2_is_xlg(phy_interface_t interface)
{
return interface == PHY_INTERFACE_MODE_10GBASER ||
+ interface == PHY_INTERFACE_MODE_5GBASER ||
interface == PHY_INTERFACE_MODE_XAUI;
}
@@ -1627,6 +1628,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
case PHY_INTERFACE_MODE_2500BASEX:
mvpp22_gop_init_sgmii(port);
break;
+ case PHY_INTERFACE_MODE_5GBASER:
case PHY_INTERFACE_MODE_10GBASER:
if (!mvpp2_port_supports_xlg(port))
goto invalid_conf;
@@ -2186,6 +2188,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port,
xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
switch (interface) {
+ case PHY_INTERFACE_MODE_5GBASER:
case PHY_INTERFACE_MODE_10GBASER:
val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
@@ -3820,7 +3823,7 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
}
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(port->dev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(port->dev, prog, act);
@@ -5139,9 +5142,6 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags)
- return -EINVAL;
-
if (config.tx_type != HWTSTAMP_TX_OFF &&
config.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
@@ -5433,8 +5433,11 @@ static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->bus_info));
}
-static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+static void
+mvpp2_ethtool_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -5444,8 +5447,11 @@ static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
ring->tx_pending = port->tx_ring_size;
}
-static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+static int
+mvpp2_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvpp2_port *port = netdev_priv(dev);
u16 prev_rx_ring_size = port->rx_ring_size;
@@ -6109,18 +6115,26 @@ static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
return container_of(config, struct mvpp2_port, phylink_config);
}
-static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
+static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs)
{
- return container_of(pcs, struct mvpp2_port, phylink_pcs);
+ return container_of(pcs, struct mvpp2_port, pcs_xlg);
+}
+
+static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mvpp2_port, pcs_gmac);
}
static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
u32 val;
- state->speed = SPEED_10000;
+ if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER)
+ state->speed = SPEED_5000;
+ else
+ state->speed = SPEED_10000;
state->duplex = 1;
state->an_complete = 1;
@@ -6149,10 +6163,25 @@ static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
.pcs_config = mvpp2_xlg_pcs_config,
};
+static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ /* When in 802.3z mode, we must have AN enabled:
+ * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
+ * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ */
+ if (phy_interface_mode_is_8023z(state->interface) &&
+ !phylink_test(state->advertising, Autoneg))
+ return -EINVAL;
+
+ return 0;
+}
+
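This check previously lived in mvpp2_phylink_validate() (removed further down in this patch); hoisting it into a pcs_validate hook is what lets the driver switch to phylink_generic_validate() while still rejecting 802.3z modes with in-band autonegotiation disabled.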
static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
u32 val;
val = readl(port->base + MVPP2_GMAC_STATUS0);
@@ -6189,7 +6218,7 @@ static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
u32 mask, val, an, old_an, changed;
mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
@@ -6243,7 +6272,7 @@ static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
@@ -6253,78 +6282,12 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
+ .pcs_validate = mvpp2_gmac_pcs_validate,
.pcs_get_state = mvpp2_gmac_pcs_get_state,
.pcs_config = mvpp2_gmac_pcs_config,
.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
};
-static void mvpp2_phylink_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- /* When in 802.3z mode, we must have AN enabled:
- * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
- * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
- */
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- goto empty_set;
-
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
-
- if (port->priv->global_tx_fc) {
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- }
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_XAUI:
- if (mvpp2_port_supports_xlg(port)) {
- phylink_set_10g_modes(mask);
- phylink_set(mask, 10000baseKR_Full);
- }
- break;
-
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_SGMII:
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- break;
-
- case PHY_INTERFACE_MODE_1000BASEX:
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- break;
-
- case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- break;
-
- default:
- goto empty_set;
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
- return;
-
-empty_set:
- linkmode_zero(supported);
-}
-
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -6404,8 +6367,23 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
}
-static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface)
+static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ /* Select the appropriate PCS operations depending on the
+ * configured interface mode. We will only switch to a mode
+ * that the validate() checks have already passed.
+ */
+ if (mvpp2_is_xlg(interface))
+ return &port->pcs_xlg;
+ else
+ return &port->pcs_gmac;
+}
+
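mac_select_pcs replaces the old approach of swapping port->phylink_pcs.ops inside mac_prepare() and then calling phylink_set_pcs(): phylink now asks the driver for the right PCS whenever the interface mode changes, so the two PCS instances only need their ops pointers set once at probe time (see the mvpp2_port_probe hunk below).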
+static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
@@ -6454,31 +6432,9 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
}
}
- /* Select the appropriate PCS operations depending on the
- * configured interface mode. We will only switch to a mode
- * that the validate() checks have already passed.
- */
- if (mvpp2_is_xlg(interface))
- port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops;
- else
- port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops;
-
return 0;
}
-static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- int ret;
-
- ret = mvpp2__mac_prepare(config, mode, interface);
- if (ret == 0)
- phylink_set_pcs(port->phylink, &port->phylink_pcs);
-
- return ret;
-}
-
static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -6649,7 +6605,8 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops mvpp2_phylink_ops = {
- .validate = mvpp2_phylink_validate,
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = mvpp2_select_pcs,
.mac_prepare = mvpp2_mac_prepare,
.mac_config = mvpp2_mac_config,
.mac_finish = mvpp2_mac_finish,
@@ -6667,12 +6624,15 @@ static void mvpp2_acpi_start(struct mvpp2_port *port)
struct phylink_link_state state = {
.interface = port->phy_interface,
};
- mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND,
- port->phy_interface);
+ struct phylink_pcs *pcs;
+
+ pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface);
+
+ mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface);
mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
- port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND,
- port->phy_interface,
- state.advertising, false);
+ pcs->ops->pcs_config(pcs, MLO_AN_INBAND, port->phy_interface,
+ state.advertising, false);
mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
port->phy_interface);
mvpp2_mac_link_up(&port->phylink_config, NULL,
@@ -6901,7 +6861,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mvpp2_set_hw_csum(port, port->pool_long->id);
dev->vlan_features |= features;
- dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
+ netif_set_gso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
dev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 9704 */
@@ -6913,12 +6873,44 @@ static int mvpp2_port_probe(struct platform_device *pdev,
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
port->phylink_config.type = PHYLINK_NETDEV;
+ port->phylink_config.mac_capabilities =
+ MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;
+
+ if (port->priv->global_tx_fc)
+ port->phylink_config.mac_capabilities |=
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
if (mvpp2_port_supports_xlg(port)) {
- __set_bit(PHY_INTERFACE_MODE_10GBASER,
- port->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_XAUI,
- port->phylink_config.supported_interfaces);
+ /* If a COMPHY is present, we can support any of
+ * the serdes modes and switch between them.
+ */
+ if (comphy) {
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) {
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_XAUI) {
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ port->phylink_config.supported_interfaces);
+ }
+
+ if (comphy)
+ port->phylink_config.mac_capabilities |=
+ MAC_10000FD | MAC_5000FD;
+ else if (phy_mode == PHY_INTERFACE_MODE_5GBASER)
+ port->phylink_config.mac_capabilities |=
+ MAC_5000FD;
+ else
+ port->phylink_config.mac_capabilities |=
+ MAC_10000FD;
}
if (mvpp2_port_supports_rgmii(port))
@@ -6948,6 +6940,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink_config.supported_interfaces);
}
+ port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
+ port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
+
phylink = phylink_create(&port->phylink_config, port_fwnode,
phy_mode, &mvpp2_phylink_ops);
if (IS_ERR(phylink)) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 186d00a9ab35..3631d612aaca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1570,6 +1570,8 @@ static struct mac_ops cgx_mac_ops = {
.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
.mac_pause_frm_config = cgx_lmac_pause_frm_config,
.mac_enadis_ptp_config = cgx_lmac_ptp_config,
+ .mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
+ .mac_tx_enable = cgx_lmac_tx_enable,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index fc6e7423cbd8..b33e7d1d0851 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -107,6 +107,9 @@ struct mac_ops {
void (*mac_enadis_ptp_config)(void *cgxd,
int lmac_id,
bool enable);
+
+ int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
};
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 4e79e918a161..58e2aeebc14f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -732,6 +732,7 @@ enum nix_af_status {
NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
+ NIX_AF_ERR_LINK_CREDITS = -431,
};
/* For NIX RX vtag action */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 0fe7ad35e36f..4180376fa676 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -185,7 +185,6 @@ enum npc_kpu_parser_state {
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
NPC_S_KPU2_EXDSA,
- NPC_S_KPU2_NGIO,
NPC_S_KPU2_CPT_CTAG,
NPC_S_KPU2_CPT_QINQ,
NPC_S_KPU3_CTAG,
@@ -212,6 +211,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_NSH,
NPC_S_KPU5_CPT_IP,
NPC_S_KPU5_CPT_IP6,
+ NPC_S_KPU5_NGIO,
NPC_S_KPU6_IP6_EXT,
NPC_S_KPU6_IP6_HOP_DEST,
NPC_S_KPU6_IP6_ROUT,
@@ -1124,15 +1124,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_CTAG,
0xffff,
- NPC_ETYPE_NGIO,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1968,6 +1959,15 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
{
NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NGIO,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_PPPOE,
0xffff,
0x0000,
@@ -2750,15 +2750,6 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_NGIO, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_CPT_CTAG, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -5090,6 +5081,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU5_NGIO, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -8425,14 +8425,6 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 0, 0, 0,
- NPC_S_KPU2_NGIO, 12, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 0, 0, 0,
NPC_S_KPU2_CTAG2, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -9196,6 +9188,14 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_NGIO, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
@@ -9892,14 +9892,6 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NGIO,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
NPC_S_KPU5_CPT_IP, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -11974,6 +11966,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NGIO,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index d6321de3cc17..e682b7bfde64 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -60,6 +60,8 @@ struct ptp *ptp_get(void)
/* Check driver is bound to PTP block */
if (!ptp)
ptp = ERR_PTR(-EPROBE_DEFER);
+ else
+ pci_dev_get(ptp->pdev);
return ptp;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index e695fa0e82a9..9ea2f6ac38ec 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -30,6 +30,8 @@ static struct mac_ops rpm_mac_ops = {
.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
.mac_pause_frm_config = rpm_lmac_pause_frm_config,
.mac_enadis_ptp_config = rpm_lmac_ptp_config,
+ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+ .mac_tx_enable = rpm_lmac_tx_enable,
};
struct mac_ops *rpm_get_mac_ops(void)
@@ -54,6 +56,43 @@ int rpm_get_nr_lmacs(void *rpmd)
return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
}
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg, last;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ last = cfg;
+ if (enable)
+ cfg |= RPM_TX_EN;
+ else
+ cfg &= ~(RPM_TX_EN);
+
+ if (cfg != last)
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return !!(last & RPM_TX_EN);
+}
+
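Note that rpm_lmac_tx_enable() returns the previous TX-enable state rather than 0 on success; the rvu_nix.c callers further down (nix_smq_flush() and nix_config_link_credits()) rely on this via restore_tx_en = !rvu_cgx_config_tx(...) to restore the LMAC to its prior state once they are done.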
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (enable)
+ cfg |= RPM_RX_EN | RPM_TX_EN;
+ else
+ cfg &= ~(RPM_RX_EN | RPM_TX_EN);
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return 0;
+}
+
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
@@ -252,23 +291,20 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
if (!rpm || lmac_id >= rpm->lmac_count)
return -ENODEV;
lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
- if (lmac_type == LMAC_MODE_100G_R) {
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
-
- if (enable)
- cfg |= RPMX_MTI_PCS_LBK;
- else
- cfg &= ~RPMX_MTI_PCS_LBK;
- rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
- } else {
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1);
- if (enable)
- cfg |= RPMX_MTI_PCS_LBK;
- else
- cfg &= ~RPMX_MTI_PCS_LBK;
- rpm_write(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1, cfg);
+
+ if (lmac_type == LMAC_MODE_QSGMII || lmac_type == LMAC_MODE_SGMII) {
+ dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n");
+ return 0;
}
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
+
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index 57c8a687b488..ff580311edd0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -43,6 +43,8 @@
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
#define RPM_LMAC_FWI 0xa
+#define RPM_TX_EN BIT_ULL(0)
+#define RPM_RX_EN BIT_ULL(1)
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
@@ -57,4 +59,6 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 3ca6b942ebe2..54e1b27a7dfe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -520,8 +520,11 @@ static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
- if (err)
- dev_err(rvu->dev, "HW block:%d reset failed\n", blkaddr);
+ if (err) {
+ dev_err(rvu->dev, "HW block:%d reset timed out, retrying\n", blkaddr);
+ while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
+ ;
+ }
}
static void rvu_reset_all_blocks(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 66e45d733824..5ed94cfb47d2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -806,6 +806,7 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 2ca182a4ce82..8a7ac5a8b821 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -441,16 +441,26 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ void *cgxd;
if (!is_cgx_config_permitted(rvu, pcifunc))
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
+}
- cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
+{
+ struct mac_ops *mac_ops;
- return 0;
+ mac_ops = get_mac_ops(cgxd);
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 45357deecabb..a73a8017e0ee 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -172,14 +172,13 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
- char irq_name[16];
int i, ret;
for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
- snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
ret = rvu_cpt_do_register_interrupt(block, off + i,
rvu_cpt_af_flt_intr_handler,
- irq_name);
+ &rvu->irq_name[(off + i) * NAME_SIZE]);
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index a09a507369ac..d1eddb769a41 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1224,6 +1224,8 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m,
seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 70bacd38a6d9..d0ab8f233a02 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -41,7 +41,7 @@ static bool rvu_common_request_irq(struct rvu *rvu, int offset,
struct rvu_devlink *rvu_dl = rvu->rvu_dl;
int rc;
- sprintf(&rvu->irq_name[offset * NAME_SIZE], name);
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
&rvu->irq_name[offset * NAME_SIZE], rvu_dl);
if (rc)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d8b1948aaa0a..97fb61915379 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -512,11 +512,11 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
lmac_chan_cnt = cfg & 0xFF;
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
- sdp_chan_cnt = cfg & 0xFFF;
-
cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ sdp_chan_cnt = cfg & 0xFFF;
sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -2068,8 +2068,8 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
/* enable cgx tx if disabled */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true);
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
@@ -2092,7 +2092,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
rvu_cgx_enadis_rx_bp(rvu, pf, true);
/* restore cgx tx state */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
return err;
}
@@ -3878,7 +3878,7 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
/* Enable cgx tx if disabled for credits to be back */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, true);
}
@@ -3891,8 +3891,8 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
}
- rc = -EBUSY;
- poll_tmo = jiffies + usecs_to_jiffies(10000);
+ rc = NIX_AF_ERR_LINK_CREDITS;
+ poll_tmo = jiffies + usecs_to_jiffies(200000);
/* Wait for credits to return */
do {
if (time_after(jiffies, poll_tmo))
@@ -3918,7 +3918,7 @@ exit:
/* Restore state of cgx tx */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
mutex_unlock(&rvu->rsrc_lock);
return rc;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index c0005a1feee6..91f86d77cd41 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -402,6 +402,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, struct mcam_entry *entry,
bool *enable)
{
+ struct rvu_npc_mcam_rule *rule;
u16 owner, target_func;
struct rvu_pfvf *pfvf;
u64 rx_action;
@@ -423,6 +424,12 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
*enable = false;
+ /* fixup is not needed for rules added by the user (ntuple filters) */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ return;
+ }
+
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func);
@@ -489,8 +496,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
}
/* PF installing VF rule */
- if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries)
- npc_fixup_vf_rule(rvu, mcam, blkaddr, index, entry, &enable);
+ if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries)
+ npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable);
/* Set 'action' */
rvu_write64(rvu, blkaddr,
@@ -916,7 +923,8 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 pcifunc, u64 rx_action)
{
int actindex, index, bank, entry;
- bool enable;
+ struct rvu_npc_mcam_rule *rule;
+ bool enable, update;
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
return;
@@ -924,6 +932,14 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
mutex_lock(&mcam->lock);
for (index = 0; index < mcam->bmap_entries; index++) {
if (mcam->entry2target_pffunc[index] == pcifunc) {
+ update = true;
+ /* update not needed for the rules added via ntuple filters */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ update = false;
+ }
+ if (!update)
+ continue;
bank = npc_get_bank(mcam, index);
actindex = index;
entry = index & (mcam->banksize - 1);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index ff2b21999f36..19c53e591d0d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1098,14 +1098,6 @@ find_rule:
write_req.cntr = rule->cntr;
}
- err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
- &write_rsp);
- if (err) {
- rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
- if (new)
- kfree(rule);
- return err;
- }
/* update rule */
memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
@@ -1132,6 +1124,18 @@ find_rule:
if (req->default_rule)
pfvf->def_ucast_rule = rule;
+ /* write to mcam entry registers */
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
+ &write_rsp);
+ if (err) {
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+ if (new) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ return err;
+ }
+
/* VF's MAC address is being changed via PF */
if (pf_set_vfs_mac) {
ether_addr_copy(pfvf->default_mac, req->packet.dmac);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 61e52812983f..14509fc64cce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -603,6 +603,7 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
size++;
tar_addr |= ((size - 1) & 0x7) << 4;
}
+ dma_wmb();
memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
/* Perform LMTST flush */
cn10k_lmt_flush(val, tar_addr);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 80d4ce61f442..d85db90632d6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -360,7 +360,9 @@ static int otx2_set_pauseparam(struct net_device *netdev,
}
static void otx2_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct otx2_qset *qs = &pfvf->qset;
@@ -372,7 +374,9 @@ static void otx2_get_ringparam(struct net_device *netdev,
}
static int otx2_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
bool if_up = netif_running(netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 1e0d0c9c1dac..d39341e4ab37 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -394,7 +394,12 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
dst_mdev->msg_size = mbox_hdr->msg_size;
dst_mdev->num_msgs = num_msgs;
err = otx2_sync_mbox_msg(dst_mbox);
- if (err) {
+ /* Error code -EIO indicates a communication failure with the
+ * AF. All other error codes mean the AF processed the VF messages
+ * and set the error codes in the response messages (if any), so
+ * simply forward the responses to the VF.
+ */
+ if (err == -EIO) {
dev_warn(pf->dev,
"AF not responding to VF%d messages\n", vf);
/* restore PF mbase and exit */
@@ -2002,10 +2007,6 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
otx2_config_hw_tx_tstamp(pfvf, false);
@@ -2741,7 +2742,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
- netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 0cc6353254bf..7c4068c5d1ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1198,7 +1198,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
put_page(page);
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
break;
case XDP_ABORTED:
trace_xdp_exception(pfvf->netdev, prog, act);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 78944ad3492f..925b74ebb8b0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -663,7 +663,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_NTUPLE;
netdev->hw_features |= NETIF_F_RXALL;
- netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2vf_netdev_ops;
@@ -684,7 +684,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_detach_rsrc;
+ goto err_ptp_destroy;
}
err = otx2_wq_init(vf);
@@ -709,6 +709,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
+err_ptp_destroy:
+ otx2_ptp_destroy(vf);
err_detach_rsrc:
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
@@ -742,6 +744,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
+ otx2_ptp_destroy(vf);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile
index 0609df8b913d..d395f4131648 100644
--- a/drivers/net/ethernet/marvell/prestera/Makefile
+++ b/drivers/net/ethernet/marvell/prestera/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PRESTERA) += prestera.o
prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \
prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \
prestera_switchdev.o prestera_acl.o prestera_flow.o \
- prestera_flower.o prestera_span.o
+ prestera_flower.o prestera_span.o prestera_counter.o \
+ prestera_router.o prestera_router_hw.o
obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index 2a4c14c704c0..2fd9ef2fe5d6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -225,6 +225,29 @@ struct prestera_event {
};
};
+enum prestera_if_type {
+ /* the interface is of port type (dev, port) */
+ PRESTERA_IF_PORT_E = 0,
+
+ /* the interface is of LAG type (lag-id) */
+ PRESTERA_IF_LAG_E = 1,
+
+ /* the interface is of VID type (vlan-id) */
+ PRESTERA_IF_VID_E = 3,
+};
+
+struct prestera_iface {
+ enum prestera_if_type type;
+ struct {
+ u32 hw_dev_num;
+ u32 port_num;
+ } dev_port;
+ u32 hw_dev_num;
+ u16 vr_id;
+ u16 lag_id;
+ u16 vlan_id;
+};
+
struct prestera_switchdev;
struct prestera_span;
struct prestera_rxtx;
@@ -247,11 +270,21 @@ struct prestera_switch {
u32 mtu_min;
u32 mtu_max;
u8 id;
+ struct prestera_router *router;
struct prestera_lag *lags;
+ struct prestera_counter *counter;
u8 lag_member_max;
u8 lag_max;
};
+struct prestera_router {
+ struct prestera_switch *sw;
+ struct list_head vr_list;
+ struct list_head rif_entry_list;
+ struct notifier_block inetaddr_nb;
+ struct notifier_block inetaddr_valid_nb;
+};
+
struct prestera_rxtx_params {
bool use_sdma;
u32 map_addr;
@@ -279,6 +312,9 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes);
+int prestera_router_init(struct prestera_switch *sw);
+void prestera_router_fini(struct prestera_switch *sw);
+
struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id);
int prestera_port_cfg_mac_read(struct prestera_port *port,
@@ -293,6 +329,8 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
bool prestera_netdev_check(const struct net_device *dev);
+int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr);
+
bool prestera_port_is_lag_member(const struct prestera_port *port);
struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index 83c75ffb1a1c..f0d9f592173b 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -1,35 +1,72 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved */
#include <linux/rhashtable.h>
-#include "prestera.h"
-#include "prestera_hw.h"
#include "prestera_acl.h"
-#include "prestera_span.h"
+#include "prestera_flow.h"
+#include "prestera_hw.h"
+#include "prestera.h"
+
+#define ACL_KEYMASK_SIZE \
+ (sizeof(__be32) * __PRESTERA_ACL_RULE_MATCH_TYPE_MAX)
struct prestera_acl {
struct prestera_switch *sw;
+ struct list_head vtcam_list;
struct list_head rules;
+ struct rhashtable ruleset_ht;
+ struct rhashtable acl_rule_entry_ht;
+ struct idr uid;
+};
+
+struct prestera_acl_ruleset_ht_key {
+ struct prestera_flow_block *block;
+};
+
+struct prestera_acl_rule_entry {
+ struct rhash_head ht_node;
+ struct prestera_acl_rule_entry_key key;
+ u32 hw_id;
+ u32 vtcam_id;
+ struct {
+ struct {
+ u8 valid:1;
+ } accept, drop, trap;
+ struct {
+ u32 id;
+ struct prestera_counter_block *block;
+ } counter;
+ };
};
struct prestera_acl_ruleset {
+ struct rhash_head ht_node; /* Member of acl HT */
+ struct prestera_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
- struct prestera_switch *sw;
- u16 id;
+ struct prestera_acl *acl;
+ unsigned long rule_count;
+ refcount_t refcount;
+ void *keymask;
+ u32 vtcam_id;
+ u16 pcl_id;
+ bool offload;
};
-struct prestera_acl_rule {
- struct rhash_head ht_node;
+struct prestera_acl_vtcam {
struct list_head list;
- struct list_head match_list;
- struct list_head action_list;
- struct prestera_flow_block *block;
- unsigned long cookie;
- u32 priority;
- u8 n_actions;
- u8 n_matches;
+ __be32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ refcount_t refcount;
u32 id;
+ bool is_keymask_set;
+ u8 lookup;
+};
+
+static const struct rhashtable_params prestera_acl_ruleset_ht_params = {
+ .key_len = sizeof(struct prestera_acl_ruleset_ht_key),
+ .key_offset = offsetof(struct prestera_acl_ruleset, ht_key),
+ .head_offset = offsetof(struct prestera_acl_ruleset, ht_node),
+ .automatic_shrinking = true,
};
static const struct rhashtable_params prestera_acl_rule_ht_params = {
@@ -39,28 +76,48 @@ static const struct rhashtable_params prestera_acl_rule_ht_params = {
.automatic_shrinking = true,
};
+static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = {
+ .key_offset = offsetof(struct prestera_acl_rule_entry, key),
+ .head_offset = offsetof(struct prestera_acl_rule_entry, ht_node),
+ .key_len = sizeof(struct prestera_acl_rule_entry_key),
+ .automatic_shrinking = true,
+};
+
static struct prestera_acl_ruleset *
-prestera_acl_ruleset_create(struct prestera_switch *sw)
+prestera_acl_ruleset_create(struct prestera_acl *acl,
+ struct prestera_flow_block *block)
{
struct prestera_acl_ruleset *ruleset;
+ u32 uid = 0;
int err;
ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL);
if (!ruleset)
return ERR_PTR(-ENOMEM);
+ ruleset->acl = acl;
+ ruleset->ht_key.block = block;
+ refcount_set(&ruleset->refcount, 1);
+
err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params);
if (err)
goto err_rhashtable_init;
- err = prestera_hw_acl_ruleset_create(sw, &ruleset->id);
+ err = idr_alloc_u32(&acl->uid, NULL, &uid, U8_MAX, GFP_KERNEL);
if (err)
goto err_ruleset_create;
- ruleset->sw = sw;
+ /* make pcl-id based on uid */
+ ruleset->pcl_id = (u8)uid;
+ err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
+ prestera_acl_ruleset_ht_params);
+ if (err)
+ goto err_ruleset_ht_insert;
return ruleset;
+err_ruleset_ht_insert:
+ idr_remove(&acl->uid, uid);
err_ruleset_create:
rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
@@ -68,117 +125,164 @@ err_rhashtable_init:
return ERR_PTR(err);
}
-static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
+void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask)
{
- prestera_hw_acl_ruleset_del(ruleset->sw, ruleset->id);
- rhashtable_destroy(&ruleset->rule_ht);
- kfree(ruleset);
+ ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL);
}
-struct prestera_flow_block *
-prestera_acl_block_create(struct prestera_switch *sw, struct net *net)
+int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
{
- struct prestera_flow_block *block;
+ u32 vtcam_id;
+ int err;
- block = kzalloc(sizeof(*block), GFP_KERNEL);
- if (!block)
- return NULL;
- INIT_LIST_HEAD(&block->binding_list);
- block->net = net;
- block->sw = sw;
-
- block->ruleset = prestera_acl_ruleset_create(sw);
- if (IS_ERR(block->ruleset)) {
- kfree(block);
- return NULL;
- }
+ if (ruleset->offload)
+ return -EEXIST;
+
+ err = prestera_acl_vtcam_id_get(ruleset->acl, 0,
+ ruleset->keymask, &vtcam_id);
+ if (err)
+ return err;
- return block;
+ ruleset->vtcam_id = vtcam_id;
+ ruleset->offload = true;
+ return 0;
}
-void prestera_acl_block_destroy(struct prestera_flow_block *block)
+static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
{
- prestera_acl_ruleset_destroy(block->ruleset);
- WARN_ON(!list_empty(&block->binding_list));
- kfree(block);
+ struct prestera_acl *acl = ruleset->acl;
+ u8 uid = ruleset->pcl_id & PRESTERA_ACL_KEYMASK_PCL_ID_USER;
+
+ rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
+ prestera_acl_ruleset_ht_params);
+
+ if (ruleset->offload)
+ WARN_ON(prestera_acl_vtcam_id_put(acl, ruleset->vtcam_id));
+
+ idr_remove(&acl->uid, uid);
+
+ rhashtable_destroy(&ruleset->rule_ht);
+ kfree(ruleset->keymask);
+ kfree(ruleset);
}
-static struct prestera_flow_block_binding *
-prestera_acl_block_lookup(struct prestera_flow_block *block,
- struct prestera_port *port)
+static struct prestera_acl_ruleset *
+__prestera_acl_ruleset_lookup(struct prestera_acl *acl,
+ struct prestera_flow_block *block)
{
- struct prestera_flow_block_binding *binding;
+ struct prestera_acl_ruleset_ht_key ht_key;
- list_for_each_entry(binding, &block->binding_list, list)
- if (binding->port == port)
- return binding;
-
- return NULL;
+ memset(&ht_key, 0, sizeof(ht_key));
+ ht_key.block = block;
+ return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
+ prestera_acl_ruleset_ht_params);
}
-int prestera_acl_block_bind(struct prestera_flow_block *block,
- struct prestera_port *port)
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_lookup(struct prestera_acl *acl,
+ struct prestera_flow_block *block)
{
- struct prestera_flow_block_binding *binding;
- int err;
+ struct prestera_acl_ruleset *ruleset;
- if (WARN_ON(prestera_acl_block_lookup(block, port)))
- return -EEXIST;
+ ruleset = __prestera_acl_ruleset_lookup(acl, block);
+ if (!ruleset)
+ return ERR_PTR(-ENOENT);
- binding = kzalloc(sizeof(*binding), GFP_KERNEL);
- if (!binding)
- return -ENOMEM;
- binding->span_id = PRESTERA_SPAN_INVALID_ID;
- binding->port = port;
+ refcount_inc(&ruleset->refcount);
+ return ruleset;
+}
- err = prestera_hw_acl_port_bind(port, block->ruleset->id);
- if (err)
- goto err_rules_bind;
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_get(struct prestera_acl *acl,
+ struct prestera_flow_block *block)
+{
+ struct prestera_acl_ruleset *ruleset;
- list_add(&binding->list, &block->binding_list);
- return 0;
+ ruleset = __prestera_acl_ruleset_lookup(acl, block);
+ if (ruleset) {
+ refcount_inc(&ruleset->refcount);
+ return ruleset;
+ }
-err_rules_bind:
- kfree(binding);
- return err;
+ return prestera_acl_ruleset_create(acl, block);
}
-int prestera_acl_block_unbind(struct prestera_flow_block *block,
- struct prestera_port *port)
+void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset)
{
- struct prestera_flow_block_binding *binding;
+ if (!refcount_dec_and_test(&ruleset->refcount))
+ return;
- binding = prestera_acl_block_lookup(block, port);
- if (!binding)
- return -ENOENT;
-
- list_del(&binding->list);
+ prestera_acl_ruleset_destroy(ruleset);
+}
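The get/put pairing above is reference counted: prestera_acl_ruleset_get() and prestera_acl_ruleset_lookup() each take a reference, and the ruleset is destroyed only when the last holder calls prestera_acl_ruleset_put(). A sketch of the intended lifetime, mirroring the flower code later in this patch:

    /* ref == 1: freshly created for this block */
    ruleset = prestera_acl_ruleset_get(acl, block);

    /* ref == 2: each rule takes its own reference */
    rule = prestera_acl_rule_create(ruleset, cookie);

    /* ref == 1: drop ours; the rule keeps the ruleset alive */
    prestera_acl_ruleset_put(ruleset);

    /* ref == 0: destroying the rule also frees the ruleset */
    prestera_acl_rule_destroy(rule);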
- prestera_hw_acl_port_unbind(port, block->ruleset->id);
+int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port)
+{
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_PORT,
+ .port = port
+ };
- kfree(binding);
- return 0;
+ return prestera_hw_vtcam_iface_bind(port->sw, &iface, ruleset->vtcam_id,
+ ruleset->pcl_id);
}
-struct prestera_acl_ruleset *
-prestera_acl_block_ruleset_get(struct prestera_flow_block *block)
+int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port)
{
- return block->ruleset;
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_PORT,
+ .port = port
+ };
+
+ return prestera_hw_vtcam_iface_unbind(port->sw, &iface,
+ ruleset->vtcam_id);
}
-u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule)
+static int prestera_acl_ruleset_block_bind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_flow_block *block)
{
- return rule->block->ruleset->id;
+ struct prestera_flow_block_binding *binding;
+ int err;
+
+ block->ruleset_zero = ruleset;
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = prestera_acl_ruleset_bind(ruleset, binding->port);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ err = prestera_acl_ruleset_unbind(ruleset, binding->port);
+ block->ruleset_zero = NULL;
+
+ return err;
}
-struct net *prestera_acl_block_net(struct prestera_flow_block *block)
+static void
+prestera_acl_ruleset_block_unbind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_flow_block *block)
{
- return block->net;
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ prestera_acl_ruleset_unbind(ruleset, binding->port);
+ block->ruleset_zero = NULL;
}
-struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block)
+void
+prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, u16 pcl_id)
{
- return block->sw;
+ struct prestera_acl_match *r_match = &rule->re_key.match;
+ __be16 pcl_id_mask = htons(PRESTERA_ACL_KEYMASK_PCL_ID);
+ __be16 pcl_id_key = htons(pcl_id);
+
+ rule_match_set(r_match->key, PCL_ID, pcl_id_key);
+ rule_match_set(r_match->mask, PCL_ID, pcl_id_mask);
}
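Note that rule_match_set() copies only sizeof(val) bytes into the selected 32-bit cell, so the two calls above place the 16-bit PCL ID in the first two bytes of the PCL_ID cell. A minimal standalone sketch of the expansion, assuming the array layout from prestera_acl.h:

    __be32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX] = {};
    __be16 pcl_id_key = htons(pcl_id);

    /* rule_match_set(key, PCL_ID, pcl_id_key) is effectively: */
    memcpy(&key[PRESTERA_ACL_RULE_MATCH_TYPE_PCL_ID],
           &pcl_id_key, sizeof(pcl_id_key));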
struct prestera_acl_rule *
@@ -189,8 +293,13 @@ prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
prestera_acl_rule_ht_params);
}
+bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset)
+{
+ return ruleset->offload;
+}
+
struct prestera_acl_rule *
-prestera_acl_rule_create(struct prestera_flow_block *block,
+prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
unsigned long cookie)
{
struct prestera_acl_rule *rule;
@@ -199,178 +308,436 @@ prestera_acl_rule_create(struct prestera_flow_block *block,
if (!rule)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&rule->match_list);
- INIT_LIST_HEAD(&rule->action_list);
+ rule->ruleset = ruleset;
rule->cookie = cookie;
- rule->block = block;
+
+ refcount_inc(&ruleset->refcount);
return rule;
}
-struct list_head *
-prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule)
+void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
+ u32 priority)
{
- return &rule->match_list;
+ rule->priority = priority;
}
-struct list_head *
-prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule)
+void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
{
- return &rule->action_list;
+ prestera_acl_ruleset_put(rule->ruleset);
+ kfree(rule);
}
-int prestera_acl_rule_action_add(struct prestera_acl_rule *rule,
- struct prestera_acl_rule_action_entry *entry)
+int prestera_acl_rule_add(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule)
{
- struct prestera_acl_rule_action_entry *a_entry;
+ int err;
+ struct prestera_acl_ruleset *ruleset = rule->ruleset;
+ struct prestera_flow_block *block = ruleset->ht_key.block;
- a_entry = kmalloc(sizeof(*a_entry), GFP_KERNEL);
- if (!a_entry)
- return -ENOMEM;
+ /* try to add rule to hash table first */
+ err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
+ prestera_acl_rule_ht_params);
+ if (err)
+ goto err_ht_insert;
- memcpy(a_entry, entry, sizeof(*entry));
- list_add(&a_entry->list, &rule->action_list);
+ prestera_acl_rule_keymask_pcl_id_set(rule, ruleset->pcl_id);
+ rule->re_arg.vtcam_id = ruleset->vtcam_id;
+ rule->re_key.prio = rule->priority;
- rule->n_actions++;
+ /* setup counter */
+ rule->re_arg.count.valid = true;
+ rule->re_arg.count.client = PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0;
+
+ rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key);
+ err = WARN_ON(rule->re) ? -EEXIST : 0;
+ if (err)
+ goto err_rule_add;
+
+ rule->re = prestera_acl_rule_entry_create(sw->acl, &rule->re_key,
+ &rule->re_arg);
+ err = !rule->re ? -EINVAL : 0;
+ if (err)
+ goto err_rule_add;
+
+ /* bind the block (all ports) to chain index 0 */
+ if (!ruleset->rule_count) {
+ err = prestera_acl_ruleset_block_bind(ruleset, block);
+ if (err)
+ goto err_acl_block_bind;
+ }
+
+ list_add_tail(&rule->list, &sw->acl->rules);
+ ruleset->rule_count++;
return 0;
+
+err_acl_block_bind:
+ prestera_acl_rule_entry_destroy(sw->acl, rule->re);
+err_rule_add:
+ rule->re = NULL;
+ rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+ prestera_acl_rule_ht_params);
+err_ht_insert:
+ return err;
}
-u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule)
+void prestera_acl_rule_del(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule)
{
- return rule->n_actions;
+ struct prestera_acl_ruleset *ruleset = rule->ruleset;
+ struct prestera_flow_block *block = ruleset->ht_key.block;
+
+ rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+ prestera_acl_rule_ht_params);
+ ruleset->rule_count--;
+ list_del(&rule->list);
+
+ prestera_acl_rule_entry_destroy(sw->acl, rule->re);
+
+ /* unbind block (all ports) */
+ if (!ruleset->rule_count)
+ prestera_acl_ruleset_block_unbind(ruleset, block);
}
-u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule)
+int prestera_acl_rule_get_stats(struct prestera_acl *acl,
+ struct prestera_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use)
{
- return rule->priority;
+ u64 current_packets;
+ u64 current_bytes;
+ int err;
+
+ err = prestera_counter_stats_get(acl->sw->counter,
+ rule->re->counter.block,
+ rule->re->counter.id,
+ &current_packets, &current_bytes);
+ if (err)
+ return err;
+
+ *packets = current_packets;
+ *bytes = current_bytes;
+ *last_use = jiffies;
+
+ return 0;
}
-void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
- u32 priority)
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_find(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key)
{
- rule->priority = priority;
+ return rhashtable_lookup_fast(&acl->acl_rule_entry_ht, key,
+ __prestera_acl_rule_entry_ht_params);
}
-int prestera_acl_rule_match_add(struct prestera_acl_rule *rule,
- struct prestera_acl_rule_match_entry *entry)
+static int __prestera_acl_rule_entry2hw_del(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e)
{
- struct prestera_acl_rule_match_entry *m_entry;
+ return prestera_hw_vtcam_rule_del(sw, e->vtcam_id, e->hw_id);
+}
- m_entry = kmalloc(sizeof(*m_entry), GFP_KERNEL);
- if (!m_entry)
- return -ENOMEM;
+static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e)
+{
+ struct prestera_acl_hw_action_info act_hw[PRESTERA_ACL_RULE_ACTION_MAX];
+ int act_num;
- memcpy(m_entry, entry, sizeof(*entry));
- list_add(&m_entry->list, &rule->match_list);
+ memset(&act_hw, 0, sizeof(act_hw));
+ act_num = 0;
- rule->n_matches++;
- return 0;
+ /* accept */
+ if (e->accept.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_ACCEPT;
+ act_num++;
+ }
+ /* drop */
+ if (e->drop.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_DROP;
+ act_num++;
+ }
+ /* trap */
+ if (e->trap.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP;
+ act_num++;
+ }
+ /* counter */
+ if (e->counter.block) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_COUNT;
+ act_hw[act_num].count.id = e->counter.id;
+ act_num++;
+ }
+
+ return prestera_hw_vtcam_rule_add(sw, e->vtcam_id, e->key.prio,
+ e->key.match.key, e->key.match.mask,
+ act_hw, act_num, &e->hw_id);
}
-u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule)
+static void
+__prestera_acl_rule_entry_act_destruct(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e)
{
- return rule->n_matches;
+ /* counter */
+ prestera_counter_put(sw->counter, e->counter.block, e->counter.id);
}
-void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
+void prestera_acl_rule_entry_destroy(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry *e)
{
- struct prestera_acl_rule_action_entry *a_entry;
- struct prestera_acl_rule_match_entry *m_entry;
- struct list_head *pos, *n;
+ int ret;
- list_for_each_safe(pos, n, &rule->match_list) {
- m_entry = list_entry(pos, typeof(*m_entry), list);
- list_del(pos);
- kfree(m_entry);
- }
+ rhashtable_remove_fast(&acl->acl_rule_entry_ht, &e->ht_node,
+ __prestera_acl_rule_entry_ht_params);
+
+ ret = __prestera_acl_rule_entry2hw_del(acl->sw, e);
+ WARN_ON(ret && ret != -ENODEV);
- list_for_each_safe(pos, n, &rule->action_list) {
- a_entry = list_entry(pos, typeof(*a_entry), list);
- list_del(pos);
- kfree(a_entry);
+ __prestera_acl_rule_entry_act_destruct(acl->sw, e);
+ kfree(e);
+}
+
+static int
+__prestera_acl_rule_entry_act_construct(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e,
+ struct prestera_acl_rule_entry_arg *arg)
+{
+ /* accept */
+ e->accept.valid = arg->accept.valid;
+ /* drop */
+ e->drop.valid = arg->drop.valid;
+ /* trap */
+ e->trap.valid = arg->trap.valid;
+ /* counter */
+ if (arg->count.valid) {
+ int err;
+
+ err = prestera_counter_get(sw->counter, arg->count.client,
+ &e->counter.block,
+ &e->counter.id);
+ if (err)
+ goto err_out;
}
- kfree(rule);
+ return 0;
+
+err_out:
+ __prestera_acl_rule_entry_act_destruct(sw, e);
+ return -EINVAL;
}
-int prestera_acl_rule_add(struct prestera_switch *sw,
- struct prestera_acl_rule *rule)
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_create(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key,
+ struct prestera_acl_rule_entry_arg *arg)
{
- u32 rule_id;
+ struct prestera_acl_rule_entry *e;
int err;
- /* try to add rule to hash table first */
- err = rhashtable_insert_fast(&rule->block->ruleset->rule_ht,
- &rule->ht_node,
- prestera_acl_rule_ht_params);
- if (err)
- return err;
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ goto err_kzalloc;
- /* add rule to hw */
- err = prestera_hw_acl_rule_add(sw, rule, &rule_id);
+ memcpy(&e->key, key, sizeof(*key));
+ e->vtcam_id = arg->vtcam_id;
+ err = __prestera_acl_rule_entry_act_construct(acl->sw, e, arg);
if (err)
- goto err_rule_add;
+ goto err_act_construct;
- rule->id = rule_id;
+ err = __prestera_acl_rule_entry2hw_add(acl->sw, e);
+ if (err)
+ goto err_hw_add;
- list_add_tail(&rule->list, &sw->acl->rules);
+ err = rhashtable_insert_fast(&acl->acl_rule_entry_ht, &e->ht_node,
+ __prestera_acl_rule_entry_ht_params);
+ if (err)
+ goto err_ht_insert;
- return 0;
+ return e;
-err_rule_add:
- rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node,
- prestera_acl_rule_ht_params);
- return err;
+err_ht_insert:
+ WARN_ON(__prestera_acl_rule_entry2hw_del(acl->sw, e));
+err_hw_add:
+ __prestera_acl_rule_entry_act_destruct(acl->sw, e);
+err_act_construct:
+ kfree(e);
+err_kzalloc:
+ return NULL;
}
-void prestera_acl_rule_del(struct prestera_switch *sw,
- struct prestera_acl_rule *rule)
+static int __prestera_acl_vtcam_id_try_fit(struct prestera_acl *acl, u8 lookup,
+ void *keymask, u32 *vtcam_id)
{
- rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node,
- prestera_acl_rule_ht_params);
- list_del(&rule->list);
- prestera_hw_acl_rule_del(sw, rule->id);
+ struct prestera_acl_vtcam *vtcam;
+ int i;
+
+ list_for_each_entry(vtcam, &acl->vtcam_list, list) {
+ if (lookup != vtcam->lookup)
+ continue;
+
+ if (!keymask && !vtcam->is_keymask_set)
+ goto vtcam_found;
+
+ if (!(keymask && vtcam->is_keymask_set))
+ continue;
+
+ /* try to fit with vtcam keymask */
+ for (i = 0; i < __PRESTERA_ACL_RULE_MATCH_TYPE_MAX; i++) {
+ __be32 __keymask = ((__be32 *)keymask)[i];
+
+ if (!__keymask)
+ /* this word is not used by the request; skip it */
+ continue;
+
+ if (__keymask & ~vtcam->keymask[i])
+ /* keymask does not fit the vtcam keymask */
+ break;
+ }
+
+ if (i == __PRESTERA_ACL_RULE_MATCH_TYPE_MAX)
+ /* keymask fits vtcam keymask, return it */
+ goto vtcam_found;
+ }
+
+ /* nothing is found */
+ return -ENOENT;
+
+vtcam_found:
+ refcount_inc(&vtcam->refcount);
+ *vtcam_id = vtcam->id;
+ return 0;
}
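The fit test reduces to a per-word subset check: a requested keymask fits an existing vTCAM when it matches on no bit that the vTCAM's keymask does not already cover, i.e. (req & ~vtcam) == 0 for every 32-bit word. A toy example with made-up values:

    __be32 req   = htonl(0x0000ff00); /* bits the new ruleset matches on */
    __be32 vtcam = htonl(0x00ffff00); /* bits the existing vtcam matches on */

    /* req & ~vtcam == 0, so the request fits this vtcam */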
-int prestera_acl_rule_get_stats(struct prestera_switch *sw,
- struct prestera_acl_rule *rule,
- u64 *packets, u64 *bytes, u64 *last_use)
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
+ void *keymask, u32 *vtcam_id)
{
- u64 current_packets;
- u64 current_bytes;
+ struct prestera_acl_vtcam *vtcam;
+ u32 new_vtcam_id;
int err;
- err = prestera_hw_acl_rule_stats_get(sw, rule->id, &current_packets,
- &current_bytes);
- if (err)
- return err;
+ /* find a vtcam that suits the keymask. We do not expect a
+ * large number of vtcams, so a plain list is fine for now
+ */
+ list_for_each_entry(vtcam, &acl->vtcam_list, list) {
+ if (lookup != vtcam->lookup)
+ continue;
+
+ if (!keymask && !vtcam->is_keymask_set) {
+ refcount_inc(&vtcam->refcount);
+ goto vtcam_found;
+ }
+
+ if (keymask && vtcam->is_keymask_set &&
+ !memcmp(keymask, vtcam->keymask, sizeof(vtcam->keymask))) {
+ refcount_inc(&vtcam->refcount);
+ goto vtcam_found;
+ }
+ }
- *packets = current_packets;
- *bytes = current_bytes;
- *last_use = jiffies;
+ /* vtcam not found, try to create new one */
+ vtcam = kzalloc(sizeof(*vtcam), GFP_KERNEL);
+ if (!vtcam)
+ return -ENOMEM;
+
+ err = prestera_hw_vtcam_create(acl->sw, lookup, keymask, &new_vtcam_id,
+ PRESTERA_HW_VTCAM_DIR_INGRESS);
+ if (err) {
+ kfree(vtcam);
+
+ /* cannot create new, try to fit into existing vtcam */
+ if (__prestera_acl_vtcam_id_try_fit(acl, lookup,
+ keymask, &new_vtcam_id))
+ return err;
+ *vtcam_id = new_vtcam_id;
+ return 0;
+ }
+
+ vtcam->id = new_vtcam_id;
+ vtcam->lookup = lookup;
+ if (keymask) {
+ memcpy(vtcam->keymask, keymask, sizeof(vtcam->keymask));
+ vtcam->is_keymask_set = true;
+ }
+ refcount_set(&vtcam->refcount, 1);
+ list_add_rcu(&vtcam->list, &acl->vtcam_list);
+
+vtcam_found:
+ *vtcam_id = vtcam->id;
return 0;
}
+int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id)
+{
+ struct prestera_acl_vtcam *vtcam;
+ int err;
+
+ list_for_each_entry(vtcam, &acl->vtcam_list, list) {
+ if (vtcam_id != vtcam->id)
+ continue;
+
+ if (!refcount_dec_and_test(&vtcam->refcount))
+ return 0;
+
+ err = prestera_hw_vtcam_destroy(acl->sw, vtcam->id);
+ if (err && err != -ENODEV) {
+ refcount_set(&vtcam->refcount, 1);
+ return err;
+ }
+
+ list_del(&vtcam->list);
+ kfree(vtcam);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
int prestera_acl_init(struct prestera_switch *sw)
{
struct prestera_acl *acl;
+ int err;
acl = kzalloc(sizeof(*acl), GFP_KERNEL);
if (!acl)
return -ENOMEM;
+ acl->sw = sw;
INIT_LIST_HEAD(&acl->rules);
+ INIT_LIST_HEAD(&acl->vtcam_list);
+ idr_init(&acl->uid);
+
+ err = rhashtable_init(&acl->acl_rule_entry_ht,
+ &__prestera_acl_rule_entry_ht_params);
+ if (err)
+ goto err_acl_rule_entry_ht_init;
+
+ err = rhashtable_init(&acl->ruleset_ht,
+ &prestera_acl_ruleset_ht_params);
+ if (err)
+ goto err_ruleset_ht_init;
+
sw->acl = acl;
- acl->sw = sw;
return 0;
+
+err_ruleset_ht_init:
+ rhashtable_destroy(&acl->acl_rule_entry_ht);
+err_acl_rule_entry_ht_init:
+ kfree(acl);
+ return err;
}
void prestera_acl_fini(struct prestera_switch *sw)
{
struct prestera_acl *acl = sw->acl;
+ WARN_ON(!idr_is_empty(&acl->uid));
+ idr_destroy(&acl->uid);
+
+ WARN_ON(!list_empty(&acl->vtcam_list));
WARN_ON(!list_empty(&acl->rules));
+
+ rhashtable_destroy(&acl->ruleset_ht);
+ rhashtable_destroy(&acl->acl_rule_entry_ht);
+
kfree(acl);
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
index 39b7869be659..40f6c1d961fa 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -1,114 +1,130 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
-/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. */
#ifndef _PRESTERA_ACL_H_
#define _PRESTERA_ACL_H_
-enum prestera_acl_rule_match_entry_type {
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE = 1,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE
+#include <linux/types.h>
+#include "prestera_counter.h"
+
+#define PRESTERA_ACL_KEYMASK_PCL_ID 0x3FF
+#define PRESTERA_ACL_KEYMASK_PCL_ID_USER \
+ (PRESTERA_ACL_KEYMASK_PCL_ID & 0x00FF)
+
+#define rule_match_set_n(match_p, type, val_p, size) \
+ memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \
+ val_p, size)
+#define rule_match_set(match_p, type, val) \
+ memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \
+ &(val), sizeof(val))
+
+enum prestera_acl_match_type {
+ PRESTERA_ACL_RULE_MATCH_TYPE_PCL_ID,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_TYPE,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_0,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_1,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_0,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_1,
+ PRESTERA_ACL_RULE_MATCH_TYPE_IP_PROTO,
+ PRESTERA_ACL_RULE_MATCH_TYPE_SYS_PORT,
+ PRESTERA_ACL_RULE_MATCH_TYPE_SYS_DEV,
+ PRESTERA_ACL_RULE_MATCH_TYPE_IP_SRC,
+ PRESTERA_ACL_RULE_MATCH_TYPE_IP_DST,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_SRC,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_DST,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_SRC,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_DST,
+ PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_ID,
+ PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_TPID,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_TYPE,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_CODE,
+
+ __PRESTERA_ACL_RULE_MATCH_TYPE_MAX
};
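As a usage sketch with hypothetical values, an exact match on the IP protocol field via the macros above (struct prestera_acl_match is defined just below):

    struct prestera_acl_match m = {};
    u8 proto_key = IPPROTO_TCP;
    u8 proto_mask = 0xff; /* exact match */

    rule_match_set(m.key, IP_PROTO, proto_key);
    rule_match_set(m.mask, IP_PROTO, proto_mask);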
enum prestera_acl_rule_action {
- PRESTERA_ACL_RULE_ACTION_ACCEPT,
- PRESTERA_ACL_RULE_ACTION_DROP,
- PRESTERA_ACL_RULE_ACTION_TRAP
+ PRESTERA_ACL_RULE_ACTION_ACCEPT = 0,
+ PRESTERA_ACL_RULE_ACTION_DROP = 1,
+ PRESTERA_ACL_RULE_ACTION_TRAP = 2,
+ PRESTERA_ACL_RULE_ACTION_COUNT = 7,
+
+ PRESTERA_ACL_RULE_ACTION_MAX
};
-struct prestera_switch;
-struct prestera_port;
-struct prestera_acl_rule;
-struct prestera_acl_ruleset;
+enum {
+ PRESTERA_ACL_IFACE_TYPE_PORT,
+ PRESTERA_ACL_IFACE_TYPE_INDEX
+};
-struct prestera_flow_block_binding {
- struct list_head list;
- struct prestera_port *port;
- int span_id;
+struct prestera_acl_match {
+ __be32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __be32 mask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
};
-struct prestera_flow_block {
- struct list_head binding_list;
- struct prestera_switch *sw;
- struct net *net;
- struct prestera_acl_ruleset *ruleset;
- struct flow_block_cb *block_cb;
+struct prestera_acl_action_count {
+ u32 id;
};
-struct prestera_acl_rule_action_entry {
- struct list_head list;
- enum prestera_acl_rule_action id;
+struct prestera_acl_rule_entry_key {
+ u32 prio;
+ struct prestera_acl_match match;
};
-struct prestera_acl_rule_match_entry {
- struct list_head list;
- enum prestera_acl_rule_match_entry_type type;
+struct prestera_acl_hw_action_info {
+ enum prestera_acl_rule_action id;
union {
+ struct prestera_acl_action_count count;
+ };
+};
+
+/* This struct (arg) used only to be passed as parameter for
+ * acl_rule_entry_create. Must be flat. Can contain object keys, which will be
+ * resolved to object links, before saving to acl_rule_entry struct
+ */
+struct prestera_acl_rule_entry_arg {
+ u32 vtcam_id;
+ struct {
struct {
- u8 key;
- u8 mask;
- } u8;
- struct {
- u16 key;
- u16 mask;
- } u16;
- struct {
- u32 key;
- u32 mask;
- } u32;
- struct {
- u64 key;
- u64 mask;
- } u64;
+ u8 valid:1;
+ } accept, drop, trap;
struct {
- u8 key[ETH_ALEN];
- u8 mask[ETH_ALEN];
- } mac;
- } keymask;
+ u8 valid:1;
+ u32 client;
+ } count;
+ };
+};
+
+struct prestera_acl_rule {
+ struct rhash_head ht_node; /* Member of acl HT */
+ struct list_head list;
+ struct prestera_acl_ruleset *ruleset;
+ unsigned long cookie;
+ u32 priority;
+ struct prestera_acl_rule_entry_key re_key;
+ struct prestera_acl_rule_entry_arg re_arg;
+ struct prestera_acl_rule_entry *re;
};
+struct prestera_acl_iface {
+ union {
+ struct prestera_port *port;
+ u32 index;
+ };
+ u8 type;
+};
+
+struct prestera_acl;
+struct prestera_switch;
+struct prestera_flow_block;
+
int prestera_acl_init(struct prestera_switch *sw);
void prestera_acl_fini(struct prestera_switch *sw);
-struct prestera_flow_block *
-prestera_acl_block_create(struct prestera_switch *sw, struct net *net);
-void prestera_acl_block_destroy(struct prestera_flow_block *block);
-struct net *prestera_acl_block_net(struct prestera_flow_block *block);
-struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block);
-int prestera_acl_block_bind(struct prestera_flow_block *block,
- struct prestera_port *port);
-int prestera_acl_block_unbind(struct prestera_flow_block *block,
- struct prestera_port *port);
-struct prestera_acl_ruleset *
-prestera_acl_block_ruleset_get(struct prestera_flow_block *block);
+
struct prestera_acl_rule *
-prestera_acl_rule_create(struct prestera_flow_block *block,
+prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
unsigned long cookie);
-u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule);
void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
u32 priority);
-u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule);
-struct list_head *
-prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule);
-u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule);
-u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule);
-int prestera_acl_rule_action_add(struct prestera_acl_rule *rule,
- struct prestera_acl_rule_action_entry *entry);
-struct list_head *
-prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule);
-int prestera_acl_rule_match_add(struct prestera_acl_rule *rule,
- struct prestera_acl_rule_match_entry *entry);
void prestera_acl_rule_destroy(struct prestera_acl_rule *rule);
struct prestera_acl_rule *
prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
@@ -117,8 +133,39 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
struct prestera_acl_rule *rule);
void prestera_acl_rule_del(struct prestera_switch *sw,
struct prestera_acl_rule *rule);
-int prestera_acl_rule_get_stats(struct prestera_switch *sw,
+int prestera_acl_rule_get_stats(struct prestera_acl *acl,
struct prestera_acl_rule *rule,
u64 *packets, u64 *bytes, u64 *last_use);
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_find(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key);
+void prestera_acl_rule_entry_destroy(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry *e);
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_create(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key,
+ struct prestera_acl_rule_entry_arg *arg);
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_get(struct prestera_acl *acl,
+ struct prestera_flow_block *block);
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_lookup(struct prestera_acl *acl,
+ struct prestera_flow_block *block);
+void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask);
+bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset);
+int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset);
+void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset);
+int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port);
+int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port);
+void
+prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
+ u16 pcl_id);
+
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
+ void *keymask, u32 *vtcam_id);
+int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id);
#endif /* _PRESTERA_ACL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
new file mode 100644
index 000000000000..4cd53a2dae46
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2021 Marvell International Ltd. All rights reserved */
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_counter.h"
+
+#define COUNTER_POLL_TIME (msecs_to_jiffies(1000))
+#define COUNTER_RESCHED_TIME (msecs_to_jiffies(50))
+#define COUNTER_BULK_SIZE (256)
+
+struct prestera_counter {
+ struct prestera_switch *sw;
+ struct delayed_work stats_dw;
+ struct mutex mtx; /* protect block_list */
+ struct prestera_counter_block **block_list;
+ u32 total_read;
+ u32 block_list_len;
+ u32 curr_idx;
+ bool is_fetching;
+};
+
+struct prestera_counter_block {
+ struct list_head list;
+ u32 id;
+ u32 offset;
+ u32 num_counters;
+ u32 client;
+ struct idr counter_idr;
+ refcount_t refcnt;
+ struct mutex mtx; /* protect stats and counter_idr */
+ struct prestera_counter_stats *stats;
+ u8 *counter_flag;
+ bool is_updating;
+ bool full;
+};
+
+enum {
+ COUNTER_FLAG_READY = 0,
+ COUNTER_FLAG_INVALID = 1
+};
+
+static bool
+prestera_counter_is_ready(struct prestera_counter_block *block, u32 id)
+{
+ return block->counter_flag[id - block->offset] == COUNTER_FLAG_READY;
+}
+
+static void prestera_counter_lock(struct prestera_counter *counter)
+{
+ mutex_lock(&counter->mtx);
+}
+
+static void prestera_counter_unlock(struct prestera_counter *counter)
+{
+ mutex_unlock(&counter->mtx);
+}
+
+static void prestera_counter_block_lock(struct prestera_counter_block *block)
+{
+ mutex_lock(&block->mtx);
+}
+
+static void prestera_counter_block_unlock(struct prestera_counter_block *block)
+{
+ mutex_unlock(&block->mtx);
+}
+
+static bool prestera_counter_block_incref(struct prestera_counter_block *block)
+{
+ return refcount_inc_not_zero(&block->refcnt);
+}
+
+static bool prestera_counter_block_decref(struct prestera_counter_block *block)
+{
+ return refcount_dec_and_test(&block->refcnt);
+}
+
+/* must be called with prestera_counter_block_lock() held */
+static void prestera_counter_stats_clear(struct prestera_counter_block *block,
+ u32 counter_id)
+{
+ memset(&block->stats[counter_id - block->offset], 0,
+ sizeof(*block->stats));
+}
+
+static struct prestera_counter_block *
+prestera_counter_block_lookup_not_full(struct prestera_counter *counter,
+ u32 client)
+{
+ u32 i;
+
+ prestera_counter_lock(counter);
+ for (i = 0; i < counter->block_list_len; i++) {
+ if (counter->block_list[i] &&
+ counter->block_list[i]->client == client &&
+ !counter->block_list[i]->full &&
+ prestera_counter_block_incref(counter->block_list[i])) {
+ prestera_counter_unlock(counter);
+ return counter->block_list[i];
+ }
+ }
+ prestera_counter_unlock(counter);
+
+ return NULL;
+}
+
+static int prestera_counter_block_list_add(struct prestera_counter *counter,
+ struct prestera_counter_block *block)
+{
+ struct prestera_counter_block **arr;
+ u32 i;
+
+ prestera_counter_lock(counter);
+
+ for (i = 0; i < counter->block_list_len; i++) {
+ if (counter->block_list[i])
+ continue;
+
+ counter->block_list[i] = block;
+ prestera_counter_unlock(counter);
+ return 0;
+ }
+
+ arr = krealloc(counter->block_list, (counter->block_list_len + 1) *
+ sizeof(*counter->block_list), GFP_KERNEL);
+ if (!arr) {
+ prestera_counter_unlock(counter);
+ return -ENOMEM;
+ }
+
+ counter->block_list = arr;
+ counter->block_list[counter->block_list_len] = block;
+ counter->block_list_len++;
+ prestera_counter_unlock(counter);
+ return 0;
+}
+
+static struct prestera_counter_block *
+prestera_counter_block_get(struct prestera_counter *counter, u32 client)
+{
+ struct prestera_counter_block *block;
+ int err;
+
+ block = prestera_counter_block_lookup_not_full(counter, client);
+ if (block)
+ return block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return ERR_PTR(-ENOMEM);
+
+ err = prestera_hw_counter_block_get(counter->sw, client,
+ &block->id, &block->offset,
+ &block->num_counters);
+ if (err)
+ goto err_block;
+
+ block->stats = kcalloc(block->num_counters,
+ sizeof(*block->stats), GFP_KERNEL);
+ if (!block->stats) {
+ err = -ENOMEM;
+ goto err_stats;
+ }
+
+ block->counter_flag = kcalloc(block->num_counters,
+ sizeof(*block->counter_flag),
+ GFP_KERNEL);
+ if (!block->counter_flag) {
+ err = -ENOMEM;
+ goto err_flag;
+ }
+
+ block->client = client;
+ mutex_init(&block->mtx);
+ refcount_set(&block->refcnt, 1);
+ idr_init_base(&block->counter_idr, block->offset);
+
+ err = prestera_counter_block_list_add(counter, block);
+ if (err)
+ goto err_list_add;
+
+ return block;
+
+err_list_add:
+ idr_destroy(&block->counter_idr);
+ mutex_destroy(&block->mtx);
+ kfree(block->counter_flag);
+err_flag:
+ kfree(block->stats);
+err_stats:
+ prestera_hw_counter_block_release(counter->sw, block->id);
+err_block:
+ kfree(block);
+ return ERR_PTR(err);
+}
+
+static void prestera_counter_block_put(struct prestera_counter *counter,
+ struct prestera_counter_block *block)
+{
+ u32 i;
+
+ if (!prestera_counter_block_decref(block))
+ return;
+
+ prestera_counter_lock(counter);
+ for (i = 0; i < counter->block_list_len; i++) {
+ if (counter->block_list[i] &&
+ counter->block_list[i]->id == block->id) {
+ counter->block_list[i] = NULL;
+ break;
+ }
+ }
+ prestera_counter_unlock(counter);
+
+ WARN_ON(!idr_is_empty(&block->counter_idr));
+
+ prestera_hw_counter_block_release(counter->sw, block->id);
+ idr_destroy(&block->counter_idr);
+ mutex_destroy(&block->mtx);
+ kfree(block->stats);
+ kfree(block);
+}
+
+static int prestera_counter_get_vacant(struct prestera_counter_block *block,
+ u32 *id)
+{
+ int free_id;
+
+ if (block->full)
+ return -ENOSPC;
+
+ prestera_counter_block_lock(block);
+ free_id = idr_alloc_cyclic(&block->counter_idr, NULL, block->offset,
+ block->offset + block->num_counters,
+ GFP_KERNEL);
+ if (free_id < 0) {
+ if (free_id == -ENOSPC)
+ block->full = true;
+
+ prestera_counter_block_unlock(block);
+ return free_id;
+ }
+ *id = free_id;
+ prestera_counter_block_unlock(block);
+
+ return 0;
+}
+
+int prestera_counter_get(struct prestera_counter *counter, u32 client,
+ struct prestera_counter_block **bl, u32 *counter_id)
+{
+ struct prestera_counter_block *block;
+ int err;
+ u32 id;
+
+get_next_block:
+ block = prestera_counter_block_get(counter, client);
+ if (IS_ERR(block))
+ return PTR_ERR(block);
+
+ err = prestera_counter_get_vacant(block, &id);
+ if (err) {
+ prestera_counter_block_put(counter, block);
+
+ if (err == -ENOSPC)
+ goto get_next_block;
+
+ return err;
+ }
+
+ prestera_counter_block_lock(block);
+ if (block->is_updating)
+ block->counter_flag[id - block->offset] = COUNTER_FLAG_INVALID;
+ prestera_counter_block_unlock(block);
+
+ *counter_id = id;
+ *bl = block;
+
+ return 0;
+}
+
+void prestera_counter_put(struct prestera_counter *counter,
+ struct prestera_counter_block *block, u32 counter_id)
+{
+ if (!block)
+ return;
+
+ prestera_counter_block_lock(block);
+ idr_remove(&block->counter_idr, counter_id);
+ block->full = false;
+ prestera_counter_stats_clear(block, counter_id);
+ prestera_counter_block_unlock(block);
+
+ prestera_hw_counter_clear(counter->sw, block->id, counter_id);
+ prestera_counter_block_put(counter, block);
+}
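A rough lifecycle sketch of the counter API as the ACL code uses it (error handling trimmed, names taken from this patch):

    struct prestera_counter_block *blk;
    u64 packets, bytes;
    u32 id;

    if (prestera_counter_get(sw->counter,
                             PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0,
                             &blk, &id))
            return; /* no counter could be allocated */

    /* ... later, e.g. when serving a stats request ... */
    prestera_counter_stats_get(sw->counter, blk, id, &packets, &bytes);

    /* clears the stats, frees the idr slot, drops the block reference */
    prestera_counter_put(sw->counter, blk, id);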
+
+static u32 prestera_counter_block_idx_next(struct prestera_counter *counter,
+ u32 curr_idx)
+{
+ u32 idx, i, start = curr_idx + 1;
+
+ prestera_counter_lock(counter);
+ for (i = 0; i < counter->block_list_len; i++) {
+ idx = (start + i) % counter->block_list_len;
+ if (!counter->block_list[idx])
+ continue;
+
+ prestera_counter_unlock(counter);
+ return idx;
+ }
+ prestera_counter_unlock(counter);
+
+ return 0;
+}
+
+static struct prestera_counter_block *
+prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx)
+{
+ if (idx >= counter->block_list_len)
+ return NULL;
+
+ prestera_counter_lock(counter);
+
+ if (!counter->block_list[idx] ||
+ !prestera_counter_block_incref(counter->block_list[idx])) {
+ prestera_counter_unlock(counter);
+ return NULL;
+ }
+
+ prestera_counter_unlock(counter);
+ return counter->block_list[idx];
+}
+
+static void prestera_counter_stats_work(struct work_struct *work)
+{
+ struct delayed_work *dl_work =
+ container_of(work, struct delayed_work, work);
+ struct prestera_counter *counter =
+ container_of(dl_work, struct prestera_counter, stats_dw);
+ struct prestera_counter_block *block;
+ u32 resched_time = COUNTER_POLL_TIME;
+ u32 count = COUNTER_BULK_SIZE;
+ bool done = false;
+ int err;
+ u32 i;
+
+ block = prestera_counter_block_get_by_idx(counter, counter->curr_idx);
+ if (!block) {
+ if (counter->is_fetching)
+ goto abort;
+
+ goto next;
+ }
+
+ if (!counter->is_fetching) {
+ err = prestera_hw_counter_trigger(counter->sw, block->id);
+ if (err)
+ goto abort;
+
+ prestera_counter_block_lock(block);
+ block->is_updating = true;
+ prestera_counter_block_unlock(block);
+
+ counter->is_fetching = true;
+ counter->total_read = 0;
+ resched_time = COUNTER_RESCHED_TIME;
+ goto resched;
+ }
+
+ prestera_counter_block_lock(block);
+ err = prestera_hw_counters_get(counter->sw, counter->total_read,
+ &count, &done,
+ &block->stats[counter->total_read]);
+ prestera_counter_block_unlock(block);
+ if (err)
+ goto abort;
+
+ counter->total_read += count;
+ if (!done || counter->total_read < block->num_counters) {
+ resched_time = COUNTER_RESCHED_TIME;
+ goto resched;
+ }
+
+ for (i = 0; i < block->num_counters; i++) {
+ if (block->counter_flag[i] == COUNTER_FLAG_INVALID) {
+ prestera_counter_block_lock(block);
+ block->counter_flag[i] = COUNTER_FLAG_READY;
+ memset(&block->stats[i], 0, sizeof(*block->stats));
+ prestera_counter_block_unlock(block);
+ }
+ }
+
+ prestera_counter_block_lock(block);
+ block->is_updating = false;
+ prestera_counter_block_unlock(block);
+
+ goto next;
+abort:
+ prestera_hw_counter_abort(counter->sw);
+next:
+ counter->is_fetching = false;
+ counter->curr_idx =
+ prestera_counter_block_idx_next(counter, counter->curr_idx);
+resched:
+ if (block)
+ prestera_counter_block_put(counter, block);
+
+ schedule_delayed_work(&counter->stats_dw, resched_time);
+}
+
+/* Can be executed without rtnl_lock().
+ * So pay attention when changing this code.
+ */
+int prestera_counter_stats_get(struct prestera_counter *counter,
+ struct prestera_counter_block *block,
+ u32 counter_id, u64 *packets, u64 *bytes)
+{
+ if (!block || !prestera_counter_is_ready(block, counter_id)) {
+ *packets = 0;
+ *bytes = 0;
+ return 0;
+ }
+
+ prestera_counter_block_lock(block);
+ *packets = block->stats[counter_id - block->offset].packets;
+ *bytes = block->stats[counter_id - block->offset].bytes;
+
+ prestera_counter_stats_clear(block, counter_id);
+ prestera_counter_block_unlock(block);
+
+ return 0;
+}
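Because the stored stats are cleared after each successful read, every call returns the delta accumulated since the previous one; a caller that wants running totals must sum the deltas itself (hypothetical caller-side sketch):

    u64 dp, db;

    if (!prestera_counter_stats_get(counter, blk, id, &dp, &db)) {
            total_packets += dp; /* caller-owned running totals */
            total_bytes += db;
    }

This is also why the flower code below reports FLOW_ACTION_HW_STATS_DELAYED: flow_stats_update() adds each delta onto the kernel's own totals.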
+
+int prestera_counter_init(struct prestera_switch *sw)
+{
+ struct prestera_counter *counter;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return -ENOMEM;
+
+ counter->block_list = kzalloc(sizeof(*counter->block_list), GFP_KERNEL);
+ if (!counter->block_list) {
+ kfree(counter);
+ return -ENOMEM;
+ }
+
+ mutex_init(&counter->mtx);
+ counter->block_list_len = 1;
+ counter->sw = sw;
+ sw->counter = counter;
+
+ INIT_DELAYED_WORK(&counter->stats_dw, prestera_counter_stats_work);
+ schedule_delayed_work(&counter->stats_dw, COUNTER_POLL_TIME);
+
+ return 0;
+}
+
+void prestera_counter_fini(struct prestera_switch *sw)
+{
+ struct prestera_counter *counter = sw->counter;
+ u32 i;
+
+ cancel_delayed_work_sync(&counter->stats_dw);
+
+ for (i = 0; i < counter->block_list_len; i++)
+ WARN_ON(counter->block_list[i]);
+
+ mutex_destroy(&counter->mtx);
+ kfree(counter->block_list);
+ kfree(counter);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.h b/drivers/net/ethernet/marvell/prestera/prestera_counter.h
new file mode 100644
index 000000000000..ad6b73907799
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2021 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_COUNTER_H_
+#define _PRESTERA_COUNTER_H_
+
+#include <linux/types.h>
+
+struct prestera_counter_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+struct prestera_switch;
+struct prestera_counter;
+struct prestera_counter_block;
+
+int prestera_counter_init(struct prestera_switch *sw);
+void prestera_counter_fini(struct prestera_switch *sw);
+
+int prestera_counter_get(struct prestera_counter *counter, u32 client,
+ struct prestera_counter_block **block,
+ u32 *counter_id);
+void prestera_counter_put(struct prestera_counter *counter,
+ struct prestera_counter_block *block, u32 counter_id);
+int prestera_counter_stats_get(struct prestera_counter *counter,
+ struct prestera_counter_block *block,
+ u32 counter_id, u64 *packets, u64 *bytes);
+
+#endif /* _PRESTERA_COUNTER_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
index c9891e968259..d849f046ece7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -40,6 +40,11 @@ static int prestera_flow_block_flower_cb(struct prestera_flow_block *block,
return 0;
case FLOW_CLS_STATS:
return prestera_flower_stats(block, f);
+ case FLOW_CLS_TMPLT_CREATE:
+ return prestera_flower_tmplt_create(block, f);
+ case FLOW_CLS_TMPLT_DESTROY:
+ prestera_flower_tmplt_destroy(block, f);
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -60,11 +65,102 @@ static int prestera_flow_block_cb(enum tc_setup_type type,
}
}
+static void prestera_flow_block_destroy(void *cb_priv)
+{
+ struct prestera_flow_block *block = cb_priv;
+
+ prestera_flower_template_cleanup(block);
+
+ WARN_ON(!list_empty(&block->binding_list));
+
+ kfree(block);
+}
+
+static struct prestera_flow_block *
+prestera_flow_block_create(struct prestera_switch *sw, struct net *net)
+{
+ struct prestera_flow_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+
+ INIT_LIST_HEAD(&block->binding_list);
+ block->net = net;
+ block->sw = sw;
+
+ return block;
+}
+
static void prestera_flow_block_release(void *cb_priv)
{
struct prestera_flow_block *block = cb_priv;
- prestera_acl_block_destroy(block);
+ prestera_flow_block_destroy(block);
+}
+
+static bool
+prestera_flow_block_is_bound(const struct prestera_flow_block *block)
+{
+ return block->ruleset_zero;
+}
+
+static struct prestera_flow_block_binding *
+prestera_flow_block_lookup(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->port == port)
+ return binding;
+
+ return NULL;
+}
+
+static int prestera_flow_block_bind(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+ int err;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding)
+ return -ENOMEM;
+
+ binding->span_id = PRESTERA_SPAN_INVALID_ID;
+ binding->port = port;
+
+ if (prestera_flow_block_is_bound(block)) {
+ err = prestera_acl_ruleset_bind(block->ruleset_zero, port);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_ruleset_bind:
+ kfree(binding);
+ return err;
+}
+
+static int prestera_flow_block_unbind(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+
+ binding = prestera_flow_block_lookup(block, port);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ if (prestera_flow_block_is_bound(block))
+ prestera_acl_ruleset_unbind(block->ruleset_zero, port);
+
+ kfree(binding);
+ return 0;
}
static struct prestera_flow_block *
@@ -78,7 +174,7 @@ prestera_flow_block_get(struct prestera_switch *sw,
block_cb = flow_block_cb_lookup(f->block,
prestera_flow_block_cb, sw);
if (!block_cb) {
- block = prestera_acl_block_create(sw, f->net);
+ block = prestera_flow_block_create(sw, f->net);
if (!block)
return ERR_PTR(-ENOMEM);
@@ -86,7 +182,7 @@ prestera_flow_block_get(struct prestera_switch *sw,
sw, block,
prestera_flow_block_release);
if (IS_ERR(block_cb)) {
- prestera_acl_block_destroy(block);
+ prestera_flow_block_destroy(block);
return ERR_CAST(block_cb);
}
@@ -110,7 +206,7 @@ static void prestera_flow_block_put(struct prestera_flow_block *block)
return;
flow_block_cb_free(block_cb);
- prestera_acl_block_destroy(block);
+ prestera_flow_block_destroy(block);
}
static int prestera_setup_flow_block_bind(struct prestera_port *port,
@@ -128,7 +224,7 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port,
block_cb = block->block_cb;
- err = prestera_acl_block_bind(block, port);
+ err = prestera_flow_block_bind(block, port);
if (err)
goto err_block_bind;
@@ -162,7 +258,7 @@ static void prestera_setup_flow_block_unbind(struct prestera_port *port,
prestera_span_destroy(block);
- err = prestera_acl_block_unbind(block, port);
+ err = prestera_flow_block_unbind(block, port);
if (err)
goto error;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
index 467c7038cace..1ea5b745bf72 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -7,6 +7,24 @@
#include <net/flow_offload.h>
struct prestera_port;
+struct prestera_switch;
+struct prestera_flower_template;
+
+struct prestera_flow_block_binding {
+ struct list_head list;
+ struct prestera_port *port;
+ int span_id;
+};
+
+struct prestera_flow_block {
+ struct list_head binding_list;
+ struct prestera_switch *sw;
+ struct net *net;
+ struct prestera_acl_ruleset *ruleset_zero;
+ struct flow_block_cb *block_cb;
+ struct prestera_flower_template *tmplt;
+ unsigned int rule_count;
+};
int prestera_flow_block_setup(struct prestera_port *port,
struct flow_block_offload *f);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index e571ba09ec08..19c1417fd05f 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -3,42 +3,61 @@
#include "prestera.h"
#include "prestera_acl.h"
+#include "prestera_flow.h"
#include "prestera_flower.h"
+struct prestera_flower_template {
+ struct prestera_acl_ruleset *ruleset;
+};
+
+void prestera_flower_template_cleanup(struct prestera_flow_block *block)
+{
+ if (block->tmplt) {
+ /* put the reference to the ruleset kept in create */
+ prestera_acl_ruleset_put(block->tmplt->ruleset);
+ kfree(block->tmplt);
+ block->tmplt = NULL;
+ return;
+ }
+}
+
static int prestera_flower_parse_actions(struct prestera_flow_block *block,
struct prestera_acl_rule *rule,
struct flow_action *flow_action,
struct netlink_ext_ack *extack)
{
- struct prestera_acl_rule_action_entry a_entry;
const struct flow_action_entry *act;
- int err, i;
+ int i;
+ /* the whole rule->re_arg struct must be zero-initialized */
if (!flow_action_has_entries(flow_action))
return 0;
flow_action_for_each(i, act, flow_action) {
- memset(&a_entry, 0, sizeof(a_entry));
-
switch (act->id) {
case FLOW_ACTION_ACCEPT:
- a_entry.id = PRESTERA_ACL_RULE_ACTION_ACCEPT;
+ if (rule->re_arg.accept.valid)
+ return -EEXIST;
+
+ rule->re_arg.accept.valid = 1;
break;
case FLOW_ACTION_DROP:
- a_entry.id = PRESTERA_ACL_RULE_ACTION_DROP;
+ if (rule->re_arg.drop.valid)
+ return -EEXIST;
+
+ rule->re_arg.drop.valid = 1;
break;
case FLOW_ACTION_TRAP:
- a_entry.id = PRESTERA_ACL_RULE_ACTION_TRAP;
+ if (rule->re_arg.trap.valid)
+ return -EEXIST;
+
+ rule->re_arg.trap.valid = 1;
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
pr_err("Unsupported action\n");
return -EOPNOTSUPP;
}
-
- err = prestera_acl_rule_action_add(rule, &a_entry);
- if (err)
- return err;
}
return 0;
@@ -47,12 +66,12 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block,
static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
struct flow_cls_offload *f,
struct prestera_flow_block *block)
-{
- struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
- struct prestera_acl_rule_match_entry m_entry = {0};
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+ struct prestera_acl_match *r_match = &rule->re_key.match;
+ struct prestera_port *port;
struct net_device *ingress_dev;
struct flow_match_meta match;
- struct prestera_port *port;
+ __be16 key, mask;
flow_rule_match_meta(f_rule, &match);
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
@@ -61,7 +80,7 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
return -EINVAL;
}
- ingress_dev = __dev_get_by_index(prestera_acl_block_net(block),
+ ingress_dev = __dev_get_by_index(block->net,
match.key->ingress_ifindex);
if (!ingress_dev) {
NL_SET_ERR_MSG_MOD(f->common.extack,
@@ -76,22 +95,28 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
}
port = netdev_priv(ingress_dev);
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT;
- m_entry.keymask.u64.key = port->hw_id | ((u64)port->dev_id << 32);
- m_entry.keymask.u64.mask = ~(u64)0;
+ mask = htons(0x1FFF);
+ key = htons(port->hw_id);
+ rule_match_set(r_match->key, SYS_PORT, key);
+ rule_match_set(r_match->mask, SYS_PORT, mask);
+
+ mask = htons(0x1FF);
+ key = htons(port->dev_id);
+ rule_match_set(r_match->key, SYS_DEV, key);
+ rule_match_set(r_match->mask, SYS_DEV, mask);
+
+ return 0;
- return prestera_acl_rule_match_add(rule, &m_entry);
}
static int prestera_flower_parse(struct prestera_flow_block *block,
struct prestera_acl_rule *rule,
struct flow_cls_offload *f)
-{
- struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = f_rule->match.dissector;
- struct prestera_acl_rule_match_entry m_entry;
- u16 n_proto_mask = 0;
- u16 n_proto_key = 0;
+ struct prestera_acl_match *r_match = &rule->re_key.match;
+ __be16 n_proto_mask = 0;
+ __be16 n_proto_key = 0;
u16 addr_type = 0;
u8 ip_proto = 0;
int err;
@@ -129,32 +154,19 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
struct flow_match_basic match;
flow_rule_match_basic(f_rule, &match);
- n_proto_key = ntohs(match.key->n_proto);
- n_proto_mask = ntohs(match.mask->n_proto);
+ n_proto_key = match.key->n_proto;
+ n_proto_mask = match.mask->n_proto;
- if (n_proto_key == ETH_P_ALL) {
+ if (ntohs(match.key->n_proto) == ETH_P_ALL) {
n_proto_key = 0;
n_proto_mask = 0;
}
- /* add eth type key,mask */
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE;
- m_entry.keymask.u16.key = n_proto_key;
- m_entry.keymask.u16.mask = n_proto_mask;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
-
- /* add ip proto key,mask */
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO;
- m_entry.keymask.u8.key = match.key->ip_proto;
- m_entry.keymask.u8.mask = match.mask->ip_proto;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, ETH_TYPE, n_proto_key);
+ rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask);
+ rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto);
+ rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto);
ip_proto = match.key->ip_proto;
}
@@ -163,27 +175,27 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_eth_addrs(f_rule, &match);
- /* add ethernet dst key,mask */
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC;
- memcpy(&m_entry.keymask.mac.key,
- &match.key->dst, sizeof(match.key->dst));
- memcpy(&m_entry.keymask.mac.mask,
- &match.mask->dst, sizeof(match.mask->dst));
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
-
- /* add ethernet src key,mask */
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC;
- memcpy(&m_entry.keymask.mac.key,
- &match.key->src, sizeof(match.key->src));
- memcpy(&m_entry.keymask.mac.mask,
- &match.mask->src, sizeof(match.mask->src));
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ /* DA key, mask */
+ rule_match_set_n(r_match->key,
+ ETH_DMAC_0, &match.key->dst[0], 4);
+ rule_match_set_n(r_match->key,
+ ETH_DMAC_1, &match.key->dst[4], 2);
+
+ rule_match_set_n(r_match->mask,
+ ETH_DMAC_0, &match.mask->dst[0], 4);
+ rule_match_set_n(r_match->mask,
+ ETH_DMAC_1, &match.mask->dst[4], 2);
+
+ /* SA key, mask */
+ rule_match_set_n(r_match->key,
+ ETH_SMAC_0, &match.key->src[0], 4);
+ rule_match_set_n(r_match->key,
+ ETH_SMAC_1, &match.key->src[4], 2);
+
+ rule_match_set_n(r_match->mask,
+ ETH_SMAC_0, &match.mask->src[0], 4);
+ rule_match_set_n(r_match->mask,
+ ETH_SMAC_1, &match.mask->src[4], 2);
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -191,25 +203,11 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_ipv4_addrs(f_rule, &match);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC;
- memcpy(&m_entry.keymask.u32.key,
- &match.key->src, sizeof(match.key->src));
- memcpy(&m_entry.keymask.u32.mask,
- &match.mask->src, sizeof(match.mask->src));
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, IP_SRC, match.key->src);
+ rule_match_set(r_match->mask, IP_SRC, match.mask->src);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST;
- memcpy(&m_entry.keymask.u32.key,
- &match.key->dst, sizeof(match.key->dst));
- memcpy(&m_entry.keymask.u32.mask,
- &match.mask->dst, sizeof(match.mask->dst));
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, IP_DST, match.key->dst);
+ rule_match_set(r_match->mask, IP_DST, match.mask->dst);
}
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
@@ -224,21 +222,11 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_ports(f_rule, &match);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC;
- m_entry.keymask.u16.key = ntohs(match.key->src);
- m_entry.keymask.u16.mask = ntohs(match.mask->src);
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, L4_PORT_SRC, match.key->src);
+ rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST;
- m_entry.keymask.u16.key = ntohs(match.key->dst);
- m_entry.keymask.u16.mask = ntohs(match.mask->dst);
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, L4_PORT_DST, match.key->dst);
+ rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
}
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -247,22 +235,15 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_vlan(f_rule, &match);
if (match.mask->vlan_id != 0) {
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID;
- m_entry.keymask.u16.key = match.key->vlan_id;
- m_entry.keymask.u16.mask = match.mask->vlan_id;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ __be16 key = cpu_to_be16(match.key->vlan_id);
+ __be16 mask = cpu_to_be16(match.mask->vlan_id);
+
+ rule_match_set(r_match->key, VLAN_ID, key);
+ rule_match_set(r_match->mask, VLAN_ID, mask);
}
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID;
- m_entry.keymask.u16.key = ntohs(match.key->vlan_tpid);
- m_entry.keymask.u16.mask = ntohs(match.mask->vlan_tpid);
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
+ rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
}
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
@@ -270,90 +251,164 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_icmp(f_rule, &match);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE;
- m_entry.keymask.u8.key = match.key->type;
- m_entry.keymask.u8.mask = match.mask->type;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
+ rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE;
- m_entry.keymask.u8.key = match.key->code;
- m_entry.keymask.u8.mask = match.mask->code;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, ICMP_CODE, match.key->code);
+ rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
}
- return prestera_flower_parse_actions(block, rule,
- &f->rule->action,
+ return prestera_flower_parse_actions(block, rule, &f->rule->action,
f->common.extack);
}
int prestera_flower_replace(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- struct prestera_switch *sw = prestera_acl_block_sw(block);
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl *acl = block->sw->acl;
struct prestera_acl_rule *rule;
int err;
- rule = prestera_acl_rule_create(block, f->cookie);
- if (IS_ERR(rule))
- return PTR_ERR(rule);
+ ruleset = prestera_acl_ruleset_get(acl, block);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ /* increments the ruleset reference */
+ rule = prestera_acl_rule_create(ruleset, f->cookie);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_rule_create;
+ }
err = prestera_flower_parse(block, rule, f);
if (err)
- goto err_flower_parse;
+ goto err_rule_add;
+
+ if (!prestera_acl_ruleset_is_offload(ruleset)) {
+ err = prestera_acl_ruleset_offload(ruleset);
+ if (err)
+ goto err_ruleset_offload;
+ }
- err = prestera_acl_rule_add(sw, rule);
+ err = prestera_acl_rule_add(block->sw, rule);
if (err)
goto err_rule_add;
+ prestera_acl_ruleset_put(ruleset);
return 0;
+err_ruleset_offload:
err_rule_add:
-err_flower_parse:
prestera_acl_rule_destroy(rule);
+err_rule_create:
+ prestera_acl_ruleset_put(ruleset);
return err;
}
void prestera_flower_destroy(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
+ struct prestera_acl_ruleset *ruleset;
struct prestera_acl_rule *rule;
- struct prestera_switch *sw;
- rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block),
- f->cookie);
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
+ if (IS_ERR(ruleset))
+ return;
+
+ rule = prestera_acl_rule_lookup(ruleset, f->cookie);
if (rule) {
- sw = prestera_acl_block_sw(block);
- prestera_acl_rule_del(sw, rule);
+ prestera_acl_rule_del(block->sw, rule);
prestera_acl_rule_destroy(rule);
}
+ prestera_acl_ruleset_put(ruleset);
+}
+
+int prestera_flower_tmplt_create(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_flower_template *template;
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_rule rule;
+ int err;
+
+ memset(&rule, 0, sizeof(rule));
+ err = prestera_flower_parse(block, &rule, f);
+ if (err)
+ return err;
+
+ template = kmalloc(sizeof(*template), GFP_KERNEL);
+ if (!template) {
+ err = -ENOMEM;
+ goto err_malloc;
+ }
+
+ prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block);
+ if (IS_ERR_OR_NULL(ruleset)) {
+ err = -EINVAL;
+ goto err_ruleset_get;
+ }
+
+ /* preserve the keymask/template in this ruleset */
+ prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+
+ /* skip the error, as it is not possible to reject a template
+ * operation; keep the reference to the ruleset so that rules can
+ * be added to it later. If offload fails here, the ruleset will
+ * be offloaded again when a new rule is added. It is also
+ * unlikely that the ruleset is already offloaded at this stage.
+ */
+ prestera_acl_ruleset_offload(ruleset);
+
+ /* keep the reference to the ruleset */
+ template->ruleset = ruleset;
+ block->tmplt = template;
+ return 0;
+
+err_ruleset_get:
+ kfree(template);
+err_malloc:
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed");
+ return err;
+}
+
+void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ prestera_flower_template_cleanup(block);
}
int prestera_flower_stats(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- struct prestera_switch *sw = prestera_acl_block_sw(block);
+ struct prestera_acl_ruleset *ruleset;
struct prestera_acl_rule *rule;
u64 packets;
u64 lastuse;
u64 bytes;
int err;
- rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block),
- f->cookie);
- if (!rule)
- return -EINVAL;
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ rule = prestera_acl_rule_lookup(ruleset, f->cookie);
+ if (!rule) {
+ err = -EINVAL;
+ goto err_rule_get_stats;
+ }
- err = prestera_acl_rule_get_stats(sw, rule, &packets, &bytes, &lastuse);
+ err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets,
+ &bytes, &lastuse);
if (err)
- return err;
+ goto err_rule_get_stats;
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
- FLOW_ACTION_HW_STATS_IMMEDIATE);
- return 0;
+ FLOW_ACTION_HW_STATS_DELAYED);
+
+err_rule_get_stats:
+ prestera_acl_ruleset_put(ruleset);
+ return err;
}
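For orientation, the reference discipline the rewritten prestera_flower_replace() uses can be condensed as below. This is an illustrative sketch only, not driver code: parsing, template offload and the error-unwinding labels are elided, and the helper name is invented.

static int flower_replace_sketch(struct prestera_flow_block *block,
                                 struct flow_cls_offload *f)
{
    struct prestera_acl_ruleset *ruleset;
    struct prestera_acl_rule *rule;
    int err;

    ruleset = prestera_acl_ruleset_get(block->sw->acl, block);
    if (IS_ERR(ruleset))
        return PTR_ERR(ruleset);

    /* the rule takes its own ruleset reference on create, so the
     * caller's reference can be dropped on every exit path
     */
    rule = prestera_acl_rule_create(ruleset, f->cookie);
    err = IS_ERR(rule) ? PTR_ERR(rule)
                       : prestera_acl_rule_add(block->sw, rule);

    prestera_acl_ruleset_put(ruleset); /* drop the caller's reference */
    return err;
}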
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
index 91e045eec58b..dc3aa4280e9f 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
-/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. */
#ifndef _PRESTERA_FLOWER_H_
#define _PRESTERA_FLOWER_H_
#include <net/pkt_cls.h>
+struct prestera_switch;
struct prestera_flow_block;
int prestera_flower_replace(struct prestera_flow_block *block,
@@ -14,5 +15,10 @@ void prestera_flower_destroy(struct prestera_flow_block *block,
struct flow_cls_offload *f);
int prestera_flower_stats(struct prestera_flow_block *block,
struct flow_cls_offload *f);
+int prestera_flower_tmplt_create(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+void prestera_flower_template_cleanup(struct prestera_flow_block *block);
#endif /* _PRESTERA_FLOWER_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index 9b8b1ed474fc..e6bfadc874c5 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -9,6 +9,7 @@
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_acl.h"
+#include "prestera_counter.h"
#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
@@ -38,13 +39,24 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402,
PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403,
- PRESTERA_CMD_TYPE_ACL_RULE_ADD = 0x500,
- PRESTERA_CMD_TYPE_ACL_RULE_DELETE = 0x501,
- PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET = 0x510,
- PRESTERA_CMD_TYPE_ACL_RULESET_CREATE = 0x520,
- PRESTERA_CMD_TYPE_ACL_RULESET_DELETE = 0x521,
- PRESTERA_CMD_TYPE_ACL_PORT_BIND = 0x530,
- PRESTERA_CMD_TYPE_ACL_PORT_UNBIND = 0x531,
+ PRESTERA_CMD_TYPE_COUNTER_GET = 0x510,
+ PRESTERA_CMD_TYPE_COUNTER_ABORT = 0x511,
+ PRESTERA_CMD_TYPE_COUNTER_TRIGGER = 0x512,
+ PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET = 0x513,
+ PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE = 0x514,
+ PRESTERA_CMD_TYPE_COUNTER_CLEAR = 0x515,
+
+ PRESTERA_CMD_TYPE_VTCAM_CREATE = 0x540,
+ PRESTERA_CMD_TYPE_VTCAM_DESTROY = 0x541,
+ PRESTERA_CMD_TYPE_VTCAM_RULE_ADD = 0x550,
+ PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE = 0x551,
+ PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND = 0x560,
+ PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND = 0x561,
+
+ PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600,
+ PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601,
+ PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630,
+ PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631,
PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
@@ -359,76 +371,84 @@ struct prestera_msg_bridge_resp {
u8 pad[2];
};
-struct prestera_msg_acl_action {
- __le32 id;
- __le32 reserved[5];
+struct prestera_msg_vtcam_create_req {
+ struct prestera_msg_cmd cmd;
+ __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ u8 direction;
+ u8 lookup;
+ u8 pad[2];
};
-struct prestera_msg_acl_match {
- __le32 type;
- __le32 __reserved;
- union {
- struct {
- u8 key;
- u8 mask;
- } u8;
- struct {
- __le16 key;
- __le16 mask;
- } u16;
- struct {
- __le32 key;
- __le32 mask;
- } u32;
- struct {
- __le64 key;
- __le64 mask;
- } u64;
- struct {
- u8 key[ETH_ALEN];
- u8 mask[ETH_ALEN];
- } mac;
- } keymask;
+struct prestera_msg_vtcam_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 vtcam_id;
};
-struct prestera_msg_acl_rule_req {
+struct prestera_msg_vtcam_rule_add_req {
struct prestera_msg_cmd cmd;
- __le32 id;
- __le32 priority;
- __le16 ruleset_id;
- u8 n_actions;
- u8 n_matches;
+ __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 vtcam_id;
+ __le32 prio;
+ __le32 n_act;
};
-struct prestera_msg_acl_rule_resp {
- struct prestera_msg_ret ret;
+struct prestera_msg_vtcam_rule_del_req {
+ struct prestera_msg_cmd cmd;
+ __le32 vtcam_id;
__le32 id;
};
-struct prestera_msg_acl_rule_stats_resp {
+struct prestera_msg_vtcam_bind_req {
+ struct prestera_msg_cmd cmd;
+ union {
+ struct {
+ __le32 hw_id;
+ __le32 dev_id;
+ } port;
+ __le32 index;
+ };
+ __le32 vtcam_id;
+ __le16 pcl_id;
+ __le16 type;
+};
+
+struct prestera_msg_vtcam_resp {
struct prestera_msg_ret ret;
- __le64 packets;
- __le64 bytes;
+ __le32 vtcam_id;
+ __le32 rule_id;
};
-struct prestera_msg_acl_ruleset_bind_req {
- struct prestera_msg_cmd cmd;
- __le32 port;
- __le32 dev;
- __le16 ruleset_id;
- u8 pad[2];
+struct prestera_msg_acl_action {
+ __le32 id;
+ __le32 __reserved;
+ union {
+ struct {
+ __le32 id;
+ } count;
+ __le32 reserved[6];
+ };
};
-struct prestera_msg_acl_ruleset_req {
+struct prestera_msg_counter_req {
struct prestera_msg_cmd cmd;
- __le16 id;
- u8 pad[2];
+ __le32 client;
+ __le32 block_id;
+ __le32 num_counters;
+};
+
+struct prestera_msg_counter_stats {
+ __le64 packets;
+ __le64 bytes;
};
-struct prestera_msg_acl_ruleset_resp {
+struct prestera_msg_counter_resp {
struct prestera_msg_ret ret;
- __le16 id;
- u8 pad[2];
+ __le32 block_id;
+ __le32 offset;
+ __le32 num_counters;
+ __le32 done;
+ struct prestera_msg_counter_stats stats[];
};
struct prestera_msg_span_req {
@@ -465,6 +485,48 @@ struct prestera_msg_rxtx_resp {
__le32 map_addr;
};
+struct prestera_msg_iface {
+ union {
+ struct {
+ __le32 dev;
+ __le32 port;
+ };
+ __le16 lag_id;
+ };
+ __le16 vr_id;
+ __le16 vid;
+ u8 type;
+ u8 __pad[3];
+};
+
+struct prestera_msg_rif_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_iface iif;
+ __le32 mtu;
+ __le16 rif_id;
+ __le16 __reserved;
+ u8 mac[ETH_ALEN];
+ u8 __pad[2];
+};
+
+struct prestera_msg_rif_resp {
+ struct prestera_msg_ret ret;
+ __le16 rif_id;
+ u8 __pad[2];
+};
+
+struct prestera_msg_vr_req {
+ struct prestera_msg_cmd cmd;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
+struct prestera_msg_vr_resp {
+ struct prestera_msg_ret ret;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
struct prestera_msg_lag_req {
struct prestera_msg_cmd cmd;
__le32 port;
@@ -521,14 +583,24 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_vlan_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_fdb_req) != 28);
BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_req) != 16);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_req) != 16);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_bind_req) != 16);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_span_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_stp_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_lag_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_cpu_code_counter_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_create_req) != 84);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_destroy_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_add_req) != 168);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_del_req) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_bind_req) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_action) != 32);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8);
+
+ /* structures that are part of req/resp fw messages */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
/* check responses */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
@@ -537,11 +609,12 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_port_stats_resp) != 248);
BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_resp) != 20);
BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_resp) != 12);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_resp) != 12);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_stats_resp) != 24);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_span_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_resp) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_resp) != 24);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12);
/* check events */
BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20);
@@ -1044,225 +1117,162 @@ static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause)
}
}
-int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id)
+int prestera_hw_vtcam_create(struct prestera_switch *sw,
+ u8 lookup, const u32 *keymask, u32 *vtcam_id,
+ enum prestera_hw_vtcam_direction_t dir)
{
- struct prestera_msg_acl_ruleset_resp resp;
- struct prestera_msg_acl_ruleset_req req;
int err;
+ struct prestera_msg_vtcam_resp resp;
+ struct prestera_msg_vtcam_create_req req = {
+ .lookup = lookup,
+ .direction = dir,
+ };
+
+ if (keymask)
+ memcpy(req.keymask, keymask, sizeof(req.keymask));
+ else
+ memset(req.keymask, 0, sizeof(req.keymask));
- err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULESET_CREATE,
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_CREATE,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
- *ruleset_id = __le16_to_cpu(resp.id);
-
+ *vtcam_id = __le32_to_cpu(resp.vtcam_id);
return 0;
}
-int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, u16 ruleset_id)
+int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id)
{
- struct prestera_msg_acl_ruleset_req req = {
- .id = __cpu_to_le16(ruleset_id),
+ struct prestera_msg_vtcam_destroy_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
};
- return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULESET_DELETE,
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_DESTROY,
&req.cmd, sizeof(req));
}
-static int prestera_hw_acl_actions_put(struct prestera_msg_acl_action *action,
- struct prestera_acl_rule *rule)
+static int
+prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action,
+ struct prestera_acl_hw_action_info *info)
{
- struct list_head *a_list = prestera_acl_rule_action_list_get(rule);
- struct prestera_acl_rule_action_entry *a_entry;
- int i = 0;
-
- list_for_each_entry(a_entry, a_list, list) {
- action[i].id = __cpu_to_le32(a_entry->id);
-
- switch (a_entry->id) {
- case PRESTERA_ACL_RULE_ACTION_ACCEPT:
- case PRESTERA_ACL_RULE_ACTION_DROP:
- case PRESTERA_ACL_RULE_ACTION_TRAP:
- /* just rule action id, no specific data */
- break;
- default:
- return -EINVAL;
- }
+ action->id = __cpu_to_le32(info->id);
- i++;
- }
-
- return 0;
-}
-
-static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match,
- struct prestera_acl_rule *rule)
-{
- struct list_head *m_list = prestera_acl_rule_match_list_get(rule);
- struct prestera_acl_rule_match_entry *m_entry;
- int i = 0;
-
- list_for_each_entry(m_entry, m_list, list) {
- match[i].type = __cpu_to_le32(m_entry->type);
-
- switch (m_entry->type) {
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID:
- match[i].keymask.u16.key =
- __cpu_to_le16(m_entry->keymask.u16.key);
- match[i].keymask.u16.mask =
- __cpu_to_le16(m_entry->keymask.u16.mask);
- break;
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO:
- match[i].keymask.u8.key = m_entry->keymask.u8.key;
- match[i].keymask.u8.mask = m_entry->keymask.u8.mask;
- break;
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC:
- memcpy(match[i].keymask.mac.key,
- m_entry->keymask.mac.key,
- sizeof(match[i].keymask.mac.key));
- memcpy(match[i].keymask.mac.mask,
- m_entry->keymask.mac.mask,
- sizeof(match[i].keymask.mac.mask));
- break;
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST:
- match[i].keymask.u32.key =
- __cpu_to_le32(m_entry->keymask.u32.key);
- match[i].keymask.u32.mask =
- __cpu_to_le32(m_entry->keymask.u32.mask);
- break;
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT:
- match[i].keymask.u64.key =
- __cpu_to_le64(m_entry->keymask.u64.key);
- match[i].keymask.u64.mask =
- __cpu_to_le64(m_entry->keymask.u64.mask);
- break;
- default:
- return -EINVAL;
- }
-
- i++;
+ switch (info->id) {
+ case PRESTERA_ACL_RULE_ACTION_ACCEPT:
+ case PRESTERA_ACL_RULE_ACTION_DROP:
+ case PRESTERA_ACL_RULE_ACTION_TRAP:
+ /* just rule action id, no specific data */
+ break;
+ case PRESTERA_ACL_RULE_ACTION_COUNT:
+ action->count.id = __cpu_to_le32(info->count.id);
+ break;
+ default:
+ return -EINVAL;
}
return 0;
}
-int prestera_hw_acl_rule_add(struct prestera_switch *sw,
- struct prestera_acl_rule *rule,
- u32 *rule_id)
+int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
+ u32 vtcam_id, u32 prio, void *key, void *keymask,
+ struct prestera_acl_hw_action_info *act,
+ u8 n_act, u32 *rule_id)
{
- struct prestera_msg_acl_action *actions;
- struct prestera_msg_acl_match *matches;
- struct prestera_msg_acl_rule_resp resp;
- struct prestera_msg_acl_rule_req *req;
- u8 n_actions;
- u8 n_matches;
+ struct prestera_msg_acl_action *actions_msg;
+ struct prestera_msg_vtcam_rule_add_req *req;
+ struct prestera_msg_vtcam_resp resp;
void *buff;
u32 size;
int err;
+ u8 i;
- n_actions = prestera_acl_rule_action_len(rule);
- n_matches = prestera_acl_rule_match_len(rule);
-
- size = sizeof(*req) + sizeof(*actions) * n_actions +
- sizeof(*matches) * n_matches;
+ size = sizeof(*req) + sizeof(*actions_msg) * n_act;
buff = kzalloc(size, GFP_KERNEL);
if (!buff)
return -ENOMEM;
req = buff;
- actions = buff + sizeof(*req);
- matches = buff + sizeof(*req) + sizeof(*actions) * n_actions;
-
- /* put acl actions into the message */
- err = prestera_hw_acl_actions_put(actions, rule);
- if (err)
- goto free_buff;
+ req->n_act = __cpu_to_le32(n_act);
+ actions_msg = buff + sizeof(*req);
/* put acl matches into the message */
- err = prestera_hw_acl_matches_put(matches, rule);
- if (err)
- goto free_buff;
+ memcpy(req->key, key, sizeof(req->key));
+ memcpy(req->keymask, keymask, sizeof(req->keymask));
- req->ruleset_id = __cpu_to_le16(prestera_acl_rule_ruleset_id_get(rule));
- req->priority = __cpu_to_le32(prestera_acl_rule_priority_get(rule));
- req->n_actions = prestera_acl_rule_action_len(rule);
- req->n_matches = prestera_acl_rule_match_len(rule);
+ /* put acl actions into the message */
+ for (i = 0; i < n_act; i++) {
+ err = prestera_acl_rule_add_put_action(&actions_msg[i],
+ &act[i]);
+ if (err)
+ goto free_buff;
+ }
- err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_ADD,
+ req->vtcam_id = __cpu_to_le32(vtcam_id);
+ req->prio = __cpu_to_le32(prio);
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_ADD,
&req->cmd, size, &resp.ret, sizeof(resp));
if (err)
goto free_buff;
- *rule_id = __le32_to_cpu(resp.id);
+ *rule_id = __le32_to_cpu(resp.rule_id);
free_buff:
kfree(buff);
return err;
}
-int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id)
+int prestera_hw_vtcam_rule_del(struct prestera_switch *sw,
+ u32 vtcam_id, u32 rule_id)
{
- struct prestera_msg_acl_rule_req req = {
+ struct prestera_msg_vtcam_rule_del_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
.id = __cpu_to_le32(rule_id)
};
- return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULE_DELETE,
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE,
&req.cmd, sizeof(req));
}
-int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id,
- u64 *packets, u64 *bytes)
+int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id, u16 pcl_id)
{
- struct prestera_msg_acl_rule_stats_resp resp;
- struct prestera_msg_acl_rule_req req = {
- .id = __cpu_to_le32(rule_id)
+ struct prestera_msg_vtcam_bind_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
+ .type = __cpu_to_le16(iface->type),
+ .pcl_id = __cpu_to_le16(pcl_id)
};
- int err;
-
- err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- *packets = __le64_to_cpu(resp.packets);
- *bytes = __le64_to_cpu(resp.bytes);
-
- return 0;
-}
-int prestera_hw_acl_port_bind(const struct prestera_port *port, u16 ruleset_id)
-{
- struct prestera_msg_acl_ruleset_bind_req req = {
- .port = __cpu_to_le32(port->hw_id),
- .dev = __cpu_to_le32(port->dev_id),
- .ruleset_id = __cpu_to_le16(ruleset_id),
- };
+ if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) {
+ req.port.dev_id = __cpu_to_le32(iface->port->dev_id);
+ req.port.hw_id = __cpu_to_le32(iface->port->hw_id);
+ } else {
+ req.index = __cpu_to_le32(iface->index);
+ }
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_BIND,
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND,
&req.cmd, sizeof(req));
}
-int prestera_hw_acl_port_unbind(const struct prestera_port *port,
- u16 ruleset_id)
+int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id)
{
- struct prestera_msg_acl_ruleset_bind_req req = {
- .port = __cpu_to_le32(port->hw_id),
- .dev = __cpu_to_le32(port->dev_id),
- .ruleset_id = __cpu_to_le16(ruleset_id),
+ struct prestera_msg_vtcam_bind_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
+ .type = __cpu_to_le16(iface->type)
};
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_UNBIND,
+ if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) {
+ req.port.dev_id = __cpu_to_le32(iface->port->dev_id);
+ req.port.hw_id = __cpu_to_le32(iface->port->hw_id);
+ } else {
+ req.index = __cpu_to_le32(iface->index);
+ }
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND,
&req.cmd, sizeof(req));
}
@@ -1796,6 +1806,91 @@ int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id)
&req.cmd, sizeof(req));
}
+static int prestera_iface_to_msg(struct prestera_iface *iface,
+ struct prestera_msg_iface *msg_if)
+{
+ switch (iface->type) {
+ case PRESTERA_IF_PORT_E:
+ case PRESTERA_IF_VID_E:
+ msg_if->port = __cpu_to_le32(iface->dev_port.port_num);
+ msg_if->dev = __cpu_to_le32(iface->dev_port.hw_dev_num);
+ break;
+ case PRESTERA_IF_LAG_E:
+ msg_if->lag_id = __cpu_to_le16(iface->lag_id);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msg_if->vr_id = __cpu_to_le16(iface->vr_id);
+ msg_if->vid = __cpu_to_le16(iface->vlan_id);
+ msg_if->type = iface->type;
+ return 0;
+}
+
+int prestera_hw_rif_create(struct prestera_switch *sw,
+ struct prestera_iface *iif, u8 *mac, u16 *rif_id)
+{
+ struct prestera_msg_rif_resp resp;
+ struct prestera_msg_rif_req req;
+ int err;
+
+ memcpy(req.mac, mac, ETH_ALEN);
+
+ err = prestera_iface_to_msg(iif, &req.iif);
+ if (err)
+ return err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *rif_id = __le16_to_cpu(resp.rif_id);
+ return err;
+}
+
+int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
+ struct prestera_iface *iif)
+{
+ struct prestera_msg_rif_req req = {
+ .rif_id = __cpu_to_le16(rif_id),
+ };
+ int err;
+
+ err = prestera_iface_to_msg(iif, &req.iif);
+ if (err)
+ return err;
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id)
+{
+ struct prestera_msg_vr_resp resp;
+ struct prestera_msg_vr_req req;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *vr_id = __le16_to_cpu(resp.vr_id);
+ return err;
+}
+
+int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id)
+{
+ struct prestera_msg_vr_req req = {
+ .vr_id = __cpu_to_le16(vr_id),
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_VR_DELETE, &req.cmd,
+ sizeof(req));
+}
+
int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params)
{
@@ -1916,3 +2011,100 @@ void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
list_del_rcu(&eh->list);
kfree_rcu(eh, rcu);
}
+
+int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id)
+{
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(block_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_TRIGGER,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_counter_abort(struct prestera_switch *sw)
+{
+ struct prestera_msg_counter_req req;
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_ABORT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx,
+ u32 *len, bool *done,
+ struct prestera_counter_stats *stats)
+{
+ struct prestera_msg_counter_resp *resp;
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(idx),
+ .num_counters = __cpu_to_le32(*len),
+ };
+ size_t size = struct_size(resp, stats, *len);
+ int err, i;
+
+ resp = kmalloc(size, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_GET,
+ &req.cmd, sizeof(req), &resp->ret, size);
+ if (err)
+ goto free_buff;
+
+ for (i = 0; i < __le32_to_cpu(resp->num_counters); i++) {
+ stats[i].packets += __le64_to_cpu(resp->stats[i].packets);
+ stats[i].bytes += __le64_to_cpu(resp->stats[i].bytes);
+ }
+
+ *len = __le32_to_cpu(resp->num_counters);
+ *done = __le32_to_cpu(resp->done);
+
+free_buff:
+ kfree(resp);
+ return err;
+}
+
+int prestera_hw_counter_block_get(struct prestera_switch *sw,
+ u32 client, u32 *block_id, u32 *offset,
+ u32 *num_counters)
+{
+ struct prestera_msg_counter_resp resp;
+ struct prestera_msg_counter_req req = {
+ .client = __cpu_to_le32(client)
+ };
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *block_id = __le32_to_cpu(resp.block_id);
+ *offset = __le32_to_cpu(resp.offset);
+ *num_counters = __le32_to_cpu(resp.num_counters);
+
+ return 0;
+}
+
+int prestera_hw_counter_block_release(struct prestera_switch *sw,
+ u32 block_id)
+{
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(block_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
+ u32 counter_id)
+{
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(block_id),
+ .num_counters = __cpu_to_le32(counter_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_CLEAR,
+ &req.cmd, sizeof(req));
+}
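A usage note on prestera_hw_counters_get() above: *len carries the buffer capacity in and the number of returned entries out, stats are accumulated with += rather than overwritten, and *done marks the end of a dump. A minimal polling loop might look like the sketch below; the helper name and flow are assumptions, not part of this patch.

/* Hypothetical caller: drain one counter block, relying only on the
 * prestera_hw_counters_get() semantics described above.
 */
static int counters_gather_sketch(struct prestera_switch *sw, u32 block_id,
                                  struct prestera_counter_stats *stats,
                                  u32 capacity)
{
    bool done = false;
    int err;

    err = prestera_hw_counter_trigger(sw, block_id);
    if (err)
        return err;

    while (!done) {
        u32 len = capacity;    /* in: capacity, out: entries returned */

        err = prestera_hw_counters_get(sw, block_id, &len, &done, stats);
        if (err)
            return err;
    }
    return 0;
}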
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index 57a3c2e5b112..3ff12bae5909 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -5,6 +5,7 @@
#define _PRESTERA_HW_H_
#include <linux/types.h>
+#include "prestera_acl.h"
enum prestera_accept_frm_type {
PRESTERA_ACCEPT_FRAME_TYPE_TAGGED,
@@ -111,18 +112,32 @@ enum prestera_hw_cpu_code_cnt_t {
PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1,
};
+enum prestera_hw_vtcam_direction_t {
+ PRESTERA_HW_VTCAM_DIR_INGRESS = 0,
+ PRESTERA_HW_VTCAM_DIR_EGRESS = 1,
+};
+
+enum {
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0 = 0,
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1 = 1,
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2 = 2,
+};
+
struct prestera_switch;
struct prestera_port;
struct prestera_port_stats;
struct prestera_port_caps;
enum prestera_event_type;
struct prestera_event;
-struct prestera_acl_rule;
typedef void (*prestera_event_cb_t)
(struct prestera_switch *sw, struct prestera_event *evt, void *arg);
struct prestera_rxtx_params;
+struct prestera_acl_hw_action_info;
+struct prestera_acl_iface;
+struct prestera_counter_stats;
+struct prestera_iface;
/* Switch API */
int prestera_hw_switch_init(struct prestera_switch *sw);
@@ -186,21 +201,37 @@ int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id);
int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id);
int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id);
-/* ACL API */
-int prestera_hw_acl_ruleset_create(struct prestera_switch *sw,
- u16 *ruleset_id);
-int prestera_hw_acl_ruleset_del(struct prestera_switch *sw,
- u16 ruleset_id);
-int prestera_hw_acl_rule_add(struct prestera_switch *sw,
- struct prestera_acl_rule *rule,
- u32 *rule_id);
-int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id);
-int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw,
- u32 rule_id, u64 *packets, u64 *bytes);
-int prestera_hw_acl_port_bind(const struct prestera_port *port,
- u16 ruleset_id);
-int prestera_hw_acl_port_unbind(const struct prestera_port *port,
- u16 ruleset_id);
+/* vTCAM API */
+int prestera_hw_vtcam_create(struct prestera_switch *sw,
+ u8 lookup, const u32 *keymask, u32 *vtcam_id,
+ enum prestera_hw_vtcam_direction_t direction);
+int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, u32 vtcam_id,
+ u32 prio, void *key, void *keymask,
+ struct prestera_acl_hw_action_info *act,
+ u8 n_act, u32 *rule_id);
+int prestera_hw_vtcam_rule_del(struct prestera_switch *sw,
+ u32 vtcam_id, u32 rule_id);
+int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id);
+int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id, u16 pcl_id);
+int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id);
+
+/* Counter API */
+int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id);
+int prestera_hw_counter_abort(struct prestera_switch *sw);
+int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx,
+ u32 *len, bool *done,
+ struct prestera_counter_stats *stats);
+int prestera_hw_counter_block_get(struct prestera_switch *sw,
+ u32 client, u32 *block_id, u32 *offset,
+ u32 *num_counters);
+int prestera_hw_counter_block_release(struct prestera_switch *sw,
+ u32 block_id);
+int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
+ u32 counter_id);
/* SPAN API */
int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id);
@@ -208,6 +239,16 @@ int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id);
int prestera_hw_span_unbind(const struct prestera_port *port);
int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id);
+/* Router API */
+int prestera_hw_rif_create(struct prestera_switch *sw,
+ struct prestera_iface *iif, u8 *mac, u16 *rif_id);
+int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
+ struct prestera_iface *iif);
+
+/* Virtual Router API */
+int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id);
+int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id);
+
/* Event handlers */
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
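The vTCAM API declared above is meant to be used in a fixed order: create a vTCAM, bind it to an interface, then add rules; teardown runs in reverse. A hedged sketch of a hypothetical caller (the lookup, pcl_id and prio values are placeholders, and the function itself is not in this patch):

static int vtcam_usage_sketch(struct prestera_switch *sw,
                              struct prestera_acl_iface *iface,
                              u32 *key, u32 *keymask,
                              struct prestera_acl_hw_action_info *act)
{
    u32 vtcam_id, rule_id;
    int err;

    err = prestera_hw_vtcam_create(sw, 0 /* lookup */, keymask, &vtcam_id,
                                   PRESTERA_HW_VTCAM_DIR_INGRESS);
    if (err)
        return err;

    err = prestera_hw_vtcam_iface_bind(sw, iface, vtcam_id, 1 /* pcl_id */);
    if (err)
        goto err_bind;

    err = prestera_hw_vtcam_rule_add(sw, vtcam_id, 0 /* prio */,
                                     key, keymask, act, 1, &rule_id);
    if (err)
        goto err_rule_add;

    /* ... traffic is now matched; tear down in reverse order ... */
    prestera_hw_vtcam_rule_del(sw, vtcam_id, rule_id);
err_rule_add:
    prestera_hw_vtcam_iface_unbind(sw, iface, vtcam_id);
err_bind:
    prestera_hw_vtcam_destroy(sw, vtcam_id);
    return err;
}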
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 4369a3ffad45..cad93f747d0c 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -18,6 +18,7 @@
#include "prestera_rxtx.h"
#include "prestera_devlink.h"
#include "prestera_ethtool.h"
+#include "prestera_counter.h"
#include "prestera_switchdev.h"
#define PRESTERA_MTU_DEFAULT 1536
@@ -54,12 +55,14 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
u32 dev_id, u32 hw_id)
{
- struct prestera_port *port = NULL;
+ struct prestera_port *port = NULL, *tmp;
read_lock(&sw->port_list_lock);
- list_for_each_entry(port, &sw->port_list, list) {
- if (port->dev_id == dev_id && port->hw_id == hw_id)
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) {
+ port = tmp;
break;
+ }
}
read_unlock(&sw->port_list_lock);
@@ -68,12 +71,14 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
{
- struct prestera_port *port = NULL;
+ struct prestera_port *port = NULL, *tmp;
read_lock(&sw->port_list_lock);
- list_for_each_entry(port, &sw->port_list, list) {
- if (port->id == id)
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->id == id) {
+ port = tmp;
break;
+ }
}
read_unlock(&sw->port_list_lock);
@@ -158,7 +163,7 @@ static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
return prestera_rxtx_xmit(netdev_priv(dev), skb);
}
-static int prestera_is_valid_mac_addr(struct prestera_port *port, u8 *addr)
+int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr)
{
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
@@ -764,23 +769,27 @@ static int prestera_netdev_port_event(struct net_device *lower,
struct net_device *dev,
unsigned long event, void *ptr)
{
- struct netdev_notifier_changeupper_info *info = ptr;
+ struct netdev_notifier_info *info = ptr;
+ struct netdev_notifier_changeupper_info *cu_info;
struct prestera_port *port = netdev_priv(dev);
struct netlink_ext_ack *extack;
struct net_device *upper;
- extack = netdev_notifier_info_to_extack(&info->info);
- upper = info->upper_dev;
+ extack = netdev_notifier_info_to_extack(info);
+ cu_info = container_of(info,
+ struct netdev_notifier_changeupper_info,
+ info);
switch (event) {
case NETDEV_PRECHANGEUPPER:
+ upper = cu_info->upper_dev;
if (!netif_is_bridge_master(upper) &&
!netif_is_lag_master(upper)) {
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
- if (!info->linking)
+ if (!cu_info->linking)
break;
if (netdev_has_any_upper_dev(upper)) {
@@ -789,7 +798,7 @@ static int prestera_netdev_port_event(struct net_device *lower,
}
if (netif_is_lag_master(upper) &&
- !prestera_lag_master_check(upper, info->upper_info, extack))
+ !prestera_lag_master_check(upper, cu_info->upper_info, extack))
return -EOPNOTSUPP;
if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -805,14 +814,15 @@ static int prestera_netdev_port_event(struct net_device *lower,
break;
case NETDEV_CHANGEUPPER:
+ upper = cu_info->upper_dev;
if (netif_is_bridge_master(upper)) {
- if (info->linking)
+ if (cu_info->linking)
return prestera_bridge_port_join(upper, port,
extack);
else
prestera_bridge_port_leave(upper, port);
} else if (netif_is_lag_master(upper)) {
- if (info->linking)
+ if (cu_info->linking)
return prestera_lag_port_add(port, upper);
else
prestera_lag_port_del(port);
@@ -892,6 +902,10 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
return err;
+ err = prestera_router_init(sw);
+ if (err)
+ goto err_router_init;
+
err = prestera_switchdev_init(sw);
if (err)
goto err_swdev_register;
@@ -904,6 +918,10 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
goto err_handlers_register;
+ err = prestera_counter_init(sw);
+ if (err)
+ goto err_counter_init;
+
err = prestera_acl_init(sw);
if (err)
goto err_acl_init;
@@ -936,12 +954,16 @@ err_dl_register:
err_span_init:
prestera_acl_fini(sw);
err_acl_init:
+ prestera_counter_fini(sw);
+err_counter_init:
prestera_event_handlers_unregister(sw);
err_handlers_register:
prestera_rxtx_switch_fini(sw);
err_rxtx_register:
prestera_switchdev_fini(sw);
err_swdev_register:
+ prestera_router_fini(sw);
+err_router_init:
prestera_netdev_event_handler_unregister(sw);
prestera_hw_switch_fini(sw);
@@ -956,9 +978,11 @@ static void prestera_switch_fini(struct prestera_switch *sw)
prestera_devlink_traps_unregister(sw);
prestera_span_fini(sw);
prestera_acl_fini(sw);
+ prestera_counter_fini(sw);
prestera_event_handlers_unregister(sw);
prestera_rxtx_switch_fini(sw);
prestera_switchdev_fini(sw);
+ prestera_router_fini(sw);
prestera_netdev_event_handler_unregister(sw);
prestera_hw_switch_fini(sw);
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
new file mode 100644
index 000000000000..6ef4d32b8fdd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/inetdevice.h>
+#include <net/switchdev.h>
+
+#include "prestera.h"
+#include "prestera_router_hw.h"
+
+/* Map the kernel's reserved routing tables onto the default VR used in hw */
+static u32 prestera_fix_tb_id(u32 tb_id)
+{
+ if (tb_id == RT_TABLE_UNSPEC ||
+ tb_id == RT_TABLE_LOCAL ||
+ tb_id == RT_TABLE_DEFAULT)
+ tb_id = RT_TABLE_MAIN;
+
+ return tb_id;
+}
+
+static int __prestera_inetaddr_port_event(struct net_device *port_dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port *port = netdev_priv(port_dev);
+ struct prestera_rif_entry_key re_key = {};
+ struct prestera_rif_entry *re;
+ u32 kern_tb_id;
+ int err;
+
+ err = prestera_is_valid_mac_addr(port, port_dev->dev_addr);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "RIF MAC must have the same prefix");
+ return err;
+ }
+
+ kern_tb_id = l3mdev_fib_table(port_dev);
+ re_key.iface.type = PRESTERA_IF_PORT_E;
+ re_key.iface.dev_port.hw_dev_num = port->dev_id;
+ re_key.iface.dev_port.port_num = port->hw_id;
+ re = prestera_rif_entry_find(port->sw, &re_key);
+
+ switch (event) {
+ case NETDEV_UP:
+ if (re) {
+ NL_SET_ERR_MSG_MOD(extack, "RIF already exist");
+ return -EEXIST;
+ }
+ re = prestera_rif_entry_create(port->sw, &re_key,
+ prestera_fix_tb_id(kern_tb_id),
+ port_dev->dev_addr);
+ if (!re) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't create RIF");
+ return -EINVAL;
+ }
+ dev_hold(port_dev);
+ break;
+ case NETDEV_DOWN:
+ if (!re) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't find RIF");
+ return -EEXIST;
+ }
+ prestera_rif_entry_destroy(port->sw, re);
+ dev_put(port_dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int __prestera_inetaddr_event(struct prestera_switch *sw,
+ struct net_device *dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ if (!prestera_netdev_check(dev) || netif_is_bridge_port(dev) ||
+ netif_is_lag_port(dev) || netif_is_ovs_port(dev))
+ return 0;
+
+ return __prestera_inetaddr_port_event(dev, event, extack);
+}
+
+static int __prestera_inetaddr_cb(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct prestera_router *router = container_of(nb,
+ struct prestera_router,
+ inetaddr_nb);
+ struct in_device *idev;
+ int err = 0;
+
+ if (event != NETDEV_DOWN)
+ goto out;
+
+ /* Ignore if this is not the last address */
+ idev = __in_dev_get_rtnl(dev);
+ if (idev && idev->ifa_list)
+ goto out;
+
+ err = __prestera_inetaddr_event(router->sw, dev, event, NULL);
+out:
+ return notifier_from_errno(err);
+}
+
+static int __prestera_inetaddr_valid_cb(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct in_validator_info *ivi = (struct in_validator_info *)ptr;
+ struct net_device *dev = ivi->ivi_dev->dev;
+ struct prestera_router *router = container_of(nb,
+ struct prestera_router,
+ inetaddr_valid_nb);
+ struct in_device *idev;
+ int err = 0;
+
+ if (event != NETDEV_UP)
+ goto out;
+
+ /* Ignore if this is not the first address */
+ idev = __in_dev_get_rtnl(dev);
+ if (idev && idev->ifa_list)
+ goto out;
+
+ if (ipv4_is_multicast(ivi->ivi_addr)) {
+ NL_SET_ERR_MSG_MOD(ivi->extack,
+ "Multicast addr on RIF is not supported");
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = __prestera_inetaddr_event(router->sw, dev, event, ivi->extack);
+out:
+ return notifier_from_errno(err);
+}
+
+int prestera_router_init(struct prestera_switch *sw)
+{
+ struct prestera_router *router;
+ int err;
+
+ router = kzalloc(sizeof(*sw->router), GFP_KERNEL);
+ if (!router)
+ return -ENOMEM;
+
+ sw->router = router;
+ router->sw = sw;
+
+ err = prestera_router_hw_init(sw);
+ if (err)
+ goto err_router_lib_init;
+
+ router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
+ err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
+ if (err)
+ goto err_register_inetaddr_validator_notifier;
+
+ router->inetaddr_nb.notifier_call = __prestera_inetaddr_cb;
+ err = register_inetaddr_notifier(&router->inetaddr_nb);
+ if (err)
+ goto err_register_inetaddr_notifier;
+
+ return 0;
+
+err_register_inetaddr_notifier:
+ unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
+err_register_inetaddr_validator_notifier:
+ prestera_router_hw_fini(sw);
+err_router_lib_init:
+ kfree(sw->router);
+ return err;
+}
+
+void prestera_router_fini(struct prestera_switch *sw)
+{
+ unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
+ unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
+ prestera_router_hw_fini(sw);
+ kfree(sw->router);
+ sw->router = NULL;
+}
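The effect of prestera_fix_tb_id() above can be summarized with a few illustrative inputs (return values shown as comments; this is a note, not driver code):

/* Reserved kernel tables collapse onto the hardware's MAIN table;
 * user-defined table ids pass through untouched.
 *
 *   prestera_fix_tb_id(RT_TABLE_UNSPEC)  -> RT_TABLE_MAIN
 *   prestera_fix_tb_id(RT_TABLE_LOCAL)   -> RT_TABLE_MAIN
 *   prestera_fix_tb_id(RT_TABLE_DEFAULT) -> RT_TABLE_MAIN
 *   prestera_fix_tb_id(RT_TABLE_MAIN)    -> RT_TABLE_MAIN
 *   prestera_fix_tb_id(100)              -> 100
 */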
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
new file mode 100644
index 000000000000..e5592b69ad37
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */
+
+#include <linux/rhashtable.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_router_hw.h"
+#include "prestera_acl.h"
+
+/* +--+
+ * +------->|vr|
+ * | +--+
+ * |
+ * +-+-------+
+ * |rif_entry|
+ * +---------+
+ * Rif is
+ * used as
+ * entry point
+ * for vr in hw
+ */
+
+int prestera_router_hw_init(struct prestera_switch *sw)
+{
+ INIT_LIST_HEAD(&sw->router->vr_list);
+ INIT_LIST_HEAD(&sw->router->rif_entry_list);
+
+ return 0;
+}
+
+void prestera_router_hw_fini(struct prestera_switch *sw)
+{
+ WARN_ON(!list_empty(&sw->router->vr_list));
+ WARN_ON(!list_empty(&sw->router->rif_entry_list));
+}
+
+static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw,
+ u32 tb_id)
+{
+ struct prestera_vr *vr;
+
+ list_for_each_entry(vr, &sw->router->vr_list, router_node) {
+ if (vr->tb_id == tb_id)
+ return vr;
+ }
+
+ return NULL;
+}
+
+static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw,
+ u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_vr *vr;
+ int err;
+
+ vr = kzalloc(sizeof(*vr), GFP_KERNEL);
+ if (!vr) {
+ err = -ENOMEM;
+ goto err_alloc_vr;
+ }
+
+ vr->tb_id = tb_id;
+
+ err = prestera_hw_vr_create(sw, &vr->hw_vr_id);
+ if (err)
+ goto err_hw_create;
+
+ list_add(&vr->router_node, &sw->router->vr_list);
+
+ return vr;
+
+err_hw_create:
+ kfree(vr);
+err_alloc_vr:
+ return ERR_PTR(err);
+}
+
+static void __prestera_vr_destroy(struct prestera_switch *sw,
+ struct prestera_vr *vr)
+{
+ list_del(&vr->router_node);
+ prestera_hw_vr_delete(sw, vr->hw_vr_id);
+ kfree(vr);
+}
+
+static struct prestera_vr *prestera_vr_get(struct prestera_switch *sw, u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_vr *vr;
+
+ vr = __prestera_vr_find(sw, tb_id);
+ if (vr) {
+ refcount_inc(&vr->refcount);
+ } else {
+ vr = __prestera_vr_create(sw, tb_id, extack);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ refcount_set(&vr->refcount, 1);
+ }
+
+ return vr;
+}
+
+static void prestera_vr_put(struct prestera_switch *sw, struct prestera_vr *vr)
+{
+ if (refcount_dec_and_test(&vr->refcount))
+ __prestera_vr_destroy(sw, vr);
+}
+
+/* iface is an overhead struct; vr_id could also be removed */
+static int
+__prestera_rif_entry_key_copy(const struct prestera_rif_entry_key *in,
+ struct prestera_rif_entry_key *out)
+{
+ memset(out, 0, sizeof(*out));
+
+ switch (in->iface.type) {
+ case PRESTERA_IF_PORT_E:
+ out->iface.dev_port.hw_dev_num = in->iface.dev_port.hw_dev_num;
+ out->iface.dev_port.port_num = in->iface.dev_port.port_num;
+ break;
+ case PRESTERA_IF_LAG_E:
+ out->iface.lag_id = in->iface.lag_id;
+ break;
+ case PRESTERA_IF_VID_E:
+ out->iface.vlan_id = in->iface.vlan_id;
+ break;
+ default:
+ WARN(1, "Unsupported iface type");
+ return -EINVAL;
+ }
+
+ out->iface.type = in->iface.type;
+ return 0;
+}
+
+struct prestera_rif_entry *
+prestera_rif_entry_find(const struct prestera_switch *sw,
+ const struct prestera_rif_entry_key *k)
+{
+ struct prestera_rif_entry *rif_entry;
+ struct prestera_rif_entry_key lk; /* lookup key */
+
+ if (__prestera_rif_entry_key_copy(k, &lk))
+ return NULL;
+
+ list_for_each_entry(rif_entry, &sw->router->rif_entry_list,
+ router_node) {
+ if (!memcmp(&lk, &rif_entry->key, sizeof(lk)))
+ return rif_entry;
+ }
+
+ return NULL;
+}
+
+void prestera_rif_entry_destroy(struct prestera_switch *sw,
+ struct prestera_rif_entry *e)
+{
+ struct prestera_iface iface;
+
+ list_del(&e->router_node);
+
+ memcpy(&iface, &e->key.iface, sizeof(iface));
+ iface.vr_id = e->vr->hw_vr_id;
+ prestera_hw_rif_delete(sw, e->hw_id, &iface);
+
+ prestera_vr_put(sw, e->vr);
+ kfree(e);
+}
+
+struct prestera_rif_entry *
+prestera_rif_entry_create(struct prestera_switch *sw,
+ struct prestera_rif_entry_key *k,
+ u32 tb_id, const unsigned char *addr)
+{
+ int err;
+ struct prestera_rif_entry *e;
+ struct prestera_iface iface;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ goto err_kzalloc;
+
+ if (__prestera_rif_entry_key_copy(k, &e->key))
+ goto err_key_copy;
+
+ e->vr = prestera_vr_get(sw, tb_id, NULL);
+ if (IS_ERR(e->vr))
+ goto err_vr_get;
+
+ memcpy(&e->addr, addr, sizeof(e->addr));
+
+ /* HW */
+ memcpy(&iface, &e->key.iface, sizeof(iface));
+ iface.vr_id = e->vr->hw_vr_id;
+ err = prestera_hw_rif_create(sw, &iface, e->addr, &e->hw_id);
+ if (err)
+ goto err_hw_create;
+
+ list_add(&e->router_node, &sw->router->rif_entry_list);
+
+ return e;
+
+err_hw_create:
+ prestera_vr_put(sw, e->vr);
+err_vr_get:
+err_key_copy:
+ kfree(e);
+err_kzalloc:
+ return NULL;
+}
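One consequence of the VR refcounting above, shown as a hedged sketch: two RIF entries created in the same (fixed-up) routing table share a single hardware virtual router, which is only deleted when the last user goes away. The variables sw, k1/k2 and mac1/mac2 are assumed to exist; none of this is driver code.

/* Illustrative only: both creates resolve RT_TABLE_MAIN to the same
 * struct prestera_vr via prestera_vr_get().
 */
struct prestera_rif_entry *e1, *e2;

e1 = prestera_rif_entry_create(sw, &k1, RT_TABLE_MAIN, mac1); /* vr refcount 1 */
e2 = prestera_rif_entry_create(sw, &k2, RT_TABLE_MAIN, mac2); /* vr refcount 2 */
prestera_rif_entry_destroy(sw, e1);  /* vr refcount 1, hw VR stays   */
prestera_rif_entry_destroy(sw, e2);  /* vr refcount 0, hw VR deleted */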
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
new file mode 100644
index 000000000000..b6b028551868
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_ROUTER_HW_H_
+#define _PRESTERA_ROUTER_HW_H_
+
+struct prestera_vr {
+ struct list_head router_node;
+ refcount_t refcount;
+ u32 tb_id; /* key (kernel fib table id) */
+ u16 hw_vr_id; /* virtual router ID */
+ u8 __pad[2];
+};
+
+struct prestera_rif_entry {
+ struct prestera_rif_entry_key {
+ struct prestera_iface iface;
+ } key;
+ struct prestera_vr *vr;
+ unsigned char addr[ETH_ALEN];
+ u16 hw_id; /* rif_id */
+ struct list_head router_node; /* ht */
+};
+
+struct prestera_rif_entry *
+prestera_rif_entry_find(const struct prestera_switch *sw,
+ const struct prestera_rif_entry_key *k);
+void prestera_rif_entry_destroy(struct prestera_switch *sw,
+ struct prestera_rif_entry *e);
+struct prestera_rif_entry *
+prestera_rif_entry_create(struct prestera_switch *sw,
+ struct prestera_rif_entry_key *k,
+ u32 tb_id, const unsigned char *addr);
+int prestera_router_hw_init(struct prestera_switch *sw);
+void prestera_router_hw_fini(struct prestera_switch *sw);
+
+#endif /* _PRESTERA_ROUTER_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c
index 3cafca827bb7..845e9d8c8cc7 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_span.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c
@@ -7,6 +7,7 @@
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_acl.h"
+#include "prestera_flow.h"
#include "prestera_span.h"
struct prestera_span_entry {
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 1d607bc6b59e..52bef50f5a0d 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1388,7 +1388,6 @@ static int pxa168_eth_probe(struct platform_device *pdev)
{
struct pxa168_eth_private *pep = NULL;
struct net_device *dev = NULL;
- struct resource *res;
struct clk *clk;
struct device_node *np;
int err;
@@ -1419,9 +1418,11 @@ static int pxa168_eth_probe(struct platform_device *pdev)
goto err_netdev;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- BUG_ON(!res);
- dev->irq = res->start;
+ err = platform_get_irq(pdev, 0);
+ if (err == -EPROBE_DEFER)
+ goto err_netdev;
+ BUG_ON(dev->irq < 0);
+ dev->irq = err;
dev->netdev_ops = &pxa168_eth_netdev_ops;
dev->watchdog_timeo = 2 * HZ;
dev->base_addr = 0;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 0c864e5bf0a6..cf03c67fbf40 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -492,7 +492,9 @@ static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
static void skge_get_ring_param(struct net_device *dev,
- struct ethtool_ringparam *p)
+ struct ethtool_ringparam *p,
+ struct kernel_ethtool_ringparam *kernel_p,
+ struct netlink_ext_ack *extack)
{
struct skge_port *skge = netdev_priv(dev);
@@ -504,7 +506,9 @@ static void skge_get_ring_param(struct net_device *dev,
}
static int skge_set_ring_param(struct net_device *dev,
- struct ethtool_ringparam *p)
+ struct ethtool_ringparam *p,
+ struct kernel_ethtool_ringparam *kernel_p,
+ struct netlink_ext_ack *extack)
{
struct skge_port *skge = netdev_priv(dev);
int err = 0;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 28b5b9341145..ea16b1dd6a98 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4149,7 +4149,9 @@ static unsigned long roundup_ring_size(unsigned long pending)
}
static void sky2_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct sky2_port *sky2 = netdev_priv(dev);
@@ -4161,7 +4163,9 @@ static void sky2_get_ringparam(struct net_device *dev,
}
static int sky2_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct sky2_port *sky2 = netdev_priv(dev);
@@ -4266,96 +4270,36 @@ static int sky2_get_eeprom_len(struct net_device *dev)
return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
}
-static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
-{
- unsigned long start = jiffies;
-
- while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
- /* Can take up to 10.6 ms for write */
- if (time_after(jiffies, start + HZ/4)) {
- dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
- return -ETIMEDOUT;
- }
- msleep(1);
- }
-
- return 0;
-}
-
-static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data,
- u16 offset, size_t length)
-{
- int rc = 0;
-
- while (length > 0) {
- u32 val;
-
- sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
- rc = sky2_vpd_wait(hw, cap, 0);
- if (rc)
- break;
-
- val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
-
- memcpy(data, &val, min(sizeof(val), length));
- offset += sizeof(u32);
- data += sizeof(u32);
- length -= sizeof(u32);
- }
-
- return rc;
-}
-
-static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data,
- u16 offset, unsigned int length)
-{
- unsigned int i;
- int rc = 0;
-
- for (i = 0; i < length; i += sizeof(u32)) {
- u32 val = *(u32 *)(data + i);
-
- sky2_pci_write32(hw, cap + PCI_VPD_DATA, val);
- sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
-
- rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F);
- if (rc)
- break;
- }
- return rc;
-}
-
static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *data)
{
struct sky2_port *sky2 = netdev_priv(dev);
- int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
-
- if (!cap)
- return -EINVAL;
+ int rc;
eeprom->magic = SKY2_EEPROM_MAGIC;
+ rc = pci_read_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
+ data);
+ if (rc < 0)
+ return rc;
+
+ eeprom->len = rc;
- return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
+ return 0;
}
static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *data)
{
struct sky2_port *sky2 = netdev_priv(dev);
- int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
-
- if (!cap)
- return -EINVAL;
+ int rc;
if (eeprom->magic != SKY2_EEPROM_MAGIC)
return -EINVAL;
- /* Partial writes not supported */
- if ((eeprom->offset & 3) || (eeprom->len & 3))
- return -EINVAL;
+ rc = pci_write_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
+ data);
- return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
+ return rc < 0 ? rc : 0;
}
static netdev_features_t sky2_fix_features(struct net_device *dev,
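A note on the return-value convention of the VPD helpers adopted above: pci_read_vpd_any() and pci_write_vpd_any() return the number of bytes actually transferred on success, or a negative errno. That is why the handlers clamp the result, as in this sketch (pdev stands in for sky2->hw->pdev):

ssize_t rc = pci_read_vpd_any(pdev, eeprom->offset, eeprom->len, data);
if (rc < 0)
    return rc;       /* negative errno */
eeprom->len = rc;    /* may be shorter than requested */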
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index c357c193378e..86d356b4388d 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_MEDIATEK
bool "MediaTek devices"
- depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
+ depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
help
If you have a Mediatek SoC with ethernet, say Y.
@@ -10,6 +10,7 @@ if NET_VENDOR_MEDIATEK
config NET_MEDIATEK_SOC
tristate "MediaTek SoC Gigabit Ethernet support"
depends on NET_DSA || !NET_DSA
+ select PINCTRL
select PHYLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 75d67d1b5f6b..f02d07ec5ccb 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -91,46 +91,96 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth)
}
dev_err(eth->dev, "mdio: MDIO timeout\n");
- return -1;
+ return -ETIMEDOUT;
}
-static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
- u32 phy_register, u32 write_data)
+static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
+ u32 write_data)
{
- if (mtk_mdio_busy_wait(eth))
- return -1;
+ int ret;
- write_data &= 0xffff;
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
- (phy_register << PHY_IAC_REG_SHIFT) |
- (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
- MTK_PHY_IAC);
+ if (phy_reg & MII_ADDR_C45) {
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
+ MTK_PHY_IAC);
+ } else {
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
+ MTK_PHY_IAC);
+ }
- if (mtk_mdio_busy_wait(eth))
- return -1;
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
return 0;
}
-static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
+static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
{
- u32 d;
-
- if (mtk_mdio_busy_wait(eth))
- return 0xffff;
+ int ret;
- mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
- (phy_reg << PHY_IAC_REG_SHIFT) |
- (phy_addr << PHY_IAC_ADDR_SHIFT),
- MTK_PHY_IAC);
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- if (mtk_mdio_busy_wait(eth))
- return 0xffff;
+ if (phy_reg & MII_ADDR_C45) {
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_READ |
+ PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
+ } else {
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_C22_READ |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
+ }
- d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- return d;
+ return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
}
static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
@@ -217,7 +267,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
phylink_config);
struct mtk_eth *eth = mac->hw;
u32 mcr_cur, mcr_new, sid, i;
- int val, ge_mode, err;
+ int val, ge_mode, err = 0;
/* MT76x8 has no hardware settings between for the MAC */
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
@@ -463,94 +513,8 @@ static void mtk_mac_link_up(struct phylink_config *config,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
-static void mtk_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
- phy_interface_mode_is_rgmii(state->interface)) &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
- !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
- (state->interface == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_8023z(state->interface)))) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_TRGMII:
- phylink_set(mask, 1000baseT_Full);
- break;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseX_Full);
- break;
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseT_Half);
- fallthrough;
- case PHY_INTERFACE_MODE_SGMII:
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- fallthrough;
- case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_RMII:
- case PHY_INTERFACE_MODE_REVMII:
- case PHY_INTERFACE_MODE_NA:
- default:
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- break;
- }
-
- if (state->interface == PHY_INTERFACE_MODE_NA) {
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseX_Full);
- }
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseX_Full);
- }
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- }
- }
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
-}
-
static const struct phylink_mac_ops mtk_phylink_ops = {
- .validate = mtk_validate,
+ .validate = phylink_generic_validate,
.mac_pcs_get_state = mtk_mac_pcs_get_state,
.mac_an_restart = mtk_mac_an_restart,
.mac_config = mtk_mac_config,
@@ -583,6 +547,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
eth->mii_bus->name = "mdio";
eth->mii_bus->read = mtk_mdio_read;
eth->mii_bus->write = mtk_mdio_write;
+ eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
eth->mii_bus->priv = eth;
eth->mii_bus->parent = eth->dev;
@@ -2297,7 +2262,6 @@ static int mtk_open(struct net_device *dev)
/* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!refcount_read(&eth->dma_refcnt)) {
u32 gdm_config = MTK_GDMA_TO_PDMA;
- int err;
err = mtk_start_dma(eth);
if (err)
@@ -3009,6 +2973,33 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.dev = &eth->netdev[id]->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
+ /* This driver makes use of state->speed/state->duplex in
+ * mac_config
+ */
+ mac->phylink_config.legacy_pre_march2020 = true;
+ mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
+ phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
+ __set_bit(PHY_INTERFACE_MODE_TRGMII,
+ mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ mac->phylink_config.supported_interfaces);
+ }
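Editor's note: the deleted mtk_validate() is replaced by phylink_generic_validate(), which derives the supported link modes from the two declarative inputs the driver now fills in: mac_capabilities and the supported_interfaces bitmap populated just above. A hedged minimal sketch of the same pattern for a hypothetical MAC:

        /* Sketch, not from this patch: declarative phylink setup that
         * lets phylink_generic_validate() do the mode filtering.
         */
        config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
                                   MAC_10 | MAC_100 | MAC_1000;
        __set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);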
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 5ef70dd8b49c..c9d42be314b5 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -341,11 +341,20 @@
/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC 0x10004
#define PHY_IAC_ACCESS BIT(31)
-#define PHY_IAC_READ BIT(19)
-#define PHY_IAC_WRITE BIT(18)
-#define PHY_IAC_START BIT(16)
-#define PHY_IAC_ADDR_SHIFT 20
-#define PHY_IAC_REG_SHIFT 25
+#define PHY_IAC_REG_MASK GENMASK(29, 25)
+#define PHY_IAC_REG(x) FIELD_PREP(PHY_IAC_REG_MASK, (x))
+#define PHY_IAC_ADDR_MASK GENMASK(24, 20)
+#define PHY_IAC_ADDR(x) FIELD_PREP(PHY_IAC_ADDR_MASK, (x))
+#define PHY_IAC_CMD_MASK GENMASK(19, 18)
+#define PHY_IAC_CMD_C45_ADDR FIELD_PREP(PHY_IAC_CMD_MASK, 0)
+#define PHY_IAC_CMD_WRITE FIELD_PREP(PHY_IAC_CMD_MASK, 1)
+#define PHY_IAC_CMD_C22_READ FIELD_PREP(PHY_IAC_CMD_MASK, 2)
+#define PHY_IAC_CMD_C45_READ FIELD_PREP(PHY_IAC_CMD_MASK, 3)
+#define PHY_IAC_START_MASK GENMASK(17, 16)
+#define PHY_IAC_START_C45 FIELD_PREP(PHY_IAC_START_MASK, 0)
+#define PHY_IAC_START_C22 FIELD_PREP(PHY_IAC_START_MASK, 1)
+#define PHY_IAC_DATA_MASK GENMASK(15, 0)
+#define PHY_IAC_DATA(x) FIELD_PREP(PHY_IAC_DATA_MASK, (x))
#define PHY_IAC_TIMEOUT HZ
#define MTK_MAC_MISC 0x1000c
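Editor's note: the open-coded shifts are replaced by GENMASK()/FIELD_PREP() field definitions, which keep the mask and the shift in one place. A hedged sketch of how a command word is assembled from them (register and address values are illustrative):

        /* Sketch only: building a Clause 22 read of register 3 on PHY 1.
         * FIELD_PREP() shifts each value into its GENMASK() field, so the
         * assembled word matches the old open-coded form.
         */
        u32 cmd = PHY_IAC_ACCESS | PHY_IAC_START_C22 | PHY_IAC_CMD_C22_READ |
                  PHY_IAC_REG(3) |      /* -> bits 29:25 */
                  PHY_IAC_ADDR(1);      /* -> bits 24:20 */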
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index 98b1d3577bcd..d4b482340cb9 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -207,9 +207,6 @@ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
struct dentry *root;
root = debugfs_create_dir("mtk_ppe", NULL);
- if (!root)
- return -ENOMEM;
-
debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 10238bedd694..ed5038d98ef6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1141,7 +1141,9 @@ static void mlx4_en_get_pauseparam(struct net_device *dev,
}
static int mlx4_en_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -1208,7 +1210,9 @@ out:
}
static void mlx4_en_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
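Editor's note: this is the tree-wide 5.17 ethtool signature change: get/set_ringparam now take a kernel_ethtool_ringparam (for netlink-only fields such as rx_buf_len) plus a netlink_ext_ack for error reporting. A hedged minimal sketch of a conforming implementation in a hypothetical driver:

        /* Sketch only: drivers that do not use the two new parameters
         * may simply ignore them, as mlx4 does here.
         */
        static void foo_get_ringparam(struct net_device *dev,
                                      struct ethtool_ringparam *ring,
                                      struct kernel_ethtool_ringparam *kernel_ring,
                                      struct netlink_ext_ack *extack)
        {
                ring->rx_max_pending = 4096;
                ring->tx_max_pending = 4096;
                ring->rx_pending = 1024;
                ring->tx_pending = 1024;
        }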
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f1c10f2bda78..c61dc7ae0c05 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -33,6 +33,7 @@
#include <linux/bpf.h>
#include <linux/etherdevice.h>
+#include <linux/filter.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
@@ -2427,10 +2428,6 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
/* device doesn't support time stamping */
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 650e6a1844ae..8cfc649f226b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -812,7 +812,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
trace_xdp_exception(dev, xdp_prog, act);
goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(dev, xdp_prog, act);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 9e48509ed3b2..414e390e6b48 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -244,9 +244,9 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
cpumask_empty(eq->affinity_mask))
return;
- hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+ hint_err = irq_update_affinity_hint(eq->irq, eq->affinity_mask);
if (hint_err)
- mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+ mlx4_warn(dev, "irq_update_affinity_hint failed, err %d\n", hint_err);
}
#endif
@@ -1123,9 +1123,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
free_cpumask_var(eq_table->eq[i].affinity_mask);
-#if defined(CONFIG_SMP)
- irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
-#endif
+ irq_update_affinity_hint(eq_table->eq[i].irq, NULL);
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
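Editor's note: irq_update_affinity_hint() records the hint published in /proc/irq/&lt;n&gt;/affinity_hint without forcing the IRQ's effective affinity, whereas the replaced irq_set_affinity_hint() did both; the conversion leaves CPU placement to userspace tools such as irqbalance. The new helper is also safe to call unconditionally, which is why the CONFIG_SMP guard above can go. A hedged sketch:

        /* Sketch only: publish a hint without overriding affinity
         * chosen by the administrator; clear it on teardown.
         */
        cpumask_set_cpu(cpu, mask);
        err = irq_update_affinity_hint(irq, mask);  /* hint only */
        if (err)
                pr_warn("affinity hint update failed: %d\n", err);

        irq_update_affinity_hint(irq, NULL);        /* clear hint */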
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 92056452a9e3..4ba1a78c6515 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -115,6 +115,7 @@ config MLX5_TC_CT
config MLX5_TC_SAMPLE
bool "MLX5 TC sample offload support"
depends on MLX5_CLS_ACT
+ depends on PSAMPLE=y || PSAMPLE=n || MLX5_CORE=m
default y
help
Say Y here if you want to support offloading sample rules via tc
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index e63bb9ceb9c0..fcfd38fa9e6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -46,6 +46,15 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
en/tc/post_act.o en/tc/int_port.o
+
+mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
+ en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
+ en/tc/act/tun.o en/tc/act/csum.o en/tc/act/pedit.o \
+ en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \
+ en/tc/act/mirred.o en/tc/act/mirred_nic.o \
+ en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
+ en/tc/act/redirect_ingress.o
+
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
@@ -95,11 +104,12 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_ste.o steering/dr_send.o \
steering/dr_ste_v0.o steering/dr_ste_v1.o \
steering/dr_cmd.o steering/dr_fw.o \
- steering/dr_action.o steering/fs_dr.o
+ steering/dr_action.o steering/fs_dr.o \
+ steering/dr_dbg.o
#
# SF device
#
-mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o
+mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o irq_affinity.o
#
# SF manager
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a46284ca5172..17fe05809653 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -148,8 +148,12 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
if (!refcount_dec_and_test(&ent->refcnt))
return;
- if (ent->idx >= 0)
- cmd_free_index(ent->cmd, ent->idx);
+ if (ent->idx >= 0) {
+ struct mlx5_cmd *cmd = ent->cmd;
+
+ cmd_free_index(cmd, ent->idx);
+ up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
+ }
cmd_free_ent(ent);
}
@@ -900,25 +904,6 @@ static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
return cmd->allowed_opcode == opcode;
}
-static int cmd_alloc_index_retry(struct mlx5_cmd *cmd)
-{
- unsigned long alloc_end = jiffies + msecs_to_jiffies(1000);
- int idx;
-
-retry:
- idx = cmd_alloc_index(cmd);
- if (idx < 0 && time_before(jiffies, alloc_end)) {
- /* Index allocation can fail on heavy load of commands. This is a temporary
- * situation as the current command already holds the semaphore, meaning that
- * another command completion is being handled and it is expected to release
- * the entry index soon.
- */
- cpu_relax();
- goto retry;
- }
- return idx;
-}
-
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
{
return pci_channel_offline(dev->pdev) ||
@@ -946,7 +931,7 @@ static void cmd_work_handler(struct work_struct *work)
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
if (!ent->page_queue) {
- alloc_ret = cmd_alloc_index_retry(cmd);
+ alloc_ret = cmd_alloc_index(cmd);
if (alloc_ret < 0) {
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
if (ent->callback) {
@@ -1602,8 +1587,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
vector = vec & 0xffffffff;
for (i = 0; i < (1 << cmd->log_sz); i++) {
if (test_bit(i, &vector)) {
- struct semaphore *sem;
-
ent = cmd->ent_arr[i];
/* if we already completed the command, ignore it */
@@ -1626,10 +1609,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
cmd_ent_put(ent);
- if (ent->page_queue)
- sem = &cmd->pages_sem;
- else
- sem = &cmd->sem;
ent->ts2 = ktime_get_ns();
memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
dump_command(dev, ent, 0);
@@ -1683,7 +1662,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
*/
complete(&ent->done);
}
- up(sem);
}
}
}
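Editor's note: the cmd.c hunks above are one logical change: releasing sem/pages_sem moves from the completion handler into cmd_ent_put(), i.e. to the same point where the entry index is freed. Holding the semaphore therefore guarantees a free index, which is what makes the deleted cmd_alloc_index_retry() busy-wait loop unnecessary. A hedged sketch of the resulting lifecycle (post_command() stands in for the real submission path):

        /* Sketch only: simplified command-entry lifecycle after this patch. */
        down(sem);                      /* blocks until a slot is free   */
        idx = cmd_alloc_index(cmd);     /* guaranteed to succeed now     */
        post_command(ent);              /* hypothetical submission step  */

        /* On the final reference drop, inside cmd_ent_put():            */
        cmd_free_index(cmd, ent->idx);
        up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);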
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index a8b84d53dfb0..ba6dad97e308 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -538,7 +538,7 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
return add_drivers(dev);
}
-static bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
+bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
{
u64 fsystem_guid, psystem_guid;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 1c98652b244a..d1093bb2d436 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -546,6 +546,13 @@ static int mlx5_devlink_enable_remote_dev_reset_get(struct devlink *devlink, u32
return 0;
}
+static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ return (val.vu16 >= 64 && val.vu16 <= 4096) ? 0 : -EINVAL;
+}
+
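Editor's note: a devlink param validate callback runs before a value is stored; returning 0 accepts it, while a negative errno (ideally with an extack message) rejects it. Here the EQ depth is constrained to the range [64, 4096]. A hedged sketch of the general contract for a hypothetical parameter:

        /* Sketch only: reject values that are not a power of two, with
         * a netlink error message for the user.
         */
        static int example_param_validate(struct devlink *devlink, u32 id,
                                          union devlink_param_value val,
                                          struct netlink_ext_ack *extack)
        {
                if (!is_power_of_2(val.vu32)) {
                        NL_SET_ERR_MSG_MOD(extack, "value must be a power of 2");
                        return -EINVAL;
                }
                return 0;
        }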
static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
@@ -570,6 +577,10 @@ static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
mlx5_devlink_enable_remote_dev_reset_get,
mlx5_devlink_enable_remote_dev_reset_set, NULL),
+ DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_eq_depth_validate),
+ DEVLINK_PARAM_GENERIC(EVENT_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_eq_depth_validate),
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -608,6 +619,16 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
value);
}
#endif
+
+ value.vu32 = MLX5_COMP_EQ_SIZE;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ value);
+
+ value.vu32 = MLX5_NUM_ASYNC_EQE;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ value);
}
static const struct devlink_param enable_eth_param =
@@ -752,6 +773,66 @@ static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink)
mlx5_devlink_eth_param_unregister(devlink);
}
+static int mlx5_devlink_max_uc_list_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (val.vu32 == 0) {
+ NL_SET_ERR_MSG_MOD(extack, "max_macs value must be greater than 0");
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(val.vu32)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only power of 2 values are supported for max_macs");
+ return -EINVAL;
+ }
+
+ if (ilog2(val.vu32) >
+ MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list)) {
+ NL_SET_ERR_MSG_MOD(extack, "max_macs value is out of the supported range");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param max_uc_list_param =
+ DEVLINK_PARAM_GENERIC(MAX_MACS, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_max_uc_list_validate);
+
+static int mlx5_devlink_max_uc_list_param_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ union devlink_param_value value;
+ int err;
+
+ if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
+ return 0;
+
+ err = devlink_param_register(devlink, &max_uc_list_param);
+ if (err)
+ return err;
+
+ value.vu32 = 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
+ return 0;
+}
+
+static void
+mlx5_devlink_max_uc_list_param_unregister(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
+ return;
+
+ devlink_param_unregister(devlink, &max_uc_list_param);
+}
+
#define MLX5_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
@@ -811,6 +892,10 @@ int mlx5_devlink_register(struct devlink *devlink)
if (err)
goto auxdev_reg_err;
+ err = mlx5_devlink_max_uc_list_param_register(devlink);
+ if (err)
+ goto max_uc_list_err;
+
err = mlx5_devlink_traps_register(devlink);
if (err)
goto traps_reg_err;
@@ -821,6 +906,8 @@ int mlx5_devlink_register(struct devlink *devlink)
return 0;
traps_reg_err:
+ mlx5_devlink_max_uc_list_param_unregister(devlink);
+max_uc_list_err:
mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
devlink_params_unregister(devlink, mlx5_devlink_params,
@@ -831,6 +918,7 @@ auxdev_reg_err:
void mlx5_devlink_unregister(struct devlink *devlink)
{
mlx5_devlink_traps_unregister(devlink);
+ mlx5_devlink_max_uc_list_param_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
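Editor's note: mlx5_devlink_register()/unregister() follow the usual reverse-order unwind discipline: the new max_uc_list param registers after the auxdev params and before the traps, and both the error labels and the unregister path tear down in exactly the opposite order. A hedged sketch of the idiom with hypothetical step_a/step_b helpers:

        /* Sketch only: each label undoes the steps that succeeded,
         * in reverse order.
         */
        err = step_a();
        if (err)
                return err;
        err = step_b();
        if (err)
                goto undo_a;
        return 0;

undo_a:
        undo_step_a();
        return err;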
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f0ac6b0d9653..812e6810cb3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -145,7 +145,6 @@ struct page_pool;
#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
-#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
@@ -173,7 +172,7 @@ struct page_pool;
#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
-#define MLX5E_MAX_KLM_PER_WQE(mdev) \
+#define MLX5E_MAX_KLM_PER_WQE \
MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE)
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -783,6 +782,8 @@ struct mlx5e_channel {
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
int ix;
int cpu;
+ /* Sync between icosq recovery and XSK enable/disable. */
+ struct mutex icosq_recovery_lock;
};
struct mlx5e_ptp;
@@ -875,10 +876,8 @@ struct mlx5e_trap;
struct mlx5e_priv {
/* priv data path fields - start */
- /* +1 for port ptp ts */
- struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC +
- MLX5E_QOS_MAX_LEAF_NODES];
- int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+ struct mlx5e_txqsq **txq2sq;
+ int **channel_tc2realtxq;
int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
@@ -893,7 +892,7 @@ struct mlx5e_priv {
struct mlx5e_channels channels;
u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
struct mlx5e_rx_res *rx_res;
- u32 tx_rates[MLX5E_MAX_NUM_SQS];
+ u32 *tx_rates;
struct mlx5e_flow_steering fs;
@@ -909,7 +908,7 @@ struct mlx5e_priv {
struct net_device *netdev;
struct mlx5e_trap *en_trap;
struct mlx5e_stats stats;
- struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_channel_stats **channel_stats;
struct mlx5e_channel_stats trap_stats;
struct mlx5e_ptp_stats ptp_stats;
u16 stats_nch;
@@ -956,6 +955,12 @@ struct mlx5e_rx_handlers {
extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
+enum mlx5e_profile_feature {
+ MLX5E_PROFILE_FEATURE_PTP_RX,
+ MLX5E_PROFILE_FEATURE_PTP_TX,
+ MLX5E_PROFILE_FEATURE_QOS_HTB,
+};
+
struct mlx5e_profile {
int (*init)(struct mlx5_core_dev *mdev,
struct net_device *netdev);
@@ -969,14 +974,18 @@ struct mlx5e_profile {
int (*update_rx)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
+ int (*max_nch_limit)(struct mlx5_core_dev *mdev);
unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
mlx5e_stats_grp_t *stats_grps;
const struct mlx5e_rx_handlers *rx_handlers;
int max_tc;
u8 rq_groups;
- bool rx_ptp_support;
+ u32 features;
};
+#define mlx5e_profile_feature_cap(profile, feature) \
+ ((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature))
+
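Editor's note: the per-profile booleans (rx_ptp_support and friends) collapse into one features bitmap queried through mlx5e_profile_feature_cap(); the token-pasting macro expands the short name PTP_RX to MLX5E_PROFILE_FEATURE_PTP_RX. A hedged usage sketch with a hypothetical profile and helper:

        /* Sketch only: declare the feature bits, then test them with
         * the short token form.
         */
        static const struct mlx5e_profile example_profile = {
                .features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
                            BIT(MLX5E_PROFILE_FEATURE_QOS_HTB),
        };

        if (mlx5e_profile_feature_cap(&example_profile, PTP_RX))
                setup_ptp_rx();         /* hypothetical setup step */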
void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
@@ -1014,9 +1023,6 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
struct mlx5e_sq_param;
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
-void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
struct mlx5e_xdpsq *sq, bool is_redirect);
@@ -1057,7 +1063,6 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
mlx5e_fp_preactivate preactivate,
void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
-int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
@@ -1148,9 +1153,12 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal);
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal);
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
@@ -1186,8 +1194,7 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
struct mlx5_core_dev *mdev);
void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
- unsigned int txqs, unsigned int rxqs);
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index d5b7110a4265..0107e4e73bb0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -30,6 +30,8 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq);
void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c);
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c);
#define MLX5E_REPORTER_PER_Q_MAX_LEN 256
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
index d290d7276b8d..b4f3bd7d346e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -20,7 +20,7 @@ mlx5e_hv_vhca_fill_ring_stats(struct mlx5e_priv *priv, int ch,
struct mlx5e_channel_stats *stats;
int tc;
- stats = &priv->channel_stats[ch];
+ stats = priv->channel_stats[ch];
data->rx_packets = stats->rq.packets;
data->rx_bytes = stats->rq.bytes;
@@ -120,14 +120,14 @@ static void mlx5e_hv_vhca_stats_cleanup(struct mlx5_hv_vhca_agent *agent)
cancel_delayed_work_sync(&priv->stats_agent.work);
}
-int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
+void mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
{
int buf_len = mlx5e_hv_vhca_stats_buf_size(priv);
struct mlx5_hv_vhca_agent *agent;
priv->stats_agent.buf = kvzalloc(buf_len, GFP_KERNEL);
if (!priv->stats_agent.buf)
- return -ENOMEM;
+ return;
agent = mlx5_hv_vhca_agent_create(priv->mdev->hv_vhca,
MLX5_HV_VHCA_AGENT_STATS,
@@ -142,13 +142,11 @@ int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
PTR_ERR(agent));
kvfree(priv->stats_agent.buf);
- return IS_ERR_OR_NULL(agent);
+ return;
}
priv->stats_agent.agent = agent;
INIT_DELAYED_WORK(&priv->stats_agent.work, mlx5e_hv_vhca_stats_work);
-
- return 0;
}
void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h
index 664463faf77b..29c8c6d3260f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h
@@ -7,19 +7,12 @@
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
-int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv);
+void mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv);
void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv);
#else
-
-static inline int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
-{
- return 0;
-}
-
-static inline void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
-{
-}
+static inline void mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv) {}
+static inline void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv) {}
#endif
#endif /* __MLX5_EN_STATS_VHCA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
index 7edde4d536fd..17325c5d6516 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
@@ -155,3 +155,61 @@ struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh)
return mh->modify_hdr;
}
+char *
+mlx5e_mod_hdr_alloc(struct mlx5_core_dev *mdev, int namespace,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ int new_num_actions, max_hw_actions;
+ size_t new_sz, old_sz;
+ void *ret;
+
+ if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
+ goto out;
+
+ max_hw_actions = mlx5e_mod_hdr_max_actions(mdev, namespace);
+ new_num_actions = min(max_hw_actions,
+ mod_hdr_acts->actions ?
+ mod_hdr_acts->max_actions * 2 : 1);
+ if (mod_hdr_acts->max_actions == new_num_actions)
+ return ERR_PTR(-ENOSPC);
+
+ new_sz = MLX5_MH_ACT_SZ * new_num_actions;
+ old_sz = mod_hdr_acts->max_actions * MLX5_MH_ACT_SZ;
+
+ if (mod_hdr_acts->is_static) {
+ ret = kzalloc(new_sz, GFP_KERNEL);
+ if (ret) {
+ memcpy(ret, mod_hdr_acts->actions, old_sz);
+ mod_hdr_acts->is_static = false;
+ }
+ } else {
+ ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
+ if (ret)
+ memset(ret + old_sz, 0, new_sz - old_sz);
+ }
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ mod_hdr_acts->actions = ret;
+ mod_hdr_acts->max_actions = new_num_actions;
+
+out:
+ return mod_hdr_acts->actions + (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
+}
+
+void
+mlx5e_mod_hdr_dealloc(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+ if (!mod_hdr_acts->is_static)
+ kfree(mod_hdr_acts->actions);
+
+ mod_hdr_acts->actions = NULL;
+ mod_hdr_acts->num_actions = 0;
+ mod_hdr_acts->max_actions = 0;
+}
+
+char *
+mlx5e_mod_hdr_get_item(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, int pos)
+{
+ return mod_hdr_acts->actions + (pos * MLX5_MH_ACT_SZ);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h
index 33b23d8f9182..b8dac418d0a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h
@@ -7,14 +7,32 @@
#include <linux/hashtable.h>
#include <linux/mlx5/fs.h>
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
+
struct mlx5e_mod_hdr_handle;
struct mlx5e_tc_mod_hdr_acts {
int num_actions;
int max_actions;
+ bool is_static;
void *actions;
};
+#define DECLARE_MOD_HDR_ACTS_ACTIONS(name, len) \
+ u8 name[len][MLX5_MH_ACT_SZ] = {}
+
+#define DECLARE_MOD_HDR_ACTS(name, acts_arr) \
+ struct mlx5e_tc_mod_hdr_acts name = { \
+ .max_actions = ARRAY_SIZE(acts_arr), \
+ .is_static = true, \
+ .actions = acts_arr, \
+ }
+
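Editor's note: these macros support the stack-first allocation scheme implemented by mlx5e_mod_hdr_alloc() in the previous file: callers start with a small on-stack array, and the allocator transparently promotes it to a heap buffer (doubling up to the device's max_modify_header_actions cap) when it fills up. A hedged usage sketch, where "mdev" is an assumed mlx5_core_dev pointer:

        /* Sketch only: reserve space for one action via the API. */
        DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, 8);
        DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
        char *action;

        action = mlx5e_mod_hdr_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL, &mod_acts);
        if (IS_ERR(action))
                return PTR_ERR(action);
        /* fill the MLX5_MH_ACT_SZ bytes at "action", then: */
        mod_acts.num_actions++;

        mlx5e_mod_hdr_dealloc(&mod_acts);       /* frees only if heap-backed */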
+char *mlx5e_mod_hdr_alloc(struct mlx5_core_dev *mdev, int namespace,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+void mlx5e_mod_hdr_dealloc(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+char *mlx5e_mod_hdr_get_item(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, int pos);
+
struct mlx5e_mod_hdr_handle *
mlx5e_mod_hdr_attach(struct mlx5_core_dev *mdev,
struct mod_hdr_tbl *tbl,
@@ -28,4 +46,12 @@ struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh);
void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl);
void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl);
+static inline int mlx5e_mod_hdr_max_actions(struct mlx5_core_dev *mdev, int namespace)
+{
+ if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
+ return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
+ else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
+ return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
+}
+
#endif /* __MLX5E_EN_MOD_HDR_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index f8c29022dbb2..66180ffb4606 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -717,7 +717,7 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
u32 wqebbs;
- max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
+ max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE;
max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
rest = max_hd_per_wqe % max_klm_per_umr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 18d542b1c5cb..82baafd3c00c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -768,7 +768,7 @@ int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
{
struct mlx5e_ptp_fs *ptp_fs;
- if (!priv->profile->rx_ptp_support)
+ if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return 0;
ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
@@ -783,7 +783,7 @@ void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
{
struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
- if (!priv->profile->rx_ptp_support)
+ if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return;
mlx5e_ptp_rx_unset_fs(priv);
@@ -794,7 +794,7 @@ int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
{
struct mlx5e_ptp *c = priv->channels.ptp;
- if (!priv->profile->rx_ptp_support)
+ if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return 0;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 50977f01a050..00449df98a5e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+#include <net/sch_generic.h>
#include "en.h"
#include "params.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index fcb0892c08a9..0991345c4ae5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -517,6 +517,9 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
+ if (!netdev)
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_BLOCK:
return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
index d6c7c81690eb..7c9dd3a75f8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
@@ -66,7 +66,7 @@ mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
static inline void
mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
- struct sk_buff *skb) {}
+ struct sk_buff *skb) { napi_gro_receive(rq->cq.napi, skb); }
#endif /* CONFIG_MLX5_CLS_ACT */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 74086eb556ae..2684e9da9f41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -62,6 +62,7 @@ static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq)
static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
{
+ struct mlx5e_rq *xskrq = NULL;
struct mlx5_core_dev *mdev;
struct mlx5e_icosq *icosq;
struct net_device *dev;
@@ -70,7 +71,13 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
int err;
icosq = ctx;
+
+ mutex_lock(&icosq->channel->icosq_recovery_lock);
+
+ /* mlx5e_close_rq cancels this work before RQ and ICOSQ are killed. */
rq = &icosq->channel->rq;
+ if (test_bit(MLX5E_RQ_STATE_ENABLED, &icosq->channel->xskrq.state))
+ xskrq = &icosq->channel->xskrq;
mdev = icosq->channel->mdev;
dev = icosq->channel->netdev;
err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
@@ -84,6 +91,9 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
goto out;
mlx5e_deactivate_rq(rq);
+ if (xskrq)
+ mlx5e_deactivate_rq(xskrq);
+
err = mlx5e_wait_for_icosq_flush(icosq);
if (err)
goto out;
@@ -97,15 +107,28 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
goto out;
mlx5e_reset_icosq_cc_pc(icosq);
+
mlx5e_free_rx_in_progress_descs(rq);
+ if (xskrq)
+ mlx5e_free_rx_in_progress_descs(xskrq);
+
clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
mlx5e_activate_icosq(icosq);
- mlx5e_activate_rq(rq);
+ mlx5e_activate_rq(rq);
rq->stats->recover++;
+
+ if (xskrq) {
+ mlx5e_activate_rq(xskrq);
+ xskrq->stats->recover++;
+ }
+
+ mutex_unlock(&icosq->channel->icosq_recovery_lock);
+
return 0;
out:
clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+ mutex_unlock(&icosq->channel->icosq_recovery_lock);
return err;
}
@@ -706,6 +729,16 @@ void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
}
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c)
+{
+ mutex_lock(&c->icosq_recovery_lock);
+}
+
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c)
+{
+ mutex_unlock(&c->icosq_recovery_lock);
+}
+
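Editor's note: these two helpers expose the channel's new icosq_recovery_lock (the mutex added to struct mlx5e_channel earlier in this patch) so the XSK enable/disable path can serialize against the recovery work above. A hedged sketch of the intended caller, where close_xsk_rq() stands in for the real teardown step:

        /* Sketch only: bracket XSK queue teardown so it cannot race
         * the icosq recovery work.
         */
        mlx5e_reporter_icosq_suspend_recovery(c);
        close_xsk_rq(c);
        mlx5e_reporter_icosq_resume_recovery(c);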
static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
.name = "rx",
.recover = mlx5e_rx_reporter_recover,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 4f4bc8726ec4..60bc5b577ab9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -466,6 +466,14 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
+static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5e_tx_timeout_ctx *to_ctx = ctx;
+
+ return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
+}
+
static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
struct devlink_fmsg *fmsg)
{
@@ -561,11 +569,11 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
to_ctx.sq = sq;
err_ctx.ctx = &to_ctx;
err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
- err_ctx.dump = mlx5e_tx_reporter_dump_sq;
+ err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
snprintf(err_str, sizeof(err_str),
"TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
- jiffies_to_usecs(jiffies - sq->txq->trans_start));
+ jiffies_to_usecs(jiffies - READ_ONCE(sq->txq->trans_start)));
mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
return to_ctx.status;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 0015a81eb9a1..24c32f73040a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -37,7 +37,6 @@ struct mlx5e_rx_res {
/* API for rx_res_rss_* */
static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
@@ -52,7 +51,7 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
return -ENOMEM;
err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
- init_pkt_merge_param);
+ &res->pkt_merge_param);
if (err)
goto err_rss_free;
@@ -277,8 +276,7 @@ struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL);
}
-static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param)
+static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_tir_builder *builder;
@@ -309,7 +307,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
inner_ft_support);
- mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
+ mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
@@ -339,7 +337,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
inner_ft_support);
- mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
+ mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
@@ -454,11 +452,11 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
res->pkt_merge_param = *init_pkt_merge_param;
init_rwsem(&res->pkt_merge_param_sem);
- err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
+ err = mlx5e_rx_res_rss_init_def(res, init_nch);
if (err)
goto err_out;
- err = mlx5e_rx_res_channels_init(res, init_pkt_merge_param);
+ err = mlx5e_rx_res_channels_init(res);
if (err)
goto err_rss_destroy;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
new file mode 100644
index 000000000000..b0de6b999675
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_accept(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_accept = {
+ .can_offload = tc_act_can_offload_accept,
+ .parse_action = tc_act_parse_accept,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
new file mode 100644
index 000000000000..e600924e30ea
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+#include "mlx5_core.h"
+
+/* Must be aligned with enum flow_action_id. */
+static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
+ &mlx5e_tc_act_accept,
+ &mlx5e_tc_act_drop,
+ &mlx5e_tc_act_trap,
+ &mlx5e_tc_act_goto,
+ &mlx5e_tc_act_mirred,
+ &mlx5e_tc_act_mirred,
+ &mlx5e_tc_act_redirect_ingress,
+ NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
+ &mlx5e_tc_act_vlan,
+ &mlx5e_tc_act_vlan,
+ &mlx5e_tc_act_vlan_mangle,
+ &mlx5e_tc_act_tun_encap,
+ &mlx5e_tc_act_tun_decap,
+ &mlx5e_tc_act_pedit,
+ &mlx5e_tc_act_pedit,
+ &mlx5e_tc_act_csum,
+ NULL, /* FLOW_ACTION_MARK, */
+ &mlx5e_tc_act_ptype,
+ NULL, /* FLOW_ACTION_PRIORITY, */
+ NULL, /* FLOW_ACTION_WAKE, */
+ NULL, /* FLOW_ACTION_QUEUE, */
+ &mlx5e_tc_act_sample,
+ NULL, /* FLOW_ACTION_POLICE, */
+ &mlx5e_tc_act_ct,
+ NULL, /* FLOW_ACTION_CT_METADATA, */
+ &mlx5e_tc_act_mpls_push,
+ &mlx5e_tc_act_mpls_pop,
+};
+
+/* Must be aligned with enum flow_action_id. */
+static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = {
+ &mlx5e_tc_act_accept,
+ &mlx5e_tc_act_drop,
+ NULL, /* FLOW_ACTION_TRAP, */
+ &mlx5e_tc_act_goto,
+ &mlx5e_tc_act_mirred_nic,
+ NULL, /* FLOW_ACTION_MIRRED, */
+ NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */
+ NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
+ NULL, /* FLOW_ACTION_VLAN_PUSH, */
+ NULL, /* FLOW_ACTION_VLAN_POP, */
+ NULL, /* FLOW_ACTION_VLAN_MANGLE, */
+ NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */
+ NULL, /* FLOW_ACTION_TUNNEL_DECAP, */
+ &mlx5e_tc_act_pedit,
+ &mlx5e_tc_act_pedit,
+ &mlx5e_tc_act_csum,
+ &mlx5e_tc_act_mark,
+ NULL, /* FLOW_ACTION_PTYPE, */
+ NULL, /* FLOW_ACTION_PRIORITY, */
+ NULL, /* FLOW_ACTION_WAKE, */
+ NULL, /* FLOW_ACTION_QUEUE, */
+ NULL, /* FLOW_ACTION_SAMPLE, */
+ NULL, /* FLOW_ACTION_POLICE, */
+ &mlx5e_tc_act_ct,
+};
+
+/**
+ * mlx5e_tc_act_get() - Get an action parser for an action id.
+ * @act_id: Flow action id.
+ * @ns_type: Flow namespace type.
+ */
+struct mlx5e_tc_act *
+mlx5e_tc_act_get(enum flow_action_id act_id,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5e_tc_act **tc_acts;
+
+ tc_acts = ns_type == MLX5_FLOW_NAMESPACE_FDB ? tc_acts_fdb : tc_acts_nic;
+
+ return tc_acts[act_id];
+}
+
+/**
+ * mlx5e_tc_act_init_parse_state() - Init a new parse_state.
+ * @parse_state: Parsing state.
+ * @flow: mlx5e tc flow being handled.
+ * @flow_action: flow action to parse.
+ * @extack: Netlink extended ack for error reporting.
+ *
+ * The same parse_state should be passed to action parsers
+ * for tracking the current parsing state.
+ */
+void
+mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_tc_flow *flow,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
+{
+ memset(parse_state, 0, sizeof(*parse_state));
+ parse_state->flow = flow;
+ parse_state->num_actions = flow_action->num_entries;
+ parse_state->extack = extack;
+}
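Editor's note: act.c is the dispatch core of the new modular TC action parsers added by this series; each per-action file exports a struct mlx5e_tc_act, and the two ID-indexed tables above select FDB or NIC behavior. A hedged sketch of how en_tc.c is expected to drive it (declarations, state setup and attr handling omitted):

        /* Sketch only: simplified parse loop over the flow actions. */
        flow_action_for_each(i, act, flow_action) {
                struct mlx5e_tc_act *tc_act;

                tc_act = mlx5e_tc_act_get(act->id, ns_type);
                if (!tc_act || !tc_act->can_offload(&parse_state, act, i))
                        return -EOPNOTSUPP;

                err = tc_act->parse_action(&parse_state, act, priv, attr);
                if (err)
                        return err;
        }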
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
new file mode 100644
index 000000000000..26efa33de56f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_ACT_H__
+#define __MLX5_EN_TC_ACT_H__
+
+#include <net/tc_act/tc_pedit.h>
+#include <net/flow_offload.h>
+#include <linux/netlink.h>
+#include "eswitch.h"
+#include "pedit.h"
+
+struct mlx5_flow_attr;
+
+struct mlx5e_tc_act_parse_state {
+ unsigned int num_actions;
+ struct mlx5e_tc_flow *flow;
+ struct netlink_ext_ack *extack;
+ bool encap;
+ bool decap;
+ bool mpls_push;
+ bool ptype_host;
+ const struct ip_tunnel_info *tun_info;
+ struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
+ int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
+ int if_count;
+ struct mlx5_tc_ct_priv *ct_priv;
+};
+
+struct mlx5e_tc_act {
+ bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index);
+
+ int (*parse_action)(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr);
+
+ int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr);
+};
+
+extern struct mlx5e_tc_act mlx5e_tc_act_drop;
+extern struct mlx5e_tc_act mlx5e_tc_act_trap;
+extern struct mlx5e_tc_act mlx5e_tc_act_accept;
+extern struct mlx5e_tc_act mlx5e_tc_act_mark;
+extern struct mlx5e_tc_act mlx5e_tc_act_goto;
+extern struct mlx5e_tc_act mlx5e_tc_act_tun_encap;
+extern struct mlx5e_tc_act mlx5e_tc_act_tun_decap;
+extern struct mlx5e_tc_act mlx5e_tc_act_csum;
+extern struct mlx5e_tc_act mlx5e_tc_act_pedit;
+extern struct mlx5e_tc_act mlx5e_tc_act_vlan;
+extern struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle;
+extern struct mlx5e_tc_act mlx5e_tc_act_mpls_push;
+extern struct mlx5e_tc_act mlx5e_tc_act_mpls_pop;
+extern struct mlx5e_tc_act mlx5e_tc_act_mirred;
+extern struct mlx5e_tc_act mlx5e_tc_act_mirred_nic;
+extern struct mlx5e_tc_act mlx5e_tc_act_ct;
+extern struct mlx5e_tc_act mlx5e_tc_act_sample;
+extern struct mlx5e_tc_act mlx5e_tc_act_ptype;
+extern struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress;
+
+struct mlx5e_tc_act *
+mlx5e_tc_act_get(enum flow_action_id act_id,
+ enum mlx5_flow_namespace_type ns_type);
+
+void
+mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_tc_flow *flow,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack);
+
+#endif /* __MLX5_EN_TC_ACT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
new file mode 100644
index 000000000000..29920ef0180a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/tc_act/tc_csum.h>
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+csum_offload_supported(struct mlx5e_priv *priv,
+ u32 action,
+ u32 update_flags,
+ struct netlink_ext_ack *extack)
+{
+ u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
+ TCA_CSUM_UPDATE_FLAG_UDP;
+
+ /* The HW recalcs checksums only if re-writing headers */
+ if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TC csum action is only offloaded with pedit");
+ netdev_warn(priv->netdev,
+ "TC csum action is only offloaded with pedit\n");
+ return false;
+ }
+
+ if (update_flags & ~prot_flags) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't offload TC csum action for some header/s");
+ netdev_warn(priv->netdev,
+ "can't offload TC csum action for some header/s - flags %#x\n",
+ update_flags);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+tc_act_can_offload_csum(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+
+ return csum_offload_supported(flow->priv, flow->attr->action,
+ act->csum_flags, parse_state->extack);
+}
+
+static int
+tc_act_parse_csum(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_csum = {
+ .can_offload = tc_act_can_offload_csum,
+ .parse_action = tc_act_parse_csum,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
new file mode 100644
index 000000000000..06ec30cdb269
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+#include "en/tc_ct.h"
+
+static bool
+tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+
+ if (flow_flag_test(parse_state->flow, SAMPLE)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sample action with connection tracking is not supported");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ int err;
+
+ err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
+ &attr->parse_attr->mod_hdr_acts,
+ act, parse_state->extack);
+ if (err)
+ return err;
+
+ flow_flag_set(parse_state->flow, CT);
+
+ if (mlx5e_is_eswitch_flow(parse_state->flow))
+ attr->esw_attr->split_count = attr->esw_attr->out_count;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_ct = {
+ .can_offload = tc_act_can_offload_ct,
+ .parse_action = tc_act_parse_ct,
+};
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
new file mode 100644
index 000000000000..2e29a23bed12
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_drop(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_drop = {
+ .can_offload = tc_act_can_offload_drop,
+ .parse_action = tc_act_parse_drop,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
new file mode 100644
index 000000000000..f44515061228
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+#include "eswitch.h"
+
+static int
+validate_goto_chain(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ bool is_esw = mlx5e_is_eswitch_flow(flow);
+ bool ft_flow = mlx5e_is_ft_flow(flow);
+ u32 dest_chain = act->chain_index;
+ struct mlx5_fs_chains *chains;
+ struct mlx5_eswitch *esw;
+ u32 reformat_and_fwd;
+ u32 max_chain;
+
+ esw = priv->mdev->priv.eswitch;
+ chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv);
+ max_chain = mlx5_chains_get_chain_range(chains);
+ reformat_and_fwd = is_esw ?
+ MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
+ MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
+
+ if (ft_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_chains_backwards_supported(chains) &&
+ dest_chain <= flow->attr->chain) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto lower numbered chain isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (dest_chain > max_chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested destination chain is out of supported range");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow->attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ !reformat_and_fwd) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto chain is not allowed if action has reformat or decap");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static bool
+tc_act_can_offload_goto(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+
+ if (validate_goto_chain(flow->priv, flow, act, extack))
+ return false;
+
+ return true;
+}
+
+static int
+tc_act_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->dest_chain = act->chain_index;
+
+ return 0;
+}
+
+static int
+tc_act_post_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+
+ if (!attr->dest_chain)
+ return 0;
+
+ if (parse_state->decap) {
+ /* This could be supported by creating a mapping for the
+ * tunnel device only (without the tunnel headers) and
+ * associating that tunnel id with this decap flow.
+ *
+ * On restore (miss), we would then just restore the saved
+ * tunnel device.
+ */
+
+ NL_SET_ERR_MSG_MOD(extack, "Decap with goto isn't supported");
+ netdev_warn(priv->netdev, "Decap with goto isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5e_is_eswitch_flow(flow) && parse_attr->mirred_ifindex[0]) {
+ NL_SET_ERR_MSG_MOD(extack, "Mirroring goto chain rules isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_goto = {
+ .can_offload = tc_act_can_offload_goto,
+ .parse_action = tc_act_parse_goto,
+ .post_parse = tc_act_post_parse_goto,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
new file mode 100644
index 000000000000..d775c3d9edf3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en_tc.h"
+
+static bool
+tc_act_can_offload_mark(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ if (act->mark & ~MLX5E_TC_FLOW_ID_MASK) {
+ NL_SET_ERR_MSG_MOD(parse_state->extack, "Bad flow mark, only 16 bit supported");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_mark(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->nic_attr->flow_tag = act->mark;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ return 0;
+}
+
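+/* skbedit mark is offloaded by programming the mark as the flow tag of
+ * the NIC flow context, so only values that fit MLX5E_TC_FLOW_ID_MASK
+ * (16 bits) can be offloaded.
+ */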
+struct mlx5e_tc_act mlx5e_tc_act_mark = {
+ .can_offload = tc_act_can_offload_mark,
+ .parse_action = tc_act_parse_mark,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
new file mode 100644
index 000000000000..c614fc7fdc9c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/if_macvlan.h>
+#include <linux/if_vlan.h>
+#include <net/bareudp.h>
+#include <net/bonding.h>
+#include "act.h"
+#include "vlan.h"
+#include "en/tc_tun_encap.h"
+#include "en/tc_priv.h"
+#include "en_rep.h"
+
+static bool
+same_vf_reps(struct mlx5e_priv *priv, struct net_device *out_dev)
+{
+ return mlx5e_eswitch_vf_rep(priv->netdev) &&
+ priv->netdev == out_dev;
+}
+
+static int
+verify_uplink_forwarding(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *rep_priv;
+
+	/* Forwarding non-encapsulated traffic between
+	 * uplink ports is allowed only if the
+	 * termination_table_raw_traffic cap is set.
+	 *
+	 * The input vport was stored in attr->in_rep.
+	 * In the LAG case, *priv* is the private data of the
+	 * uplink, which may not be the input vport.
+	 */
+ rep_priv = mlx5e_rep_to_rep_priv(attr->esw_attr->in_rep);
+
+ if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
+ mlx5e_eswitch_uplink_rep(out_dev)))
+ return 0;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
+ termination_table_raw_traffic)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are both uplink, can't offload forwarding");
+ return -EOPNOTSUPP;
+ } else if (out_dev != rep_priv->netdev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not the same uplink, can't offload forwarding");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static bool
+is_duplicated_output_device(struct net_device *dev,
+ struct net_device *out_dev,
+ int *ifindexes, int if_count,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < if_count; i++) {
+ if (ifindexes[i] == out_dev->ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "can't duplicate output to same device");
+ netdev_err(dev, "can't duplicate output to same device: %s\n",
+ out_dev->name);
+ return true;
+ }
+ }
+
+ return false;
+}
+
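+/* Resolve the actual FDB destination when bonding is involved: if the
+ * uplink's LAG master is the requested out_dev, forward via the uplink
+ * itself; if out_dev is itself a LAG master, use its currently active
+ * slave, provided that slave is an mlx5 rep on the same switch as
+ * uplink_dev.
+ */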
+static struct net_device *
+get_fdb_out_dev(struct net_device *uplink_dev, struct net_device *out_dev)
+{
+ struct net_device *fdb_out_dev = out_dev;
+ struct net_device *uplink_upper;
+
+ rcu_read_lock();
+ uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
+ if (uplink_upper && netif_is_lag_master(uplink_upper) &&
+ uplink_upper == out_dev) {
+ fdb_out_dev = uplink_dev;
+ } else if (netif_is_lag_master(out_dev)) {
+ fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
+ if (fdb_out_dev &&
+ (!mlx5e_eswitch_rep(fdb_out_dev) ||
+ !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
+ fdb_out_dev = NULL;
+ }
+ rcu_read_unlock();
+ return fdb_out_dev;
+}
+
+static bool
+tc_act_can_offload_mirred(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct net_device *out_dev = act->dev;
+ struct mlx5e_priv *priv = flow->priv;
+ struct mlx5_esw_flow_attr *esw_attr;
+
+ parse_attr = flow->attr->parse_attr;
+ esw_attr = flow->attr->esw_attr;
+
+ if (!out_dev) {
+		/* out_dev is NULL when filters with a
+		 * non-existing mirred device are replayed to
+		 * the driver.
+		 */
+ return false;
+ }
+
+ if (parse_state->mpls_push && !netif_is_bareudp(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls is supported only through a bareudp device");
+ return false;
+ }
+
+ if (mlx5e_is_ft_flow(flow) && out_dev == priv->netdev) {
+ /* Ignore forward to self rules generated
+ * by adding both mlx5 devs to the flow table
+ * block on a normal nft offload setup.
+ */
+ return false;
+ }
+
+ if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't support more output ports, can't offload forwarding");
+ netdev_warn(priv->netdev,
+ "can't support more than %d output ports, can't offload forwarding\n",
+ esw_attr->out_count);
+ return false;
+ }
+
+ if (parse_state->encap ||
+ netdev_port_same_parent_id(priv->netdev, out_dev) ||
+ netif_is_ovs_master(out_dev))
+ return true;
+
+ if (parse_attr->filter_dev != priv->netdev) {
+		/* All mlx5 devices are called to configure
+		 * high level device filters. Therefore, the
+		 * *attempt* to install a filter on an invalid
+		 * eswitch should not trigger an explicit error.
+		 */
+ return false;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack, "devices are not on same switch HW, can't offload forwarding");
+
+ return false;
+}
+
+static int
+parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct net_device *out_dev = act->dev;
+
+ parse_attr->mirred_ifindex[esw_attr->out_count] = out_dev->ifindex;
+ parse_attr->tun_info[esw_attr->out_count] =
+ mlx5e_dup_tun_info(parse_state->tun_info);
+
+ if (!parse_attr->tun_info[esw_attr->out_count])
+ return -ENOMEM;
+
+ parse_state->encap = false;
+ esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
+ esw_attr->out_count++;
+ /* attr->dests[].rep is resolved when we handle encap */
+
+ return 0;
+}
+
+static int
+parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct net_device *out_dev = act->dev;
+ struct net_device *uplink_dev;
+ struct mlx5e_priv *out_priv;
+ struct mlx5_eswitch *esw;
+ int *ifindexes;
+ int if_count;
+ int err;
+
+ esw = priv->mdev->priv.eswitch;
+ uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
+ ifindexes = parse_state->ifindexes;
+ if_count = parse_state->if_count;
+
+ if (is_duplicated_output_device(priv->netdev, out_dev, ifindexes, if_count, extack))
+ return -EOPNOTSUPP;
+
+ parse_state->ifindexes[if_count] = out_dev->ifindex;
+ parse_state->if_count++;
+
+ out_dev = get_fdb_out_dev(uplink_dev, out_dev);
+ if (!out_dev)
+ return -ENODEV;
+
+ if (is_vlan_dev(out_dev)) {
+ err = mlx5e_tc_act_vlan_add_push_action(priv, attr, &out_dev, extack);
+ if (err)
+ return err;
+ }
+
+ if (is_vlan_dev(parse_attr->filter_dev)) {
+ err = mlx5e_tc_act_vlan_add_pop_action(priv, attr, extack);
+ if (err)
+ return err;
+ }
+
+ if (netif_is_macvlan(out_dev))
+ out_dev = macvlan_dev_real_dev(out_dev);
+
+ err = verify_uplink_forwarding(priv, attr, out_dev, extack);
+ if (err)
+ return err;
+
+ if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not on same switch HW, can't offload forwarding");
+ return -EOPNOTSUPP;
+ }
+
+ if (same_vf_reps(priv, out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "can't forward from a VF to itself");
+ return -EOPNOTSUPP;
+ }
+
+ out_priv = netdev_priv(out_dev);
+ rpriv = out_priv->ppriv;
+ esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
+ esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
+ esw_attr->out_count++;
+
+ return 0;
+}
+
+static int
+parse_mirred_ovs_master(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct net_device *out_dev = act->dev;
+ int err;
+
+ err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
+ MLX5E_TC_INT_PORT_EGRESS,
+ &attr->action, esw_attr->out_count);
+ if (err)
+ return err;
+
+ esw_attr->out_count++;
+ return 0;
+}
+
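+/* Dispatch for redirect/mirror actions. An illustrative rule that takes
+ * the parse_mirred() path (hypothetical rep names):
+ *
+ *   tc filter add dev $VF1_REP ingress protocol ip flower \
+ *       action mirred egress redirect dev $VF2_REP
+ *
+ * A preceding tunnel_key set action selects parse_mirred_encap(), and
+ * an OVS internal port destination selects parse_mirred_ovs_master().
+ */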
+static int
+tc_act_parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct net_device *out_dev = act->dev;
+ int err = -EOPNOTSUPP;
+
+ if (parse_state->encap)
+ err = parse_mirred_encap(parse_state, act, attr);
+ else if (netdev_port_same_parent_id(priv->netdev, out_dev))
+ err = parse_mirred(parse_state, act, priv, attr);
+ else if (netif_is_ovs_master(out_dev))
+ err = parse_mirred_ovs_master(parse_state, act, priv, attr);
+
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_mirred = {
+ .can_offload = tc_act_can_offload_mirred,
+ .parse_action = tc_act_parse_mirred,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
new file mode 100644
index 000000000000..2c74567b6d25
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct net_device *out_dev = act->dev;
+ struct mlx5e_priv *priv = flow->priv;
+
+ if (act->id != FLOW_ACTION_REDIRECT)
+ return false;
+
+ if (priv->netdev->netdev_ops != out_dev->netdev_ops ||
+ !mlx5e_same_hw_devs(priv, netdev_priv(out_dev))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "devices are not on same switch HW, can't offload forwarding");
+ netdev_warn(priv->netdev,
+			    "devices %s and %s are not on the same switch HW, can't offload forwarding\n",
+ netdev_name(priv->netdev),
+ out_dev->name);
+ return false;
+ }
+
+ return true;
+}
+
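+/* Redirect between two NIC (non-eswitch) devices on the same HW is
+ * offloaded as a hairpin flow: packets are looped from the RX of one
+ * function to the TX of the other without going through software.
+ */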
+static int
+tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->parse_attr->mirred_ifindex[0] = act->dev->ifindex;
+ flow_flag_set(parse_state->flow, HAIRPIN);
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_mirred_nic = {
+ .can_offload = tc_act_can_offload_mirred_nic,
+ .parse_action = tc_act_parse_mirred_nic,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
new file mode 100644
index 000000000000..784fc4f68b1e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <net/bareudp.h>
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_priv *priv = parse_state->flow->priv;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_l2_to_l3_tunnel) ||
+ act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls push is supported only for mpls_uc protocol");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ parse_state->mpls_push = true;
+
+ return 0;
+}
+
+static bool
+tc_act_can_offload_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct net_device *filter_dev;
+
+ filter_dev = flow->attr->parse_attr->filter_dev;
+
+	/* We only support mpls pop if it is the first action
+	 * and the filter net device is bareudp. Subsequent
+	 * actions can be pedit, and the last one can be a mirred
+	 * egress redirect.
+	 */
+ if (act_index) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only as first action");
+ return false;
+ }
+
+ if (!netif_is_bareudp(filter_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only on bareudp devices");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_mpls_pop(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->parse_attr->eth.h_proto = act->mpls_pop.proto;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_flag_set(parse_state->flow, L3_TO_L2_DECAP);
+
+ return 0;
+}
+
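+/* MPLS actions are offloaded only together with a bareudp device
+ * (MPLS-over-UDP). An illustrative decap-side rule (hypothetical
+ * device names):
+ *
+ *   tc filter add dev bareudp0 ingress protocol mpls_uc flower \
+ *       mpls_label 100 action mpls pop protocol ip \
+ *       action mirred egress redirect dev $VF_REP
+ */
+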
+struct mlx5e_tc_act mlx5e_tc_act_mpls_push = {
+ .can_offload = tc_act_can_offload_mpls_push,
+ .parse_action = tc_act_parse_mpls_push,
+};
+
+struct mlx5e_tc_act mlx5e_tc_act_mpls_pop = {
+ .can_offload = tc_act_can_offload_mpls_pop,
+ .parse_action = tc_act_parse_mpls_pop,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
new file mode 100644
index 000000000000..79addbbef087
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include "act.h"
+#include "pedit.h"
+#include "en/tc_priv.h"
+#include "en/mod_hdr.h"
+
+static int pedit_header_offsets[] = {
+ [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
+ [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
+ [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
+ [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
+ [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
+};
+
+#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
+
+static int
+set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
+{
+ u32 *curr_pmask, *curr_pval;
+
+ curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
+ curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
+
+ if (*curr_pmask & mask) { /* disallow acting twice on the same location */
+ NL_SET_ERR_MSG_MOD(extack,
+				   "curr_pmask and the new mask overlap, acting twice on the same location");
+ goto out_err;
+ }
+
+ *curr_pmask |= mask;
+ *curr_pval |= (val & mask);
+
+ return 0;
+
+out_err:
+ return -EOPNOTSUPP;
+}
+
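+/* Accumulate one pedit key into the per-command mask/value scratchpads:
+ * hdrs[0] collects FLOW_ACTION_MANGLE (set) keys, hdrs[1] the add keys.
+ * The accumulated headers are later compiled into modify-header actions.
+ */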
+static int
+parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
+{
+ u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
+ u8 htype = act->mangle.htype;
+ int err = -EOPNOTSUPP;
+ u32 mask, val, offset;
+
+ if (htype == FLOW_ACT_MANGLE_UNSPEC) {
+ NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
+ goto out_err;
+ }
+
+ if (!mlx5e_mod_hdr_max_actions(priv->mdev, namespace)) {
+ NL_SET_ERR_MSG_MOD(extack, "The pedit offload action is not supported");
+ goto out_err;
+ }
+
+ mask = act->mangle.mask;
+ val = act->mangle.val;
+ offset = act->mangle.offset;
+
+ err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack);
+ if (err)
+ goto out_err;
+
+ hdrs[cmd].pedits++;
+
+ return 0;
+out_err:
+ return err;
+}
+
+static int
+parse_pedit_to_reformat(const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct netlink_ext_ack *extack)
+{
+ u32 mask, val, offset;
+ u32 *p;
+
+ if (act->id != FLOW_ACTION_MANGLE) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
+ NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
+ return -EOPNOTSUPP;
+ }
+
+ mask = ~act->mangle.mask;
+ val = act->mangle.val;
+ offset = act->mangle.offset;
+ p = (u32 *)&parse_attr->eth;
+ *(p + (offset >> 2)) |= (val & mask);
+
+ return 0;
+}
+
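+/* For L3-to-L2 decap (mpls pop) flows, Ethernet mangles are not turned
+ * into modify-header actions; they are accumulated into parse_attr->eth,
+ * which becomes the L2 header used by the packet reformat.
+ */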
+int
+mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
+ return parse_pedit_to_reformat(act, parse_attr, extack);
+
+ return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, hdrs, extack);
+}
+
+static bool
+tc_act_can_offload_pedit(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ enum mlx5_flow_namespace_type ns_type;
+ int err;
+
+ ns_type = mlx5e_get_flow_namespace(flow);
+
+ err = mlx5e_tc_act_pedit_parse_action(flow->priv, act, ns_type,
+ attr->parse_attr, parse_state->hdrs,
+ flow, parse_state->extack);
+ if (err)
+ return err;
+
+ if (flow_flag_test(flow, L3_TO_L2_DECAP))
+ goto out;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ esw_attr->split_count = esw_attr->out_count;
+
+out:
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_pedit = {
+ .can_offload = tc_act_can_offload_pedit,
+ .parse_action = tc_act_parse_pedit,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
new file mode 100644
index 000000000000..da8ab03af58f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_ACT_PEDIT_H__
+#define __MLX5_EN_TC_ACT_PEDIT_H__
+
+#include "en_tc.h"
+
+struct pedit_headers {
+ struct ethhdr eth;
+ struct vlan_hdr vlan;
+ struct iphdr ip4;
+ struct ipv6hdr ip6;
+ struct tcphdr tcp;
+ struct udphdr udp;
+};
+
+struct pedit_headers_action {
+ struct pedit_headers vals;
+ struct pedit_headers masks;
+ u32 pedits;
+};
+
+int
+mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act, int namespace,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack);
+
+#endif /* __MLX5_EN_TC_ACT_PEDIT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
new file mode 100644
index 000000000000..0819110193dc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_ptype(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_ptype(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+
+ if (act->ptype != PACKET_HOST) {
+ NL_SET_ERR_MSG_MOD(extack, "skbedit ptype is only supported with type host");
+ return -EOPNOTSUPP;
+ }
+
+ parse_state->ptype_host = true;
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_ptype = {
+ .can_offload = tc_act_can_offload_ptype,
+ .parse_action = tc_act_parse_ptype,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
new file mode 100644
index 000000000000..1c32e24e528d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct net_device *out_dev = act->dev;
+ struct mlx5_esw_flow_attr *esw_attr;
+
+ parse_attr = flow->attr->parse_attr;
+ esw_attr = flow->attr->esw_attr;
+
+ if (!out_dev)
+ return false;
+
+ if (!netif_is_ovs_master(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to ingress is supported only for OVS internal ports");
+ return false;
+ }
+
+ if (netif_is_ovs_master(parse_attr->filter_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to ingress is not supported from internal port");
+ return false;
+ }
+
+ if (!parse_state->ptype_host) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to int port ingress requires ptype=host action");
+ return false;
+ }
+
+ if (esw_attr->out_count) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to int port ingress is supported only as single destination");
+ return false;
+ }
+
+ return true;
+}
+
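+/* An illustrative rule accepted here (hypothetical device names),
+ * redirecting to the ingress side of an OVS internal port:
+ *
+ *   tc filter add dev $VF_REP ingress protocol ip flower \
+ *       action skbedit ptype host \
+ *       action mirred ingress redirect dev ovs-br0
+ */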
+static int
+tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct net_device *out_dev = act->dev;
+ int err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
+ MLX5E_TC_INT_PORT_INGRESS,
+ &attr->action, esw_attr->out_count);
+ if (err)
+ return err;
+
+ esw_attr->out_count++;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress = {
+ .can_offload = tc_act_can_offload_redirect_ingress,
+ .parse_action = tc_act_parse_redirect_ingress,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
new file mode 100644
index 000000000000..6699bdf5cf01
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <net/psample.h>
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+
+ if (flow_flag_test(parse_state->flow, CT)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sample action with connection tracking is not supported");
+ return false;
+ }
+
+ return true;
+}
+
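+/* An illustrative sample rule (hypothetical device names); sample
+ * cannot be combined with CT:
+ *
+ *   tc filter add dev $VF_REP ingress protocol ip flower \
+ *       action sample rate 100 group 5 trunc 128 \
+ *       action mirred egress redirect dev $UPLINK_REP
+ */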
+static int
+tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_sample_attr *sample_attr;
+
+	sample_attr = kzalloc(sizeof(*sample_attr), GFP_KERNEL);
+ if (!sample_attr)
+ return -ENOMEM;
+
+ sample_attr->rate = act->sample.rate;
+ sample_attr->group_num = act->sample.psample_group->group_num;
+
+ if (act->sample.truncate)
+ sample_attr->trunc_size = act->sample.trunc_size;
+
+ attr->sample_attr = sample_attr;
+ flow_flag_set(parse_state->flow, SAMPLE);
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_sample = {
+ .can_offload = tc_act_can_offload_sample,
+ .parse_action = tc_act_parse_sample,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
new file mode 100644
index 000000000000..046b64c2cec4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+
+ if (parse_state->num_actions != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
+ return false;
+ }
+
+ return true;
+}
+
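+/* trap steers matched packets to the slow path table so they reach the
+ * kernel, hence MLX5_ESW_ATTR_FLAG_SLOW_PATH; it must be the only
+ * action of the rule.
+ */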
+static int
+tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_trap = {
+ .can_offload = tc_act_can_offload_trap,
+ .parse_action = tc_act_parse_trap,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
new file mode 100644
index 000000000000..6f4a2cf46afd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_tun_encap.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ if (!act->tunnel) {
+ NL_SET_ERR_MSG_MOD(parse_state->extack,
+ "Zero tunnel attributes is not supported");
+ return false;
+ }
+
+ return true;
+}
+
+static int
+tc_act_parse_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ parse_state->tun_info = act->tunnel;
+ parse_state->encap = true;
+
+ return 0;
+}
+
+static bool
+tc_act_can_offload_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ parse_state->decap = true;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_tun_encap = {
+ .can_offload = tc_act_can_offload_tun_encap,
+ .parse_action = tc_act_parse_tun_encap,
+};
+
+struct mlx5e_tc_act mlx5e_tc_act_tun_decap = {
+ .can_offload = tc_act_can_offload_tun_decap,
+ .parse_action = tc_act_parse_tun_decap,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
new file mode 100644
index 000000000000..70fc0c2d8813
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include "act.h"
+#include "vlan.h"
+#include "en/tc_priv.h"
+
+static int
+add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack)
+{
+ const struct flow_action_entry prio_tag_act = {
+ .vlan.vid = 0,
+ .vlan.prio =
+ MLX5_GET(fte_match_set_lyr_2_4,
+ mlx5e_get_match_headers_value(*action,
+ &parse_attr->spec),
+ first_prio) &
+ MLX5_GET(fte_match_set_lyr_2_4,
+ mlx5e_get_match_headers_criteria(*action,
+ &parse_attr->spec),
+ first_prio),
+ };
+
+ return mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
+ &prio_tag_act, parse_attr, hdrs, action,
+ extack);
+}
+
+static int
+parse_tc_vlan_action(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_esw_flow_attr *attr,
+ u32 *action,
+ struct netlink_ext_ack *extack)
+{
+ u8 vlan_idx = attr->total_vlan;
+
+ if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
+ NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
+ return -EOPNOTSUPP;
+ }
+
+ switch (act->id) {
+ case FLOW_ACTION_VLAN_POP:
+ if (vlan_idx) {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack, "vlan pop action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
+ } else {
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ }
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ attr->vlan_vid[vlan_idx] = act->vlan.vid;
+ attr->vlan_prio[vlan_idx] = act->vlan.prio;
+ attr->vlan_proto[vlan_idx] = act->vlan.proto;
+ if (!attr->vlan_proto[vlan_idx])
+ attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
+
+ if (vlan_idx) {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan push action is not supported for vlan depth > 1");
+ return -EOPNOTSUPP;
+ }
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
+ } else {
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
+ (act->vlan.proto != htons(ETH_P_8021Q) ||
+ act->vlan.prio)) {
+ NL_SET_ERR_MSG_MOD(extack, "vlan push action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ }
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
+ return -EINVAL;
+ }
+
+ attr->total_vlan = vlan_idx + 1;
+
+ return 0;
+}
+
+int
+mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct net_device **out_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *vlan_dev = *out_dev;
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_PUSH,
+ .vlan.vid = vlan_dev_vlan_id(vlan_dev),
+ .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
+ .vlan.prio = 0,
+ };
+ int err;
+
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack);
+ if (err)
+ return err;
+
+ rcu_read_lock();
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
+ rcu_read_unlock();
+ if (!*out_dev)
+ return -ENODEV;
+
+ if (is_vlan_dev(*out_dev))
+ err = mlx5e_tc_act_vlan_add_push_action(priv, attr, out_dev, extack);
+
+ return err;
+}
+
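+/* The number of vlan pops needed for a stacked vlan filter device is
+ * its nesting depth relative to the uplink netdev: one pop action per
+ * level of vlan devices.
+ */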
+int
+mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_POP,
+ };
+ int nest_level, err = 0;
+
+ nest_level = attr->parse_attr->filter_dev->lower_level -
+ priv->netdev->lower_level;
+ while (nest_level--) {
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action,
+ extack);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static bool
+tc_act_can_offload_vlan(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ int err;
+
+ if (act->id == FLOW_ACTION_VLAN_PUSH &&
+ (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
+ /* Replace vlan pop+push with vlan modify */
+ attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ err = mlx5e_tc_act_vlan_add_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB, act,
+ attr->parse_attr, parse_state->hdrs,
+ &attr->action, parse_state->extack);
+ } else {
+ err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action,
+ parse_state->extack);
+ }
+
+ if (err)
+ return err;
+
+ esw_attr->split_count = esw_attr->out_count;
+
+ return 0;
+}
+
+static int
+tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct pedit_headers_action *hdrs = parse_state->hdrs;
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int err;
+
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
+ attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+		/* For prio tag mode, replace the vlan pop action with a
+		 * vlan prio tag rewrite.
+		 */
+ attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
+ &attr->action, extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_vlan = {
+ .can_offload = tc_act_can_offload_vlan,
+ .parse_action = tc_act_parse_vlan,
+ .post_parse = tc_act_post_parse_vlan,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
new file mode 100644
index 000000000000..3d62f13ab61f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_ACT_VLAN_H__
+#define __MLX5_EN_TC_ACT_VLAN_H__
+
+#include <net/flow_offload.h>
+#include "en/tc_priv.h"
+
+struct pedit_headers_action;
+
+int
+mlx5e_tc_act_vlan_add_push_action(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct net_device **out_dev,
+ struct netlink_ext_ack *extack);
+
+int
+mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct netlink_ext_ack *extack);
+
+int
+mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
+ const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack);
+
+#endif /* __MLX5_EN_TC_ACT_VLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
new file mode 100644
index 000000000000..63e36e7f53e3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/if_vlan.h>
+#include "act.h"
+#include "vlan.h"
+#include "en/tc_priv.h"
+
+struct pedit_headers_action;
+
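+/* A vlan modify action is offloaded as a rewrite of the VLAN TCI via a
+ * synthetic Ethernet pedit. Only the VID bits may change: the rule must
+ * match on a cvlan tag, and the prio in the action must equal the
+ * matched prio.
+ */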
+int
+mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
+ const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack)
+{
+ u16 mask16 = VLAN_VID_MASK;
+ u16 val16 = act->vlan.vid & VLAN_VID_MASK;
+ const struct flow_action_entry pedit_act = {
+ .id = FLOW_ACTION_MANGLE,
+ .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
+ .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
+ .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
+ .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
+ };
+ u8 match_prio_mask, match_prio_val;
+ void *headers_c, *headers_v;
+ int err;
+
+ headers_c = mlx5e_get_match_headers_criteria(*action, &parse_attr->spec);
+ headers_v = mlx5e_get_match_headers_value(*action, &parse_attr->spec);
+
+ if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN rewrite action must have VLAN protocol match");
+ return -EOPNOTSUPP;
+ }
+
+ match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
+ match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
+ if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing VLAN prio is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlx5e_tc_act_pedit_parse_action(priv, &pedit_act, namespace, parse_attr, hdrs,
+ NULL, extack);
+ *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ return err;
+}
+
+static bool
+tc_act_can_offload_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index)
+{
+ return true;
+}
+
+static int
+tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ enum mlx5_flow_namespace_type ns_type;
+ int err;
+
+ ns_type = mlx5e_get_flow_namespace(parse_state->flow);
+ err = mlx5e_tc_act_vlan_add_rewrite_action(priv, ns_type, act,
+ attr->parse_attr, parse_state->hdrs,
+ &attr->action, parse_state->extack);
+ if (err)
+ return err;
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ attr->esw_attr->split_count = attr->esw_attr->out_count;
+
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle = {
+ .can_offload = tc_act_can_offload_vlan_mangle,
+ .parse_action = tc_act_parse_vlan_mangle,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index df6888c4793c..ff4b4f8a5a9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -5,6 +5,7 @@
#include <net/psample.h>
#include "en/mapping.h"
#include "en/tc/post_act.h"
+#include "en/mod_hdr.h"
#include "sample.h"
#include "eswitch.h"
#include "en_tc.h"
@@ -255,12 +256,12 @@ sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
goto err_modify_hdr;
}
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
return modify_hdr;
err_modify_hdr:
err_post_act:
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
err_set_regc0:
return ERR_PTR(err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 2445e2ae3324..4a0d38d219ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -14,6 +14,7 @@
#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <linux/xarray.h>
+#include <linux/if_macvlan.h>
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
@@ -36,6 +37,12 @@
#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen)
#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
+/* Statically allocate modify actions for
+ * IPv6 and port NAT (5) + tuple fields (4) + NIC mode zone restore (1) = 10.
+ * The array is grown dynamically if more are needed (e.g. IPv6 SNAT + DNAT).
+ */
+#define MLX5_CT_MIN_MOD_ACTS 10
+
#define ct_dbg(fmt, args...)\
netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
@@ -320,7 +327,33 @@ mlx5_tc_ct_rule_to_tuple_nat(struct mlx5_ct_tuple *tuple,
}
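+/* Derive a flow_source steering hint from the conntrack tuple's ingress
+ * device: LOCAL_VPORT for a VF rep, UPLINK for an uplink rep or a
+ * tunnel/LAG device, recursing through vlan and macvlan upper devices.
+ */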
static int
-mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
+mlx5_tc_ct_get_flow_source_match(struct mlx5_tc_ct_priv *ct_priv,
+ struct net_device *ndev)
+{
+ struct mlx5e_priv *other_priv = netdev_priv(ndev);
+ struct mlx5_core_dev *mdev = ct_priv->dev;
+ bool vf_rep, uplink_rep;
+
+ vf_rep = mlx5e_eswitch_vf_rep(ndev) && mlx5_same_hw_devs(mdev, other_priv->mdev);
+ uplink_rep = mlx5e_eswitch_uplink_rep(ndev) && mlx5_same_hw_devs(mdev, other_priv->mdev);
+
+ if (vf_rep)
+ return MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
+ if (uplink_rep)
+ return MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ if (is_vlan_dev(ndev))
+ return mlx5_tc_ct_get_flow_source_match(ct_priv, vlan_dev_real_dev(ndev));
+ if (netif_is_macvlan(ndev))
+ return mlx5_tc_ct_get_flow_source_match(ct_priv, macvlan_dev_real_dev(ndev));
+ if (mlx5e_get_tc_tun(ndev) || netif_is_lag_master(ndev))
+ return MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+
+ return MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT;
+}
+
+static int
+mlx5_tc_ct_set_tuple_match(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_flow_spec *spec,
struct flow_rule *rule)
{
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -335,8 +368,7 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
flow_rule_match_basic(rule, &match);
- mlx5e_tc_set_ethertype(priv->mdev, &match, true, headers_c,
- headers_v);
+ mlx5e_tc_set_ethertype(ct_priv->dev, &match, true, headers_c, headers_v);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
match.mask->ip_proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
@@ -432,6 +464,23 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
ntohs(match.key->flags));
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
+ struct flow_match_meta match;
+
+ flow_rule_match_meta(rule, &match);
+
+ if (match.key->ingress_ifindex & match.mask->ingress_ifindex) {
+ struct net_device *dev;
+
+ dev = dev_get_by_index(&init_net, match.key->ingress_ifindex);
+ if (dev && MLX5_CAP_ESW_FLOWTABLE(ct_priv->dev, flow_source))
+ spec->flow_context.flow_source =
+ mlx5_tc_ct_get_flow_source_match(ct_priv, dev);
+
+ dev_put(dev);
+ }
+ }
+
return 0;
}
@@ -609,22 +658,15 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
struct flow_action *flow_action = &flow_rule->action;
struct mlx5_core_dev *mdev = ct_priv->dev;
struct flow_action_entry *act;
- size_t action_size;
char *modact;
int err, i;
- action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
-
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_MANGLE: {
- err = alloc_mod_hdr_actions(mdev, ct_priv->ns_type,
- mod_acts);
- if (err)
- return err;
-
- modact = mod_acts->actions +
- mod_acts->num_actions * action_size;
+ modact = mlx5e_mod_hdr_alloc(mdev, ct_priv->ns_type, mod_acts);
+ if (IS_ERR(modact))
+ return PTR_ERR(modact);
err = mlx5_tc_ct_parse_mangle_to_mod_act(act, modact);
if (err)
@@ -652,7 +694,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5e_mod_hdr_handle **mh,
u8 zone_restore_id, bool nat)
{
- struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+ DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
+ DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
struct flow_action_entry *meta;
u16 ct_state = 0;
int err;
@@ -706,11 +749,11 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
}
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
return 0;
err_mapping:
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
return err;
}
@@ -770,7 +813,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
attr->esw_attr->in_mdev = priv->mdev;
- mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
+ mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
@@ -907,12 +950,9 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_tuple rev_tuple = entry->tuple;
struct mlx5_ct_counter *shared_counter;
struct mlx5_ct_entry *rev_entry;
- __be16 tmp_port;
/* get the reversed tuple */
- tmp_port = rev_tuple.port.src;
- rev_tuple.port.src = rev_tuple.port.dst;
- rev_tuple.port.dst = tmp_port;
+ swap(rev_tuple.port.src, rev_tuple.port.dst);
if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
__be32 tmp_addr = rev_tuple.ip.src_v4;
@@ -1460,7 +1500,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
}
pre_ct->miss_rule = rule;
- dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(&pre_mod_acts);
kvfree(spec);
return 0;
@@ -1469,7 +1509,7 @@ err_miss_rule:
err_flow_rule:
mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr);
err_mapping:
- dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(&pre_mod_acts);
kvfree(spec);
return err;
}
@@ -1865,14 +1905,14 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
attr->ct_attr.ct_flow = ct_flow;
- dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(&pre_mod_acts);
return ct_flow->pre_ct_rule;
err_insert_orig:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
- dealloc_mod_hdr_actions(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(&pre_mod_acts);
mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
err_get_chain:
kfree(ct_flow->pre_ct_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index b689701ac7d8..f832c26ff2c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -5,11 +5,14 @@
#define __MLX5_EN_TC_PRIV_H__
#include "en_tc.h"
+#include "en/tc/act/act.h"
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
#define MLX5E_TC_MAX_SPLITS 1
+#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains)
+
enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
@@ -37,6 +40,7 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
struct ethhdr eth;
+ struct mlx5e_tc_act_parse_state parse_state;
};
/* Helper struct for accessing a struct containing list_head array.
@@ -115,7 +119,11 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr);
+bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
+bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
+int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow);
+bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
@@ -176,4 +184,8 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);
+
+void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
+void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
+
#endif /* __MLX5_EN_TC_PRIV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index a5e450973225..378fc8e3bd97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */
+#include <net/inet_ecn.h>
#include <net/vxlan.h>
#include <net/gre.h>
#include <net/geneve.h>
@@ -103,7 +104,7 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
}
static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
+ struct net_device *dev,
struct mlx5e_tc_tun_route_attr *attr)
{
struct net_device *route_dev;
@@ -122,13 +123,13 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
attr->fl.fl4.flowi4_oif = uplink_dev->ifindex;
} else {
- struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(dev);
if (tunnel && tunnel->get_remote_ifindex)
- attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(mirred_dev);
+ attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(dev);
}
- rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4);
+ rt = ip_route_output_key(dev_net(dev), &attr->fl.fl4);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -235,7 +236,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos;
+ attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
@@ -350,7 +351,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos;
+ attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
@@ -440,10 +441,10 @@ release_neigh:
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
+ struct net_device *dev,
struct mlx5e_tc_tun_route_attr *attr)
{
- struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(dev);
struct net_device *route_dev;
struct net_device *out_dev;
struct dst_entry *dst;
@@ -451,8 +452,8 @@ static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
int ret;
if (tunnel && tunnel->get_remote_ifindex)
- attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(mirred_dev);
- dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6,
+ attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(dev);
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(dev), NULL, &attr->fl.fl6,
NULL);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -708,7 +709,8 @@ release_neigh:
int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *flow_attr)
+ struct mlx5_flow_attr *flow_attr,
+ struct net_device *filter_dev)
{
struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr;
struct mlx5e_tc_int_port *int_port;
@@ -720,14 +722,14 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
/* Addresses are swapped for decap */
attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4;
attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4;
- err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr);
+ err = mlx5e_route_lookup_ipv4_get(priv, filter_dev, &attr);
}
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
else if (flow_attr->tun_ip_version == 6) {
/* Addresses are swapped for decap */
attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6;
attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6;
- err = mlx5e_route_lookup_ipv6_get(priv, priv->netdev, &attr);
+ err = mlx5e_route_lookup_ipv6_get(priv, filter_dev, &attr);
}
#endif
else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index aa092eaeaec3..b38f693bbb52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -94,7 +94,8 @@ mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
#endif
int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- struct mlx5_flow_attr *attr);
+ struct mlx5_flow_attr *attr,
+ struct net_device *filter_dev);
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 042b1abe1437..9918ed8c059b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -1159,7 +1159,7 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
tbl_time_before = mlx5e_route_tbl_get_last_update(priv);
tbl_time_after = tbl_time_before;
- err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr);
+ err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr, parse_attr->filter_dev);
if (err || !esw_attr->rx_tun_attr->decap_vport)
goto out;
@@ -1480,7 +1480,7 @@ static void mlx5e_reoffload_decap(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
spec = &parse_attr->spec;
- err = mlx5e_tc_tun_route_lookup(priv, spec, attr);
+ err = mlx5e_tc_tun_route_lookup(priv, spec, attr, parse_attr->filter_dev);
if (err) {
mlx5_core_warn(priv->mdev, "Failed to lookup route for flow, %d\n",
err);
@@ -1579,6 +1579,8 @@ mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv,
struct net_device *fib_dev;
fen_info = container_of(info, struct fib_entry_notifier_info, info);
+ if (fen_info->fi->nh)
+ return NULL;
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
if (!fib_dev || fib_dev->netdev_ops != &mlx5e_netdev_ops ||
fen_info->dst_len != 32)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 2f0df5cc1a2d..338d65e2c9ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -151,7 +151,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
rq->stats->xdp_redirect++;
return true;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rq->netdev, prog, act);
fallthrough;
case XDP_ABORTED:
xdp_abort:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index 7b562d2c8a19..279cd8f4e79f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -11,13 +11,13 @@ static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
{
struct device *dev = mlx5_core_dma_dev(priv->mdev);
- return xsk_pool_dma_map(pool, dev, 0);
+ return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC);
}
static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
struct xsk_buff_pool *pool)
{
- return xsk_pool_dma_unmap(pool, 0);
+ return xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
}
static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 538bc2419bd8..25eac9e20342 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -4,6 +4,7 @@
#include "setup.h"
#include "en/params.h"
#include "en/txrx.h"
+#include "en/health.h"
/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
* change unexpectedly, and mlx5e has a minimum valid stride size for striding
@@ -67,7 +68,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
rq->xsk_pool = pool;
- rq->stats = &c->priv->channel_stats[c->ix].xskrq;
+ rq->stats = &c->priv->channel_stats[c->ix]->xskrq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK;
err = mlx5e_rq_set_handlers(rq, params, xsk);
@@ -170,7 +171,13 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
+ /* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid
+ * activating XSKRQ in the middle of recovery.
+ */
+ mlx5e_reporter_icosq_suspend_recovery(c);
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+ mlx5e_reporter_icosq_resume_recovery(c);
+
/* TX queue is created active. */
spin_lock_bh(&c->async_icosq_lock);
@@ -180,6 +187,13 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
- mlx5e_deactivate_rq(&c->xskrq);
+ /* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the
+ * middle of recovery. Suspend the recovery to avoid it.
+ */
+ mlx5e_reporter_icosq_suspend_recovery(c);
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+ mlx5e_reporter_icosq_resume_recovery(c);
+ synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
+
/* TX queue is disabled on close. */
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 15711814d2d2..96064a2033f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -611,7 +611,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->rxq = rxq;
priv_rx->sk = sk;
- priv_rx->rq_stats = &priv->channel_stats[rxq].rq;
+ priv_rx->rq_stats = &priv->channel_stats[rxq]->rq;
priv_rx->sw_stats = &priv->tls->sw_stats;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index fe5d82fa6e92..49cca6bd49a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -556,7 +556,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->channel_stats[arfs_rule->rxq].rq.arfs_err++;
+ priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
mlx5e_dbg(HW, priv,
"%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
__func__, arfs_rule->filter_id, arfs_rule->rxq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c2ea5fad48dd..57d755db1cf5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -314,7 +314,9 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
}
static void mlx5e_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -380,7 +382,9 @@ unlock:
}
static int mlx5e_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -511,7 +515,8 @@ static int mlx5e_set_channels(struct net_device *dev,
}
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal)
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal)
{
struct dim_cq_moder *rx_moder, *tx_moder;
@@ -528,6 +533,11 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
coal->tx_max_coalesced_frames = tx_moder->pkts;
coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled;
+ kernel_coal->use_cqe_mode_rx =
+ MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER);
+ kernel_coal->use_cqe_mode_tx =
+ MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_CQE_BASED_MODER);
+
return 0;
}
@@ -538,7 +548,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}
#define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD
@@ -578,14 +588,26 @@ mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal
}
}
+/* Convert a boolean cqe_mode value to the mlx5 CQ period mode:
+ * true : MLX5_CQ_PERIOD_MODE_START_FROM_CQE
+ * false : MLX5_CQ_PERIOD_MODE_START_FROM_EQE
+ */
+static int cqe_mode_to_period_mode(bool val)
+{
+ return val ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal)
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
bool reset_rx, reset_tx;
bool reset = true;
+ u8 cq_period_mode;
int err = 0;
if (!MLX5_CAP_GEN(mdev, cq_moderation))
@@ -605,6 +627,12 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
return -ERANGE;
}
+ if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
+ !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) {
+ NL_SET_ERR_MSG_MOD(extack, "cqe_mode_rx/tx is not supported on this device");
+ return -EOPNOTSUPP;
+ }
+
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
@@ -621,6 +649,18 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
+ cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_rx);
+ if (cq_period_mode != rx_moder->cq_period_mode) {
+ mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
+ reset_rx = true;
+ }
+
+ cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_tx);
+ if (cq_period_mode != tx_moder->cq_period_mode) {
+ mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
+ reset_tx = true;
+ }
+
if (reset_rx) {
u8 mode = MLX5E_GET_PFLAG(&new_params,
MLX5E_PFLAG_RX_CQE_BASED_MODER);
@@ -656,9 +696,9 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_set_coalesce(priv, coal);
+ return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
@@ -1843,24 +1883,19 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
bool is_rx_cq)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_params new_params;
- bool mode_changed;
u8 cq_period_mode, current_cq_period_mode;
+ struct mlx5e_params new_params;
+
+ if (enable && !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
+ return -EOPNOTSUPP;
+
+ cq_period_mode = cqe_mode_to_period_mode(enable);
- cq_period_mode = enable ?
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
- MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
current_cq_period_mode = is_rx_cq ?
priv->channels.params.rx_cq_moderation.cq_period_mode :
priv->channels.params.tx_cq_moderation.cq_period_mode;
- mode_changed = cq_period_mode != current_cq_period_mode;
-
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
- !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
- return -EOPNOTSUPP;
- if (!mode_changed)
+ if (cq_period_mode == current_cq_period_mode)
return 0;
new_params = priv->channels.params;
@@ -1894,7 +1929,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;
- if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
+ if (new_val && !mlx5e_profile_feature_cap(priv->profile, PTP_RX) && rx_filter) {
netdev_err(priv->netdev,
"Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
return -EINVAL;
@@ -2358,7 +2393,8 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev,
const struct ethtool_ops mlx5e_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_USE_ADAPTIVE,
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_USE_CQE,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ext_state = mlx5e_get_link_ext_state,
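
    With ETHTOOL_COALESCE_USE_CQE advertised, the ethtool core fills the two
    u8 flags use_cqe_mode_rx/use_cqe_mode_tx in struct kernel_ethtool_coalesce
    from the netlink request (a recent ethtool exposes them as cqe-mode-rx and
    cqe-mode-tx on the -C command line). A sketch of the driver-side contract,
    mirroring cqe_mode_to_period_mode() above; the MLX5_CQ_PERIOD_MODE_*
    values come from the mlx5 headers:

        #include <linux/ethtool.h>

        static void demo_apply_cqe_mode(const struct kernel_ethtool_coalesce *kc,
                                        u8 *rx_mode, u8 *tx_mode)
        {
                /* Only meaningful when the driver sets ETHTOOL_COALESCE_USE_CQE
                 * in supported_coalesce_params; otherwise the core rejects the
                 * request before the driver op is called.
                 */
                *rx_mode = kc->use_cqe_mode_rx ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                                 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
                *tx_mode = kc->use_cqe_mode_tx ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                                 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
        }
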
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 65571593ec5c..bf80fb612449 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -37,6 +37,7 @@
#include <net/geneve.h>
#include <linux/bpf.h>
#include <linux/if_bridge.h>
+#include <linux/filter.h>
#include <net/page_pool.h>
#include <net/xdp_sock_drv.h>
#include "eswitch.h"
@@ -479,7 +480,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
- rq->stats = &c->priv->channel_stats[c->ix].rq;
+ rq->stats = &c->priv->channel_stats[c->ix]->rq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
err = mlx5e_rq_set_handlers(rq, params, NULL);
if (err)
@@ -1087,8 +1088,6 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
cancel_work_sync(&rq->dim.work);
- if (rq->icosq)
- cancel_work_sync(&rq->icosq->recover_work);
cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
@@ -1161,10 +1160,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->xsk_pool = xsk_pool;
sq->stats = sq->xsk_pool ?
- &c->priv->channel_stats[c->ix].xsksq :
+ &c->priv->channel_stats[c->ix]->xsksq :
is_redirect ?
- &c->priv->channel_stats[c->ix].xdpsq :
- &c->priv->channel_stats[c->ix].rq_xdpsq;
+ &c->priv->channel_stats[c->ix]->xdpsq :
+ &c->priv->channel_stats[c->ix]->rq_xdpsq;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1216,9 +1215,20 @@ static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
mlx5e_reporter_icosq_cqe_err(sq);
}
+static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
+{
+ struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
+ recover_work);
+
+ /* Not implemented yet. */
+
+ netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
+}
+
static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
struct mlx5e_sq_param *param,
- struct mlx5e_icosq *sq)
+ struct mlx5e_icosq *sq,
+ work_func_t recover_work_func)
{
void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
struct mlx5_core_dev *mdev = c->mdev;
@@ -1239,7 +1249,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
- INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
+ INIT_WORK(&sq->recover_work, recover_work_func);
return 0;
@@ -1575,13 +1585,14 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
mlx5e_reporter_tx_err_cqe(sq);
}
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
+static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
+ work_func_t recover_work_func)
{
struct mlx5e_create_sq_param csp = {};
int err;
- err = mlx5e_alloc_icosq(c, param, sq);
+ err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
if (err)
return err;
@@ -1620,7 +1631,7 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
synchronize_net(); /* Sync with NAPI. */
}
-void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
struct mlx5e_channel *c = sq->channel;
@@ -1928,7 +1939,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
params, &cparam->txq_sq, &c->sq[tc], tc,
qos_queue_group_id,
- &c->priv->channel_stats[c->ix].sq[tc]);
+ &c->priv->channel_stats[c->ix]->sq[tc]);
if (err)
goto err_close_sqs;
}
@@ -2084,11 +2095,15 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
spin_lock_init(&c->async_icosq_lock);
- err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
+ err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
+ mlx5e_async_icosq_err_cqe_work);
if (err)
goto err_close_xdpsq_cq;
- err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
+ mutex_init(&c->icosq_recovery_lock);
+
+ err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
+ mlx5e_icosq_err_cqe_work);
if (err)
goto err_close_async_icosq;
@@ -2156,9 +2171,12 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
mlx5e_close_xdpsq(&c->xdpsq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);
+ /* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
+ cancel_work_sync(&c->icosq.recover_work);
mlx5e_close_rq(&c->rq);
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
+ mutex_destroy(&c->icosq_recovery_lock);
mlx5e_close_icosq(&c->async_icosq);
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
@@ -2176,6 +2194,30 @@ static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
}
+static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
+{
+ if (ix > priv->stats_nch) {
+ netdev_warn(priv->netdev, "Unexpected channel stats index %d > %d\n", ix,
+ priv->stats_nch);
+ return -EINVAL;
+ }
+
+ if (priv->channel_stats[ix])
+ return 0;
+
+	/* Asymmetric dynamic memory allocation: stats for a channel are
+	 * allocated on first use and freed in mlx5e_priv_cleanup(), not on
+	 * channel closure.
+	 */
+ mlx5e_dbg(DRV, priv, "Creating channel stats %d\n", ix);
+ priv->channel_stats[ix] = kvzalloc_node(sizeof(**priv->channel_stats),
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (!priv->channel_stats[ix])
+ return -ENOMEM;
+ priv->stats_nch++;
+
+ return 0;
+}
+
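
    Two invariants make the lazy scheme above safe: stats_nch only grows
    (entries are never freed while the netdev exists), and readers iterate up
    to stats_nch rather than over the currently open channels, so counters
    from channels that were opened and later closed stay visible. A sketch of
    the read side under these assumptions:

        static u64 demo_sum_rx_packets(struct mlx5e_priv *priv)
        {
                u64 packets = 0;
                int i;

                /* stats_nch counts allocated entries, not open channels. */
                for (i = 0; i < priv->stats_nch; i++)
                        packets += priv->channel_stats[i]->rq.packets;

                return packets;
        }
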
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
@@ -2193,6 +2235,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
return err;
+ err = mlx5e_channel_stats_alloc(priv, ix, cpu);
+ if (err)
+ return err;
+
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@@ -2207,7 +2253,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->xdp = !!params->xdp_prog;
- c->stats = &priv->channel_stats[ix].ch;
+ c->stats = &priv->channel_stats[ix]->ch;
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
@@ -2598,7 +2644,7 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
}
}
-int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
+static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
{
u16 count = priv->channels.params.num_channels;
int err;
@@ -3371,7 +3417,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
int i;
for (i = 0; i < priv->stats_nch; i++) {
- struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
+ struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
int j;
@@ -3559,11 +3605,6 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
new_params = priv->channels.params;
if (enable) {
- if (MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
- netdev_warn(netdev, "Can't set HW-GRO when CQE compress is active\n");
- err = -EINVAL;
- goto out;
- }
new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
new_params.packet_merge.shampo.match_criteria_type =
MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
@@ -3724,12 +3765,11 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
static int mlx5e_handle_feature(struct net_device *netdev,
netdev_features_t *features,
- netdev_features_t wanted_features,
netdev_features_t feature,
mlx5e_feature_handler feature_handler)
{
- netdev_features_t changes = wanted_features ^ netdev->features;
- bool enable = !!(wanted_features & feature);
+ netdev_features_t changes = *features ^ netdev->features;
+ bool enable = !!(*features & feature);
int err;
if (!(changes & feature))
@@ -3737,22 +3777,22 @@ static int mlx5e_handle_feature(struct net_device *netdev,
err = feature_handler(netdev, enable);
if (err) {
+ MLX5E_SET_FEATURE(features, feature, !enable);
netdev_err(netdev, "%s feature %pNF failed, err %d\n",
enable ? "Enable" : "Disable", &feature, err);
return err;
}
- MLX5E_SET_FEATURE(features, feature, enable);
return 0;
}
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
{
- netdev_features_t oper_features = netdev->features;
+ netdev_features_t oper_features = features;
int err = 0;
#define MLX5E_HANDLE_FEATURE(feature, handler) \
- mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
+ mlx5e_handle_feature(netdev, &oper_features, feature, handler)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
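
    The rewritten mlx5e_handle_feature() starts oper_features from the
    requested set and flips a bit back only on failure, so ndo_set_features
    reports the state that actually took effect. The
    MLX5E_SET_FEATURE(features, feature, !enable) call above reduces to this
    bit manipulation (sketch, with F standing for a single feature flag):

        static void demo_revert_feature(netdev_features_t *features,
                                        netdev_features_t F, bool enable)
        {
                if (enable)             /* enabling F failed: clear it again  */
                        *features &= ~F;
                else                    /* disabling F failed: set it back on */
                        *features |= F;
        }
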
@@ -3826,6 +3866,11 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
features &= ~NETIF_F_RXHASH;
if (netdev->features & NETIF_F_RXHASH)
netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
+
+ if (features & NETIF_F_GRO_HW) {
+ netdev_warn(netdev, "Disabling HW-GRO, not supported when CQE compress is active\n");
+ features &= ~NETIF_F_GRO_HW;
+ }
}
if (mlx5e_is_uplink_rep(priv))
@@ -4038,7 +4083,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
goto err_unlock;
}
- if (!priv->profile->rx_ptp_support)
+ if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
err = mlx5e_hwstamp_config_no_ptp_rx(priv,
config.rx_filter != HWTSTAMP_FILTER_NONE);
else
@@ -4773,15 +4818,22 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
}
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
- netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
- netdev->hw_features |= NETIF_F_GSO_GRE;
- netdev->hw_enc_features |= NETIF_F_GSO_GRE;
- netdev->gso_partial_features |= NETIF_F_GSO_GRE;
+ netdev->hw_features |= NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM;
+ netdev->hw_enc_features |= NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM;
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
@@ -5093,9 +5145,23 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
- .rx_ptp_support = true,
+ .features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
+ BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
+ BIT(MLX5E_PROFILE_FEATURE_QOS_HTB),
};
+static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile)
+{
+ int nch;
+
+ nch = mlx5e_get_max_num_channels(mdev);
+
+ if (profile->max_nch_limit)
+ nch = min_t(int, nch, profile->max_nch_limit(mdev));
+ return nch;
+}
+
static unsigned int
mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
const struct mlx5e_profile *profile)
@@ -5104,7 +5170,7 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
unsigned int max_nch, tmp;
/* core resources */
- max_nch = mlx5e_get_max_num_channels(mdev);
+ max_nch = mlx5e_profile_max_num_channels(mdev, profile);
/* netdev rx queues */
tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
@@ -5128,12 +5194,17 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
struct net_device *netdev,
struct mlx5_core_dev *mdev)
{
+ int nch, num_txqs, node, i;
+
+ num_txqs = netdev->num_tx_queues;
+ nch = mlx5e_calc_max_nch(mdev, netdev, profile);
+ node = dev_to_node(mlx5_core_dma_dev(mdev));
+
/* priv init */
priv->mdev = mdev;
priv->netdev = netdev;
priv->msglevel = MLX5E_MSG_LEVEL;
- priv->max_nch = mlx5e_calc_max_nch(mdev, netdev, profile);
- priv->stats_nch = priv->max_nch;
+ priv->max_nch = nch;
priv->max_opened_tc = 1;
if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
@@ -5150,11 +5221,46 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->wq)
goto err_free_cpumask;
+ priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
+ if (!priv->txq2sq)
+ goto err_destroy_workqueue;
+
+ priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
+ if (!priv->tx_rates)
+ goto err_free_txq2sq;
+
+ priv->channel_tc2realtxq =
+ kcalloc_node(nch, sizeof(*priv->channel_tc2realtxq), GFP_KERNEL, node);
+ if (!priv->channel_tc2realtxq)
+ goto err_free_tx_rates;
+
+ for (i = 0; i < nch; i++) {
+ priv->channel_tc2realtxq[i] =
+ kcalloc_node(profile->max_tc, sizeof(**priv->channel_tc2realtxq),
+ GFP_KERNEL, node);
+ if (!priv->channel_tc2realtxq[i])
+ goto err_free_channel_tc2realtxq;
+ }
+
+ priv->channel_stats =
+ kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
+ if (!priv->channel_stats)
+ goto err_free_channel_tc2realtxq;
+
return 0;
+err_free_channel_tc2realtxq:
+ while (--i >= 0)
+ kfree(priv->channel_tc2realtxq[i]);
+ kfree(priv->channel_tc2realtxq);
+err_free_tx_rates:
+ kfree(priv->tx_rates);
+err_free_txq2sq:
+ kfree(priv->txq2sq);
+err_destroy_workqueue:
+ destroy_workqueue(priv->wq);
err_free_cpumask:
free_cpumask_var(priv->scratchpad.cpumask);
-
return -ENOMEM;
}
@@ -5166,6 +5272,14 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
if (!priv->mdev)
return;
+ for (i = 0; i < priv->stats_nch; i++)
+ kvfree(priv->channel_stats[i]);
+ kfree(priv->channel_stats);
+ for (i = 0; i < priv->max_nch; i++)
+ kfree(priv->channel_tc2realtxq[i]);
+ kfree(priv->channel_tc2realtxq);
+ kfree(priv->tx_rates);
+ kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
free_cpumask_var(priv->scratchpad.cpumask);
@@ -5181,13 +5295,44 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
memset(priv, 0, sizeof(*priv));
}
+static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile)
+{
+ unsigned int nch, ptp_txqs, qos_txqs;
+
+ nch = mlx5e_profile_max_num_channels(mdev, profile);
+
+ ptp_txqs = MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) &&
+ mlx5e_profile_feature_cap(profile, PTP_TX) ?
+ profile->max_tc : 0;
+
+ qos_txqs = mlx5_qos_is_supported(mdev) &&
+ mlx5e_profile_feature_cap(profile, QOS_HTB) ?
+ mlx5e_qos_max_leaf_nodes(mdev) : 0;
+
+ return nch * profile->max_tc + ptp_txqs + qos_txqs;
+}
+
+static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile)
+{
+ unsigned int nch;
+
+ nch = mlx5e_profile_max_num_channels(mdev, profile);
+
+ return nch * profile->rq_groups;
+}
+
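
    Worked example of the two helpers with hypothetical capability values -
    32 channels, max_tc = 8, rq_groups = 2 (regular + XSK), PTP TX supported,
    QoS HTB supported with 256 leaf nodes:

        unsigned int nch = 32, max_tc = 8, rq_groups = 2;
        unsigned int ptp_txqs = max_tc;                          /* 8   */
        unsigned int qos_txqs = 256;                             /* HTB leaves */
        unsigned int txqs = nch * max_tc + ptp_txqs + qos_txqs;  /* 520 */
        unsigned int rxqs = nch * rq_groups;                     /* 64  */

    These are the counts passed to alloc_etherdev_mqs() below, replacing the
    ad-hoc computation removed from mlx5e_probe() further down.
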
struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
- unsigned int txqs, unsigned int rxqs)
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile)
{
struct net_device *netdev;
+ unsigned int txqs, rxqs;
int err;
+ txqs = mlx5e_get_max_num_txqs(mdev, profile);
+ rxqs = mlx5e_get_max_num_rxqs(mdev, profile);
+
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
@@ -5389,7 +5534,7 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
static int mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
- struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+ struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
int err;
@@ -5412,7 +5557,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+ struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -5432,22 +5577,10 @@ static int mlx5e_probe(struct auxiliary_device *adev,
struct mlx5_core_dev *mdev = edev->mdev;
struct net_device *netdev;
pm_message_t state = {};
- unsigned int txqs, rxqs, ptp_txqs = 0;
struct mlx5e_priv *priv;
- int qos_sqs = 0;
int err;
- int nch;
-
- if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
- ptp_txqs = profile->max_tc;
-
- if (mlx5_qos_is_supported(mdev))
- qos_sqs = mlx5e_qos_max_leaf_nodes(mdev);
- nch = mlx5e_get_max_num_channels(mdev);
- txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
- rxqs = nch * profile->rq_groups;
- netdev = mlx5e_create_netdev(mdev, profile, txqs, rxqs);
+ netdev = mlx5e_create_netdev(mdev, profile);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
return -ENOMEM;
@@ -5456,7 +5589,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
mlx5e_build_nic_netdev(netdev);
priv = netdev_priv(netdev);
- dev_set_drvdata(&adev->dev, priv);
+ auxiliary_set_drvdata(adev, priv);
priv->profile = profile;
priv->ppriv = NULL;
@@ -5504,7 +5637,7 @@ err_destroy_netdev:
static void mlx5e_remove(struct auxiliary_device *adev)
{
- struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+ struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
pm_message_t state = {};
mlx5e_dcbnl_delete_app(priv);
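
    Throughout this file and en_rep.c below, the boolean rx_ptp_support field
    is replaced by a per-profile features bitmap plus the
    mlx5e_profile_feature_cap() test. A sketch of how the accompanying en.h
    change presumably defines that macro, given the
    BIT(MLX5E_PROFILE_FEATURE_*) initializers in the nic profile above and
    the short-name call sites:

        #define mlx5e_profile_feature_cap(profile, feature) \
                ((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature))

    so mlx5e_profile_feature_cap(priv->profile, PTP_RX) tests
    BIT(MLX5E_PROFILE_FEATURE_PTP_RX) in profile->features.
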
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 48895d79796a..06d1f46f1688 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -50,6 +50,7 @@
#include "fs_core.h"
#include "lib/mlx5.h"
#include "lib/devcom.h"
+#include "lib/vxlan.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
#include "en_accel/ipsec.h"
@@ -219,16 +220,22 @@ static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
}
}
-static void mlx5e_rep_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+static void
+mlx5e_rep_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
mlx5e_ethtool_get_ringparam(priv, param);
}
-static int mlx5e_rep_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+static int
+mlx5e_rep_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -258,7 +265,7 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
@@ -268,7 +275,7 @@ static int mlx5e_rep_set_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- return mlx5e_ethtool_set_coalesce(priv, coal);
+ return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
@@ -585,6 +592,12 @@ bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
+static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
+{
+ return (1 << MLX5_CAP_GEN(mdev, log_max_tir)) /
+ mlx5_eswitch_get_total_vports(mdev);
+}
+
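
    TIRs are a device-global resource, so a per-vport representor profile has
    to split them across all vports. Worked example with hypothetical values,
    log_max_tir = 12 and 128 total vports:

        int limit = (1 << 12) / 128;    /* 4096 TIRs / 128 vports = 32 channels */
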
static void mlx5e_build_rep_params(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1027,6 +1040,7 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
rtnl_lock();
if (netif_running(netdev))
mlx5e_open(netdev);
+ udp_tunnel_nic_reset_ntf(priv->netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -1048,6 +1062,7 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
mlx5_notifier_unregister(mdev, &priv->events_nb);
mlx5e_rep_tc_disable(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
+ mlx5_vxlan_reset_to_default(mdev->vxlan);
}
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
@@ -1107,7 +1122,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
.stats_grps_num = mlx5e_rep_stats_grps_num,
- .rx_ptp_support = false,
+ .max_nch_limit = mlx5e_rep_max_nch_limit,
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
@@ -1128,7 +1143,6 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
- .rx_ptp_support = false,
};
/* e-Switch vport representors */
@@ -1179,14 +1193,10 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
struct devlink_port *dl_port;
struct net_device *netdev;
struct mlx5e_priv *priv;
- unsigned int txqs, rxqs;
- int nch, err;
+ int err;
profile = &mlx5e_rep_profile;
- nch = mlx5e_get_max_num_channels(dev);
- txqs = nch * profile->max_tc;
- rxqs = nch * profile->rq_groups;
- netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs);
+ netdev = mlx5e_create_netdev(dev, profile);
if (!netdev) {
mlx5_core_warn(dev,
"Failed to create representor netdev for vport %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 793511d5ee4c..e86ccc22fb82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -37,6 +37,7 @@
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
+#include <net/gro.h>
#include <net/udp.h>
#include <net/tcp.h>
#include "en.h"
@@ -278,8 +279,8 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
if (unlikely(!dma_info->page))
return -ENOMEM;
- dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
- PAGE_SIZE, rq->buff.map_dir);
+ dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE,
+ rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
page_pool_recycle_direct(rq->page_pool, dma_info->page);
dma_info->page = NULL;
@@ -300,7 +301,8 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
{
- dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
+ dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
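
    Mapping with DMA_ATTR_SKIP_CPU_SYNC means the map/unmap calls above no
    longer perform cache maintenance on the whole page; the RX path is then
    responsible for syncing exactly the bytes the device wrote before the CPU
    reads them. A sketch of that division of labor, assuming a hypothetical
    receive completion of frame_len bytes:

        #include <linux/dma-mapping.h>

        static void demo_rx_complete(struct device *dev, dma_addr_t addr,
                                     u32 frame_len)
        {
                /* Sync only the received bytes, not the whole PAGE_SIZE. */
                dma_sync_single_range_for_cpu(dev, addr, 0, frame_len,
                                              DMA_FROM_DEVICE);
        }
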
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
@@ -618,7 +620,7 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_icosq *sq = rq->icosq;
int i, err, max_klm_entries, len;
- max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
+ max_klm_entries = MLX5E_MAX_KLM_PER_WQE;
klm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
@@ -1602,6 +1604,12 @@ static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
}
}
+static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ trigger_report(rq, cqe);
+ rq->stats->wqe_err++;
+}
+
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
@@ -1615,8 +1623,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- trigger_report(rq, cqe);
- rq->stats->wqe_err++;
+ mlx5e_handle_rx_err_cqe(rq, cqe);
goto free_wqe;
}
@@ -1669,7 +1676,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- rq->stats->wqe_err++;
+ mlx5e_handle_rx_err_cqe(rq, cqe);
goto free_wqe;
}
@@ -1718,8 +1725,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
wi->consumed_strides += cstrides;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- trigger_report(rq, cqe);
- rq->stats->wqe_err++;
+ mlx5e_handle_rx_err_cqe(rq, cqe);
goto mpwrq_cqe_out;
}
@@ -1987,8 +1993,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
wi->consumed_strides += cstrides;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- trigger_report(rq, cqe);
- stats->wqe_err++;
+ mlx5e_handle_rx_err_cqe(rq, cqe);
goto mpwrq_cqe_out;
}
@@ -2057,8 +2062,7 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
wi->consumed_strides += cstrides;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- trigger_report(rq, cqe);
- rq->stats->wqe_err++;
+ mlx5e_handle_rx_err_cqe(rq, cqe);
goto mpwrq_cqe_out;
}
@@ -2188,7 +2192,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
priv = mlx5i_epriv(netdev);
tstamp = &priv->tstamp;
- stats = &priv->channel_stats[rq->ix].rq;
+ stats = rq->stats;
flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
g = (flags_rqpn >> 28) & 3;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 2a9bfc3ffa2e..26e326fe503c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -35,6 +35,7 @@
#include "en_accel/tls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
+#include "en/port.h"
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
@@ -463,7 +464,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats =
- &priv->channel_stats[i];
+ priv->channel_stats[i];
int j;
mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
@@ -1158,12 +1159,99 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
-void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
- struct ethtool_fec_stats *fec_stats)
+static int fec_num_lanes(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
+ int err;
+
+ MLX5_SET(pmlp_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_PMLP, 0, 0);
+ if (err)
+ return 0;
+
+ return MLX5_GET(pmlp_reg, out, width);
+}
+
+static int fec_active_mode(struct mlx5_core_dev *mdev)
+{
+ unsigned long fec_active_long;
+ u32 fec_active;
+
+ if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
+ return MLX5E_FEC_NOFEC;
+
+ fec_active_long = fec_active;
+ return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
+}
+
+#define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
+ fec_stats->corrected_blocks.lanes[(idx)] = \
+ MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
+ fc_fec_corrected_blocks_lane##idx); \
+ fec_stats->uncorrectable_blocks.lanes[(idx)] = \
+ MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
+ fc_fec_uncorrectable_blocks_lane##idx); \
+})
+
+static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
+ u32 *ppcnt, u8 lanes)
+{
+ if (lanes > 3) { /* 4 lanes */
+ MLX5E_STATS_SET_FEC_BLOCK(3);
+ MLX5E_STATS_SET_FEC_BLOCK(2);
+ }
+ if (lanes > 1) /* 2 lanes */
+ MLX5E_STATS_SET_FEC_BLOCK(1);
+ if (lanes > 0) /* 1 lane */
+ MLX5E_STATS_SET_FEC_BLOCK(0);
+}
+
+static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
+{
+ fec_stats->corrected_blocks.total =
+ MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
+ rs_fec_corrected_blocks);
+ fec_stats->uncorrectable_blocks.total =
+ MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
+ rs_fec_uncorrectable_blocks);
+}
+
+static void fec_set_block_stats(struct mlx5e_priv *priv,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int mode = fec_active_mode(mdev);
+
+ if (mode == MLX5E_FEC_NOFEC)
+ return;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+ if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
+ return;
+
+ switch (mode) {
+ case MLX5E_FEC_RS_528_514:
+ case MLX5E_FEC_RS_544_514:
+ case MLX5E_FEC_LLRS_272_257_1:
+ fec_set_rs_stats(fec_stats, out);
+ return;
+ case MLX5E_FEC_FIRECODE:
+ fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
+ }
+}
+
+static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
+ struct ethtool_fec_stats *fec_stats)
{
u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
struct mlx5_core_dev *mdev = priv->mdev;
- u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
@@ -1181,6 +1269,13 @@ void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
phy_corrected_bits);
}
+void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
+ struct ethtool_fec_stats *fec_stats)
+{
+ fec_set_corrected_bits_total(priv, fec_stats);
+ fec_set_block_stats(priv, fec_stats);
+}
+
#define PPORT_ETH_EXT_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
@@ -2076,7 +2171,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
for (i = 0; i < NUM_PTP_CH_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_ch_stats_desc[i].format);
+ "%s", ptp_ch_stats_desc[i].format);
if (priv->tx_ptp_opened) {
for (tc = 0; tc < priv->max_opened_tc; tc++)
@@ -2197,21 +2292,21 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
ch_stats_desc, j);
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
rq_stats_desc, j);
for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
xskrq_stats_desc, j);
for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
rq_xdpsq_stats_desc, j);
}
@@ -2219,17 +2314,17 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
sq_stats_desc, j);
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
xsksq_stats_desc, j);
for (j = 0; j < NUM_XDPSQ_STATS; j++)
data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
xdpsq_stats_desc, j);
}
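
    One change in this file is worth calling out: the PTP string fill above
    now routes the descriptor format through "%s". Passing a non-literal as
    the format argument means any '%' it contained would be parsed as a
    conversion specifier and read bogus varargs; with "%s" the string is
    copied verbatim:

        const char *fmt = ptp_ch_stats_desc[i].format;

        sprintf(data, fmt);        /* fragile: fmt is interpreted as a format */
        sprintf(data, "%s", fmt);  /* safe: fmt is copied literally */
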
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3d45f4ae80c0..3d908a7e1406 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -39,10 +39,6 @@
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
-#include <linux/if_macvlan.h>
-#include <net/tc_act/tc_pedit.h>
-#include <net/tc_act/tc_csum.h>
-#include <net/psample.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
@@ -62,6 +58,7 @@
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
+#include "en/tc/act/act.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
@@ -70,9 +67,6 @@
#include "lag/lag.h"
#include "lag/mp.h"
-#define nic_chains(priv) ((priv)->fs.tc.chains)
-#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
-
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
@@ -209,12 +203,9 @@ mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
char *modact;
int err;
- err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
- if (err)
- return err;
-
- modact = mod_hdr_acts->actions +
- (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
+ modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
+ if (IS_ERR(modact))
+ return PTR_ERR(modact);
/* Firmware has 5bit length field and 0 means 32bits */
if (mlen == 32)
@@ -333,7 +324,7 @@ void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
char *modact;
- modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);
+ modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);
/* Firmware has 5bit length field and 0 means 32bits */
if (mlen == 32)
@@ -403,7 +394,7 @@ bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
return flow_flag_test(flow, ESWITCH);
}
-static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
+bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, FT);
}
@@ -413,7 +404,7 @@ bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
return flow_flag_test(flow, OFFLOADED);
}
-static int get_flow_name_space(struct mlx5e_tc_flow *flow)
+int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
return mlx5e_is_eswitch_flow(flow) ?
MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
@@ -424,7 +415,7 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
+ return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
&priv->fs.tc.mod_hdr;
}
@@ -437,7 +428,7 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_mod_hdr_handle *mh;
mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
- get_flow_name_space(flow),
+ mlx5e_get_flow_namespace(flow),
&parse_attr->mod_hdr_acts);
if (IS_ERR(mh))
return PTR_ERR(mh);
@@ -941,7 +932,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
- struct mlx5_fs_chains *nic_chains = nic_chains(priv);
+ struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5_flow_destination dest[2] = {};
@@ -1076,7 +1067,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
@@ -1095,7 +1086,7 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_fs_chains *nic_chains = nic_chains(priv);
+ struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
mlx5_del_flow_rules(rule);
@@ -1127,21 +1118,21 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
mutex_lock(&priv->fs.tc.t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
- mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
+ mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
priv->fs.tc.t = NULL;
}
mutex_unlock(&priv->fs.tc.t_lock);
- kvfree(attr->parse_attr);
-
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
- mlx5_fc_destroy(priv->mdev, attr->counter);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ mlx5_fc_destroy(priv->mdev, attr->counter);
if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
+ kvfree(attr->parse_attr);
kfree(flow->attr);
}
@@ -1196,21 +1187,16 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
goto offload_rule_0;
- if (flow_flag_test(flow, CT)) {
- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
- return;
- }
-
- if (flow_flag_test(flow, SAMPLE)) {
- mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
- return;
- }
-
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
+ if (flow_flag_test(flow, CT))
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+ else if (flow_flag_test(flow, SAMPLE))
+ mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
+ else
offload_rule_0:
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
struct mlx5_flow_handle *
@@ -1308,8 +1294,6 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow)
mutex_unlock(&uplink_priv->unready_flows_lock);
}
-static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
-
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
struct mlx5_core_dev *out_mdev, *route_mdev;
@@ -1324,7 +1308,7 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
route_mdev->coredev_type != MLX5_COREDEV_VF)
return false;
- return same_hw_devs(out_priv, route_priv);
+ return mlx5e_same_hw_devs(out_priv, route_priv);
}
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
@@ -1371,7 +1355,7 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
struct mlx5_modify_hdr *mod_hdr;
mod_hdr = mlx5_modify_header_alloc(priv->mdev,
- get_flow_name_space(flow),
+ mlx5e_get_flow_namespace(flow),
mod_hdr_acts->num_actions,
mod_hdr_acts->actions);
if (IS_ERR(mod_hdr))
@@ -1445,7 +1429,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
metadata);
if (err)
- return err;
+ goto err_out;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
}
@@ -1461,13 +1447,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (attr->chain) {
NL_SET_ERR_MSG_MOD(extack,
"Internal port rule is only supported on chain 0");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_out;
}
if (attr->dest_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Internal port rule offload doesn't support goto action");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_out;
}
int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
@@ -1475,8 +1463,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
flow_flag_test(flow, EGRESS) ?
MLX5E_TC_INT_PORT_EGRESS :
MLX5E_TC_INT_PORT_INGRESS);
- if (IS_ERR(int_port))
- return PTR_ERR(int_port);
+ if (IS_ERR(int_port)) {
+ err = PTR_ERR(int_port);
+ goto err_out;
+ }
esw_attr->int_port = int_port;
}
@@ -1624,15 +1614,12 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- dealloc_mod_hdr_actions(&attr->parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
if (vf_tun && attr->modify_hdr)
mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
else
mlx5e_detach_mod_hdr(priv, flow);
}
- kfree(attr->sample_attr);
- kvfree(attr->parse_attr);
- kvfree(attr->esw_attr->rx_tun_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);
@@ -1646,6 +1633,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
+ kfree(attr->sample_attr);
+ kvfree(attr->esw_attr->rx_tun_attr);
+ kvfree(attr->parse_attr);
kfree(flow->attr);
}
@@ -1948,6 +1938,111 @@ u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
return ip_version;
}
+/* The tunnel device follows RFC 6040 (see include/net/inet_ecn.h) and
+ * changes the inner ip_ecn depending on the inner and outer ip_ecn as follows:
+ * +---------+----------------------------------------+
+ * |Arriving | Arriving Outer Header |
+ * | Inner +---------+---------+---------+----------+
+ * | Header | Not-ECT | ECT(0) | ECT(1) | CE |
+ * +---------+---------+---------+---------+----------+
+ * | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop> |
+ * | ECT(0) | ECT(0) | ECT(0) | ECT(1) | CE* |
+ * | ECT(1) | ECT(1) | ECT(1) | ECT(1)* | CE* |
+ * | CE | CE | CE | CE | CE |
+ * +---------+---------+---------+---------+----------+
+ *
+ * TC matches on the inner header after decapsulation on the tunnel device,
+ * but HW offload matches the inner ip_ecn value before the hardware decap
+ * action.
+ *
+ * Cells marked with * are changed from the original inner packet ip_ecn
+ * value during decap, so matching those values on the inner ip_ecn before
+ * decap will fail.
+ *
+ * The following helper allows offload when the inner ip_ecn won't be changed
+ * by the outer ip_ecn, except for outer ip_ecn = CE, where in all cases the
+ * inner ip_ecn will be changed to CE, and as such we can drop the inner
+ * ip_ecn = CE match.
+ */
+
+static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
+ struct flow_cls_offload *f,
+ bool *match_inner_ecn)
+{
+ u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_match_ip match;
+
+ *match_inner_ecn = true;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ flow_rule_match_enc_ip(rule, &match);
+ outer_ecn_key = match.key->tos & INET_ECN_MASK;
+ outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ flow_rule_match_ip(rule, &match);
+ inner_ecn_key = match.key->tos & INET_ECN_MASK;
+ inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
+ }
+
+ if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
+ netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!outer_ecn_mask) {
+ if (!inner_ecn_mask)
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
+ netdev_warn(priv->netdev,
+ "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
+ netdev_warn(priv->netdev,
+ "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!inner_ecn_mask)
+ return 0;
+
+ /* Both inner and outer have full mask on ecn */
+
+ if (outer_ecn_key == INET_ECN_ECT_1) {
+ /* inner ecn might change by DECAP action */
+
+ NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
+ netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (outer_ecn_key != INET_ECN_CE)
+ return 0;
+
+ if (inner_ecn_key != INET_ECN_CE) {
+ /* Can't happen in software, as packet ecn will be changed to CE after decap */
+ NL_SET_ERR_MSG_MOD(extack,
+ "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
+ netdev_warn(priv->netdev,
+ "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+	/* outer ecn = CE, inner ecn = CE; as decap will change the inner ecn
+	 * to CE in any case, drop the match on the inner ecn
+	 */
+ *match_inner_ecn = false;
+
+ return 0;
+}
+
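
    Two worked cases against the table above, assuming hypothetical flower
    matches on a tunnel device:

        bool match_inner_ecn;
        int err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);

        /* enc_tos ecn = ECT(1), tos ecn = ECT(0): err = -EOPNOTSUPP, because
         * decap may rewrite inner ECT(0) to ECT(1), so the pre-decap HW match
         * would disagree with the post-decap software match.
         *
         * enc_tos ecn = CE, tos ecn = CE: err = 0 and match_inner_ecn = false,
         * because decap forces the inner ecn to CE regardless of its original
         * value, so only the outer CE match needs to be programmed.
         */
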
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
@@ -2053,16 +2148,14 @@ static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
outer_headers);
}
-static void *get_match_headers_value(u32 flags,
- struct mlx5_flow_spec *spec)
+void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
get_match_inner_headers_value(spec) :
get_match_outer_headers_value(spec);
}
-static void *get_match_headers_criteria(u32 flags,
- struct mlx5_flow_spec *spec)
+void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
get_match_inner_headers_criteria(spec) :
@@ -2143,6 +2236,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
enum fs_flow_table_type fs_type;
+ bool match_inner_ecn = true;
u16 addr_type = 0;
u8 ip_proto = 0;
u8 *match_level;
@@ -2196,6 +2290,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
headers_c = get_match_inner_headers_criteria(spec);
headers_v = get_match_inner_headers_value(spec);
}
+
+ err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);
+ if (err)
+ return err;
}
err = mlx5e_flower_parse_meta(filter_dev, f);
@@ -2419,10 +2517,12 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
struct flow_match_ip match;
flow_rule_match_ip(rule, &match);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
- match.mask->tos & 0x3);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
- match.key->tos & 0x3);
+ if (match_inner_ecn) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
+ match.mask->tos & 0x3);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
+ match.key->tos & 0x3);
+ }
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
match.mask->tos >> 2);
@@ -2607,55 +2707,6 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
return err;
}
-struct pedit_headers {
- struct ethhdr eth;
- struct vlan_hdr vlan;
- struct iphdr ip4;
- struct ipv6hdr ip6;
- struct tcphdr tcp;
- struct udphdr udp;
-};
-
-struct pedit_headers_action {
- struct pedit_headers vals;
- struct pedit_headers masks;
- u32 pedits;
-};
-
-static int pedit_header_offsets[] = {
- [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
- [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
- [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
- [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
- [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
-};
-
-#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
-
-static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
- struct pedit_headers_action *hdrs,
- struct netlink_ext_ack *extack)
-{
- u32 *curr_pmask, *curr_pval;
-
- curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
- curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
-
- if (*curr_pmask & mask) { /* disallow acting twice on the same location */
- NL_SET_ERR_MSG_MOD(extack,
- "curr_pmask and new mask same. Acting twice on same location");
- goto out_err;
- }
-
- *curr_pmask |= mask;
- *curr_pval |= (val & mask);
-
- return 0;
-
-out_err:
- return -EOPNOTSUPP;
-}
-
struct mlx5_fields {
u8 field;
u8 field_bsize;
@@ -2767,26 +2818,23 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
- int i, action_size, first, last, next_z;
void *headers_c, *headers_v, *action, *vals_p;
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5e_tc_mod_hdr_acts *mod_acts;
- struct mlx5_fields *f;
unsigned long mask, field_mask;
- int err;
+ int i, first, last, next_z;
+ struct mlx5_fields *f;
u8 cmd;
mod_acts = &parse_attr->mod_hdr_acts;
- headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
- headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
+ headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
+ headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
set_masks = &hdrs[0].masks;
add_masks = &hdrs[1].masks;
set_vals = &hdrs[0].vals;
add_vals = &hdrs[1].vals;
- action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
-
for (i = 0; i < ARRAY_SIZE(fields); i++) {
bool skip;
@@ -2854,18 +2902,16 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
- if (err) {
+ action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
+ if (IS_ERR(action)) {
NL_SET_ERR_MSG_MOD(extack,
"too many pedit actions, can't offload");
mlx5_core_warn(priv->mdev,
"mlx5: parsed %d pedit actions, can't do more\n",
mod_acts->num_actions);
- return err;
+ return PTR_ERR(action);
}
- action = mod_acts->actions +
- (mod_acts->num_actions * action_size);
MLX5_SET(set_action_in, action, action_type, cmd);
MLX5_SET(set_action_in, action, field, f->field);
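
    The open-coded grow-then-index pattern is replaced by
    mlx5e_mod_hdr_alloc(), which grows the action array if needed and returns
    the next free action slot (or an ERR_PTR). A sketch of the new calling
    convention, assuming the helpers this series exports from en/mod_hdr.h:

        char *modact;

        modact = mlx5e_mod_hdr_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL, mod_hdr_acts);
        if (IS_ERR(modact))
                return PTR_ERR(modact);

        MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
        /* ... fill field, offset, length and data as before ... */
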
@@ -2895,141 +2941,8 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
return 0;
}
-static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
- int namespace)
-{
- if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
- return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
- else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
- return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
-}
-
-int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
- int namespace,
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
-{
- int action_size, new_num_actions, max_hw_actions;
- size_t new_sz, old_sz;
- void *ret;
-
- if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
- return 0;
-
- action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
-
- max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
- namespace);
- new_num_actions = min(max_hw_actions,
- mod_hdr_acts->actions ?
- mod_hdr_acts->max_actions * 2 : 1);
- if (mod_hdr_acts->max_actions == new_num_actions)
- return -ENOSPC;
-
- new_sz = action_size * new_num_actions;
- old_sz = mod_hdr_acts->max_actions * action_size;
- ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
- memset(ret + old_sz, 0, new_sz - old_sz);
- mod_hdr_acts->actions = ret;
- mod_hdr_acts->max_actions = new_num_actions;
-
- return 0;
-}
-
-void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
-{
- kfree(mod_hdr_acts->actions);
- mod_hdr_acts->actions = NULL;
- mod_hdr_acts->num_actions = 0;
- mod_hdr_acts->max_actions = 0;
-}
-
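
For reference, the deleted allocator grew the action array geometrically: capacity doubled from 1 up to the firmware limit (max_modify_header_actions), so with a cap of 16 the krealloc() sizes step through 1, 2, 4, 8, 16 actions, and only then does a further request fail with -ENOSPC. The same growth policy presumably survives behind mlx5e_mod_hdr_alloc().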
static const struct pedit_headers zero_masks = {};
-static int
-parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- struct netlink_ext_ack *extack)
-{
- u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
- int err = -EOPNOTSUPP;
- u32 mask, val, offset;
- u8 htype;
-
- htype = act->mangle.htype;
- err = -EOPNOTSUPP; /* can't be all optimistic */
-
- if (htype == FLOW_ACT_MANGLE_UNSPEC) {
- NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
- goto out_err;
- }
-
- if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
- NL_SET_ERR_MSG_MOD(extack,
- "The pedit offload action is not supported");
- goto out_err;
- }
-
- mask = act->mangle.mask;
- val = act->mangle.val;
- offset = act->mangle.offset;
-
- err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack);
- if (err)
- goto out_err;
-
- hdrs[cmd].pedits++;
-
- return 0;
-out_err:
- return err;
-}
-
-static int
-parse_pedit_to_reformat(const struct flow_action_entry *act,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct netlink_ext_ack *extack)
-{
- u32 mask, val, offset;
- u32 *p;
-
- if (act->id != FLOW_ACTION_MANGLE) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
- return -EOPNOTSUPP;
- }
-
- if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
- NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
- return -EOPNOTSUPP;
- }
-
- mask = ~act->mangle.mask;
- val = act->mangle.val;
- offset = act->mangle.offset;
- p = (u32 *)&parse_attr->eth;
- *(p + (offset >> 2)) |= (val & mask);
-
- return 0;
-}
-
-static int parse_tc_pedit_action(struct mlx5e_priv *priv,
- const struct flow_action_entry *act, int namespace,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
-{
- if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
- return parse_pedit_to_reformat(act, parse_attr, extack);
-
- return parse_pedit_to_modify_hdr(priv, act, namespace,
- parse_attr, hdrs, extack);
-}
-
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
@@ -3061,39 +2974,10 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
return 0;
out_dealloc_parsed_actions:
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
return err;
}
-static bool csum_offload_supported(struct mlx5e_priv *priv,
- u32 action,
- u32 update_flags,
- struct netlink_ext_ack *extack)
-{
- u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
- TCA_CSUM_UPDATE_FLAG_UDP;
-
- /* The HW recalcs checksums only if re-writing headers */
- if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
- NL_SET_ERR_MSG_MOD(extack,
- "TC csum action is only offloaded with pedit");
- netdev_warn(priv->netdev,
- "TC csum action is only offloaded with pedit\n");
- return false;
- }
-
- if (update_flags & ~prot_flags) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't offload TC csum action for some header/s");
- netdev_warn(priv->netdev,
- "can't offload TC csum action for some header/s - flags %#x\n",
- update_flags);
- return false;
- }
-
- return true;
-}
-
struct ip_ttl_word {
__u8 ttl;
__u8 protocol;
@@ -3216,8 +3100,8 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
u8 ip_proto;
int i;
- headers_c = get_match_headers_criteria(actions, spec);
- headers_v = get_match_headers_value(actions, spec);
+ headers_c = mlx5e_get_match_headers_criteria(actions, spec);
+ headers_v = mlx5e_get_match_headers_value(actions, spec);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
/* for non-IP we only re-write MACs, so we're okay */
@@ -3324,7 +3208,7 @@ static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv
return priv->mdev == peer_priv->mdev;
}
-static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *fmdev, *pmdev;
u64 fsystem_guid, psystem_guid;
@@ -3338,126 +3222,45 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
return (fsystem_guid == psystem_guid);
}
-static bool same_vf_reps(struct mlx5e_priv *priv,
- struct net_device *out_dev)
-{
- return mlx5e_eswitch_vf_rep(priv->netdev) &&
- priv->netdev == out_dev;
-}
-
-static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
- const struct flow_action_entry *act,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- u32 *action, struct netlink_ext_ack *extack)
-{
- u16 mask16 = VLAN_VID_MASK;
- u16 val16 = act->vlan.vid & VLAN_VID_MASK;
- const struct flow_action_entry pedit_act = {
- .id = FLOW_ACTION_MANGLE,
- .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
- .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
- .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
- .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
- };
- u8 match_prio_mask, match_prio_val;
- void *headers_c, *headers_v;
- int err;
-
- headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
- headers_v = get_match_headers_value(*action, &parse_attr->spec);
-
- if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
- MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
- NL_SET_ERR_MSG_MOD(extack,
- "VLAN rewrite action must have VLAN protocol match");
- return -EOPNOTSUPP;
- }
-
- match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
- match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
- if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Changing VLAN prio is not supported");
- return -EOPNOTSUPP;
- }
-
- err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
- *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-
- return err;
-}
-
static int
-add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct pedit_headers_action *hdrs,
- u32 *action, struct netlink_ext_ack *extack)
-{
- const struct flow_action_entry prio_tag_act = {
- .vlan.vid = 0,
- .vlan.prio =
- MLX5_GET(fte_match_set_lyr_2_4,
- get_match_headers_value(*action,
- &parse_attr->spec),
- first_prio) &
- MLX5_GET(fte_match_set_lyr_2_4,
- get_match_headers_criteria(*action,
- &parse_attr->spec),
- first_prio),
- };
-
- return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
- &prio_tag_act, parse_attr, hdrs, action,
- extack);
-}
-
-static int validate_goto_chain(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- const struct flow_action_entry *act,
- u32 actions,
- struct netlink_ext_ack *extack)
+parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action)
{
- bool is_esw = mlx5e_is_eswitch_flow(flow);
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5_flow_attr *attr = flow->attr;
- bool ft_flow = mlx5e_is_ft_flow(flow);
- u32 dest_chain = act->chain_index;
- struct mlx5_fs_chains *chains;
- struct mlx5_eswitch *esw;
- u32 reformat_and_fwd;
- u32 max_chain;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_priv *priv = flow->priv;
+ const struct flow_action_entry *act;
+ struct mlx5e_tc_act *tc_act;
+ int err, i;
- esw = priv->mdev->priv.eswitch;
- chains = is_esw ? esw_chains(esw) : nic_chains(priv);
- max_chain = mlx5_chains_get_chain_range(chains);
- reformat_and_fwd = is_esw ?
- MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
- MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
-
- if (ft_flow) {
- NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
- return -EOPNOTSUPP;
- }
+ ns_type = mlx5e_get_flow_namespace(flow);
- if (!mlx5_chains_backwards_supported(chains) &&
- dest_chain <= attr->chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Goto lower numbered chain isn't supported");
- return -EOPNOTSUPP;
- }
+ flow_action_for_each(i, act, flow_action) {
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act) {
+ NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
+ return -EOPNOTSUPP;
+ }
- if (dest_chain > max_chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Requested destination chain is out of supported range");
- return -EOPNOTSUPP;
+ if (!tc_act->can_offload(parse_state, act, i))
+ return -EOPNOTSUPP;
+
+ err = tc_act->parse_action(parse_state, act, priv, attr);
+ if (err)
+ return err;
}
- if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
- MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
- !reformat_and_fwd) {
- NL_SET_ERR_MSG_MOD(extack,
- "Goto chain is not allowed if action has reformat or decap");
- return -EOPNOTSUPP;
+ flow_action_for_each(i, act, flow_action) {
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act || !tc_act->post_parse ||
+ !tc_act->can_offload(parse_state, act, i))
+ continue;
+
+ err = tc_act->post_parse(parse_state, priv, attr);
+ if (err)
+ return err;
}
return 0;
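
parse_tc_actions() replaces the per-namespace switch statements (removed further down) with a dispatch table: each FLOW_ACTION_* id resolves, per flow namespace, to an ops struct via mlx5e_tc_act_get(). The shape below is inferred from the three call sites in the loop — a sketch, not the actual header:

        struct mlx5e_tc_act {
                bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state,
                                    const struct flow_action_entry *act,
                                    int act_index);
                int (*parse_action)(struct mlx5e_tc_act_parse_state *parse_state,
                                    const struct flow_action_entry *act,
                                    struct mlx5e_priv *priv,
                                    struct mlx5_flow_attr *attr);
                int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state,
                                  struct mlx5e_priv *priv,
                                  struct mlx5_flow_attr *attr);
        };

The loop runs twice on purpose: parse_action in the first pass, post_parse in the second, so a handler that needs to see the fully parsed action chain can fix things up at the end.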
@@ -3478,19 +3281,19 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
!hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
return 0;
- ns_type = get_flow_name_space(flow);
+ ns_type = mlx5e_get_flow_namespace(flow);
err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs,
&attr->action, extack);
if (err)
return err;
- /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
if (parse_attr->mod_hdr_acts.num_actions > 0)
return 0;
+ /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
return 0;
@@ -3503,19 +3306,9 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
}
static int
-parse_tc_nic_actions(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+flow_action_supported(struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
{
- struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
- struct pedit_headers_action hdrs[2] = {};
- const struct flow_action_entry *act;
- struct mlx5_nic_flow_attr *nic_attr;
- u32 action = 0;
- int err, i;
-
if (!flow_action_has_entries(flow_action)) {
NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
return -EINVAL;
@@ -3527,108 +3320,35 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- nic_attr = attr->nic_attr;
- nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- parse_attr = attr->parse_attr;
-
- flow_action_for_each(i, act, flow_action) {
- switch (act->id) {
- case FLOW_ACTION_ACCEPT:
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- break;
- case FLOW_ACTION_DROP:
- action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- break;
- case FLOW_ACTION_MANGLE:
- case FLOW_ACTION_ADD:
- err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, hdrs, NULL, extack);
- if (err)
- return err;
-
- action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- break;
- case FLOW_ACTION_VLAN_MANGLE:
- err = add_vlan_rewrite_action(priv,
- MLX5_FLOW_NAMESPACE_KERNEL,
- act, parse_attr, hdrs,
- &action, extack);
- if (err)
- return err;
-
- break;
- case FLOW_ACTION_CSUM:
- if (csum_offload_supported(priv, action,
- act->csum_flags,
- extack))
- break;
-
- return -EOPNOTSUPP;
- case FLOW_ACTION_REDIRECT: {
- struct net_device *peer_dev = act->dev;
-
- if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
- same_hw_devs(priv, netdev_priv(peer_dev))) {
- parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
- flow_flag_set(flow, HAIRPIN);
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "device is not on same HW, can't offload");
- netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
- peer_dev->name);
- return -EOPNOTSUPP;
- }
- }
- break;
- case FLOW_ACTION_MARK: {
- u32 mark = act->mark;
-
- if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
- NL_SET_ERR_MSG_MOD(extack,
- "Bad flow mark - only 16 bit is supported");
- return -EOPNOTSUPP;
- }
-
- nic_attr->flow_tag = mark;
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- }
- break;
- case FLOW_ACTION_GOTO:
- err = validate_goto_chain(priv, flow, act, action,
- extack);
- if (err)
- return err;
+ return 0;
+}
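
flow_action_supported() factors out the two sanity checks both parsers used to open with: the action list must be non-empty, and its HW-stats type must pass flow_action_hw_stats_check() with FLOW_ACTION_HW_STATS_DELAYED_BIT (the duplicate copies of these checks are removed from parse_tc_fdb_actions() below).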
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->dest_chain = act->chain_index;
- break;
- case FLOW_ACTION_CT:
- err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
- &parse_attr->mod_hdr_acts,
- act, extack);
- if (err)
- return err;
+static int
+parse_tc_nic_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_tc_act_parse_state *parse_state;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct pedit_headers_action *hdrs;
+ int err;
- flow_flag_set(flow, CT);
- break;
- default:
- NL_SET_ERR_MSG_MOD(extack,
- "The offload action is not supported in NIC action");
- return -EOPNOTSUPP;
- }
- }
+ err = flow_action_supported(flow_action, extack);
+ if (err)
+ return err;
- attr->action = action;
+ attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ parse_attr = attr->parse_attr;
+ parse_state = &parse_attr->parse_state;
+ mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
+ parse_state->ct_priv = get_ct_priv(priv);
+ hdrs = parse_state->hdrs;
- if (attr->dest_chain && parse_attr->mirred_ifindex[0]) {
- NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
- return -EOPNOTSUPP;
- }
+ err = parse_tc_actions(parse_state, flow_action);
+ if (err)
+ return err;
err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
if (err)
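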
@@ -3650,147 +3370,7 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
mlx5e_eswitch_vf_rep(priv->netdev) &&
mlx5e_eswitch_vf_rep(peer_netdev) &&
- same_hw_devs(priv, peer_priv));
-}
-
-static int parse_tc_vlan_action(struct mlx5e_priv *priv,
- const struct flow_action_entry *act,
- struct mlx5_esw_flow_attr *attr,
- u32 *action,
- struct netlink_ext_ack *extack)
-{
- u8 vlan_idx = attr->total_vlan;
-
- if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
- NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
- return -EOPNOTSUPP;
- }
-
- switch (act->id) {
- case FLOW_ACTION_VLAN_POP:
- if (vlan_idx) {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vlan pop action is not supported");
- return -EOPNOTSUPP;
- }
-
- *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
- } else {
- *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- }
- break;
- case FLOW_ACTION_VLAN_PUSH:
- attr->vlan_vid[vlan_idx] = act->vlan.vid;
- attr->vlan_prio[vlan_idx] = act->vlan.prio;
- attr->vlan_proto[vlan_idx] = act->vlan.proto;
- if (!attr->vlan_proto[vlan_idx])
- attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
-
- if (vlan_idx) {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vlan push action is not supported for vlan depth > 1");
- return -EOPNOTSUPP;
- }
-
- *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
- } else {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
- (act->vlan.proto != htons(ETH_P_8021Q) ||
- act->vlan.prio)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vlan push action is not supported");
- return -EOPNOTSUPP;
- }
-
- *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- }
- break;
- default:
- NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
- return -EINVAL;
- }
-
- attr->total_vlan = vlan_idx + 1;
-
- return 0;
-}
-
-static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
- struct net_device *out_dev)
-{
- struct net_device *fdb_out_dev = out_dev;
- struct net_device *uplink_upper;
-
- rcu_read_lock();
- uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
- if (uplink_upper && netif_is_lag_master(uplink_upper) &&
- uplink_upper == out_dev) {
- fdb_out_dev = uplink_dev;
- } else if (netif_is_lag_master(out_dev)) {
- fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
- if (fdb_out_dev &&
- (!mlx5e_eswitch_rep(fdb_out_dev) ||
- !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
- fdb_out_dev = NULL;
- }
- rcu_read_unlock();
- return fdb_out_dev;
-}
-
-static int add_vlan_push_action(struct mlx5e_priv *priv,
- struct mlx5_flow_attr *attr,
- struct net_device **out_dev,
- u32 *action,
- struct netlink_ext_ack *extack)
-{
- struct net_device *vlan_dev = *out_dev;
- struct flow_action_entry vlan_act = {
- .id = FLOW_ACTION_VLAN_PUSH,
- .vlan.vid = vlan_dev_vlan_id(vlan_dev),
- .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
- .vlan.prio = 0,
- };
- int err;
-
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
- if (err)
- return err;
-
- rcu_read_lock();
- *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
- rcu_read_unlock();
- if (!*out_dev)
- return -ENODEV;
-
- if (is_vlan_dev(*out_dev))
- err = add_vlan_push_action(priv, attr, out_dev, action, extack);
-
- return err;
-}
-
-static int add_vlan_pop_action(struct mlx5e_priv *priv,
- struct mlx5_flow_attr *attr,
- u32 *action,
- struct netlink_ext_ack *extack)
-{
- struct flow_action_entry vlan_act = {
- .id = FLOW_ACTION_VLAN_POP,
- };
- int nest_level, err = 0;
-
- nest_level = attr->parse_attr->filter_dev->lower_level -
- priv->netdev->lower_level;
- while (nest_level--) {
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
- if (err)
- return err;
- }
-
- return err;
+ mlx5e_same_hw_devs(priv, peer_priv));
}
static bool same_hw_reps(struct mlx5e_priv *priv,
@@ -3802,7 +3382,7 @@ static bool same_hw_reps(struct mlx5e_priv *priv,
return mlx5e_eswitch_rep(priv->netdev) &&
mlx5e_eswitch_rep(peer_netdev) &&
- same_hw_devs(priv, peer_priv);
+ mlx5e_same_hw_devs(priv, peer_priv);
}
static bool is_lag_dev(struct mlx5e_priv *priv,
@@ -3826,66 +3406,6 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
same_port_devs(priv, netdev_priv(out_dev));
}
-static bool is_duplicated_output_device(struct net_device *dev,
- struct net_device *out_dev,
- int *ifindexes, int if_count,
- struct netlink_ext_ack *extack)
-{
- int i;
-
- for (i = 0; i < if_count; i++) {
- if (ifindexes[i] == out_dev->ifindex) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't duplicate output to same device");
- netdev_err(dev, "can't duplicate output to same device: %s\n",
- out_dev->name);
- return true;
- }
- }
-
- return false;
-}
-
-static int verify_uplink_forwarding(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct net_device *out_dev,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *rep_priv;
-
- /* Forwarding non encapsulated traffic between
- * uplink ports is allowed only if
- * termination_table_raw_traffic cap is set.
- *
- * Input vport was stored in attr->in_rep.
- * In LAG case, *priv* is the private data of
- * uplink which may be not the input vport.
- */
- rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
-
- if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
- mlx5e_eswitch_uplink_rep(out_dev)))
- return 0;
-
- if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
- termination_table_raw_traffic)) {
- NL_SET_ERR_MSG_MOD(extack,
- "devices are both uplink, can't offload forwarding");
- pr_err("devices %s %s are both uplink, can't offload forwarding\n",
- priv->netdev->name, out_dev->name);
- return -EOPNOTSUPP;
- } else if (out_dev != rep_priv->netdev) {
- NL_SET_ERR_MSG_MOD(extack,
- "devices are not the same uplink, can't offload forwarding");
- pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
- priv->netdev->name, out_dev->name);
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
int ifindex,
@@ -3925,386 +3445,33 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
return 0;
}
-static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+static int
+parse_tc_fdb_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
- struct pedit_headers_action hdrs[2] = {};
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_act_parse_state *parse_state;
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5e_sample_attr sample_attr = {};
- const struct ip_tunnel_info *info = NULL;
struct mlx5_flow_attr *attr = flow->attr;
- int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
- bool ft_flow = mlx5e_is_ft_flow(flow);
- const struct flow_action_entry *act;
struct mlx5_esw_flow_attr *esw_attr;
- bool encap = false, decap = false;
- u32 action = attr->action;
- int err, i, if_count = 0;
- bool ptype_host = false;
- bool mpls_push = false;
-
- if (!flow_action_has_entries(flow_action)) {
- NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
- return -EINVAL;
- }
+ struct pedit_headers_action *hdrs;
+ int err;
- if (!flow_action_hw_stats_check(flow_action, extack,
- FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
- NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
- return -EOPNOTSUPP;
- }
+ err = flow_action_supported(flow_action, extack);
+ if (err)
+ return err;
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
+ parse_state = &parse_attr->parse_state;
+ mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
+ parse_state->ct_priv = get_ct_priv(priv);
+ hdrs = parse_state->hdrs;
- flow_action_for_each(i, act, flow_action) {
- switch (act->id) {
- case FLOW_ACTION_ACCEPT:
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
- break;
- case FLOW_ACTION_PTYPE:
- if (act->ptype != PACKET_HOST) {
- NL_SET_ERR_MSG_MOD(extack,
- "skbedit ptype is only supported with type host");
- return -EOPNOTSUPP;
- }
-
- ptype_host = true;
- break;
- case FLOW_ACTION_DROP:
- action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- break;
- case FLOW_ACTION_TRAP:
- if (!flow_offload_has_one_action(flow_action)) {
- NL_SET_ERR_MSG_MOD(extack,
- "action trap is supported as a sole action only");
- return -EOPNOTSUPP;
- }
- action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT);
- attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- break;
- case FLOW_ACTION_MPLS_PUSH:
- if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
- reformat_l2_to_l3_tunnel) ||
- act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
- NL_SET_ERR_MSG_MOD(extack,
- "mpls push is supported only for mpls_uc protocol");
- return -EOPNOTSUPP;
- }
- mpls_push = true;
- break;
- case FLOW_ACTION_MPLS_POP:
- /* we only support mpls pop if it is the first action
- * and the filter net device is bareudp. Subsequent
- * actions can be pedit and the last can be mirred
- * egress redirect.
- */
- if (i) {
- NL_SET_ERR_MSG_MOD(extack,
- "mpls pop supported only as first action");
- return -EOPNOTSUPP;
- }
- if (!netif_is_bareudp(parse_attr->filter_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "mpls pop supported only on bareudp devices");
- return -EOPNOTSUPP;
- }
-
- parse_attr->eth.h_proto = act->mpls_pop.proto;
- action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_flag_set(flow, L3_TO_L2_DECAP);
- break;
- case FLOW_ACTION_MANGLE:
- case FLOW_ACTION_ADD:
- err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr, hdrs, flow, extack);
- if (err)
- return err;
-
- if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
- action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- esw_attr->split_count = esw_attr->out_count;
- }
- break;
- case FLOW_ACTION_CSUM:
- if (csum_offload_supported(priv, action,
- act->csum_flags, extack))
- break;
-
- return -EOPNOTSUPP;
- case FLOW_ACTION_REDIRECT_INGRESS: {
- struct net_device *out_dev;
-
- out_dev = act->dev;
- if (!out_dev)
- return -EOPNOTSUPP;
-
- if (!netif_is_ovs_master(out_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "redirect to ingress is supported only for OVS internal ports");
- return -EOPNOTSUPP;
- }
-
- if (netif_is_ovs_master(parse_attr->filter_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "redirect to ingress is not supported from internal port");
- return -EOPNOTSUPP;
- }
-
- if (!ptype_host) {
- NL_SET_ERR_MSG_MOD(extack,
- "redirect to int port ingress requires ptype=host action");
- return -EOPNOTSUPP;
- }
-
- if (esw_attr->out_count) {
- NL_SET_ERR_MSG_MOD(extack,
- "redirect to int port ingress is supported only as single destination");
- return -EOPNOTSUPP;
- }
-
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
-
- err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
- MLX5E_TC_INT_PORT_INGRESS,
- &action, esw_attr->out_count);
- if (err)
- return err;
-
- esw_attr->out_count++;
-
- break;
- }
- case FLOW_ACTION_REDIRECT:
- case FLOW_ACTION_MIRRED: {
- struct mlx5e_priv *out_priv;
- struct net_device *out_dev;
-
- out_dev = act->dev;
- if (!out_dev) {
- /* out_dev is NULL when filters with
- * non-existing mirred device are replayed to
- * the driver.
- */
- return -EINVAL;
- }
-
- if (mpls_push && !netif_is_bareudp(out_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "mpls is supported only through a bareudp device");
- return -EOPNOTSUPP;
- }
-
- if (ft_flow && out_dev == priv->netdev) {
- /* Ignore forward to self rules generated
- * by adding both mlx5 devs to the flow table
- * block on a normal nft offload setup.
- */
- return -EOPNOTSUPP;
- }
-
- if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't support more output ports, can't offload forwarding");
- netdev_warn(priv->netdev,
- "can't support more than %d output ports, can't offload forwarding\n",
- esw_attr->out_count);
- return -EOPNOTSUPP;
- }
-
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- if (encap) {
- parse_attr->mirred_ifindex[esw_attr->out_count] =
- out_dev->ifindex;
- parse_attr->tun_info[esw_attr->out_count] =
- mlx5e_dup_tun_info(info);
- if (!parse_attr->tun_info[esw_attr->out_count])
- return -ENOMEM;
- encap = false;
- esw_attr->dests[esw_attr->out_count].flags |=
- MLX5_ESW_DEST_ENCAP;
- esw_attr->out_count++;
- /* attr->dests[].rep is resolved when we
- * handle encap
- */
- } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
-
- if (is_duplicated_output_device(priv->netdev,
- out_dev,
- ifindexes,
- if_count,
- extack))
- return -EOPNOTSUPP;
-
- ifindexes[if_count] = out_dev->ifindex;
- if_count++;
-
- out_dev = get_fdb_out_dev(uplink_dev, out_dev);
- if (!out_dev)
- return -ENODEV;
-
- if (is_vlan_dev(out_dev)) {
- err = add_vlan_push_action(priv, attr,
- &out_dev,
- &action, extack);
- if (err)
- return err;
- }
-
- if (is_vlan_dev(parse_attr->filter_dev)) {
- err = add_vlan_pop_action(priv, attr,
- &action, extack);
- if (err)
- return err;
- }
-
- if (netif_is_macvlan(out_dev))
- out_dev = macvlan_dev_real_dev(out_dev);
-
- err = verify_uplink_forwarding(priv, flow, out_dev, extack);
- if (err)
- return err;
-
- if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "devices are not on same switch HW, can't offload forwarding");
- return -EOPNOTSUPP;
- }
-
- if (same_vf_reps(priv, out_dev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "can't forward from a VF to itself");
- return -EOPNOTSUPP;
- }
-
- out_priv = netdev_priv(out_dev);
- rpriv = out_priv->ppriv;
- esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
- esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
- esw_attr->out_count++;
- } else if (netif_is_ovs_master(out_dev)) {
- err = mlx5e_set_fwd_to_int_port_actions(priv, attr,
- out_dev->ifindex,
- MLX5E_TC_INT_PORT_EGRESS,
- &action,
- esw_attr->out_count);
- if (err)
- return err;
-
- esw_attr->out_count++;
- } else if (parse_attr->filter_dev != priv->netdev) {
- /* All mlx5 devices are called to configure
- * high level device filters. Therefore, the
- * *attempt* to install a filter on invalid
- * eswitch should not trigger an explicit error
- */
- return -EINVAL;
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "devices are not on same switch HW, can't offload forwarding");
- netdev_warn(priv->netdev,
- "devices %s %s not on same switch HW, can't offload forwarding\n",
- priv->netdev->name,
- out_dev->name);
- return -EOPNOTSUPP;
- }
- }
- break;
- case FLOW_ACTION_TUNNEL_ENCAP:
- info = act->tunnel;
- if (info) {
- encap = true;
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "Zero tunnel attributes is not supported");
- return -EOPNOTSUPP;
- }
-
- break;
- case FLOW_ACTION_VLAN_PUSH:
- case FLOW_ACTION_VLAN_POP:
- if (act->id == FLOW_ACTION_VLAN_PUSH &&
- (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
- /* Replace vlan pop+push with vlan modify */
- action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- err = add_vlan_rewrite_action(priv,
- MLX5_FLOW_NAMESPACE_FDB,
- act, parse_attr, hdrs,
- &action, extack);
- } else {
- err = parse_tc_vlan_action(priv, act, esw_attr, &action, extack);
- }
- if (err)
- return err;
-
- esw_attr->split_count = esw_attr->out_count;
- break;
- case FLOW_ACTION_VLAN_MANGLE:
- err = add_vlan_rewrite_action(priv,
- MLX5_FLOW_NAMESPACE_FDB,
- act, parse_attr, hdrs,
- &action, extack);
- if (err)
- return err;
-
- esw_attr->split_count = esw_attr->out_count;
- break;
- case FLOW_ACTION_TUNNEL_DECAP:
- decap = true;
- break;
- case FLOW_ACTION_GOTO:
- err = validate_goto_chain(priv, flow, act, action,
- extack);
- if (err)
- return err;
-
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- attr->dest_chain = act->chain_index;
- break;
- case FLOW_ACTION_CT:
- if (flow_flag_test(flow, SAMPLE)) {
- NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
- return -EOPNOTSUPP;
- }
- err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
- &parse_attr->mod_hdr_acts,
- act, extack);
- if (err)
- return err;
-
- flow_flag_set(flow, CT);
- esw_attr->split_count = esw_attr->out_count;
- break;
- case FLOW_ACTION_SAMPLE:
- if (flow_flag_test(flow, CT)) {
- NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
- return -EOPNOTSUPP;
- }
- sample_attr.rate = act->sample.rate;
- sample_attr.group_num = act->sample.psample_group->group_num;
- if (act->sample.truncate)
- sample_attr.trunc_size = act->sample.trunc_size;
- flow_flag_set(flow, SAMPLE);
- break;
- default:
- NL_SET_ERR_MSG_MOD(extack,
- "The offload action is not supported in FDB action");
- return -EOPNOTSUPP;
- }
- }
+ err = parse_tc_actions(parse_state, flow_action);
+ if (err)
+ return err;
/* Forward to/from internal port can only have 1 dest */
if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) &&
@@ -4314,23 +3481,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- /* always set IP version for indirect table handling */
- attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
-
- if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
- action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
- /* For prio tag mode, replace vlan pop with rewrite vlan prio
- * tag rewrite.
- */
- action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
- &action, extack);
- if (err)
- return err;
- }
-
- attr->action = action;
-
err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
if (err)
return err;
@@ -4338,30 +3488,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
- if (attr->dest_chain && decap) {
- /* It can be supported if we'll create a mapping for
- * the tunnel device only (without tunnel), and set
- * this tunnel id with this decap flow.
- *
- * On restore (miss), we'll just set this saved tunnel
- * device.
- */
-
- NL_SET_ERR_MSG(extack, "Decap with goto isn't supported");
- netdev_warn(priv->netdev, "Decap with goto isn't supported");
- return -EOPNOTSUPP;
- }
-
- /* Allocate sample attribute only when there is a sample action and
- * no errors after parsing.
- */
- if (flow_flag_test(flow, SAMPLE)) {
- attr->sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
- if (!attr->sample_attr)
- return -ENOMEM;
- *attr->sample_attr = sample_attr;
- }
-
return 0;
}
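
With the switch statement gone, every FDB-only concern — encap bookkeeping, VLAN push/pop (including the pop+push-to-rewrite collapse), mirred device validation, the sample/CT mutual exclusion — presumably lives in the matching per-action handler. The user-facing tc interface is unchanged; a filter such as 'tc filter add dev <rep> ingress protocol ip flower action pedit ex munge ip ttl set 64 pipe action mirred egress redirect dev <rep2>' (illustrative device names) is simply parsed by handlers instead of switch cases.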
@@ -4458,7 +3584,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
flow->cookie = f->cookie;
flow->priv = priv;
- attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
+ attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
if (!attr)
goto err_free;
@@ -4553,6 +3679,9 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
+ /* always set IP version for indirect table handling */
+ flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
+
err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
if (err)
goto err_free;
@@ -4714,7 +3843,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
err_free:
flow_flag_set(flow, FAILED);
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+ mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
return err;
@@ -5014,14 +4143,8 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = ma->common.extack;
- if (!mlx5_esw_qos_enabled(esw)) {
- NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
- return -EOPNOTSUPP;
- }
-
if (ma->common.prio != 1) {
NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
return -EINVAL;
@@ -5063,7 +4186,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
u16 peer_vhca_id;
int bkt;
- if (!same_hw_devs(priv, peer_priv))
+ if (!mlx5e_same_hw_devs(priv, peer_priv))
return;
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index fdb222793027..5ffae9b13066 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -151,7 +151,6 @@ enum {
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
-bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
@@ -247,11 +246,6 @@ int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow);
-int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
- int namespace,
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
-void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
-
struct mlx5e_tc_flow;
u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 792e0d6aa861..48a45aa54a3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -19,6 +19,7 @@
#include "lib/clock.h"
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
+#include "devlink.h"
enum {
MLX5_EQE_OWNER_INIT_VAL = 0x1,
@@ -58,6 +59,8 @@ struct mlx5_eq_table {
struct mutex lock; /* sync async eqs creations */
int num_comp_eqs;
struct mlx5_irq_table *irq_table;
+ struct mlx5_irq **comp_irqs;
+ struct mlx5_irq *ctrl_irq;
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rmap;
#endif
@@ -265,8 +268,8 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
struct mlx5_priv *priv = &dev->priv;
- u16 vecidx = param->irq_index;
__be64 *pas;
+ u16 vecidx;
void *eqc;
int inlen;
u32 *in;
@@ -288,20 +291,16 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
init_eq_buf(eq);
- eq->irq = mlx5_irq_request(dev, vecidx, param->affinity);
- if (IS_ERR(eq->irq)) {
- err = PTR_ERR(eq->irq);
- goto err_buf;
- }
-
+ eq->irq = param->irq;
vecidx = mlx5_irq_get_index(eq->irq);
+
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
- goto err_irq;
+ goto err_buf;
}
pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
@@ -345,8 +344,6 @@ err_eq:
err_in:
kvfree(in);
-err_irq:
- mlx5_irq_release(eq->irq);
err_buf:
mlx5_frag_buf_free(dev, &eq->frag_buf);
return err;
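
create_map_eq() no longer requests its own IRQ: the caller now hands in a pre-requested struct mlx5_irq via param->irq, and the vector index is derived from it with mlx5_irq_get_index(). Ownership is inverted — an EQ borrows its IRQ rather than holding it — which is why the err_irq unwind label here and the mlx5_irq_release() in destroy_unmap_eq() below can go.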
@@ -400,7 +397,6 @@ static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
- mlx5_irq_release(eq->irq);
mlx5_frag_buf_free(dev, &eq->frag_buf);
return err;
@@ -593,11 +589,8 @@ setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
eq->irq_nb.notifier_call = mlx5_eq_async_int;
spin_lock_init(&eq->lock);
- if (!zalloc_cpumask_var(&param->affinity, GFP_KERNEL))
- return -ENOMEM;
err = create_async_eq(dev, &eq->core, param);
- free_cpumask_var(param->affinity);
if (err) {
mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
return err;
@@ -622,17 +615,38 @@ static void cleanup_async_eq(struct mlx5_core_dev *dev,
name, err);
}
+static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ &val);
+ if (!err)
+ return val.vu32;
+ mlx5_core_dbg(dev, "Failed to get param, using default. err = %d\n", err);
+ return MLX5_NUM_ASYNC_EQE;
+}
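
async_eq_depth_devlink_param_get() — and its completion-EQ twin further down — follows the usual driverinit-param pattern: try devlink_param_driverinit_value_get(), and fall back to the compile-time default if the parameter was never registered or set. Assuming the generic event_eq_size/io_eq_size params are registered elsewhere in the driver, an administrator would resize the async EQ with something like "devlink dev param set pci/0000:06:00.0 name event_eq_size value 8192 cmode driverinit" (illustrative device address), followed by a devlink reload, since driverinit values only apply on the next initialization.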
static int create_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_param param = {};
int err;
+ /* All the async_eqs use a single IRQ; request one IRQ and share its
+ * index among all the async_eqs of this device.
+ */
+ table->ctrl_irq = mlx5_ctrl_irq_request(dev);
+ if (IS_ERR(table->ctrl_irq))
+ return PTR_ERR(table->ctrl_irq);
+
MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
param = (struct mlx5_eq_param) {
- .irq_index = MLX5_IRQ_EQ_CTRL,
+ .irq = table->ctrl_irq,
.nent = MLX5_NUM_CMD_EQE,
.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
@@ -645,8 +659,8 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
param = (struct mlx5_eq_param) {
- .irq_index = MLX5_IRQ_EQ_CTRL,
- .nent = MLX5_NUM_ASYNC_EQE,
+ .irq = table->ctrl_irq,
+ .nent = async_eq_depth_devlink_param_get(dev),
};
gather_async_events_mask(dev, param.mask);
@@ -655,7 +669,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
goto err2;
param = (struct mlx5_eq_param) {
- .irq_index = MLX5_IRQ_EQ_CTRL,
+ .irq = table->ctrl_irq,
.nent = /* TODO: sriov max_vf + */ 1,
.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
};
@@ -674,6 +688,7 @@ err2:
err1:
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
+ mlx5_ctrl_irq_release(table->ctrl_irq);
return err;
}
@@ -688,6 +703,7 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
+ mlx5_ctrl_irq_release(table->ctrl_irq);
}
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
@@ -715,12 +731,10 @@ mlx5_eq_create_generic(struct mlx5_core_dev *dev,
struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
int err;
- if (!cpumask_available(param->affinity))
- return ERR_PTR(-EINVAL);
-
if (!eq)
return ERR_PTR(-ENOMEM);
+ param->irq = dev->priv.eq_table->ctrl_irq;
err = create_async_eq(dev, eq, param);
if (err) {
kvfree(eq);
@@ -780,6 +794,54 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
+static void comp_irqs_release(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+
+ if (mlx5_core_is_sf(dev))
+ mlx5_irq_affinity_irqs_release(dev, table->comp_irqs, table->num_comp_eqs);
+ else
+ mlx5_irqs_release_vectors(table->comp_irqs, table->num_comp_eqs);
+ kfree(table->comp_irqs);
+}
+
+static int comp_irqs_request(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ int ncomp_eqs = table->num_comp_eqs;
+ u16 *cpus;
+ int ret;
+ int i;
+
+ ncomp_eqs = table->num_comp_eqs;
+ table->comp_irqs = kcalloc(ncomp_eqs, sizeof(*table->comp_irqs), GFP_KERNEL);
+ if (!table->comp_irqs)
+ return -ENOMEM;
+ if (mlx5_core_is_sf(dev)) {
+ ret = mlx5_irq_affinity_irqs_request_auto(dev, ncomp_eqs, table->comp_irqs);
+ if (ret < 0)
+ goto free_irqs;
+ return ret;
+ }
+
+ cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);
+ if (!cpus) {
+ ret = -ENOMEM;
+ goto free_irqs;
+ }
+ for (i = 0; i < ncomp_eqs; i++)
+ cpus[i] = cpumask_local_spread(i, dev->priv.numa_node);
+ ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs);
+ kfree(cpus);
+ if (ret < 0)
+ goto free_irqs;
+ return ret;
+
+free_irqs:
+ kfree(table->comp_irqs);
+ return ret;
+}
+
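
comp_irqs_request() makes the two IRQ-ownership models explicit: SFs lease IRQs from the parent device's pool via mlx5_irq_affinity_irqs_request_auto(), while PFs/VFs request dedicated vectors whose affinity hint comes from cpumask_local_spread(i, numa_node) — CPUs on the device's NUMA node are enumerated first, so completion EQ i lands on the i-th nearest CPU. Either path returns the number of IRQs actually granted, which create_comp_eqs() below adopts as the final num_comp_eqs; it may be fewer than requested.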
static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
@@ -794,6 +856,22 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev)
tasklet_disable(&eq->tasklet_ctx.task);
kfree(eq);
}
+ comp_irqs_release(dev);
+}
+
+static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ &val);
+ if (!err)
+ return val.vu32;
+ mlx5_core_dbg(dev, "Failed to get param, using default. err = %d\n", err);
+ return MLX5_COMP_EQ_SIZE;
}
static int create_comp_eqs(struct mlx5_core_dev *dev)
@@ -805,12 +883,13 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
int err;
int i;
+ ncomp_eqs = comp_irqs_request(dev);
+ if (ncomp_eqs < 0)
+ return ncomp_eqs;
INIT_LIST_HEAD(&table->comp_eqs_list);
- ncomp_eqs = table->num_comp_eqs;
- nent = MLX5_COMP_EQ_SIZE;
+ nent = comp_eq_depth_devlink_param_get(dev);
for (i = 0; i < ncomp_eqs; i++) {
struct mlx5_eq_param param = {};
- int vecidx = i;
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
if (!eq) {
@@ -825,18 +904,11 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
eq->irq_nb.notifier_call = mlx5_eq_comp_int;
param = (struct mlx5_eq_param) {
- .irq_index = vecidx,
+ .irq = table->comp_irqs[i],
.nent = nent,
};
- if (!zalloc_cpumask_var(&param.affinity, GFP_KERNEL)) {
- err = -ENOMEM;
- goto clean_eq;
- }
- cpumask_set_cpu(cpumask_local_spread(i, dev->priv.numa_node),
- param.affinity);
err = create_map_eq(dev, &eq->core, &param);
- free_cpumask_var(param.affinity);
if (err)
goto clean_eq;
err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
@@ -850,7 +922,9 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
list_add_tail(&eq->list, &table->comp_eqs_list);
}
+ table->num_comp_eqs = ncomp_eqs;
return 0;
+
clean_eq:
kfree(eq);
clean:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
index 425c91814b34..c275fe028b6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -14,6 +14,7 @@
#include "fs_core.h"
#include "esw/indir_table.h"
#include "lib/fs_chains.h"
+#include "en/mod_hdr.h"
#define MLX5_ESW_INDIR_TABLE_SIZE 128
#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
@@ -226,7 +227,7 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
goto err_handle;
}
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
rule->handle = handle;
rule->vni = esw_attr->rx_tun_attr->vni;
rule->mh = flow_act.modify_hdr;
@@ -243,7 +244,7 @@ err_table:
mlx5_modify_header_dealloc(esw->dev, flow_act.modify_hdr);
err_mod_hdr_alloc:
err_mod_hdr_regc1:
- dealloc_mod_hdr_actions(&mod_acts);
+ mlx5e_mod_hdr_dealloc(&mod_acts);
err_mod_hdr_regc0:
err_ethertype:
kfree(rule);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index df277a6cddc0..9d17206d1625 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -431,7 +431,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int err = 0;
if (!mlx5_esw_allowed(esw))
- return -EPERM;
+ return vlan ? -EPERM : 0;
if (vlan || qos)
set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
@@ -522,9 +522,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
return PTR_ERR(evport);
mutex_lock(&esw->state_lock);
- err = mlx5_esw_qos_set_vport_min_rate(esw, evport, min_rate, NULL);
- if (!err)
- err = mlx5_esw_qos_set_vport_max_rate(esw, evport, max_rate, NULL);
+ err = mlx5_esw_qos_set_vport_rate(esw, evport, max_rate, min_rate);
mutex_unlock(&esw->state_lock);
return err;
}
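
Two quiet behaviour changes in legacy.c: clearing the VLAN (vlan == 0) is now a successful no-op even when the e-switch is not managed by this driver — previously every call failed with -EPERM — and the min/max pair is applied through the combined mlx5_esw_qos_set_vport_rate() helper added in qos.c below.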
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index d377ddc70fc7..11bbcd5f5b8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -204,10 +204,8 @@ static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divid
return 0;
}
-int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
- struct mlx5_vport *evport,
- u32 min_rate,
- struct netlink_ext_ack *extack)
+static int esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+ u32 min_rate, struct netlink_ext_ack *extack)
{
u32 fw_max_bw_share, previous_min_rate;
bool min_rate_supported;
@@ -231,10 +229,8 @@ int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
return err;
}
-int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw,
- struct mlx5_vport *evport,
- u32 max_rate,
- struct netlink_ext_ack *extack)
+static int esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+ u32 max_rate, struct netlink_ext_ack *extack)
{
u32 act_max_rate = max_rate;
bool max_rate_supported;
@@ -432,16 +428,13 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
}
static struct mlx5_esw_rate_group *
-esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+__esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_esw_rate_group *group;
u32 divider;
int err;
- if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth))
- return ERR_PTR(-EOPNOTSUPP);
-
group = kzalloc(sizeof(*group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
@@ -482,9 +475,32 @@ err_sched_elem:
return ERR_PTR(err);
}
-static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack);
+static void esw_qos_put(struct mlx5_eswitch *esw);
+
+static struct mlx5_esw_rate_group *
+esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_rate_group *group;
+ int err;
+
+ if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ err = esw_qos_get(esw, extack);
+ if (err)
+ return ERR_PTR(err);
+
+ group = __esw_qos_create_rate_group(esw, extack);
+ if (IS_ERR(group))
+ esw_qos_put(esw);
+
+ return group;
+}
+
+static int __esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
+ struct mlx5_esw_rate_group *group,
+ struct netlink_ext_ack *extack)
{
u32 divider;
int err;
@@ -503,7 +519,21 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed");
trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix);
+
kfree(group);
+
+ return err;
+}
+
+static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
+ struct mlx5_esw_rate_group *group,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = __esw_qos_destroy_rate_group(esw, group, extack);
+ esw_qos_put(esw);
+
return err;
}
@@ -526,7 +556,7 @@ static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
return false;
}
-void mlx5_esw_qos_create(struct mlx5_eswitch *esw)
+static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_core_dev *dev = esw->dev;
@@ -534,14 +564,10 @@ void mlx5_esw_qos_create(struct mlx5_eswitch *esw)
int err;
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
- return;
+ return -EOPNOTSUPP;
if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
- return;
-
- mutex_lock(&esw->state_lock);
- if (esw->qos.enabled)
- goto unlock;
+ return -EOPNOTSUPP;
MLX5_SET(scheduling_context, tsar_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
@@ -555,75 +581,94 @@ void mlx5_esw_qos_create(struct mlx5_eswitch *esw)
&esw->qos.root_tsar_ix);
if (err) {
esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
- goto unlock;
+ return err;
}
INIT_LIST_HEAD(&esw->qos.groups);
if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
- esw->qos.group0 = esw_qos_create_rate_group(esw, NULL);
+ esw->qos.group0 = __esw_qos_create_rate_group(esw, extack);
if (IS_ERR(esw->qos.group0)) {
esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n",
PTR_ERR(esw->qos.group0));
+ err = PTR_ERR(esw->qos.group0);
goto err_group0;
}
}
- esw->qos.enabled = true;
-unlock:
- mutex_unlock(&esw->state_lock);
- return;
+ refcount_set(&esw->qos.refcnt, 1);
+
+ return 0;
err_group0:
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- esw->qos.root_tsar_ix);
- if (err)
- esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err);
- mutex_unlock(&esw->state_lock);
+ if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
+ esw->qos.root_tsar_ix))
+ esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");
+
+ return err;
}
-void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw)
+static void esw_qos_destroy(struct mlx5_eswitch *esw)
{
- struct devlink *devlink = priv_to_devlink(esw->dev);
int err;
- devlink_rate_nodes_destroy(devlink);
- mutex_lock(&esw->state_lock);
- if (!esw->qos.enabled)
- goto unlock;
-
if (esw->qos.group0)
- esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL);
+ __esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL);
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
esw->qos.root_tsar_ix);
if (err)
esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err);
+}
- esw->qos.enabled = false;
-unlock:
- mutex_unlock(&esw->state_lock);
+static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ lockdep_assert_held(&esw->state_lock);
+
+ if (!refcount_inc_not_zero(&esw->qos.refcnt)) {
+ /* esw_qos_create() sets the refcount to 1 only on success.
+ * No need to decrement on failure.
+ */
+ err = esw_qos_create(esw, extack);
+ }
+
+ return err;
}
-int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 max_rate, u32 bw_share)
+static void esw_qos_put(struct mlx5_eswitch *esw)
+{
+ lockdep_assert_held(&esw->state_lock);
+ if (refcount_dec_and_test(&esw->qos.refcnt))
+ esw_qos_destroy(esw);
+}
+
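
esw_qos_get()/esw_qos_put() swap the old esw->qos.enabled flag for on-demand creation guarded by a refcount, the standard inc-not-zero idiom. A minimal generic sketch of the pattern (caller holds the lock that serializes creation):

        if (!refcount_inc_not_zero(&obj->refcnt))
                err = obj_create(obj);  /* does refcount_set(&obj->refcnt, 1) on success */
        ...
        if (refcount_dec_and_test(&obj->refcnt))
                obj_destroy(obj);

Zero means "not created yet", so the first user builds the object, later users only take a reference, and the last put tears it down.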
+static int esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
{
int err;
lockdep_assert_held(&esw->state_lock);
- if (!esw->qos.enabled)
+ if (vport->qos.enabled)
return 0;
- if (vport->qos.enabled)
- return -EEXIST;
+ err = esw_qos_get(esw, extack);
+ if (err)
+ return err;
vport->qos.group = esw->qos.group0;
err = esw_qos_vport_create_sched_element(esw, vport, max_rate, bw_share);
- if (!err) {
- vport->qos.enabled = true;
- trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate);
- }
+ if (err)
+ goto err_out;
+
+ vport->qos.enabled = true;
+ trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate);
+
+ return 0;
+
+err_out:
+ esw_qos_put(esw);
return err;
}
@@ -633,7 +678,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
int err;
lockdep_assert_held(&esw->state_lock);
- if (!esw->qos.enabled || !vport->qos.enabled)
+ if (!vport->qos.enabled)
return;
WARN(vport->qos.group && vport->qos.group != esw->qos.group0,
"Disabling QoS on port before detaching it from group");
@@ -645,8 +690,27 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
vport->vport, err);
- vport->qos.enabled = false;
+ memset(&vport->qos, 0, sizeof(vport->qos));
trace_mlx5_esw_vport_qos_destroy(vport);
+
+ esw_qos_put(esw);
+}
+
+int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ u32 min_rate, u32 max_rate)
+{
+ int err;
+
+ lockdep_assert_held(&esw->state_lock);
+ err = esw_qos_vport_enable(esw, vport, 0, 0, NULL);
+ if (err)
+ return err;
+
+ err = esw_qos_set_vport_min_rate(esw, vport, min_rate, NULL);
+ if (!err)
+ err = esw_qos_set_vport_max_rate(esw, vport, max_rate, NULL);
+
+ return err;
}
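
mlx5_esw_qos_set_vport_rate() becomes the single entry point for the legacy ndo path (e.g. 'ip link set <pf> vf 0 min_tx_rate 100 max_tx_rate 1000'): it lazily enables QoS for the vport — and, through esw_qos_get() inside esw_qos_vport_enable(), the whole e-switch rate hierarchy — on first use, then applies the min rate before the max rate.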
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
@@ -654,22 +718,29 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_vport *vport;
u32 bitmask;
+ int err;
vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return PTR_ERR(vport);
- if (!vport->qos.enabled)
- return -EOPNOTSUPP;
-
- MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
- bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+ mutex_lock(&esw->state_lock);
+ if (!vport->qos.enabled) {
+ /* E-switch QoS wasn't enabled yet; enable it together with this vport's QoS. */
+ err = esw_qos_vport_enable(esw, vport, rate_mbps, vport->qos.bw_share, NULL);
+ } else {
+ MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
+
+ bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+ err = mlx5_modify_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ ctx,
+ vport->qos.esw_tsar_ix,
+ bitmask);
+ }
+ mutex_unlock(&esw->state_lock);
- return mlx5_modify_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- ctx,
- vport->qos.esw_tsar_ix,
- bitmask);
+ return err;
}
#define MLX5_LINKSPEED_UNIT 125000 /* 1Mbps in Bps */
@@ -728,7 +799,12 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
return err;
mutex_lock(&esw->state_lock);
- err = mlx5_esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
+ err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+ if (err)
+ goto unlock;
+
+ err = esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
@@ -749,7 +825,12 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
return err;
mutex_lock(&esw->state_lock);
- err = mlx5_esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
+ err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+ if (err)
+ goto unlock;
+
+ err = esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
@@ -846,7 +927,9 @@ int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
int err;
mutex_lock(&esw->state_lock);
- err = esw_qos_vport_update_group(esw, vport, group, extack);
+ err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
+ if (!err)
+ err = esw_qos_vport_update_group(esw, vport, group, extack);
mutex_unlock(&esw->state_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
index 28451abe2d2f..0141e9d52037 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
@@ -6,18 +6,8 @@
#ifdef CONFIG_MLX5_ESWITCH
-int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
- struct mlx5_vport *evport,
- u32 min_rate,
- struct netlink_ext_ack *extack);
-int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw,
- struct mlx5_vport *evport,
- u32 max_rate,
- struct netlink_ext_ack *extack);
-void mlx5_esw_qos_create(struct mlx5_eswitch *esw);
-void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw);
-int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 max_rate, u32 bw_share);
+int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *evport,
+				u32 min_rate, u32 max_rate);
void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 51a8cecc4a7c..458ec0bca1b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -781,9 +781,6 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
if (err)
return err;
- /* Attach vport to the eswitch rate limiter */
- mlx5_esw_qos_vport_enable(esw, vport, vport->qos.max_rate, vport->qos.bw_share);
-
if (mlx5_esw_is_manager_vport(esw, vport_num))
return 0;
@@ -1260,8 +1257,6 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
- mlx5_esw_qos_create(esw);
-
esw->mode = mode;
if (mode == MLX5_ESWITCH_LEGACY) {
@@ -1290,7 +1285,6 @@ abort:
if (mode == MLX5_ESWITCH_OFFLOADS)
mlx5_rescan_drivers(esw->dev);
- mlx5_esw_qos_destroy(esw);
mlx5_esw_acls_ns_cleanup(esw);
return err;
}
@@ -1338,6 +1332,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
{
+ struct devlink *devlink = priv_to_devlink(esw->dev);
int old_mode;
lockdep_assert_held_write(&esw->mode_lock);
@@ -1367,7 +1362,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
if (old_mode == MLX5_ESWITCH_OFFLOADS)
mlx5_rescan_drivers(esw->dev);
- mlx5_esw_qos_destroy(esw);
+ devlink_rate_nodes_destroy(devlink);
+
mlx5_esw_acls_ns_cleanup(esw);
if (clear_vf)
@@ -1576,6 +1572,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
lockdep_register_key(&esw->mode_lock_key);
init_rwsem(&esw->mode_lock);
lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key);
+ refcount_set(&esw->qos.refcnt, 0);
esw->enabled_vports = 0;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1614,6 +1611,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
+ WARN_ON(refcount_read(&esw->qos.refcnt));
lockdep_unregister_key(&esw->mode_lock_key);
mutex_destroy(&esw->state_lock);
WARN_ON(!xa_empty(&esw->offloads.vhca_map));
@@ -1703,82 +1701,6 @@ bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
}
-static bool
-is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
-{
- return vport_num == MLX5_VPORT_PF ||
- mlx5_eswitch_is_vf_vport(esw, vport_num) ||
- mlx5_esw_is_sf_vport(esw, vport_num);
-}
-
-int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
- u8 *hw_addr, int *hw_addr_len,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int err = -EOPNOTSUPP;
- u16 vport_num;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw))
- return PTR_ERR(esw);
-
- vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
- if (!is_port_function_supported(esw, vport_num))
- return -EOPNOTSUPP;
-
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
- }
-
- mutex_lock(&esw->state_lock);
- if (vport->enabled) {
- ether_addr_copy(hw_addr, vport->info.mac);
- *hw_addr_len = ETH_ALEN;
- err = 0;
- }
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
-int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
- const u8 *hw_addr, int hw_addr_len,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_eswitch *esw;
- struct mlx5_vport *vport;
- int err = -EOPNOTSUPP;
- u16 vport_num;
-
- esw = mlx5_devlink_eswitch_get(port->devlink);
- if (IS_ERR(esw)) {
- NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
- return PTR_ERR(esw);
- }
-
- vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
- if (!is_port_function_supported(esw, vport_num)) {
- NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
- return -EINVAL;
- }
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport)) {
- NL_SET_ERR_MSG_MOD(extack, "Invalid port");
- return PTR_ERR(vport);
- }
-
- mutex_lock(&esw->state_lock);
- if (vport->enabled)
- err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr);
- else
- NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
u16 vport, int link_state)
{
@@ -1835,8 +1757,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->qos = evport->info.qos;
ivi->spoofchk = evport->info.spoofchk;
ivi->trusted = evport->info.trusted;
- ivi->min_tx_rate = evport->qos.min_rate;
- ivi->max_tx_rate = evport->qos.max_rate;
+ if (evport->qos.enabled) {
+ ivi->min_tx_rate = evport->qos.min_rate;
+ ivi->max_tx_rate = evport->qos.max_rate;
+ }
mutex_unlock(&esw->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 42f8ee2e5d9f..ead5e8acc8be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -308,10 +308,14 @@ struct mlx5_eswitch {
atomic64_t user_count;
struct {
- bool enabled;
u32 root_tsar_ix;
struct mlx5_esw_rate_group *group0;
struct list_head groups; /* Protected by esw->state_lock */
+
+ /* Protected by esw->state_lock.
+ * Initially 0, meaning no QoS users and QoS is disabled.
+ */
+ refcount_t refcnt;
} qos;
struct mlx5_esw_bridge_offloads *br_offloads;
@@ -339,9 +343,6 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);
-bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
-int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
-
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
@@ -516,11 +517,6 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
-static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
-{
- return esw->qos.enabled;
-}
-
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
u8 vlan_depth)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 32bc08a39925..9a7b25692505 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -295,26 +295,28 @@ esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
int *i)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
- int j, err;
+ int err;
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
- for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
- err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
- if (err)
- goto err_setup_chain;
+ /* flow steering cannot handle more than one dest with the same ft
+ * in a single flow
+ */
+ if (esw_attr->out_count - esw_attr->split_count > 1)
+ return -EOPNOTSUPP;
- if (esw_attr->dests[j].pkt_reformat) {
- flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
- }
+ err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
+ if (err)
+ return err;
+
+ if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
}
- return 0;
+ (*i)++;
-err_setup_chain:
- esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
- return err;
+ return 0;
}
static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
@@ -3867,3 +3869,62 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
return vport->metadata;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
+
+static bool
+is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return vport_num == MLX5_VPORT_PF ||
+ mlx5_eswitch_is_vf_vport(esw, vport_num) ||
+ mlx5_esw_is_sf_vport(esw, vport_num);
+}
+
+int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
+ u8 *hw_addr, int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ if (!is_port_function_supported(esw, vport_num))
+ return -EOPNOTSUPP;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+ return PTR_ERR(vport);
+ }
+
+ mutex_lock(&esw->state_lock);
+ ether_addr_copy(hw_addr, vport->info.mac);
+ *hw_addr_len = ETH_ALEN;
+ mutex_unlock(&esw->state_lock);
+ return 0;
+}
+
+int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ u16 vport_num;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw)) {
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
+ return PTR_ERR(esw);
+ }
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ if (!is_port_function_supported(esw, vport_num)) {
+ NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
+ return -EINVAL;
+ }
+
+ return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 750b21124a1a..dafe341358c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -451,7 +451,8 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
list_for_each_entry(dst, &fte->node.children, node.list) {
if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
- if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+ dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
num_encap++;
num_fwd_destinations++;
@@ -788,7 +789,8 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
int err;
u32 *in;
- if (namespace == MLX5_FLOW_NAMESPACE_FDB)
+ if (namespace == MLX5_FLOW_NAMESPACE_FDB ||
+ namespace == MLX5_FLOW_NAMESPACE_FDB_BYPASS)
max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
else
max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
@@ -860,6 +862,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
switch (namespace) {
case MLX5_FLOW_NAMESPACE_FDB:
+ case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
table_type = FS_FT_FDB;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 386ab9a2d490..b628917e38e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1525,7 +1525,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2)
{
if (d1->type == d2->type) {
- if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+ d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
d1->vport.num == d2->vport.num &&
d1->vport.flags == d2->vport.flags &&
((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
@@ -2206,6 +2207,22 @@ struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
+static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
+{
+ switch (type) {
+ case MLX5_FLOW_NAMESPACE_BYPASS:
+ case MLX5_FLOW_NAMESPACE_LAG:
+ case MLX5_FLOW_NAMESPACE_OFFLOADS:
+ case MLX5_FLOW_NAMESPACE_ETHTOOL:
+ case MLX5_FLOW_NAMESPACE_KERNEL:
+ case MLX5_FLOW_NAMESPACE_LEFTOVERS:
+ case MLX5_FLOW_NAMESPACE_ANCHOR:
+ return true;
+ default:
+ return false;
+ }
+}
+
struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type)
{
@@ -2235,31 +2252,39 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->sniffer_tx_root_ns)
return &steering->sniffer_tx_root_ns->ns;
return NULL;
- default:
+ case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
+ root_ns = steering->fdb_root_ns;
+ prio = FDB_BYPASS_PATH;
break;
- }
-
- if (type == MLX5_FLOW_NAMESPACE_EGRESS ||
- type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) {
+ case MLX5_FLOW_NAMESPACE_EGRESS:
+ case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
root_ns = steering->egress_root_ns;
prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX:
root_ns = steering->rdma_rx_root_ns;
prio = RDMA_RX_BYPASS_PRIO;
- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
root_ns = steering->rdma_rx_root_ns;
prio = RDMA_RX_KERNEL_PRIO;
- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX:
root_ns = steering->rdma_tx_root_ns;
- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) {
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
root_ns = steering->rdma_rx_root_ns;
prio = RDMA_RX_COUNTERS_PRIO;
- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) {
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
root_ns = steering->rdma_tx_root_ns;
prio = RDMA_TX_COUNTERS_PRIO;
- } else { /* Must be NIC RX */
+ break;
+ default: /* Must be NIC RX */
+ WARN_ON(!is_nic_rx_ns(type));
root_ns = steering->root_ns;
prio = type;
+ break;
}
if (!root_ns)
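A consumer of the new FDB bypass priorities resolves the namespace like any
other steering namespace. A sketch, assuming an FDB-capable device; the table
attributes are illustrative:

	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB_BYPASS);
	if (!ns)
		return -EOPNOTSUPP;

	ft_attr.prio = 0;	/* one of the MLX5_BY_PASS_NUM_REGULAR_PRIOS priorities */
	ft_attr.max_fte = 1;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);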
@@ -2822,6 +2847,28 @@ static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
return 0;
}
+static int create_fdb_bypass(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_flow_namespace *ns;
+ struct fs_prio *prio;
+ int i;
+
+ prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+
+ ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
+
+ for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
+ prio = fs_create_prio(ns, i, 1);
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+ }
+ return 0;
+}
+
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *maj_prio;
@@ -2831,12 +2878,10 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
if (!steering->fdb_root_ns)
return -ENOMEM;
- maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
- 1);
- if (IS_ERR(maj_prio)) {
- err = PTR_ERR(maj_prio);
+ err = create_fdb_bypass(steering);
+ if (err)
goto out_err;
- }
+
err = create_fdb_fast_path(steering);
if (err)
goto out_err;
@@ -3038,6 +3083,11 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
steering->dev = dev;
dev->priv.steering = steering;
+ if (mlx5_fs_dr_is_supported(dev))
+ steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else
+ steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+
steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
sizeof(struct mlx5_flow_group), 0,
0, NULL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 7711db245c63..5469b08d635f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -203,7 +203,7 @@ struct mlx5_ft_underlay_qp {
u32 qpn;
};
-#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_c00
+#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_e00
/* Calculate the fte_match_param length and without the reserved length.
* Make sure the reserved field is the last.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 7e0e04cf26f8..b406e0367af6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -38,9 +38,10 @@
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
+#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
-#define MLX5_SF_NUM_COUNTERS_BULK 8
+#define MLX5_INIT_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
@@ -145,13 +146,15 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
spin_unlock(&fc_stats->counters_idr_lock);
}
-static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
+static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
- int num_counters_bulk = mlx5_core_is_sf(dev) ?
- MLX5_SF_NUM_COUNTERS_BULK :
- MLX5_SW_MAX_COUNTERS_BULK;
+ return min_t(int, MLX5_INIT_COUNTERS_BULK,
+ (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+}
- return min_t(int, num_counters_bulk,
+static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
+{
+ return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
(1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}
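Both helpers clamp against the same HW capability, so for a device reporting
log_max_flow_counter_bulk = 10 the buffer starts at min(8, 1024) = 8 counters
and can grow to min(32768, 1024) = 1024. A standalone mirror of the policy;
the helper name is illustrative:

	/* Sizing policy: the smaller of a SW cap and the HW bulk limit. */
	static int example_bulk_len(int sw_cap, int log_hw_cap)
	{
		return min_t(int, sw_cap, 1 << log_hw_cap);
	}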
@@ -177,7 +180,7 @@ static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
bool query_more_counters = (first->id <= last_id);
- int max_bulk_len = get_max_bulk_query_len(dev);
+ int cur_bulk_len = fc_stats->bulk_query_len;
u32 *data = fc_stats->bulk_query_out;
struct mlx5_fc *counter = first;
u32 bulk_base_id;
@@ -189,7 +192,7 @@ static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
bulk_base_id = counter->id & ~0x3;
/* number of counters to query inc. the last counter */
- bulk_len = min_t(int, max_bulk_len,
+ bulk_len = min_t(int, cur_bulk_len,
ALIGN(last_id - bulk_base_id + 1, 4));
err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
@@ -230,6 +233,41 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
mlx5_fc_free(dev, counter);
}
+static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ int max_bulk_len = get_max_bulk_query_len(dev);
+ unsigned long now = jiffies;
+ u32 *bulk_query_out_tmp;
+ int max_out_len;
+
+ if (fc_stats->bulk_query_alloc_failed &&
+ time_before(now, fc_stats->next_bulk_query_alloc))
+ return;
+
+ max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
+ bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL);
+ if (!bulk_query_out_tmp) {
+ mlx5_core_warn_once(dev,
+ "Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n",
+ max_bulk_len);
+ fc_stats->bulk_query_alloc_failed = true;
+ fc_stats->next_bulk_query_alloc =
+ now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD;
+ return;
+ }
+
+ kfree(fc_stats->bulk_query_out);
+ fc_stats->bulk_query_out = bulk_query_out_tmp;
+ fc_stats->bulk_query_len = max_bulk_len;
+ if (fc_stats->bulk_query_alloc_failed) {
+ mlx5_core_info(dev,
+ "Flow counters bulk query buffer size increased, bulk_size(%d)\n",
+ max_bulk_len);
+ fc_stats->bulk_query_alloc_failed = false;
+ }
+}
+
static void mlx5_fc_stats_work(struct work_struct *work)
{
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
@@ -247,15 +285,22 @@ static void mlx5_fc_stats_work(struct work_struct *work)
queue_delayed_work(fc_stats->wq, &fc_stats->work,
fc_stats->sampling_interval);
- llist_for_each_entry(counter, addlist, addlist)
+ llist_for_each_entry(counter, addlist, addlist) {
mlx5_fc_stats_insert(dev, counter);
+ fc_stats->num_counters++;
+ }
llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
mlx5_fc_stats_remove(dev, counter);
mlx5_fc_release(dev, counter);
+ fc_stats->num_counters--;
}
+ if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
+ fc_stats->num_counters > get_init_bulk_query_len(dev))
+ mlx5_fc_stats_bulk_query_size_increase(dev);
+
if (time_before(now, fc_stats->next_query) ||
list_empty(&fc_stats->counters))
return;
@@ -378,8 +423,8 @@ EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- int max_bulk_len;
- int max_out_len;
+ int init_bulk_len;
+ int init_out_len;
spin_lock_init(&fc_stats->counters_idr_lock);
idr_init(&fc_stats->counters_idr);
@@ -387,11 +432,12 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
init_llist_head(&fc_stats->addlist);
init_llist_head(&fc_stats->dellist);
- max_bulk_len = get_max_bulk_query_len(dev);
- max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
- fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
+ init_bulk_len = get_init_bulk_query_len(dev);
+ init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len);
+ fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL);
if (!fc_stats->bulk_query_out)
return -ENOMEM;
+ fc_stats->bulk_query_len = init_bulk_len;
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 3ca998874c50..737df402c927 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -420,6 +420,11 @@ static void print_health_info(struct mlx5_core_dev *dev)
if (!ioread8(&h->synd))
return;
+ if (ioread32be(&h->fw_ver) == 0xFFFFFFFF) {
+ mlx5_log(dev, LOGLEVEL_ERR, "PCI slot is unavailable\n");
+ return;
+ }
+
rfr_severity = ioread8(&h->rfr_severity);
severity = mlx5_health_get_severity(rfr_severity);
mlx5_log(dev, severity, "Health issue observed, %s, severity(%d) %s:\n",
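Reads from an unreachable PCI function complete with all-ones rather than
faulting, so fw_ver == 0xFFFFFFFF means none of the other health fields can be
trusted. The same guard in isolation, as a sketch:

	static bool example_device_gone(struct health_buffer __iomem *h)
	{
		/* An all-ones readback is the conventional "device fell off
		 * the bus" signal on PCI.
		 */
		return ioread32be(&h->fw_ver) == 0xFFFFFFFF;
	}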
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 962d41418ce7..f4f7eaf16446 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -67,7 +67,9 @@ static void mlx5i_get_ethtool_stats(struct net_device *dev,
}
static int mlx5i_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -75,7 +77,9 @@ static int mlx5i_set_ringparam(struct net_device *dev,
}
static void mlx5i_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -105,7 +109,7 @@ static int mlx5i_set_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- return mlx5e_ethtool_set_coalesce(priv, coal);
+ return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5i_get_coalesce(struct net_device *netdev,
@@ -115,7 +119,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- return mlx5e_ethtool_get_coalesce(priv, coal);
+ return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}
static int mlx5i_get_ts_info(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index ea1efdecc88c..0a99a020a3b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -110,14 +110,14 @@ void mlx5i_cleanup(struct mlx5e_priv *priv)
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
- struct mlx5e_sw_stats s = { 0 };
+ struct rtnl_link_stats64 s = {};
int i, j;
for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats;
struct mlx5e_rq_stats *rq_stats;
- channel_stats = &priv->channel_stats[i];
+ channel_stats = priv->channel_stats[i];
rq_stats = &channel_stats->rq;
s.rx_packets += rq_stats->packets;
@@ -128,11 +128,17 @@ static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
s.tx_packets += sq_stats->packets;
s.tx_bytes += sq_stats->bytes;
- s.tx_queue_dropped += sq_stats->dropped;
+ s.tx_dropped += sq_stats->dropped;
}
}
- memcpy(&priv->stats.sw, &s, sizeof(s));
+ memset(&priv->stats.sw, 0, sizeof(s));
+
+ priv->stats.sw.rx_packets = s.rx_packets;
+ priv->stats.sw.rx_bytes = s.rx_bytes;
+ priv->stats.sw.tx_packets = s.tx_packets;
+ priv->stats.sw.tx_bytes = s.tx_bytes;
+ priv->stats.sw.tx_queue_dropped = s.tx_dropped;
}
void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
@@ -443,7 +449,6 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
- .rx_ptp_support = false,
};
/* mlx5i netdev NDos */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 5308f23702bc..0b86e78dbc0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -350,7 +350,6 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
- .rx_ptp_support = false,
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
new file mode 100644
index 000000000000..380a208ab137
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "mlx5_irq.h"
+#include "pci_irq.h"
+
+static void cpu_put(struct mlx5_irq_pool *pool, int cpu)
+{
+ pool->irqs_per_cpu[cpu]--;
+}
+
+static void cpu_get(struct mlx5_irq_pool *pool, int cpu)
+{
+ pool->irqs_per_cpu[cpu]++;
+}
+
+/* Gets the least loaded CPU, i.e. the CPU with the fewest IRQs bound to it */
+static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
+ const struct cpumask *req_mask)
+{
+ int best_cpu = -1;
+ int cpu;
+
+ for_each_cpu_and(cpu, req_mask, cpu_online_mask) {
+ /* CPU has zero IRQs on it. No need to search any more CPUs. */
+ if (!pool->irqs_per_cpu[cpu]) {
+ best_cpu = cpu;
+ break;
+ }
+ if (best_cpu < 0)
+ best_cpu = cpu;
+ if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])
+ best_cpu = cpu;
+ }
+ if (best_cpu == -1) {
+		/* There are no online CPUs in req_mask */
+ mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n",
+ cpumask_pr_args(req_mask));
+ best_cpu = cpumask_first(cpu_online_mask);
+ }
+ pool->irqs_per_cpu[best_cpu]++;
+ return best_cpu;
+}
+
+/* Creating an IRQ from irq_pool */
+static struct mlx5_irq *
+irq_pool_request_irq(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+{
+ cpumask_var_t auto_mask;
+ struct mlx5_irq *irq;
+ u32 irq_index;
+ int err;
+
+ if (!zalloc_cpumask_var(&auto_mask, GFP_KERNEL))
+ return ERR_PTR(-ENOMEM);
+	err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
+	if (err) {
+		free_cpumask_var(auto_mask);
+		return ERR_PTR(err);
+	}
+ if (pool->irqs_per_cpu) {
+ if (cpumask_weight(req_mask) > 1)
+			/* if req_mask contains more than one CPU, set the least loaded CPU
+			 * of req_mask
+			 */
+ cpumask_set_cpu(cpu_get_least_loaded(pool, req_mask), auto_mask);
+ else
+ cpu_get(pool, cpumask_first(req_mask));
+ }
+ irq = mlx5_irq_alloc(pool, irq_index, cpumask_empty(auto_mask) ? req_mask : auto_mask);
+ free_cpumask_var(auto_mask);
+ return irq;
+}
+
+/* Looking for the IRQ with the smallest refcount that fits req_mask.
+ * If pool is sf_comp_pool, then we are looking for an IRQ with any of the
+ * requested CPUs in req_mask.
+ * For example: req_mask = 0xf, irq0_mask = 0x10, irq1_mask = 0x1. irq0_mask
+ * isn't a subset of req_mask, so we skip it; irq1_mask is a subset of req_mask,
+ * so we don't skip it.
+ * If pool is sf_ctrl_pool, then all IRQs have the same mask, so any IRQ will
+ * fit. And since a mask is a subset of itself, we will pass the first if below.
+ */
+static struct mlx5_irq *
+irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+{
+ int start = pool->xa_num_irqs.min;
+ int end = pool->xa_num_irqs.max;
+ struct mlx5_irq *irq = NULL;
+ struct mlx5_irq *iter;
+ int irq_refcount = 0;
+ unsigned long index;
+
+ lockdep_assert_held(&pool->lock);
+ xa_for_each_range(&pool->irqs, index, iter, start, end) {
+ struct cpumask *iter_mask = mlx5_irq_get_affinity_mask(iter);
+ int iter_refcount = mlx5_irq_read_locked(iter);
+
+ if (!cpumask_subset(iter_mask, req_mask))
+			/* skip IRQs with a mask which is not a subset of req_mask */
+ continue;
+ if (iter_refcount < pool->min_threshold)
+ /* If we found an IRQ with less than min_thres, return it */
+ return iter;
+ if (!irq || iter_refcount < irq_refcount) {
+			/* In case we don't find an IRQ with less than min_thres,
+ * keep a pointer to the least used IRQ
+ */
+ irq_refcount = iter_refcount;
+ irq = iter;
+ }
+ }
+ return irq;
+}
+
+/**
+ * mlx5_irq_affinity_request - request an IRQ according to the given mask.
+ * @pool: IRQ pool to request from.
+ * @req_mask: cpumask requested for this IRQ.
+ *
+ * This function returns a pointer to the IRQ, or an ERR_PTR in case of error.
+ */
+struct mlx5_irq *
+mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+{
+ struct mlx5_irq *least_loaded_irq, *new_irq;
+
+ mutex_lock(&pool->lock);
+ least_loaded_irq = irq_pool_find_least_loaded(pool, req_mask);
+ if (least_loaded_irq &&
+ mlx5_irq_read_locked(least_loaded_irq) < pool->min_threshold)
+ goto out;
+ /* We didn't find an IRQ with less than min_thres, try to allocate a new IRQ */
+ new_irq = irq_pool_request_irq(pool, req_mask);
+ if (IS_ERR(new_irq)) {
+ if (!least_loaded_irq) {
+			/* We failed to create a new IRQ and no existing IRQ matched */
+ mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
+ PTR_ERR(new_irq));
+ mutex_unlock(&pool->lock);
+ return new_irq;
+ }
+ /* We failed to create a new IRQ for the requested affinity,
+ * sharing existing IRQ.
+ */
+ goto out;
+ }
+ least_loaded_irq = new_irq;
+ goto unlock;
+out:
+ mlx5_irq_get_locked(least_loaded_irq);
+ if (mlx5_irq_read_locked(least_loaded_irq) > pool->max_threshold)
+ mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
+ pci_irq_vector(pool->dev->pdev,
+ mlx5_irq_get_index(least_loaded_irq)), pool->name,
+ mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ);
+unlock:
+ mutex_unlock(&pool->lock);
+ return least_loaded_irq;
+}
+
+void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
+ int num_irqs)
+{
+ struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ int i;
+
+ for (i = 0; i < num_irqs; i++) {
+ int cpu = cpumask_first(mlx5_irq_get_affinity_mask(irqs[i]));
+
+ synchronize_irq(pci_irq_vector(pool->dev->pdev,
+ mlx5_irq_get_index(irqs[i])));
+ if (mlx5_irq_put(irqs[i]))
+ if (pool->irqs_per_cpu)
+ cpu_put(pool, cpu);
+ }
+}
+
+/**
+ * mlx5_irq_affinity_irqs_request_auto - request one or more IRQs for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQs.
+ * @nirqs: number of IRQs to request.
+ * @irqs: an output array of IRQs pointers.
+ *
+ * Each IRQ is bound to at most 1 CPU.
+ * This function requests IRQs according to the default assignment policy:
+ * - in each iteration, request the least loaded IRQ which is not bound to any
+ *   CPU of the previously requested IRQs.
+ *
+ * This function returns the number of IRQs requested (which might be smaller
+ * than @nirqs) on success, or a negative error code on failure.
+ */
+int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
+ struct mlx5_irq **irqs)
+{
+ struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ cpumask_var_t req_mask;
+ struct mlx5_irq *irq;
+ int i = 0;
+
+ if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_copy(req_mask, cpu_online_mask);
+ for (i = 0; i < nirqs; i++) {
+ if (mlx5_irq_pool_is_sf_pool(pool))
+ irq = mlx5_irq_affinity_request(pool, req_mask);
+ else
+			/* In case the SF pool doesn't exist, fall back to the PF IRQs.
+			 * The PF IRQs are already allocated and bound to CPUs
+ * at this point. Hence, only an index is needed.
+ */
+ irq = mlx5_irq_request(dev, i, NULL);
+ if (IS_ERR(irq))
+ break;
+ irqs[i] = irq;
+ cpumask_clear_cpu(cpumask_first(mlx5_irq_get_affinity_mask(irq)), req_mask);
+ mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
+ pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
+ cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
+ mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
+ }
+ free_cpumask_var(req_mask);
+ if (!i)
+ return PTR_ERR(irq);
+ return i;
+}
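Taken together, a consumer requests a batch of auto-assigned IRQs and releases
them symmetrically. A usage sketch; the array size is illustrative:

	struct mlx5_irq *irqs[4];
	int got;

	got = mlx5_irq_affinity_irqs_request_auto(dev, 4, irqs);
	if (got < 0)
		return got;	/* not even one IRQ was obtained */

	/* ... attach one EQ to each of irqs[0..got-1] ... */

	mlx5_irq_affinity_irqs_release(dev, irqs, got);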
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index bf4d3cbefa63..1ca01a5b6cdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -268,10 +268,8 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
fi = fen_info->fi;
- if (fi->nh) {
- NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
- return notifier_from_errno(-EINVAL);
- }
+ if (fi->nh)
+ return NOTIFY_DONE;
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
if (fib_dev != ldev->pf[MLX5_LAG_P1].netdev &&
fib_dev != ldev->pf[MLX5_LAG_P2].netdev) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 97e5845b4cfd..d5e47630e284 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
+ if (!mlx5_chains_prios_supported(chains))
+ return 1;
+
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 7df9c7f8d9c8..2c774f367199 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -98,6 +98,8 @@ enum {
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};
+#define LOG_MAX_SUPPORTED_QPS 0xff
+
static struct mlx5_profile profile[] = {
[0] = {
.mask = 0,
@@ -109,7 +111,7 @@ static struct mlx5_profile profile[] = {
[2] = {
.mask = MLX5_PROF_MASK_QP_SIZE |
MLX5_PROF_MASK_MR_CACHE,
- .log_max_qp = 18,
+ .log_max_qp = LOG_MAX_SUPPORTED_QPS,
.mr_cache[0] = {
.size = 500,
.limit = 250
@@ -484,10 +486,26 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}
+static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &val);
+ if (!err)
+ return val.vu32;
+ mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
+ return err;
+}
+
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
struct mlx5_profile *prof = &dev->profile;
void *set_hca_cap;
+ int max_uc_list;
int err;
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
@@ -507,7 +525,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
to_fw_pkey_sz(dev, 128));
/* Check log_max_qp from HCA caps to set in current profile */
- if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
+ if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
+ prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
prof->log_max_qp,
MLX5_CAP_GEN_MAX(dev, log_max_qp));
@@ -561,6 +581,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN(dev, roce_rw_supported))
MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
+ max_uc_list = max_uc_list_get_devlink_param(dev);
+ if (max_uc_list > 0)
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
+ ilog2(max_uc_list));
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -1604,12 +1629,28 @@ static void remove_one(struct pci_dev *pdev)
mlx5_devlink_free(devlink);
}
+#define mlx5_pci_trace(dev, fmt, ...) ({ \
+ struct mlx5_core_dev *__dev = (dev); \
+ mlx5_core_info(__dev, "%s Device state = %d health sensors: %d pci_status: %d. " fmt, \
+ __func__, __dev->state, mlx5_health_check_fatal_sensors(__dev), \
+ __dev->pci_status, ##__VA_ARGS__); \
+})
+
+static const char *result2str(enum pci_ers_result result)
+{
+ return result == PCI_ERS_RESULT_NEED_RESET ? "need reset" :
+ result == PCI_ERS_RESULT_DISCONNECT ? "disconnect" :
+ result == PCI_ERS_RESULT_RECOVERED ? "recovered" :
+ "unknown";
+}
+
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ enum pci_ers_result res;
- mlx5_core_info(dev, "%s was called\n", __func__);
+ mlx5_pci_trace(dev, "Enter, pci channel state = %d\n", state);
mlx5_enter_error_state(dev, false);
mlx5_error_sw_reset(dev);
@@ -1617,8 +1658,11 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
- return state == pci_channel_io_perm_failure ?
+ res = state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+
+ mlx5_pci_trace(dev, "Exit, result = %d, %s\n", res, result2str(res));
+ return res;
}
/* wait for the device to show vital signs by waiting
@@ -1652,28 +1696,34 @@ static int wait_vital(struct pci_dev *pdev)
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
+ enum pci_ers_result res = PCI_ERS_RESULT_DISCONNECT;
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
- mlx5_core_info(dev, "%s was called\n", __func__);
+ mlx5_pci_trace(dev, "Enter\n");
err = mlx5_pci_enable_device(dev);
if (err) {
mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
__func__, err);
- return PCI_ERS_RESULT_DISCONNECT;
+ goto out;
}
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
- if (wait_vital(pdev)) {
- mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
- return PCI_ERS_RESULT_DISCONNECT;
+ err = wait_vital(pdev);
+ if (err) {
+ mlx5_core_err(dev, "%s: wait vital failed with error code: %d\n",
+ __func__, err);
+ goto out;
}
- return PCI_ERS_RESULT_RECOVERED;
+ res = PCI_ERS_RESULT_RECOVERED;
+out:
+ mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res));
+ return res;
}
static void mlx5_pci_resume(struct pci_dev *pdev)
@@ -1681,14 +1731,12 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
- mlx5_core_info(dev, "%s was called\n", __func__);
+ mlx5_pci_trace(dev, "Enter, loading driver..\n");
err = mlx5_load_one(dev);
- if (err)
- mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
- __func__, err);
- else
- mlx5_core_info(dev, "%s: device recovered\n", __func__);
+
+ mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err,
+ !err ? "recovered" : "Failed");
}
static const struct pci_error_handlers mlx5_err_handler = {
@@ -1809,12 +1857,13 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
int mlx5_recover_device(struct mlx5_core_dev *dev)
{
- int ret = -EIO;
+ if (!mlx5_core_is_sf(dev)) {
+ mlx5_pci_disable_device(dev);
+ if (mlx5_pci_slot_reset(dev->pdev) != PCI_ERS_RESULT_RECOVERED)
+ return -EIO;
+ }
- mlx5_pci_disable_device(dev);
- if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
- ret = mlx5_load_one(dev);
- return ret;
+ return mlx5_load_one(dev);
}
static struct pci_driver mlx5_core_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index bb677329ea08..6f8baa0f2a73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -305,5 +305,6 @@ static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
bool mlx5_eth_supported(struct mlx5_core_dev *dev);
bool mlx5_rdma_supported(struct mlx5_core_dev *dev);
bool mlx5_vnet_supported(struct mlx5_core_dev *dev);
+bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 8116815663a7..23cb63fa4588 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -22,12 +22,40 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
int msix_vec_count);
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
+struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev);
+void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
struct cpumask *affinity);
-void mlx5_irq_release(struct mlx5_irq *irq);
+int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
+ struct mlx5_irq **irqs);
+void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs);
int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
int mlx5_irq_get_index(struct mlx5_irq *irq);
+struct mlx5_irq_pool;
+#ifdef CONFIG_MLX5_SF
+int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
+ struct mlx5_irq **irqs);
+struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool,
+ const struct cpumask *req_mask);
+void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
+ int num_irqs);
+#else
+static inline int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
+ struct mlx5_irq **irqs)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct mlx5_irq *
+mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev,
+ struct mlx5_irq **irqs, int num_irqs) {}
+#endif
#endif /* __MLX5_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 830444f927d4..41807ef55201 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -7,15 +7,12 @@
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
+#include "pci_irq.h"
#include "lib/sf.h"
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
-#define MLX5_MAX_IRQ_NAME (32)
-/* max irq_index is 2047, so four chars */
-#define MLX5_MAX_IRQ_IDX_CHARS (4)
-
#define MLX5_SFS_PER_CTRL_IRQ 64
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
@@ -25,7 +22,6 @@
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
#define MLX5_EQ_SHARE_IRQ_MIN_COMP (1)
#define MLX5_EQ_SHARE_IRQ_MIN_CTRL (4)
-#define MLX5_EQ_REFS_PER_IRQ (2)
struct mlx5_irq {
struct atomic_notifier_head nh;
@@ -37,16 +33,6 @@ struct mlx5_irq {
int irqn;
};
-struct mlx5_irq_pool {
- char name[MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS];
- struct xa_limit xa_num_irqs;
- struct mutex lock; /* sync IRQs creations */
- struct xarray irqs;
- u32 max_threshold;
- u32 min_threshold;
- struct mlx5_core_dev *dev;
-};
-
struct mlx5_irq_table {
struct mlx5_irq_pool *pf_pool;
struct mlx5_irq_pool *sf_ctrl_pool;
@@ -143,28 +129,38 @@ static void irq_release(struct mlx5_irq *irq)
struct mlx5_irq_pool *pool = irq->pool;
xa_erase(&pool->irqs, irq->index);
- /* free_irq requires that affinity and rmap will be cleared
+	/* free_irq requires that affinity_hint and rmap be cleared
* before calling it. This is why there is asymmetry with set_rmap
* which should be called after alloc_irq but before request_irq.
*/
- irq_set_affinity_hint(irq->irqn, NULL);
+ irq_update_affinity_hint(irq->irqn, NULL);
free_cpumask_var(irq->mask);
free_irq(irq->irqn, &irq->nh);
kfree(irq);
}
-static void irq_put(struct mlx5_irq *irq)
+int mlx5_irq_put(struct mlx5_irq *irq)
{
struct mlx5_irq_pool *pool = irq->pool;
+ int ret = 0;
mutex_lock(&pool->lock);
irq->refcount--;
- if (!irq->refcount)
+ if (!irq->refcount) {
irq_release(irq);
+ ret = 1;
+ }
mutex_unlock(&pool->lock);
+ return ret;
+}
+
+int mlx5_irq_read_locked(struct mlx5_irq *irq)
+{
+ lockdep_assert_held(&irq->pool->lock);
+ return irq->refcount;
}
-static int irq_get_locked(struct mlx5_irq *irq)
+int mlx5_irq_get_locked(struct mlx5_irq *irq)
{
lockdep_assert_held(&irq->pool->lock);
if (WARN_ON_ONCE(!irq->refcount))
@@ -178,7 +174,7 @@ static int irq_get(struct mlx5_irq *irq)
int err;
mutex_lock(&irq->pool->lock);
- err = irq_get_locked(irq);
+ err = mlx5_irq_get_locked(irq);
mutex_unlock(&irq->pool->lock);
return err;
}
@@ -210,12 +206,8 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
}
-static bool irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
-{
- return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
-}
-
-static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
+struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
+ const struct cpumask *affinity)
{
struct mlx5_core_dev *dev = pool->dev;
char name[MLX5_MAX_IRQ_NAME];
@@ -226,7 +218,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
if (!irq)
return ERR_PTR(-ENOMEM);
irq->irqn = pci_irq_vector(dev->pdev, i);
- if (!irq_pool_is_sf_pool(pool))
+ if (!mlx5_irq_pool_is_sf_pool(pool))
irq_set_name(pool, name, i);
else
irq_sf_set_name(pool, name, i);
@@ -244,6 +236,10 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
err = -ENOMEM;
goto err_cpumask;
}
+ if (affinity) {
+ cpumask_copy(irq->mask, affinity);
+ irq_set_affinity_and_hint(irq->irqn, irq->mask);
+ }
irq->pool = pool;
irq->refcount = 1;
irq->index = i;
@@ -255,6 +251,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
}
return irq;
err_xa:
+ irq_update_affinity_hint(irq->irqn, NULL);
free_cpumask_var(irq->mask);
err_cpumask:
free_irq(irq->irqn, &irq->nh);
@@ -275,7 +272,7 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
return -ENOENT;
ret = atomic_notifier_chain_register(&irq->nh, nb);
if (ret)
- irq_put(irq);
+ mlx5_irq_put(irq);
return ret;
}
@@ -284,7 +281,7 @@ int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
int err = 0;
err = atomic_notifier_chain_unregister(&irq->nh, nb);
- irq_put(irq);
+ mlx5_irq_put(irq);
return err;
}
@@ -300,131 +297,121 @@ int mlx5_irq_get_index(struct mlx5_irq *irq)
/* irq_pool API */
-/* creating an irq from irq_pool */
-static struct mlx5_irq *irq_pool_create_irq(struct mlx5_irq_pool *pool,
- struct cpumask *affinity)
+/* requesting an irq from a given pool according to given index */
+static struct mlx5_irq *
+irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
+ struct cpumask *affinity)
{
struct mlx5_irq *irq;
- u32 irq_index;
- int err;
- err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs,
- GFP_KERNEL);
- if (err)
- return ERR_PTR(err);
- irq = irq_request(pool, irq_index);
- if (IS_ERR(irq))
- return irq;
- cpumask_copy(irq->mask, affinity);
- irq_set_affinity_hint(irq->irqn, irq->mask);
+ mutex_lock(&pool->lock);
+ irq = xa_load(&pool->irqs, vecidx);
+ if (irq) {
+ mlx5_irq_get_locked(irq);
+ goto unlock;
+ }
+ irq = mlx5_irq_alloc(pool, vecidx, affinity);
+unlock:
+ mutex_unlock(&pool->lock);
return irq;
}
-/* looking for the irq with the smallest refcount and the same affinity */
-static struct mlx5_irq *irq_pool_find_least_loaded(struct mlx5_irq_pool *pool,
- struct cpumask *affinity)
+static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_table)
{
- int start = pool->xa_num_irqs.min;
- int end = pool->xa_num_irqs.max;
- struct mlx5_irq *irq = NULL;
- struct mlx5_irq *iter;
- unsigned long index;
+ return irq_table->sf_ctrl_pool;
+}
- lockdep_assert_held(&pool->lock);
- xa_for_each_range(&pool->irqs, index, iter, start, end) {
- if (!cpumask_equal(iter->mask, affinity))
- continue;
- if (iter->refcount < pool->min_threshold)
- return iter;
- if (!irq || iter->refcount < irq->refcount)
- irq = iter;
- }
- return irq;
+static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
+{
+ return irq_table->sf_comp_pool;
}
-/* requesting an irq from a given pool according to given affinity */
-static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
- struct cpumask *affinity)
+struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev)
{
- struct mlx5_irq *least_loaded_irq, *new_irq;
+ struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
+ struct mlx5_irq_pool *pool = NULL;
- mutex_lock(&pool->lock);
- least_loaded_irq = irq_pool_find_least_loaded(pool, affinity);
- if (least_loaded_irq &&
- least_loaded_irq->refcount < pool->min_threshold)
- goto out;
- new_irq = irq_pool_create_irq(pool, affinity);
- if (IS_ERR(new_irq)) {
- if (!least_loaded_irq) {
- mlx5_core_err(pool->dev, "Didn't find IRQ for cpu = %u\n",
- cpumask_first(affinity));
- mutex_unlock(&pool->lock);
- return new_irq;
- }
- /* We failed to create a new IRQ for the requested affinity,
- * sharing existing IRQ.
- */
- goto out;
- }
- least_loaded_irq = new_irq;
- goto unlock;
-out:
- irq_get_locked(least_loaded_irq);
- if (least_loaded_irq->refcount > pool->max_threshold)
- mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
- least_loaded_irq->irqn, pool->name,
- least_loaded_irq->refcount / MLX5_EQ_REFS_PER_IRQ);
-unlock:
- mutex_unlock(&pool->lock);
- return least_loaded_irq;
+ if (mlx5_core_is_sf(dev))
+ pool = sf_irq_pool_get(irq_table);
+
+	/* In some configs, there won't be a pool of SF IRQs. Hence, return
+	 * the PF IRQ pool in case the SF pool doesn't exist.
+ */
+ return pool ? pool : irq_table->pf_pool;
}
-/* requesting an irq from a given pool according to given index */
-static struct mlx5_irq *
-irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
- struct cpumask *affinity)
+static struct mlx5_irq_pool *ctrl_irq_pool_get(struct mlx5_core_dev *dev)
{
- struct mlx5_irq *irq;
+ struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
+ struct mlx5_irq_pool *pool = NULL;
- mutex_lock(&pool->lock);
- irq = xa_load(&pool->irqs, vecidx);
- if (irq) {
- irq_get_locked(irq);
- goto unlock;
+ if (mlx5_core_is_sf(dev))
+ pool = sf_ctrl_irq_pool_get(irq_table);
+
+	/* In some configs, there won't be a pool of SF IRQs. Hence, return
+	 * the PF IRQ pool in case the SF pool doesn't exist.
+ */
+ return pool ? pool : irq_table->pf_pool;
+}
+
+/**
+ * mlx5_irqs_release - release one or more IRQs back to the system.
+ * @irqs: IRQs to be released.
+ * @nirqs: number of IRQs to be released.
+ */
+static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
+{
+ int i;
+
+ for (i = 0; i < nirqs; i++) {
+ synchronize_irq(irqs[i]->irqn);
+ mlx5_irq_put(irqs[i]);
}
- irq = irq_request(pool, vecidx);
- if (IS_ERR(irq) || !affinity)
- goto unlock;
- cpumask_copy(irq->mask, affinity);
- if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
- cpumask_empty(irq->mask))
- cpumask_set_cpu(0, irq->mask);
- irq_set_affinity_hint(irq->irqn, irq->mask);
-unlock:
- mutex_unlock(&pool->lock);
- return irq;
}
-static struct mlx5_irq_pool *find_sf_irq_pool(struct mlx5_irq_table *irq_table,
- int i, struct cpumask *affinity)
+/**
+ * mlx5_ctrl_irq_release - release a ctrl IRQ back to the system.
+ * @ctrl_irq: ctrl IRQ to be released.
+ */
+void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
{
- if (cpumask_empty(affinity) && i == MLX5_IRQ_EQ_CTRL)
- return irq_table->sf_ctrl_pool;
- return irq_table->sf_comp_pool;
+ mlx5_irqs_release(&ctrl_irq, 1);
}
/**
- * mlx5_irq_release - release an IRQ back to the system.
- * @irq: irq to be released.
+ * mlx5_ctrl_irq_request - request a ctrl IRQ for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQ.
+ *
+ * This function returns a pointer to the IRQ, or an ERR_PTR in case of error.
*/
-void mlx5_irq_release(struct mlx5_irq *irq)
+struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
{
- synchronize_irq(irq->irqn);
- irq_put(irq);
+ struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
+ cpumask_var_t req_mask;
+ struct mlx5_irq *irq;
+
+ if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
+ return ERR_PTR(-ENOMEM);
+ cpumask_copy(req_mask, cpu_online_mask);
+ if (!mlx5_irq_pool_is_sf_pool(pool)) {
+ /* In case we are allocating a control IRQ for PF/VF */
+ if (!pool->xa_num_irqs.max) {
+ cpumask_clear(req_mask);
+ /* In case we only have a single IRQ for PF/VF */
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), req_mask);
+ }
+		/* Allocate the IRQ at the last index of the pool */
+ irq = irq_pool_request_vector(pool, pool->xa_num_irqs.max, req_mask);
+ } else {
+ irq = mlx5_irq_affinity_request(pool, req_mask);
+ }
+
+ free_cpumask_var(req_mask);
+ return irq;
}
/**
- * mlx5_irq_request - request an IRQ for mlx5 device.
+ * mlx5_irq_request - request an IRQ for mlx5 PF/VF device.
 * @dev: mlx5 device that is requesting the IRQ.
 * @vecidx: vector index of the IRQ. This argument is ignored if affinity is
* provided.
@@ -439,23 +426,8 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
struct mlx5_irq_pool *pool;
struct mlx5_irq *irq;
- if (mlx5_core_is_sf(dev)) {
- pool = find_sf_irq_pool(irq_table, vecidx, affinity);
- if (!pool)
- /* we don't have IRQs for SFs, using the PF IRQs */
- goto pf_irq;
- if (cpumask_empty(affinity) && !strcmp(pool->name, "mlx5_sf_comp"))
- /* In case an SF user request IRQ with vecidx */
- irq = irq_pool_request_vector(pool, vecidx, NULL);
- else
- irq = irq_pool_request_affinity(pool, affinity);
- goto out;
- }
-pf_irq:
pool = irq_table->pf_pool;
- vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx;
irq = irq_pool_request_vector(pool, vecidx, affinity);
-out:
if (IS_ERR(irq))
return irq;
mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
@@ -464,6 +436,51 @@ out:
return irq;
}
+/**
+ * mlx5_irqs_release_vectors - release one or more IRQs back to the system.
+ * @irqs: IRQs to be released.
+ * @nirqs: number of IRQs to be released.
+ */
+void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs)
+{
+ mlx5_irqs_release(irqs, nirqs);
+}
+
+/**
+ * mlx5_irqs_request_vectors - request one or more IRQs for an mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQs.
+ * @cpus: array of CPUs to bind the IRQs to.
+ * @nirqs: number of IRQs to request.
+ * @irqs: output array of IRQ pointers.
+ *
+ * Each IRQ is bound to at most 1 CPU.
+ * This function requests @nirqs IRQs, starting from vector index 0.
+ *
+ * This function returns the number of IRQs requested (which might be smaller
+ * than @nirqs) on success, or a negative error code on failure.
+ */
+int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
+ struct mlx5_irq **irqs)
+{
+ cpumask_var_t req_mask;
+ struct mlx5_irq *irq;
+ int i;
+
+ if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
+ return -ENOMEM;
+ for (i = 0; i < nirqs; i++) {
+ cpumask_set_cpu(cpus[i], req_mask);
+ irq = mlx5_irq_request(dev, i, req_mask);
+ if (IS_ERR(irq))
+ break;
+ cpumask_clear(req_mask);
+ irqs[i] = irq;
+ }
+
+ free_cpumask_var(req_mask);
+ return i ? i : PTR_ERR(irq);
+}
+
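A hedged usage sketch for the batched API (the helper name and array sizes are illustrative): the return value may be smaller than @nirqs, so only the first `got` entries are valid and must be released later.

static int example_comp_irqs_setup(struct mlx5_core_dev *dev)
{
	struct mlx5_irq *irqs[4];
	u16 cpus[4] = { 0, 1, 2, 3 };
	int got, i;

	got = mlx5_irqs_request_vectors(dev, cpus, ARRAY_SIZE(irqs), irqs);
	if (got < 0)
		return got;	/* not even one IRQ could be allocated */

	for (i = 0; i < got; i++) {
		/* ... bind completion EQ i to irqs[i] ... */
	}

	mlx5_irqs_release_vectors(irqs, got);
	return 0;
}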
static struct mlx5_irq_pool *
irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
u32 min_threshold, u32 max_threshold)
@@ -479,7 +496,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
pool->xa_num_irqs.max = start + size - 1;
if (name)
snprintf(pool->name, MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS,
- name);
+ "%s", name);
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
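The snprintf() change above closes a format-string bug: the pool name used to be passed as the format argument, so a name containing '%' conversions would be interpreted rather than copied. A reduced illustration of the two forms:

static void fmt_demo(char *dst, size_t len, const char *name)
{
	snprintf(dst, len, name);	/* bad: name is parsed as a format */
	snprintf(dst, len, "%s", name);	/* good: name is copied as data */
}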
@@ -500,6 +517,7 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
irq_release(irq);
xa_destroy(&pool->irqs);
mutex_destroy(&pool->lock);
+ kfree(pool->irqs_per_cpu);
kvfree(pool);
}
@@ -547,7 +565,17 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
err = PTR_ERR(table->sf_comp_pool);
goto err_sf_ctrl;
}
+
+ table->sf_comp_pool->irqs_per_cpu = kcalloc(nr_cpu_ids, sizeof(u16), GFP_KERNEL);
+ if (!table->sf_comp_pool->irqs_per_cpu) {
+ err = -ENOMEM;
+ goto err_irqs_per_cpu;
+ }
+
return 0;
+
+err_irqs_per_cpu:
+ irq_pool_free(table->sf_comp_pool);
err_sf_ctrl:
irq_pool_free(table->sf_ctrl_pool);
err_pf:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
new file mode 100644
index 000000000000..5c7e68bee43a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __PCI_IRQ_H__
+#define __PCI_IRQ_H__
+
+#include <linux/mlx5/driver.h>
+
+#define MLX5_MAX_IRQ_NAME (32)
+/* max irq_index is 2047, so four chars */
+#define MLX5_MAX_IRQ_IDX_CHARS (4)
+#define MLX5_EQ_REFS_PER_IRQ (2)
+
+struct mlx5_irq;
+
+struct mlx5_irq_pool {
+ char name[MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS];
+ struct xa_limit xa_num_irqs;
+ struct mutex lock; /* sync IRQs creations */
+ struct xarray irqs;
+ u32 max_threshold;
+ u32 min_threshold;
+ u16 *irqs_per_cpu;
+ struct mlx5_core_dev *dev;
+};
+
+struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev);
+static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
+{
+ return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
+}
+
+struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
+ const struct cpumask *affinity);
+int mlx5_irq_get_locked(struct mlx5_irq *irq);
+int mlx5_irq_read_locked(struct mlx5_irq *irq);
+int mlx5_irq_put(struct mlx5_irq *irq);
+
+#endif /* __PCI_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index f37db7cc32a6..7da012ff0d41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -30,10 +30,7 @@ bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
- if (!mlx5_sf_dev_supported(dev))
- return false;
-
- return !xa_empty(&table->devices);
+ return table && !xa_empty(&table->devices);
}
static ssize_t sfnum_show(struct device *dev, struct device_attribute *attr, char *buf)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index 252d6017387d..17aa348989cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -247,7 +247,7 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_hw_table *table;
u16 max_ext_fn = 0;
- u16 ext_base_id;
+ u16 ext_base_id = 0;
u16 max_fn = 0;
u16 base_id;
int err;
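The explicit zero-initialization of ext_base_id is defensive: when no external SFs are supported (max_ext_fn stays 0), the query that would otherwise set it is presumably skipped, and the variable would be read uninitialized further down.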
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 07936841ce99..c61a5e83c78c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -1560,6 +1560,12 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
}
+static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
+{
+ return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
+ !MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
+}
+
static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 max_hw_actions,
u32 num_sw_actions,
@@ -1591,8 +1597,13 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
if (ret)
return ret;
- if (!(*modify_ttl))
- *modify_ttl = dr_action_modify_check_is_ttl_modify(sw_action);
+ if (!(*modify_ttl) &&
+ dr_action_modify_check_is_ttl_modify(sw_action)) {
+ if (dr_action_modify_ttl_ignore(dmn))
+ continue;
+
+ *modify_ttl = true;
+ }
/* Convert SW action to HW action */
ret = dr_action_modify_sw_to_hw(dmn,
@@ -1631,7 +1642,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
* modify actions doesn't exceeds the limit
*/
hw_idx++;
- if ((num_sw_actions + hw_idx - i) >= max_hw_actions) {
+ if (hw_idx >= max_hw_actions) {
mlx5dr_dbg(dmn, "Modify header action number exceeds HW limit\n");
return -EINVAL;
}
@@ -1642,6 +1653,10 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
hw_idx++;
}
+ /* if the resulting HW actions list is empty, add NOP action */
+ if (!hw_idx)
+ hw_idx++;
+
*num_hw_actions = hw_idx;
return 0;
@@ -1792,7 +1807,7 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
int mlx5dr_action_destroy(struct mlx5dr_action *action)
{
- if (refcount_read(&action->refcount) > 1)
+ if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1))
return -EBUSY;
switch (action->action_type) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 1d8febed0d76..4dd619d238cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -132,6 +132,13 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
+ /* geneve_tlv_option_0_exist indicates STE support
+ * for the flex_parser_ok lookup type
+ */
+ caps->flex_parser_ok_bits_supp =
+ MLX5_CAP_FLOWTABLE(mdev,
+ flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
+
if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
@@ -152,7 +159,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->flex_parser_id_mpls_over_gre =
MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
- if (caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
caps->flex_parser_id_mpls_over_udp =
MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
@@ -599,7 +606,8 @@ static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
for (i = 0; i < fte->dests_size; i++) {
if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
- if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+ fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
num_encap++;
num_fwd_destinations++;
@@ -724,12 +732,19 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = fte->dest_arr[i].ft_id;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
- id = fte->dest_arr[i].vport.num;
- MLX5_SET(dest_format_struct, in_dests,
- destination_eswitch_owner_vhca_id_valid,
- !!(fte->dest_arr[i].vport.flags &
- MLX5_FLOW_DEST_VPORT_VHCA_ID));
+ if (type == MLX5_FLOW_DESTINATION_TYPE_VPORT) {
+ id = fte->dest_arr[i].vport.num;
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id_valid,
+ !!(fte->dest_arr[i].vport.flags &
+ MLX5_FLOW_DEST_VPORT_VHCA_ID));
+ } else {
+ id = 0;
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id_valid, 1);
+ }
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
fte->dest_arr[i].vport.vhca_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
new file mode 100644
index 000000000000..2784cd59fefe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include "dr_types.h"
+
+#define DR_DBG_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL)
+
+enum dr_dump_rec_type {
+ DR_DUMP_REC_TYPE_DOMAIN = 3000,
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER = 3001,
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_DEV_ATTR = 3002,
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT = 3003,
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS = 3004,
+ DR_DUMP_REC_TYPE_DOMAIN_SEND_RING = 3005,
+
+ DR_DUMP_REC_TYPE_TABLE = 3100,
+ DR_DUMP_REC_TYPE_TABLE_RX = 3101,
+ DR_DUMP_REC_TYPE_TABLE_TX = 3102,
+
+ DR_DUMP_REC_TYPE_MATCHER = 3200,
+ DR_DUMP_REC_TYPE_MATCHER_MASK = 3201,
+ DR_DUMP_REC_TYPE_MATCHER_RX = 3202,
+ DR_DUMP_REC_TYPE_MATCHER_TX = 3203,
+ DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204,
+
+ DR_DUMP_REC_TYPE_RULE = 3300,
+ DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301,
+ DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V0 = 3302,
+ DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V1 = 3303,
+ DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1 = 3304,
+
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L2 = 3400,
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L3 = 3401,
+ DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR = 3402,
+ DR_DUMP_REC_TYPE_ACTION_DROP = 3403,
+ DR_DUMP_REC_TYPE_ACTION_QP = 3404,
+ DR_DUMP_REC_TYPE_ACTION_FT = 3405,
+ DR_DUMP_REC_TYPE_ACTION_CTR = 3406,
+ DR_DUMP_REC_TYPE_ACTION_TAG = 3407,
+ DR_DUMP_REC_TYPE_ACTION_VPORT = 3408,
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L2 = 3409,
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L3 = 3410,
+ DR_DUMP_REC_TYPE_ACTION_DEVX_TIR = 3411,
+ DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN = 3412,
+ DR_DUMP_REC_TYPE_ACTION_POP_VLAN = 3413,
+ DR_DUMP_REC_TYPE_ACTION_SAMPLER = 3415,
+ DR_DUMP_REC_TYPE_ACTION_INSERT_HDR = 3420,
+ DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR = 3421
+};
+
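Each dump record is one CSV line whose first field is the numeric record type above, followed by type-specific hex fields. For example (values illustrative), a rule record and a drop-action record that belongs to it could look like:

3300,0x88f2d4a0,0x3c41b2e0
3403,0x1a2b3c40,0x88f2d4a0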
+void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
+{
+ mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
+ list_add_tail(&tbl->dbg_node, &tbl->dmn->dbg_tbl_list);
+ mutex_unlock(&tbl->dmn->dump_info.dbg_mutex);
+}
+
+void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl)
+{
+ mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
+ list_del(&tbl->dbg_node);
+ mutex_unlock(&tbl->dmn->dump_info.dbg_mutex);
+}
+
+void mlx5dr_dbg_rule_add(struct mlx5dr_rule *rule)
+{
+ struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
+
+ mutex_lock(&dmn->dump_info.dbg_mutex);
+ list_add_tail(&rule->dbg_node, &rule->matcher->dbg_rule_list);
+ mutex_unlock(&dmn->dump_info.dbg_mutex);
+}
+
+void mlx5dr_dbg_rule_del(struct mlx5dr_rule *rule)
+{
+ struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
+
+ mutex_lock(&dmn->dump_info.dbg_mutex);
+ list_del(&rule->dbg_node);
+ mutex_unlock(&dmn->dump_info.dbg_mutex);
+}
+
+static u64 dr_dump_icm_to_idx(u64 icm_addr)
+{
+ return (icm_addr >> 6) & 0xffffffff;
+}
+
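The index is simply the ICM address with the low 6 bits dropped and the result truncated to 32 bits, presumably because STEs are 64-byte aligned; e.g. an icm_addr of 0x440 maps to idx 0x11.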
+#define DR_HEX_SIZE 256
+
+static void
+dr_dump_hex_print(char hex[DR_HEX_SIZE], char *src, u32 size)
+{
+ if (WARN_ON_ONCE(DR_HEX_SIZE < 2 * size + 1))
+ size = DR_HEX_SIZE / 2 - 1; /* truncate */
+
+ bin2hex(hex, src, size);
+ hex[2 * size] = 0; /* NULL-terminate */
+}
+
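With DR_HEX_SIZE of 256, inputs up to 127 bytes encode without truncation (2 * 127 + 1 = 255 bytes including the NUL terminator); anything larger trips the warning and is clipped to 127 bytes.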
+static int
+dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
+ struct mlx5dr_rule_action_member *action_mem)
+{
+ struct mlx5dr_action *action = action_mem->action;
+ const u64 action_id = DR_DBG_PTR_TO_ID(action);
+
+ switch (action->action_type) {
+ case DR_ACTION_TYP_DROP:
+ seq_printf(file, "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DROP, action_id, rule_id);
+ break;
+ case DR_ACTION_TYP_FT:
+ if (action->dest_tbl->is_fw_tbl)
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->fw_tbl.id);
+ else
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->tbl->table_id);
+
+ break;
+ case DR_ACTION_TYP_CTR:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
+ action->ctr->ctr_id + action->ctr->offset);
+ break;
+ case DR_ACTION_TYP_TAG:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
+ action->flow_tag->flow_tag);
+ break;
+ case DR_ACTION_TYP_MODIFY_HDR:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
+ rule_id, action->rewrite->index);
+ break;
+ case DR_ACTION_TYP_VPORT:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
+ action->vport->caps->num);
+ break;
+ case DR_ACTION_TYP_TNL_L2_TO_L2:
+ seq_printf(file, "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
+ rule_id);
+ break;
+ case DR_ACTION_TYP_TNL_L3_TO_L2:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
+ rule_id, action->rewrite->index);
+ break;
+ case DR_ACTION_TYP_L2_TO_TNL_L2:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
+ rule_id, action->reformat->id);
+ break;
+ case DR_ACTION_TYP_L2_TO_TNL_L3:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
+ rule_id, action->reformat->id);
+ break;
+ case DR_ACTION_TYP_POP_VLAN:
+ seq_printf(file, "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
+ rule_id);
+ break;
+ case DR_ACTION_TYP_PUSH_VLAN:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
+ rule_id, action->push_vlan->vlan_hdr);
+ break;
+ case DR_ACTION_TYP_INSERT_HDR:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ break;
+ case DR_ACTION_TYP_REMOVE_HDR:
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ break;
+ case DR_ACTION_TYP_SAMPLER:
+ seq_printf(file,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id, rule_id,
+ 0, 0, action->sampler->sampler_id,
+ action->sampler->rx_icm_addr,
+ action->sampler->tx_icm_addr);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int
+dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
+ bool is_rx, const u64 rule_id, u8 format_ver)
+{
+ char hw_ste_dump[DR_HEX_SIZE];
+ u32 mem_rec_type;
+
+ if (format_ver == MLX5_STEERING_FORMAT_CONNECTX_5) {
+ mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 :
+ DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V0;
+ } else {
+ mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V1 :
+ DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1;
+ }
+
+ dr_dump_hex_print(hw_ste_dump, (char *)ste->hw_ste, DR_STE_SIZE_REDUCED);
+
+ seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
+ dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
+ hw_ste_dump);
+
+ return 0;
+}
+
+static int
+dr_dump_rule_rx_tx(struct seq_file *file, struct mlx5dr_rule_rx_tx *rule_rx_tx,
+ bool is_rx, const u64 rule_id, u8 format_ver)
+{
+ struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
+ struct mlx5dr_ste *curr_ste = rule_rx_tx->last_rule_ste;
+ int ret, i;
+
+ if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
+ return 0;
+
+ while (i--) {
+ ret = dr_dump_rule_mem(file, ste_arr[i], is_rx, rule_id,
+ format_ver);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
+{
+ struct mlx5dr_rule_action_member *action_mem;
+ const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
+ struct mlx5dr_rule_rx_tx *rx = &rule->rx;
+ struct mlx5dr_rule_rx_tx *tx = &rule->tx;
+ u8 format_ver;
+ int ret;
+
+ format_ver = rule->matcher->tbl->dmn->info.caps.sw_format_ver;
+
+ seq_printf(file, "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE, rule_id,
+ DR_DBG_PTR_TO_ID(rule->matcher));
+
+ if (rx->nic_matcher) {
+ ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (tx->nic_matcher) {
+ ret = dr_dump_rule_rx_tx(file, tx, false, rule_id, format_ver);
+ if (ret < 0)
+ return ret;
+ }
+
+ list_for_each_entry(action_mem, &rule->rule_actions_list, list) {
+ ret = dr_dump_rule_action_mem(file, rule_id, action_mem);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
+ u8 criteria, const u64 matcher_id)
+{
+ char dump[DR_HEX_SIZE];
+
+ seq_printf(file, "%d,0x%llx,", DR_DUMP_REC_TYPE_MATCHER_MASK,
+ matcher_id);
+
+ if (criteria & DR_MATCHER_CRITERIA_OUTER) {
+ dr_dump_hex_print(dump, (char *)&mask->outer, sizeof(mask->outer));
+ seq_printf(file, "%s,", dump);
+ } else {
+ seq_puts(file, ",");
+ }
+
+ if (criteria & DR_MATCHER_CRITERIA_INNER) {
+ dr_dump_hex_print(dump, (char *)&mask->inner, sizeof(mask->inner));
+ seq_printf(file, "%s,", dump);
+ } else {
+ seq_puts(file, ",");
+ }
+
+ if (criteria & DR_MATCHER_CRITERIA_MISC) {
+ dr_dump_hex_print(dump, (char *)&mask->misc, sizeof(mask->misc));
+ seq_printf(file, "%s,", dump);
+ } else {
+ seq_puts(file, ",");
+ }
+
+ if (criteria & DR_MATCHER_CRITERIA_MISC2) {
+ dr_dump_hex_print(dump, (char *)&mask->misc2, sizeof(mask->misc2));
+ seq_printf(file, "%s,", dump);
+ } else {
+ seq_puts(file, ",");
+ }
+
+ if (criteria & DR_MATCHER_CRITERIA_MISC3) {
+ dr_dump_hex_print(dump, (char *)&mask->misc3, sizeof(mask->misc3));
+ seq_printf(file, "%s\n", dump);
+ } else {
+ seq_puts(file, ",\n");
+ }
+
+ return 0;
+}
+
+static int
+dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
+ u32 index, bool is_rx, const u64 matcher_id)
+{
+ seq_printf(file, "%d,0x%llx,%d,%d,0x%x\n",
+ DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index, is_rx,
+ builder->lu_type);
+
+ return 0;
+}
+
+static int
+dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
+ struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
+ const u64 matcher_id)
+{
+ enum dr_dump_rec_type rec_type;
+ int i, ret;
+
+ rec_type = is_rx ? DR_DUMP_REC_TYPE_MATCHER_RX :
+ DR_DUMP_REC_TYPE_MATCHER_TX;
+
+ seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
+ rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
+ matcher_id, matcher_rx_tx->num_of_builders,
+ dr_dump_icm_to_idx(matcher_rx_tx->s_htbl->chunk->icm_addr),
+ dr_dump_icm_to_idx(matcher_rx_tx->e_anchor->chunk->icm_addr));
+
+ for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
+ ret = dr_dump_matcher_builder(file,
+ &matcher_rx_tx->ste_builder[i],
+ i, is_rx, matcher_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
+{
+ struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
+ struct mlx5dr_matcher_rx_tx *tx = &matcher->tx;
+ u64 matcher_id;
+ int ret;
+
+ matcher_id = DR_DBG_PTR_TO_ID(matcher);
+
+ seq_printf(file, "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
+ matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl), matcher->prio);
+
+ ret = dr_dump_matcher_mask(file, &matcher->mask,
+ matcher->match_criteria, matcher_id);
+ if (ret < 0)
+ return ret;
+
+ if (rx->nic_tbl) {
+ ret = dr_dump_matcher_rx_tx(file, true, rx, matcher_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (tx->nic_tbl) {
+ ret = dr_dump_matcher_rx_tx(file, false, tx, matcher_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dr_dump_matcher_all(struct seq_file *file, struct mlx5dr_matcher *matcher)
+{
+ struct mlx5dr_rule *rule;
+ int ret;
+
+ ret = dr_dump_matcher(file, matcher);
+ if (ret < 0)
+ return ret;
+
+ list_for_each_entry(rule, &matcher->dbg_rule_list, dbg_node) {
+ ret = dr_dump_rule(file, rule);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
+ struct mlx5dr_table_rx_tx *table_rx_tx,
+ const u64 table_id)
+{
+ enum dr_dump_rec_type rec_type;
+
+ rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
+ DR_DUMP_REC_TYPE_TABLE_TX;
+
+ seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id,
+ dr_dump_icm_to_idx(table_rx_tx->s_anchor->chunk->icm_addr));
+
+ return 0;
+}
+
+static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
+{
+ struct mlx5dr_table_rx_tx *rx = &table->rx;
+ struct mlx5dr_table_rx_tx *tx = &table->tx;
+ int ret;
+
+ seq_printf(file, "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
+ DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
+ table->table_type, table->level);
+
+ if (rx->nic_dmn) {
+ ret = dr_dump_table_rx_tx(file, true, rx,
+ DR_DBG_PTR_TO_ID(table));
+ if (ret < 0)
+ return ret;
+ }
+
+ if (tx->nic_dmn) {
+ ret = dr_dump_table_rx_tx(file, false, tx,
+ DR_DBG_PTR_TO_ID(table));
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int dr_dump_table_all(struct seq_file *file, struct mlx5dr_table *tbl)
+{
+ struct mlx5dr_matcher *matcher;
+ int ret;
+
+ ret = dr_dump_table(file, tbl);
+ if (ret < 0)
+ return ret;
+
+ list_for_each_entry(matcher, &tbl->matcher_list, list_node) {
+ ret = dr_dump_matcher_all(file, matcher);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int
+dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
+ const u64 domain_id)
+{
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_SEND_RING, DR_DBG_PTR_TO_ID(ring),
+ domain_id, ring->cq->mcq.cqn, ring->qp->qpn);
+ return 0;
+}
+
+static int
+dr_dump_domain_info_flex_parser(struct seq_file *file,
+ const char *flex_parser_name,
+ const u8 flex_parser_value,
+ const u64 domain_id)
+{
+ seq_printf(file, "%d,0x%llx,%s,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
+ flex_parser_name, flex_parser_value);
+ return 0;
+}
+
+static int
+dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
+ const u64 domain_id)
+{
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+ unsigned long i, vports_num;
+
+ xa_for_each(&caps->vports.vports_caps_xa, vports_num, vport_caps)
+ ; /* count the number of vports in xarray */
+
+ seq_printf(file, "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
+ caps->nic_rx_drop_address, caps->nic_tx_drop_address,
+ caps->flex_protocols, vports_num, caps->eswitch_manager);
+
+ xa_for_each(&caps->vports.vports_caps_xa, i, vport_caps) {
+ vport_caps = xa_load(&caps->vports.vports_caps_xa, i);
+
+ seq_printf(file, "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT, domain_id, i,
+ vport_caps->vport_gvmi, vport_caps->icm_address_rx,
+ vport_caps->icm_address_tx);
+ }
+ return 0;
+}
+
+static int
+dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info,
+ const u64 domain_id)
+{
+ int ret;
+
+ ret = dr_dump_domain_info_caps(file, &info->caps, domain_id);
+ if (ret < 0)
+ return ret;
+
+ ret = dr_dump_domain_info_flex_parser(file, "icmp_dw0",
+ info->caps.flex_parser_id_icmp_dw0,
+ domain_id);
+ if (ret < 0)
+ return ret;
+
+ ret = dr_dump_domain_info_flex_parser(file, "icmp_dw1",
+ info->caps.flex_parser_id_icmp_dw1,
+ domain_id);
+ if (ret < 0)
+ return ret;
+
+ ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw0",
+ info->caps.flex_parser_id_icmpv6_dw0,
+ domain_id);
+ if (ret < 0)
+ return ret;
+
+ ret = dr_dump_domain_info_flex_parser(file, "icmpv6_dw1",
+ info->caps.flex_parser_id_icmpv6_dw1,
+ domain_id);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
+{
+ u64 domain_id = DR_DBG_PTR_TO_ID(dmn);
+ int ret;
+
+ seq_printf(file, "%d,0x%llx,%d,0x%x,%d,%s\n", DR_DUMP_REC_TYPE_DOMAIN,
+ domain_id, dmn->type, dmn->info.caps.gvmi,
+ dmn->info.supp_sw_steering, pci_name(dmn->mdev->pdev));
+
+ ret = dr_dump_domain_info(file, &dmn->info, domain_id);
+ if (ret < 0)
+ return ret;
+
+ if (dmn->info.supp_sw_steering) {
+ ret = dr_dump_send_ring(file, dmn->send_ring, domain_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dr_dump_domain_all(struct seq_file *file, struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_table *tbl;
+ int ret;
+
+ mutex_lock(&dmn->dump_info.dbg_mutex);
+ mlx5dr_domain_lock(dmn);
+
+ ret = dr_dump_domain(file, dmn);
+ if (ret < 0)
+ goto unlock_mutex;
+
+ list_for_each_entry(tbl, &dmn->dbg_tbl_list, dbg_node) {
+ ret = dr_dump_table_all(file, tbl);
+ if (ret < 0)
+ break;
+ }
+
+unlock_mutex:
+ mlx5dr_domain_unlock(dmn);
+ mutex_unlock(&dmn->dump_info.dbg_mutex);
+ return ret;
+}
+
+static int dr_dump_show(struct seq_file *file, void *priv)
+{
+ return dr_dump_domain_all(file, file->private);
+}
+DEFINE_SHOW_ATTRIBUTE(dr_dump);
+
+void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
+{
+ struct mlx5_core_dev *dev = dmn->mdev;
+ char file_name[128];
+
+ if (dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
+ mlx5_core_warn(dev,
+ "Steering dump is not supported for NIC RX/TX domains\n");
+ return;
+ }
+
+ dmn->dump_info.steering_debugfs =
+ debugfs_create_dir("steering", dev->priv.dbg_root);
+ dmn->dump_info.fdb_debugfs =
+ debugfs_create_dir("fdb", dmn->dump_info.steering_debugfs);
+
+ sprintf(file_name, "dmn_%p", dmn);
+ debugfs_create_file(file_name, 0444, dmn->dump_info.fdb_debugfs,
+ dmn, &dr_dump_fops);
+
+ INIT_LIST_HEAD(&dmn->dbg_tbl_list);
+ mutex_init(&dmn->dump_info.dbg_mutex);
+}
+
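Assuming dev->priv.dbg_root is the device's mlx5 debugfs directory, the dump ends up at a path of the form <debugfs>/mlx5/<pci-bdf>/steering/fdb/dmn_<address> and can be read like any other seq_file.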
+void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn)
+{
+ debugfs_remove_recursive(dmn->dump_info.steering_debugfs);
+ mutex_destroy(&dmn->dump_info.dbg_mutex);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
new file mode 100644
index 000000000000..def6cf853eea
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+struct mlx5dr_dbg_dump_info {
+ struct mutex dbg_mutex; /* protect dbg lists */
+ struct dentry *steering_debugfs;
+ struct dentry *fdb_debugfs;
+};
+
+void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
+void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn);
+void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl);
+void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl);
+void mlx5dr_dbg_rule_add(struct mlx5dr_rule *rule);
+void mlx5dr_dbg_rule_del(struct mlx5dr_rule *rule);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 8cbd36c82b3b..5fa7f9d6d8b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include <linux/mlx5/eswitch.h>
+#include <linux/err.h>
#include "dr_types.h"
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
@@ -72,9 +73,9 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
}
dmn->uar = mlx5_get_uars_page(dmn->mdev);
- if (!dmn->uar) {
+ if (IS_ERR(dmn->uar)) {
mlx5dr_err(dmn, "Couldn't allocate UAR\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(dmn->uar);
goto clean_pd;
}
@@ -163,9 +164,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
{
- return dr_domain_query_vport(dmn,
- dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
- false,
+ return dr_domain_query_vport(dmn, 0, false,
&dmn->info.caps.vports.esw_manager_caps);
}
@@ -396,7 +395,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
}
dr_domain_init_csum_recalc_fts(dmn);
-
+ mlx5dr_dbg_init_dump(dmn);
return dmn;
uninit_caps:
@@ -432,11 +431,12 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
- if (refcount_read(&dmn->refcount) > 1)
+ if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
return -EBUSY;
/* make sure resources are not used by the hardware */
mlx5dr_cmd_sync_steering(dmn->mdev);
+ mlx5dr_dbg_uninit_dump(dmn);
dr_domain_uninit_csum_recalc_fts(dmn);
dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 793365242e85..e87cf498c77b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -141,6 +141,19 @@ static bool dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 *misc3)
}
static bool
+dr_matcher_supp_flex_parser_ok(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_parser_ok_bits_supp;
+}
+
+static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *misc,
+ struct mlx5dr_domain *dmn)
+{
+ return dr_matcher_supp_flex_parser_ok(&dmn->info.caps) &&
+ misc->geneve_tlv_option_0_exist;
+}
+
+static bool
dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{
return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
@@ -359,7 +372,7 @@ static bool dr_mask_is_tnl_mpls_over_gre(struct mlx5dr_match_param *mask,
static int dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
+ return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
}
static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
@@ -368,6 +381,12 @@ static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
return DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(&mask->misc2) &&
dr_matcher_supp_tnl_mpls_over_udp(&dmn->info.caps);
}
+
+static bool dr_mask_is_tnl_header_0_1_set(struct mlx5dr_match_misc5 *misc5)
+{
+ return misc5->tunnel_header_0 || misc5->tunnel_header_1;
+}
+
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
enum mlx5dr_ipv outer_ipv,
@@ -424,6 +443,9 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4)
mask.misc4 = matcher->mask.misc4;
+ if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC5)
+ mask.misc5 = matcher->mask.misc5;
+
ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
&matcher->mask, NULL);
if (ret)
@@ -443,7 +465,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER |
DR_MATCHER_CRITERIA_MISC |
DR_MATCHER_CRITERIA_MISC2 |
- DR_MATCHER_CRITERIA_MISC3)) {
+ DR_MATCHER_CRITERIA_MISC3 |
+ DR_MATCHER_CRITERIA_MISC5)) {
inner = false;
if (dr_mask_is_wqe_metadata_set(&mask.misc2))
@@ -511,6 +534,10 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_tnl_geneve_tlv_opt(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
+ if (dr_mask_is_tnl_geneve_tlv_opt_exist_set(&mask.misc, dmn))
+ mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(ste_ctx, &sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
} else if (dr_mask_is_tnl_gtpu_any(&mask, dmn)) {
if (dr_mask_is_tnl_gtpu_flex_parser_0(&mask, dmn))
mlx5dr_ste_build_tnl_gtpu_flex_parser_0(ste_ctx, &sb[idx++],
@@ -525,6 +552,9 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_tnl_gtpu(&mask, dmn))
mlx5dr_ste_build_tnl_gtpu(ste_ctx, &sb[idx++],
&mask, inner, rx);
+ } else if (dr_mask_is_tnl_header_0_1_set(&mask.misc5)) {
+ mlx5dr_ste_build_tnl_header_0_1(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
@@ -653,10 +683,10 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
return 0;
}
-static int dr_matcher_connect(struct mlx5dr_domain *dmn,
- struct mlx5dr_matcher_rx_tx *curr_nic_matcher,
- struct mlx5dr_matcher_rx_tx *next_nic_matcher,
- struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
+static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
+ struct mlx5dr_matcher_rx_tx *curr_nic_matcher,
+ struct mlx5dr_matcher_rx_tx *next_nic_matcher,
+ struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
{
struct mlx5dr_table_rx_tx *nic_tbl = curr_nic_matcher->nic_tbl;
struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
@@ -712,58 +742,50 @@ static int dr_matcher_connect(struct mlx5dr_domain *dmn,
return 0;
}
-static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher)
+int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
{
- struct mlx5dr_matcher *next_matcher, *prev_matcher, *tmp_matcher;
- struct mlx5dr_table *tbl = matcher->tbl;
- struct mlx5dr_domain *dmn = tbl->dmn;
+ struct mlx5dr_matcher_rx_tx *next_nic_matcher, *prev_nic_matcher, *tmp_nic_matcher;
+ struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
bool first = true;
int ret;
- next_matcher = NULL;
- list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) {
- if (tmp_matcher->prio >= matcher->prio) {
- next_matcher = tmp_matcher;
+ /* If the nic matcher is already on its parent nic table list,
+ * then it is already connected to the chain of nic matchers.
+ */
+ if (!list_empty(&nic_matcher->list_node))
+ return 0;
+
+ next_nic_matcher = NULL;
+ list_for_each_entry(tmp_nic_matcher, &nic_tbl->nic_matcher_list, list_node) {
+ if (tmp_nic_matcher->prio >= nic_matcher->prio) {
+ next_nic_matcher = tmp_nic_matcher;
break;
}
first = false;
}
- prev_matcher = NULL;
- if (next_matcher && !first)
- prev_matcher = list_prev_entry(next_matcher, matcher_list);
+ prev_nic_matcher = NULL;
+ if (next_nic_matcher && !first)
+ prev_nic_matcher = list_prev_entry(next_nic_matcher, list_node);
else if (!first)
- prev_matcher = list_last_entry(&tbl->matcher_list,
- struct mlx5dr_matcher,
- matcher_list);
-
- if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
- dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
- ret = dr_matcher_connect(dmn, &matcher->rx,
- next_matcher ? &next_matcher->rx : NULL,
- prev_matcher ? &prev_matcher->rx : NULL);
- if (ret)
- return ret;
- }
+ prev_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list,
+ struct mlx5dr_matcher_rx_tx,
+ list_node);
- if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
- dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
- ret = dr_matcher_connect(dmn, &matcher->tx,
- next_matcher ? &next_matcher->tx : NULL,
- prev_matcher ? &prev_matcher->tx : NULL);
- if (ret)
- return ret;
- }
+ ret = dr_nic_matcher_connect(dmn, nic_matcher,
+ next_nic_matcher, prev_nic_matcher);
+ if (ret)
+ return ret;
- if (prev_matcher)
- list_add(&matcher->matcher_list, &prev_matcher->matcher_list);
- else if (next_matcher)
- list_add_tail(&matcher->matcher_list,
- &next_matcher->matcher_list);
+ if (prev_nic_matcher)
+ list_add(&nic_matcher->list_node, &prev_nic_matcher->list_node);
+ else if (next_nic_matcher)
+ list_add_tail(&nic_matcher->list_node, &next_nic_matcher->list_node);
else
- list_add(&matcher->matcher_list, &tbl->matcher_list);
+ list_add(&nic_matcher->list_node, &nic_matcher->nic_tbl->nic_matcher_list);
- return 0;
+ return ret;
}
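This makes NIC-matcher connection lazy: a matcher is linked into its NIC table's chain only when first needed and, as the rule accounting added to dr_rule.c below shows, unlinked again once its last rule is removed.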
static void dr_matcher_uninit_nic(struct mlx5dr_matcher_rx_tx *nic_matcher)
@@ -822,6 +844,9 @@ static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
int ret;
+ nic_matcher->prio = matcher->prio;
+ INIT_LIST_HEAD(&nic_matcher->list_node);
+
ret = dr_matcher_set_all_ste_builders(matcher, nic_matcher);
if (ret)
return ret;
@@ -872,13 +897,12 @@ uninit_nic_rx:
return ret;
}
-static int dr_matcher_init(struct mlx5dr_matcher *matcher,
- struct mlx5dr_match_parameters *mask)
+static int dr_matcher_copy_param(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *mask)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_match_parameters consumed_mask;
- struct mlx5dr_table *tbl = matcher->tbl;
- struct mlx5dr_domain *dmn = tbl->dmn;
- int i, ret;
+ int i, ret = 0;
if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
mlx5dr_err(dmn, "Invalid match criteria attribute\n");
@@ -898,10 +922,36 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
consumed_mask.match_sz = mask->match_sz;
memcpy(consumed_mask.match_buf, mask->match_buf, mask->match_sz);
mlx5dr_ste_copy_param(matcher->match_criteria,
- &matcher->mask, &consumed_mask,
- true);
+ &matcher->mask, &consumed_mask, true);
+
+ /* Check that all mask data was consumed */
+ for (i = 0; i < consumed_mask.match_sz; i++) {
+ if (!((u8 *)consumed_mask.match_buf)[i])
+ continue;
+
+ mlx5dr_dbg(dmn,
+ "Match param mask contains unsupported parameters\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ kfree(consumed_mask.match_buf);
}
+ return ret;
+}
+
+static int dr_matcher_init(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_match_parameters *mask)
+{
+ struct mlx5dr_table *tbl = matcher->tbl;
+ struct mlx5dr_domain *dmn = tbl->dmn;
+ int ret;
+
+ ret = dr_matcher_copy_param(matcher, mask);
+ if (ret)
+ return ret;
+
switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
matcher->rx.nic_tbl = &tbl->rx;
@@ -919,23 +969,23 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
default:
WARN_ON(true);
ret = -EINVAL;
- goto free_consumed_mask;
}
- /* Check that all mask data was consumed */
- for (i = 0; i < consumed_mask.match_sz; i++) {
- if (!((u8 *)consumed_mask.match_buf)[i])
- continue;
+ return ret;
+}
- mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n");
- ret = -EOPNOTSUPP;
- goto free_consumed_mask;
- }
+static void dr_matcher_add_to_dbg_list(struct mlx5dr_matcher *matcher)
+{
+ mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
+ list_add(&matcher->list_node, &matcher->tbl->matcher_list);
+ mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
+}
- ret = 0;
-free_consumed_mask:
- kfree(consumed_mask.match_buf);
- return ret;
+static void dr_matcher_remove_from_dbg_list(struct mlx5dr_matcher *matcher)
+{
+ mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
+ list_del(&matcher->list_node);
+ mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
}
struct mlx5dr_matcher *
@@ -957,7 +1007,8 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl,
matcher->prio = priority;
matcher->match_criteria = match_criteria_enable;
refcount_set(&matcher->refcount, 1);
- INIT_LIST_HEAD(&matcher->matcher_list);
+ INIT_LIST_HEAD(&matcher->list_node);
+ INIT_LIST_HEAD(&matcher->dbg_rule_list);
mlx5dr_domain_lock(tbl->dmn);
@@ -965,16 +1016,12 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl,
if (ret)
goto free_matcher;
- ret = dr_matcher_add_to_tbl(matcher);
- if (ret)
- goto matcher_uninit;
+ dr_matcher_add_to_dbg_list(matcher);
mlx5dr_domain_unlock(tbl->dmn);
return matcher;
-matcher_uninit:
- dr_matcher_uninit(matcher);
free_matcher:
mlx5dr_domain_unlock(tbl->dmn);
kfree(matcher);
@@ -983,10 +1030,10 @@ dec_ref:
return NULL;
}
-static int dr_matcher_disconnect(struct mlx5dr_domain *dmn,
- struct mlx5dr_table_rx_tx *nic_tbl,
- struct mlx5dr_matcher_rx_tx *next_nic_matcher,
- struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
+static int dr_matcher_disconnect_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_table_rx_tx *nic_tbl,
+ struct mlx5dr_matcher_rx_tx *next_nic_matcher,
+ struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
struct mlx5dr_htbl_connect_info info;
@@ -1013,43 +1060,34 @@ static int dr_matcher_disconnect(struct mlx5dr_domain *dmn,
&info, true);
}
-static int dr_matcher_remove_from_tbl(struct mlx5dr_matcher *matcher)
+int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_matcher_rx_tx *nic_matcher)
{
- struct mlx5dr_matcher *prev_matcher, *next_matcher;
- struct mlx5dr_table *tbl = matcher->tbl;
- struct mlx5dr_domain *dmn = tbl->dmn;
- int ret = 0;
+ struct mlx5dr_matcher_rx_tx *prev_nic_matcher, *next_nic_matcher;
+ struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
+ int ret;
- if (list_is_last(&matcher->matcher_list, &tbl->matcher_list))
- next_matcher = NULL;
- else
- next_matcher = list_next_entry(matcher, matcher_list);
+ /* If the nic matcher is not on its parent nic table list,
+ * then it is detached - no need to disconnect it.
+ */
+ if (list_empty(&nic_matcher->list_node))
+ return 0;
- if (matcher->matcher_list.prev == &tbl->matcher_list)
- prev_matcher = NULL;
+ if (list_is_last(&nic_matcher->list_node, &nic_tbl->nic_matcher_list))
+ next_nic_matcher = NULL;
else
- prev_matcher = list_prev_entry(matcher, matcher_list);
-
- if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
- dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
- ret = dr_matcher_disconnect(dmn, &tbl->rx,
- next_matcher ? &next_matcher->rx : NULL,
- prev_matcher ? &prev_matcher->rx : NULL);
- if (ret)
- return ret;
- }
+ next_nic_matcher = list_next_entry(nic_matcher, list_node);
- if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
- dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
- ret = dr_matcher_disconnect(dmn, &tbl->tx,
- next_matcher ? &next_matcher->tx : NULL,
- prev_matcher ? &prev_matcher->tx : NULL);
- if (ret)
- return ret;
- }
+ if (nic_matcher->list_node.prev == &nic_tbl->nic_matcher_list)
+ prev_nic_matcher = NULL;
+ else
+ prev_nic_matcher = list_prev_entry(nic_matcher, list_node);
- list_del(&matcher->matcher_list);
+ ret = dr_matcher_disconnect_nic(dmn, nic_tbl, next_nic_matcher, prev_nic_matcher);
+ if (ret)
+ return ret;
+ list_del_init(&nic_matcher->list_node);
return 0;
}
@@ -1057,12 +1095,12 @@ int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher)
{
struct mlx5dr_table *tbl = matcher->tbl;
- if (refcount_read(&matcher->refcount) > 1)
+ if (WARN_ON_ONCE(refcount_read(&matcher->refcount) > 1))
return -EBUSY;
mlx5dr_domain_lock(tbl->dmn);
- dr_matcher_remove_from_tbl(matcher);
+ dr_matcher_remove_from_dbg_list(matcher);
dr_matcher_uninit(matcher);
refcount_dec(&matcher->tbl->refcount);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 6a390e981b09..b4374578425b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -5,11 +5,6 @@
#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)
-struct mlx5dr_rule_action_member {
- struct mlx5dr_action *action;
- struct list_head list;
-};
-
static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste *new_last_ste,
struct list_head *miss_list,
@@ -979,14 +974,36 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
return false;
}
}
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
+ s_idx = offsetof(struct mlx5dr_match_param, misc5);
+ e_idx = min(s_idx + sizeof(param->misc5), value_size);
+
+ if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+ mlx5dr_err(matcher->tbl->dmn, "Rule misc5 parameters contain a value not specified by mask\n");
+ return false;
+ }
+ }
return true;
}
static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
struct mlx5dr_rule_rx_tx *nic_rule)
{
+ /* Check whether this nic rule was actually created, or whether it
+ * was skipped and only its RX/TX counterpart was created.
+ */
+ if (!nic_rule->last_rule_ste)
+ return 0;
+
mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
dr_rule_clean_rule_members(rule, nic_rule);
+
+ nic_rule->nic_matcher->rules--;
+ if (!nic_rule->nic_matcher->rules)
+ mlx5dr_matcher_remove_from_tbl_nic(rule->matcher->tbl->dmn,
+ nic_rule->nic_matcher);
+
mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
return 0;
@@ -1003,6 +1020,8 @@ static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
{
struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
+ mlx5dr_dbg_rule_del(rule);
+
switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
dr_rule_destroy_rule_nic(rule, &rule->rx);
@@ -1091,24 +1110,28 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
mlx5dr_domain_nic_lock(nic_dmn);
+ ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
+ if (ret)
+ goto free_hw_ste;
+
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
dr_rule_get_ipv(&param->outer),
dr_rule_get_ipv(&param->inner));
if (ret)
- goto free_hw_ste;
+ goto remove_from_nic_tbl;
/* Set the tag values inside the ste array */
ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
if (ret)
- goto free_hw_ste;
+ goto remove_from_nic_tbl;
/* Set the actions values/addresses inside the ste array */
ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
num_actions, hw_ste_arr,
&new_hw_ste_arr_sz);
if (ret)
- goto free_hw_ste;
+ goto remove_from_nic_tbl;
cur_htbl = nic_matcher->s_htbl;
@@ -1155,6 +1178,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
if (htbl)
mlx5dr_htbl_put(htbl);
+ nic_matcher->rules++;
+
mlx5dr_domain_nic_unlock(nic_dmn);
kfree(hw_ste_arr);
@@ -1168,6 +1193,10 @@ free_rule:
list_del(&ste_info->send_list);
kfree(ste_info);
}
+
+remove_from_nic_tbl:
+ mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
+
free_hw_ste:
mlx5dr_domain_nic_unlock(nic_dmn);
kfree(hw_ste_arr);
@@ -1257,6 +1286,8 @@ dr_rule_create_rule(struct mlx5dr_matcher *matcher,
if (ret)
goto remove_action_members;
+ INIT_LIST_HEAD(&rule->dbg_node);
+ mlx5dr_dbg_rule_add(rule);
return rule;
remove_action_members:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 219a5474a8a4..7e61742e58a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -719,6 +719,8 @@ static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bo
spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);
spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
+ spec->geneve_tlv_option_0_exist =
+ IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr);
spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);
spec->outer_ipv6_flow_label =
@@ -880,6 +882,26 @@ static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec,
IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
}
+static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr)
+{
+ spec->macsec_tag_0 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr);
+ spec->macsec_tag_1 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr);
+ spec->macsec_tag_2 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr);
+ spec->macsec_tag_3 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr);
+ spec->tunnel_header_0 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, clr);
+ spec->tunnel_header_1 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr);
+ spec->tunnel_header_2 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr);
+ spec->tunnel_header_3 =
+ IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr);
+}
+
void mlx5dr_ste_copy_param(u8 match_criteria,
struct mlx5dr_match_param *set_param,
struct mlx5dr_match_parameters *mask,
@@ -966,6 +988,20 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
}
dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
}
+
+ param_location += sizeof(struct mlx5dr_match_misc4);
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
+ if (mask->match_sz < param_location +
+ sizeof(struct mlx5dr_match_misc5)) {
+ memcpy(tail_param, data + param_location,
+ mask->match_sz - param_location);
+ buff = tail_param;
+ } else {
+ buff = data + param_location;
+ }
+ dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr);
+ }
}
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
@@ -1180,6 +1216,21 @@ void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
}
+void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
+{
+ if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init)
+ return;
+
+ sb->rx = rx;
+ sb->caps = caps;
+ sb->inner = inner;
+ ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask);
+}
+
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -1269,6 +1320,16 @@ void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_flex_parser_1_init(sb, mask);
}
+void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
+{
+ sb->rx = rx;
+ sb->inner = inner;
+ ste_ctx->build_tnl_header_0_1_init(sb, mask);
+}
+
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index 2d52d065dc8b..ca8fa32b8680 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -135,12 +135,14 @@ struct mlx5dr_ste_ctx {
void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
void DR_STE_CTX_BUILDER(tnl_geneve);
void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt);
+ void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt_exist);
void DR_STE_CTX_BUILDER(register_0);
void DR_STE_CTX_BUILDER(register_1);
void DR_STE_CTX_BUILDER(src_gvmi_qpn);
void DR_STE_CTX_BUILDER(flex_parser_0);
void DR_STE_CTX_BUILDER(flex_parser_1);
void DR_STE_CTX_BUILDER(tnl_gtpu);
+ void DR_STE_CTX_BUILDER(tnl_header_0_1);
void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_0);
void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index b0649c2877dd..2d62950f7a29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -80,6 +80,7 @@ enum {
DR_STE_V0_LU_TYPE_GENERAL_PURPOSE = 0x18,
DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
+ DR_STE_V0_LU_TYPE_TUNNEL_HEADER = 0x34,
DR_STE_V0_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
};
@@ -1704,7 +1705,7 @@ static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id,
u32 id = *misc4_field_id;
u8 *parser_ptr;
- if (parser_is_used[id])
+ if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
return;
parser_is_used[id] = true;
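The added bounds check keeps an out-of-range misc4 field id from indexing past the end of the parser_is_used[] array; the same guard is applied to the v1 STE context further below.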
@@ -1875,6 +1876,27 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag;
}
+static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ uint8_t *tag)
+{
+ struct mlx5dr_match_misc5 *misc5 = &value->misc5;
+
+ DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
+ DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
+
+ return 0;
+}
+
+static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V0_LU_TYPE_TUNNEL_HEADER;
+ dr_ste_v0_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag;
+}
+
struct mlx5dr_ste_ctx ste_ctx_v0 = {
/* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
@@ -1903,6 +1925,7 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
.build_flex_parser_0_init = &dr_ste_v0_build_flex_parser_0_init,
.build_flex_parser_1_init = &dr_ste_v0_build_flex_parser_1_init,
.build_tnl_gtpu_init = &dr_ste_v0_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v0_build_tnl_header_0_1_init,
.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init,
.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index cb9cf67b0a02..6ca06800f1d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -47,6 +47,7 @@ enum {
DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_OK = 0x0011,
DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
@@ -1713,6 +1714,27 @@ dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
}
+static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ uint8_t *tag)
+{
+ struct mlx5dr_match_misc5 *misc5 = &value->misc5;
+
+ DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
+ DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_header_0_1_tag;
+}
+
static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
@@ -1833,7 +1855,7 @@ static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
u32 id = *misc4_field_id;
u8 *parser_ptr;
- if (parser_is_used[id])
+ if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
return;
parser_is_used[id] = true;
@@ -1921,6 +1943,32 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
}
+static int
+dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ uint8_t *tag)
+{
+ u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ if (misc->geneve_tlv_option_0_exist) {
+ MLX5_SET(ste_flex_parser_ok, tag, flex_parsers_ok, 1 << parser_id);
+ misc->geneve_tlv_option_0_exist = 0;
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_OK;
+ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(mask, sb, sb->bit_mask);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag;
+}
+
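
The "exist" builder matches on the new FLEX_PARSER_OK lookup type rather than on parser data: hardware sets one bit in flex_parsers_ok for each flex parser that successfully parsed the packet, so option presence reduces to the single-bit test 1 << parser_id used above. A small standalone model of that bit arithmetic (the parser id value is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t parser_id = 2;		/* e.g. the GENEVE TLV option parser */
	uint8_t flex_parsers_ok = 0;

	/* Hardware view: parser 2 parsed its header successfully. */
	flex_parsers_ok |= 1 << parser_id;

	/* Matcher view: "does option 0 exist?" is a one-bit test. */
	printf("option present: %d\n",
	       !!(flex_parsers_ok & (1 << parser_id)));
	return 0;
}
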
static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
@@ -2020,12 +2068,14 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
.build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
.build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
.build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+ .build_tnl_geneve_tlv_opt_exist_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
.build_register_0_init = &dr_ste_v1_build_register_0_init,
.build_register_1_init = &dr_ste_v1_build_register_1_init,
.build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
.build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
.build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
.build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+ .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init,
.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index 30ae3cda6d2e..8ca110643cc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -3,69 +3,66 @@
#include "dr_types.h"
-int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
- struct mlx5dr_action *action)
+static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_table_rx_tx *nic_tbl,
+ struct mlx5dr_action *action)
{
- struct mlx5dr_matcher *last_matcher = NULL;
+ struct mlx5dr_matcher_rx_tx *last_nic_matcher = NULL;
struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *last_htbl;
int ret;
+ if (!list_empty(&nic_tbl->nic_matcher_list))
+ last_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list,
+ struct mlx5dr_matcher_rx_tx,
+ list_node);
+
+ if (last_nic_matcher)
+ last_htbl = last_nic_matcher->e_anchor;
+ else
+ last_htbl = nic_tbl->s_anchor;
+
+ if (action)
+ nic_tbl->default_icm_addr =
+ nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
+ action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
+ action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr;
+ else
+ nic_tbl->default_icm_addr = nic_tbl->nic_dmn->default_icm_addr;
+
+ info.type = CONNECT_MISS;
+ info.miss_icm_addr = nic_tbl->default_icm_addr;
+
+ ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_tbl->nic_dmn,
+ last_htbl, &info, true);
+ if (ret)
+ mlx5dr_dbg(dmn, "Failed to set NIC RX/TX miss action, ret %d\n", ret);
+
+ return ret;
+}
+
+int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
+ struct mlx5dr_action *action)
+{
+ int ret;
+
if (action && action->action_type != DR_ACTION_TYP_FT)
return -EOPNOTSUPP;
mlx5dr_domain_lock(tbl->dmn);
- if (!list_empty(&tbl->matcher_list))
- last_matcher = list_last_entry(&tbl->matcher_list,
- struct mlx5dr_matcher,
- matcher_list);
-
if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||
tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
- if (last_matcher)
- last_htbl = last_matcher->rx.e_anchor;
- else
- last_htbl = tbl->rx.s_anchor;
-
- tbl->rx.default_icm_addr = action ?
- action->dest_tbl->tbl->rx.s_anchor->chunk->icm_addr :
- tbl->rx.nic_dmn->default_icm_addr;
-
- info.type = CONNECT_MISS;
- info.miss_icm_addr = tbl->rx.default_icm_addr;
-
- ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
- tbl->rx.nic_dmn,
- last_htbl,
- &info, true);
- if (ret) {
- mlx5dr_dbg(tbl->dmn, "Failed to set RX miss action, ret %d\n", ret);
+ ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->rx, action);
+ if (ret)
goto out;
- }
}
if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX ||
tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
- if (last_matcher)
- last_htbl = last_matcher->tx.e_anchor;
- else
- last_htbl = tbl->tx.s_anchor;
-
- tbl->tx.default_icm_addr = action ?
- action->dest_tbl->tbl->tx.s_anchor->chunk->icm_addr :
- tbl->tx.nic_dmn->default_icm_addr;
-
- info.type = CONNECT_MISS;
- info.miss_icm_addr = tbl->tx.default_icm_addr;
-
- ret = mlx5dr_ste_htbl_init_and_postsend(tbl->dmn,
- tbl->tx.nic_dmn,
- last_htbl, &info, true);
- if (ret) {
- mlx5dr_dbg(tbl->dmn, "Failed to set TX miss action, ret %d\n", ret);
+ ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->tx, action);
+ if (ret)
goto out;
- }
}
/* Release old action */
@@ -122,6 +119,8 @@ static int dr_table_init_nic(struct mlx5dr_domain *dmn,
struct mlx5dr_htbl_connect_info info;
int ret;
+ INIT_LIST_HEAD(&nic_tbl->nic_matcher_list);
+
nic_tbl->default_icm_addr = nic_dmn->default_icm_addr;
nic_tbl->s_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
@@ -266,6 +265,8 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u
if (ret)
goto uninit_tbl;
+ INIT_LIST_HEAD(&tbl->dbg_node);
+ mlx5dr_dbg_tbl_add(tbl);
return tbl;
uninit_tbl:
@@ -281,9 +282,10 @@ int mlx5dr_table_destroy(struct mlx5dr_table *tbl)
{
int ret;
- if (refcount_read(&tbl->refcount) > 1)
+ if (WARN_ON_ONCE(refcount_read(&tbl->refcount) > 1))
return -EBUSY;
+ mlx5dr_dbg_tbl_del(tbl);
ret = dr_table_destroy_sw_owned_tbl(tbl);
if (ret)
return ret;
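
The refactor folds the duplicated RX/TX blocks into one direction-agnostic helper: the control flow reduces to "connect the last matcher's end anchor -- or the table's start anchor when the per-direction matcher list is empty -- to the new miss address". A condensed kernel-context sketch of that fallback choice (not the in-tree code; it reuses the field names visible in the hunk above):

/* Kernel-context sketch, not the in-tree code: pick the hash table
 * whose miss pointer must be rewritten. */
static struct mlx5dr_ste_htbl *
pick_last_htbl(struct mlx5dr_table_rx_tx *nic_tbl)
{
	struct mlx5dr_matcher_rx_tx *last;

	if (list_empty(&nic_tbl->nic_matcher_list))
		return nic_tbl->s_anchor;	/* empty table: patch the start anchor */

	last = list_last_entry(&nic_tbl->nic_matcher_list,
			       struct mlx5dr_matcher_rx_tx, list_node);
	return last->e_anchor;			/* otherwise: the last end anchor */
}
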
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 2333c2439c28..1b3d484b99be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -11,6 +11,7 @@
#include "lib/mlx5.h"
#include "mlx5_ifc_dr.h"
#include "mlx5dr.h"
+#include "dr_dbg.h"
#define DR_RULE_MAX_STES 18
#define DR_ACTION_MAX_STES 5
@@ -104,7 +105,8 @@ enum mlx5dr_matcher_criteria {
DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
DR_MATCHER_CRITERIA_MISC4 = 1 << 5,
- DR_MATCHER_CRITERIA_MAX = 1 << 6,
+ DR_MATCHER_CRITERIA_MISC5 = 1 << 6,
+ DR_MATCHER_CRITERIA_MAX = 1 << 7,
};
enum mlx5dr_action_type {
@@ -440,6 +442,11 @@ void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
+void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -454,6 +461,10 @@ void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
+void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -494,57 +505,64 @@ struct mlx5dr_match_spec {
/* Incoming packet Ethertype - this is the Ethertype
* following the last VLAN tag of the packet
*/
- u32 ethertype:16;
u32 smac_15_0:16; /* Source MAC address of incoming packet */
+ u32 ethertype:16;
+
u32 dmac_47_16; /* Destination MAC address of incoming packet */
- /* VLAN ID of first VLAN tag in the incoming packet.
+
+ u32 dmac_15_0:16; /* Destination MAC address of incoming packet */
+ /* Priority of first VLAN tag in the incoming packet.
* Valid only when cvlan_tag==1 or svlan_tag==1
*/
- u32 first_vid:12;
+ u32 first_prio:3;
/* CFI bit of first VLAN tag in the incoming packet.
* Valid only when cvlan_tag==1 or svlan_tag==1
*/
u32 first_cfi:1;
- /* Priority of first VLAN tag in the incoming packet.
+ /* VLAN ID of first VLAN tag in the incoming packet.
* Valid only when cvlan_tag==1 or svlan_tag==1
*/
- u32 first_prio:3;
- u32 dmac_15_0:16; /* Destination MAC address of incoming packet */
- /* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK;
- * Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS
+ u32 first_vid:12;
+
+ u32 ip_protocol:8; /* IP protocol */
+ /* Differentiated Services Code Point derived from
+ * Traffic Class/TOS field of IPv6/v4
*/
- u32 tcp_flags:9;
- u32 ip_version:4; /* IP version */
- u32 frag:1; /* Packet is an IP fragment */
- /* The first vlan in the packet is s-vlan (0x8a88).
- * cvlan_tag and svlan_tag cannot be set together
+ u32 ip_dscp:6;
+ /* Explicit Congestion Notification derived from
+ * Traffic Class/TOS field of IPv6/v4
*/
- u32 svlan_tag:1;
+ u32 ip_ecn:2;
/* The first vlan in the packet is c-vlan (0x8100).
* cvlan_tag and svlan_tag cannot be set together
*/
u32 cvlan_tag:1;
- /* Explicit Congestion Notification derived from
- * Traffic Class/TOS field of IPv6/v4
+ /* The first vlan in the packet is s-vlan (0x8a88).
+ * cvlan_tag and svlan_tag cannot be set together
*/
- u32 ip_ecn:2;
- /* Differentiated Services Code Point derived from
- * Traffic Class/TOS field of IPv6/v4
+ u32 svlan_tag:1;
+ u32 frag:1; /* Packet is an IP fragment */
+ u32 ip_version:4; /* IP version */
+ /* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK;
+ * Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS
*/
- u32 ip_dscp:6;
- u32 ip_protocol:8; /* IP protocol */
+ u32 tcp_flags:9;
+
+ /* TCP source port.;tcp and udp sport/dport are mutually exclusive */
+ u32 tcp_sport:16;
/* TCP destination port.
* tcp and udp sport/dport are mutually exclusive
*/
u32 tcp_dport:16;
- /* TCP source port.;tcp and udp sport/dport are mutually exclusive */
- u32 tcp_sport:16;
+
+ u32 reserved_auto1:24;
u32 ttl_hoplimit:8;
- u32 reserved:24;
- /* UDP destination port.;tcp and udp sport/dport are mutually exclusive */
- u32 udp_dport:16;
+
/* UDP source port.;tcp and udp sport/dport are mutually exclusive */
u32 udp_sport:16;
+ /* UDP destination port.;tcp and udp sport/dport are mutually exclusive */
+ u32 udp_dport:16;
+
/* IPv6 source address of incoming packets
* For IPv4 address use bits 31:0 (rest of the bits are reserved)
* This field should be qualified by an appropriate ethertype
@@ -588,96 +606,114 @@ struct mlx5dr_match_spec {
};
struct mlx5dr_match_misc {
- u32 source_sqn:24; /* Source SQN */
- u32 source_vhca_port:4;
- /* used with GRE, sequence number exist when gre_s_present == 1 */
- u32 gre_s_present:1;
- /* used with GRE, key exist when gre_k_present == 1 */
- u32 gre_k_present:1;
- u32 reserved_auto1:1;
/* used with GRE, checksum exist when gre_c_present == 1 */
u32 gre_c_present:1;
+ u32 reserved_auto1:1;
+ /* used with GRE, key exist when gre_k_present == 1 */
+ u32 gre_k_present:1;
+ /* used with GRE, sequence number exist when gre_s_present == 1 */
+ u32 gre_s_present:1;
+ u32 source_vhca_port:4;
+ u32 source_sqn:24; /* Source SQN */
+
+ u32 source_eswitch_owner_vhca_id:16;
/* Source port.;0xffff determines wire port */
u32 source_port:16;
- u32 source_eswitch_owner_vhca_id:16;
- /* VLAN ID of first VLAN tag the inner header of the incoming packet.
- * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
- */
- u32 inner_second_vid:12;
- /* CFI bit of first VLAN tag in the inner header of the incoming packet.
- * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
- */
- u32 inner_second_cfi:1;
- /* Priority of second VLAN tag in the inner header of the incoming packet.
- * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
- */
- u32 inner_second_prio:3;
- /* VLAN ID of first VLAN tag the outer header of the incoming packet.
+
+ /* Priority of second VLAN tag in the outer header of the incoming packet.
* Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
*/
- u32 outer_second_vid:12;
+ u32 outer_second_prio:3;
/* CFI bit of first VLAN tag in the outer header of the incoming packet.
* Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
*/
u32 outer_second_cfi:1;
- /* Priority of second VLAN tag in the outer header of the incoming packet.
+ /* VLAN ID of first VLAN tag the outer header of the incoming packet.
* Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
*/
- u32 outer_second_prio:3;
- u32 gre_protocol:16; /* GRE Protocol (outer) */
- u32 reserved_auto3:12;
- /* The second vlan in the inner header of the packet is s-vlan (0x8a88).
- * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
+ u32 outer_second_vid:12;
+ /* Priority of second VLAN tag in the inner header of the incoming packet.
+ * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
*/
- u32 inner_second_svlan_tag:1;
- /* The second vlan in the outer header of the packet is s-vlan (0x8a88).
+ u32 inner_second_prio:3;
+ /* CFI bit of first VLAN tag in the inner header of the incoming packet.
+ * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
+ */
+ u32 inner_second_cfi:1;
+ /* VLAN ID of first VLAN tag the inner header of the incoming packet.
+ * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
+ */
+ u32 inner_second_vid:12;
+
+ u32 outer_second_cvlan_tag:1;
+ u32 inner_second_cvlan_tag:1;
+ /* The second vlan in the outer header of the packet is c-vlan (0x8100).
* outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
*/
u32 outer_second_svlan_tag:1;
/* The second vlan in the inner header of the packet is c-vlan (0x8100).
* inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
*/
- u32 inner_second_cvlan_tag:1;
- /* The second vlan in the outer header of the packet is c-vlan (0x8100).
+ u32 inner_second_svlan_tag:1;
+ /* The second vlan in the outer header of the packet is s-vlan (0x8a88).
* outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
*/
- u32 outer_second_cvlan_tag:1;
- u32 gre_key_l:8; /* GRE Key [7:0] (outer) */
+ u32 reserved_auto2:12;
+ /* The second vlan in the inner header of the packet is s-vlan (0x8a88).
+ * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
+ */
+ u32 gre_protocol:16; /* GRE Protocol (outer) */
+
u32 gre_key_h:24; /* GRE Key[31:8] (outer) */
- u32 reserved_auto4:8;
+ u32 gre_key_l:8; /* GRE Key [7:0] (outer) */
+
u32 vxlan_vni:24; /* VXLAN VNI (outer) */
- u32 geneve_oam:1; /* GENEVE OAM field (outer) */
- u32 reserved_auto5:7;
+ u32 reserved_auto3:8;
+
u32 geneve_vni:24; /* GENEVE VNI field (outer) */
+ u32 reserved_auto4:6;
+ u32 geneve_tlv_option_0_exist:1;
+ u32 geneve_oam:1; /* GENEVE OAM field (outer) */
+
+ u32 reserved_auto5:12;
u32 outer_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (outer) */
+
u32 reserved_auto6:12;
u32 inner_ipv6_flow_label:20; /* Flow label of incoming IPv6 packet (inner) */
- u32 reserved_auto7:12;
- u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */
+
+ u32 reserved_auto7:10;
u32 geneve_opt_len:6; /* GENEVE OptLen (outer) */
- u32 reserved_auto8:10;
+ u32 geneve_protocol_type:16; /* GENEVE protocol type (outer) */
+
+ u32 reserved_auto8:8;
u32 bth_dst_qp:24; /* Destination QP in BTH header */
- u32 reserved_auto9:8;
- u8 reserved_auto10[20];
+
+ u32 reserved_auto9;
+ u32 outer_esp_spi;
+ u32 reserved_auto10[3];
};
struct mlx5dr_match_misc2 {
- u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */
- u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */
- u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */
u32 outer_first_mpls_label:20; /* First MPLS LABEL (outer) */
- u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */
- u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */
- u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */
+ u32 outer_first_mpls_exp:3; /* First MPLS EXP (outer) */
+ u32 outer_first_mpls_s_bos:1; /* First MPLS S_BOS (outer) */
+ u32 outer_first_mpls_ttl:8; /* First MPLS TTL (outer) */
+
u32 inner_first_mpls_label:20; /* First MPLS LABEL (inner) */
- u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */
- u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */
- u32 outer_first_mpls_over_gre_exp:3; /* last MPLS EXP (outer) */
+ u32 inner_first_mpls_exp:3; /* First MPLS EXP (inner) */
+ u32 inner_first_mpls_s_bos:1; /* First MPLS S_BOS (inner) */
+ u32 inner_first_mpls_ttl:8; /* First MPLS TTL (inner) */
+
u32 outer_first_mpls_over_gre_label:20; /* last MPLS LABEL (outer) */
- u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */
- u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */
- u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */
+ u32 outer_first_mpls_over_gre_exp:3; /* last MPLS EXP (outer) */
+ u32 outer_first_mpls_over_gre_s_bos:1; /* last MPLS S_BOS (outer) */
+ u32 outer_first_mpls_over_gre_ttl:8; /* last MPLS TTL (outer) */
+
u32 outer_first_mpls_over_udp_label:20; /* last MPLS LABEL (outer) */
+ u32 outer_first_mpls_over_udp_exp:3; /* last MPLS EXP (outer) */
+ u32 outer_first_mpls_over_udp_s_bos:1; /* last MPLS S_BOS (outer) */
+ u32 outer_first_mpls_over_udp_ttl:8; /* last MPLS TTL (outer) */
+
u32 metadata_reg_c_7; /* metadata_reg_c_7 */
u32 metadata_reg_c_6; /* metadata_reg_c_6 */
u32 metadata_reg_c_5; /* metadata_reg_c_5 */
@@ -687,7 +723,7 @@ struct mlx5dr_match_misc2 {
u32 metadata_reg_c_1; /* metadata_reg_c_1 */
u32 metadata_reg_c_0; /* metadata_reg_c_0 */
u32 metadata_reg_a; /* metadata_reg_a */
- u8 reserved_auto2[12];
+ u32 reserved_auto1[3];
};
struct mlx5dr_match_misc3 {
@@ -695,24 +731,34 @@ struct mlx5dr_match_misc3 {
u32 outer_tcp_seq_num;
u32 inner_tcp_ack_num;
u32 outer_tcp_ack_num;
- u32 outer_vxlan_gpe_vni:24;
+
u32 reserved_auto1:8;
- u32 reserved_auto2:16;
- u32 outer_vxlan_gpe_flags:8;
+ u32 outer_vxlan_gpe_vni:24;
+
u32 outer_vxlan_gpe_next_protocol:8;
+ u32 outer_vxlan_gpe_flags:8;
+ u32 reserved_auto2:16;
+
u32 icmpv4_header_data;
u32 icmpv6_header_data;
- u8 icmpv6_code;
- u8 icmpv6_type;
- u8 icmpv4_code;
+
u8 icmpv4_type;
+ u8 icmpv4_code;
+ u8 icmpv6_type;
+ u8 icmpv6_code;
+
u32 geneve_tlv_option_0_data;
- u8 gtpu_msg_flags;
- u8 gtpu_msg_type;
+
u32 gtpu_teid;
+
+ u8 gtpu_msg_type;
+ u8 gtpu_msg_flags;
+ u32 reserved_auto3:16;
+
u32 gtpu_dw_2;
u32 gtpu_first_ext_dw_0;
u32 gtpu_dw_0;
+ u32 reserved_auto4;
};
struct mlx5dr_match_misc4 {
@@ -724,6 +770,18 @@ struct mlx5dr_match_misc4 {
u32 prog_sample_field_id_2;
u32 prog_sample_field_value_3;
u32 prog_sample_field_id_3;
+ u32 reserved_auto1[8];
+};
+
+struct mlx5dr_match_misc5 {
+ u32 macsec_tag_0;
+ u32 macsec_tag_1;
+ u32 macsec_tag_2;
+ u32 macsec_tag_3;
+ u32 tunnel_header_0;
+ u32 tunnel_header_1;
+ u32 tunnel_header_2;
+ u32 tunnel_header_3;
};
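
The new misc5 block widens the match space with four MACsec tag words and four opaque tunnel header words, and matchers opt in through the DR_MATCHER_CRITERIA_MISC5 bit added earlier in this header. A hedged fragment of how a caller might mask the first tunnel header word (the surrounding matcher setup is elided):

/* Sketch only: request matching on the first tunnel header word. */
struct mlx5dr_match_param mask = {};

mask.misc5.tunnel_header_0 = 0xffffffff;	/* compare all 32 bits */
mask.misc5.tunnel_header_1 = 0;			/* ignore the second word */
/* ...and the matcher is created with DR_MATCHER_CRITERIA_MISC5 set. */
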
struct mlx5dr_match_param {
@@ -733,6 +791,7 @@ struct mlx5dr_match_param {
struct mlx5dr_match_misc2 misc2;
struct mlx5dr_match_misc3 misc3;
struct mlx5dr_match_misc4 misc4;
+ struct mlx5dr_match_misc5 misc5;
};
#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
@@ -789,6 +848,7 @@ struct mlx5dr_cmd_caps {
u8 flex_parser_id_gtpu_teid;
u8 flex_parser_id_gtpu_dw_2;
u8 flex_parser_id_gtpu_first_ext_dw_0;
+ u8 flex_parser_ok_bits_supp;
u8 max_ft_level;
u16 roce_min_src_udp;
u8 sw_format_ver;
@@ -843,12 +903,15 @@ struct mlx5dr_domain {
struct mlx5dr_domain_info info;
struct xarray csum_fts_xa;
struct mlx5dr_ste_ctx *ste_ctx;
+ struct list_head dbg_tbl_list;
+ struct mlx5dr_dbg_dump_info dump_info;
};
struct mlx5dr_table_rx_tx {
struct mlx5dr_ste_htbl *s_anchor;
struct mlx5dr_domain_rx_tx *nic_dmn;
u64 default_icm_addr;
+ struct list_head nic_matcher_list;
};
struct mlx5dr_table {
@@ -862,6 +925,7 @@ struct mlx5dr_table {
struct list_head matcher_list;
struct mlx5dr_action *miss_action;
refcount_t refcount;
+ struct list_head dbg_node;
};
struct mlx5dr_matcher_rx_tx {
@@ -875,18 +939,21 @@ struct mlx5dr_matcher_rx_tx {
u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
u64 default_icm_addr;
struct mlx5dr_table_rx_tx *nic_tbl;
+ u32 prio;
+ struct list_head list_node;
+ u32 rules;
};
struct mlx5dr_matcher {
struct mlx5dr_table *tbl;
struct mlx5dr_matcher_rx_tx rx;
struct mlx5dr_matcher_rx_tx tx;
- struct list_head matcher_list;
+ struct list_head list_node; /* Used for both matchers and dbg managing */
u32 prio;
struct mlx5dr_match_param mask;
u8 match_criteria;
refcount_t refcount;
- struct mlx5dv_flow_matcher *dv_matcher;
+ struct list_head dbg_rule_list;
};
struct mlx5dr_ste_action_modify_field {
@@ -958,6 +1025,11 @@ struct mlx5dr_action_flow_tag {
u32 flow_tag;
};
+struct mlx5dr_rule_action_member {
+ struct mlx5dr_action *action;
+ struct list_head list;
+};
+
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
@@ -998,6 +1070,7 @@ struct mlx5dr_rule {
struct mlx5dr_rule_rx_tx rx;
struct mlx5dr_rule_rx_tx tx;
struct list_head rule_actions_list;
+ struct list_head dbg_node;
u32 flow_source;
};
@@ -1050,6 +1123,11 @@ static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
mlx5dr_domain_nic_unlock(&dmn->info.rx);
}
+int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_matcher_rx_tx *nic_matcher);
+int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn,
+ struct mlx5dr_matcher_rx_tx *nic_matcher);
+
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
enum mlx5dr_ipv outer_ipv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 2632d5ae9bc0..a476da2424f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */
+#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
@@ -194,6 +195,15 @@ static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
dest_attr->vport.vhca_id);
}
+static struct mlx5dr_action *create_uplink_action(struct mlx5dr_domain *domain,
+ struct mlx5_flow_rule *dst)
+{
+ struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+
+ return mlx5dr_action_create_dest_vport(domain, MLX5_VPORT_UPLINK, 1,
+ dest_attr->vport.vhca_id);
+}
+
static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
@@ -218,7 +228,8 @@ static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domai
static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
{
- return dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ return (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+ dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
@@ -411,8 +422,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_actions[num_term_actions++].dest = tmp_action;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
- tmp_action = create_vport_action(domain, dst);
+ tmp_action = type == MLX5_FLOW_DESTINATION_TYPE_VPORT ?
+ create_vport_action(domain, dst) :
+ create_uplink_action(domain, dst);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index d2a937f69784..9604b2091358 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -447,6 +447,14 @@ struct mlx5_ifc_ste_flex_parser_1_bits {
u8 flex_parser_4[0x20];
};
+struct mlx5_ifc_ste_flex_parser_ok_bits {
+ u8 flex_parser_3[0x20];
+ u8 flex_parser_2[0x20];
+ u8 flex_parsers_ok[0x8];
+ u8 reserved_at_48[0x18];
+ u8 flex_parser_0[0x20];
+};
+
struct mlx5_ifc_ste_flex_parser_tnl_bits {
u8 flex_parser_tunneling_header_63_32[0x20];
@@ -490,6 +498,14 @@ struct mlx5_ifc_ste_flex_parser_tnl_gtpu_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_ste_tunnel_header_bits {
+ u8 tunnel_header_0[0x20];
+
+ u8 tunnel_header_1[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
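
As with the other mlx5_ifc layouts, these structs describe hardware bit layout rather than C storage: u8 tunnel_header_0[0x20] declares a 32-bit big-endian field at the offset implied by the preceding widths, accessed through the MLX5_SET/MLX5_GET macros rather than direct member access. A fragment of how the new layout would be driven (hdr0 and hdr1 are placeholder values; the accessor style matches the flex_parsers_ok write in this patch):

/* Sketch: write both tunnel header words into an STE tag buffer. */
MLX5_SET(ste_tunnel_header, tag, tunnel_header_0, hdr0);
MLX5_SET(ste_tunnel_header, tag, tunnel_header_1, hdr1);
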
struct mlx5_ifc_ste_general_purpose_bits {
u8 general_purpose_lookup_field[0x20];
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
index 92b798f8e73a..ceeb7f4c3f6c 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
@@ -33,8 +33,11 @@ static void mlxbf_gige_get_regs(struct net_device *netdev,
memcpy_fromio(p, priv->base, MLXBF_GIGE_MMIO_REG_SZ);
}
-static void mlxbf_gige_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static void
+mlxbf_gige_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
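
The signature change tracks the ethtool core: .get_ringparam now also receives a kernel_ethtool_ringparam (for parameters with no legacy UAPI slot) and an extack for error reporting, and drivers that only report the classic fields can ignore both. A minimal sketch of an implementation under the new prototype (EXAMPLE_RING_SIZE and the function name are placeholders):

#define EXAMPLE_RING_SIZE 128	/* placeholder ring size */

static void
example_get_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *ering,
		      struct kernel_ethtool_ringparam *kernel_ering,
		      struct netlink_ext_ack *extack)
{
	ering->rx_max_pending = EXAMPLE_RING_SIZE;
	ering->rx_pending = EXAMPLE_RING_SIZE;		/* fixed-size ring */
	ering->tx_max_pending = EXAMPLE_RING_SIZE;
	ering->tx_pending = EXAMPLE_RING_SIZE;
	/* kernel_ering and extack may be left untouched by simple drivers. */
}
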
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index d1ae248e125c..4683312861ac 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -66,7 +66,7 @@ config MLXSW_SPECTRUM
default m
help
This driver supports Mellanox Technologies
- Spectrum/Spectrum-2/Spectrum-3 Ethernet Switch ASICs.
+ Spectrum/Spectrum-2/Spectrum-3/Spectrum-4 Ethernet Switch ASICs.
To compile this driver as a module, choose M here: the
module will be called mlxsw_spectrum.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 392ce3cb27f7..51b260d54237 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -935,6 +935,18 @@ static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
*/
MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
+enum mlxsw_cmd_mbox_sw2hw_dq_sdq_lp {
+ MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE,
+ MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE,
+};
+
+/* cmd_mbox_sw2hw_dq_sdq_lp
+ * SDQ local Processing
+ * 0: local processing by wqe.lp
+ * 1: local processing (ignoring wqe.lp)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_lp, 0x00, 23, 1);
+
/* cmd_mbox_sw2hw_dq_sdq_tclass
* SDQ: CPU Egress TClass
* RDQ: Reserved
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 3fd3812b8f31..866b9357939b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -47,7 +47,7 @@ static struct workqueue_struct *mlxsw_owq;
struct mlxsw_core_port {
struct devlink_port devlink_port;
void *port_driver_priv;
- u8 local_port;
+ u16 local_port;
};
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
@@ -77,7 +77,7 @@ struct mlxsw_core {
bool enable_string_tlv;
} emad;
struct {
- u8 *mapping; /* lag_id+port_index to local_port mapping */
+ u16 *mapping; /* lag_id+port_index to local_port mapping */
} lag;
struct mlxsw_res res;
struct mlxsw_hwmon *hwmon;
@@ -160,7 +160,7 @@ static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
if (!reload)
- devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core));
kfree(mlxsw_core->ports);
}
@@ -718,7 +718,7 @@ static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
}
/* called with rcu read lock held */
-static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
+static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port,
void *priv)
{
struct mlxsw_core *mlxsw_core = priv;
@@ -1708,6 +1708,124 @@ static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
static const struct mlxsw_listener mlxsw_core_health_listener =
MLXSW_EVENTL(mlxsw_core_health_listener_func, MFDE, MFDE);
+static int
+mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val, tile_v;
+ int err;
+
+ val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "cause_id", val);
+ if (err)
+ return err;
+ tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl);
+ if (tile_v) {
+ val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val, tile_v;
+ int err;
+
+ val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var0", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var1", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var2", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var3", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "var4", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "existptr", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "callra", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
+ if (err)
+ return err;
+ tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl);
+ if (tile_v) {
+ val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
+ if (err)
+ return err;
+ }
+ val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val;
+ int err;
+
+ val = mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl);
+ return devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
+}
+
+static int
+mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl,
+ struct devlink_fmsg *fmsg)
+{
+ u32 val;
+ int err;
+
+ val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl);
+ err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
+ if (err)
+ return err;
+
+ return 0;
+}
+
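
Each helper above repeats one step -- read an MFDE field, emit it as a named pair, propagate the first error so the health dump aborts cleanly. As a design note, the u32 fields could equally be table-driven; a kernel-context sketch under that assumption (not the in-tree approach; the getters are the register accessors used above):

/* Sketch only: table-driven variant of the repeated get+put step. */
struct mfde_u32_field {
	const char *name;
	u32 (*get)(const char *mfde_pl);
};

static const struct mfde_u32_field fw_assert_u32_fields[] = {
	{ "var0",     mlxsw_reg_mfde_fw_assert_var0_get },
	{ "var1",     mlxsw_reg_mfde_fw_assert_var1_get },
	{ "existptr", mlxsw_reg_mfde_fw_assert_existptr_get },
	{ "callra",   mlxsw_reg_mfde_fw_assert_callra_get },
};

static int mfde_dump_u32_fields(const char *mfde_pl,
				struct devlink_fmsg *fmsg)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(fw_assert_u32_fields); i++) {
		err = devlink_fmsg_u32_pair_put(fmsg,
						fw_assert_u32_fields[i].name,
						fw_assert_u32_fields[i].get(mfde_pl));
		if (err)
			return err;
	}
	return 0;
}
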
static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg, void *priv_ctx,
struct netlink_ext_ack *extack)
@@ -1741,6 +1859,46 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
val_str = "KVD insertion machine stopped";
break;
+ case MLXSW_REG_MFDE_EVENT_ID_TEST:
+ val_str = "Test";
+ break;
+ case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
+ val_str = "FW assert";
+ break;
+ case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
+ val_str = "Fatal cause";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
+ if (err)
+ return err;
+ }
+
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "severity");
+ if (err)
+ return err;
+
+ val = mlxsw_reg_mfde_severity_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "id", val);
+ if (err)
+ return err;
+ switch (val) {
+ case MLXSW_REG_MFDE_SEVERITY_FATL:
+ val_str = "Fatal";
+ break;
+ case MLXSW_REG_MFDE_SEVERITY_NRML:
+ val_str = "Normal";
+ break;
+ case MLXSW_REG_MFDE_SEVERITY_INTR:
+ val_str = "Debug";
+ break;
default:
val_str = NULL;
}
@@ -1749,6 +1907,7 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
if (err)
return err;
}
+
err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err)
return err;
@@ -1800,24 +1959,18 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
if (err)
return err;
- if (event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO) {
- val = mlxsw_reg_mfde_log_address_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
- if (err)
- return err;
- val = mlxsw_reg_mfde_log_id_get(mfde_pl);
- err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
- if (err)
- return err;
- val = mlxsw_reg_mfde_log_ip_get(mfde_pl);
- err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
- if (err)
- return err;
- } else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) {
- val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
- if (err)
- return err;
+ switch (event_id) {
+ case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
+ return mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl,
+ fmsg);
+ case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
+ return mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl,
+ fmsg);
+ case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
+ return mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg);
+ case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
+ return mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl,
+ fmsg);
}
return 0;
@@ -1959,7 +2112,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
- alloc_size = sizeof(u8) *
+ alloc_size = sizeof(*mlxsw_core->lag.mapping) *
MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
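
Switching the allocation from a hardcoded sizeof(u8) to sizeof(*mlxsw_core->lag.mapping) spells the element type once, so widening local ports to u16 cannot silently under-allocate; the table itself stays a flat MAX_LAG x MAX_LAG_MEMBERS array addressed by (lag_id, port_index). A standalone model of that addressing (the dimensions are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_LAG		64	/* illustrative */
#define MAX_LAG_MEMBERS	32	/* illustrative */

/* Flat 2-D addressing: one row per LAG, one slot per member index. */
static int lag_mapping_index(uint16_t lag_id, uint8_t port_index)
{
	return MAX_LAG_MEMBERS * lag_id + port_index;
}

int main(void)
{
	/* sizeof(*mapping) keeps the alloc in sync with the u16 type. */
	uint16_t *mapping = calloc(MAX_LAG * MAX_LAG_MEMBERS,
				   sizeof(*mapping));

	mapping[lag_mapping_index(5, 2)] = 300;	/* ports > 255 now fit */
	printf("%u\n", mapping[lag_mapping_index(5, 2)]);
	free(mapping);
	return 0;
}
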
@@ -2033,7 +2186,7 @@ err_alloc_lag_mapping:
mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
if (!reload)
- devlink_resources_unregister(devlink, NULL);
+ devlink_resources_unregister(devlink);
err_register_resources:
mlxsw_bus->fini(bus_priv);
err_bus_init:
@@ -2099,7 +2252,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core, reload);
if (!reload)
- devlink_resources_unregister(devlink, NULL);
+ devlink_resources_unregister(devlink);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
if (!reload)
devlink_free(devlink);
@@ -2108,7 +2261,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
reload_fail_deinit:
mlxsw_core_params_unregister(mlxsw_core);
- devlink_resources_unregister(devlink, NULL);
+ devlink_resources_unregister(devlink);
devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
@@ -2130,7 +2283,7 @@ int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
if (mlxsw_core->driver->ptp_transmitted)
mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
@@ -2208,7 +2361,7 @@ mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
rxl_item->enabled = enabled;
}
-static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u16 local_port,
void *priv)
{
struct mlxsw_event_listener_item *event_listener_item = priv;
@@ -2641,7 +2794,7 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
{
struct mlxsw_rx_listener_item *rxl_item;
const struct mlxsw_rx_listener *rxl;
- u8 local_port;
+ u16 local_port;
bool found = false;
if (rx_info->is_lag) {
@@ -2699,7 +2852,7 @@ static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
}
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 port_index, u8 local_port)
+ u16 lag_id, u8 port_index, u16 local_port)
{
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, port_index);
@@ -2708,8 +2861,8 @@ void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
-u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 port_index)
+u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
+ u16 lag_id, u8 port_index)
{
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, port_index);
@@ -2719,7 +2872,7 @@ u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 local_port)
+ u16 lag_id, u16 local_port)
{
int i;
@@ -2747,7 +2900,7 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_res_get);
-static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
enum devlink_port_flavour flavour,
u32 port_number, bool split,
u32 split_port_subnumber,
@@ -2778,7 +2931,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
return err;
}
-static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
+static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
@@ -2788,7 +2941,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
u32 port_number, bool split,
u32 split_port_subnumber,
bool splittable, u32 lanes,
@@ -2810,7 +2963,7 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_init);
-void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
+void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
{
atomic_dec(&mlxsw_core->active_ports_count);
@@ -2845,7 +2998,7 @@ void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);
-void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv, struct net_device *dev)
{
struct mlxsw_core_port *mlxsw_core_port =
@@ -2857,7 +3010,7 @@ void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);
-void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv)
{
struct mlxsw_core_port *mlxsw_core_port =
@@ -2869,7 +3022,7 @@ void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);
-void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv)
{
struct mlxsw_core_port *mlxsw_core_port =
@@ -2882,7 +3035,7 @@ void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
EXPORT_SYMBOL(mlxsw_core_port_clear);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
- u8 local_port)
+ u16 local_port)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
@@ -2895,7 +3048,7 @@ EXPORT_SYMBOL(mlxsw_core_port_type_get);
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
- u8 local_port)
+ u16 local_port)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
@@ -2905,7 +3058,7 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port)
+bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
{
const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
int i;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 12023a550007..f30bb8614e69 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -54,7 +54,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, bool reload);
struct mlxsw_tx_info {
- u8 local_port;
+ u16 local_port;
bool is_emad;
};
@@ -67,7 +67,7 @@ struct mlxsw_rx_md_info {
u16 tx_sys_port;
u16 tx_lag_id;
};
- u8 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */
+ u16 tx_lag_port_index; /* Valid when 'tx_port_is_lag' is set. */
u8 tx_tc;
u8 latency_valid:1,
tx_congestion_valid:1,
@@ -82,11 +82,11 @@ bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb, u8 local_port);
+ struct sk_buff *skb, u16 local_port);
struct mlxsw_rx_listener {
- void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
- u8 local_port;
+ void (*func)(struct sk_buff *skb, u16 local_port, void *priv);
+ u16 local_port;
u8 mirror_reason;
u16 trap_id;
};
@@ -209,7 +209,7 @@ struct mlxsw_rx_info {
u16 sys_port;
u16 lag_id;
} u;
- u8 lag_port_index;
+ u16 lag_port_index;
u8 mirror_reason;
int trap_id;
};
@@ -218,36 +218,36 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
struct mlxsw_rx_info *rx_info);
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 port_index, u8 local_port);
-u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 port_index);
+ u16 lag_id, u8 port_index, u16 local_port);
+u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
+ u16 lag_id, u8 port_index);
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
- u16 lag_id, u8 local_port);
+ u16 lag_id, u16 local_port);
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
u32 port_number, bool split, u32 split_port_subnumber,
bool splittable, u32 lanes,
const unsigned char *switch_id,
unsigned char switch_id_len);
-void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port);
+void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port);
int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
void *port_driver_priv,
const unsigned char *switch_id,
unsigned char switch_id_len);
void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core);
-void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv, struct net_device *dev);
-void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv);
-void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
+void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
- u8 local_port);
+ u16 local_port);
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
- u8 local_port);
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port);
+ u16 local_port);
+bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port);
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
@@ -316,11 +316,11 @@ struct mlxsw_driver {
struct netlink_ext_ack *extack);
void (*fini)(struct mlxsw_core *mlxsw_core);
int (*basic_trap_groups_set)(struct mlxsw_core *mlxsw_core);
- int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ int (*port_type_set)(struct mlxsw_core *mlxsw_core, u16 local_port,
enum devlink_port_type new_type);
- int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ int (*port_split)(struct mlxsw_core *mlxsw_core, u16 local_port,
unsigned int count, struct netlink_ext_ack *extack);
- int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port,
struct netlink_ext_ack *extack);
int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
@@ -394,7 +394,7 @@ struct mlxsw_driver {
* is responsible for freeing the passed-in SKB.
*/
void (*ptp_transmitted)(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb, u8 local_port);
+ struct sk_buff *skb, u16 local_port);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 78d9c0196f2b..77e82e6cf6e8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -113,7 +113,7 @@ static const struct rhashtable_params mlxsw_afa_set_ht_params = {
};
struct mlxsw_afa_fwd_entry_ht_key {
- u8 local_port;
+ u16 local_port;
};
struct mlxsw_afa_fwd_entry {
@@ -555,7 +555,7 @@ int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
EXPORT_SYMBOL(mlxsw_afa_block_terminate);
static struct mlxsw_afa_fwd_entry *
-mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
+mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port)
{
struct mlxsw_afa_fwd_entry *fwd_entry;
int err;
@@ -598,7 +598,7 @@ static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa,
}
static struct mlxsw_afa_fwd_entry *
-mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port)
+mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
{
struct mlxsw_afa_fwd_entry_ht_key ht_key = {0};
struct mlxsw_afa_fwd_entry *fwd_entry;
@@ -647,7 +647,7 @@ mlxsw_afa_fwd_entry_ref_destructor(struct mlxsw_afa_block *block,
}
static struct mlxsw_afa_fwd_entry_ref *
-mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
+mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u16 local_port)
{
struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
struct mlxsw_afa_fwd_entry *fwd_entry;
@@ -1352,7 +1352,7 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
struct mlxsw_afa_mirror {
struct mlxsw_afa_resource resource;
int span_id;
- u8 local_in_port;
+ u16 local_in_port;
bool ingress;
};
@@ -1379,7 +1379,7 @@ mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
}
static struct mlxsw_afa_mirror *
-mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u8 local_in_port,
+mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u16 local_in_port,
const struct net_device *out_dev, bool ingress)
{
struct mlxsw_afa_mirror *mirror;
@@ -1423,7 +1423,7 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
}
int
-mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port,
+mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u16 local_in_port,
const struct net_device *out_dev, bool ingress,
struct netlink_ext_ack *extack)
{
@@ -1663,7 +1663,7 @@ mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
}
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
- u8 local_port, bool in_port,
+ u16 local_port, bool in_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
@@ -2038,7 +2038,7 @@ static void mlxsw_afa_sampler_pack(char *payload, u8 mirror_agent, u32 rate)
struct mlxsw_afa_sampler {
struct mlxsw_afa_resource resource;
int span_id;
- u8 local_port;
+ u16 local_port;
bool ingress;
};
@@ -2061,7 +2061,7 @@ static void mlxsw_afa_sampler_destructor(struct mlxsw_afa_block *block,
}
static struct mlxsw_afa_sampler *
-mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u8 local_port,
+mlxsw_afa_sampler_create(struct mlxsw_afa_block *block, u16 local_port,
struct psample_group *psample_group, u32 rate,
u32 trunc_size, bool truncate, bool ingress,
struct netlink_ext_ack *extack)
@@ -2104,7 +2104,7 @@ mlxsw_afa_block_append_allocated_sampler(struct mlxsw_afa_block *block,
return 0;
}
-int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port,
+int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u16 local_port,
struct psample_group *psample_group,
u32 rate, u32 trunc_size, bool truncate,
bool ingress,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index b65bf98eb5ab..16cbd6acbb01 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -17,24 +17,24 @@ struct mlxsw_afa_ops {
void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
int (*kvdl_set_activity_get)(void *priv, u32 kvdl_index,
bool *activity);
- int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
+ int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u16 local_port);
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
void (*counter_index_put)(void *priv, unsigned int counter_index);
- int (*mirror_add)(void *priv, u8 local_in_port,
+ int (*mirror_add)(void *priv, u16 local_in_port,
const struct net_device *out_dev,
bool ingress, int *p_span_id);
- void (*mirror_del)(void *priv, u8 local_in_port, int span_id,
+ void (*mirror_del)(void *priv, u16 local_in_port, int span_id,
bool ingress);
int (*policer_add)(void *priv, u64 rate_bytes_ps, u32 burst,
u16 *p_policer_index,
struct netlink_ext_ack *extack);
void (*policer_del)(void *priv, u16 policer_index);
- int (*sampler_add)(void *priv, u8 local_port,
+ int (*sampler_add)(void *priv, u16 local_port,
struct psample_group *psample_group, u32 rate,
u32 trunc_size, bool truncate, bool ingress,
int *p_span_id, struct netlink_ext_ack *extack);
- void (*sampler_del)(void *priv, u8 local_port, int span_id,
+ void (*sampler_del)(void *priv, u16 local_port, int span_id,
bool ingress);
bool dummy_first_set;
};
@@ -62,12 +62,12 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id);
int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
- u8 local_in_port,
+ u16 local_in_port,
const struct net_device *out_dev,
bool ingress,
struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
- u8 local_port, bool in_port,
+ u16 local_port, bool in_port,
struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
u16 vid, u8 pcp, u8 et,
@@ -98,7 +98,7 @@ int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block,
u32 fa_index, u64 rate_bytes_ps, u32 burst,
u16 *p_policer_index,
struct netlink_ext_ack *extack);
-int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u8 local_port,
+int mlxsw_afa_block_append_sampler(struct mlxsw_afa_block *block, u16 local_port,
struct psample_group *psample_group,
u32 rate, u32 trunc_size, bool truncate,
bool ingress,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index f1b09c2f9eda..bd1a51a0a540 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -32,8 +32,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
- MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_8_10, 0x18, 17, 3),
- MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_7, 0x18, 20, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 3),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 20, 8),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index a47a17c04c62..3a037fe47211 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -33,8 +33,8 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_IP_TTL_,
MLXSW_AFK_ELEMENT_IP_ECN,
MLXSW_AFK_ELEMENT_IP_DSCP,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
MLXSW_AFK_ELEMENT_MAX,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index ab70a873a01a..cfafbeb42586 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -367,6 +367,42 @@ mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val) \
__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
+#define LOCAL_PORT_LSB_SIZE 8
+#define LOCAL_PORT_MSB_SIZE 2
+
+#define MLXSW_ITEM32_LP(_type, _cname, _offset1, _shift1, _offset2, _shift2) \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, local_port) = { \
+ .offset = _offset1, \
+ .shift = _shift1, \
+ .size = {.bits = LOCAL_PORT_LSB_SIZE,}, \
+ .name = #_type "_" #_cname "_local_port", \
+}; \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, lp_msb) = { \
+ .offset = _offset2, \
+ .shift = _shift2, \
+ .size = {.bits = LOCAL_PORT_MSB_SIZE,}, \
+ .name = #_type "_" #_cname "_lp_msb", \
+}; \
+static inline u32 __maybe_unused \
+mlxsw_##_type##_##_cname##_local_port_get(const char *buf) \
+{ \
+ u32 local_port, lp_msb; \
+ \
+ local_port = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, \
+ local_port), 0); \
+ lp_msb = __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, lp_msb), \
+ 0); \
+ return (lp_msb << LOCAL_PORT_LSB_SIZE) + local_port; \
+} \
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_local_port_set(char *buf, u32 val) \
+{ \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, local_port), 0, \
+ val & ((1 << LOCAL_PORT_LSB_SIZE) - 1)); \
+ __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, lp_msb), 0, \
+ val >> LOCAL_PORT_LSB_SIZE); \
+}
+
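
With Spectrum-4 the local port number outgrows 8 bits, but the register layouts keep the legacy 8-bit local_port field and add a separate 2-bit lp_msb field -- which is why the macro emits paired get/set helpers that split and rejoin the value. A standalone model of the round-trip arithmetic performed by those helpers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LOCAL_PORT_LSB_SIZE 8
#define LOCAL_PORT_MSB_SIZE 2

int main(void)
{
	uint16_t port = 300;		/* needs more than 8 bits */

	/* set: split into the legacy 8-bit field plus the 2-bit MSB */
	unsigned int lsb = port & ((1 << LOCAL_PORT_LSB_SIZE) - 1);	/* 44 */
	unsigned int msb = port >> LOCAL_PORT_LSB_SIZE;			/* 1  */

	/* get: rejoin exactly as the generated getter does */
	unsigned int joined = (msb << LOCAL_PORT_LSB_SIZE) + lsb;

	assert(msb < (1 << LOCAL_PORT_MSB_SIZE));	/* fits in lp_msb */
	assert(joined == port);
	printf("lsb=%u msb=%u joined=%u\n", lsb, msb, joined);
	return 0;
}
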
#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
_step, _instepoffset, _norealshift) \
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 5d4dfa5ddbb5..10d13f5f9c7d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -38,7 +38,7 @@ struct mlxsw_m {
struct mlxsw_m_port {
struct net_device *dev;
struct mlxsw_m *mlxsw_m;
- u8 local_port;
+ u16 local_port;
u8 module;
};
@@ -180,7 +180,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
};
static int
-mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u8 local_port,
+mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u16 local_port,
u8 *p_module, u8 *p_width)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
@@ -214,7 +214,7 @@ mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port)
}
static int
-mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
+mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module)
{
struct mlxsw_m_port *mlxsw_m_port;
struct net_device *dev;
@@ -277,7 +277,7 @@ err_alloc_etherdev:
return err;
}
-static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port)
+static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u16 local_port)
{
struct mlxsw_m_port *mlxsw_m_port = mlxsw_m->ports[local_port];
@@ -288,7 +288,7 @@ static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port)
mlxsw_core_port_fini(mlxsw_m->core, local_port);
}
-static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
+static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
u8 *last_module)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
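All of these helpers widen local_port from u8 to u16. The Spectrum-4 device id added in pci.h below brings local port numbers past 255, where the old type silently wraps; a standalone illustration of the failure mode, with an arbitrary port number:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t local_port = 300;		/* plausible on > 256-port systems */
	uint8_t old_repr = (uint8_t)local_port;	/* the former representation */

	/* Prints "300 truncates to 44": the u8 wraps modulo 256. */
	printf("%u truncates to %u\n", local_port, old_repr);
	return 0;
}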
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index a15c95a10bae..f91dde4df152 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -285,6 +285,7 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
int tclass;
+ int lp;
int i;
int err;
@@ -292,9 +293,12 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
q->consumer_counter = 0;
tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
MLXSW_PCI_SDQ_CTL_TC;
+ lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
+ MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;
/* Set CQ of same number of this SDQ. */
mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+ mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
@@ -1678,7 +1682,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
wqe = elem_info->elem;
mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
- mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
+ mlxsw_pci_wqe_lp_set(wqe, 0);
mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
@@ -1973,6 +1977,7 @@ int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
pci_driver->probe = mlxsw_pci_probe;
pci_driver->remove = mlxsw_pci_remove;
+ pci_driver->shutdown = mlxsw_pci_remove;
return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index 9899c1a2ea8f..cacc2f9fa1d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -9,6 +9,7 @@
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM3 0xcf70
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM4 0xcf80
#if IS_ENABLED(CONFIG_MLXSW_PCI)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 8d420eb8ade2..24cc65018b41 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -69,52 +69,6 @@ MLXSW_REG_DEFINE(spad, MLXSW_REG_SPAD_ID, MLXSW_REG_SPAD_LEN);
*/
MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
-/* SMID - Switch Multicast ID
- * --------------------------
- * The MID record maps from a MID (Multicast ID), which is a unique identifier
- * of the multicast group within the stacking domain, into a list of local
- * ports into which the packet is replicated.
- */
-#define MLXSW_REG_SMID_ID 0x2007
-#define MLXSW_REG_SMID_LEN 0x240
-
-MLXSW_REG_DEFINE(smid, MLXSW_REG_SMID_ID, MLXSW_REG_SMID_LEN);
-
-/* reg_smid_swid
- * Switch partition ID.
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
-
-/* reg_smid_mid
- * Multicast identifier - global identifier that represents the multicast group
- * across all devices.
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
-
-/* reg_smid_port
- * Local port memebership (1 bit per port).
- * Access: RW
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
-
-/* reg_smid_port_mask
- * Local port mask (1 bit per port).
- * Access: W
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
-
-static inline void mlxsw_reg_smid_pack(char *payload, u16 mid,
- u8 port, bool set)
-{
- MLXSW_REG_ZERO(smid, payload);
- mlxsw_reg_smid_swid_set(payload, 0);
- mlxsw_reg_smid_mid_set(payload, mid);
- mlxsw_reg_smid_port_set(payload, port, set);
- mlxsw_reg_smid_port_mask_set(payload, port, 1);
-}
-
/* SSPR - Switch System Port Record Register
* -----------------------------------------
* Configures the system port to local port mapping.
@@ -141,7 +95,7 @@ MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
*
* Access: RW
*/
-MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, sspr, 0x00, 16, 0x00, 12);
/* reg_sspr_sub_port
* Virtual port within the physical port.
@@ -161,7 +115,7 @@ MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
*/
MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16);
-static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_sspr_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(sspr, payload);
mlxsw_reg_sspr_m_set(payload, 1);
@@ -407,7 +361,7 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
const char *mac, u16 fid_vid,
enum mlxsw_reg_sfd_rec_action action,
- u8 local_port)
+ u16 local_port)
{
mlxsw_reg_sfd_rec_pack(payload, rec_index,
MLXSW_REG_SFD_REC_TYPE_UNICAST, mac, action);
@@ -417,15 +371,6 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
}
-static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
- char *mac, u16 *p_fid_vid,
- u8 *p_local_port)
-{
- mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
- *p_fid_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
- *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index);
-}
-
/* reg_sfd_uc_lag_sub_port
* LAG sub port.
* Must be 0 if multichannel VEPA is not enabled.
@@ -478,15 +423,6 @@ mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
}
-static inline void mlxsw_reg_sfd_uc_lag_unpack(char *payload, int rec_index,
- char *mac, u16 *p_vid,
- u16 *p_lag_id)
-{
- mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
- *p_vid = mlxsw_reg_sfd_uc_lag_fid_vid_get(payload, rec_index);
- *p_lag_id = mlxsw_reg_sfd_uc_lag_lag_id_get(payload, rec_index);
-}
-
/* reg_sfd_mc_pgi
*
* Multicast port group index - index into the port group table.
@@ -568,19 +504,43 @@ static inline void
mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
const char *mac, u16 fid,
- enum mlxsw_reg_sfd_rec_action action, u32 uip,
+ enum mlxsw_reg_sfd_rec_action action,
enum mlxsw_reg_sfd_uc_tunnel_protocol proto)
{
mlxsw_reg_sfd_rec_pack(payload, rec_index,
MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL, mac,
action);
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
- mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24);
- mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip);
mlxsw_reg_sfd_uc_tunnel_fid_set(payload, rec_index, fid);
mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto);
}
+static inline void
+mlxsw_reg_sfd_uc_tunnel_pack4(char *payload, int rec_index,
+ enum mlxsw_reg_sfd_rec_policy policy,
+ const char *mac, u16 fid,
+ enum mlxsw_reg_sfd_rec_action action, u32 uip)
+{
+ mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24);
+ mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip);
+ mlxsw_reg_sfd_uc_tunnel_pack(payload, rec_index, policy, mac, fid,
+ action,
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4);
+}
+
+static inline void
+mlxsw_reg_sfd_uc_tunnel_pack6(char *payload, int rec_index, const char *mac,
+ u16 fid, enum mlxsw_reg_sfd_rec_action action,
+ u32 uip_ptr)
+{
+ mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip_ptr);
+ /* Only static policy is supported for IPv6 unicast tunnel entry. */
+ mlxsw_reg_sfd_uc_tunnel_pack(payload, rec_index,
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
+ mac, fid, action,
+ MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6);
+}
+
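The refactor splits tunnel-record packing by protocol: pack4 still carries the 32-bit underlay IP inline via uip_msb/uip_lsb, while pack6 stores only uip_ptr, a reference to an underlay IPv6 address programmed elsewhere, with the policy pinned to static as the comment notes. A usage sketch under in-tree assumptions (mac, fid_index, addr4 and uip_ptr are placeholders; the function name is hypothetical):

/* Compiles only against the in-tree mlxsw headers. */
static void example_sfd_tunnel_pack(char *sfd_pl, const char *mac,
				    u16 fid_index, u32 addr4, u32 uip_ptr)
{
	/* IPv4 record: the underlay IP travels inline. */
	mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
				      MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS,
				      mac, fid_index,
				      MLXSW_REG_SFD_REC_ACTION_NOP, addr4);

	/* IPv6 record: only a reference to an address programmed elsewhere;
	 * the helper fixes the policy to static internally.
	 */
	mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 1, mac, fid_index,
				      MLXSW_REG_SFD_REC_ACTION_NOP, uip_ptr);
}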
enum mlxsw_reg_tunnel_port {
MLXSW_REG_TUNNEL_PORT_NVE,
MLXSW_REG_TUNNEL_PORT_VPLS,
@@ -692,7 +652,7 @@ MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16,
static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index,
char *mac, u16 *p_vid,
- u8 *p_local_port)
+ u16 *p_local_port)
{
mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
*p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
@@ -781,7 +741,7 @@ MLXSW_REG_DEFINE(spms, MLXSW_REG_SPMS_ID, MLXSW_REG_SPMS_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spms, 0x00, 16, 0x00, 12);
enum mlxsw_reg_spms_state {
MLXSW_REG_SPMS_STATE_NO_CHANGE,
@@ -800,7 +760,7 @@ enum mlxsw_reg_spms_state {
*/
MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
-static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_spms_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(spms, payload);
mlxsw_reg_spms_local_port_set(payload, local_port);
@@ -833,7 +793,7 @@ MLXSW_ITEM32(reg, spvid, tport, 0x00, 24, 1);
* When tport = 1: Tunnel port.
* Access: Index
*/
-MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvid, 0x00, 16, 0x00, 12);
/* reg_spvid_sub_port
* Virtual port within the physical port.
@@ -868,7 +828,7 @@ MLXSW_ITEM32(reg, spvid, et_vlan, 0x04, 16, 2);
*/
MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12);
-static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid,
+static inline void mlxsw_reg_spvid_pack(char *payload, u16 local_port, u16 pvid,
u8 et_vlan)
{
MLXSW_REG_ZERO(spvid, payload);
@@ -911,7 +871,7 @@ MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvm, 0x00, 16, 0x00, 12);
/* reg_spvm_sub_port
* Virtual port within the physical port.
@@ -959,7 +919,7 @@ MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid,
MLXSW_REG_SPVM_BASE_LEN, 0, 12,
MLXSW_REG_SPVM_REC_LEN, 0, false);
-static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spvm_pack(char *payload, u16 local_port,
u16 vid_begin, u16 vid_end,
bool is_member, bool untagged)
{
@@ -994,7 +954,7 @@ MLXSW_REG_DEFINE(spaft, MLXSW_REG_SPAFT_ID, MLXSW_REG_SPAFT_LEN);
*
* Note: CPU port is not supported (all tag types are allowed).
*/
-MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spaft, 0x00, 16, 0x00, 12);
/* reg_spaft_sub_port
* Virtual port within the physical port.
@@ -1021,7 +981,7 @@ MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1);
*/
MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1);
-static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spaft_pack(char *payload, u16 local_port,
bool allow_untagged)
{
MLXSW_REG_ZERO(spaft, payload);
@@ -1126,76 +1086,6 @@ mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
}
-/* SFTR - Switch Flooding Table Register
- * -------------------------------------
- * The switch flooding table is used for flooding packet replication. The table
- * defines a bit mask of ports for packet replication.
- */
-#define MLXSW_REG_SFTR_ID 0x2012
-#define MLXSW_REG_SFTR_LEN 0x420
-
-MLXSW_REG_DEFINE(sftr, MLXSW_REG_SFTR_ID, MLXSW_REG_SFTR_LEN);
-
-/* reg_sftr_swid
- * Switch partition ID with which to associate the port.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
-
-/* reg_sftr_flood_table
- * Flooding table index to associate with the specific type on the specific
- * switch partition.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
-
-/* reg_sftr_index
- * Index. Used as an index into the Flooding Table in case the table is
- * configured to use VID / FID or FID Offset.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
-
-/* reg_sftr_table_type
- * See mlxsw_flood_table_type
- * Access: RW
- */
-MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
-
-/* reg_sftr_range
- * Range of entries to update
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
-
-/* reg_sftr_port
- * Local port membership (1 bit per port).
- * Access: RW
- */
-MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
-
-/* reg_sftr_cpu_port_mask
- * CPU port mask (1 bit per port).
- * Access: W
- */
-MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
-
-static inline void mlxsw_reg_sftr_pack(char *payload,
- unsigned int flood_table,
- unsigned int index,
- enum mlxsw_flood_table_type table_type,
- unsigned int range, u8 port, bool set)
-{
- MLXSW_REG_ZERO(sftr, payload);
- mlxsw_reg_sftr_swid_set(payload, 0);
- mlxsw_reg_sftr_flood_table_set(payload, flood_table);
- mlxsw_reg_sftr_index_set(payload, index);
- mlxsw_reg_sftr_table_type_set(payload, table_type);
- mlxsw_reg_sftr_range_set(payload, range);
- mlxsw_reg_sftr_port_set(payload, port, set);
- mlxsw_reg_sftr_port_mask_set(payload, port, 1);
-}
-
/* SFDF - Switch Filtering DB Flush
* --------------------------------
* The switch filtering DB flush register is used to flush the FDB.
@@ -1347,7 +1237,7 @@ MLXSW_ITEM32(reg, sldr, num_ports, 0x04, 24, 8);
MLXSW_ITEM32_INDEXED(reg, sldr, system_port, 0x08, 0, 16, 4, 0, false);
static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id,
- u8 local_port)
+ u16 local_port)
{
MLXSW_REG_ZERO(sldr, payload);
mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST);
@@ -1357,7 +1247,7 @@ static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id,
}
static inline void mlxsw_reg_sldr_lag_remove_port_pack(char *payload, u8 lag_id,
- u8 local_port)
+ u16 local_port)
{
MLXSW_REG_ZERO(sldr, payload);
mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST);
@@ -1397,7 +1287,7 @@ MLXSW_ITEM32(reg, slcr, pp, 0x00, 24, 1);
* Reserved when pp = Global Configuration
* Access: Index
*/
-MLXSW_ITEM32(reg, slcr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, slcr, 0x00, 16, 0x00, 12);
enum mlxsw_reg_slcr_type {
MLXSW_REG_SLCR_TYPE_CRC, /* default */
@@ -1515,7 +1405,7 @@ MLXSW_ITEM32(reg, slcor, col, 0x00, 30, 2);
* Not supported for CPU port
* Access: Index
*/
-MLXSW_ITEM32(reg, slcor, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, slcor, 0x00, 16, 0x00, 12);
/* reg_slcor_lag_id
* LAG Identifier. Index into the LAG descriptor table.
@@ -1531,7 +1421,7 @@ MLXSW_ITEM32(reg, slcor, lag_id, 0x00, 0, 10);
MLXSW_ITEM32(reg, slcor, port_index, 0x04, 0, 10);
static inline void mlxsw_reg_slcor_pack(char *payload,
- u8 local_port, u16 lag_id,
+ u16 local_port, u16 lag_id,
enum mlxsw_reg_slcor_col col)
{
MLXSW_REG_ZERO(slcor, payload);
@@ -1541,7 +1431,7 @@ static inline void mlxsw_reg_slcor_pack(char *payload,
}
static inline void mlxsw_reg_slcor_port_add_pack(char *payload,
- u8 local_port, u16 lag_id,
+ u16 local_port, u16 lag_id,
u8 port_index)
{
mlxsw_reg_slcor_pack(payload, local_port, lag_id,
@@ -1550,21 +1440,21 @@ static inline void mlxsw_reg_slcor_port_add_pack(char *payload,
}
static inline void mlxsw_reg_slcor_port_remove_pack(char *payload,
- u8 local_port, u16 lag_id)
+ u16 local_port, u16 lag_id)
{
mlxsw_reg_slcor_pack(payload, local_port, lag_id,
MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT);
}
static inline void mlxsw_reg_slcor_col_enable_pack(char *payload,
- u8 local_port, u16 lag_id)
+ u16 local_port, u16 lag_id)
{
mlxsw_reg_slcor_pack(payload, local_port, lag_id,
MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED);
}
static inline void mlxsw_reg_slcor_col_disable_pack(char *payload,
- u8 local_port, u16 lag_id)
+ u16 local_port, u16 lag_id)
{
mlxsw_reg_slcor_pack(payload, local_port, lag_id,
MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED);
@@ -1583,7 +1473,7 @@ MLXSW_REG_DEFINE(spmlr, MLXSW_REG_SPMLR_ID, MLXSW_REG_SPMLR_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spmlr, 0x00, 16, 0x00, 12);
/* reg_spmlr_sub_port
* Virtual port within the physical port.
@@ -1611,7 +1501,7 @@ enum mlxsw_reg_spmlr_learn_mode {
*/
MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
-static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spmlr_pack(char *payload, u16 local_port,
enum mlxsw_reg_spmlr_learn_mode mode)
{
MLXSW_REG_ZERO(spmlr, payload);
@@ -1642,7 +1532,7 @@ MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8);
*
* Note: Reserved for 802.1Q FIDs.
*/
-MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, svfa, 0x00, 16, 0x00, 12);
enum mlxsw_reg_svfa_mt {
MLXSW_REG_SVFA_MT_VID_TO_FID,
@@ -1696,7 +1586,7 @@ MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
*/
MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
-static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_svfa_pack(char *payload, u16 local_port,
enum mlxsw_reg_svfa_mt mt, bool valid,
u16 fid, u16 vid)
{
@@ -1733,7 +1623,7 @@ MLXSW_ITEM32(reg, spvtr, tport, 0x00, 24, 1);
* When tport = 1: tunnel port.
* Access: Index
*/
-MLXSW_ITEM32(reg, spvtr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvtr, 0x00, 16, 0x00, 12);
/* reg_spvtr_ippe
* Ingress Port Prio Mode Update Enable.
@@ -1803,7 +1693,7 @@ enum mlxsw_reg_spvtr_epvid_mode {
MLXSW_ITEM32(reg, spvtr, epvid_mode, 0x04, 0, 4);
static inline void mlxsw_reg_spvtr_pack(char *payload, bool tport,
- u8 local_port,
+ u16 local_port,
enum mlxsw_reg_spvtr_ipvid_mode ipvid_mode)
{
MLXSW_REG_ZERO(spvtr, payload);
@@ -1828,7 +1718,7 @@ MLXSW_REG_DEFINE(svpe, MLXSW_REG_SVPE_ID, MLXSW_REG_SVPE_LEN);
*
* Note: CPU port is not supported (uses VLAN mode only).
*/
-MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, svpe, 0x00, 16, 0x00, 12);
/* reg_svpe_vp_en
* Virtual port enable.
@@ -1838,7 +1728,7 @@ MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1);
-static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_svpe_pack(char *payload, u16 local_port,
bool enable)
{
MLXSW_REG_ZERO(svpe, payload);
@@ -1948,7 +1838,7 @@ MLXSW_REG_DEFINE(spvmlr, MLXSW_REG_SPVMLR_ID, MLXSW_REG_SPVMLR_LEN);
*
* Note: CPU port is not supported.
*/
-MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvmlr, 0x00, 16, 0x00, 12);
/* reg_spvmlr_num_rec
* Number of records to update.
@@ -1971,7 +1861,7 @@ MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN,
MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12,
MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spvmlr_pack(char *payload, u16 local_port,
u16 vid_begin, u16 vid_end,
bool learn_enable)
{
@@ -2009,7 +1899,7 @@ MLXSW_REG_DEFINE(spvc, MLXSW_REG_SPVC_ID, MLXSW_REG_SPVC_LEN);
* through Rx port i and a Tx port j then port i and port j must have the
* same configuration.
*/
-MLXSW_ITEM32(reg, spvc, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spvc, 0x00, 16, 0x00, 12);
/* reg_spvc_inner_et2
* Vlan Tag1 EtherType2 enable.
@@ -2074,7 +1964,7 @@ MLXSW_ITEM32(reg, spvc, inner_et0, 0x08, 1, 1);
*/
MLXSW_ITEM32(reg, spvc, et0, 0x08, 0, 1);
-static inline void mlxsw_reg_spvc_pack(char *payload, u8 local_port, bool et1,
+static inline void mlxsw_reg_spvc_pack(char *payload, u16 local_port, bool et1,
bool et0)
{
MLXSW_REG_ZERO(spvc, payload);
@@ -2104,7 +1994,7 @@ MLXSW_REG_DEFINE(spevet, MLXSW_REG_SPEVET_ID, MLXSW_REG_SPEVET_LEN);
* Not supported to CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, spevet, 0x00, 16, 0x00, 12);
/* reg_spevet_et_vlan
* Egress EtherType VLAN to push when SPVID.egr_et_set field set for the packet:
@@ -2115,7 +2005,7 @@ MLXSW_ITEM32(reg, spevet, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, spevet, et_vlan, 0x04, 16, 2);
-static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_spevet_pack(char *payload, u16 local_port,
u8 et_vlan)
{
MLXSW_REG_ZERO(spevet, payload);
@@ -2123,6 +2013,122 @@ static inline void mlxsw_reg_spevet_pack(char *payload, u8 local_port,
mlxsw_reg_spevet_et_vlan_set(payload, et_vlan);
}
+/* SFTR-V2 - Switch Flooding Table Version 2 Register
+ * --------------------------------------------------
+ * The switch flooding table is used for flooding packet replication. The table
+ * defines a bit mask of ports for packet replication.
+ */
+#define MLXSW_REG_SFTR2_ID 0x202F
+#define MLXSW_REG_SFTR2_LEN 0x120
+
+MLXSW_REG_DEFINE(sftr2, MLXSW_REG_SFTR2_ID, MLXSW_REG_SFTR2_LEN);
+
+/* reg_sftr2_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, swid, 0x00, 24, 8);
+
+/* reg_sftr2_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, flood_table, 0x00, 16, 6);
+
+/* reg_sftr2_index
+ * Index. Used as an index into the Flooding Table in case the table is
+ * configured to use VID / FID or FID Offset.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, index, 0x00, 0, 16);
+
+/* reg_sftr2_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sftr2, table_type, 0x04, 16, 3);
+
+/* reg_sftr2_range
+ * Range of entries to update
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr2, range, 0x04, 0, 16);
+
+/* reg_sftr2_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port, 0x20, 0x80, 1);
+
+/* reg_sftr2_port_mask
+ * Local port mask (1 bit per port).
+ * Access: WO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port_mask, 0xA0, 0x80, 1);
+
+static inline void mlxsw_reg_sftr2_pack(char *payload,
+ unsigned int flood_table,
+ unsigned int index,
+ enum mlxsw_flood_table_type table_type,
+ unsigned int range, u16 port, bool set)
+{
+ MLXSW_REG_ZERO(sftr2, payload);
+ mlxsw_reg_sftr2_swid_set(payload, 0);
+ mlxsw_reg_sftr2_flood_table_set(payload, flood_table);
+ mlxsw_reg_sftr2_index_set(payload, index);
+ mlxsw_reg_sftr2_table_type_set(payload, table_type);
+ mlxsw_reg_sftr2_range_set(payload, range);
+ mlxsw_reg_sftr2_port_set(payload, port, set);
+ mlxsw_reg_sftr2_port_mask_set(payload, port, 1);
+}
+
+/* SMID-V2 - Switch Multicast ID Version 2 Register
+ * ------------------------------------------------
+ * The MID record maps from a MID (Multicast ID), which is a unique identifier
+ * of the multicast group within the stacking domain, into a list of local
+ * ports into which the packet is replicated.
+ */
+#define MLXSW_REG_SMID2_ID 0x2034
+#define MLXSW_REG_SMID2_LEN 0x120
+
+MLXSW_REG_DEFINE(smid2, MLXSW_REG_SMID2_ID, MLXSW_REG_SMID2_LEN);
+
+/* reg_smid2_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid2, swid, 0x00, 24, 8);
+
+/* reg_smid2_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid2, mid, 0x00, 0, 16);
+
+/* reg_smid2_port
+ * Local port membership (1 bit per port).

+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid2, port, 0x20, 0x80, 1);
+
+/* reg_smid2_port_mask
+ * Local port mask (1 bit per port).
+ * Access: WO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid2, port_mask, 0xA0, 0x80, 1);
+
+static inline void mlxsw_reg_smid2_pack(char *payload, u16 mid, u16 port,
+ bool set)
+{
+ MLXSW_REG_ZERO(smid2, payload);
+ mlxsw_reg_smid2_swid_set(payload, 0);
+ mlxsw_reg_smid2_mid_set(payload, mid);
+ mlxsw_reg_smid2_port_set(payload, port, set);
+ mlxsw_reg_smid2_port_mask_set(payload, port, 1);
+}
+
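Both replacement registers grow the per-port bit arrays from 0x20 to 0x80 bytes (256 to 1024 ports) and take the port as u16, matching the wider local port space. A minimal in-tree sketch of programming one port into a MID (the helper name is hypothetical):

/* Compiles only against the in-tree mlxsw headers. */
static int example_smid2_port_set(struct mlxsw_core *core, u16 mid,
				  u16 local_port, bool member)
{
	char *smid2_pl;
	int err;

	smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
	if (!smid2_pl)
		return -ENOMEM;

	/* Sets both the membership bit and its update mask for local_port. */
	mlxsw_reg_smid2_pack(smid2_pl, mid, local_port, member);
	err = mlxsw_core_reg_write(core, MLXSW_REG(smid2), smid2_pl);
	kfree(smid2_pl);
	return err;
}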
/* CWTP - Congestion WRED ECN TClass Profile
 * -----------------------------------------
 * Configures the profiles for queues of an egress port and traffic class
@@ -2139,7 +2145,7 @@ MLXSW_REG_DEFINE(cwtp, MLXSW_REG_CWTP_ID, MLXSW_REG_CWTP_LEN);
* Not supported for CPU port
* Access: Index
*/
-MLXSW_ITEM32(reg, cwtp, local_port, 0, 16, 8);
+MLXSW_ITEM32_LP(reg, cwtp, 0x00, 16, 0x00, 12);
/* reg_cwtp_traffic_class
* Traffic Class to configure
@@ -2173,7 +2179,7 @@ MLXSW_ITEM32_INDEXED(reg, cwtp, profile_max, MLXSW_REG_CWTP_BASE_LEN,
#define MLXSW_REG_CWTP_MAX_PROFILE 2
#define MLXSW_REG_CWTP_DEFAULT_PROFILE 1
-static inline void mlxsw_reg_cwtp_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_cwtp_pack(char *payload, u16 local_port,
u8 traffic_class)
{
int i;
@@ -2217,7 +2223,7 @@ MLXSW_REG_DEFINE(cwtpm, MLXSW_REG_CWTPM_ID, MLXSW_REG_CWTPM_LEN);
* Not supported for CPU port
* Access: Index
*/
-MLXSW_ITEM32(reg, cwtpm, local_port, 0, 16, 8);
+MLXSW_ITEM32_LP(reg, cwtpm, 0x00, 16, 0x00, 12);
/* reg_cwtpm_traffic_class
* Traffic Class to configure
@@ -2291,7 +2297,7 @@ MLXSW_ITEM32(reg, cwtpm, ntcp_r, 64, 0, 2);
#define MLXSW_REG_CWTPM_RESET_PROFILE 0
-static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_cwtpm_pack(char *payload, u16 local_port,
u8 traffic_class, u8 profile,
bool wred, bool ecn)
{
@@ -2363,7 +2369,7 @@ MLXSW_ITEM32(reg, ppbt, op, 0x00, 28, 3);
* Local port. Not including CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, ppbt, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, ppbt, 0x00, 16, 0x00, 12);
/* reg_ppbt_g
* group - When set, the binding is of an ACL group. When cleared,
@@ -2382,7 +2388,7 @@ MLXSW_ITEM32(reg, ppbt, acl_info, 0x10, 0, 16);
static inline void mlxsw_reg_ppbt_pack(char *payload, enum mlxsw_reg_pxbt_e e,
enum mlxsw_reg_pxbt_op op,
- u8 local_port, u16 acl_info)
+ u16 local_port, u16 acl_info)
{
MLXSW_REG_ZERO(ppbt, payload);
mlxsw_reg_ppbt_e_set(payload, e);
@@ -3513,7 +3519,7 @@ MLXSW_REG_DEFINE(qpts, MLXSW_REG_QPTS_ID, MLXSW_REG_QPTS_LEN);
*
* Note: CPU port is supported.
*/
-MLXSW_ITEM32(reg, qpts, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qpts, 0x00, 16, 0x00, 12);
enum mlxsw_reg_qpts_trust_state {
MLXSW_REG_QPTS_TRUST_STATE_PCP = 1,
@@ -3526,7 +3532,7 @@ enum mlxsw_reg_qpts_trust_state {
*/
MLXSW_ITEM32(reg, qpts, trust_state, 0x04, 0, 3);
-static inline void mlxsw_reg_qpts_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qpts_pack(char *payload, u16 local_port,
enum mlxsw_reg_qpts_trust_state ts)
{
MLXSW_REG_ZERO(qpts, payload);
@@ -3717,7 +3723,7 @@ MLXSW_REG_DEFINE(qtct, MLXSW_REG_QTCT_ID, MLXSW_REG_QTCT_LEN);
*
* Note: CPU port is not supported.
*/
-MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qtct, 0x00, 16, 0x00, 12);
/* reg_qtct_sub_port
* Virtual port within the physical port.
@@ -3742,7 +3748,7 @@ MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4);
*/
MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4);
-static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qtct_pack(char *payload, u16 local_port,
u8 switch_prio, u8 tclass)
{
MLXSW_REG_ZERO(qtct, payload);
@@ -3766,7 +3772,7 @@ MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN);
*
* Note: CPU port is supported.
*/
-MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qeec, 0x00, 16, 0x00, 12);
enum mlxsw_reg_qeec_hr {
MLXSW_REG_QEEC_HR_PORT,
@@ -3908,8 +3914,9 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11
+#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4 11
-static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qeec_pack(char *payload, u16 local_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index)
{
@@ -3920,7 +3927,7 @@ static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
mlxsw_reg_qeec_next_element_index_set(payload, next_index);
}
-static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qeec_ptps_pack(char *payload, u16 local_port,
bool ptps)
{
MLXSW_REG_ZERO(qeec, payload);
@@ -3944,7 +3951,7 @@ MLXSW_REG_DEFINE(qrwe, MLXSW_REG_QRWE_ID, MLXSW_REG_QRWE_LEN);
*
* Note: CPU port is supported. No support for router port.
*/
-MLXSW_ITEM32(reg, qrwe, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qrwe, 0x00, 16, 0x00, 12);
/* reg_qrwe_dscp
* Whether to enable DSCP rewrite (default is 0, don't rewrite).
@@ -3958,7 +3965,7 @@ MLXSW_ITEM32(reg, qrwe, dscp, 0x04, 1, 1);
*/
MLXSW_ITEM32(reg, qrwe, pcp, 0x04, 0, 1);
-static inline void mlxsw_reg_qrwe_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qrwe_pack(char *payload, u16 local_port,
bool rewrite_pcp, bool rewrite_dscp)
{
MLXSW_REG_ZERO(qrwe, payload);
@@ -3985,7 +3992,7 @@ MLXSW_REG_DEFINE(qpdsm, MLXSW_REG_QPDSM_ID, MLXSW_REG_QPDSM_LEN);
* Local Port. Supported for data packets from CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, qpdsm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qpdsm, 0x00, 16, 0x00, 12);
/* reg_qpdsm_prio_entry_color0_e
* Enable update of the entry for color 0 and a given port.
@@ -4038,7 +4045,7 @@ MLXSW_ITEM32_INDEXED(reg, qpdsm, prio_entry_color2_dscp,
MLXSW_REG_QPDSM_BASE_LEN, 8, 6,
MLXSW_REG_QPDSM_PRIO_ENTRY_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_qpdsm_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_qpdsm_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(qpdsm, payload);
mlxsw_reg_qpdsm_local_port_set(payload, local_port);
@@ -4071,7 +4078,7 @@ MLXSW_REG_DEFINE(qpdp, MLXSW_REG_QPDP_ID, MLXSW_REG_QPDP_LEN);
* Local Port. Supported for data packets from CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, qpdp, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qpdp, 0x00, 16, 0x00, 12);
/* reg_qpdp_switch_prio
* Default port Switch Priority (default 0)
@@ -4079,7 +4086,7 @@ MLXSW_ITEM32(reg, qpdp, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, qpdp, switch_prio, 0x04, 0, 4);
-static inline void mlxsw_reg_qpdp_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_qpdp_pack(char *payload, u16 local_port,
u8 switch_prio)
{
MLXSW_REG_ZERO(qpdp, payload);
@@ -4106,7 +4113,7 @@ MLXSW_REG_DEFINE(qpdpm, MLXSW_REG_QPDPM_ID, MLXSW_REG_QPDPM_LEN);
* Local Port. Supported for data packets from CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, qpdpm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qpdpm, 0x00, 16, 0x00, 12);
/* reg_qpdpm_dscp_e
* Enable update of the specific entry. When cleared, the switch_prio and color
@@ -4125,7 +4132,7 @@ MLXSW_ITEM16_INDEXED(reg, qpdpm, dscp_entry_prio,
MLXSW_REG_QPDPM_BASE_LEN, 0, 4,
MLXSW_REG_QPDPM_DSCP_ENTRY_REC_LEN, 0x00, false);
-static inline void mlxsw_reg_qpdpm_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_qpdpm_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(qpdpm, payload);
mlxsw_reg_qpdpm_local_port_set(payload, local_port);
@@ -4157,7 +4164,7 @@ MLXSW_REG_DEFINE(qtctm, MLXSW_REG_QTCTM_ID, MLXSW_REG_QTCTM_LEN);
* No support for CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, qtctm, 0x00, 16, 0x00, 12);
/* reg_qtctm_mc
* Multicast Mode
@@ -4167,7 +4174,7 @@ MLXSW_ITEM32(reg, qtctm, local_port, 0x00, 16, 8);
MLXSW_ITEM32(reg, qtctm, mc, 0x04, 0, 1);
static inline void
-mlxsw_reg_qtctm_pack(char *payload, u8 local_port, bool mc)
+mlxsw_reg_qtctm_pack(char *payload, u16 local_port, bool mc)
{
MLXSW_REG_ZERO(qtctm, payload);
mlxsw_reg_qtctm_local_port_set(payload, local_port);
@@ -4300,7 +4307,7 @@ MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pmlp, 0x00, 16, 0x00, 12);
/* reg_pmlp_width
* 0 - Unmap local port.
@@ -4331,7 +4338,7 @@ MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 4, 0x04, 0x00, false);
*/
MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 4, 0x04, 0x00, false);
-static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_pmlp_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(pmlp, payload);
mlxsw_reg_pmlp_local_port_set(payload, local_port);
@@ -4350,7 +4357,7 @@ MLXSW_REG_DEFINE(pmtu, MLXSW_REG_PMTU_ID, MLXSW_REG_PMTU_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pmtu, 0x00, 16, 0x00, 12);
/* reg_pmtu_max_mtu
* Maximum MTU.
@@ -4378,7 +4385,7 @@ MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
*/
MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
-static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_pmtu_pack(char *payload, u16 local_port,
u16 new_mtu)
{
MLXSW_REG_ZERO(pmtu, payload);
@@ -4412,7 +4419,7 @@ MLXSW_ITEM32(reg, ptys, an_disable_admin, 0x00, 30, 1);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, ptys, 0x00, 16, 0x00, 12);
#define MLXSW_REG_PTYS_PROTO_MASK_IB BIT(0)
#define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2)
@@ -4572,7 +4579,7 @@ enum mlxsw_reg_ptys_connector_type {
*/
MLXSW_ITEM32(reg, ptys, connector_type, 0x2C, 0, 4);
-static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_ptys_eth_pack(char *payload, u16 local_port,
u32 proto_admin, bool autoneg)
{
MLXSW_REG_ZERO(ptys, payload);
@@ -4582,7 +4589,7 @@ static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port,
mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg);
}
-static inline void mlxsw_reg_ptys_ext_eth_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_ptys_ext_eth_pack(char *payload, u16 local_port,
u32 proto_admin, bool autoneg)
{
MLXSW_REG_ZERO(ptys, payload);
@@ -4624,7 +4631,7 @@ static inline void mlxsw_reg_ptys_ext_eth_unpack(char *payload,
mlxsw_reg_ptys_ext_eth_proto_oper_get(payload);
}
-static inline void mlxsw_reg_ptys_ib_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_ptys_ib_pack(char *payload, u16 local_port,
u16 proto_admin, u16 link_width)
{
MLXSW_REG_ZERO(ptys, payload);
@@ -4672,7 +4679,7 @@ MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
* port number, if single_base_mac = 0 then local_port is reserved
* Access: RW
*/
-MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, ppad, 0x00, 16, 0x00, 24);
/* reg_ppad_mac
* If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
@@ -4682,7 +4689,7 @@ MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
- u8 local_port)
+ u16 local_port)
{
MLXSW_REG_ZERO(ppad, payload);
mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
@@ -4711,7 +4718,7 @@ MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, paos, 0x00, 16, 0x00, 12);
/* reg_paos_admin_status
* Port administrative state (the desired state of the port):
@@ -4756,7 +4763,7 @@ MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
*/
MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
-static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_paos_pack(char *payload, u16 local_port,
enum mlxsw_port_admin_status status)
{
MLXSW_REG_ZERO(paos, payload);
@@ -4782,7 +4789,7 @@ MLXSW_REG_DEFINE(pfcc, MLXSW_REG_PFCC_ID, MLXSW_REG_PFCC_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pfcc, 0x00, 16, 0x00, 12);
/* reg_pfcc_pnat
* Port number access type. Determines the way local_port is interpreted:
@@ -4899,7 +4906,7 @@ static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en)
mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en);
}
-static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_pfcc_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(pfcc, payload);
mlxsw_reg_pfcc_local_port_set(payload, local_port);
@@ -4928,11 +4935,9 @@ MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
/* reg_ppcnt_local_port
* Local port number.
- * 255 indicates all ports on the device, and is only allowed
- * for Set() operation.
* Access: Index
*/
-MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, ppcnt, 0x00, 16, 0x00, 12);
/* reg_ppcnt_pnat
* Port number access type:
@@ -4981,6 +4986,14 @@ MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
*/
MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+/* reg_ppcnt_lp_gl
+ * Local port global variable.
+ * 0: local_port 255 means all ports of the device.
+ * 1: local_port always holds a specific local port number.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, lp_gl, 0x04, 30, 1);
+
/* reg_ppcnt_prio_tc
* Priority for counter sets that support per-priority counting; valid values: 0-7.
* Traffic class for counter sets that support per-traffic-class counting,
@@ -5404,7 +5417,7 @@ MLXSW_ITEM64(reg, ppcnt, wred_discard,
MLXSW_ITEM64(reg, ppcnt, ecn_marked_tc,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
-static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u16 local_port,
enum mlxsw_reg_ppcnt_grp grp,
u8 prio_tc)
{
@@ -5414,6 +5427,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
mlxsw_reg_ppcnt_pnat_set(payload, 0);
mlxsw_reg_ppcnt_grp_set(payload, grp);
mlxsw_reg_ppcnt_clr_set(payload, 0);
+ mlxsw_reg_ppcnt_lp_gl_set(payload, 1);
mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
}
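With lp_gl now always set by the pack helper, the payload's local_port is interpreted as a concrete port number and the old 255-means-all-ports convention no longer applies to these reads. Fetching one port's IEEE 802.3 counter group still looks like this (in-tree sketch; the wrapper name is hypothetical):

/* Compiles only against the in-tree mlxsw headers. */
static int example_ppcnt_read(struct mlxsw_core *core, u16 local_port,
			      char *ppcnt_pl)
{
	/* Pack for one explicit port; lp_gl is set inside the helper. */
	mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	return mlxsw_core_reg_query(core, MLXSW_REG(ppcnt), ppcnt_pl);
}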
@@ -5430,7 +5444,7 @@ MLXSW_REG_DEFINE(plib, MLXSW_REG_PLIB_ID, MLXSW_REG_PLIB_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, plib, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, plib, 0x00, 16, 0x00, 12);
/* reg_plib_ib_port
* InfiniBand port remapping for local_port.
@@ -5468,7 +5482,7 @@ MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pptb, 0x00, 16, 0x00, 12);
/* reg_pptb_um
* Enables the update of the untagged_buf field.
@@ -5515,7 +5529,7 @@ MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
-static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_pptb_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(pptb, payload);
mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
@@ -5545,7 +5559,7 @@ MLXSW_REG_DEFINE(pbmc, MLXSW_REG_PBMC_ID, MLXSW_REG_PBMC_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pbmc, 0x00, 16, 0x00, 12);
/* reg_pbmc_xoff_timer_value
* When device generates a pause frame, it uses this value as the pause
@@ -5612,7 +5626,7 @@ MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16,
MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16,
0x08, 0x04, false);
-static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_pbmc_pack(char *payload, u16 local_port,
u16 xoff_timer_value, u16 xoff_refresh)
{
MLXSW_REG_ZERO(pbmc, payload);
@@ -5661,7 +5675,7 @@ MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pspa, 0x00, 16, 0x00, 0);
/* reg_pspa_sub_port
* Virtual port within the local port. Set to 0 when virtual ports are
@@ -5670,7 +5684,7 @@ MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
-static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u16 local_port)
{
MLXSW_REG_ZERO(pspa, payload);
mlxsw_reg_pspa_swid_set(payload, swid);
@@ -5772,7 +5786,7 @@ MLXSW_REG_DEFINE(pplr, MLXSW_REG_PPLR_ID, MLXSW_REG_PPLR_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pplr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pplr, 0x00, 16, 0x00, 12);
/* Phy local loopback. When set the port's egress traffic is looped back
* to the receiver and the port transmitter is disabled.
@@ -5785,7 +5799,7 @@ MLXSW_ITEM32(reg, pplr, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, pplr, lb_en, 0x04, 0, 8);
-static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_pplr_pack(char *payload, u16 local_port,
bool phy_local)
{
MLXSW_REG_ZERO(pplr, payload);
@@ -5846,7 +5860,7 @@ MLXSW_ITEM32(reg, pmtdb, status, 0x00, 0, 4);
* the module.
* Access: RO
*/
-MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 8, 0x02, 0x00, false);
+MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 10, 0x02, 0x00, false);
static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module,
u8 ports_width, u8 num_ports)
@@ -5915,7 +5929,7 @@ MLXSW_REG_DEFINE(pddr, MLXSW_REG_PDDR_ID, MLXSW_REG_PDDR_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pddr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pddr, 0x00, 16, 0x00, 12);
enum mlxsw_reg_pddr_page_select {
MLXSW_REG_PDDR_PAGE_SELECT_TROUBLESHOOTING_INFO = 1,
@@ -5944,7 +5958,7 @@ MLXSW_ITEM32(reg, pddr, trblsh_group_opcode, 0x08, 0, 16);
*/
MLXSW_ITEM32(reg, pddr, trblsh_status_opcode, 0x0C, 0, 16);
-static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_pddr_pack(char *payload, u16 local_port,
u8 page_select)
{
MLXSW_REG_ZERO(pddr, payload);
@@ -6014,7 +6028,7 @@ MLXSW_REG_DEFINE(pllp, MLXSW_REG_PLLP_ID, MLXSW_REG_PLLP_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pllp, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pllp, 0x00, 16, 0x00, 12);
/* reg_pllp_label_port
* Front panel label of the port.
@@ -6034,7 +6048,7 @@ MLXSW_ITEM32(reg, pllp, split_num, 0x04, 0, 4);
*/
MLXSW_ITEM32(reg, pllp, slot_index, 0x08, 0, 4);
-static inline void mlxsw_reg_pllp_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_pllp_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(pllp, payload);
mlxsw_reg_pllp_local_port_set(payload, local_port);
@@ -10245,7 +10259,7 @@ MLXSW_REG_DEFINE(mpar, MLXSW_REG_MPAR_ID, MLXSW_REG_MPAR_LEN);
* The local port to mirror the packets from.
* Access: Index
*/
-MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, mpar, 0x00, 16, 0x00, 4);
enum mlxsw_reg_mpar_i_e {
MLXSW_REG_MPAR_TYPE_EGRESS,
@@ -10282,7 +10296,7 @@ MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4);
*/
MLXSW_ITEM32(reg, mpar, probability_rate, 0x08, 0, 32);
-static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_mpar_pack(char *payload, u16 local_port,
enum mlxsw_reg_mpar_i_e i_e,
bool enable, u8 pa_id,
u32 probability_rate)
@@ -10386,7 +10400,7 @@ MLXSW_REG_DEFINE(mlcr, MLXSW_REG_MLCR_ID, MLXSW_REG_MLCR_LEN);
* Local port number.
* Access: RW
*/
-MLXSW_ITEM32(reg, mlcr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, mlcr, 0x00, 16, 0x00, 24);
#define MLXSW_REG_MLCR_DURATION_MAX 0xFFFF
@@ -10405,7 +10419,7 @@ MLXSW_ITEM32(reg, mlcr, beacon_duration, 0x04, 0, 16);
*/
MLXSW_ITEM32(reg, mlcr, beacon_remain, 0x08, 0, 16);
-static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_mlcr_pack(char *payload, u16 local_port,
bool active)
{
MLXSW_REG_ZERO(mlcr, payload);
@@ -10778,7 +10792,7 @@ MLXSW_REG_DEFINE(mpsc, MLXSW_REG_MPSC_ID, MLXSW_REG_MPSC_LEN);
* Not supported for CPU port
* Access: Index
*/
-MLXSW_ITEM32(reg, mpsc, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, mpsc, 0x00, 16, 0x00, 12);
/* reg_mpsc_e
* Enable sampling on port local_port
@@ -10795,7 +10809,7 @@ MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1);
*/
MLXSW_ITEM32(reg, mpsc, rate, 0x08, 0, 32);
-static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e,
+static inline void mlxsw_reg_mpsc_pack(char *payload, u16 local_port, bool e,
u32 rate)
{
MLXSW_REG_ZERO(mpsc, payload);
@@ -11003,7 +11017,7 @@ MLXSW_REG_DEFINE(momte, MLXSW_REG_MOMTE_ID, MLXSW_REG_MOMTE_LEN);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, momte, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, momte, 0x00, 16, 0x00, 12);
enum mlxsw_reg_momte_type {
MLXSW_REG_MOMTE_TYPE_WRED = 0x20,
@@ -11030,7 +11044,7 @@ MLXSW_ITEM32(reg, momte, type, 0x04, 0, 8);
*/
MLXSW_ITEM_BIT_ARRAY(reg, momte, tclass_en, 0x08, 0x08, 1);
-static inline void mlxsw_reg_momte_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_momte_pack(char *payload, u16 local_port,
enum mlxsw_reg_momte_type type)
{
MLXSW_REG_ZERO(momte, payload);
@@ -11098,7 +11112,7 @@ MLXSW_REG_DEFINE(mtpptr, MLXSW_REG_MTPPTR_ID, MLXSW_REG_MTPPTR_LEN);
* Not supported for CPU port.
* Access: Index
*/
-MLXSW_ITEM32(reg, mtpptr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, mtpptr, 0x00, 16, 0x00, 12);
enum mlxsw_reg_mtpptr_dir {
MLXSW_REG_MTPPTR_DIR_INGRESS,
@@ -11305,7 +11319,7 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
* -----------------------------------
*/
#define MLXSW_REG_MFDE_ID 0x9200
-#define MLXSW_REG_MFDE_LEN 0x18
+#define MLXSW_REG_MFDE_LEN 0x30
MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN);
@@ -11315,10 +11329,32 @@ MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN);
*/
MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 24, 8);
+enum mlxsw_reg_mfde_severity {
+ /* Unrecoverable switch behavior */
+ MLXSW_REG_MFDE_SEVERITY_FATL = 2,
+ /* Unexpected state with possible systemic failure */
+ MLXSW_REG_MFDE_SEVERITY_NRML = 3,
+ /* Unexpected state without systemic failure */
+ MLXSW_REG_MFDE_SEVERITY_INTR = 5,
+};
+
+/* reg_mfde_severity
+ * The severity of the event.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, severity, 0x00, 16, 8);
+
enum mlxsw_reg_mfde_event_id {
+ /* CRspace timeout */
MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO = 1,
/* KVD insertion machine stopped */
MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP,
+ /* Triggered by MFGD.trigger_test */
+ MLXSW_REG_MFDE_EVENT_ID_TEST,
+ /* Triggered when firmware hits an assert */
+ MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT,
+ /* Fatal error interrupt from hardware */
+ MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE,
};
/* reg_mfde_event_id
@@ -11359,32 +11395,110 @@ MLXSW_ITEM32(reg, mfde, command_type, 0x04, 24, 2);
*/
MLXSW_ITEM32(reg, mfde, reg_attr_id, 0x04, 0, 16);
-/* reg_mfde_log_address
+/* reg_mfde_crspace_to_log_address
* crspace address accessed, which resulted in timeout.
- * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
* Access: RO
*/
-MLXSW_ITEM32(reg, mfde, log_address, 0x10, 0, 32);
+MLXSW_ITEM32(reg, mfde, crspace_to_log_address, 0x10, 0, 32);
+
+/* reg_mfde_crspace_to_oe
+ * 0 - New event
+ * 1 - Old event, occurred before MFGD activation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, crspace_to_oe, 0x14, 24, 1);
-/* reg_mfde_log_id
+/* reg_mfde_crspace_to_log_id
* Which irisc triggered the timeout.
- * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
* Access: RO
*/
-MLXSW_ITEM32(reg, mfde, log_id, 0x14, 0, 4);
+MLXSW_ITEM32(reg, mfde, crspace_to_log_id, 0x14, 0, 4);
-/* reg_mfde_log_ip
+/* reg_mfde_crspace_to_log_ip
* IP (instruction pointer) that triggered the timeout.
- * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
* Access: RO
*/
-MLXSW_ITEM64(reg, mfde, log_ip, 0x18, 0, 64);
+MLXSW_ITEM64(reg, mfde, crspace_to_log_ip, 0x18, 0, 64);
+
+/* reg_mfde_kvd_im_stop_oe
+ * 0 - New event
+ * 1 - Old event, occurred before MFGD activation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, kvd_im_stop_oe, 0x10, 24, 1);
-/* reg_mfde_pipes_mask
+/* reg_mfde_kvd_im_stop_pipes_mask
* Bit per kvh pipe.
* Access: RO
*/
-MLXSW_ITEM32(reg, mfde, pipes_mask, 0x10, 0, 16);
+MLXSW_ITEM32(reg, mfde, kvd_im_stop_pipes_mask, 0x10, 0, 16);
+
+/* reg_mfde_fw_assert_var0-4
+ * Variables passed to assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_var0, 0x10, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var1, 0x14, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var2, 0x18, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var3, 0x1C, 0, 32);
+MLXSW_ITEM32(reg, mfde, fw_assert_var4, 0x20, 0, 32);
+
+/* reg_mfde_fw_assert_existptr
+ * The instruction pointer when the assert was triggered.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_existptr, 0x24, 0, 32);
+
+/* reg_mfde_fw_assert_callra
+ * The return address after the assert was triggered.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_callra, 0x28, 0, 32);
+
+/* reg_mfde_fw_assert_oe
+ * 0 - New event
+ * 1 - Old event, occurred before MFGD activation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_oe, 0x2C, 24, 1);
+
+/* reg_mfde_fw_assert_tile_v
+ * 0: The assert was from main
+ * 1: The assert was from a tile
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_tile_v, 0x2C, 23, 1);
+
+/* reg_mfde_fw_assert_tile_index
+ * When tile_v=1, the tile_index that caused the assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_tile_index, 0x2C, 16, 6);
+
+/* reg_mfde_fw_assert_ext_synd
+ * A generated one-to-one identifier that is specific to each assert.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fw_assert_ext_synd, 0x2C, 0, 16);
+
+/* reg_mfde_fatal_cause_id
+ * HW interrupt cause id.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fatal_cause_id, 0x10, 0, 18);
+
+/* reg_mfde_fatal_cause_tile_v
+ * 0: The fatal event was from main
+ * 1: The fatal event was from a tile
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fatal_cause_tile_v, 0x14, 23, 1);
+
+/* reg_mfde_fatal_cause_tile_index
+ * When tile_v=1, the tile_index that caused the fatal event.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, fatal_cause_tile_index, 0x14, 16, 6);
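MFDE grows from 0x18 to 0x30 bytes, and the fields from offset 0x10 onward become event-specific overlays; that is why the old log_* items gain crspace_to_/kvd_im_stop_/fw_assert_/fatal_cause_ prefixes and consumers must dispatch on event_id before reading. A hedged decoding sketch (in-tree; handler shape only, not the driver's actual reporting path):

/* Compiles only against the in-tree mlxsw headers. */
static void example_mfde_decode(const char *mfde_pl)
{
	switch (mlxsw_reg_mfde_event_id_get(mfde_pl)) {
	case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
		pr_info("crspace timeout at 0x%x\n",
			mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl));
		break;
	case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
		pr_info("kvd stop, pipes 0x%x\n",
			mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl));
		break;
	case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
		pr_info("fw assert, synd 0x%x\n",
			mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl));
		break;
	case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
		pr_info("fatal cause %u\n",
			mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl));
		break;
	}
}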
/* TNGCR - Tunneling NVE General Configuration Register
* ----------------------------------------------------
@@ -11692,7 +11806,7 @@ MLXSW_REG_DEFINE(tnqdr, MLXSW_REG_TNQDR_ID, MLXSW_REG_TNQDR_LEN);
* Local port number (receive port). CPU port is supported.
* Access: Index
*/
-MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, tnqdr, 0x00, 16, 0x00, 12);
/* reg_tnqdr_dscp
* For encapsulation, the default DSCP.
@@ -11700,7 +11814,7 @@ MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, tnqdr, dscp, 0x04, 0, 6);
-static inline void mlxsw_reg_tnqdr_pack(char *payload, u8 local_port)
+static inline void mlxsw_reg_tnqdr_pack(char *payload, u16 local_port)
{
MLXSW_REG_ZERO(tnqdr, payload);
mlxsw_reg_tnqdr_local_port_set(payload, local_port);
@@ -12028,7 +12142,7 @@ MLXSW_REG_DEFINE(sbcm, MLXSW_REG_SBCM_ID, MLXSW_REG_SBCM_LEN);
* For Egress: excludes IP Router
* Access: Index
*/
-MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, sbcm, 0x00, 16, 0x00, 4);
/* reg_sbcm_pg_buff
* PG buffer - Port PG (dir=ingress) / traffic class (dir=egress)
@@ -12082,7 +12196,7 @@ MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
*/
MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
-static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
+static inline void mlxsw_reg_sbcm_pack(char *payload, u16 local_port, u8 pg_buff,
enum mlxsw_reg_sbxx_dir dir,
u32 min_buff, u32 max_buff,
bool infi_max, u8 pool)
@@ -12114,7 +12228,7 @@ MLXSW_REG_DEFINE(sbpm, MLXSW_REG_SBPM_ID, MLXSW_REG_SBPM_LEN);
* For Egress: excludes IP Router
* Access: Index
*/
-MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, sbpm, 0x00, 16, 0x00, 12);
/* reg_sbpm_pool
* The pool associated to quota counting on the local_port.
@@ -12168,7 +12282,7 @@ MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
*/
MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
-static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
+static inline void mlxsw_reg_sbpm_pack(char *payload, u16 local_port, u8 pool,
enum mlxsw_reg_sbxx_dir dir, bool clr,
u32 min_buff, u32 max_buff)
{
@@ -12266,6 +12380,16 @@ MLXSW_REG_DEFINE(sbsr, MLXSW_REG_SBSR_ID, MLXSW_REG_SBSR_LEN);
*/
MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1);
+#define MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE 256
+
+/* reg_sbsr_port_page
+ * Determines the range of the ports specified in the 'ingress_port_mask'
+ * and 'egress_port_mask' bit masks.
+ * {ingress,egress}_port_mask[x] is (256 * port_page) + x
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbsr, port_page, 0x04, 0, 4);
+
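Concretely, bit x of a mask names port (256 * port_page) + x, so sampling shared-buffer status for port 700 means requesting page 2 and inspecting bit 188. The arithmetic on its own, as a standalone sketch:

#include <assert.h>
#include <stdint.h>

#define SBSR_PORTS_PER_PAGE 256	/* MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE */

int main(void)
{
	uint16_t local_port = 700;
	uint8_t page = local_port / SBSR_PORTS_PER_PAGE;
	uint8_t bit = local_port % SBSR_PORTS_PER_PAGE;

	assert(page == 2 && bit == 188);	/* 700 = 256 * 2 + 188 */
	return 0;
}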
/* reg_sbsr_ingress_port_mask
* Bit vector for all ingress network ports.
* Indicates which of the ports (for which the relevant bit is set)
@@ -12353,7 +12477,7 @@ MLXSW_REG_DEFINE(sbib, MLXSW_REG_SBIB_ID, MLXSW_REG_SBIB_LEN);
* Not supported for CPU port and router port
* Access: Index
*/
-MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, sbib, 0x00, 16, 0x00, 12);
/* reg_sbib_buff_size
* Units represented in cells
@@ -12363,7 +12487,7 @@ MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24);
-static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port,
+static inline void mlxsw_reg_sbib_pack(char *payload, u16 local_port,
u32 buff_size)
{
MLXSW_REG_ZERO(sbib, payload);
@@ -12374,7 +12498,6 @@ static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port,
static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(sgcr),
MLXSW_REG(spad),
- MLXSW_REG(smid),
MLXSW_REG(sspr),
MLXSW_REG(sfdat),
MLXSW_REG(sfd),
@@ -12384,7 +12507,6 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvm),
MLXSW_REG(spaft),
MLXSW_REG(sfgc),
- MLXSW_REG(sftr),
MLXSW_REG(sfdf),
MLXSW_REG(sldr),
MLXSW_REG(slcr),
@@ -12397,6 +12519,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvmlr),
MLXSW_REG(spvc),
MLXSW_REG(spevet),
+ MLXSW_REG(sftr2),
+ MLXSW_REG(smid2),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
MLXSW_REG(pgcr),
@@ -12556,7 +12680,7 @@ MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
* Local port number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
+MLXSW_ITEM32_LP(reg, pude, 0x00, 16, 0x00, 12);
/* reg_pude_admin_status
* Port administrative state (the desired state).
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 03e5bad4e405..aa411dec62f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -46,8 +46,8 @@
#include "spectrum_trap.h"
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 2008
-#define MLXSW_SP1_FWREV_SUBMINOR 3326
+#define MLXSW_SP1_FWREV_MINOR 2010
+#define MLXSW_SP1_FWREV_SUBMINOR 1006
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -63,8 +63,8 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP2_FWREV_MAJOR 29
-#define MLXSW_SP2_FWREV_MINOR 2008
-#define MLXSW_SP2_FWREV_SUBMINOR 3326
+#define MLXSW_SP2_FWREV_MINOR 2010
+#define MLXSW_SP2_FWREV_SUBMINOR 1006
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -78,8 +78,8 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP3_FWREV_MAJOR 30
-#define MLXSW_SP3_FWREV_MINOR 2008
-#define MLXSW_SP3_FWREV_SUBMINOR 3326
+#define MLXSW_SP3_FWREV_MINOR 2010
+#define MLXSW_SP3_FWREV_SUBMINOR 1006
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
@@ -95,6 +95,7 @@ static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
+static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
@@ -303,7 +304,7 @@ int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
- unsigned char *addr)
+ const unsigned char *addr)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char ppad_pl[MLXSW_REG_PPAD_LEN];
@@ -352,7 +353,7 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
}
static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 swid)
+ u16 local_port, u8 swid)
{
char pspa_pl[MLXSW_REG_PSPA_LEN];
@@ -483,7 +484,7 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
}
static int
-mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
struct mlxsw_sp_port_mapping *port_mapping)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
@@ -535,7 +536,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
static int
-mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const struct mlxsw_sp_port_mapping *port_mapping)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
@@ -560,7 +561,7 @@ err_pmlp_write:
return err;
}
-static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u8 module)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
@@ -1474,7 +1475,7 @@ mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 *port_number,
+ u16 local_port, u8 *port_number,
u8 *split_port_subnumber,
u8 *slot_index)
{
@@ -1490,7 +1491,7 @@ static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
bool split,
struct mlxsw_sp_port_mapping *port_mapping)
{
@@ -1781,7 +1782,7 @@ err_port_swid_set:
return err;
}
-static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
u8 module = mlxsw_sp_port->mapping.module;
@@ -1848,12 +1849,12 @@ static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp_port);
}
-static bool mlxsw_sp_local_port_valid(u8 local_port)
+static bool mlxsw_sp_local_port_valid(u16 local_port)
{
return local_port != MLXSW_PORT_CPU_PORT;
}
-static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
if (!mlxsw_sp_local_port_valid(local_port))
return false;
@@ -1971,7 +1972,7 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
split_port_mapping = *port_mapping;
split_port_mapping.width /= count;
for (i = 0; i < count; i++) {
- u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
if (!mlxsw_sp_local_port_valid(s_local_port))
continue;
@@ -1987,7 +1988,7 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
err_port_create:
for (i--; i >= 0; i--) {
- u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
@@ -2004,7 +2005,7 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
/* Go over original unsplit ports in the gap and recreate them. */
for (i = 0; i < count; i++) {
- u8 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
port_mapping = mlxsw_sp->port_mapping[local_port];
if (!port_mapping || !mlxsw_sp_local_port_valid(local_port))
@@ -2015,14 +2016,14 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_port *
-mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
return mlxsw_sp->ports[local_port];
return NULL;
}
-static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
+static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
unsigned int count,
struct netlink_ext_ack *extack)
{
@@ -2065,7 +2066,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
port_mapping = mlxsw_sp_port->mapping;
for (i = 0; i < count; i++) {
- u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
@@ -2085,7 +2086,7 @@ err_port_split_create:
return err;
}
-static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
+static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -2121,7 +2122,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
}
for (i = 0; i < count; i++) {
- u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+ u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
@@ -2148,7 +2149,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
struct mlxsw_sp_port *mlxsw_sp_port;
enum mlxsw_reg_pude_oper_status status;
unsigned int max_ports;
- u8 local_port;
+ u16 local_port;
max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
local_port = mlxsw_reg_pude_local_port_get(pude_pl);
@@ -2174,7 +2175,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
char *mtpptr_pl, bool ingress)
{
- u8 local_port;
+ u16 local_port;
u8 num_rec;
int i;
@@ -2212,7 +2213,7 @@ static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
}
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
- u8 local_port, void *priv)
+ u16 local_port, void *priv)
{
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
@@ -2236,7 +2237,7 @@ void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
netif_receive_skb(skb);
}
-static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
void *priv)
{
skb->offload_fwd_mark = 1;
@@ -2244,7 +2245,7 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
}
static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
- u8 local_port, void *priv)
+ u16 local_port, void *priv)
{
skb->offload_l3_fwd_mark = 1;
skb->offload_fwd_mark = 1;
@@ -2252,7 +2253,7 @@ static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
}
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port)
+ u16 local_port)
{
mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
@@ -2755,6 +2756,140 @@ static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
mutex_destroy(&mlxsw_sp->parsing.lock);
}
+struct mlxsw_sp_ipv6_addr_node {
+ struct in6_addr key;
+ struct rhash_head ht_node;
+ u32 kvdl_index;
+ refcount_t refcount;
+};
+
+static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
+ .head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
+ .key_len = sizeof(struct in6_addr),
+ .automatic_shrinking = true,
+};
+
+static int
+mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
+ u32 *p_kvdl_index)
+{
+ struct mlxsw_sp_ipv6_addr_node *node;
+ char rips_pl[MLXSW_REG_RIPS_LEN];
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ p_kvdl_index);
+ if (err)
+ return err;
+
+ mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
+ if (err)
+ goto err_rips_write;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ err = -ENOMEM;
+ goto err_node_alloc;
+ }
+
+ node->key = *addr6;
+ node->kvdl_index = *p_kvdl_index;
+ refcount_set(&node->refcount, 1);
+
+ err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
+ &node->ht_node,
+ mlxsw_sp_ipv6_addr_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return 0;
+
+err_rhashtable_insert:
+ kfree(node);
+err_node_alloc:
+err_rips_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ *p_kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipv6_addr_node *node)
+{
+ u32 kvdl_index = node->kvdl_index;
+
+ rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
+ mlxsw_sp_ipv6_addr_ht_params);
+ kfree(node);
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ kvdl_index);
+}
+
+int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index)
+{
+ struct mlxsw_sp_ipv6_addr_node *node;
+ int err = 0;
+
+ mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
+ node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
+ mlxsw_sp_ipv6_addr_ht_params);
+ if (node) {
+ refcount_inc(&node->refcount);
+ *p_kvdl_index = node->kvdl_index;
+ goto out_unlock;
+ }
+
+ err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
+ return err;
+}
+
+void
+mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
+{
+ struct mlxsw_sp_ipv6_addr_node *node;
+
+ mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
+ node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
+ mlxsw_sp_ipv6_addr_ht_params);
+ if (WARN_ON(!node))
+ goto out_unlock;
+
+ if (!refcount_dec_and_test(&node->refcount))
+ goto out_unlock;
+
+ mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);
+
+out_unlock:
+ mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
+}
+
+static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
+ &mlxsw_sp_ipv6_addr_ht_params);
+ if (err)
+ return err;
+
+ mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
+ return 0;
+}
+
+static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
+ rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
+}
+
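The helpers above implement get-or-create interning: the first user of an IPv6 address allocates a KVDL entry, programs it via RIPS and hashes it; later users share it through the refcount, and the last put frees both the node and the KVDL entry. A minimal userspace analogue of that discipline (assumptions: a linked list stands in for the rhashtable, a counter stands in for KVDL allocation, and locking is omitted) might look like:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
	char key[40];          /* stands in for struct in6_addr */
	int index;             /* stands in for the KVDL index */
	int refcount;
	struct node *next;
};

static struct node *head;
static int next_index;

static int addr_index_get(const char *key, int *p_index)
{
	struct node *n;

	for (n = head; n; n = n->next)
		if (!strcmp(n->key, key)) {
			n->refcount++;         /* share the existing entry */
			*p_index = n->index;
			return 0;
		}

	n = calloc(1, sizeof(*n));
	if (!n)
		return -1;
	snprintf(n->key, sizeof(n->key), "%s", key);
	n->index = next_index++;               /* "allocate" an index */
	n->refcount = 1;
	n->next = head;
	head = n;
	*p_index = n->index;
	return 0;
}

static void addr_index_put(const char *key)
{
	struct node **pn, *n;

	for (pn = &head; (n = *pn); pn = &n->next)
		if (!strcmp(n->key, key)) {
			if (--n->refcount == 0) {
				*pn = n->next; /* last put frees the entry */
				free(n);
			}
			return;
		}
}

int main(void)
{
	int idx = -1;

	addr_index_get("2001:db8::1", &idx);
	addr_index_get("2001:db8::1", &idx); /* same index, refcount now 2 */
	printf("index %d\n", idx);
	addr_index_put("2001:db8::1");
	addr_index_put("2001:db8::1");       /* entry freed here */
	return 0;
}

As in the driver, every successful get must be balanced by a put with the same address, or the entry (and its index) leaks.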
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack)
@@ -2843,6 +2978,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_afa_init;
}
+ err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
+ goto err_ipv6_addr_ht_init;
+ }
+
err = mlxsw_sp_nve_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
@@ -2944,6 +3085,8 @@ err_router_init:
err_acl_init:
mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
+ mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
+err_ipv6_addr_ht_init:
mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -3013,6 +3156,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
@@ -3042,6 +3186,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
@@ -3058,6 +3203,36 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
+static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
+ mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
+ mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
+ mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
+ mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+ mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
+ mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
+ mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
+ mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
+ mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
+ mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
+ mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
+ mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
+ mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
+ mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
+ mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
+
+ return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
+}
+
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -3075,6 +3250,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_nve_fini(mlxsw_sp);
+ mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
@@ -3336,7 +3512,7 @@ err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
- devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core));
return err;
}
@@ -3370,7 +3546,7 @@ err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
- devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core));
return err;
}
@@ -3486,7 +3662,7 @@ static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
}
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
@@ -3616,6 +3792,45 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.temp_warn_enabled = true,
};
+static struct mlxsw_driver mlxsw_sp4_driver = {
+ .kind = mlxsw_sp4_driver_name,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp4_init,
+ .fini = mlxsw_sp_fini,
+ .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
+ .trap_group_set = mlxsw_sp_trap_group_set,
+ .trap_policer_init = mlxsw_sp_trap_policer_init,
+ .trap_policer_fini = mlxsw_sp_trap_policer_fini,
+ .trap_policer_set = mlxsw_sp_trap_policer_set,
+ .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp2_resources_register,
+ .params_register = mlxsw_sp2_params_register,
+ .params_unregister = mlxsw_sp2_params_unregister,
+ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp2_config_profile,
+ .res_query_enabled = true,
+ .fw_fatal_enabled = true,
+ .temp_warn_enabled = true,
+};
+
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
@@ -4783,6 +4998,16 @@ static struct pci_driver mlxsw_sp3_pci_driver = {
.id_table = mlxsw_sp3_pci_id_table,
};
+static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp4_pci_driver = {
+ .name = mlxsw_sp4_driver_name,
+ .id_table = mlxsw_sp4_pci_id_table,
+};
+
static int __init mlxsw_sp_module_init(void)
{
int err;
@@ -4802,6 +5027,10 @@ static int __init mlxsw_sp_module_init(void)
if (err)
goto err_sp3_core_driver_register;
+ err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
+ if (err)
+ goto err_sp4_core_driver_register;
+
err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
if (err)
goto err_sp1_pci_driver_register;
@@ -4814,13 +5043,21 @@ static int __init mlxsw_sp_module_init(void)
if (err)
goto err_sp3_pci_driver_register;
+ err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
+ if (err)
+ goto err_sp4_pci_driver_register;
+
return 0;
+err_sp4_pci_driver_register:
+ mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
err_sp3_pci_driver_register:
mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
+err_sp4_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
@@ -4834,9 +5071,11 @@ err_sp1_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
+ mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
@@ -4853,6 +5092,7 @@ MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
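Hooking Spectrum-4 into module init/exit follows the usual kernel unwind ladder extended above: every successful registration gains a matching unregistration in the error path, in exact reverse order, so a failure at any step leaves nothing half-registered. A compilable toy rendition of the pattern (names are placeholders; the third step is made to fail on purpose):

#include <stdio.h>

static int register_a(void) { puts("a registered"); return 0; }
static void unregister_a(void) { puts("a unregistered"); }
static int register_b(void) { puts("b registered"); return 0; }
static void unregister_b(void) { puts("b unregistered"); }
static int register_c(void) { return -1; } /* simulated failure */

static int demo_init(void)
{
	int err;

	err = register_a();
	if (err)
		goto err_a_register;
	err = register_b();
	if (err)
		goto err_b_register;
	err = register_c();
	if (err)
		goto err_c_register;
	return 0;

err_c_register:
	unregister_b(); /* undo step b */
err_b_register:
	unregister_a(); /* undo step a */
err_a_register:
	return err;
}

int main(void)
{
	return demo_init() ? 1 : 0;
}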
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 32fdd37657dd..bb2442e1f705 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -190,6 +190,7 @@ struct mlxsw_sp {
const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops;
const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
+ const struct mlxsw_sp_acl_bf_ops *acl_bf_ops;
const struct mlxsw_sp_nve_ops **nve_ops_arr;
const struct mlxsw_sp_sb_vals *sb_vals;
const struct mlxsw_sp_sb_ops *sb_ops;
@@ -203,6 +204,8 @@ struct mlxsw_sp {
const struct mlxsw_listener *listeners;
size_t listeners_count;
u32 lowest_shaper_bs;
+ struct rhashtable ipv6_addr_ht;
+ struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */
};
struct mlxsw_sp_ptp_ops {
@@ -217,13 +220,13 @@ struct mlxsw_sp_ptp_ops {
* is responsible for freeing the passed-in SKB.
*/
void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
+ u16 local_port);
/* Notify a driver that a timestamped packet was transmitted. Driver
* is responsible for freeing the passed-in SKB.
*/
void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
+ u16 local_port);
int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
struct hwtstamp_config *config);
@@ -261,7 +264,7 @@ enum mlxsw_sp_sample_trigger_type {
struct mlxsw_sp_sample_trigger {
enum mlxsw_sp_sample_trigger_type type;
- u8 local_port; /* Reserved when trigger type is not ingress / egress. */
+ u16 local_port; /* Reserved when trigger type is not ingress / egress. */
};
struct mlxsw_sp_sample_params {
@@ -308,7 +311,7 @@ struct mlxsw_sp_port {
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sp *mlxsw_sp;
- u8 local_port;
+ u16 local_port;
u8 lagged:1,
split:1;
u16 pvid;
@@ -370,7 +373,7 @@ struct mlxsw_sp_port_type_speed_ops {
u32 (*to_ptys_speed_lanes)(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd);
void (*reg_ptys_eth_pack)(struct mlxsw_sp *mlxsw_sp, char *payload,
- u8 local_port, u32 proto_admin, bool autoneg);
+ u16 local_port, u32 proto_admin, bool autoneg);
void (*reg_ptys_eth_unpack)(struct mlxsw_sp *mlxsw_sp, char *payload,
u32 *p_eth_proto_cap,
u32 *p_eth_proto_admin,
@@ -441,7 +444,7 @@ static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 local_port;
+ u16 local_port;
local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
lag_id, port_index);
@@ -587,6 +590,11 @@ mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
void
mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_sample_trigger *trigger);
+int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index);
+void
+mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6);
extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals;
extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals;
@@ -621,9 +629,9 @@ extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
- u8 local_port, void *priv);
+ u16 local_port, void *priv);
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
+ u16 local_port);
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
@@ -729,7 +737,7 @@ void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev);
u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
-u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
+u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
enum mlxsw_sp_l3proto ul_proto,
const union mlxsw_sp_l3addr *ul_sip,
@@ -1099,6 +1107,11 @@ extern const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops;
/* spectrum_acl_flex_keys.c */
extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
+extern const struct mlxsw_afk_ops mlxsw_sp4_afk_ops;
+
+/* spectrum_acl_bloom_filter.c */
+extern const struct mlxsw_sp_acl_bf_ops mlxsw_sp2_acl_bf_ops;
+extern const struct mlxsw_sp_acl_bf_ops mlxsw_sp4_acl_bf_ops;
/* spectrum_matchall.c */
struct mlxsw_sp_mall_ops {
@@ -1222,7 +1235,7 @@ bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid);
void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
const struct net_device *nve_dev);
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
- enum mlxsw_sp_flood_type packet_type, u8 local_port,
+ enum mlxsw_sp_flood_type packet_type, u16 local_port,
bool member);
int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
@@ -1310,6 +1323,17 @@ void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid,
enum mlxsw_sp_l3proto proto,
union mlxsw_sp_l3addr *addr);
+int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index);
+void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6);
+int
+mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index,
+ const struct in6_addr *new_addr6);
+void mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index);
int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
index a11d911302f1..e4f4cded2b6f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -45,8 +45,8 @@ static int mlxsw_sp2_mr_tcam_bind_group(struct mlxsw_sp *mlxsw_sp,
}
static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv4[] = {
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
MLXSW_AFK_ELEMENT_SRC_IP_0_31,
MLXSW_AFK_ELEMENT_DST_IP_0_31,
};
@@ -89,8 +89,8 @@ static void mlxsw_sp2_mr_tcam_ipv4_fini(struct mlxsw_sp2_mr_tcam *mr_tcam)
}
static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv6[] = {
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
MLXSW_AFK_ELEMENT_SRC_IP_96_127,
MLXSW_AFK_ELEMENT_SRC_IP_64_95,
MLXSW_AFK_ELEMENT_SRC_IP_32_63,
@@ -189,10 +189,10 @@ mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule,
rulei = mlxsw_sp_acl_rule_rulei(rule);
rulei->priority = priority;
- mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
key->vrid, GENMASK(7, 0));
mlxsw_sp_acl_rulei_keymask_u32(rulei,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
key->vrid >> 8, GENMASK(2, 0));
switch (key->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
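The renamed VIRT_ROUTER_{MSB,LSB} elements still carve the same bits out of the virtual-router ID in the hunk above: the low 8 bits go into the LSB element and the remaining high bits into the MSB element, with GENMASK() supplying the masks. A standalone sketch of that arithmetic (the GENMASK() definition is reproduced locally for illustration):

#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	unsigned int vrid = 0x5a5; /* 11-bit virtual router ID */
	unsigned int lsb = vrid & GENMASK(7, 0);        /* 0xa5 */
	unsigned int msb = (vrid >> 8) & GENMASK(2, 0); /* 0x5  */

	printf("lsb=0x%02x msb=0x%x\n", lsb, msb);
	return 0;
}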
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 67cedfa76f78..70c11bfac08f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -406,7 +406,7 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 local_port;
+ u16 local_port;
bool in_port;
if (out_dev) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index dbd3bebf11ec..e2aced7ab454 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -17,9 +17,9 @@ struct mlxsw_sp_acl_bf {
};
/* Bloom filter uses a crc-16 hash over chunks of data which contain 4 key
- * blocks, eRP ID and region ID. In Spectrum-2, region key is combined of up to
- * 12 key blocks, so there can be up to 3 chunks in the Bloom filter key,
- * depending on the actual number of key blocks used in the region.
+ * blocks, eRP ID and region ID. In Spectrum-2 and above, the region key is
+ * composed of up to 12 key blocks, so there can be up to 3 chunks in the Bloom
+ * filter key, depending on the actual number of key blocks used in the region.
* The layout of the Bloom filter key is as follows:
*
* +-------------------------+------------------------+------------------------+
@@ -27,7 +27,9 @@ struct mlxsw_sp_acl_bf {
* +-------------------------+------------------------+------------------------+
*/
#define MLXSW_BLOOM_KEY_CHUNKS 3
-#define MLXSW_BLOOM_KEY_LEN 69
+
+/* Spectrum-2 and Spectrum-3 chunks */
+#define MLXSW_SP2_BLOOM_KEY_LEN 69
/* Each chunk size is 23 bytes. 18 bytes of it contain 4 key blocks, each is
* 36 bits, 2 bytes which hold eRP ID and region ID, and 3 bytes of zero
@@ -42,31 +44,21 @@ struct mlxsw_sp_acl_bf {
* | 0 | region ID | eRP ID | 4 Key blocks (18 Bytes) |
* +---------+-----------+----------+-----------------------------------+
*/
-#define MLXSW_BLOOM_CHUNK_PAD_BYTES 3
-#define MLXSW_BLOOM_CHUNK_KEY_BYTES 18
-#define MLXSW_BLOOM_KEY_CHUNK_BYTES 23
+#define MLXSW_SP2_BLOOM_CHUNK_PAD_BYTES 3
+#define MLXSW_SP2_BLOOM_CHUNK_KEY_BYTES 18
+#define MLXSW_SP2_BLOOM_KEY_CHUNK_BYTES 23
/* The offset of the key block within a chunk is 5 bytes as it comes after
* 3 bytes of zero padding and 16 bits of region ID and eRP ID.
*/
-#define MLXSW_BLOOM_CHUNK_KEY_OFFSET 5
+#define MLXSW_SP2_BLOOM_CHUNK_KEY_OFFSET 5
-/* Each chunk contains 4 key blocks. Chunk 2 uses key blocks 11-8,
- * and we need to populate it with 4 key blocks copied from the entry encoded
- * key. Since the encoded key contains a padding, key block 11 starts at offset
- * 2. block 7 that is used in chunk 1 starts at offset 20 as 4 key blocks take
- * 18 bytes.
- * This array defines key offsets for easy access when copying key blocks from
- * entry key to Bloom filter chunk.
- */
-static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
-
-/* This table is just the CRC of each possible byte. It is
- * computed, Msbit first, for the Bloom filter polynomial
- * which is 0x8529 (1 + x^3 + x^5 + x^8 + x^10 + x^15 and
+/* This table is just the CRC of each possible byte which is used for
+ * Spectrum-{2,3}. It is computed, Msbit first, for the Bloom filter
+ * polynomial which is 0x8529 (1 + x^3 + x^5 + x^8 + x^10 + x^15 and
* the implicit x^16).
*/
-static const u16 mlxsw_sp_acl_bf_crc_tab[256] = {
+static const u16 mlxsw_sp2_acl_bf_crc16_tab[256] = {
0x0000, 0x8529, 0x8f7b, 0x0a52, 0x9bdf, 0x1ef6, 0x14a4, 0x918d,
0xb297, 0x37be, 0x3dec, 0xb8c5, 0x2948, 0xac61, 0xa633, 0x231a,
0xe007, 0x652e, 0x6f7c, 0xea55, 0x7bd8, 0xfef1, 0xf4a3, 0x718a,
@@ -101,24 +93,146 @@ static const u16 mlxsw_sp_acl_bf_crc_tab[256] = {
0x0c4c, 0x8965, 0x8337, 0x061e, 0x9793, 0x12ba, 0x18e8, 0x9dc1,
};
-static u16 mlxsw_sp_acl_bf_crc_byte(u16 crc, u8 c)
+/* Spectrum-4 chunks */
+#define MLXSW_SP4_BLOOM_KEY_LEN 60
+
+/* In Spectrum-4, there is no padding. Each chunk size is 20 bytes.
+ * 18 bytes of it contain 4 key blocks, each 36 bits wide, and 2 bytes which
+ * hold the eRP ID and region ID.
+ * The layout of each chunk is as follows:
+ *
+ * +-----------------------+-----------------------------------+
+ * |        2 bytes        |             18 bytes              |
+ * +-----------+-----------+-----------------------------------+
+ * |  157:148  |  147:144  |               143:0               |
+ * +-----------+-----------+-----------------------------------+
+ * | region ID |  eRP ID   |      4 Key blocks (18 Bytes)      |
+ * +-----------+-----------+-----------------------------------+
+ */
+
+#define MLXSW_SP4_BLOOM_CHUNK_PAD_BYTES 0
+#define MLXSW_SP4_BLOOM_CHUNK_KEY_BYTES 18
+#define MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES 20
+
+/* The offset of the key block within a chunk is 2 bytes as it comes after
+ * 16 bits of region ID and eRP ID.
+ */
+#define MLXSW_SP4_BLOOM_CHUNK_KEY_OFFSET 2
+
+/* For Spectrum-4, two hash functions are used, one CRC-10 based and one
+ * CRC-6 based. The result is a combination of the two calculations -
+ * the 6-bit column is the MSB (result of CRC-6),
+ * the 10-bit row is the LSB (result of CRC-10).
+ */
+
+/* This table is just the CRC of each possible byte which is used for
+ * Spectrum-4. It is computed, Msbit first, for the Bloom filter
+ * polynomial which is 0x1b (1 + x^1 + x^3 + x^4 and the implicit x^10).
+ */
+static const u16 mlxsw_sp4_acl_bf_crc10_tab[256] = {
+0x0000, 0x001b, 0x0036, 0x002d, 0x006c, 0x0077, 0x005a, 0x0041,
+0x00d8, 0x00c3, 0x00ee, 0x00f5, 0x00b4, 0x00af, 0x0082, 0x0099,
+0x01b0, 0x01ab, 0x0186, 0x019d, 0x01dc, 0x01c7, 0x01ea, 0x01f1,
+0x0168, 0x0173, 0x015e, 0x0145, 0x0104, 0x011f, 0x0132, 0x0129,
+0x0360, 0x037b, 0x0356, 0x034d, 0x030c, 0x0317, 0x033a, 0x0321,
+0x03b8, 0x03a3, 0x038e, 0x0395, 0x03d4, 0x03cf, 0x03e2, 0x03f9,
+0x02d0, 0x02cb, 0x02e6, 0x02fd, 0x02bc, 0x02a7, 0x028a, 0x0291,
+0x0208, 0x0213, 0x023e, 0x0225, 0x0264, 0x027f, 0x0252, 0x0249,
+0x02db, 0x02c0, 0x02ed, 0x02f6, 0x02b7, 0x02ac, 0x0281, 0x029a,
+0x0203, 0x0218, 0x0235, 0x022e, 0x026f, 0x0274, 0x0259, 0x0242,
+0x036b, 0x0370, 0x035d, 0x0346, 0x0307, 0x031c, 0x0331, 0x032a,
+0x03b3, 0x03a8, 0x0385, 0x039e, 0x03df, 0x03c4, 0x03e9, 0x03f2,
+0x01bb, 0x01a0, 0x018d, 0x0196, 0x01d7, 0x01cc, 0x01e1, 0x01fa,
+0x0163, 0x0178, 0x0155, 0x014e, 0x010f, 0x0114, 0x0139, 0x0122,
+0x000b, 0x0010, 0x003d, 0x0026, 0x0067, 0x007c, 0x0051, 0x004a,
+0x00d3, 0x00c8, 0x00e5, 0x00fe, 0x00bf, 0x00a4, 0x0089, 0x0092,
+0x01ad, 0x01b6, 0x019b, 0x0180, 0x01c1, 0x01da, 0x01f7, 0x01ec,
+0x0175, 0x016e, 0x0143, 0x0158, 0x0119, 0x0102, 0x012f, 0x0134,
+0x001d, 0x0006, 0x002b, 0x0030, 0x0071, 0x006a, 0x0047, 0x005c,
+0x00c5, 0x00de, 0x00f3, 0x00e8, 0x00a9, 0x00b2, 0x009f, 0x0084,
+0x02cd, 0x02d6, 0x02fb, 0x02e0, 0x02a1, 0x02ba, 0x0297, 0x028c,
+0x0215, 0x020e, 0x0223, 0x0238, 0x0279, 0x0262, 0x024f, 0x0254,
+0x037d, 0x0366, 0x034b, 0x0350, 0x0311, 0x030a, 0x0327, 0x033c,
+0x03a5, 0x03be, 0x0393, 0x0388, 0x03c9, 0x03d2, 0x03ff, 0x03e4,
+0x0376, 0x036d, 0x0340, 0x035b, 0x031a, 0x0301, 0x032c, 0x0337,
+0x03ae, 0x03b5, 0x0398, 0x0383, 0x03c2, 0x03d9, 0x03f4, 0x03ef,
+0x02c6, 0x02dd, 0x02f0, 0x02eb, 0x02aa, 0x02b1, 0x029c, 0x0287,
+0x021e, 0x0205, 0x0228, 0x0233, 0x0272, 0x0269, 0x0244, 0x025f,
+0x0016, 0x000d, 0x0020, 0x003b, 0x007a, 0x0061, 0x004c, 0x0057,
+0x00ce, 0x00d5, 0x00f8, 0x00e3, 0x00a2, 0x00b9, 0x0094, 0x008f,
+0x01a6, 0x01bd, 0x0190, 0x018b, 0x01ca, 0x01d1, 0x01fc, 0x01e7,
+0x017e, 0x0165, 0x0148, 0x0153, 0x0112, 0x0109, 0x0124, 0x013f,
+};
+
+/* This table is just the CRC of each possible byte which is used for
+ * Spectrum-4. It is computed, Msbit first, for the Bloom filter
+ * polynomial which is 0x2d (1 + x^2 + x^3 + x^5 and the implicit x^6).
+ */
+static const u8 mlxsw_sp4_acl_bf_crc6_tab[256] = {
+0x00, 0x2d, 0x37, 0x1a, 0x03, 0x2e, 0x34, 0x19,
+0x06, 0x2b, 0x31, 0x1c, 0x05, 0x28, 0x32, 0x1f,
+0x0c, 0x21, 0x3b, 0x16, 0x0f, 0x22, 0x38, 0x15,
+0x0a, 0x27, 0x3d, 0x10, 0x09, 0x24, 0x3e, 0x13,
+0x18, 0x35, 0x2f, 0x02, 0x1b, 0x36, 0x2c, 0x01,
+0x1e, 0x33, 0x29, 0x04, 0x1d, 0x30, 0x2a, 0x07,
+0x14, 0x39, 0x23, 0x0e, 0x17, 0x3a, 0x20, 0x0d,
+0x12, 0x3f, 0x25, 0x08, 0x11, 0x3c, 0x26, 0x0b,
+0x30, 0x1d, 0x07, 0x2a, 0x33, 0x1e, 0x04, 0x29,
+0x36, 0x1b, 0x01, 0x2c, 0x35, 0x18, 0x02, 0x2f,
+0x3c, 0x11, 0x0b, 0x26, 0x3f, 0x12, 0x08, 0x25,
+0x3a, 0x17, 0x0d, 0x20, 0x39, 0x14, 0x0e, 0x23,
+0x28, 0x05, 0x1f, 0x32, 0x2b, 0x06, 0x1c, 0x31,
+0x2e, 0x03, 0x19, 0x34, 0x2d, 0x00, 0x1a, 0x37,
+0x24, 0x09, 0x13, 0x3e, 0x27, 0x0a, 0x10, 0x3d,
+0x22, 0x0f, 0x15, 0x38, 0x21, 0x0c, 0x16, 0x3b,
+0x0d, 0x20, 0x3a, 0x17, 0x0e, 0x23, 0x39, 0x14,
+0x0b, 0x26, 0x3c, 0x11, 0x08, 0x25, 0x3f, 0x12,
+0x01, 0x2c, 0x36, 0x1b, 0x02, 0x2f, 0x35, 0x18,
+0x07, 0x2a, 0x30, 0x1d, 0x04, 0x29, 0x33, 0x1e,
+0x15, 0x38, 0x22, 0x0f, 0x16, 0x3b, 0x21, 0x0c,
+0x13, 0x3e, 0x24, 0x09, 0x10, 0x3d, 0x27, 0x0a,
+0x19, 0x34, 0x2e, 0x03, 0x1a, 0x37, 0x2d, 0x00,
+0x1f, 0x32, 0x28, 0x05, 0x1c, 0x31, 0x2b, 0x06,
+0x3d, 0x10, 0x0a, 0x27, 0x3e, 0x13, 0x09, 0x24,
+0x3b, 0x16, 0x0c, 0x21, 0x38, 0x15, 0x0f, 0x22,
+0x31, 0x1c, 0x06, 0x2b, 0x32, 0x1f, 0x05, 0x28,
+0x37, 0x1a, 0x00, 0x2d, 0x34, 0x19, 0x03, 0x2e,
+0x25, 0x08, 0x12, 0x3f, 0x26, 0x0b, 0x11, 0x3c,
+0x23, 0x0e, 0x14, 0x39, 0x20, 0x0d, 0x17, 0x3a,
+0x29, 0x04, 0x1e, 0x33, 0x2a, 0x07, 0x1d, 0x30,
+0x2f, 0x02, 0x18, 0x35, 0x2c, 0x01, 0x1b, 0x36,
+};
+
+/* Each chunk contains 4 key blocks. Chunk 2 uses key blocks 11-8,
+ * and we need to populate it with 4 key blocks copied from the entry encoded
+ * key. The original key layout is the same for Spectrum-{2,3,4}.
+ * Since the encoded key contains 2 bytes of padding, key block 11 starts at
+ * offset 2. Block 7, which is used in chunk 1, starts at offset 20, as 4 key
+ * blocks take 18 bytes. See 'MLXSW_SP2_AFK_BLOCK_LAYOUT' for more details.
+ * This array defines key offsets for easy access when copying key blocks from
+ * entry key to Bloom filter chunk.
+ */
+static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
+
+static u16 mlxsw_sp2_acl_bf_crc16_byte(u16 crc, u8 c)
{
- return (crc << 8) ^ mlxsw_sp_acl_bf_crc_tab[(crc >> 8) ^ c];
+ return (crc << 8) ^ mlxsw_sp2_acl_bf_crc16_tab[(crc >> 8) ^ c];
}
-static u16 mlxsw_sp_acl_bf_crc(const u8 *buffer, size_t len)
+static u16 mlxsw_sp2_acl_bf_crc(const u8 *buffer, size_t len)
{
u16 crc = 0;
while (len--)
- crc = mlxsw_sp_acl_bf_crc_byte(crc, *buffer++);
+ crc = mlxsw_sp2_acl_bf_crc16_byte(crc, *buffer++);
return crc;
}
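The renamed CRC-16 helpers are an ordinary table-driven, MSB-first CRC. As a sanity check, the table above can be regenerated from the stated polynomial 0x8529; the standalone sketch below does exactly that and applies the same byte-at-a-time update the driver uses (the sample key bytes are placeholders, not a real encoded key):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint16_t crc16_tab[256];

static void crc16_tab_init(void)
{
	int i, bit;

	for (i = 0; i < 256; i++) {
		uint16_t crc = (uint16_t)(i << 8);

		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8529)
					     : (uint16_t)(crc << 1);
		crc16_tab[i] = crc; /* crc16_tab[1] == 0x8529, as above */
	}
}

static uint16_t crc16(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;

	while (len--)
		crc = (uint16_t)((crc << 8) ^
				 crc16_tab[((crc >> 8) ^ *buf++) & 0xff]);
	return crc;
}

int main(void)
{
	uint8_t key[69] = { 0x01, 0x02, 0x03 }; /* placeholder input */

	crc16_tab_init();
	printf("bf index: 0x%04x\n", crc16(key, sizeof(key)));
	return 0;
}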
static void
-mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_atcam_entry *aentry,
- char *output, u8 *len)
+__mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ char *output, u8 *len, u8 max_chunks, u8 pad_bytes,
+ u8 key_offset, u8 chunk_key_len, u8 chunk_len)
{
struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
u8 chunk_index, chunk_count, block_count;
@@ -129,37 +243,168 @@ mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
chunk_count = 1 + ((block_count - 1) >> 2);
erp_region_id = cpu_to_be16(aentry->ht_key.erp_id |
(aregion->region->id << 4));
- for (chunk_index = MLXSW_BLOOM_KEY_CHUNKS - chunk_count;
- chunk_index < MLXSW_BLOOM_KEY_CHUNKS; chunk_index++) {
- memset(chunk, 0, MLXSW_BLOOM_CHUNK_PAD_BYTES);
- memcpy(chunk + MLXSW_BLOOM_CHUNK_PAD_BYTES, &erp_region_id,
+ for (chunk_index = max_chunks - chunk_count; chunk_index < max_chunks;
+ chunk_index++) {
+ memset(chunk, 0, pad_bytes);
+ memcpy(chunk + pad_bytes, &erp_region_id,
sizeof(erp_region_id));
- memcpy(chunk + MLXSW_BLOOM_CHUNK_KEY_OFFSET,
+ memcpy(chunk + key_offset,
&aentry->enc_key[chunk_key_offsets[chunk_index]],
- MLXSW_BLOOM_CHUNK_KEY_BYTES);
- chunk += MLXSW_BLOOM_KEY_CHUNK_BYTES;
+ chunk_key_len);
+ chunk += chunk_len;
}
- *len = chunk_count * MLXSW_BLOOM_KEY_CHUNK_BYTES;
+ *len = chunk_count * chunk_len;
+}
+
+static void
+mlxsw_sp2_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ char *output, u8 *len)
+{
+ __mlxsw_sp_acl_bf_key_encode(aregion, aentry, output, len,
+ MLXSW_BLOOM_KEY_CHUNKS,
+ MLXSW_SP2_BLOOM_CHUNK_PAD_BYTES,
+ MLXSW_SP2_BLOOM_CHUNK_KEY_OFFSET,
+ MLXSW_SP2_BLOOM_CHUNK_KEY_BYTES,
+ MLXSW_SP2_BLOOM_KEY_CHUNK_BYTES);
}
static unsigned int
-mlxsw_sp_acl_bf_rule_count_index_get(struct mlxsw_sp_acl_bf *bf,
- unsigned int erp_bank,
- unsigned int bf_index)
+mlxsw_sp2_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
{
- return erp_bank * bf->bank_size + bf_index;
+ char bf_key[MLXSW_SP2_BLOOM_KEY_LEN];
+ u8 bf_size;
+
+ mlxsw_sp2_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size);
+ return mlxsw_sp2_acl_bf_crc(bf_key, bf_size);
+}
+
+static u16 mlxsw_sp4_acl_bf_crc10_byte(u16 crc, u8 c)
+{
+ u8 index = ((crc >> 2) ^ c) & 0xff;
+
+ return ((crc << 8) ^ mlxsw_sp4_acl_bf_crc10_tab[index]) & 0x3ff;
+}
+
+static u16 mlxsw_sp4_acl_bf_crc6_byte(u16 crc, u8 c)
+{
+ u8 index = (crc ^ c) & 0xff;
+
+ return ((crc << 6) ^ (mlxsw_sp4_acl_bf_crc6_tab[index] << 2)) & 0xfc;
+}
+
+static u16 mlxsw_sp4_acl_bf_crc(const u8 *buffer, size_t len)
+{
+ u16 crc_row = 0, crc_col = 0;
+
+ while (len--) {
+ crc_row = mlxsw_sp4_acl_bf_crc10_byte(crc_row, *buffer);
+ crc_col = mlxsw_sp4_acl_bf_crc6_byte(crc_col, *buffer);
+ buffer++;
+ }
+
+ crc_col >>= 2;
+
+ /* 6 bit column are MSB, 10 bit row are LSB */
+ return (crc_col << 10) | crc_row;
+}
+
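Both Spectrum-4 tables can be regenerated the same way from their stated polynomials. The sketch below rebuilds the CRC-10 table (polynomial 0x1b with the implicit x^10, MSB-first, matching the `(crc >> 2) ^ c` byte indexing above) and spot-checks the first entries against the values quoted in this patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t tab[256];
	int i, bit;

	for (i = 0; i < 256; i++) {
		/* a byte enters the top of a 10-bit register */
		uint16_t crc = (uint16_t)(i << 2);

		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x200) ? (((crc << 1) ^ 0x1b) & 0x3ff)
					    : ((crc << 1) & 0x3ff);
		tab[i] = crc;
	}
	/* Expect 0x0000 0x001b 0x0036 0x002d, matching the table above. */
	printf("0x%04x 0x%04x 0x%04x 0x%04x\n",
	       tab[0], tab[1], tab[2], tab[3]);
	return 0;
}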
+static void right_shift_array(char *arr, u8 len, u8 shift_bits)
+{
+ u8 byte_mask = 0xff >> shift_bits;
+ int i;
+
+ if (WARN_ON(!shift_bits || shift_bits >= 8))
+ return;
+
+ for (i = len - 1; i >= 0; i--) {
+ /* The first iteration looks like an out-of-bounds access,
+ * but actually references a buffer that the array is shifted
+ * into. This move is legal as we never send the last chunk to
+ * this function.
+ */
+ arr[i + 1] &= byte_mask;
+ arr[i + 1] |= arr[i] << (8 - shift_bits);
+ arr[i] = arr[i] >> shift_bits;
+ }
+}
+
+static void mlxsw_sp4_bf_key_shift_chunks(u8 chunk_count, char *output)
+{
+ /* The chunks are supposed to be contiguous, with no padding.
+ * Since region ID and eRP ID use 14 bits, and not a full 2 bytes,
+ * and in Spectrum-4 there is no padding, it is necessary to shift some
+ * chunks 2 bits right.
+ */
+ switch (chunk_count) {
+ case 2:
+ /* The chunks are copied as follows:
+ * +-------------+-----------------+
+ * | Chunk 0 | Chunk 1 |
+ * | IDs | keys |(**) IDs | keys |
+ * +-------------+-----------------+
+ * In (**), there are two unused bits, therefore, chunk 0 needs
+ * to be shifted two bits right.
+ */
+ right_shift_array(output, MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES, 2);
+ break;
+ case 3:
+ /* The chunks are copied as follows:
+ * +-------------+-----------------+-----------------+
+ * | Chunk 0 | Chunk 1 | Chunk 2 |
+ * | IDs | keys |(**) IDs | keys |(**) IDs | keys |
+ * +-------------+-----------------+-----------------+
+ * In (**), there are two unused bits, therefore, chunk 1 needs
+ * to be shifted two bits right and chunk 0 needs to be shifted
+ * four bits right.
+ */
+ right_shift_array(output + MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES,
+ MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES, 2);
+ right_shift_array(output, MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES, 4);
+ break;
+ default:
+ WARN_ON(chunk_count > MLXSW_BLOOM_KEY_CHUNKS);
+ }
+}
+
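right_shift_array() shifts a byte array right across byte boundaries by letting each byte's low bits spill into the following byte, relying on one spare byte past the shifted region (which is why the last chunk is never passed in). A compilable userspace rendition of the same borrow trick, under the same 1..7-bit shift assumption:

#include <stdint.h>
#include <stdio.h>

static void right_shift_bytes(uint8_t *arr, int len, unsigned int shift)
{
	uint8_t mask = (uint8_t)(0xff >> shift);
	int i;

	for (i = len - 1; i >= 0; i--) {
		/* arr[i + 1] is the spare trailing byte on the first pass */
		arr[i + 1] = (uint8_t)((arr[i + 1] & mask) |
				       (uint8_t)(arr[i] << (8 - shift)));
		arr[i] >>= shift;
	}
}

int main(void)
{
	uint8_t buf[3] = { 0xab, 0xcd, 0x00 }; /* last byte is the spare */

	right_shift_bytes(buf, 2, 2);
	printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]); /* 2a f3 40 */
	return 0;
}

Shifting { 0xab, 0xcd } right by two bits yields 0x2af3, with the two displaced low bits landing at the top of the spare byte (0x40).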
+static void
+mlxsw_sp4_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ char *output, u8 *len)
+{
+ struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
+ u8 block_count = mlxsw_afk_key_info_blocks_count_get(key_info);
+ u8 chunk_count = 1 + ((block_count - 1) >> 2);
+
+ __mlxsw_sp_acl_bf_key_encode(aregion, aentry, output, len,
+ MLXSW_BLOOM_KEY_CHUNKS,
+ MLXSW_SP4_BLOOM_CHUNK_PAD_BYTES,
+ MLXSW_SP4_BLOOM_CHUNK_KEY_OFFSET,
+ MLXSW_SP4_BLOOM_CHUNK_KEY_BYTES,
+ MLXSW_SP4_BLOOM_KEY_CHUNK_BYTES);
+ mlxsw_sp4_bf_key_shift_chunks(chunk_count, output);
}
static unsigned int
-mlxsw_sp_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf,
- struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_atcam_entry *aentry)
+mlxsw_sp4_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
{
- char bf_key[MLXSW_BLOOM_KEY_LEN];
+ char bf_key[MLXSW_SP4_BLOOM_KEY_LEN] = {};
u8 bf_size;
- mlxsw_sp_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size);
- return mlxsw_sp_acl_bf_crc(bf_key, bf_size);
+ mlxsw_sp4_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size);
+ return mlxsw_sp4_acl_bf_crc(bf_key, bf_size);
+}
+
+static unsigned int
+mlxsw_sp_acl_bf_rule_count_index_get(struct mlxsw_sp_acl_bf *bf,
+ unsigned int erp_bank,
+ unsigned int bf_index)
+{
+ return erp_bank * bf->bank_size + bf_index;
}
int
@@ -176,7 +421,7 @@ mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp,
mutex_lock(&bf->lock);
- bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry);
+ bf_index = mlxsw_sp->acl_bf_ops->index_get(bf, aregion, aentry);
rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
bf_index);
@@ -219,7 +464,7 @@ mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp,
mutex_lock(&bf->lock);
- bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry);
+ bf_index = mlxsw_sp->acl_bf_ops->index_get(bf, aregion, aentry);
rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
bf_index);
@@ -267,3 +512,11 @@ void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf)
mutex_destroy(&bf->lock);
kfree(bf);
}
+
+const struct mlxsw_sp_acl_bf_ops mlxsw_sp2_acl_bf_ops = {
+ .index_get = mlxsw_sp2_acl_bf_index_get,
+};
+
+const struct mlxsw_sp_acl_bf_ops mlxsw_sp4_acl_bf_ops = {
+ .index_get = mlxsw_sp4_acl_bf_index_get,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index c72aa38424dc..50806594d977 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -83,7 +83,7 @@ static int mlxsw_sp2_act_kvdl_set_activity_get(void *priv, u32 kvdl_index,
}
static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
- u8 local_port)
+ u16 local_port)
{
struct mlxsw_sp *mlxsw_sp = priv;
char ppbs_pl[MLXSW_REG_PPBS_LEN];
@@ -132,7 +132,7 @@ mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index)
}
static int
-mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port,
+mlxsw_sp_act_mirror_add(void *priv, u16 local_in_port,
const struct net_device *out_dev,
bool ingress, int *p_span_id)
{
@@ -159,7 +159,7 @@ err_analyzed_port_get:
}
static void
-mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
+mlxsw_sp_act_mirror_del(void *priv, u16 local_in_port, int span_id, bool ingress)
{
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = priv;
@@ -192,7 +192,7 @@ static void mlxsw_sp_act_policer_del(void *priv, u16 policer_index)
policer_index);
}
-static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port,
+static int mlxsw_sp1_act_sampler_add(void *priv, u16 local_port,
struct psample_group *psample_group,
u32 rate, u32 trunc_size, bool truncate,
bool ingress, int *p_span_id,
@@ -202,7 +202,7 @@ static int mlxsw_sp1_act_sampler_add(void *priv, u8 local_port,
return -EOPNOTSUPP;
}
-static void mlxsw_sp1_act_sampler_del(void *priv, u8 local_port, int span_id,
+static void mlxsw_sp1_act_sampler_del(void *priv, u16 local_port, int span_id,
bool ingress)
{
WARN_ON_ONCE(1);
@@ -224,7 +224,7 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
.sampler_del = mlxsw_sp1_act_sampler_del,
};
-static int mlxsw_sp2_act_sampler_add(void *priv, u8 local_port,
+static int mlxsw_sp2_act_sampler_add(void *priv, u16 local_port,
struct psample_group *psample_group,
u32 rate, u32 trunc_size, bool truncate,
bool ingress, int *p_span_id,
@@ -272,7 +272,7 @@ err_span_agent_get:
return err;
}
-static void mlxsw_sp2_act_sampler_del(void *priv, u8 local_port, int span_id,
+static void mlxsw_sp2_act_sampler_del(void *priv, u16 local_port, int span_id,
bool ingress)
{
struct mlxsw_sp_sample_trigger trigger = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index 279c241f76f0..00c32320f891 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -168,8 +168,8 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = {
- MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_7, 0x04, 24, 8),
- MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_8_10, 0x00, 0, 3),
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 3),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
@@ -311,3 +311,45 @@ const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
.encode_block = mlxsw_sp2_afk_encode_block,
.clear_block = mlxsw_sp2_afk_clear_block,
};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VID, 0x04, 18, 12),
+ MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8),
+ MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x04, 21, 4, 0, true),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
+ MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
+};
+
+static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = {
+ MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0),
+ MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1),
+ MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2),
+ MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3),
+ MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
+ MLXSW_AFK_BLOCK(0x1A, mlxsw_sp_afk_element_info_mac_5b),
+ MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
+ MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+ MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
+ MLXSW_AFK_BLOCK(0x35, mlxsw_sp_afk_element_info_ipv4_4b),
+ MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
+ MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
+ MLXSW_AFK_BLOCK(0x47, mlxsw_sp_afk_element_info_ipv6_2b),
+ MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3),
+ MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4),
+ MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5),
+ MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0),
+ MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2),
+};
+
+const struct mlxsw_afk_ops mlxsw_sp4_afk_ops = {
+ .blocks = mlxsw_sp4_afk_blocks,
+ .blocks_count = ARRAY_SIZE(mlxsw_sp4_afk_blocks),
+ .encode_block = mlxsw_sp2_afk_encode_block,
+ .clear_block = mlxsw_sp2_afk_clear_block,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index a41df10ade9b..edbbc89e7a71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -287,6 +287,12 @@ void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_bf;
+struct mlxsw_sp_acl_bf_ops {
+ unsigned int (*index_get)(struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+};
+
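The new ops structure is the usual per-ASIC indirection: each generation fills in its own index_get, and callers never name a CRC flavor directly. A minimal stand-alone rendition of the dispatch, with all driver types reduced to stubs:

#include <stdio.h>

struct bf;      /* stand-ins for the real atcam types */
struct region;
struct entry;

struct bf_ops {
	unsigned int (*index_get)(struct bf *bf, struct region *aregion,
				  struct entry *aentry);
};

static unsigned int sp2_index_get(struct bf *bf, struct region *aregion,
				  struct entry *aentry)
{
	return 0x1234; /* would be the CRC-16 of the encoded key */
}

static const struct bf_ops sp2_bf_ops = { .index_get = sp2_index_get };

int main(void)
{
	const struct bf_ops *ops = &sp2_bf_ops; /* selected once, at init */

	printf("bf index: 0x%x\n", ops->index_get(NULL, NULL, NULL));
	return 0;
}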
int
mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_bf *bf,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index d78cf5a7220a..98f26f596e30 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -160,7 +160,7 @@ static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
}
static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u8 pg_buff,
+ u16 local_port, u8 pg_buff,
enum mlxsw_reg_sbxx_dir dir)
{
struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
@@ -173,7 +173,7 @@ static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
- u8 local_port, u16 pool_index)
+ u16 local_port, u16 pool_index)
{
return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
}
@@ -202,7 +202,7 @@ static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
return 0;
}
-static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u8 pg_buff, u32 min_buff, u32 max_buff,
bool infi_max, u16 pool_index)
{
@@ -232,7 +232,7 @@ static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return 0;
}
-static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u16 pool_index, u32 min_buff, u32 max_buff)
{
const struct mlxsw_sp_sb_pool_des *des =
@@ -253,7 +253,7 @@ static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return 0;
}
-static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u16 pool_index, struct list_head *bulk_list)
{
const struct mlxsw_sp_sb_pool_des *des =
@@ -279,7 +279,7 @@ static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}
-static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u16 local_port,
u16 pool_index, struct list_head *bulk_list)
{
const struct mlxsw_sp_sb_pool_des *des =
@@ -919,7 +919,7 @@ mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
}
-static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
enum mlxsw_reg_sbxx_dir dir,
const struct mlxsw_sp_sb_cm *cms,
size_t cms_len)
@@ -1037,7 +1037,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
-static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const struct mlxsw_sp_sb_pm *pms,
bool skip_ingress)
{
@@ -1416,7 +1416,7 @@ int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
pool_index);
@@ -1432,7 +1432,7 @@ int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u32 max_buff;
int err;
@@ -1458,7 +1458,7 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
@@ -1479,7 +1479,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
const struct mlxsw_sp_sb_cm *cm;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
@@ -1526,7 +1526,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
u8 masked_count;
- u8 local_port_1;
+ u16 local_port_1;
};
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
@@ -1536,7 +1536,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
u8 masked_count;
- u8 local_port;
+ u16 local_port;
int rec_index = 0;
struct mlxsw_sp_sb_cm *cm;
int i;
@@ -1582,13 +1582,12 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u16 local_port, local_port_1, last_local_port;
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ u8 masked_count, current_page = 0;
unsigned long cb_priv = 0;
LIST_HEAD(bulk_list);
char *sbsr_pl;
- u8 masked_count;
- u8 local_port_1;
- u8 local_port;
int i;
int err;
int err2;
@@ -1602,6 +1601,10 @@ next_batch:
local_port_1 = local_port;
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, false);
+ mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+
for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
@@ -1609,6 +1612,10 @@ next_batch:
for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
+ if (local_port > last_local_port) {
+ current_page++;
+ goto do_query;
+ }
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
@@ -1651,10 +1658,11 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u16 local_port, last_local_port;
LIST_HEAD(bulk_list);
- char *sbsr_pl;
unsigned int masked_count;
- u8 local_port;
+ u8 current_page = 0;
+ char *sbsr_pl;
int i;
int err;
int err2;
@@ -1667,6 +1675,10 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
next_batch:
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, true);
+ mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
+ MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
+
for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
@@ -1674,6 +1686,10 @@ next_batch:
for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
+ if (local_port > last_local_port) {
+ current_page++;
+ goto do_query;
+ }
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
@@ -1715,7 +1731,7 @@ int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
pool_index);
@@ -1732,7 +1748,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 84d4460f3dcd..20530712eadb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1491,7 +1491,7 @@ static u32 mlxsw_sp1_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width,
static void
mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
- u8 local_port, u32 proto_admin, bool autoneg)
+ u16 local_port, u32 proto_admin, bool autoneg)
{
mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
}
@@ -1969,7 +1969,7 @@ static u32 mlxsw_sp2_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width,
static void
mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
- u8 local_port, u32 proto_admin,
+ u16 local_port, u32 proto_admin,
bool autoneg)
{
mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 004c42274e48..ce80931f0402 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -317,13 +317,13 @@ mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
}
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
- enum mlxsw_sp_flood_type packet_type, u8 local_port,
+ enum mlxsw_sp_flood_type packet_type, u16 local_port,
bool member)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
const struct mlxsw_sp_flood_table *flood_table;
- char *sftr_pl;
+ char *sftr2_pl;
int err;
if (WARN_ON(!fid_family->flood_tables || !ops->flood_index))
@@ -333,16 +333,16 @@ int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
if (!flood_table)
return -ESRCH;
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
- if (!sftr_pl)
+ sftr2_pl = kmalloc(MLXSW_REG_SFTR2_LEN, GFP_KERNEL);
+ if (!sftr2_pl)
return -ENOMEM;
- mlxsw_reg_sftr_pack(sftr_pl, flood_table->table_index,
- ops->flood_index(fid), flood_table->table_type, 1,
- local_port, member);
- err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr),
- sftr_pl);
- kfree(sftr_pl);
+ mlxsw_reg_sftr2_pack(sftr2_pl, flood_table->table_index,
+ ops->flood_index(fid), flood_table->table_type, 1,
+ local_port, member);
+ err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr2),
+ sftr2_pl);
+ kfree(sftr2_pl);
return err;
}
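
The SFTR2 conversion keeps the driver's usual shape for oversized register payloads: heap-allocate, pack, write once, free on every path. A minimal userspace sketch of that shape; REG_PL_LEN, reg_pack() and reg_write() are placeholders, not the mlxsw API:

#include <stdlib.h>
#include <string.h>

#define REG_PL_LEN 512                  /* assumed payload size */

static void reg_pack(char *pl, int table, int index)
{
        memset(pl, 0, REG_PL_LEN);      /* stand-in for mlxsw_reg_sftr2_pack() */
        pl[0] = (char)table;
        pl[1] = (char)index;
}

static int reg_write(const char *pl)
{
        (void)pl;                       /* stand-in for mlxsw_reg_write() */
        return 0;
}

static int flood_set(int table, int index)
{
        char *pl;
        int err;

        pl = malloc(REG_PL_LEN);        /* too large for the kernel stack */
        if (!pl)
                return -1;

        reg_pack(pl, table, index);
        err = reg_write(pl);
        free(pl);                       /* freed on success and error alike */
        return err;
}

int main(void)
{
        return flood_set(1, 100);
}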
@@ -439,7 +439,7 @@ static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
}
static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- u8 local_port, u16 vid, bool valid)
+ u16 local_port, u16 vid, bool valid)
{
enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
char svfa_pl[MLXSW_REG_SVFA_LEN];
@@ -573,7 +573,7 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
int err;
err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
@@ -601,7 +601,7 @@ mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
@@ -784,7 +784,7 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
int err;
/* We only need to transition the port to virtual mode since
@@ -808,7 +808,7 @@ mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index be3791ca6069..bb417db773b9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -203,7 +203,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
*/
burst = roundup_pow_of_two(act->police.burst);
err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei,
- act->police.index,
+ act->hw_index,
act->police.rate_bytes_ps,
burst, extack);
if (err)
@@ -508,7 +508,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
- if (mlxsw_sp_flow_block_is_egress_bound(block)) {
+ if (mlxsw_sp_flow_block_is_egress_bound(block) &&
+ match.mask->vlan_id) {
NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
return -EOPNOTSUPP;
}
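
The added mask test encodes the flow-dissector convention that an all-zero mask leaves a key unconstrained, so only a genuine vlan_id match needs to be refused on egress-bound blocks. A toy illustration, with types that are illustrative rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct vlan_key {
        unsigned short vlan_id;
};

static bool key_is_used(const struct vlan_key *mask)
{
        return mask->vlan_id != 0;      /* all-zero mask = wildcard */
}

int main(void)
{
        struct vlan_key wildcard = { 0 };
        struct vlan_key exact = { 0x0fff };

        printf("wildcard constrains: %d\n", key_is_used(&wildcard)); /* 0 */
        printf("exact constrains:    %d\n", key_is_used(&exact));    /* 1 */
        return 0;
}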
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index ad3926de88f2..01cf5a6a26bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -568,37 +568,21 @@ static int
mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
- char rips_pl[MLXSW_REG_RIPS_LEN];
struct __ip6_tnl_parm parms6;
- int err;
-
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
- MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
- &ipip_entry->dip_kvdl_index);
- if (err)
- return err;
parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
- mlxsw_reg_rips_pack(rips_pl, ipip_entry->dip_kvdl_index,
- &parms6.raddr);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
- if (err)
- goto err_rips_write;
-
- return 0;
-
-err_rips_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
- ipip_entry->dip_kvdl_index);
- return err;
+ return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &parms6.raddr,
+ &ipip_entry->dip_kvdl_index);
}
static void
mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_entry *ipip_entry)
{
- mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
- ipip_entry->dip_kvdl_index);
+ struct __ip6_tnl_parm parms6;
+
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &parms6.raddr);
}
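
Both hunks above now lean on mlxsw_sp_ipv6_addr_kvdl_index_get()/mlxsw_sp_ipv6_addr_put(), which behave like a refcounted address-to-index table: a second user of the same remote address shares the existing KVDL entry instead of allocating another one. A hedged sketch of that behavior, with a fixed-size array standing in for the real KVDL bookkeeping (names and sizes are illustrative):

#include <stdio.h>
#include <string.h>

struct addr_entry {
        char addr[16];          /* IPv6 address bytes */
        unsigned int kvdl;      /* hardware KVDL index */
        unsigned int refcnt;
};

static struct addr_entry table[8];

static int addr_get(const char *addr, unsigned int *kvdl)
{
        unsigned int i;

        for (i = 0; i < 8; i++) {
                if (table[i].refcnt && !memcmp(table[i].addr, addr, 16)) {
                        table[i].refcnt++;      /* share the entry */
                        *kvdl = table[i].kvdl;
                        return 0;
                }
        }
        for (i = 0; i < 8; i++) {
                if (!table[i].refcnt) {         /* claim a free slot */
                        memcpy(table[i].addr, addr, 16);
                        table[i].kvdl = i;
                        table[i].refcnt = 1;
                        *kvdl = i;
                        return 0;
                }
        }
        return -1;                              /* table exhausted */
}

static void addr_put(const char *addr)
{
        unsigned int i;

        for (i = 0; i < 8; i++) {
                if (table[i].refcnt && !memcmp(table[i].addr, addr, 16)) {
                        table[i].refcnt--;      /* slot frees on last put */
                        return;
                }
        }
}

int main(void)
{
        char a[16] = "2001:db8::1";
        unsigned int k1, k2;

        addr_get(a, &k1);
        addr_get(a, &k2);       /* second user reuses the same index */
        printf("%u == %u\n", k1, k2);
        addr_put(a);
        addr_put(a);
        return 0;
}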
static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 9eba8fa684ae..d2b57a045aa4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -130,15 +130,25 @@ mlxsw_sp_nve_mc_record_ipv6_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
struct mlxsw_sp_nve_mc_entry *mc_entry,
const union mlxsw_sp_l3addr *addr)
{
- WARN_ON(1);
+ u32 kvdl_index;
+ int err;
+
+ err = mlxsw_sp_ipv6_addr_kvdl_index_get(mc_record->mlxsw_sp,
+ &addr->addr6, &kvdl_index);
+ if (err)
+ return err;
- return -EINVAL;
+ mc_entry->ipv6_entry.addr6 = addr->addr6;
+ mc_entry->ipv6_entry.addr6_kvdl_index = kvdl_index;
+ return 0;
}
static void
mlxsw_sp_nve_mc_record_ipv6_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
const struct mlxsw_sp_nve_mc_entry *mc_entry)
{
+ mlxsw_sp_ipv6_addr_put(mc_record->mlxsw_sp,
+ &mc_entry->ipv6_entry.addr6);
}
static void
@@ -787,6 +797,142 @@ static void mlxsw_sp_nve_fdb_clear_offload(struct mlxsw_sp *mlxsw_sp,
ops->fdb_clear_offload(nve_dev, vni);
}
+struct mlxsw_sp_nve_ipv6_ht_key {
+ u8 mac[ETH_ALEN];
+ u16 fid_index;
+};
+
+struct mlxsw_sp_nve_ipv6_ht_node {
+ struct rhash_head ht_node;
+ struct list_head list;
+ struct mlxsw_sp_nve_ipv6_ht_key key;
+ struct in6_addr addr6;
+};
+
+static const struct rhashtable_params mlxsw_sp_nve_ipv6_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_nve_ipv6_ht_key),
+ .key_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, key),
+ .head_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, ht_node),
+};
+
+int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6,
+ u32 *p_kvdl_index)
+{
+ return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, addr6, p_kvdl_index);
+}
+
+void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp,
+ const struct in6_addr *addr6)
+{
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, addr6);
+}
+
+static struct mlxsw_sp_nve_ipv6_ht_node *
+mlxsw_sp_nve_ipv6_ht_node_lookup(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index)
+{
+ struct mlxsw_sp_nve_ipv6_ht_key key = {};
+
+ ether_addr_copy(key.mac, mac);
+ key.fid_index = fid_index;
+ return rhashtable_lookup_fast(&mlxsw_sp->nve->ipv6_ht, &key,
+ mlxsw_sp_nve_ipv6_ht_params);
+}
+
+static int mlxsw_sp_nve_ipv6_ht_insert(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid_index,
+ const struct in6_addr *addr6)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+ int err;
+
+ ipv6_ht_node = kzalloc(sizeof(*ipv6_ht_node), GFP_KERNEL);
+ if (!ipv6_ht_node)
+ return -ENOMEM;
+
+ ether_addr_copy(ipv6_ht_node->key.mac, mac);
+ ipv6_ht_node->key.fid_index = fid_index;
+ ipv6_ht_node->addr6 = *addr6;
+
+ err = rhashtable_insert_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node,
+ mlxsw_sp_nve_ipv6_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ list_add(&ipv6_ht_node->list, &nve->ipv6_addr_list);
+
+ return 0;
+
+err_rhashtable_insert:
+ kfree(ipv6_ht_node);
+ return err;
+}
+
+static void
+mlxsw_sp_nve_ipv6_ht_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node)
+{
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ list_del(&ipv6_ht_node->list);
+ rhashtable_remove_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node,
+ mlxsw_sp_nve_ipv6_ht_params);
+ kfree(ipv6_ht_node);
+}
+
+int
+mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index,
+ const struct in6_addr *new_addr6)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
+
+ ASSERT_RTNL();
+
+ ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac,
+ fid_index);
+ if (!ipv6_ht_node)
+ return mlxsw_sp_nve_ipv6_ht_insert(mlxsw_sp, mac, fid_index,
+ new_addr6);
+
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6);
+ ipv6_ht_node->addr6 = *new_addr6;
+ return 0;
+}
+
+void mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid_index)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
+
+ ASSERT_RTNL();
+
+ ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac,
+ fid_index);
+ if (WARN_ON(!ipv6_ht_node))
+ return;
+
+ mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node);
+}
+
+static void mlxsw_sp_nve_ipv6_addr_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index)
+{
+ struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node, *tmp;
+ struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
+
+ list_for_each_entry_safe(ipv6_ht_node, tmp, &nve->ipv6_addr_list,
+ list) {
+ if (ipv6_ht_node->key.fid_index != fid_index)
+ continue;
+
+ mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6);
+ mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node);
+ }
+}
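
The list kept alongside ipv6_ht exists so this flush can walk entries linearly by FID, while the hash table keeps lookup by {mac, fid} fast. A minimal sketch of the flush side only, with a plain singly linked list (the hash half is omitted):

#include <stdio.h>
#include <stdlib.h>

struct node {
        unsigned short fid;
        struct node *next;
};

static struct node *head;

static void map_add(unsigned short fid)
{
        struct node *n = malloc(sizeof(*n));

        if (!n)
                return;
        n->fid = fid;           /* the driver also hashes on {mac, fid} */
        n->next = head;
        head = n;
}

static void flush_by_fid(unsigned short fid)
{
        struct node **pp = &head;

        while (*pp) {
                struct node *n = *pp;

                if (n->fid == fid) {    /* unlink and free matches */
                        *pp = n->next;
                        free(n);
                } else {
                        pp = &n->next;
                }
        }
}

int main(void)
{
        struct node *n;

        map_add(10);
        map_add(20);
        map_add(10);
        flush_by_fid(10);
        for (n = head; n; n = n->next)
                printf("fid %u survives\n", n->fid);
        return 0;
}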
+
int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack)
@@ -845,6 +991,7 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid);
mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index);
+ mlxsw_sp_nve_ipv6_addr_flush_by_fid(mlxsw_sp, fid_index);
if (WARN_ON(mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex) ||
mlxsw_sp_fid_vni(fid, &vni)))
@@ -981,7 +1128,13 @@ int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp)
err = rhashtable_init(&nve->mc_list_ht,
&mlxsw_sp_nve_mc_list_ht_params);
if (err)
- goto err_rhashtable_init;
+ goto err_mc_rhashtable_init;
+
+ err = rhashtable_init(&nve->ipv6_ht, &mlxsw_sp_nve_ipv6_ht_params);
+ if (err)
+ goto err_ipv6_rhashtable_init;
+
+ INIT_LIST_HEAD(&nve->ipv6_addr_list);
err = mlxsw_sp_nve_qos_init(mlxsw_sp);
if (err)
@@ -1000,8 +1153,10 @@ int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp)
err_nve_resources_query:
err_nve_ecn_init:
err_nve_qos_init:
+ rhashtable_destroy(&nve->ipv6_ht);
+err_ipv6_rhashtable_init:
rhashtable_destroy(&nve->mc_list_ht);
-err_rhashtable_init:
+err_mc_rhashtable_init:
mlxsw_sp->nve = NULL;
kfree(nve);
return err;
@@ -1010,6 +1165,8 @@ err_rhashtable_init:
void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
{
WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
+ WARN_ON(!list_empty(&mlxsw_sp->nve->ipv6_addr_list));
+ rhashtable_destroy(&mlxsw_sp->nve->ipv6_ht);
rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
kfree(mlxsw_sp->nve);
mlxsw_sp->nve = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 98d1fdc25eac..0d21de1d0395 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -23,6 +23,8 @@ struct mlxsw_sp_nve_config {
struct mlxsw_sp_nve {
struct mlxsw_sp_nve_config config;
struct rhashtable mc_list_ht;
+ struct rhashtable ipv6_ht;
+ struct list_head ipv6_addr_list; /* Tracks hash table nodes for flushing. */
struct mlxsw_sp *mlxsw_sp;
const struct mlxsw_sp_nve_ops **nve_ops_arr;
unsigned int num_nve_tunnels; /* Protected by RTNL */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index d018d2da5949..d309b77a0194 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -10,8 +10,48 @@
#include "spectrum.h"
#include "spectrum_nve.h"
-#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
+#define MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
VXLAN_F_LEARN)
+#define MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS (VXLAN_F_IPV6 | \
+ VXLAN_F_UDP_ZERO_CSUM6_TX | \
+ VXLAN_F_UDP_ZERO_CSUM6_RX)
+
+static bool mlxsw_sp_nve_vxlan_ipv4_flags_check(const struct vxlan_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX");
+ return false;
+ }
+
+ if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
+ return false;
+ }
+
+ return true;
+}
+
+static bool mlxsw_sp_nve_vxlan_ipv6_flags_check(const struct vxlan_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for TX");
+ return false;
+ }
+
+ if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Zero UDP checksum must be allowed for RX");
+ return false;
+ }
+
+ if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
+ return false;
+ }
+
+ return true;
+}
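
Both flag checks follow the same idiom: require the bits that must be set, then reject anything outside the supported mask. A compact standalone version with made-up flag values:

#include <stdbool.h>
#include <stdio.h>

#define F_ZERO_CSUM_TX  (1u << 0)
#define F_ZERO_CSUM_RX  (1u << 1)
#define F_LEARN         (1u << 2)
#define SUPPORTED       (F_ZERO_CSUM_TX | F_ZERO_CSUM_RX | F_LEARN)

static bool flags_check(unsigned int flags)
{
        if (!(flags & F_ZERO_CSUM_TX))
                return false;           /* a required bit is missing */
        if (flags & ~SUPPORTED)
                return false;           /* an unsupported bit is set */
        return true;
}

int main(void)
{
        printf("%d\n", flags_check(F_ZERO_CSUM_TX | F_LEARN));   /* 1 */
        printf("%d\n", flags_check(F_LEARN));                    /* 0 */
        printf("%d\n", flags_check(F_ZERO_CSUM_TX | (1u << 7))); /* 0 */
        return 0;
}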
static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_params *params,
@@ -20,11 +60,6 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
struct vxlan_dev *vxlan = netdev_priv(params->dev);
struct vxlan_config *cfg = &vxlan->cfg;
- if (cfg->saddr.sa.sa_family != AF_INET) {
- NL_SET_ERR_MSG_MOD(extack, "VxLAN: Only IPv4 underlay is supported");
- return false;
- }
-
if (vxlan_addr_multicast(&cfg->remote_ip)) {
NL_SET_ERR_MSG_MOD(extack, "VxLAN: Multicast destination IP is not supported");
return false;
@@ -55,14 +90,15 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
return false;
}
- if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
- NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported");
- return false;
- }
-
- if (cfg->flags & ~MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS) {
- NL_SET_ERR_MSG_MOD(extack, "VxLAN: Unsupported flag");
- return false;
+ switch (cfg->saddr.sa.sa_family) {
+ case AF_INET:
+ if (!mlxsw_sp_nve_vxlan_ipv4_flags_check(cfg, extack))
+ return false;
+ break;
+ case AF_INET6:
+ if (!mlxsw_sp_nve_vxlan_ipv6_flags_check(cfg, extack))
+ return false;
+ break;
}
if (cfg->ttl == 0) {
@@ -90,6 +126,22 @@ static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack);
}
+static void
+mlxsw_sp_nve_vxlan_ul_proto_sip_config(const struct vxlan_config *cfg,
+ struct mlxsw_sp_nve_config *config)
+{
+ switch (cfg->saddr.sa.sa_family) {
+ case AF_INET:
+ config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
+ config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
+ break;
+ case AF_INET6:
+ config->ul_proto = MLXSW_SP_L3_PROTO_IPV6;
+ config->ul_sip.addr6 = cfg->saddr.sin6.sin6_addr;
+ break;
+ }
+}
+
static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_params *params,
struct mlxsw_sp_nve_config *config)
@@ -102,8 +154,7 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
config->flowlabel = cfg->label;
config->learning_en = cfg->flags & VXLAN_F_LEARN ? 1 : 0;
config->ul_tb_id = RT_TABLE_MAIN;
- config->ul_proto = MLXSW_SP_L3_PROTO_IPV4;
- config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr;
+ mlxsw_sp_nve_vxlan_ul_proto_sip_config(cfg, config);
config->udp_dport = cfg->dst_port;
}
@@ -111,6 +162,7 @@ static void
mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
const struct mlxsw_sp_nve_config *config)
{
+ struct in6_addr addr6;
u8 udp_sport;
mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true,
@@ -122,7 +174,18 @@ mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
get_random_bytes(&udp_sport, sizeof(udp_sport));
udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80;
mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport);
- mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4));
+
+ switch (config->ul_proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_tngcr_usipv4_set(tngcr_pl,
+ be32_to_cpu(config->ul_sip.addr4));
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ addr6 = config->ul_sip.addr6;
+ mlxsw_reg_tngcr_usipv6_memcpy_to(tngcr_pl,
+ (const char *)&addr6);
+ break;
+ }
}
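
The source-port prefix above maps a random byte into [0x80, 0xee] (0xee - 0x80 + 1 = 111 values) by reducing modulo the range width and offsetting by 0x80. A quick standalone check of that arithmetic:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int b;

        for (b = 0; b <= 0xff; b++) {
                unsigned int sport = (b % (0xee - 0x80 + 1)) + 0x80;

                assert(sport >= 0x80 && sport <= 0xee);
        }
        puts("all 256 byte values map into [0x80, 0xee]");
        return 0;
}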
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 1a180384e7e8..0ff163fbc775 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -36,7 +36,7 @@ struct mlxsw_sp_ptp_state {
};
struct mlxsw_sp1_ptp_key {
- u8 local_port;
+ u16 local_port;
u8 message_type;
u16 sequence_id;
u8 domain_number;
@@ -406,7 +406,7 @@ mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
* This case is similar to 2) above.
*/
static void mlxsw_sp1_ptp_packet_finish(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port,
+ struct sk_buff *skb, u16 local_port,
bool ingress,
struct skb_shared_hwtstamps *hwtstamps)
{
@@ -524,7 +524,7 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
}
static void mlxsw_sp1_ptp_got_packet(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port,
+ struct sk_buff *skb, u16 local_port,
bool ingress)
{
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -564,7 +564,7 @@ immediate:
}
void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
- u8 local_port, u8 message_type,
+ u16 local_port, u8 message_type,
u8 domain_number, u16 sequence_id,
u64 timestamp)
{
@@ -599,14 +599,14 @@ void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
}
void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port)
+ u16 local_port)
{
skb_reset_mac_header(skb);
mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, true);
}
void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, false);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 1d43a3755285..c06cd1384bca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -31,13 +31,13 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state);
void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port);
+ u16 local_port);
void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port);
+ struct sk_buff *skb, u16 local_port);
void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
- u8 local_port, u8 message_type,
+ u16 local_port, u8 message_type,
u8 domain_number, u16 sequence_id,
u64 timestamp);
@@ -80,20 +80,20 @@ static inline void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
}
static inline void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
}
static inline void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
dev_kfree_skb_any(skb);
}
static inline void
mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
- u8 local_port, u8 message_type,
+ u16 local_port, u8 message_type,
u8 domain_number,
u16 sequence_id, u64 timestamp)
{
@@ -159,13 +159,13 @@ static inline void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
}
static inline void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
}
static inline void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
- struct sk_buff *skb, u8 local_port)
+ struct sk_buff *skb, u16 local_port)
{
dev_kfree_skb_any(skb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 217e3b351dfe..d40762cfc453 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1307,6 +1307,10 @@ mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
addr_prefix_len = 32;
break;
case MLXSW_SP_L3_PROTO_IPV6:
+ addrp = &addr->addr6;
+ addr_len = 16;
+ addr_prefix_len = 128;
+ break;
default:
WARN_ON(1);
return NULL;
@@ -7002,6 +7006,8 @@ mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
+ u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
int ifindex = nhgi->nexthops[0].ifindex;
struct mlxsw_sp_ipip_entry *ipip_entry;
@@ -7015,6 +7021,14 @@ mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
ipip_entry);
}
+ if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
+ MLXSW_SP_L3_PROTO_IPV6, &dip)) {
+ u32 tunnel_index;
+
+ tunnel_index = router->nve_decap_config.tunnel_index;
+ fib_entry->decap.tunnel_index = tunnel_index;
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
+ }
return 0;
}
@@ -8369,9 +8383,6 @@ mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
int id;
idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
- if (!profile)
- continue;
-
if (ether_addr_equal_masked(profile->mac_prefix, mac,
mlxsw_sp->mac_mask))
return profile;
@@ -8494,7 +8505,8 @@ mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
u8 mac_profile;
int err;
- if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
+ if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
+ !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
@@ -9343,7 +9355,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
-u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
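
mlxsw_sp_router_port() hands out a pseudo-port one past the highest physical port, so its return type widens together with local_port throughout this patch; once the port count can reach 255, the result no longer fits in a u8. Trivially:

#include <stdio.h>

static unsigned short router_port(unsigned short max_ports)
{
        return max_ports + 1;   /* first number past the physical ports */
}

int main(void)
{
        /* 256 already overflows an 8-bit port number */
        printf("%u\n", (unsigned int)router_port(255));
        return 0;
}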
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index f5f819aa9a65..f9671cc53002 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -37,7 +37,7 @@ struct mlxsw_sp_span {
struct mlxsw_sp_span_analyzed_port {
struct list_head list; /* Member of analyzed_ports_list */
refcount_t ref_count;
- u8 local_port;
+ u16 local_port;
bool ingress;
};
@@ -46,7 +46,7 @@ struct mlxsw_sp_span_trigger_entry {
struct mlxsw_sp_span *span;
const struct mlxsw_sp_span_trigger_ops *ops;
refcount_t ref_count;
- u8 local_port;
+ u16 local_port;
enum mlxsw_sp_span_trigger trigger;
struct mlxsw_sp_span_trigger_parms parms;
};
@@ -179,7 +179,7 @@ mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = sparms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -199,7 +199,7 @@ mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -480,7 +480,7 @@ mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = sparms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -584,7 +584,7 @@ mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = sparms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -650,7 +650,7 @@ mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
{
struct mlxsw_sp_port *dest_port = sparms.dest_port;
struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
- u8 local_port = dest_port->local_port;
+ u16 local_port = dest_port->local_port;
char mpat_pl[MLXSW_REG_MPAT_LEN];
int pa_id = span_entry->id;
@@ -997,7 +997,7 @@ static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_por
}
static struct mlxsw_sp_span_analyzed_port *
-mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
+mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u16 local_port,
bool ingress)
{
struct mlxsw_sp_span_analyzed_port *analyzed_port;
@@ -1165,7 +1165,7 @@ int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_span_analyzed_port *analyzed_port;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
int err = 0;
mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
@@ -1193,7 +1193,7 @@ void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_span_analyzed_port *analyzed_port;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 81c7e8a7fcf5..65c1724c63b0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -865,17 +865,17 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
u16 mid_idx, bool add)
{
- char *smid_pl;
+ char *smid2_pl;
int err;
- smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
- if (!smid_pl)
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
return -ENOMEM;
- mlxsw_reg_smid_pack(smid_pl, mid_idx,
- mlxsw_sp_router_port(mlxsw_sp), add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
- kfree(smid_pl);
+ mlxsw_reg_smid2_pack(smid2_pl, mid_idx,
+ mlxsw_sp_router_port(mlxsw_sp), add);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+ kfree(smid2_pl);
return err;
}
@@ -980,7 +980,7 @@ mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_bridge_device *bridge_device;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u16 vid = mlxsw_sp_port_vlan->vid;
struct mlxsw_sp_fid *fid;
int err;
@@ -1029,7 +1029,7 @@ mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
- u8 local_port = mlxsw_sp_port->local_port;
+ u16 local_port = mlxsw_sp_port->local_port;
u16 vid = mlxsw_sp_port_vlan->vid;
mlxsw_sp_port_vlan->fid = NULL;
@@ -1290,38 +1290,52 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
MLXSW_REG_SFD_OP_WRITE_REMOVE;
}
-static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
- const char *mac, u16 fid,
- enum mlxsw_sp_l3proto proto,
- const union mlxsw_sp_l3addr *addr,
- bool adding, bool dynamic)
+static int
+mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic,
+ const char *mac, u16 fid, __be32 addr, bool adding)
{
- enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
char *sfd_pl;
u8 num_rec;
u32 uip;
int err;
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- uip = be32_to_cpu(addr->addr4);
- sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- default:
- WARN_ON(1);
- return -EOPNOTSUPP;
- }
+ sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+ if (!sfd_pl)
+ return -ENOMEM;
+
+ uip = be32_to_cpu(addr);
+ mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
+ mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
+ mlxsw_sp_sfd_rec_policy(dynamic), mac,
+ fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
+ kfree(sfd_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ u32 kvdl_index, bool adding)
+{
+ char *sfd_pl;
+ u8 num_rec;
+ int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
if (!sfd_pl)
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
- mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
- MLXSW_REG_SFD_REC_ACTION_NOP, uip,
- sfd_proto);
+ mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid,
+ MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1335,7 +1349,80 @@ out:
return err;
}
-static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ const struct in6_addr *addr)
+{
+ u32 kvdl_index;
+ int err;
+
+ err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid,
+ kvdl_index, true);
+ if (err)
+ goto err_sfd_write;
+
+ err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr);
+ if (err)
+ /* Replace can fail only when creating a new mapping, so
+ * removing the FDB entry in the error path is OK.
+ */
+ goto err_addr_replace;
+
+ return 0;
+
+err_addr_replace:
+ mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index,
+ false);
+err_sfd_write:
+ mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
+ return err;
+}
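
The error path in mlxsw_sp_port_fdb_tun_uc_op6_add() is the standard goto ladder: each completed step gets its undo, executed in reverse order, with the comment above justifying why undoing the SFD write is safe. The generic shape, with hypothetical step/undo names:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }  /* force the failure path */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int op_add(void)
{
        int err;

        err = step_a();
        if (err)
                return err;

        err = step_b();
        if (err)
                goto err_b;

        err = step_c();
        if (err)
                goto err_c;

        return 0;

err_c:
        undo_b();               /* unwind falls through to undo_a() */
err_b:
        undo_a();
        return err;
}

int main(void)
{
        printf("op_add() = %d\n", op_add());
        return 0;
}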
+
+static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ const struct in6_addr *addr)
+{
+ mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid);
+ mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false);
+ mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
+}
+
+static int
+mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ u16 fid, const struct in6_addr *addr, bool adding)
+{
+ if (adding)
+ return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid,
+ addr);
+
+ mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr);
+ return 0;
+}
+
+static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u16 fid,
+ enum mlxsw_sp_l3proto proto,
+ const union mlxsw_sp_l3addr *addr,
+ bool adding, bool dynamic)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid,
+ addr->addr4, adding);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid,
+ &addr->addr6, adding);
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+}
+
+static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const char *mac, u16 fid, bool adding,
enum mlxsw_reg_sfd_rec_action action,
enum mlxsw_reg_sfd_rec_policy policy)
@@ -1363,7 +1450,7 @@ out:
return err;
}
-static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
const char *mac, u16 fid, bool adding,
bool dynamic)
{
@@ -1477,30 +1564,30 @@ static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
long *ports_bitmap,
bool set_router_port)
{
- char *smid_pl;
+ char *smid2_pl;
int err, i;
- smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
- if (!smid_pl)
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
return -ENOMEM;
- mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
+ mlxsw_reg_smid2_pack(smid2_pl, mid_idx, 0, false);
for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
if (mlxsw_sp->ports[i])
- mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
+ mlxsw_reg_smid2_port_mask_set(smid2_pl, i, 1);
}
- mlxsw_reg_smid_port_mask_set(smid_pl,
- mlxsw_sp_router_port(mlxsw_sp), 1);
+ mlxsw_reg_smid2_port_mask_set(smid2_pl,
+ mlxsw_sp_router_port(mlxsw_sp), 1);
for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
- mlxsw_reg_smid_port_set(smid_pl, i, 1);
+ mlxsw_reg_smid2_port_set(smid2_pl, i, 1);
- mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
- set_router_port);
+ mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp),
+ set_router_port);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
- kfree(smid_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+ kfree(smid2_pl);
return err;
}
@@ -1508,16 +1595,16 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 mid_idx, bool add)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char *smid_pl;
+ char *smid2_pl;
int err;
- smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
- if (!smid_pl)
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
return -ENOMEM;
- mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
- kfree(smid_pl);
+ mlxsw_reg_smid2_pack(smid2_pl, mid_idx, mlxsw_sp_port->local_port, add);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+ kfree(smid2_pl);
return err;
}
@@ -2536,7 +2623,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_port *mlxsw_sp_port;
enum switchdev_notifier_type type;
char mac[ETH_ALEN];
- u8 local_port;
+ u16 local_port;
u16 vid, fid;
bool do_notification = true;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 26d01adbedad..47b061b99160 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -60,7 +60,7 @@ enum {
};
static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
- u8 local_port,
+ u16 local_port,
struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
@@ -85,7 +85,7 @@ static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
return 0;
}
-static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct devlink_port *in_devlink_port;
@@ -109,7 +109,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
consume_skb(skb);
}
-static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
u32 cookie_index = mlxsw_skb_cb(skb)->rx_md_info.cookie_index;
@@ -138,7 +138,7 @@ static void mlxsw_sp_rx_acl_drop_listener(struct sk_buff *skb, u8 local_port,
consume_skb(skb);
}
-static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
+static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct devlink_port *in_devlink_port;
@@ -164,7 +164,7 @@ static int __mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
return 0;
}
-static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
int err;
@@ -176,14 +176,14 @@ static void mlxsw_sp_rx_no_mark_listener(struct sk_buff *skb, u8 local_port,
netif_receive_skb(skb);
}
-static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_mark_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
skb->offload_fwd_mark = 1;
mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
}
-static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
skb->offload_l3_fwd_mark = 1;
@@ -191,7 +191,7 @@ static void mlxsw_sp_rx_l3_mark_listener(struct sk_buff *skb, u8 local_port,
mlxsw_sp_rx_no_mark_listener(skb, local_port, trap_ctx);
}
-static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_ptp_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
@@ -212,7 +212,7 @@ static struct mlxsw_sp_port *
mlxsw_sp_sample_tx_port_get(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_rx_md_info *rx_md_info)
{
- u8 local_port;
+ u16 local_port;
if (!rx_md_info->tx_port_valid)
return NULL;
@@ -257,7 +257,7 @@ static void mlxsw_sp_psample_md_init(struct mlxsw_sp *mlxsw_sp,
md->latency <<= MLXSW_SP_MIRROR_LATENCY_SHIFT;
}
-static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
@@ -293,7 +293,7 @@ out:
consume_skb(skb);
}
-static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_sample_tx_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct mlxsw_rx_md_info *rx_md_info = &mlxsw_skb_cb(skb)->rx_md_info;
@@ -343,7 +343,7 @@ out:
consume_skb(skb);
}
-static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u8 local_port,
+static void mlxsw_sp_rx_sample_acl_listener(struct sk_buff *skb, u16 local_port,
void *trap_ctx)
{
struct mlxsw_sp *mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 2e25798c610e..7f49042484bd 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -321,6 +321,8 @@ static int ks8851_probe_par(struct platform_device *pdev)
return ret;
netdev->irq = platform_get_irq(pdev, 0);
+ if (netdev->irq < 0)
+ return netdev->irq;
return ks8851_probe_common(netdev, dev, msg_enable);
}
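
platform_get_irq() returns a negative errno on failure, so probe can propagate it unchanged; the fix above closes the gap where a negative value was previously stored into netdev->irq and probing continued. The pattern in miniature (get_irq() is a stand-in, not a kernel function):

#include <errno.h>
#include <stdio.h>

static int get_irq(int present)
{
        return present ? 42 : -ENXIO;   /* illustrative values */
}

static int probe(int present)
{
        int irq = get_irq(present);

        if (irq < 0)
                return irq;             /* propagate the errno as-is */
        printf("using irq %d\n", irq);
        return 0;
}

int main(void)
{
        probe(1);
        printf("probe without irq -> %d\n", probe(0));
        return 0;
}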
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 99c0c1491af2..d024983815da 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6317,11 +6317,15 @@ static int netdev_set_pauseparam(struct net_device *dev,
* netdev_get_ringparam - get tx/rx ring parameters
* @dev: Network device.
* @ring: Ethtool RING settings data structure.
+ * @kernel_ring: Ethtool external RING settings data structure.
+ * @extack: Netlink extended ack handle.
*
* This procedure returns the TX/RX ring settings.
*/
static void netdev_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 735eea1dacf1..ed7a35c3ceac 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -55,6 +55,7 @@ config LAN743X
To compile this driver as a module, choose M here. The module will be
called lan743x.
+source "drivers/net/ethernet/microchip/lan966x/Kconfig"
source "drivers/net/ethernet/microchip/sparx5/Kconfig"
endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index c77dc0379bfd..9faa41436198 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_LAN743X) += lan743x.o
lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
+obj-$(CONFIG_LAN966X_SWITCH) += lan966x/
obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 7d7647481f70..8c6390d95158 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1739,13 +1739,10 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx)
}
if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
DMA_BIT_MASK(64))) {
- if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
- DMA_BIT_MASK(32))) {
- dev_warn(&tx->adapter->pdev->dev,
- "lan743x_: No suitable DMA available\n");
- ret = -ENOMEM;
- goto cleanup;
- }
+ dev_warn(&tx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
}
ring_allocation_size = ALIGN(tx->ring_size *
sizeof(struct lan743x_tx_descriptor),
@@ -2284,13 +2281,10 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
}
if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
DMA_BIT_MASK(64))) {
- if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
- DMA_BIT_MASK(32))) {
- dev_warn(&rx->adapter->pdev->dev,
- "lan743x_: No suitable DMA available\n");
- ret = -ENOMEM;
- goto cleanup;
- }
+ dev_warn(&rx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
}
ring_allocation_size = ALIGN(rx->ring_size *
sizeof(struct lan743x_rx_descriptor),
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 9380e396f648..8b7a8d879083 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1305,12 +1305,6 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags) {
- netif_warn(adapter, drv, adapter->netdev,
- "ignoring hwtstamp_config.flags == 0x%08X, expected 0\n",
- config.flags);
- }
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
new file mode 100644
index 000000000000..ac273f84b69e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -0,0 +1,9 @@
+config LAN966X_SWITCH
+ tristate "Lan966x switch driver"
+ depends on HAS_IOMEM
+ depends on OF
+ depends on NET_SWITCHDEV
+ select PHYLINK
+ select PACKING
+ help
+ This driver supports the Lan966x network switch device.
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
new file mode 100644
index 000000000000..040cfff9f577
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip Lan966x network device drivers.
+#
+
+obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
+
+lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
+ lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
+ lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
new file mode 100644
index 000000000000..614f12c2fe6a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/netdevice.h>
+
+#include "lan966x_main.h"
+
+/* Number of traffic classes */
+#define LAN966X_NUM_TC 8
+#define LAN966X_STATS_CHECK_DELAY (2 * HZ)
+
+static const struct lan966x_stat_layout lan966x_stats_layout[] = {
+ { .name = "rx_octets", .offset = 0x00, },
+ { .name = "rx_unicast", .offset = 0x01, },
+ { .name = "rx_multicast", .offset = 0x02 },
+ { .name = "rx_broadcast", .offset = 0x03 },
+ { .name = "rx_short", .offset = 0x04 },
+ { .name = "rx_frag", .offset = 0x05 },
+ { .name = "rx_jabber", .offset = 0x06 },
+ { .name = "rx_crc", .offset = 0x07 },
+ { .name = "rx_symbol_err", .offset = 0x08 },
+ { .name = "rx_sz_64", .offset = 0x09 },
+ { .name = "rx_sz_65_127", .offset = 0x0a},
+ { .name = "rx_sz_128_255", .offset = 0x0b},
+ { .name = "rx_sz_256_511", .offset = 0x0c },
+ { .name = "rx_sz_512_1023", .offset = 0x0d },
+ { .name = "rx_sz_1024_1526", .offset = 0x0e },
+ { .name = "rx_sz_jumbo", .offset = 0x0f },
+ { .name = "rx_pause", .offset = 0x10 },
+ { .name = "rx_control", .offset = 0x11 },
+ { .name = "rx_long", .offset = 0x12 },
+ { .name = "rx_cat_drop", .offset = 0x13 },
+ { .name = "rx_red_prio_0", .offset = 0x14 },
+ { .name = "rx_red_prio_1", .offset = 0x15 },
+ { .name = "rx_red_prio_2", .offset = 0x16 },
+ { .name = "rx_red_prio_3", .offset = 0x17 },
+ { .name = "rx_red_prio_4", .offset = 0x18 },
+ { .name = "rx_red_prio_5", .offset = 0x19 },
+ { .name = "rx_red_prio_6", .offset = 0x1a },
+ { .name = "rx_red_prio_7", .offset = 0x1b },
+ { .name = "rx_yellow_prio_0", .offset = 0x1c },
+ { .name = "rx_yellow_prio_1", .offset = 0x1d },
+ { .name = "rx_yellow_prio_2", .offset = 0x1e },
+ { .name = "rx_yellow_prio_3", .offset = 0x1f },
+ { .name = "rx_yellow_prio_4", .offset = 0x20 },
+ { .name = "rx_yellow_prio_5", .offset = 0x21 },
+ { .name = "rx_yellow_prio_6", .offset = 0x22 },
+ { .name = "rx_yellow_prio_7", .offset = 0x23 },
+ { .name = "rx_green_prio_0", .offset = 0x24 },
+ { .name = "rx_green_prio_1", .offset = 0x25 },
+ { .name = "rx_green_prio_2", .offset = 0x26 },
+ { .name = "rx_green_prio_3", .offset = 0x27 },
+ { .name = "rx_green_prio_4", .offset = 0x28 },
+ { .name = "rx_green_prio_5", .offset = 0x29 },
+ { .name = "rx_green_prio_6", .offset = 0x2a },
+ { .name = "rx_green_prio_7", .offset = 0x2b },
+ { .name = "rx_assembly_err", .offset = 0x2c },
+ { .name = "rx_smd_err", .offset = 0x2d },
+ { .name = "rx_assembly_ok", .offset = 0x2e },
+ { .name = "rx_merge_frag", .offset = 0x2f },
+ { .name = "rx_pmac_octets", .offset = 0x30, },
+ { .name = "rx_pmac_unicast", .offset = 0x31, },
+ { .name = "rx_pmac_multicast", .offset = 0x32 },
+ { .name = "rx_pmac_broadcast", .offset = 0x33 },
+ { .name = "rx_pmac_short", .offset = 0x34 },
+ { .name = "rx_pmac_frag", .offset = 0x35 },
+ { .name = "rx_pmac_jabber", .offset = 0x36 },
+ { .name = "rx_pmac_crc", .offset = 0x37 },
+ { .name = "rx_pmac_symbol_err", .offset = 0x38 },
+ { .name = "rx_pmac_sz_64", .offset = 0x39 },
+ { .name = "rx_pmac_sz_65_127", .offset = 0x3a },
+ { .name = "rx_pmac_sz_128_255", .offset = 0x3b },
+ { .name = "rx_pmac_sz_256_511", .offset = 0x3c },
+ { .name = "rx_pmac_sz_512_1023", .offset = 0x3d },
+ { .name = "rx_pmac_sz_1024_1526", .offset = 0x3e },
+ { .name = "rx_pmac_sz_jumbo", .offset = 0x3f },
+ { .name = "rx_pmac_pause", .offset = 0x40 },
+ { .name = "rx_pmac_control", .offset = 0x41 },
+ { .name = "rx_pmac_long", .offset = 0x42 },
+
+ { .name = "tx_octets", .offset = 0x80, },
+ { .name = "tx_unicast", .offset = 0x81, },
+ { .name = "tx_multicast", .offset = 0x82 },
+ { .name = "tx_broadcast", .offset = 0x83 },
+ { .name = "tx_col", .offset = 0x84 },
+ { .name = "tx_drop", .offset = 0x85 },
+ { .name = "tx_pause", .offset = 0x86 },
+ { .name = "tx_sz_64", .offset = 0x87 },
+ { .name = "tx_sz_65_127", .offset = 0x88 },
+ { .name = "tx_sz_128_255", .offset = 0x89 },
+ { .name = "tx_sz_256_511", .offset = 0x8a },
+ { .name = "tx_sz_512_1023", .offset = 0x8b },
+ { .name = "tx_sz_1024_1526", .offset = 0x8c },
+ { .name = "tx_sz_jumbo", .offset = 0x8d },
+ { .name = "tx_yellow_prio_0", .offset = 0x8e },
+ { .name = "tx_yellow_prio_1", .offset = 0x8f },
+ { .name = "tx_yellow_prio_2", .offset = 0x90 },
+ { .name = "tx_yellow_prio_3", .offset = 0x91 },
+ { .name = "tx_yellow_prio_4", .offset = 0x92 },
+ { .name = "tx_yellow_prio_5", .offset = 0x93 },
+ { .name = "tx_yellow_prio_6", .offset = 0x94 },
+ { .name = "tx_yellow_prio_7", .offset = 0x95 },
+ { .name = "tx_green_prio_0", .offset = 0x96 },
+ { .name = "tx_green_prio_1", .offset = 0x97 },
+ { .name = "tx_green_prio_2", .offset = 0x98 },
+ { .name = "tx_green_prio_3", .offset = 0x99 },
+ { .name = "tx_green_prio_4", .offset = 0x9a },
+ { .name = "tx_green_prio_5", .offset = 0x9b },
+ { .name = "tx_green_prio_6", .offset = 0x9c },
+ { .name = "tx_green_prio_7", .offset = 0x9d },
+ { .name = "tx_aged", .offset = 0x9e },
+ { .name = "tx_llct", .offset = 0x9f },
+ { .name = "tx_ct", .offset = 0xa0 },
+ { .name = "tx_mm_hold", .offset = 0xa1 },
+ { .name = "tx_merge_frag", .offset = 0xa2 },
+ { .name = "tx_pmac_octets", .offset = 0xa3, },
+ { .name = "tx_pmac_unicast", .offset = 0xa4, },
+ { .name = "tx_pmac_multicast", .offset = 0xa5 },
+ { .name = "tx_pmac_broadcast", .offset = 0xa6 },
+ { .name = "tx_pmac_pause", .offset = 0xa7 },
+ { .name = "tx_pmac_sz_64", .offset = 0xa8 },
+ { .name = "tx_pmac_sz_65_127", .offset = 0xa9 },
+ { .name = "tx_pmac_sz_128_255", .offset = 0xaa },
+ { .name = "tx_pmac_sz_256_511", .offset = 0xab },
+ { .name = "tx_pmac_sz_512_1023", .offset = 0xac },
+ { .name = "tx_pmac_sz_1024_1526", .offset = 0xad },
+ { .name = "tx_pmac_sz_jumbo", .offset = 0xae },
+
+ { .name = "dr_local", .offset = 0x100 },
+ { .name = "dr_tail", .offset = 0x101 },
+ { .name = "dr_yellow_prio_0", .offset = 0x102 },
+ { .name = "dr_yellow_prio_1", .offset = 0x103 },
+ { .name = "dr_yellow_prio_2", .offset = 0x104 },
+ { .name = "dr_yellow_prio_3", .offset = 0x105 },
+ { .name = "dr_yellow_prio_4", .offset = 0x106 },
+ { .name = "dr_yellow_prio_5", .offset = 0x107 },
+ { .name = "dr_yellow_prio_6", .offset = 0x108 },
+ { .name = "dr_yellow_prio_7", .offset = 0x109 },
+ { .name = "dr_green_prio_0", .offset = 0x10a },
+ { .name = "dr_green_prio_1", .offset = 0x10b },
+ { .name = "dr_green_prio_2", .offset = 0x10c },
+ { .name = "dr_green_prio_3", .offset = 0x10d },
+ { .name = "dr_green_prio_4", .offset = 0x10e },
+ { .name = "dr_green_prio_5", .offset = 0x10f },
+ { .name = "dr_green_prio_6", .offset = 0x110 },
+ { .name = "dr_green_prio_7", .offset = 0x111 },
+};
+
+/* The following numbers are indexes into lan966x_stats_layout[] */
+#define SYS_COUNT_RX_OCT 0
+#define SYS_COUNT_RX_UC 1
+#define SYS_COUNT_RX_MC 2
+#define SYS_COUNT_RX_BC 3
+#define SYS_COUNT_RX_SHORT 4
+#define SYS_COUNT_RX_FRAG 5
+#define SYS_COUNT_RX_JABBER 6
+#define SYS_COUNT_RX_CRC 7
+#define SYS_COUNT_RX_SYMBOL_ERR 8
+#define SYS_COUNT_RX_SZ_64 9
+#define SYS_COUNT_RX_SZ_65_127 10
+#define SYS_COUNT_RX_SZ_128_255 11
+#define SYS_COUNT_RX_SZ_256_511 12
+#define SYS_COUNT_RX_SZ_512_1023 13
+#define SYS_COUNT_RX_SZ_1024_1526 14
+#define SYS_COUNT_RX_SZ_JUMBO 15
+#define SYS_COUNT_RX_PAUSE 16
+#define SYS_COUNT_RX_CONTROL 17
+#define SYS_COUNT_RX_LONG 18
+#define SYS_COUNT_RX_CAT_DROP 19
+#define SYS_COUNT_RX_RED_PRIO_0 20
+#define SYS_COUNT_RX_RED_PRIO_1 21
+#define SYS_COUNT_RX_RED_PRIO_2 22
+#define SYS_COUNT_RX_RED_PRIO_3 23
+#define SYS_COUNT_RX_RED_PRIO_4 24
+#define SYS_COUNT_RX_RED_PRIO_5 25
+#define SYS_COUNT_RX_RED_PRIO_6 26
+#define SYS_COUNT_RX_RED_PRIO_7 27
+#define SYS_COUNT_RX_YELLOW_PRIO_0 28
+#define SYS_COUNT_RX_YELLOW_PRIO_1 29
+#define SYS_COUNT_RX_YELLOW_PRIO_2 30
+#define SYS_COUNT_RX_YELLOW_PRIO_3 31
+#define SYS_COUNT_RX_YELLOW_PRIO_4 32
+#define SYS_COUNT_RX_YELLOW_PRIO_5 33
+#define SYS_COUNT_RX_YELLOW_PRIO_6 34
+#define SYS_COUNT_RX_YELLOW_PRIO_7 35
+#define SYS_COUNT_RX_GREEN_PRIO_0 36
+#define SYS_COUNT_RX_GREEN_PRIO_1 37
+#define SYS_COUNT_RX_GREEN_PRIO_2 38
+#define SYS_COUNT_RX_GREEN_PRIO_3 39
+#define SYS_COUNT_RX_GREEN_PRIO_4 40
+#define SYS_COUNT_RX_GREEN_PRIO_5 41
+#define SYS_COUNT_RX_GREEN_PRIO_6 42
+#define SYS_COUNT_RX_GREEN_PRIO_7 43
+#define SYS_COUNT_RX_ASSEMBLY_ERR 44
+#define SYS_COUNT_RX_SMD_ERR 45
+#define SYS_COUNT_RX_ASSEMBLY_OK 46
+#define SYS_COUNT_RX_MERGE_FRAG 47
+#define SYS_COUNT_RX_PMAC_OCT 48
+#define SYS_COUNT_RX_PMAC_UC 49
+#define SYS_COUNT_RX_PMAC_MC 50
+#define SYS_COUNT_RX_PMAC_BC 51
+#define SYS_COUNT_RX_PMAC_SHORT 52
+#define SYS_COUNT_RX_PMAC_FRAG 53
+#define SYS_COUNT_RX_PMAC_JABBER 54
+#define SYS_COUNT_RX_PMAC_CRC 55
+#define SYS_COUNT_RX_PMAC_SYMBOL_ERR 56
+#define SYS_COUNT_RX_PMAC_SZ_64 57
+#define SYS_COUNT_RX_PMAC_SZ_65_127 58
+#define SYS_COUNT_RX_PMAC_SZ_128_255 59
+#define SYS_COUNT_RX_PMAC_SZ_256_511 60
+#define SYS_COUNT_RX_PMAC_SZ_512_1023 61
+#define SYS_COUNT_RX_PMAC_SZ_1024_1526 62
+#define SYS_COUNT_RX_PMAC_SZ_JUMBO 63
+#define SYS_COUNT_RX_PMAC_PAUSE 64
+#define SYS_COUNT_RX_PMAC_CONTROL 65
+#define SYS_COUNT_RX_PMAC_LONG 66
+
+#define SYS_COUNT_TX_OCT 67
+#define SYS_COUNT_TX_UC 68
+#define SYS_COUNT_TX_MC 69
+#define SYS_COUNT_TX_BC 70
+#define SYS_COUNT_TX_COL 71
+#define SYS_COUNT_TX_DROP 72
+#define SYS_COUNT_TX_PAUSE 73
+#define SYS_COUNT_TX_SZ_64 74
+#define SYS_COUNT_TX_SZ_65_127 75
+#define SYS_COUNT_TX_SZ_128_255 76
+#define SYS_COUNT_TX_SZ_256_511 77
+#define SYS_COUNT_TX_SZ_512_1023 78
+#define SYS_COUNT_TX_SZ_1024_1526 79
+#define SYS_COUNT_TX_SZ_JUMBO 80
+#define SYS_COUNT_TX_YELLOW_PRIO_0 81
+#define SYS_COUNT_TX_YELLOW_PRIO_1 82
+#define SYS_COUNT_TX_YELLOW_PRIO_2 83
+#define SYS_COUNT_TX_YELLOW_PRIO_3 84
+#define SYS_COUNT_TX_YELLOW_PRIO_4 85
+#define SYS_COUNT_TX_YELLOW_PRIO_5 86
+#define SYS_COUNT_TX_YELLOW_PRIO_6 87
+#define SYS_COUNT_TX_YELLOW_PRIO_7 88
+#define SYS_COUNT_TX_GREEN_PRIO_0 89
+#define SYS_COUNT_TX_GREEN_PRIO_1 90
+#define SYS_COUNT_TX_GREEN_PRIO_2 91
+#define SYS_COUNT_TX_GREEN_PRIO_3 92
+#define SYS_COUNT_TX_GREEN_PRIO_4 93
+#define SYS_COUNT_TX_GREEN_PRIO_5 94
+#define SYS_COUNT_TX_GREEN_PRIO_6 95
+#define SYS_COUNT_TX_GREEN_PRIO_7 96
+#define SYS_COUNT_TX_AGED 97
+#define SYS_COUNT_TX_LLCT 98
+#define SYS_COUNT_TX_CT 99
+#define SYS_COUNT_TX_MM_HOLD 100
+#define SYS_COUNT_TX_MERGE_FRAG 101
+#define SYS_COUNT_TX_PMAC_OCT 102
+#define SYS_COUNT_TX_PMAC_UC 103
+#define SYS_COUNT_TX_PMAC_MC 104
+#define SYS_COUNT_TX_PMAC_BC 105
+#define SYS_COUNT_TX_PMAC_PAUSE 106
+#define SYS_COUNT_TX_PMAC_SZ_64 107
+#define SYS_COUNT_TX_PMAC_SZ_65_127 108
+#define SYS_COUNT_TX_PMAC_SZ_128_255 109
+#define SYS_COUNT_TX_PMAC_SZ_256_511 110
+#define SYS_COUNT_TX_PMAC_SZ_512_1023 111
+#define SYS_COUNT_TX_PMAC_SZ_1024_1526 112
+#define SYS_COUNT_TX_PMAC_SZ_JUMBO 113
+
+#define SYS_COUNT_DR_LOCAL 114
+#define SYS_COUNT_DR_TAIL 115
+#define SYS_COUNT_DR_YELLOW_PRIO_0 116
+#define SYS_COUNT_DR_YELLOW_PRIO_1 117
+#define SYS_COUNT_DR_YELLOW_PRIO_2 118
+#define SYS_COUNT_DR_YELLOW_PRIO_3 119
+#define SYS_COUNT_DR_YELLOW_PRIO_4 120
+#define SYS_COUNT_DR_YELLOW_PRIO_5 121
+#define SYS_COUNT_DR_YELLOW_PRIO_6 122
+#define SYS_COUNT_DR_YELLOW_PRIO_7 123
+#define SYS_COUNT_DR_GREEN_PRIO_0 124
+#define SYS_COUNT_DR_GREEN_PRIO_1 125
+#define SYS_COUNT_DR_GREEN_PRIO_2 126
+#define SYS_COUNT_DR_GREEN_PRIO_3 127
+#define SYS_COUNT_DR_GREEN_PRIO_4 128
+#define SYS_COUNT_DR_GREEN_PRIO_5 129
+#define SYS_COUNT_DR_GREEN_PRIO_6 130
+#define SYS_COUNT_DR_GREEN_PRIO_7 131
+
+/* Add a possibly wrapping 32 bit value to a 64 bit counter */
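+/* Example: if *cnt holds 0x1fffffff0 and the hardware now reads 0x10, val
+ * is below the stored low word, so a wrap is assumed and the result is
+ * 0x200000010.
+ */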
+static void lan966x_add_cnt(u64 *cnt, u32 val)
+{
+ if (val < (*cnt & U32_MAX))
+ *cnt += (u64)1 << 32; /* value has wrapped */
+
+ *cnt = (*cnt & ~(u64)U32_MAX) + val;
+}
+
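+/* The hardware exposes one port's counters at a time: SYS_STAT_CFG selects
+ * the port view, and the counters then appear at SYS_CNT(offset) for the
+ * offsets listed in lan966x_stats_layout[].
+ */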
+static void lan966x_stats_update(struct lan966x *lan966x)
+{
+ int i, j;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ uint idx = i * lan966x->num_stats;
+
+ lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(i),
+ lan966x, SYS_STAT_CFG);
+
+ for (j = 0; j < lan966x->num_stats; j++) {
+ u32 offset = lan966x->stats_layout[j].offset;
+
+ lan966x_add_cnt(&lan966x->stats[idx++],
+ lan_rd(lan966x, SYS_CNT(offset)));
+ }
+ }
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+static int lan966x_get_sset_count(struct net_device *dev, int sset)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return lan966x->num_stats;
+}
+
+static void lan966x_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct lan966x_port *port = netdev_priv(netdev);
+ struct lan966x *lan966x = port->lan966x;
+ int i;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < lan966x->num_stats; i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ lan966x->stats_layout[i].name, ETH_GSTRING_LEN);
+}
+
+static void lan966x_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int i;
+
+ /* Pull fresh values from hardware before copying the counters out */
+ lan966x_stats_update(lan966x);
+
+ /* Copy all counters */
+ for (i = 0; i < lan966x->num_stats; i++)
+ *data++ = lan966x->stats[port->chip_port *
+ lan966x->num_stats + i];
+}
+
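+/* The eth_mac stats sum the regular counters with their pmac_* twins,
+ * which presumably come from the preemptible MAC of a frame-preemption
+ * capable port, so both halves of the traffic are accounted for.
+ */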
+static void lan966x_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+
+ lan966x_stats_update(lan966x);
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ mac_stats->FramesTransmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_UC] +
+ lan966x->stats[idx + SYS_COUNT_TX_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_BC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_UC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
+ mac_stats->SingleCollisionFrames =
+ lan966x->stats[idx + SYS_COUNT_TX_COL];
+ mac_stats->MultipleCollisionFrames = 0;
+ mac_stats->FramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_UC] +
+ lan966x->stats[idx + SYS_COUNT_RX_MC] +
+ lan966x->stats[idx + SYS_COUNT_RX_BC];
+ mac_stats->FrameCheckSequenceErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC];
+ mac_stats->AlignmentErrors = 0;
+ mac_stats->OctetsTransmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
+ mac_stats->FramesWithDeferredXmissions =
+ lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD];
+ mac_stats->LateCollisions = 0;
+ mac_stats->FramesAbortedDueToXSColls = 0;
+ mac_stats->FramesLostDueToIntMACXmitError = 0;
+ mac_stats->CarrierSenseErrors = 0;
+ mac_stats->OctetsReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_OCT];
+ mac_stats->FramesLostDueToIntMACRcvError = 0;
+ mac_stats->MulticastFramesXmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC];
+ mac_stats->BroadcastFramesXmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_BC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
+ mac_stats->FramesWithExcessiveDeferral = 0;
+ mac_stats->MulticastFramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_MC];
+ mac_stats->BroadcastFramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_BC];
+ mac_stats->InRangeLengthErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC];
+ mac_stats->OutOfRangeLengthField =
+ lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+ mac_stats->FrameTooLongErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 10239 },
+ {}
+};
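+
+/* Each range above corresponds 1:1 to the hist[i]/hist_tx[i] bucket filled
+ * in below.
+ */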
+
+static void lan966x_get_eth_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+
+ lan966x_stats_update(lan966x);
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ rmon_stats->undersize_pkts =
+ lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT];
+ rmon_stats->oversize_pkts =
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+ rmon_stats->fragments =
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG];
+ rmon_stats->jabbers =
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER];
+ rmon_stats->hist[0] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64];
+ rmon_stats->hist[1] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127];
+ rmon_stats->hist[2] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255];
+ rmon_stats->hist[3] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511];
+ rmon_stats->hist[4] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023];
+ rmon_stats->hist[5] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526];
+ rmon_stats->hist[6] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO];
+
+ rmon_stats->hist_tx[0] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64];
+ rmon_stats->hist_tx[1] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127];
+ rmon_stats->hist_tx[2] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255];
+ rmon_stats->hist_tx[3] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511];
+ rmon_stats->hist_tx[4] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023];
+ rmon_stats->hist_tx[5] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
+ rmon_stats->hist_tx[6] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO];
+
+ mutex_unlock(&lan966x->stats_lock);
+
+ *ranges = lan966x_rmon_ranges;
+}
+
+static int lan966x_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct lan966x_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int lan966x_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct lan966x_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+static void lan966x_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int lan966x_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
+const struct ethtool_ops lan966x_ethtool_ops = {
+ .get_link_ksettings = lan966x_get_link_ksettings,
+ .set_link_ksettings = lan966x_set_link_ksettings,
+ .get_pauseparam = lan966x_get_pauseparam,
+ .set_pauseparam = lan966x_set_pauseparam,
+ .get_sset_count = lan966x_get_sset_count,
+ .get_strings = lan966x_get_strings,
+ .get_ethtool_stats = lan966x_get_ethtool_stats,
+ .get_eth_mac_stats = lan966x_get_eth_mac_stats,
+ .get_rmon_stats = lan966x_get_eth_rmon_stats,
+ .get_link = ethtool_op_get_link,
+};
+
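+/* Poll the counters periodically so that lan966x_add_cnt() observes every
+ * 32-bit wrap; if more than one wrap happened between two reads, the
+ * accumulated 64-bit value would be wrong.
+ */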
+static void lan966x_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct lan966x *lan966x = container_of(del_work, struct lan966x,
+ stats_work);
+
+ lan966x_stats_update(lan966x);
+
+ queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
+ LAN966X_STATS_CHECK_DELAY);
+}
+
+void lan966x_stats_get(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+ int i;
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT];
+
+ stats->rx_packets = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO];
+
+ stats->multicast = lan966x->stats[idx + SYS_COUNT_RX_MC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_MC];
+
+ stats->rx_errors = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG];
+
+ stats->rx_dropped = dev->stats.rx_dropped +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_DR_LOCAL] +
+ lan966x->stats[idx + SYS_COUNT_DR_TAIL];
+
+ for (i = 0; i < LAN966X_NUM_TC; i++) {
+ stats->rx_dropped +=
+ (lan966x->stats[idx + SYS_COUNT_DR_YELLOW_PRIO_0 + i] +
+ lan966x->stats[idx + SYS_COUNT_DR_GREEN_PRIO_0 + i]);
+ }
+
+ /* Get Tx stats */
+ stats->tx_bytes = lan966x->stats[idx + SYS_COUNT_TX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
+
+ stats->tx_packets = lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO];
+
+ stats->tx_dropped = lan966x->stats[idx + SYS_COUNT_TX_DROP] +
+ lan966x->stats[idx + SYS_COUNT_TX_AGED];
+
+ stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL];
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+int lan966x_stats_init(struct lan966x *lan966x)
+{
+ char queue_name[32];
+
+ lan966x->stats_layout = lan966x_stats_layout;
+ lan966x->num_stats = ARRAY_SIZE(lan966x_stats_layout);
+ lan966x->stats = devm_kcalloc(lan966x->dev, lan966x->num_phys_ports *
+ lan966x->num_stats,
+ sizeof(u64), GFP_KERNEL);
+ if (!lan966x->stats)
+ return -ENOMEM;
+
+ /* Init stats worker */
+ mutex_init(&lan966x->stats_lock);
+ snprintf(queue_name, sizeof(queue_name), "%s-stats",
+ dev_name(lan966x->dev));
+ lan966x->stats_queue = create_singlethread_workqueue(queue_name);
+ if (!lan966x->stats_queue)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work);
+ queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
+ LAN966X_STATS_CHECK_DELAY);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
new file mode 100644
index 000000000000..da5ca7188679
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+struct lan966x_fdb_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ struct lan966x *lan966x;
+ unsigned long event;
+};
+
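+/* FDB entries requested on the bridge device are kept in a refcounted SW
+ * list, so the same MAC/vid pair is stored only once no matter how often
+ * it is added, and can be replayed to HW when vlan membership changes.
+ */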
+struct lan966x_fdb_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u16 vid;
+ u32 references;
+};
+
+static struct lan966x_fdb_entry *
+lan966x_fdb_find_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid == fdb_info->vid &&
+ ether_addr_equal(fdb_entry->mac, fdb_info->addr))
+ return fdb_entry;
+ }
+
+ return NULL;
+}
+
+static void lan966x_fdb_add_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ fdb_entry = lan966x_fdb_find_entry(lan966x, fdb_info);
+ if (fdb_entry) {
+ fdb_entry->references++;
+ return;
+ }
+
+ fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
+ if (!fdb_entry)
+ return;
+
+ ether_addr_copy(fdb_entry->mac, fdb_info->addr);
+ fdb_entry->vid = fdb_info->vid;
+ fdb_entry->references = 1;
+ list_add_tail(&fdb_entry->list, &lan966x->fdb_entries);
+}
+
+static bool lan966x_fdb_del_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries,
+ list) {
+ if (fdb_entry->vid == fdb_info->vid &&
+ ether_addr_equal(fdb_entry->mac, fdb_info->addr)) {
+ fdb_entry->references--;
+ if (!fdb_entry->references) {
+ list_del(&fdb_entry->list);
+ kfree(fdb_entry);
+ return true;
+ }
+ break;
+ }
+ }
+
+ return false;
+}
+
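+/* lan966x_fdb_write_entries/lan966x_fdb_erase_entries let the vlan code
+ * replay or remove the stored CPU-learned entries for a vid, e.g. when
+ * front-port membership of that vlan changes.
+ */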
+void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid != vid)
+ continue;
+
+ lan966x_mac_cpu_learn(lan966x, fdb_entry->mac, fdb_entry->vid);
+ }
+}
+
+void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid != vid)
+ continue;
+
+ lan966x_mac_cpu_forget(lan966x, fdb_entry->mac, fdb_entry->vid);
+ }
+}
+
+static void lan966x_fdb_purge_entries(struct lan966x *lan966x)
+{
+ struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries, list) {
+ list_del(&fdb_entry->list);
+ kfree(fdb_entry);
+ }
+}
+
+int lan966x_fdb_init(struct lan966x *lan966x)
+{
+ INIT_LIST_HEAD(&lan966x->fdb_entries);
+ lan966x->fdb_work = alloc_ordered_workqueue("lan966x_order", 0);
+ if (!lan966x->fdb_work)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void lan966x_fdb_deinit(struct lan966x *lan966x)
+{
+ destroy_workqueue(lan966x->fdb_work);
+ lan966x_fdb_purge_entries(lan966x);
+}
+
+static void lan966x_fdb_event_work(struct work_struct *work)
+{
+ struct lan966x_fdb_event_work *fdb_work =
+ container_of(work, struct lan966x_fdb_event_work, work);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct net_device *dev = fdb_work->dev;
+ struct lan966x_port *port;
+ struct lan966x *lan966x;
+ int ret;
+
+ fdb_info = &fdb_work->fdb_info;
+ lan966x = fdb_work->lan966x;
+
+ if (lan966x_netdevice_check(dev)) {
+ port = netdev_priv(dev);
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_del_entry(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+ } else {
+ if (!netif_is_bridge_master(dev))
+ goto out;
+
+ /* The event targets the bridge device itself */
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ /* If no front port is a member of this vlan, there is no
+ * point in copying the frame to the CPU because it would
+ * just be dropped later on. Still store the fdb entry, so
+ * it can be written to hardware once a port actually joins
+ * the vlan.
+ */
+ lan966x_fdb_add_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
+ break;
+
+ lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ ret = lan966x_fdb_del_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
+ break;
+
+ if (ret)
+ lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+ }
+
+out:
+ kfree(fdb_work->fdb_info.addr);
+ kfree(fdb_work);
+ dev_put(dev);
+}
+
+int lan966x_handle_fdb(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_fdb_event_work *fdb_work;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (lan966x_netdevice_check(orig_dev) &&
+ !fdb_info->added_by_user)
+ break;
+
+ fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
+ if (!fdb_work)
+ return -ENOMEM;
+
+ fdb_work->dev = orig_dev;
+ fdb_work->lan966x = lan966x;
+ fdb_work->event = event;
+ INIT_WORK(&fdb_work->work, lan966x_fdb_event_work);
+ memcpy(&fdb_work->fdb_info, fdb_info, sizeof(fdb_work->fdb_info));
+ fdb_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!fdb_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)fdb_work->fdb_info.addr, fdb_info->addr);
+ dev_hold(orig_dev);
+
+ queue_work(lan966x->fdb_work, &fdb_work->work);
+ break;
+ }
+
+ return 0;
+err_addr_alloc:
+ kfree(fdb_work);
+ return -ENOMEM;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
new file mode 100644
index 000000000000..ca3314789d18
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __LAN966X_IFH_H__
+#define __LAN966X_IFH_H__
+
+/* Fields marked with (*) should just be cleared upon injection.
+ * The IFH is transmitted MSByte first (the highest bit position is sent as
+ * the MSB of the first byte).
+ */
+
+#define IFH_LEN 7
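+
+/* IFH_LEN is in 32-bit words: the header is 224 bits in total, addressed
+ * by the IFH_POS_* bit positions together with the IFH_WID_* widths below.
+ */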
+
+/* Timestamp for frame */
+#define IFH_POS_TIMESTAMP 192
+
+/* Bypass analyzer with a prefilled IFH */
+#define IFH_POS_BYPASS 191
+
+/* Masqueraded injection with masq_port defining logical source port */
+#define IFH_POS_MASQ 190
+
+/* Masqueraded port number for injection */
+#define IFH_POS_MASQ_PORT 186
+
+/* Frame length (*) */
+#define IFH_POS_LEN 178
+
+/* Cell filling mode: Full(0), Etype(1), LlctOpt(2), Llct(3) */
+#define IFH_POS_WRDMODE 176
+
+/* Frame has 16 bits rtag removed compared to line data */
+#define IFH_POS_RTAG48 175
+
+/* Frame has a redundancy tag */
+#define IFH_POS_HAS_RED_TAG 174
+
+/* Frame has been cut through forwarded (*) */
+#define IFH_POS_CUTTHRU 173
+
+/* Rewriter command */
+#define IFH_POS_REW_CMD 163
+
+/* Enable OAM-related rewriting. PDU_TYPE encodes OAM type. */
+#define IFH_POS_REW_OAM 162
+
+/* PDU type. Encoding: (0-NONE, 1-Y1731_CCM, 2-MRP_TST, 3-MRP_ITST, 4-DLR_BCN,
+ * 5-DLR_ADV, 6-RTE_NULL_INJ, 7-IPV4, 8-IPV6, 9-Y1731_NON_CCM).
+ */
+#define IFH_POS_PDU_TYPE 158
+
+/* Update FCS before transmission */
+#define IFH_POS_FCS_UPD 157
+
+/* Classified DSCP value of frame */
+#define IFH_POS_DSCP 151
+
+/* Yellow indication */
+#define IFH_POS_DP 150
+
+/* Process in RTE/inbound */
+#define IFH_POS_RTE_INB_UPDATE 149
+
+/* Number of tags to pop from frame */
+#define IFH_POS_POP_CNT 147
+
+/* Number of tags in front of the ethertype */
+#define IFH_POS_ETYPE_OFS 145
+
+/* Logical source port of frame (*) */
+#define IFH_POS_SRCPORT 141
+
+/* Sequence number in redundancy tag */
+#define IFH_POS_SEQ_NUM 120
+
+/* Stagd flag and classified TCI of frame (PCP/DEI/VID) */
+#define IFH_POS_TCI 103
+
+/* Classified internal priority for queuing */
+#define IFH_POS_QOS_CLASS 100
+
+/* Bit mask with eight CPU copy classes */
+#define IFH_POS_CPUQ 92
+
+/* Relearn + learn flags (*) */
+#define IFH_POS_LEARN_FLAGS 90
+
+/* SFLOW identifier for frame (0-8: Tx port, 9: Rx sampling, 15: No sampling) */
+#define IFH_POS_SFLOW_ID 86
+
+/* Set if an ACL/S2 rule was hit (*).
+ * Super priority: acl_hit=0 and acl_hit(4)=1.
+ */
+#define IFH_POS_ACL_HIT 85
+
+/* S2 rule index hit (*) */
+#define IFH_POS_ACL_IDX 79
+
+/* ISDX as classified by S1 */
+#define IFH_POS_ISDX 71
+
+/* Destination ports for frame */
+#define IFH_POS_DSTS 62
+
+/* Storm policer to be applied: None/Uni/Multi/Broad (*) */
+#define IFH_POS_FLOOD 60
+
+/* Redundancy tag operation */
+#define IFH_POS_SEQ_OP 58
+
+/* Classified internal priority for resource management, tagging etc. */
+#define IFH_POS_IPV 55
+
+/* Frame is for AFI use */
+#define IFH_POS_AFI 54
+
+/* Internal aging value (*) */
+#define IFH_POS_AGED 52
+
+/* RTP Identifier */
+#define IFH_POS_RTP_ID 42
+
+/* RTP MRPD flow */
+#define IFH_POS_RTP_SUBID 41
+
+/* Profinet DataStatus or opcua GroupVersion MSB */
+#define IFH_POS_PN_DATA_STATUS 33
+
+/* Profinet transfer status (1 iff the status is 0) */
+#define IFH_POS_PN_TRANSF_STATUS_ZERO 32
+
+/* Profinet cycle counter or opcua NetworkMessageNumber */
+#define IFH_POS_PN_CC 16
+
+#define IFH_WID_TIMESTAMP 32
+#define IFH_WID_BYPASS 1
+#define IFH_WID_MASQ 1
+#define IFH_WID_MASQ_PORT 4
+#define IFH_WID_LEN 14
+#define IFH_WID_WRDMODE 2
+#define IFH_WID_RTAG48 1
+#define IFH_WID_HAS_RED_TAG 1
+#define IFH_WID_CUTTHRU 1
+#define IFH_WID_REW_CMD 10
+#define IFH_WID_REW_OAM 1
+#define IFH_WID_PDU_TYPE 4
+#define IFH_WID_FCS_UPD 1
+#define IFH_WID_DSCP 6
+#define IFH_WID_DP 1
+#define IFH_WID_RTE_INB_UPDATE 1
+#define IFH_WID_POP_CNT 2
+#define IFH_WID_ETYPE_OFS 2
+#define IFH_WID_SRCPORT 4
+#define IFH_WID_SEQ_NUM 16
+#define IFH_WID_TCI 17
+#define IFH_WID_QOS_CLASS 3
+#define IFH_WID_CPUQ 8
+#define IFH_WID_LEARN_FLAGS 2
+#define IFH_WID_SFLOW_ID 4
+#define IFH_WID_ACL_HIT 1
+#define IFH_WID_ACL_IDX 6
+#define IFH_WID_ISDX 8
+#define IFH_WID_DSTS 9
+#define IFH_WID_FLOOD 2
+#define IFH_WID_SEQ_OP 2
+#define IFH_WID_IPV 3
+#define IFH_WID_AFI 1
+#define IFH_WID_AGED 2
+#define IFH_WID_RTP_ID 10
+#define IFH_WID_RTP_SUBID 1
+#define IFH_WID_PN_DATA_STATUS 8
+#define IFH_WID_PN_TRANSF_STATUS_ZERO 1
+#define IFH_WID_PN_CC 16
+
+#endif /* __LAN966X_IFH_H__ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
new file mode 100644
index 000000000000..ce5970bdcc6a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+#include "lan966x_main.h"
+
+#define LAN966X_MAC_COLUMNS 4
+#define MACACCESS_CMD_IDLE 0
+#define MACACCESS_CMD_LEARN 1
+#define MACACCESS_CMD_FORGET 2
+#define MACACCESS_CMD_AGE 3
+#define MACACCESS_CMD_GET_NEXT 4
+#define MACACCESS_CMD_INIT 5
+#define MACACCESS_CMD_READ 6
+#define MACACCESS_CMD_WRITE 7
+#define MACACCESS_CMD_SYNC_GET_NEXT 8
+
+#define LAN966X_MAC_INVALID_ROW -1
+
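+/* The HW MAC table is organized as rows of LAN966X_MAC_COLUMNS buckets.
+ * The row of each SW entry is recorded so the IRQ sync scan can match SW
+ * state against the row it just read; LAN966X_MAC_INVALID_ROW marks
+ * entries the scan has not placed yet.
+ */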
+struct lan966x_mac_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u16 vid;
+ u16 port_index;
+ int row;
+};
+
+struct lan966x_mac_raw_entry {
+ u32 mach;
+ u32 macl;
+ u32 maca;
+ bool processed;
+};
+
+static int lan966x_mac_get_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, ANA_MACACCESS);
+}
+
+static int lan966x_mac_wait_for_completion(struct lan966x *lan966x)
+{
+ u32 val;
+
+ return readx_poll_timeout_atomic(lan966x_mac_get_status,
+ lan966x, val,
+ (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
+ MACACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US,
+ TABLE_UPDATE_TIMEOUT_US);
+}
+
+static void lan966x_mac_select(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid)
+{
+ u32 macl = 0, mach = 0;
+
+ /* Set the MAC address to handle and the vlan associated in a format
+ * understood by the hardware.
+ */
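+ /* e.g. MAC 00:11:22:33:44:55 in VLAN 1 becomes
+ * mach = 0x00010011 and macl = 0x22334455.
+ */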
+ mach |= vid << 16;
+ mach |= mac[0] << 8;
+ mach |= mac[1] << 0;
+ macl |= mac[2] << 24;
+ macl |= mac[3] << 16;
+ macl |= mac[4] << 8;
+ macl |= mac[5] << 0;
+
+ lan_wr(macl, lan966x, ANA_MACLDATA);
+ lan_wr(mach, lan966x, ANA_MACHDATA);
+}
+
+static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a write command */
+ lan_wr(ANA_MACACCESS_VALID_SET(1) |
+ ANA_MACACCESS_CHANGE2SW_SET(0) |
+ ANA_MACACCESS_MAC_CPU_COPY_SET(cpu_copy) |
+ ANA_MACACCESS_DEST_IDX_SET(pgid) |
+ ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
+ lan966x, ANA_MACACCESS);
+
+ return lan966x_mac_wait_for_completion(lan966x);
+}
+
+/* The mask of the front ports is encoded inside the mac parameter via a call
+ * to lan966x_mdb_encode_mac().
+ */
+int lan966x_mac_ip_learn(struct lan966x *lan966x,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_MACV4 && type != ENTRYTYPE_MACV6);
+
+ return __lan966x_mac_learn(lan966x, 0, cpu_copy, mac, vid, type);
+}
+
+int lan966x_mac_learn(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);
+
+ return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
+}
+
+int lan966x_mac_forget(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a forget command */
+ lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_FORGET),
+ lan966x, ANA_MACACCESS);
+
+ return lan966x_mac_wait_for_completion(lan966x);
+}
+
+int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid)
+{
+ return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED);
+}
+
+int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid)
+{
+ return lan966x_mac_forget(lan966x, addr, vid, ENTRYTYPE_LOCKED);
+}
+
+void lan966x_mac_set_ageing(struct lan966x *lan966x,
+ u32 ageing)
+{
+ lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(ageing / 2),
+ ANA_AUTOAGE_AGE_PERIOD,
+ lan966x, ANA_AUTOAGE);
+}
+
+void lan966x_mac_init(struct lan966x *lan966x)
+{
+ /* Clear the MAC table */
+ lan_wr(MACACCESS_CMD_INIT, lan966x, ANA_MACACCESS);
+ lan966x_mac_wait_for_completion(lan966x);
+
+ spin_lock_init(&lan966x->mac_lock);
+ INIT_LIST_HEAD(&lan966x->mac_entries);
+}
+
+static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ mac_entry = kzalloc(sizeof(*mac_entry), GFP_KERNEL);
+ if (!mac_entry)
+ return NULL;
+
+ memcpy(mac_entry->mac, mac, ETH_ALEN);
+ mac_entry->vid = vid;
+ mac_entry->port_index = port_index;
+ mac_entry->row = LAN966X_MAC_INVALID_ROW;
+ return mac_entry;
+}
+
+static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
+ const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct lan966x_mac_entry *res = NULL;
+ struct lan966x_mac_entry *mac_entry;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(mac, mac_entry->mac) &&
+ mac_entry->port_index == port_index) {
+ res = mac_entry;
+ break;
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ return res;
+}
+
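+/* Check whether an entry is already present in the HW table by issuing a
+ * READ command and testing the VALID bit of the result.
+ */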
+static int lan966x_mac_lookup(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
+{
+ int ret;
+
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a read command */
+ lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_VALID_SET(1) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_READ),
+ lan966x, ANA_MACACCESS);
+
+ ret = lan966x_mac_wait_for_completion(lan966x);
+ if (ret)
+ return ret;
+
+ return ANA_MACACCESS_VALID_GET(lan_rd(lan966x, ANA_MACACCESS));
+}
+
+static void lan966x_fdb_call_notifiers(enum switchdev_notifier_type type,
+ const char *mac, u16 vid,
+ struct net_device *dev)
+{
+ struct switchdev_notifier_fdb_info info = { 0 };
+
+ info.addr = mac;
+ info.vid = vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
+}
+
+int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
+ const unsigned char *addr, u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL))
+ return 0;
+
+ /* If the entry already exists, don't add it again to SW, just
+ * update HW. We must look at the actual HW table because an entry
+ * may have been learned by HW: before its interrupt arrives, the
+ * frame can reach the CPU, which then adds the entry, but without
+ * the extern_learn flag.
+ */
+ mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
+ if (mac_entry)
+ return lan966x_mac_learn(lan966x, port->chip_port,
+ addr, vid, ENTRYTYPE_LOCKED);
+
+ mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
+ if (!mac_entry)
+ return -ENOMEM;
+
+ spin_lock(&lan966x->mac_lock);
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+ spin_unlock(&lan966x->mac_lock);
+
+ lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
+ lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
+
+ return 0;
+}
+
+int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
+ u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(addr, mac_entry->mac)) {
+ lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ return 0;
+}
+
+void lan966x_mac_purge_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
+static void lan966x_mac_notifiers(enum switchdev_notifier_type type,
+ unsigned char *mac, u32 vid,
+ struct net_device *dev)
+{
+ rtnl_lock();
+ lan966x_fdb_call_notifiers(type, mac, vid, dev);
+ rtnl_unlock();
+}
+
+static void lan966x_mac_process_raw_entry(struct lan966x_mac_raw_entry *raw_entry,
+ u8 *mac, u16 *vid, u32 *dest_idx)
+{
+ mac[0] = (raw_entry->mach >> 8) & 0xff;
+ mac[1] = (raw_entry->mach >> 0) & 0xff;
+ mac[2] = (raw_entry->macl >> 24) & 0xff;
+ mac[3] = (raw_entry->macl >> 16) & 0xff;
+ mac[4] = (raw_entry->macl >> 8) & 0xff;
+ mac[5] = (raw_entry->macl >> 0) & 0xff;
+
+ *vid = (raw_entry->mach >> 16) & 0xfff;
+ *dest_idx = ANA_MACACCESS_DEST_IDX_GET(raw_entry->maca);
+}
+
+static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
+ struct lan966x_mac_raw_entry *raw_entries)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u32 dest_idx;
+ u32 column;
+ u16 vid;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
+ bool found = false;
+
+ if (mac_entry->row != row)
+ continue;
+
+ for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+ /* All the valid entries are at the start of the row,
+ * so once an invalid entry is seen the rest of the
+ * columns can be skipped.
+ */
+ if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+ break;
+
+ lan966x_mac_process_raw_entry(&raw_entries[column],
+ mac, &vid, &dest_idx);
+ WARN_ON(dest_idx > lan966x->num_phys_ports);
+
+ /* If the entry in SW is found, then there is nothing
+ * to do
+ */
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(mac_entry->mac, mac) &&
+ mac_entry->port_index == dest_idx) {
+ raw_entries[column].processed = true;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* Notify the bridge that the entry doesn't exist
+ * anymore in the HW and remove the entry from the SW
+ * list
+ */
+ lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ mac_entry->mac, mac_entry->vid,
+ lan966x->ports[mac_entry->port_index]->dev);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ /* Now go to the list of columns and see if any entry was not in the SW
+ * list, then that means that the entry is new so it needs to notify the
+ * bridge.
+ */
+ for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+ /* All the valid entries are at the start of the row, so once an
+ * invalid entry is seen the rest of the columns can be skipped.
+ */
+ if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+ break;
+
+ /* If the entry already exists then don't do anything */
+ if (raw_entries[column].processed)
+ continue;
+
+ lan966x_mac_process_raw_entry(&raw_entries[column],
+ mac, &vid, &dest_idx);
+ WARN_ON(dest_idx > lan966x->num_phys_ports);
+
+ mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
+ if (!mac_entry)
+ return;
+
+ mac_entry->row = row;
+
+ spin_lock(&lan966x->mac_lock);
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+ spin_unlock(&lan966x->mac_lock);
+
+ lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ mac, vid, lan966x->ports[dest_idx]->dev);
+ }
+}
+
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
+{
+ struct lan966x_mac_raw_entry entry[LAN966X_MAC_COLUMNS] = { 0 };
+ u32 index, column;
+ bool stop = true;
+ u32 val;
+
+ /* Start the scan from 0, 0 */
+ lan_wr(ANA_MACTINDX_M_INDEX_SET(0) |
+ ANA_MACTINDX_BUCKET_SET(0),
+ lan966x, ANA_MACTINDX);
+
+ while (1) {
+ lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
+ ANA_MACACCESS_MAC_TABLE_CMD,
+ lan966x, ANA_MACACCESS);
+ lan966x_mac_wait_for_completion(lan966x);
+
+ val = lan_rd(lan966x, ANA_MACTINDX);
+ index = ANA_MACTINDX_M_INDEX_GET(val);
+ column = ANA_MACTINDX_BUCKET_GET(val);
+
+ /* SYNC-GET-NEXT returns all four entries of a row in which a
+ * change occurred, where a change means an entry was added or
+ * removed because of ageing. It returns every column of that
+ * row and then moves on to the next row. The scan stops when
+ * it reaches row 0, column 3 'directly'; so if SYNC-GET-NEXT
+ * first returns row 0 and column 0, reading must continue even
+ * after row 0, column 3 is reached.
+ */
+ if (index == 0 && column == 0)
+ stop = false;
+
+ if (column == LAN966X_MAC_COLUMNS - 1 &&
+ index == 0 && stop)
+ break;
+
+ entry[column].mach = lan_rd(lan966x, ANA_MACHDATA);
+ entry[column].macl = lan_rd(lan966x, ANA_MACLDATA);
+ entry[column].maca = lan_rd(lan966x, ANA_MACACCESS);
+
+ /* Once all the columns are read process them */
+ if (column == LAN966X_MAC_COLUMNS - 1) {
+ lan966x_mac_irq_process(lan966x, index, entry);
+ /* A row was processed so it is safe to assume that the
+ * next row/column can be the stop condition
+ */
+ stop = true;
+ }
+ }
+
+ lan_rmw(ANA_ANAINTR_INTR_SET(0),
+ ANA_ANAINTR_INTR,
+ lan966x, ANA_ANAINTR);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
new file mode 100644
index 000000000000..1f60fd125a1d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -0,0 +1,1002 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/packing.h>
+#include <linux/phy/phy.h>
+#include <linux/reset.h>
+
+#include "lan966x_main.h"
+
+#define XTR_EOF_0 0x00000080U
+#define XTR_EOF_1 0x01000080U
+#define XTR_EOF_2 0x02000080U
+#define XTR_EOF_3 0x03000080U
+#define XTR_PRUNED 0x04000080U
+#define XTR_ABORT 0x05000080U
+#define XTR_ESCAPE 0x06000080U
+#define XTR_NOT_READY 0x07000080U
+#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))
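+
+/* Words read from QS_XTR_RD are either frame data or one of the control
+ * words above; for the EOF words, XTR_VALID_BYTES() recovers how many
+ * bytes of the final data word are valid.
+ */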
+
+#define READL_SLEEP_US 10
+#define READL_TIMEOUT_US 100000000
+
+#define IO_RANGES 2
+
+static const struct of_device_id lan966x_match[] = {
+ { .compatible = "microchip,lan966x-switch" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lan966x_match);
+
+struct lan966x_main_io_resource {
+ enum lan966x_target id;
+ phys_addr_t offset;
+ int range;
+};
+
+static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
+ { TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */
+ { TARGET_ORG, 0, 1 }, /* 0xe2000000 */
+ { TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */
+ { TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */
+ { TARGET_CHIP_TOP, 0x10000, 1 }, /* 0xe2010000 */
+ { TARGET_REW, 0x14000, 1 }, /* 0xe2014000 */
+ { TARGET_SYS, 0x28000, 1 }, /* 0xe2028000 */
+ { TARGET_DEV, 0x34000, 1 }, /* 0xe2034000 */
+ { TARGET_DEV + 1, 0x38000, 1 }, /* 0xe2038000 */
+ { TARGET_DEV + 2, 0x3c000, 1 }, /* 0xe203c000 */
+ { TARGET_DEV + 3, 0x40000, 1 }, /* 0xe2040000 */
+ { TARGET_DEV + 4, 0x44000, 1 }, /* 0xe2044000 */
+ { TARGET_DEV + 5, 0x48000, 1 }, /* 0xe2048000 */
+ { TARGET_DEV + 6, 0x4c000, 1 }, /* 0xe204c000 */
+ { TARGET_DEV + 7, 0x50000, 1 }, /* 0xe2050000 */
+ { TARGET_QSYS, 0x100000, 1 }, /* 0xe2100000 */
+ { TARGET_AFI, 0x120000, 1 }, /* 0xe2120000 */
+ { TARGET_ANA, 0x140000, 1 }, /* 0xe2140000 */
+};
+
+static int lan966x_create_targets(struct platform_device *pdev,
+ struct lan966x *lan966x)
+{
+ struct resource *iores[IO_RANGES];
+ void __iomem *begin[IO_RANGES];
+ int idx;
+
+ /* Initially map the entire range and after that update each target to
+ * point inside the region at the correct offset. It is possible that
+ * other devices access the same region so don't add any checks about
+ * this.
+ */
+ for (idx = 0; idx < IO_RANGES; idx++) {
+ iores[idx] = platform_get_resource(pdev, IORESOURCE_MEM,
+ idx);
+ if (!iores[idx]) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
+
+ begin[idx] = devm_ioremap(&pdev->dev,
+ iores[idx]->start,
+ resource_size(iores[idx]));
+ if (!begin[idx]) {
+ dev_err(&pdev->dev, "Unable to get registers: %s\n",
+ iores[idx]->name);
+ return -ENOMEM;
+ }
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(lan966x_main_iomap); idx++) {
+ const struct lan966x_main_io_resource *iomap =
+ &lan966x_main_iomap[idx];
+
+ lan966x->regs[iomap->id] = begin[iomap->range] + iomap->offset;
+ }
+
+ return 0;
+}
+
+static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ const struct sockaddr *addr = p;
+ int ret;
+
+ /* Learn the new net device MAC address in the mac table. */
+ ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
+ if (ret)
+ return ret;
+
+ /* Then forget the previous one. */
+ ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
+ if (ret)
+ return ret;
+
+ eth_hw_addr_set(dev, addr->sa_data);
+ return ret;
+}
+
+static int lan966x_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = snprintf(buf, len, "p%d", port->chip_port);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int lan966x_port_open(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ /* Enable receiving frames on the port, and activate auto-learning of
+ * MAC addresses.
+ */
+ lan_rmw(ANA_PORT_CFG_LEARNAUTO_SET(1) |
+ ANA_PORT_CFG_RECV_ENA_SET(1) |
+ ANA_PORT_CFG_PORTID_VAL_SET(port->chip_port),
+ ANA_PORT_CFG_LEARNAUTO |
+ ANA_PORT_CFG_RECV_ENA |
+ ANA_PORT_CFG_PORTID_VAL,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
+ if (err) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return err;
+ }
+
+ phylink_start(port->phylink);
+
+ return 0;
+}
+
+static int lan966x_port_stop(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ lan966x_port_config_down(port);
+ phylink_stop(port->phylink);
+ phylink_disconnect_phy(port->phylink);
+
+ return 0;
+}
+
+static int lan966x_port_inj_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, QS_INJ_STATUS);
+}
+
+static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
+{
+ u32 val;
+
+ return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
+ QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+}
+
+static int lan966x_port_ifh_xmit(struct sk_buff *skb,
+ __be32 *ifh,
+ struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 i, count, last;
+ u8 grp = 0;
+ u32 val;
+ int err;
+
+ val = lan_rd(lan966x, QS_INJ_STATUS);
+ if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) ||
+ (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)))
+ return NETDEV_TX_BUSY;
+
+ /* Write start of frame */
+ lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+ QS_INJ_CTRL_SOF_SET(1),
+ lan966x, QS_INJ_CTRL(grp));
+
+ /* Write IFH header */
+ for (i = 0; i < IFH_LEN; ++i) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ return NETDEV_TX_BUSY;
+
+ lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp));
+ }
+
+ /* Write frame */
+ count = DIV_ROUND_UP(skb->len, 4);
+ last = skb->len % 4;
+ for (i = 0; i < count; ++i) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ return NETDEV_TX_BUSY;
+
+ lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp));
+ }
+
+ /* Add padding */
+ while (i < (LAN966X_BUFFER_MIN_SZ / 4)) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ return NETDEV_TX_BUSY;
+
+ lan_wr(0, lan966x, QS_INJ_WR(grp));
+ ++i;
+ }
+
+ /* Indicate EOF and valid bytes in the last word */
+ lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+ QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ?
+ 0 : last) |
+ QS_INJ_CTRL_EOF_SET(1),
+ lan966x, QS_INJ_CTRL(grp));
+
+ /* Add dummy CRC */
+ lan_wr(0, lan966x, QS_INJ_WR(grp));
+ skb_tx_timestamp(skb);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ dev_consume_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
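+/* IFH fields are written with the kernel's packing() helper, which packs
+ * the value into bits [IFH_POS_x + IFH_WID_x - 1 : IFH_POS_x] of the
+ * IFH_LEN * 4 byte buffer (PACK writes, UNPACK reads).
+ */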
+static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
+{
+ packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1,
+ IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_port(void *ifh, u64 port_mask)
+{
+ packing(ifh, &port_mask, IFH_POS_DSTS + IFH_WID_DSTS - 1,
+ IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_qos_class(void *ifh, u64 qos_class)
+{
+ packing(ifh, &qos_class, IFH_POS_QOS_CLASS + IFH_WID_QOS_CLASS - 1,
+ IFH_POS_QOS_CLASS, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_ipv(void *ifh, u64 ipv)
+{
+ packing(ifh, &ipv, IFH_POS_IPV + IFH_WID_IPV - 1,
+ IFH_POS_IPV, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_vid(void *ifh, u64 vid)
+{
+ packing(ifh, &vid, IFH_POS_TCI + IFH_WID_TCI - 1,
+ IFH_POS_TCI, IFH_LEN * 4, PACK, 0);
+}
+
+static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ __be32 ifh[IFH_LEN];
+
+ memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+
+ lan966x_ifh_set_bypass(ifh, 1);
+ lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+ lan966x_ifh_set_qos_class(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
+ lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
+ lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
+
+ return lan966x_port_ifh_xmit(skb, ifh, dev);
+}
+
+static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu),
+ lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ return lan966x_mac_forget(lan966x, addr, HOST_PVID, ENTRYTYPE_LOCKED);
+}
+
+static int lan966x_mc_sync(struct net_device *dev, const unsigned char *addr)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ return lan966x_mac_cpu_learn(lan966x, addr, HOST_PVID);
+}
+
+static void lan966x_port_set_rx_mode(struct net_device *dev)
+{
+ __dev_mc_sync(dev, lan966x_mc_sync, lan966x_mc_unsync);
+}
+
+static int lan966x_port_get_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ ppid->id_len = sizeof(lan966x->base_mac);
+ memcpy(&ppid->id, &lan966x->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static const struct net_device_ops lan966x_port_netdev_ops = {
+ .ndo_open = lan966x_port_open,
+ .ndo_stop = lan966x_port_stop,
+ .ndo_start_xmit = lan966x_port_xmit,
+ .ndo_change_mtu = lan966x_port_change_mtu,
+ .ndo_set_rx_mode = lan966x_port_set_rx_mode,
+ .ndo_get_phys_port_name = lan966x_port_get_phys_port_name,
+ .ndo_get_stats64 = lan966x_stats_get,
+ .ndo_set_mac_address = lan966x_port_set_mac_address,
+ .ndo_get_port_parent_id = lan966x_port_get_parent_id,
+};
+
+bool lan966x_netdevice_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &lan966x_port_netdev_ops;
+}
+
+static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp)
+{
+ return lan_rd(lan966x, QS_XTR_RD(grp));
+}
+
+static int lan966x_port_xtr_ready(struct lan966x *lan966x, u8 grp)
+{
+ u32 val;
+
+ return read_poll_timeout(lan966x_port_xtr_status, val,
+ val != XTR_NOT_READY,
+ READL_SLEEP_US, READL_TIMEOUT_US, false,
+ lan966x, grp);
+}
+
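+/* Returns the number of bytes stored in *rval: 4 for a full data word,
+ * fewer for the word flagged by an EOF status, or a negative error code
+ * on abort/timeout.
+ */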
+static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
+{
+ u32 bytes_valid;
+ u32 val;
+ int err;
+
+ val = lan_rd(lan966x, QS_XTR_RD(grp));
+ if (val == XTR_NOT_READY) {
+ err = lan966x_port_xtr_ready(lan966x, grp);
+ if (err)
+ return -EIO;
+ }
+
+ switch (val) {
+ case XTR_ABORT:
+ return -EIO;
+ case XTR_EOF_0:
+ case XTR_EOF_1:
+ case XTR_EOF_2:
+ case XTR_EOF_3:
+ case XTR_PRUNED:
+ bytes_valid = XTR_VALID_BYTES(val);
+ val = lan_rd(lan966x, QS_XTR_RD(grp));
+ if (val == XTR_ESCAPE)
+ *rval = lan_rd(lan966x, QS_XTR_RD(grp));
+ else
+ *rval = val;
+
+ return bytes_valid;
+ case XTR_ESCAPE:
+ *rval = lan_rd(lan966x, QS_XTR_RD(grp));
+
+ return 4;
+ default:
+ *rval = val;
+
+ return 4;
+ }
+}
+
+static void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
+{
+ packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1,
+ IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0);
+}
+
+static void lan966x_ifh_get_len(void *ifh, u64 *len)
+{
+ packing(ifh, len, IFH_POS_LEN + IFH_WID_LEN - 1,
+ IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0);
+}
+
+static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ int i, grp = 0, err = 0;
+
+ if (!(lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp)))
+ return IRQ_NONE;
+
+ do {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ int sz = 0, buf_len;
+ u64 src_port, len;
+ u32 ifh[IFH_LEN];
+ u32 *buf;
+ u32 val;
+
+ for (i = 0; i < IFH_LEN; i++) {
+ err = lan966x_rx_frame_word(lan966x, grp, &ifh[i]);
+ if (err != 4)
+ goto recover;
+ }
+
+ err = 0;
+
+ lan966x_ifh_get_src_port(ifh, &src_port);
+ lan966x_ifh_get_len(ifh, &len);
+
+ WARN_ON(src_port >= lan966x->num_phys_ports);
+
+ dev = lan966x->ports[src_port]->dev;
+ skb = netdev_alloc_skb(dev, len);
+ if (unlikely(!skb)) {
+ netdev_err(dev, "Unable to allocate sk_buff\n");
+ err = -ENOMEM;
+ break;
+ }
+ buf_len = len - ETH_FCS_LEN;
+ buf = (u32 *)skb_put(skb, buf_len);
+
+ len = 0;
+ do {
+ sz = lan966x_rx_frame_word(lan966x, grp, &val);
+ if (sz < 0) {
+ kfree_skb(skb);
+ goto recover;
+ }
+
+ *buf++ = val;
+ len += sz;
+ } while (len < buf_len);
+
+ /* Read the FCS */
+ sz = lan966x_rx_frame_word(lan966x, grp, &val);
+ if (sz < 0) {
+ kfree_skb(skb);
+ goto recover;
+ }
+
+ /* Update the statistics if part of the FCS was read before */
+ len -= ETH_FCS_LEN - sz;
+
+ if (unlikely(dev->features & NETIF_F_RXFCS)) {
+ buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
+ *buf = val;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (lan966x->bridge_mask & BIT(src_port))
+ skb->offload_fwd_mark = 1;
+
+ netif_rx_ni(skb);
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+
+recover:
+ if (sz < 0 || err)
+ lan_rd(lan966x, QS_XTR_RD(grp));
+
+ } while (lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lan966x_ana_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+
+ return lan966x_mac_irq_handler(lan966x);
+}
+
+static void lan966x_cleanup_ports(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ if (port->dev)
+ unregister_netdev(port->dev);
+
+ if (port->phylink) {
+ rtnl_lock();
+ lan966x_port_stop(port->dev);
+ rtnl_unlock();
+ phylink_destroy(port->phylink);
+ port->phylink = NULL;
+ }
+
+ if (port->fwnode)
+ fwnode_handle_put(port->fwnode);
+ }
+
+ disable_irq(lan966x->xtr_irq);
+ lan966x->xtr_irq = -ENXIO;
+
+ if (lan966x->ana_irq) {
+ disable_irq(lan966x->ana_irq);
+ lan966x->ana_irq = -ENXIO;
+ }
+}
+
+static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
+ phy_interface_t phy_mode,
+ struct fwnode_handle *portnp)
+{
+ struct lan966x_port *port;
+ struct phylink *phylink;
+ struct net_device *dev;
+ int err;
+
+ if (p >= lan966x->num_phys_ports)
+ return -EINVAL;
+
+ dev = devm_alloc_etherdev_mqs(lan966x->dev,
+ sizeof(struct lan966x_port), 8, 1);
+ if (!dev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(dev, lan966x->dev);
+ port = netdev_priv(dev);
+ port->dev = dev;
+ port->lan966x = lan966x;
+ port->chip_port = p;
+ lan966x->ports[p] = port;
+
+ dev->max_mtu = ETH_MAX_MTU;
+
+ dev->netdev_ops = &lan966x_port_netdev_ops;
+ dev->ethtool_ops = &lan966x_ethtool_ops;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ dev->needed_headroom = IFH_LEN * sizeof(u32);
+
+ eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
+
+ lan966x_mac_learn(lan966x, PGID_CPU, dev->dev_addr, HOST_PVID,
+ ENTRYTYPE_LOCKED);
+
+ port->phylink_config.dev = &port->dev->dev;
+ port->phylink_config.type = PHYLINK_NETDEV;
+ port->phylink_pcs.poll = true;
+ port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
+
+ port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&port->phylink_config,
+ portnp,
+ phy_mode,
+ &lan966x_phylink_mac_ops);
+ if (IS_ERR(phylink)) {
+ port->dev = NULL;
+ return PTR_ERR(phylink);
+ }
+
+ port->phylink = phylink;
+ phylink_set_pcs(phylink, &port->phylink_pcs);
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(lan966x->dev, "register_netdev failed\n");
+ return err;
+ }
+
+ lan966x_vlan_port_set_vlan_aware(port, 0);
+ lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ lan966x_vlan_port_apply(port);
+
+ return 0;
+}
+
+static void lan966x_init(struct lan966x *lan966x)
+{
+ u32 p, i;
+
+ /* MAC table initialization */
+ lan966x_mac_init(lan966x);
+
+ lan966x_vlan_init(lan966x);
+
+ /* Flush queues */
+ lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) |
+ GENMASK(1, 0),
+ lan966x, QS_XTR_FLUSH);
+
+	/* Allow the queues to drain */
+ mdelay(1);
+
+ /* All Queues normal */
+ lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) &
+ ~(GENMASK(1, 0)),
+ lan966x, QS_XTR_FLUSH);
+
+	/* Set the MAC age time to its default value; an entry is aged out
+	 * after 2 * AGE_PERIOD. BR_DEFAULT_AGEING_TIME is in jiffies, so
+	 * dividing by HZ converts it to seconds.
+	 */
+ lan_wr(ANA_AUTOAGE_AGE_PERIOD_SET(BR_DEFAULT_AGEING_TIME / 2 / HZ),
+ lan966x, ANA_AUTOAGE);
+
+ /* Disable learning for frames discarded by VLAN ingress filtering */
+ lan_rmw(ANA_ADVLEARN_VLAN_CHK_SET(1),
+ ANA_ADVLEARN_VLAN_CHK,
+ lan966x, ANA_ADVLEARN);
+
+	/* Set up frame ageing to "2 sec"; the unit is 6.5 us on lan966x,
+	 * hence the 20000000 / 65 value below.
+	 */
+ lan_wr(SYS_FRM_AGING_AGE_TX_ENA_SET(1) |
+ (20000000 / 65),
+ lan966x, SYS_FRM_AGING);
+
+ /* Map the 8 CPU extraction queues to CPU port */
+ lan_wr(0, lan966x, QSYS_CPU_GROUP_MAP);
+
+	/* Do byte-swap and expect status after last data word.
+	 * Extraction: mode = manual extraction | byte_swap
+	 */
+ lan_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
+ QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
+ lan966x, QS_XTR_GRP_CFG(0));
+
+ /* Injection: Mode: manual injection | Byte_swap */
+ lan_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
+ QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
+ lan966x, QS_INJ_GRP_CFG(0));
+
+ lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0),
+ QS_INJ_CTRL_GAP_SIZE,
+ lan966x, QS_INJ_CTRL(0));
+
+	/* Enable internal frame header (IFH) insertion/parsing on the CPU
+	 * port
+	 */
+ lan_wr(SYS_PORT_MODE_INCL_INJ_HDR_SET(1) |
+ SYS_PORT_MODE_INCL_XTR_HDR_SET(1),
+ lan966x, SYS_PORT_MODE(CPU_PORT));
+
+ /* Setup flooding PGIDs */
+ lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) |
+ ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC),
+ lan966x, ANA_FLOODING_IPMC);
+
+ /* There are 8 priorities */
+ for (i = 0; i < 8; ++i)
+ lan_rmw(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST_SET(PGID_UC) |
+ ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC),
+ ANA_FLOODING_FLD_MULTICAST |
+ ANA_FLOODING_FLD_UNICAST |
+ ANA_FLOODING_FLD_BROADCAST,
+ lan966x, ANA_FLOODING(i));
+
+ for (i = 0; i < PGID_ENTRIES; ++i)
+ /* Set all the entries to obey VLAN_VLAN */
+ lan_rmw(ANA_PGID_CFG_OBEY_VLAN_SET(1),
+ ANA_PGID_CFG_OBEY_VLAN,
+ lan966x, ANA_PGID_CFG(i));
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ /* Disable bridging by default */
+ lan_rmw(ANA_PGID_PGID_SET(0x0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(p + PGID_SRC));
+
+ /* Do not forward BPDU frames to the front ports and copy them
+ * to CPU
+ */
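+		/* 0xffff sets one redirect bit per reserved
+		 * 01-80-C2-00-00-0X BPDU address.
+		 */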
+ lan_wr(0xffff, lan966x, ANA_CPU_FWD_BPDU_CFG(p));
+ }
+
+ /* Set source buffer size for each priority and each port to 1500 bytes */
+ for (i = 0; i <= QSYS_Q_RSRV; ++i) {
+ lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(i));
+ lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(512 + i));
+ }
+
+ /* Enable switching to/from cpu port */
+ lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
+ QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
+ QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ /* Configure and enable the CPU port */
+ lan_rmw(ANA_PGID_PGID_SET(0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(CPU_PORT));
+ lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT)),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_CPU));
+
+ /* Multicast to all other ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MC));
+
+ /* This will be controlled by mrouter ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MCIPV4));
+
+ /* Unicast to all other ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_UC));
+
+ /* Broadcast to the CPU port and to other ports */
+ lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT) | GENMASK(lan966x->num_phys_ports - 1, 0)),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_BC));
+
+ lan_wr(REW_PORT_CFG_NO_REWRITE_SET(1),
+ lan966x, REW_PORT_CFG(CPU_PORT));
+
+ lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
+ ANA_ANAINTR_INTR_ENA,
+ lan966x, ANA_ANAINTR);
+}
+
+static int lan966x_ram_init(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, SYS_RAM_INIT);
+}
+
+static int lan966x_reset_switch(struct lan966x *lan966x)
+{
+ struct reset_control *switch_reset, *phy_reset;
+ int val = 0;
+ int ret;
+
+ switch_reset = devm_reset_control_get_shared(lan966x->dev, "switch");
+ if (IS_ERR(switch_reset))
+ return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset),
+				     "Could not obtain switch reset\n");
+
+ phy_reset = devm_reset_control_get_shared(lan966x->dev, "phy");
+ if (IS_ERR(phy_reset))
+ return dev_err_probe(lan966x->dev, PTR_ERR(phy_reset),
+ "Could not obtain phy reset\n");
+
+ reset_control_reset(switch_reset);
+ reset_control_reset(phy_reset);
+
+ lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
+ lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
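+	/* Wait for the RAM initialization triggered above to complete */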
+ ret = readx_poll_timeout(lan966x_ram_init, lan966x,
+ val, (val & BIT(1)) == 0, READL_SLEEP_US,
+ READL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ lan_wr(SYS_RESET_CFG_CORE_ENA_SET(1), lan966x, SYS_RESET_CFG);
+
+ return 0;
+}
+
+static int lan966x_probe(struct platform_device *pdev)
+{
+ struct fwnode_handle *ports, *portnp;
+ struct lan966x *lan966x;
+ u8 mac_addr[ETH_ALEN];
+ int err, i;
+
+ lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL);
+ if (!lan966x)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, lan966x);
+ lan966x->dev = &pdev->dev;
+
+ if (!device_get_mac_address(&pdev->dev, mac_addr)) {
+ ether_addr_copy(lan966x->base_mac, mac_addr);
+ } else {
+		pr_info("MAC address not set, using a random MAC\n");
+ eth_random_addr(lan966x->base_mac);
+ lan966x->base_mac[5] &= 0xf0;
+ }
+
+ ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
+ if (!ports)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "no ethernet-ports child found\n");
+
+ err = lan966x_create_targets(pdev, lan966x);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+				     "Failed to create targets\n");
+
+ err = lan966x_reset_switch(lan966x);
+ if (err)
+		return dev_err_probe(&pdev->dev, err, "Reset failed\n");
+
+ i = 0;
+ fwnode_for_each_available_child_node(ports, portnp)
+ ++i;
+
+ lan966x->num_phys_ports = i;
+ lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
+ sizeof(struct lan966x_port *),
+ GFP_KERNEL);
+ if (!lan966x->ports)
+ return -ENOMEM;
+
+	/* Use the whole packet buffer memory as the shared queue size */
+ lan966x->shared_queue_sz = LAN966X_BUFFER_MEMORY;
+
+ /* set irq */
+ lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr");
+ if (lan966x->xtr_irq <= 0)
+ return -EINVAL;
+
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL,
+ lan966x_xtr_irq_handler, IRQF_ONESHOT,
+ "frame extraction", lan966x);
+ if (err) {
+		pr_err("Unable to use xtr irq\n");
+ return -ENODEV;
+ }
+
+ lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
+	if (lan966x->ana_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
+ lan966x_ana_irq_handler, IRQF_ONESHOT,
+ "ana irq", lan966x);
+ if (err)
+			return dev_err_probe(&pdev->dev, err, "Unable to use ana irq\n");
+ }
+
+ /* init switch */
+ lan966x_init(lan966x);
+ lan966x_stats_init(lan966x);
+
+ /* go over the child nodes */
+ fwnode_for_each_available_child_node(ports, portnp) {
+ phy_interface_t phy_mode;
+ struct phy *serdes;
+ u32 p;
+
+ if (fwnode_property_read_u32(portnp, "reg", &p))
+ continue;
+
+ phy_mode = fwnode_get_phy_mode(portnp);
+ err = lan966x_probe_port(lan966x, p, phy_mode, portnp);
+ if (err)
+ goto cleanup_ports;
+
+ /* Read needed configuration */
+ lan966x->ports[p]->config.portmode = phy_mode;
+ lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);
+
+ serdes = devm_of_phy_get(lan966x->dev, to_of_node(portnp), NULL);
+ if (!IS_ERR(serdes))
+ lan966x->ports[p]->serdes = serdes;
+
+ lan966x_port_init(lan966x->ports[p]);
+ }
+
+ lan966x_mdb_init(lan966x);
+ err = lan966x_fdb_init(lan966x);
+ if (err)
+ goto cleanup_ports;
+
+ return 0;
+
+cleanup_ports:
+ fwnode_handle_put(portnp);
+
+ lan966x_cleanup_ports(lan966x);
+
+ cancel_delayed_work_sync(&lan966x->stats_work);
+ destroy_workqueue(lan966x->stats_queue);
+ mutex_destroy(&lan966x->stats_lock);
+
+ return err;
+}
+
+static int lan966x_remove(struct platform_device *pdev)
+{
+ struct lan966x *lan966x = platform_get_drvdata(pdev);
+
+ lan966x_cleanup_ports(lan966x);
+
+ cancel_delayed_work_sync(&lan966x->stats_work);
+ destroy_workqueue(lan966x->stats_queue);
+ mutex_destroy(&lan966x->stats_lock);
+
+ lan966x_mac_purge_entries(lan966x);
+ lan966x_mdb_deinit(lan966x);
+ lan966x_fdb_deinit(lan966x);
+
+ return 0;
+}
+
+static struct platform_driver lan966x_driver = {
+ .probe = lan966x_probe,
+ .remove = lan966x_remove,
+ .driver = {
+ .name = "lan966x-switch",
+ .of_match_table = lan966x_match,
+ },
+};
+
+static int __init lan966x_switch_driver_init(void)
+{
+ int ret;
+
+ lan966x_register_notifier_blocks();
+
+ ret = platform_driver_register(&lan966x_driver);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ lan966x_unregister_notifier_blocks();
+ return ret;
+}
+
+static void __exit lan966x_switch_driver_exit(void)
+{
+ platform_driver_unregister(&lan966x_driver);
+ lan966x_unregister_notifier_blocks();
+}
+
+module_init(lan966x_switch_driver_init);
+module_exit(lan966x_switch_driver_exit);
+
+MODULE_DESCRIPTION("Microchip LAN966X switch driver");
+MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
new file mode 100644
index 000000000000..99c6d0a9f946
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __LAN966X_MAIN_H__
+#define __LAN966X_MAIN_H__
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/jiffies.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <net/switchdev.h>
+
+#include "lan966x_regs.h"
+#include "lan966x_ifh.h"
+
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+
+#define LAN966X_BUFFER_CELL_SZ 64
+#define LAN966X_BUFFER_MEMORY (160 * 1024)
+#define LAN966X_BUFFER_MIN_SZ 60
+
+#define PGID_AGGR 64
+#define PGID_SRC 80
+#define PGID_ENTRIES 89
+
+#define UNAWARE_PVID 0
+#define HOST_PVID 4095
+
+/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */
+#define QSYS_Q_RSRV 95
+
+#define CPU_PORT 8
+
+/* Reserved PGIDs */
+#define PGID_CPU (PGID_AGGR - 6)
+#define PGID_UC (PGID_AGGR - 5)
+#define PGID_BC (PGID_AGGR - 4)
+#define PGID_MC (PGID_AGGR - 3)
+#define PGID_MCIPV4 (PGID_AGGR - 2)
+#define PGID_MCIPV6 (PGID_AGGR - 1)
+
+/* Non-reserved PGIDs, used for general purpose */
+#define PGID_GP_START (CPU_PORT + 1)
+#define PGID_GP_END PGID_CPU
+
+#define LAN966X_SPEED_NONE 0
+#define LAN966X_SPEED_2500 1
+#define LAN966X_SPEED_1000 1
+#define LAN966X_SPEED_100 2
+#define LAN966X_SPEED_10 3
+
+/* MAC table entry types.
+ * ENTRYTYPE_NORMAL is subject to aging.
+ * ENTRYTYPE_LOCKED is not subject to aging.
+ * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
+ * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
+ */
+enum macaccess_entry_type {
+ ENTRYTYPE_NORMAL = 0,
+ ENTRYTYPE_LOCKED,
+ ENTRYTYPE_MACV4,
+ ENTRYTYPE_MACV6,
+};
+
+struct lan966x_port;
+
+struct lan966x_stat_layout {
+ u32 offset;
+ char name[ETH_GSTRING_LEN];
+};
+
+struct lan966x {
+ struct device *dev;
+
+ u8 num_phys_ports;
+ struct lan966x_port **ports;
+
+ void __iomem *regs[NUM_TARGETS];
+
+ int shared_queue_sz;
+
+ u8 base_mac[ETH_ALEN];
+
+ struct net_device *bridge;
+ u16 bridge_mask;
+ u16 bridge_fwd_mask;
+
+ struct list_head mac_entries;
+ spinlock_t mac_lock; /* lock for mac_entries list */
+
+ u16 vlan_mask[VLAN_N_VID];
+ DECLARE_BITMAP(cpu_vlan_mask, VLAN_N_VID);
+
+ /* stats */
+ const struct lan966x_stat_layout *stats_layout;
+ u32 num_stats;
+
+ /* workqueue for reading stats */
+ struct mutex stats_lock;
+ u64 *stats;
+ struct delayed_work stats_work;
+ struct workqueue_struct *stats_queue;
+
+ /* interrupts */
+ int xtr_irq;
+ int ana_irq;
+
+	/* workqueue for fdb */
+ struct workqueue_struct *fdb_work;
+ struct list_head fdb_entries;
+
+ /* mdb */
+ struct list_head mdb_entries;
+ struct list_head pgid_entries;
+};
+
+struct lan966x_port_config {
+ phy_interface_t portmode;
+ const unsigned long *advertising;
+ int speed;
+ int duplex;
+ u32 pause;
+ bool inband;
+ bool autoneg;
+};
+
+struct lan966x_port {
+ struct net_device *dev;
+ struct lan966x *lan966x;
+
+ u8 chip_port;
+ u16 pvid;
+ u16 vid;
+ bool vlan_aware;
+
+ bool learn_ena;
+
+ struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
+ struct lan966x_port_config config;
+ struct phylink *phylink;
+ struct phy *serdes;
+ struct fwnode_handle *fwnode;
+};
+
+extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
+extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops;
+extern const struct ethtool_ops lan966x_ethtool_ops;
+
+bool lan966x_netdevice_check(const struct net_device *dev);
+
+void lan966x_register_notifier_blocks(void);
+void lan966x_unregister_notifier_blocks(void);
+
+void lan966x_stats_get(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
+int lan966x_stats_init(struct lan966x *lan966x);
+
+void lan966x_port_config_down(struct lan966x_port *port);
+void lan966x_port_config_up(struct lan966x_port *port);
+void lan966x_port_status_get(struct lan966x_port *port,
+ struct phylink_link_state *state);
+int lan966x_port_pcs_set(struct lan966x_port *port,
+ struct lan966x_port_config *config);
+void lan966x_port_init(struct lan966x_port *port);
+
+int lan966x_mac_ip_learn(struct lan966x *lan966x,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_learn(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_forget(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid);
+int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid);
+void lan966x_mac_init(struct lan966x *lan966x);
+void lan966x_mac_set_ageing(struct lan966x *lan966x,
+ u32 ageing);
+int lan966x_mac_del_entry(struct lan966x *lan966x,
+ const unsigned char *addr,
+ u16 vid);
+int lan966x_mac_add_entry(struct lan966x *lan966x,
+ struct lan966x_port *port,
+ const unsigned char *addr,
+ u16 vid);
+void lan966x_mac_purge_entries(struct lan966x *lan966x);
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x);
+
+void lan966x_vlan_init(struct lan966x *lan966x);
+void lan966x_vlan_port_apply(struct lan966x_port *port);
+bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid);
+void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ bool vlan_aware);
+int lan966x_vlan_port_set_vid(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged);
+void lan966x_vlan_port_add_vlan(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged);
+void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid);
+void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid);
+void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid);
+
+void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid);
+int lan966x_fdb_init(struct lan966x *lan966x);
+void lan966x_fdb_deinit(struct lan966x *lan966x);
+int lan966x_handle_fdb(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info);
+
+void lan966x_mdb_init(struct lan966x *lan966x);
+void lan966x_mdb_deinit(struct lan966x *lan966x);
+int lan966x_handle_port_mdb_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj);
+int lan966x_handle_port_mdb_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj);
+void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid);
+
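+/* Resolve a register instance address: start from the target's mapped base
+ * in regs[], add the register-group base plus (group instance * group
+ * width), then the register offset plus (register instance * register
+ * width).
+ */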
+static inline void __iomem *lan_addr(void __iomem *base[],
+ int id, int tinst, int tcnt,
+ int gbase, int ginst,
+ int gcnt, int gwidth,
+ int raddr, int rinst,
+ int rcnt, int rwidth)
+{
+ WARN_ON((tinst) >= tcnt);
+ WARN_ON((ginst) >= gcnt);
+ WARN_ON((rinst) >= rcnt);
+ return base[id + (tinst)] +
+ gbase + ((ginst) * gwidth) +
+ raddr + ((rinst) * rwidth);
+}
+
+static inline u32 lan_rd(struct lan966x *lan966x, int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ return readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline void lan_wr(u32 val, struct lan966x *lan966x,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ writel(val, lan_addr(lan966x->regs, id, tinst, tcnt,
+ gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth));
+}
+
+static inline void lan_rmw(u32 val, u32 mask, struct lan966x *lan966x,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ u32 nval;
+
+ nval = readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+ nval = (nval & ~mask) | (val & mask);
+ writel(nval, lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+#endif /* __LAN966X_MAIN_H__ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
new file mode 100644
index 000000000000..c68d0a99d292
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+struct lan966x_pgid_entry {
+ struct list_head list;
+ int index;
+ refcount_t refcount;
+ u16 ports;
+};
+
+struct lan966x_mdb_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN];
+ u16 vid;
+ u16 ports;
+ struct lan966x_pgid_entry *pgid;
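+	/* number of bridge (host) joins that requested a CPU copy */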
+ u8 cpu_copy;
+};
+
+void lan966x_mdb_init(struct lan966x *lan966x)
+{
+ INIT_LIST_HEAD(&lan966x->mdb_entries);
+ INIT_LIST_HEAD(&lan966x->pgid_entries);
+}
+
+static void lan966x_mdb_purge_mdb_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry, *tmp;
+
+ list_for_each_entry_safe(mdb_entry, tmp, &lan966x->mdb_entries, list) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ }
+}
+
+static void lan966x_mdb_purge_pgid_entries(struct lan966x *lan966x)
+{
+ struct lan966x_pgid_entry *pgid_entry, *tmp;
+
+ list_for_each_entry_safe(pgid_entry, tmp, &lan966x->pgid_entries, list) {
+ list_del(&pgid_entry->list);
+ kfree(pgid_entry);
+ }
+}
+
+void lan966x_mdb_deinit(struct lan966x *lan966x)
+{
+ lan966x_mdb_purge_mdb_entries(lan966x);
+ lan966x_mdb_purge_pgid_entries(lan966x);
+}
+
+static struct lan966x_mdb_entry *
+lan966x_mdb_entry_get(struct lan966x *lan966x,
+ const unsigned char *mac,
+ u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (ether_addr_equal(mdb_entry->mac, mac) &&
+ mdb_entry->vid == vid)
+ return mdb_entry;
+ }
+
+ return NULL;
+}
+
+static struct lan966x_mdb_entry *
+lan966x_mdb_entry_add(struct lan966x *lan966x,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ return ERR_PTR(-ENOMEM);
+
+ ether_addr_copy(mdb_entry->mac, mdb->addr);
+ mdb_entry->vid = mdb->vid;
+
+ list_add_tail(&mdb_entry->list, &lan966x->mdb_entries);
+
+ return mdb_entry;
+}
+
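+/* For ENTRYTYPE_MACV4/MACV6 entries the destination port mask is encoded
+ * into the leading bytes of the MAC address itself, so no pgid entry is
+ * needed for IP multicast.
+ */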
+static void lan966x_mdb_encode_mac(unsigned char *mac,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ ether_addr_copy(mac, mdb_entry->mac);
+
+ if (type == ENTRYTYPE_MACV4) {
+ mac[0] = 0;
+ mac[1] = mdb_entry->ports >> 8;
+ mac[2] = mdb_entry->ports & 0xff;
+ } else if (type == ENTRYTYPE_MACV6) {
+ mac[0] = mdb_entry->ports >> 8;
+ mac[1] = mdb_entry->ports & 0xff;
+ }
+}
+
+static int lan966x_mdb_ip_add(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ bool cpu_copy = false;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry) {
+ mdb_entry = lan966x_mdb_entry_add(lan966x, mdb);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
+ } else {
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+
+ if (cpu_port)
+ mdb_entry->cpu_copy++;
+ else
+ mdb_entry->ports |= BIT(port->chip_port);
+
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+ cpu_copy = true;
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ return lan966x_mac_ip_learn(lan966x, cpu_copy,
+ mac, mdb_entry->vid, type);
+}
+
+static int lan966x_mdb_ip_del(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ u16 ports;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry)
+ return -ENOENT;
+
+ ports = mdb_entry->ports;
+ if (cpu_port) {
+ /* If there are still other references to the CPU port then
+		 * there is no point in deleting and re-adding the same entry
+ */
+ mdb_entry->cpu_copy--;
+ if (mdb_entry->cpu_copy)
+ return 0;
+ } else {
+ ports &= ~BIT(port->chip_port);
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports = ports;
+
+ if (!mdb_entry->ports && !mdb_entry->cpu_copy) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return 0;
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ return lan966x_mac_ip_learn(lan966x, mdb_entry->cpu_copy,
+ mac, mdb_entry->vid, type);
+}
+
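+/* pgid entries are reference counted and shared between mdb entries that
+ * use the same destination port mask.
+ */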
+static struct lan966x_pgid_entry *
+lan966x_pgid_entry_add(struct lan966x *lan966x, int index, u16 ports)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+
+ pgid_entry = kzalloc(sizeof(*pgid_entry), GFP_KERNEL);
+ if (!pgid_entry)
+ return ERR_PTR(-ENOMEM);
+
+ pgid_entry->ports = ports;
+ pgid_entry->index = index;
+ refcount_set(&pgid_entry->refcount, 1);
+
+ list_add_tail(&pgid_entry->list, &lan966x->pgid_entries);
+
+ return pgid_entry;
+}
+
+static struct lan966x_pgid_entry *
+lan966x_pgid_entry_get(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ int index;
+
+ /* Try to find an existing pgid that uses the same ports as the
+ * mdb_entry
+ */
+ list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) {
+ if (pgid_entry->ports == mdb_entry->ports) {
+ refcount_inc(&pgid_entry->refcount);
+ return pgid_entry;
+ }
+ }
+
+	/* Otherwise allocate a free general-purpose pgid index; if none is
+	 * available, the resources are exhausted
+	 */
+ for (index = PGID_GP_START; index < PGID_GP_END; index++) {
+ bool used = false;
+
+ list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) {
+ if (pgid_entry->index == index) {
+ used = true;
+ break;
+ }
+ }
+
+ if (!used)
+ return lan966x_pgid_entry_add(lan966x, index,
+ mdb_entry->ports);
+ }
+
+ return ERR_PTR(-ENOSPC);
+}
+
+static void lan966x_pgid_entry_del(struct lan966x *lan966x,
+ struct lan966x_pgid_entry *pgid_entry)
+{
+ if (!refcount_dec_and_test(&pgid_entry->refcount))
+ return;
+
+ list_del(&pgid_entry->list);
+ kfree(pgid_entry);
+}
+
+static int lan966x_mdb_l2_add(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_pgid_entry *pgid_entry;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry) {
+ mdb_entry = lan966x_mdb_entry_add(lan966x, mdb);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
+ } else {
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+
+ if (cpu_port) {
+ mdb_entry->ports |= BIT(CPU_PORT);
+ mdb_entry->cpu_copy++;
+ } else {
+ mdb_entry->ports |= BIT(port->chip_port);
+ }
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry)) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return PTR_ERR(pgid_entry);
+ }
+ mdb_entry->pgid = pgid_entry;
+
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+		mdb_entry->ports &= ~BIT(CPU_PORT);
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+static int lan966x_mdb_l2_del(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_pgid_entry *pgid_entry;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ u16 ports;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry)
+ return -ENOENT;
+
+ ports = mdb_entry->ports;
+ if (cpu_port) {
+ /* If there are still other references to the CPU port then
+		 * there is no point in deleting and re-adding the same entry
+ */
+ mdb_entry->cpu_copy--;
+ if (mdb_entry->cpu_copy)
+ return 0;
+
+ ports &= ~BIT(CPU_PORT);
+ } else {
+ ports &= ~BIT(port->chip_port);
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+
+ mdb_entry->ports = ports;
+
+ if (!mdb_entry->ports) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return 0;
+ }
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry)) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return PTR_ERR(pgid_entry);
+ }
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
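+/* 01:00:5e:xx:xx:xx is IPv4 multicast and 33:33:xx:xx:xx:xx is IPv6
+ * multicast; everything else is handled as a plain L2 entry.
+ */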
+static enum macaccess_entry_type
+lan966x_mdb_classify(const unsigned char *mac)
+{
+ if (mac[0] == 0x01 && mac[1] == 0x00 && mac[2] == 0x5e)
+ return ENTRYTYPE_MACV4;
+ if (mac[0] == 0x33 && mac[1] == 0x33)
+ return ENTRYTYPE_MACV6;
+ return ENTRYTYPE_LOCKED;
+}
+
+int lan966x_handle_port_mdb_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ enum macaccess_entry_type type;
+
+	/* IPv4/IPv6 entries are added differently from L2 entries: the
+	 * IPv4/IPv6 ones do not need a pgid entry, while the L2 ones do
+	 */
+ type = lan966x_mdb_classify(mdb->addr);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ return lan966x_mdb_ip_add(port, mdb, type);
+
+ return lan966x_mdb_l2_add(port, mdb, type);
+}
+
+int lan966x_handle_port_mdb_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ enum macaccess_entry_type type;
+
+	/* IPv4/IPv6 entries are removed differently from L2 entries: the
+	 * IPv4/IPv6 ones do not need a pgid entry, while the L2 ones do
+	 */
+ type = lan966x_mdb_classify(mdb->addr);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ return lan966x_mdb_ip_del(port, mdb, type);
+
+ return lan966x_mdb_l2_del(port, mdb, type);
+}
+
+static void lan966x_mdb_ip_cpu_copy(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_mac_ip_learn(lan966x, true, mac, mdb_entry->vid, type);
+}
+
+static void lan966x_mdb_l2_cpu_copy(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports |= BIT(CPU_PORT);
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry))
+ return;
+
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (mdb_entry->vid != vid || !mdb_entry->cpu_copy)
+ continue;
+
+ type = lan966x_mdb_classify(mdb_entry->mac);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ lan966x_mdb_ip_cpu_copy(lan966x, mdb_entry, type);
+ else
+ lan966x_mdb_l2_cpu_copy(lan966x, mdb_entry, type);
+ }
+}
+
+static void lan966x_mdb_ip_cpu_remove(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_mac_ip_learn(lan966x, false, mac, mdb_entry->vid, type);
+}
+
+static void lan966x_mdb_l2_cpu_remove(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports &= ~BIT(CPU_PORT);
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry))
+ return;
+
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (mdb_entry->vid != vid || !mdb_entry->cpu_copy)
+ continue;
+
+ type = lan966x_mdb_classify(mdb_entry->mac);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ lan966x_mdb_ip_cpu_remove(lan966x, mdb_entry, type);
+ else
+ lan966x_mdb_l2_cpu_remove(lan966x, mdb_entry, type);
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
new file mode 100644
index 000000000000..b66a9aa00ea4
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/phylink.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/phy/phy.h>
+#include <linux/sfp.h>
+
+#include "lan966x_main.h"
+
+static void lan966x_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
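+	/* Nothing to do; the port is configured through the PCS ops and in
+	 * mac_link_up().
+	 */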
+}
+
+static int lan966x_phylink_mac_prepare(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t iface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ int err;
+
+ if (port->serdes) {
+ err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET,
+ iface);
+ if (err) {
+ netdev_err(to_net_dev(config->dev),
+ "Could not set mode of SerDes\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void lan966x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ struct lan966x_port_config *port_config = &port->config;
+
+ port_config->duplex = duplex;
+ port_config->speed = speed;
+ port_config->pause = 0;
+ port_config->pause |= tx_pause ? MLO_PAUSE_TX : 0;
+ port_config->pause |= rx_pause ? MLO_PAUSE_RX : 0;
+
+ lan966x_port_config_up(port);
+}
+
+static void lan966x_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ struct lan966x *lan966x = port->lan966x;
+
+ lan966x_port_config_down(port);
+
+ /* Take PCS out of reset */
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+}
+
+static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct lan966x_port, phylink_pcs);
+}
+
+static void lan966x_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct lan966x_port *port = lan966x_pcs_to_port(pcs);
+
+ lan966x_port_status_get(port, state);
+}
+
+static int lan966x_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct lan966x_port *port = lan966x_pcs_to_port(pcs);
+ struct lan966x_port_config config;
+ int ret;
+
+ config = port->config;
+ config.portmode = interface;
+ config.inband = phylink_autoneg_inband(mode);
+ config.autoneg = phylink_test(advertising, Autoneg);
+ config.advertising = advertising;
+
+ ret = lan966x_port_pcs_set(port, &config);
+ if (ret)
+ netdev_err(port->dev, "port PCS config failed: %d\n", ret);
+
+ return ret;
+}
+
+static void lan966x_pcs_aneg_restart(struct phylink_pcs *pcs)
+{
+ /* Currently not used */
+}
+
+const struct phylink_mac_ops lan966x_phylink_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_config = lan966x_phylink_mac_config,
+ .mac_prepare = lan966x_phylink_mac_prepare,
+ .mac_link_down = lan966x_phylink_mac_link_down,
+ .mac_link_up = lan966x_phylink_mac_link_up,
+};
+
+const struct phylink_pcs_ops lan966x_phylink_pcs_ops = {
+ .pcs_get_state = lan966x_pcs_get_state,
+ .pcs_config = lan966x_pcs_config,
+ .pcs_an_restart = lan966x_pcs_aneg_restart,
+};
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
new file mode 100644
index 000000000000..237555845a52
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/netdevice.h>
+#include <linux/phy/phy.h>
+
+#include "lan966x_main.h"
+
+/* Watermark encode */
+#define MULTIPLIER_BIT BIT(8)
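+/* Watermarks are programmed in 64-byte cells; values of 256 cells and above
+ * are encoded in units of 16 cells with MULTIPLIER_BIT set.
+ */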
+static u32 lan966x_wm_enc(u32 value)
+{
+ value /= LAN966X_BUFFER_CELL_SZ;
+
+ if (value >= MULTIPLIER_BIT) {
+ value /= 16;
+ if (value >= MULTIPLIER_BIT)
+ value = (MULTIPLIER_BIT - 1);
+
+ value |= MULTIPLIER_BIT;
+ }
+
+ return value;
+}
+
+static void lan966x_port_link_down(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val, delay = 0;
+
+ /* 0.5: Disable any AFI */
+ lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(1) |
+ AFI_PORT_CFG_FRM_OUT_MAX_SET(0),
+ AFI_PORT_CFG_FC_SKIP_TTI_INJ |
+ AFI_PORT_CFG_FRM_OUT_MAX,
+ lan966x, AFI_PORT_CFG(port->chip_port));
+
+ /* wait for reg afi_port_frm_out to become 0 for the port */
+ while (true) {
+ val = lan_rd(lan966x, AFI_PORT_FRM_OUT(port->chip_port));
+ if (!AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(val))
+ break;
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ delay++;
+ if (delay == 2000) {
+			pr_err("AFI timeout chip port %u\n", port->chip_port);
+ break;
+ }
+ }
+
+ delay = 0;
+
+ /* 1: Reset the PCS Rx clock domain */
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(1),
+ DEV_CLOCK_CFG_PCS_RX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* 2: Disable MAC frame reception */
+ lan_rmw(DEV_MAC_ENA_CFG_RX_ENA_SET(0),
+ DEV_MAC_ENA_CFG_RX_ENA,
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ /* 3: Disable traffic being sent to or from switch port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 4: Disable dequeuing from the egress queues */
+ lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(1),
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ lan966x, QSYS_PORT_MODE(port->chip_port));
+
+ /* 5: Disable Flowcontrol */
+ lan_rmw(SYS_PAUSE_CFG_PAUSE_ENA_SET(0),
+ SYS_PAUSE_CFG_PAUSE_ENA,
+ lan966x, SYS_PAUSE_CFG(port->chip_port));
+
+ /* 5.1: Disable PFC */
+ lan_rmw(QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(0),
+ QSYS_SW_PORT_MODE_TX_PFC_ENA,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 6: Wait a worst case time 8ms (jumbo/10Mbit) */
+ usleep_range(8 * USEC_PER_MSEC, 9 * USEC_PER_MSEC);
+
+ /* 7: Disable HDX backpressure */
+ lan_rmw(SYS_FRONT_PORT_MODE_HDX_MODE_SET(0),
+ SYS_FRONT_PORT_MODE_HDX_MODE,
+ lan966x, SYS_FRONT_PORT_MODE(port->chip_port));
+
+	/* 8: Flush the queues associated with the port */
+ lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3),
+ QSYS_SW_PORT_MODE_AGING_MODE,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 9: Enable dequeuing from the egress queues */
+ lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(0),
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ lan966x, QSYS_PORT_MODE(port->chip_port));
+
+ /* 10: Wait until flushing is complete */
+ while (true) {
+ val = lan_rd(lan966x, QSYS_SW_STATUS(port->chip_port));
+ if (!QSYS_SW_STATUS_EQ_AVAIL_GET(val))
+ break;
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ delay++;
+ if (delay == 2000) {
+			pr_err("Flush timeout chip port %u\n", port->chip_port);
+ break;
+ }
+ }
+
+ /* 11: Reset the Port and MAC clock domains */
+ lan_rmw(DEV_MAC_ENA_CFG_TX_ENA_SET(0),
+ DEV_MAC_ENA_CFG_TX_ENA,
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ lan_rmw(DEV_CLOCK_CFG_PORT_RST_SET(1),
+ DEV_CLOCK_CFG_PORT_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ lan_rmw(DEV_CLOCK_CFG_MAC_TX_RST_SET(1) |
+ DEV_CLOCK_CFG_MAC_RX_RST_SET(1) |
+ DEV_CLOCK_CFG_PORT_RST_SET(1),
+ DEV_CLOCK_CFG_MAC_TX_RST |
+ DEV_CLOCK_CFG_MAC_RX_RST |
+ DEV_CLOCK_CFG_PORT_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* 12: Clear flushing */
+ lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(2),
+ QSYS_SW_PORT_MODE_AGING_MODE,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+	/* The port is disabled and flushed; it can now be set up in its new
+	 * operating mode
+	 */
+}
+
+static void lan966x_port_link_up(struct lan966x_port *port)
+{
+ struct lan966x_port_config *config = &port->config;
+ struct lan966x *lan966x = port->lan966x;
+ int speed = 0, mode = 0;
+ int atop_wm = 0;
+
+ switch (config->speed) {
+ case SPEED_10:
+ speed = LAN966X_SPEED_10;
+ break;
+ case SPEED_100:
+ speed = LAN966X_SPEED_100;
+ break;
+ case SPEED_1000:
+ speed = LAN966X_SPEED_1000;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+ break;
+ case SPEED_2500:
+ speed = LAN966X_SPEED_2500;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+ break;
+ }
+
+	/* GIGA_MODE_ENA(1) also needs to be set regardless of the port
+	 * speed for QSGMII ports.
+	 */
+ if (config->portmode == PHY_INTERFACE_MODE_QSGMII)
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+
+ lan_wr(config->duplex | mode,
+ lan966x, DEV_MAC_MODE_CFG(port->chip_port));
+
+ lan_rmw(DEV_MAC_IFG_CFG_TX_IFG_SET(config->duplex ? 6 : 5) |
+ DEV_MAC_IFG_CFG_RX_IFG1_SET(config->speed == SPEED_10 ? 2 : 1) |
+ DEV_MAC_IFG_CFG_RX_IFG2_SET(2),
+ DEV_MAC_IFG_CFG_TX_IFG |
+ DEV_MAC_IFG_CFG_RX_IFG1 |
+ DEV_MAC_IFG_CFG_RX_IFG2,
+ lan966x, DEV_MAC_IFG_CFG(port->chip_port));
+
+ lan_rmw(DEV_MAC_HDX_CFG_SEED_SET(4) |
+ DEV_MAC_HDX_CFG_SEED_LOAD_SET(1),
+ DEV_MAC_HDX_CFG_SEED |
+ DEV_MAC_HDX_CFG_SEED_LOAD,
+ lan966x, DEV_MAC_HDX_CFG(port->chip_port));
+
+ if (config->portmode == PHY_INTERFACE_MODE_GMII) {
+ if (config->speed == SPEED_1000)
+ lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(1),
+ CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA,
+ lan966x,
+ CHIP_TOP_CUPHY_PORT_CFG(port->chip_port));
+ else
+ lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(0),
+ CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA,
+ lan966x,
+ CHIP_TOP_CUPHY_PORT_CFG(port->chip_port));
+ }
+
+ /* No PFC */
+ lan_wr(ANA_PFC_CFG_FC_LINK_SPEED_SET(speed),
+ lan966x, ANA_PFC_CFG(port->chip_port));
+
+ lan_rmw(DEV_PCS1G_CFG_PCS_ENA_SET(1),
+ DEV_PCS1G_CFG_PCS_ENA,
+ lan966x, DEV_PCS1G_CFG(port->chip_port));
+
+ lan_rmw(DEV_PCS1G_SD_CFG_SD_ENA_SET(0),
+ DEV_PCS1G_SD_CFG_SD_ENA,
+ lan966x, DEV_PCS1G_SD_CFG(port->chip_port));
+
+ /* Set Pause WM hysteresis, start/stop are in 1518 byte units */
+ lan_wr(SYS_PAUSE_CFG_PAUSE_ENA_SET(1) |
+ SYS_PAUSE_CFG_PAUSE_STOP_SET(lan966x_wm_enc(4 * 1518)) |
+ SYS_PAUSE_CFG_PAUSE_START_SET(lan966x_wm_enc(6 * 1518)),
+ lan966x, SYS_PAUSE_CFG(port->chip_port));
+
+ /* Set SMAC of Pause frame (00:00:00:00:00:00) */
+ lan_wr(0, lan966x, DEV_FC_MAC_LOW_CFG(port->chip_port));
+ lan_wr(0, lan966x, DEV_FC_MAC_HIGH_CFG(port->chip_port));
+
+ /* Flow control */
+ lan_rmw(SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(speed) |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(7) |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(1) |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(0xffff) |
+ SYS_MAC_FC_CFG_RX_FC_ENA_SET(config->pause & MLO_PAUSE_RX ? 1 : 0) |
+ SYS_MAC_FC_CFG_TX_FC_ENA_SET(config->pause & MLO_PAUSE_TX ? 1 : 0),
+ SYS_MAC_FC_CFG_FC_LINK_SPEED |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG |
+ SYS_MAC_FC_CFG_RX_FC_ENA |
+ SYS_MAC_FC_CFG_TX_FC_ENA,
+ lan966x, SYS_MAC_FC_CFG(port->chip_port));
+
+ /* Tail dropping watermark */
+ atop_wm = lan966x->shared_queue_sz;
+
+	/* The total memory size is divided by the number of front ports plus
+	 * the CPU port
+	 */
+	lan_wr(lan966x_wm_enc(atop_wm / (lan966x->num_phys_ports + 1)), lan966x,
+ SYS_ATOP(port->chip_port));
+ lan_wr(lan966x_wm_enc(atop_wm), lan966x, SYS_ATOP_TOT_CFG);
+
+	/* Enable the MAC module; this needs to be done last */
+ lan_wr(DEV_MAC_ENA_CFG_RX_ENA_SET(1) |
+ DEV_MAC_ENA_CFG_TX_ENA_SET(1),
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ /* Take out the clock from reset */
+ lan_wr(DEV_CLOCK_CFG_LINK_SPEED_SET(speed),
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* Core: Enable port for frame transfer */
+ lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
+ QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
+ QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(0) |
+ AFI_PORT_CFG_FRM_OUT_MAX_SET(16),
+ AFI_PORT_CFG_FC_SKIP_TTI_INJ |
+ AFI_PORT_CFG_FRM_OUT_MAX,
+ lan966x, AFI_PORT_CFG(port->chip_port));
+}
+
+void lan966x_port_config_down(struct lan966x_port *port)
+{
+ lan966x_port_link_down(port);
+}
+
+void lan966x_port_config_up(struct lan966x_port *port)
+{
+ lan966x_port_link_up(port);
+}
+
+void lan966x_port_status_get(struct lan966x_port *port,
+ struct phylink_link_state *state)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool link_down;
+ u16 bmsr = 0;
+ u16 lp_adv;
+ u32 val;
+
+ val = lan_rd(lan966x, DEV_PCS1G_STICKY(port->chip_port));
+ link_down = DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(val);
+ if (link_down)
+ lan_wr(val, lan966x, DEV_PCS1G_STICKY(port->chip_port));
+
+ /* Get both current Link and Sync status */
+ val = lan_rd(lan966x, DEV_PCS1G_LINK_STATUS(port->chip_port));
+ state->link = DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(val) &&
+ DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(val);
+ state->link &= !link_down;
+
+ /* Get PCS ANEG status register */
+ val = lan_rd(lan966x, DEV_PCS1G_ANEG_STATUS(port->chip_port));
+	/* When aneg has completed, synthesize a clause-22 BMSR value so the
+	 * generic phylink helper can decode the link-partner advertisement
+	 */
+ if (DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(val)) {
+ state->an_complete = true;
+
+ bmsr |= state->link ? BMSR_LSTATUS : 0;
+ bmsr |= BMSR_ANEGCOMPLETE;
+
+ lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val);
+ phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv);
+ } else {
+ if (!state->link)
+ return;
+
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ state->speed = SPEED_1000;
+ else if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+
+ state->duplex = DUPLEX_FULL;
+ }
+}
+
+int lan966x_port_pcs_set(struct lan966x_port *port,
+ struct lan966x_port_config *config)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool inband_aneg = false;
+ bool outband;
+
+ if (config->inband) {
+ if (config->portmode == PHY_INTERFACE_MODE_SGMII ||
+ config->portmode == PHY_INTERFACE_MODE_QSGMII)
+ inband_aneg = true; /* Cisco-SGMII in-band-aneg */
+ else if (config->portmode == PHY_INTERFACE_MODE_1000BASEX &&
+ config->autoneg)
+ inband_aneg = true; /* Clause-37 in-band-aneg */
+
+ outband = false;
+ } else {
+ outband = true;
+ }
+
+ /* Disable or enable inband */
+ lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband),
+ DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ lan966x, DEV_PCS1G_MODE_CFG(port->chip_port));
+
+ /* Enable PCS */
+ lan_wr(DEV_PCS1G_CFG_PCS_ENA_SET(1),
+ lan966x, DEV_PCS1G_CFG(port->chip_port));
+
+ if (inband_aneg) {
+ int adv = phylink_mii_c22_pcs_encode_advertisement(config->portmode,
+ config->advertising);
+ if (adv >= 0)
+ /* Enable in-band aneg */
+ lan_wr(DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(adv) |
+ DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
+ DEV_PCS1G_ANEG_CFG_ENA_SET(1) |
+ DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(1),
+ lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port));
+ } else {
+ lan_wr(0, lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port));
+ }
+
+ /* Take PCS out of reset */
+ lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(2) |
+ DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
+ DEV_CLOCK_CFG_LINK_SPEED |
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ port->config = *config;
+
+ return 0;
+}
+
+void lan966x_port_init(struct lan966x_port *port)
+{
+ struct lan966x_port_config *config = &port->config;
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(0),
+ ANA_PORT_CFG_LEARN_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ lan966x_port_config_down(port);
+
+ if (config->portmode != PHY_INTERFACE_MODE_QSGMII)
+ return;
+
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0) |
+ DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000),
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST |
+ DEV_CLOCK_CFG_LINK_SPEED,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
new file mode 100644
index 000000000000..797560172aca
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -0,0 +1,871 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+
+/* This file is autogenerated by cml-utils 2021-10-10 13:25:08 +0200.
+ * Commit ID: 26db2002924973d36a30b369c94f025a678fe9ea (dirty)
+ */
+
+#ifndef _LAN966X_REGS_H_
+#define _LAN966X_REGS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+enum lan966x_target {
+ TARGET_AFI = 2,
+ TARGET_ANA = 3,
+ TARGET_CHIP_TOP = 5,
+ TARGET_CPU = 6,
+ TARGET_DEV = 13,
+ TARGET_GCB = 27,
+ TARGET_ORG = 36,
+ TARGET_QS = 42,
+ TARGET_QSYS = 46,
+ TARGET_REW = 47,
+ TARGET_SYS = 52,
+ NUM_TARGETS = 66
+};
+
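+/* Each register macro below expands to the (target, tinst, tcnt, gbase,
+ * ginst, gcnt, gwidth, raddr, rinst, rcnt, rwidth) tuple consumed by the
+ * lan_addr() accessor in lan966x_main.h.
+ */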
+#define __REG(...) __VA_ARGS__
+
+/* AFI:PORT_TBL:PORT_FRM_OUT */
+#define AFI_PORT_FRM_OUT(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 0, 0, 1, 4)
+
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT GENMASK(26, 16)
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_SET(x)\
+ FIELD_PREP(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x)
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(x)\
+ FIELD_GET(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x)
+
+/* AFI:PORT_TBL:PORT_CFG */
+#define AFI_PORT_CFG(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 4, 0, 1, 4)
+
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ BIT(16)
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(x)\
+ FIELD_PREP(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x)
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_GET(x)\
+ FIELD_GET(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x)
+
+#define AFI_PORT_CFG_FRM_OUT_MAX GENMASK(9, 0)
+#define AFI_PORT_CFG_FRM_OUT_MAX_SET(x)\
+ FIELD_PREP(AFI_PORT_CFG_FRM_OUT_MAX, x)
+#define AFI_PORT_CFG_FRM_OUT_MAX_GET(x)\
+ FIELD_GET(AFI_PORT_CFG_FRM_OUT_MAX, x)
+
+/* ANA:ANA:ADVLEARN */
+#define ANA_ADVLEARN __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 0, 0, 1, 4)
+
+#define ANA_ADVLEARN_VLAN_CHK BIT(0)
+#define ANA_ADVLEARN_VLAN_CHK_SET(x)\
+ FIELD_PREP(ANA_ADVLEARN_VLAN_CHK, x)
+#define ANA_ADVLEARN_VLAN_CHK_GET(x)\
+ FIELD_GET(ANA_ADVLEARN_VLAN_CHK, x)
+
+/* ANA:ANA:VLANMASK */
+#define ANA_VLANMASK __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 8, 0, 1, 4)
+
+/* ANA:ANA:ANAINTR */
+#define ANA_ANAINTR __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 16, 0, 1, 4)
+
+#define ANA_ANAINTR_INTR BIT(1)
+#define ANA_ANAINTR_INTR_SET(x)\
+ FIELD_PREP(ANA_ANAINTR_INTR, x)
+#define ANA_ANAINTR_INTR_GET(x)\
+ FIELD_GET(ANA_ANAINTR_INTR, x)
+
+#define ANA_ANAINTR_INTR_ENA BIT(0)
+#define ANA_ANAINTR_INTR_ENA_SET(x)\
+ FIELD_PREP(ANA_ANAINTR_INTR_ENA, x)
+#define ANA_ANAINTR_INTR_ENA_GET(x)\
+ FIELD_GET(ANA_ANAINTR_INTR_ENA, x)
+
+/* ANA:ANA:AUTOAGE */
+#define ANA_AUTOAGE __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 44, 0, 1, 4)
+
+#define ANA_AUTOAGE_AGE_PERIOD GENMASK(20, 1)
+#define ANA_AUTOAGE_AGE_PERIOD_SET(x)\
+ FIELD_PREP(ANA_AUTOAGE_AGE_PERIOD, x)
+#define ANA_AUTOAGE_AGE_PERIOD_GET(x)\
+ FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x)
+
+/* ANA:ANA:FLOODING */
+#define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4)
+
+#define ANA_FLOODING_FLD_UNICAST GENMASK(17, 12)
+#define ANA_FLOODING_FLD_UNICAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_UNICAST, x)
+#define ANA_FLOODING_FLD_UNICAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_UNICAST, x)
+
+#define ANA_FLOODING_FLD_BROADCAST GENMASK(11, 6)
+#define ANA_FLOODING_FLD_BROADCAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_BROADCAST, x)
+#define ANA_FLOODING_FLD_BROADCAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_BROADCAST, x)
+
+#define ANA_FLOODING_FLD_MULTICAST GENMASK(5, 0)
+#define ANA_FLOODING_FLD_MULTICAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_MULTICAST, x)
+#define ANA_FLOODING_FLD_MULTICAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_MULTICAST, x)
+
+/* ANA:ANA:FLOODING_IPMC */
+#define ANA_FLOODING_IPMC __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 100, 0, 1, 4)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL GENMASK(23, 18)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA GENMASK(17, 12)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_DATA, x)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_DATA, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL GENMASK(11, 6)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA GENMASK(5, 0)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_DATA, x)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_DATA, x)
+
+/* ANA:PGID:PGID */
+#define ANA_PGID(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 0, 0, 1, 4)
+
+#define ANA_PGID_PGID GENMASK(8, 0)
+#define ANA_PGID_PGID_SET(x)\
+ FIELD_PREP(ANA_PGID_PGID, x)
+#define ANA_PGID_PGID_GET(x)\
+ FIELD_GET(ANA_PGID_PGID, x)
+
+/* ANA:PGID:PGID_CFG */
+#define ANA_PGID_CFG(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 4, 0, 1, 4)
+
+#define ANA_PGID_CFG_OBEY_VLAN BIT(0)
+#define ANA_PGID_CFG_OBEY_VLAN_SET(x)\
+ FIELD_PREP(ANA_PGID_CFG_OBEY_VLAN, x)
+#define ANA_PGID_CFG_OBEY_VLAN_GET(x)\
+ FIELD_GET(ANA_PGID_CFG_OBEY_VLAN, x)
+
+/* ANA:ANA_TABLES:MACHDATA */
+#define ANA_MACHDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 40, 0, 1, 4)
+
+/* ANA:ANA_TABLES:MACLDATA */
+#define ANA_MACLDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 44, 0, 1, 4)
+
+/* ANA:ANA_TABLES:MACACCESS */
+#define ANA_MACACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 48, 0, 1, 4)
+
+#define ANA_MACACCESS_CHANGE2SW BIT(17)
+#define ANA_MACACCESS_CHANGE2SW_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_CHANGE2SW, x)
+#define ANA_MACACCESS_CHANGE2SW_GET(x)\
+ FIELD_GET(ANA_MACACCESS_CHANGE2SW, x)
+
+#define ANA_MACACCESS_MAC_CPU_COPY BIT(16)
+#define ANA_MACACCESS_MAC_CPU_COPY_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_MAC_CPU_COPY, x)
+#define ANA_MACACCESS_MAC_CPU_COPY_GET(x)\
+ FIELD_GET(ANA_MACACCESS_MAC_CPU_COPY, x)
+
+#define ANA_MACACCESS_VALID BIT(12)
+#define ANA_MACACCESS_VALID_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_VALID, x)
+#define ANA_MACACCESS_VALID_GET(x)\
+ FIELD_GET(ANA_MACACCESS_VALID, x)
+
+#define ANA_MACACCESS_ENTRYTYPE GENMASK(11, 10)
+#define ANA_MACACCESS_ENTRYTYPE_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_ENTRYTYPE, x)
+#define ANA_MACACCESS_ENTRYTYPE_GET(x)\
+ FIELD_GET(ANA_MACACCESS_ENTRYTYPE, x)
+
+#define ANA_MACACCESS_DEST_IDX GENMASK(9, 4)
+#define ANA_MACACCESS_DEST_IDX_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_DEST_IDX, x)
+#define ANA_MACACCESS_DEST_IDX_GET(x)\
+ FIELD_GET(ANA_MACACCESS_DEST_IDX, x)
+
+#define ANA_MACACCESS_MAC_TABLE_CMD GENMASK(3, 0)
+#define ANA_MACACCESS_MAC_TABLE_CMD_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_MAC_TABLE_CMD, x)
+#define ANA_MACACCESS_MAC_TABLE_CMD_GET(x)\
+ FIELD_GET(ANA_MACACCESS_MAC_TABLE_CMD, x)
+
+/* ANA:ANA_TABLES:MACTINDX */
+#define ANA_MACTINDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 52, 0, 1, 4)
+
+#define ANA_MACTINDX_BUCKET GENMASK(12, 11)
+#define ANA_MACTINDX_BUCKET_SET(x)\
+ FIELD_PREP(ANA_MACTINDX_BUCKET, x)
+#define ANA_MACTINDX_BUCKET_GET(x)\
+ FIELD_GET(ANA_MACTINDX_BUCKET, x)
+
+#define ANA_MACTINDX_M_INDEX GENMASK(10, 0)
+#define ANA_MACTINDX_M_INDEX_SET(x)\
+ FIELD_PREP(ANA_MACTINDX_M_INDEX, x)
+#define ANA_MACTINDX_M_INDEX_GET(x)\
+ FIELD_GET(ANA_MACTINDX_M_INDEX, x)
+
+/* ANA:ANA_TABLES:VLAN_PORT_MASK */
+#define ANA_VLAN_PORT_MASK __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 56, 0, 1, 4)
+
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK GENMASK(8, 0)
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(x)\
+ FIELD_PREP(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x)
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_GET(x)\
+ FIELD_GET(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x)
+
+/* ANA:ANA_TABLES:VLANACCESS */
+#define ANA_VLANACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 60, 0, 1, 4)
+
+#define ANA_VLANACCESS_VLAN_TBL_CMD GENMASK(1, 0)
+#define ANA_VLANACCESS_VLAN_TBL_CMD_SET(x)\
+ FIELD_PREP(ANA_VLANACCESS_VLAN_TBL_CMD, x)
+#define ANA_VLANACCESS_VLAN_TBL_CMD_GET(x)\
+ FIELD_GET(ANA_VLANACCESS_VLAN_TBL_CMD, x)
+
+/* ANA:ANA_TABLES:VLANTIDX */
+#define ANA_VLANTIDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 64, 0, 1, 4)
+
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS BIT(18)
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(x)\
+ FIELD_PREP(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x)
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_GET(x)\
+ FIELD_GET(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x)
+
+#define ANA_VLANTIDX_V_INDEX GENMASK(11, 0)
+#define ANA_VLANTIDX_V_INDEX_SET(x)\
+ FIELD_PREP(ANA_VLANTIDX_V_INDEX, x)
+#define ANA_VLANTIDX_V_INDEX_GET(x)\
+ FIELD_GET(ANA_VLANTIDX_V_INDEX, x)
+
+/* ANA:PORT:VLAN_CFG */
+#define ANA_VLAN_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 0, 0, 1, 4)
+
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA BIT(20)
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_AWARE_ENA, x)
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_AWARE_ENA, x)
+
+#define ANA_VLAN_CFG_VLAN_POP_CNT GENMASK(19, 18)
+#define ANA_VLAN_CFG_VLAN_POP_CNT_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_POP_CNT, x)
+#define ANA_VLAN_CFG_VLAN_POP_CNT_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_POP_CNT, x)
+
+#define ANA_VLAN_CFG_VLAN_VID GENMASK(11, 0)
+#define ANA_VLAN_CFG_VLAN_VID_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_VID, x)
+#define ANA_VLAN_CFG_VLAN_VID_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_VID, x)
+
+/* ANA:PORT:DROP_CFG */
+#define ANA_DROP_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 4, 0, 1, 4)
+
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA BIT(6)
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3)
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2)
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA BIT(0)
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+
+/* ANA:PORT:CPU_FWD_CFG */
+#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4)
+
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA BIT(3)
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
+
+/* ANA:PORT:CPU_FWD_BPDU_CFG */
+#define ANA_CPU_FWD_BPDU_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 100, 0, 1, 4)
+
+/* ANA:PORT:PORT_CFG */
+#define ANA_PORT_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 112, 0, 1, 4)
+
+#define ANA_PORT_CFG_LEARNAUTO BIT(6)
+#define ANA_PORT_CFG_LEARNAUTO_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x)
+#define ANA_PORT_CFG_LEARNAUTO_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_LEARNAUTO, x)
+
+#define ANA_PORT_CFG_LEARN_ENA BIT(5)
+#define ANA_PORT_CFG_LEARN_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_LEARN_ENA, x)
+#define ANA_PORT_CFG_LEARN_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_LEARN_ENA, x)
+
+#define ANA_PORT_CFG_RECV_ENA BIT(4)
+#define ANA_PORT_CFG_RECV_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_RECV_ENA, x)
+#define ANA_PORT_CFG_RECV_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_RECV_ENA, x)
+
+#define ANA_PORT_CFG_PORTID_VAL GENMASK(3, 0)
+#define ANA_PORT_CFG_PORTID_VAL_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_PORTID_VAL, x)
+#define ANA_PORT_CFG_PORTID_VAL_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x)
+
+/* ANA:PFC:PFC_CFG */
+#define ANA_PFC_CFG(g) __REG(TARGET_ANA, 0, 1, 30720, g, 8, 64, 0, 0, 1, 4)
+
+#define ANA_PFC_CFG_FC_LINK_SPEED GENMASK(1, 0)
+#define ANA_PFC_CFG_FC_LINK_SPEED_SET(x)\
+ FIELD_PREP(ANA_PFC_CFG_FC_LINK_SPEED, x)
+#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\
+ FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x)
+
+/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */
+#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4)
+
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA BIT(0)
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(x)\
+ FIELD_PREP(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x)
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_GET(x)\
+ FIELD_GET(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x)
+
+/* DEV:PORT_MODE:CLOCK_CFG */
+#define DEV_CLOCK_CFG(t) __REG(TARGET_DEV, t, 8, 0, 0, 1, 28, 0, 0, 1, 4)
+
+#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
+#define DEV_CLOCK_CFG_MAC_TX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_MAC_TX_RST, x)
+#define DEV_CLOCK_CFG_MAC_TX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_MAC_TX_RST, x)
+
+#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
+#define DEV_CLOCK_CFG_MAC_RX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_MAC_RX_RST, x)
+#define DEV_CLOCK_CFG_MAC_RX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_MAC_RX_RST, x)
+
+#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
+#define DEV_CLOCK_CFG_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PCS_TX_RST, x)
+#define DEV_CLOCK_CFG_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PCS_TX_RST, x)
+
+#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4)
+#define DEV_CLOCK_CFG_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PCS_RX_RST, x)
+#define DEV_CLOCK_CFG_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PCS_RX_RST, x)
+
+#define DEV_CLOCK_CFG_PORT_RST BIT(3)
+#define DEV_CLOCK_CFG_PORT_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PORT_RST, x)
+#define DEV_CLOCK_CFG_PORT_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PORT_RST, x)
+
+#define DEV_CLOCK_CFG_LINK_SPEED GENMASK(1, 0)
+#define DEV_CLOCK_CFG_LINK_SPEED_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_LINK_SPEED, x)
+#define DEV_CLOCK_CFG_LINK_SPEED_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_LINK_SPEED, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV_MAC_ENA_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 0, 0, 1, 4)
+
+#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_ENA_CFG_RX_ENA, x)
+#define DEV_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEV_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_ENA_CFG_TX_ENA, x)
+#define DEV_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_ENA_CFG_TX_ENA, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_MODE_CFG */
+#define DEV_MAC_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 4, 0, 1, 4)
+
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 8, 0, 1, 4)
+
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+ FIELD_PREP(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+ FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEV_MAC_IFG_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 20, 0, 1, 4)
+
+#define DEV_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
+#define DEV_MAC_IFG_CFG_TX_IFG_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_TX_IFG, x)
+#define DEV_MAC_IFG_CFG_TX_IFG_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEV_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
+#define DEV_MAC_IFG_CFG_RX_IFG2_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG2, x)
+#define DEV_MAC_IFG_CFG_RX_IFG2_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEV_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
+#define DEV_MAC_IFG_CFG_RX_IFG1_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG1, x)
+#define DEV_MAC_IFG_CFG_RX_IFG1_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG1, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_HDX_CFG */
+#define DEV_MAC_HDX_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 24, 0, 1, 4)
+
+#define DEV_MAC_HDX_CFG_SEED GENMASK(23, 16)
+#define DEV_MAC_HDX_CFG_SEED_SET(x)\
+ FIELD_PREP(DEV_MAC_HDX_CFG_SEED, x)
+#define DEV_MAC_HDX_CFG_SEED_GET(x)\
+ FIELD_GET(DEV_MAC_HDX_CFG_SEED, x)
+
+#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12)
+#define DEV_MAC_HDX_CFG_SEED_LOAD_SET(x)\
+ FIELD_PREP(DEV_MAC_HDX_CFG_SEED_LOAD, x)
+#define DEV_MAC_HDX_CFG_SEED_LOAD_GET(x)\
+ FIELD_GET(DEV_MAC_HDX_CFG_SEED_LOAD, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_LOW_CFG */
+#define DEV_FC_MAC_LOW_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 32, 0, 1, 4)
+
+/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_HIGH_CFG */
+#define DEV_FC_MAC_HIGH_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 36, 0, 1, 4)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_CFG */
+#define DEV_PCS1G_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 0, 0, 1, 4)
+
+#define DEV_PCS1G_CFG_PCS_ENA BIT(0)
+#define DEV_PCS1G_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_CFG_PCS_ENA, x)
+#define DEV_PCS1G_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_CFG_PCS_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
+#define DEV_PCS1G_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 4, 0, 1, 4)
+
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
+#define DEV_PCS1G_SD_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 8, 0, 1, 4)
+
+#define DEV_PCS1G_SD_CFG_SD_ENA BIT(0)
+#define DEV_PCS1G_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_SD_CFG_SD_ENA, x)
+#define DEV_PCS1G_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_SD_CFG_SD_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
+#define DEV_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 12, 0, 1, 4)
+
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16)
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT BIT(1)
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x)
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x)
+
+#define DEV_PCS1G_ANEG_CFG_ENA BIT(0)
+#define DEV_PCS1G_ANEG_CFG_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_ENA, x)
+#define DEV_PCS1G_ANEG_CFG_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
+#define DEV_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 32, 0, 1, 4)
+
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV GENMASK(31, 16)
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_STATUS_LP_ADV, x)
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_STATUS_LP_ADV, x)
+
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
+#define DEV_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 40, 0, 1, 4)
+
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\
+ FIELD_PREP(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x)
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\
+ FIELD_GET(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x)
+
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\
+ FIELD_PREP(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\
+ FIELD_GET(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_STICKY */
+#define DEV_PCS1G_STICKY(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 48, 0, 1, 4)
+
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
+ FIELD_PREP(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
+ FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+
+/* DEVCPU_QS:XTR:XTR_GRP_CFG */
+#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
+
+#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_XTR_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_MODE, x)
+#define QS_XTR_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_MODE, x)
+
+#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:XTR:XTR_RD */
+#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
+
+/* DEVCPU_QS:XTR:XTR_FLUSH */
+#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
+
+/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
+#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
+
+/* DEVCPU_QS:INJ:INJ_GRP_CFG */
+#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
+
+#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_INJ_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_MODE, x)
+#define QS_INJ_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_MODE, x)
+
+#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:INJ:INJ_WR */
+#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
+
+/* DEVCPU_QS:INJ:INJ_CTRL */
+#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
+
+#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21)
+#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x)
+#define QS_INJ_CTRL_GAP_SIZE_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x)
+
+#define QS_INJ_CTRL_EOF BIT(19)
+#define QS_INJ_CTRL_EOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_EOF, x)
+#define QS_INJ_CTRL_EOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_EOF, x)
+
+#define QS_INJ_CTRL_SOF BIT(18)
+#define QS_INJ_CTRL_SOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_SOF, x)
+#define QS_INJ_CTRL_SOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_SOF, x)
+
+#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16)
+#define QS_INJ_CTRL_VLD_BYTES_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x)
+#define QS_INJ_CTRL_VLD_BYTES_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
+
+/* DEVCPU_QS:INJ:INJ_STATUS */
+#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
+
+#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4)
+#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x)
+#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x)
+
+#define QS_INJ_STATUS_FIFO_RDY GENMASK(3, 2)
+#define QS_INJ_STATUS_FIFO_RDY_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x)
+#define QS_INJ_STATUS_FIFO_RDY_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x)
+
+/* QSYS:SYSTEM:PORT_MODE */
+#define QSYS_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 0, r, 10, 4)
+
+#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
+#define QSYS_PORT_MODE_DEQUEUE_DIS_SET(x)\
+ FIELD_PREP(QSYS_PORT_MODE_DEQUEUE_DIS, x)
+#define QSYS_PORT_MODE_DEQUEUE_DIS_GET(x)\
+ FIELD_GET(QSYS_PORT_MODE_DEQUEUE_DIS, x)
+
+/* QSYS:SYSTEM:SWITCH_PORT_MODE */
+#define QSYS_SW_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 80, r, 9, 4)
+
+#define QSYS_SW_PORT_MODE_PORT_ENA BIT(18)
+#define QSYS_SW_PORT_MODE_PORT_ENA_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_PORT_ENA, x)
+#define QSYS_SW_PORT_MODE_PORT_ENA_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_PORT_ENA, x)
+
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG GENMASK(16, 14)
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x)
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x)
+
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE BIT(12)
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x)
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x)
+
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA GENMASK(11, 4)
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_TX_PFC_ENA, x)
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_TX_PFC_ENA, x)
+
+#define QSYS_SW_PORT_MODE_AGING_MODE GENMASK(1, 0)
+#define QSYS_SW_PORT_MODE_AGING_MODE_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_AGING_MODE, x)
+#define QSYS_SW_PORT_MODE_AGING_MODE_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_AGING_MODE, x)
+
+/* QSYS:SYSTEM:SW_STATUS */
+#define QSYS_SW_STATUS(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 164, r, 9, 4)
+
+#define QSYS_SW_STATUS_EQ_AVAIL GENMASK(7, 0)
+#define QSYS_SW_STATUS_EQ_AVAIL_SET(x)\
+ FIELD_PREP(QSYS_SW_STATUS_EQ_AVAIL, x)
+#define QSYS_SW_STATUS_EQ_AVAIL_GET(x)\
+ FIELD_GET(QSYS_SW_STATUS_EQ_AVAIL, x)
+
+/* QSYS:SYSTEM:CPU_GROUP_MAP */
+#define QSYS_CPU_GROUP_MAP __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 204, 0, 1, 4)
+
+/* QSYS:RES_CTRL:RES_CFG */
+#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 32768, g, 1024, 8, 0, 0, 1, 4)
+
+/* REW:PORT:PORT_VLAN_CFG */
+#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 0, 0, 1, 4)
+
+#define REW_PORT_VLAN_CFG_PORT_TPID GENMASK(31, 16)
+#define REW_PORT_VLAN_CFG_PORT_TPID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_TPID, x)
+#define REW_PORT_VLAN_CFG_PORT_TPID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_TPID, x)
+
+#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0)
+#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x)
+#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
+
+/* REW:PORT:TAG_CFG */
+#define REW_TAG_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 4, 0, 1, 4)
+
+#define REW_TAG_CFG_TAG_CFG GENMASK(8, 7)
+#define REW_TAG_CFG_TAG_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_CFG, x)
+#define REW_TAG_CFG_TAG_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_CFG, x)
+
+#define REW_TAG_CFG_TAG_TPID_CFG GENMASK(6, 5)
+#define REW_TAG_CFG_TAG_TPID_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_TPID_CFG, x)
+#define REW_TAG_CFG_TAG_TPID_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_TPID_CFG, x)
+
+/* REW:PORT:PORT_CFG */
+#define REW_PORT_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 8, 0, 1, 4)
+
+#define REW_PORT_CFG_NO_REWRITE BIT(0)
+#define REW_PORT_CFG_NO_REWRITE_SET(x)\
+ FIELD_PREP(REW_PORT_CFG_NO_REWRITE, x)
+#define REW_PORT_CFG_NO_REWRITE_GET(x)\
+ FIELD_GET(REW_PORT_CFG_NO_REWRITE, x)
+
+/* SYS:SYSTEM:RESET_CFG */
+#define SYS_RESET_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 0, 0, 1, 4)
+
+#define SYS_RESET_CFG_CORE_ENA BIT(0)
+#define SYS_RESET_CFG_CORE_ENA_SET(x)\
+ FIELD_PREP(SYS_RESET_CFG_CORE_ENA, x)
+#define SYS_RESET_CFG_CORE_ENA_GET(x)\
+ FIELD_GET(SYS_RESET_CFG_CORE_ENA, x)
+
+/* SYS:SYSTEM:PORT_MODE */
+#define SYS_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 44, r, 10, 4)
+
+#define SYS_PORT_MODE_INCL_INJ_HDR GENMASK(5, 4)
+#define SYS_PORT_MODE_INCL_INJ_HDR_SET(x)\
+ FIELD_PREP(SYS_PORT_MODE_INCL_INJ_HDR, x)
+#define SYS_PORT_MODE_INCL_INJ_HDR_GET(x)\
+ FIELD_GET(SYS_PORT_MODE_INCL_INJ_HDR, x)
+
+#define SYS_PORT_MODE_INCL_XTR_HDR GENMASK(3, 2)
+#define SYS_PORT_MODE_INCL_XTR_HDR_SET(x)\
+ FIELD_PREP(SYS_PORT_MODE_INCL_XTR_HDR, x)
+#define SYS_PORT_MODE_INCL_XTR_HDR_GET(x)\
+ FIELD_GET(SYS_PORT_MODE_INCL_XTR_HDR, x)
+
+/* SYS:SYSTEM:FRONT_PORT_MODE */
+#define SYS_FRONT_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 84, r, 8, 4)
+
+#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(1)
+#define SYS_FRONT_PORT_MODE_HDX_MODE_SET(x)\
+ FIELD_PREP(SYS_FRONT_PORT_MODE_HDX_MODE, x)
+#define SYS_FRONT_PORT_MODE_HDX_MODE_GET(x)\
+ FIELD_GET(SYS_FRONT_PORT_MODE_HDX_MODE, x)
+
+/* SYS:SYSTEM:FRM_AGING */
+#define SYS_FRM_AGING __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 116, 0, 1, 4)
+
+#define SYS_FRM_AGING_AGE_TX_ENA BIT(20)
+#define SYS_FRM_AGING_AGE_TX_ENA_SET(x)\
+ FIELD_PREP(SYS_FRM_AGING_AGE_TX_ENA, x)
+#define SYS_FRM_AGING_AGE_TX_ENA_GET(x)\
+ FIELD_GET(SYS_FRM_AGING_AGE_TX_ENA, x)
+
+/* SYS:SYSTEM:STAT_CFG */
+#define SYS_STAT_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 120, 0, 1, 4)
+
+#define SYS_STAT_CFG_STAT_VIEW GENMASK(9, 0)
+#define SYS_STAT_CFG_STAT_VIEW_SET(x)\
+ FIELD_PREP(SYS_STAT_CFG_STAT_VIEW, x)
+#define SYS_STAT_CFG_STAT_VIEW_GET(x)\
+ FIELD_GET(SYS_STAT_CFG_STAT_VIEW, x)
+
+/* SYS:PAUSE_CFG:PAUSE_CFG */
+#define SYS_PAUSE_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 0, r, 9, 4)
+
+#define SYS_PAUSE_CFG_PAUSE_START GENMASK(18, 10)
+#define SYS_PAUSE_CFG_PAUSE_START_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_START, x)
+#define SYS_PAUSE_CFG_PAUSE_START_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_START, x)
+
+#define SYS_PAUSE_CFG_PAUSE_STOP GENMASK(9, 1)
+#define SYS_PAUSE_CFG_PAUSE_STOP_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_STOP, x)
+#define SYS_PAUSE_CFG_PAUSE_STOP_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_STOP, x)
+
+#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0)
+#define SYS_PAUSE_CFG_PAUSE_ENA_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_ENA, x)
+#define SYS_PAUSE_CFG_PAUSE_ENA_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_ENA, x)
+
+/* SYS:PAUSE_CFG:ATOP */
+#define SYS_ATOP(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 40, r, 9, 4)
+
+/* SYS:PAUSE_CFG:ATOP_TOT_CFG */
+#define SYS_ATOP_TOT_CFG __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 76, 0, 1, 4)
+
+/* SYS:PAUSE_CFG:MAC_FC_CFG */
+#define SYS_MAC_FC_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 80, r, 8, 4)
+
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED GENMASK(27, 26)
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_FC_LINK_SPEED, x)
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_FC_LINK_SPEED, x)
+
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG GENMASK(25, 20)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x)
+
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18)
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x)
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x)
+
+#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17)
+#define SYS_MAC_FC_CFG_TX_FC_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_TX_FC_ENA, x)
+#define SYS_MAC_FC_CFG_TX_FC_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_TX_FC_ENA, x)
+
+#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16)
+#define SYS_MAC_FC_CFG_RX_FC_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_RX_FC_ENA, x)
+#define SYS_MAC_FC_CFG_RX_FC_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_RX_FC_ENA, x)
+
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG GENMASK(15, 0)
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x)
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x)
+
+/* SYS:STAT:CNT */
+#define SYS_CNT(g) __REG(TARGET_SYS, 0, 1, 0, g, 896, 4, 0, 0, 1, 4)
+
+/* SYS:RAM_CTRL:RAM_INIT */
+#define SYS_RAM_INIT __REG(TARGET_SYS, 0, 1, 4432, 0, 1, 4, 0, 0, 1, 4)
+
+#define SYS_RAM_INIT_RAM_INIT BIT(1)
+#define SYS_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(SYS_RAM_INIT_RAM_INIT, x)
+#define SYS_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(SYS_RAM_INIT_RAM_INIT, x)
+
+#endif /* _LAN966X_REGS_H_ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
new file mode 100644
index 000000000000..7de55f6a4da8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/if_bridge.h>
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+static struct notifier_block lan966x_netdevice_nb __read_mostly;
+static struct notifier_block lan966x_switchdev_nb __read_mostly;
+static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
+
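+/* The PGID_MC/PGID_UC/PGID_BC port group entries hold one bit per chip
+ * port; setting or clearing a port's bit below controls whether flooded
+ * multicast/unicast/broadcast traffic is forwarded to that port.
+ */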
+static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_MC));
+}
+
+static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_UC));
+}
+
+static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_BC));
+}
+
+static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
+{
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
+ ANA_PORT_CFG_LEARN_ENA,
+ port->lan966x, ANA_PORT_CFG(port->chip_port));
+
+ port->learn_ena = enabled;
+}
+
+static void lan966x_port_bridge_flags(struct lan966x_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_MCAST_FLOOD)
+ lan966x_port_set_mcast_flood(port,
+ !!(flags.val & BR_MCAST_FLOOD));
+
+ if (flags.mask & BR_FLOOD)
+ lan966x_port_set_ucast_flood(port,
+ !!(flags.val & BR_FLOOD));
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ lan966x_port_set_bcast_flood(port,
+ !!(flags.val & BR_BCAST_FLOOD));
+
+ if (flags.mask & BR_LEARNING)
+ lan966x_port_set_learning(port,
+ !!(flags.val & BR_LEARNING));
+}
+
+static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
+ BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
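+/* Recompute the per-source-port forwarding masks: a port that is in the
+ * bridge forwarding mask may forward to every other bridged port except
+ * itself, and every port may always forward to the CPU port.
+ */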
+static void lan966x_update_fwd_mask(struct lan966x *lan966x)
+{
+ int i;
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ struct lan966x_port *port = lan966x->ports[i];
+ unsigned long mask = 0;
+
+ if (port && lan966x->bridge_fwd_mask & BIT(i))
+ mask = lan966x->bridge_fwd_mask & ~BIT(i);
+
+ mask |= BIT(CPU_PORT);
+
+ lan_wr(ANA_PGID_PGID_SET(mask),
+ lan966x, ANA_PGID(PGID_SRC + i));
+ }
+}
+
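+/* Apply the STP state: learning stays enabled only in the LEARNING and
+ * FORWARDING states (and only if the user has not turned it off), while
+ * the port joins the bridge forwarding mask only when FORWARDING.
+ */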
+static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool learn_ena = false;
+
+ if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
+ port->learn_ena)
+ learn_ena = true;
+
+ if (state == BR_STATE_FORWARDING)
+ lan966x->bridge_fwd_mask |= BIT(port->chip_port);
+ else
+ lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
+ ANA_PORT_CFG_LEARN_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ lan966x_update_fwd_mask(lan966x);
+}
+
+static void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+ lan966x_mac_set_ageing(port->lan966x, ageing_time);
+}
+
+static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err = 0;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ lan966x_port_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ lan966x_port_stp_state_set(port, attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ lan966x_port_ageing_set(port, attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
+ lan966x_vlan_port_apply(port);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_port_bridge_join(struct lan966x_port *port,
+ struct net_device *bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_brport_flags flags = {0};
+ struct lan966x *lan966x = port->lan966x;
+ struct net_device *dev = port->dev;
+ int err;
+
+ if (!lan966x->bridge_mask) {
+ lan966x->bridge = bridge;
+ } else {
+ if (lan966x->bridge != bridge) {
+ NL_SET_ERR_MSG_MOD(extack, "Not allow to add port to different bridge");
+ return -ENODEV;
+ }
+ }
+
+ err = switchdev_bridge_port_offload(dev, dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ return err;
+
+ lan966x->bridge_mask |= BIT(port->chip_port);
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask;
+ lan966x_port_bridge_flags(port, flags);
+
+ return 0;
+}
+
+static void lan966x_port_bridge_leave(struct lan966x_port *port,
+ struct net_device *bridge)
+{
+ struct switchdev_brport_flags flags = {0};
+ struct lan966x *lan966x = port->lan966x;
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask & ~BR_LEARNING;
+ lan966x_port_bridge_flags(port, flags);
+
+ lan966x->bridge_mask &= ~BIT(port->chip_port);
+
+ if (!lan966x->bridge_mask)
+ lan966x->bridge = NULL;
+
+ /* Set the port back to host mode */
+ lan966x_vlan_port_set_vlan_aware(port, false);
+ lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ lan966x_vlan_port_apply(port);
+}
+
+static int lan966x_port_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
+ int err = 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ err = lan966x_port_bridge_join(port, info->upper_dev,
+ extack);
+ else
+ lan966x_port_bridge_leave(port, info->upper_dev);
+ }
+
+ return err;
+}
+
+static int lan966x_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (netif_is_bridge_master(info->upper_dev) && !info->linking)
+ switchdev_bridge_port_unoffload(port->dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb);
+
+ return NOTIFY_DONE;
+}
+
+static int lan966x_foreign_bridging_check(struct net_device *bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = NULL;
+ bool has_foreign = false;
+ struct net_device *dev;
+ struct list_head *iter;
+
+ if (!netif_is_bridge_master(bridge))
+ return 0;
+
+ netdev_for_each_lower_dev(bridge, dev, iter) {
+ if (lan966x_netdevice_check(dev)) {
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (lan966x) {
+			/* Bridge already has at least one port of a
+			 * lan966x switch inside it; check that it's
+			 * the same instance of the driver.
+			 */
+ if (port->lan966x != lan966x) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bridging between multiple lan966x switches disallowed");
+ return -EINVAL;
+ }
+ } else {
+ /* This is the first lan966x port inside this
+ * bridge
+ */
+ lan966x = port->lan966x;
+ }
+ } else {
+ has_foreign = true;
+ }
+
+ if (lan966x && has_foreign) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bridging lan966x ports with foreign interfaces disallowed");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int lan966x_bridge_check(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ return lan966x_foreign_bridging_check(info->upper_dev,
+ info->info.extack);
+}
+
+static int lan966x_netdevice_port_event(struct net_device *dev,
+ struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int err = 0;
+
+ if (!lan966x_netdevice_check(dev)) {
+ if (event == NETDEV_CHANGEUPPER)
+ return lan966x_bridge_check(dev, ptr);
+ return 0;
+ }
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ err = lan966x_port_prechangeupper(dev, ptr);
+ break;
+ case NETDEV_CHANGEUPPER:
+ err = lan966x_bridge_check(dev, ptr);
+ if (err)
+ return err;
+
+ err = lan966x_port_changeupper(dev, ptr);
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ ret = lan966x_netdevice_port_event(dev, nb, event, ptr);
+
+ return notifier_from_errno(ret);
+}
+
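+/* Report a device as foreign when it is a bridge other than the one
+ * offloaded by this switch instance.
+ */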
+static bool lan966x_foreign_dev_check(const struct net_device *dev,
+ const struct net_device *foreign_dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (netif_is_bridge_master(foreign_dev))
+ if (lan966x->bridge != foreign_dev)
+ return true;
+
+ return false;
+}
+
+static int lan966x_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_port_attr_set);
+ return notifier_from_errno(err);
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
+ lan966x_netdevice_check,
+ lan966x_foreign_dev_check,
+ lan966x_handle_fdb,
+ NULL);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct lan966x *lan966x = port->lan966x;
+
+	/* When adding a port to a vlan, we get a callback for the port but
+	 * also for the bridge. When we get the callback for the bridge, just
+	 * bail out. Then, when the bridge itself is added to the vlan, we get
+	 * a callback here with the BRIDGE_VLAN_INFO_BRENTRY flag set. That
+	 * means the CPU port is added to the vlan, so broadcast frames and
+	 * unicast frames with the dmac of the bridge should be forwarded to
+	 * the CPU.
+	 */
+ if (netif_is_bridge_master(obj->orig_dev) &&
+ !(v->flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return 0;
+
+ if (!netif_is_bridge_master(obj->orig_dev))
+ lan966x_vlan_port_add_vlan(port, v->vid,
+ v->flags & BRIDGE_VLAN_INFO_PVID,
+ v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ else
+ lan966x_vlan_cpu_add_vlan(lan966x, v->vid);
+
+ return 0;
+}
+
+static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = lan966x_handle_port_vlan_add(port, obj);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = lan966x_handle_port_mdb_add(port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!netif_is_bridge_master(obj->orig_dev))
+ lan966x_vlan_port_del_vlan(port, v->vid);
+ else
+ lan966x_vlan_cpu_del_vlan(lan966x, v->vid);
+
+ return 0;
+}
+
+static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = lan966x_handle_port_vlan_del(port, obj);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = lan966x_handle_port_mdb_del(port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_handle_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_handle_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block lan966x_netdevice_nb __read_mostly = {
+ .notifier_call = lan966x_netdevice_event,
+};
+
+static struct notifier_block lan966x_switchdev_nb __read_mostly = {
+ .notifier_call = lan966x_switchdev_event,
+};
+
+static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
+ .notifier_call = lan966x_switchdev_blocking_event,
+};
+
+void lan966x_register_notifier_blocks(void)
+{
+ register_netdevice_notifier(&lan966x_netdevice_nb);
+ register_switchdev_notifier(&lan966x_switchdev_nb);
+ register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
+}
+
+void lan966x_unregister_notifier_blocks(void)
+{
+ unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
+ unregister_switchdev_notifier(&lan966x_switchdev_nb);
+ unregister_netdevice_notifier(&lan966x_netdevice_nb);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
new file mode 100644
index 000000000000..8d7260cd7da9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define VLANACCESS_CMD_IDLE 0
+#define VLANACCESS_CMD_READ 1
+#define VLANACCESS_CMD_WRITE 2
+#define VLANACCESS_CMD_INIT 3
+
+static int lan966x_vlan_get_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, ANA_VLANACCESS);
+}
+
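+/* Poll ANA_VLANACCESS until the VLAN table command field returns to IDLE. */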
+static int lan966x_vlan_wait_for_completion(struct lan966x *lan966x)
+{
+ u32 val;
+
+ return readx_poll_timeout(lan966x_vlan_get_status,
+ lan966x, val,
+ (val & ANA_VLANACCESS_VLAN_TBL_CMD) ==
+ VLANACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+}
+
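+/* Write the port membership mask of @vid into the hardware VLAN table;
+ * CPU copies are controlled separately via the PGID_CPU disable flag.
+ */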
+static void lan966x_vlan_set_mask(struct lan966x *lan966x, u16 vid)
+{
+ u16 mask = lan966x->vlan_mask[vid];
+ bool cpu_dis;
+
+ cpu_dis = !(mask & BIT(CPU_PORT));
+
+ /* Set flags and the VID to configure */
+ lan_rmw(ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(cpu_dis) |
+ ANA_VLANTIDX_V_INDEX_SET(vid),
+ ANA_VLANTIDX_VLAN_PGID_CPU_DIS |
+ ANA_VLANTIDX_V_INDEX,
+ lan966x, ANA_VLANTIDX);
+
+ /* Set the vlan port members mask */
+ lan_rmw(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(mask),
+ ANA_VLAN_PORT_MASK_VLAN_PORT_MASK,
+ lan966x, ANA_VLAN_PORT_MASK);
+
+ /* Issue a write command */
+ lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_WRITE),
+ ANA_VLANACCESS_VLAN_TBL_CMD,
+ lan966x, ANA_VLANACCESS);
+
+ if (lan966x_vlan_wait_for_completion(lan966x))
+ dev_err(lan966x->dev, "Vlan set mask failed\n");
+}
+
+static void lan966x_vlan_port_add_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 p = port->chip_port;
+
+ lan966x->vlan_mask[vid] |= BIT(p);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_port_del_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 p = port->chip_port;
+
+ lan966x->vlan_mask[vid] &= ~BIT(p);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static bool lan966x_vlan_port_any_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ return !!(lan966x->vlan_mask[vid] & ~BIT(CPU_PORT));
+}
+
+static void lan966x_vlan_cpu_add_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ lan966x->vlan_mask[vid] |= BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_del_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ lan966x->vlan_mask[vid] &= ~BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_add_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ __set_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+static void lan966x_vlan_cpu_del_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ __clear_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ return test_bit(vid, lan966x->cpu_vlan_mask);
+}
+
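+/* Ports outside the bridge always use HOST_PVID; bridged ports use their
+ * configured pvid when vlan-aware and UNAWARE_PVID otherwise.
+ */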
+static u16 lan966x_vlan_port_get_pvid(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!(lan966x->bridge_mask & BIT(port->chip_port)))
+ return HOST_PVID;
+
+ return port->vlan_aware ? port->pvid : UNAWARE_PVID;
+}
+
+int lan966x_vlan_port_set_vid(struct lan966x_port *port, u16 vid,
+ bool pvid, bool untagged)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Egress vlan classification */
+ if (untagged && port->vid != vid) {
+ if (port->vid) {
+ dev_err(lan966x->dev,
+ "Port already has a native VLAN: %d\n",
+ port->vid);
+ return -EBUSY;
+ }
+ port->vid = vid;
+ }
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
+ return 0;
+}
+
+static void lan966x_vlan_port_remove_vid(struct lan966x_port *port, u16 vid)
+{
+ if (port->pvid == vid)
+ port->pvid = 0;
+
+ if (port->vid == vid)
+ port->vid = 0;
+}
+
+void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ bool vlan_aware)
+{
+ port->vlan_aware = vlan_aware;
+}
+
+void lan966x_vlan_port_apply(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u16 pvid;
+ u32 val;
+
+ pvid = lan966x_vlan_port_get_pvid(port);
+
+	/* Ingress classification (ANA_VLAN_CFG) */
+ /* Default vlan to classify for untagged frames (may be zero) */
+ val = ANA_VLAN_CFG_VLAN_VID_SET(pvid);
+ if (port->vlan_aware)
+ val |= ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+ ANA_VLAN_CFG_VLAN_POP_CNT_SET(1);
+
+ lan_rmw(val,
+ ANA_VLAN_CFG_VLAN_VID | ANA_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_VLAN_CFG_VLAN_POP_CNT,
+ lan966x, ANA_VLAN_CFG(port->chip_port));
+
+ /* Drop frames with multicast source address */
+ val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1);
+ if (port->vlan_aware && !pvid)
+		/* If the port is vlan-aware and has no default vlan (pvid is
+		 * zero), drop untagged and priority-tagged frames.
+		 */
+ val |= ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(1) |
+ ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(1) |
+ ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(1);
+
+ lan_wr(val, lan966x, ANA_DROP_CFG(port->chip_port));
+
+ /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */
+ val = REW_TAG_CFG_TAG_TPID_CFG_SET(0);
+ if (port->vlan_aware) {
+ if (port->vid)
+			/* Tag all frames except those with the native VID */
+ val |= REW_TAG_CFG_TAG_CFG_SET(1);
+ else
+ val |= REW_TAG_CFG_TAG_CFG_SET(3);
+ }
+
+ /* Update only some bits in the register */
+ lan_rmw(val,
+ REW_TAG_CFG_TAG_TPID_CFG | REW_TAG_CFG_TAG_CFG,
+ lan966x, REW_TAG_CFG(port->chip_port));
+
+ /* Set default VLAN and tag type to 8021Q */
+ lan_rmw(REW_PORT_VLAN_CFG_PORT_TPID_SET(ETH_P_8021Q) |
+ REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
+ REW_PORT_VLAN_CFG_PORT_TPID |
+ REW_PORT_VLAN_CFG_PORT_VID,
+ lan966x, REW_PORT_VLAN_CFG(port->chip_port));
+}
+
+void lan966x_vlan_port_add_vlan(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+	/* If the CPU (bridge) is already part of the vlan, add the fdb
+	 * entries to the MAC table so frames get copied to the CPU (bridge).
+	 * If the CPU (bridge) is not part of the vlan, the frames are simply
+	 * dropped.
+	 */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+ lan966x_fdb_write_entries(lan966x, vid);
+ lan966x_mdb_write_entries(lan966x, vid);
+ }
+
+ lan966x_vlan_port_set_vid(port, vid, pvid, untagged);
+ lan966x_vlan_port_add_vlan_mask(port, vid);
+ lan966x_vlan_port_apply(port);
+}
+
+void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan966x_vlan_port_remove_vid(port, vid);
+ lan966x_vlan_port_del_vlan_mask(port, vid);
+ lan966x_vlan_port_apply(port);
+
+	/* If there are no other ports left in the vlan, remove the CPU from
+	 * that vlan but keep it in the CPU vlan mask, because it may be
+	 * needed again when another port is added to that vlan.
+	 */
+ if (!lan966x_vlan_port_any_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+ lan966x_fdb_erase_entries(lan966x, vid);
+ lan966x_mdb_erase_entries(lan966x, vid);
+ }
+}
+
+void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid)
+{
+	/* Add an entry in the MAC table for the CPU.
+	 * Add the CPU to the vlan only if another port is already a member of
+	 * that vlan; otherwise all broadcast frames in that vlan would go to
+	 * the CPU even though none of the front ports are in the vlan, and
+	 * the CPU would just have to discard them. The membership is still
+	 * recorded so that when a front port is added, the CPU port is added
+	 * as well.
+	 */
+ if (lan966x_vlan_port_any_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+ lan966x_mdb_write_entries(lan966x, vid);
+ }
+
+ lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, vid);
+ lan966x_fdb_write_entries(lan966x, vid);
+}
+
+void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid)
+{
+ /* Remove the CPU part of the vlan */
+ lan966x_vlan_cpu_del_cpu_vlan_mask(lan966x, vid);
+ lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+ lan966x_fdb_erase_entries(lan966x, vid);
+ lan966x_mdb_erase_entries(lan966x, vid);
+}
+
+void lan966x_vlan_init(struct lan966x *lan966x)
+{
+ u16 port, vid;
+
+	/* Clear the VLAN table; by default all ports are members of all VLANs */
+ lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_INIT),
+ ANA_VLANACCESS_VLAN_TBL_CMD,
+ lan966x, ANA_VLANACCESS);
+ lan966x_vlan_wait_for_completion(lan966x);
+
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ lan966x->vlan_mask[vid] = 0;
+ lan966x_vlan_set_mask(lan966x, vid);
+ }
+
+ /* Set all the ports + cpu to be part of HOST_PVID and UNAWARE_PVID */
+ lan966x->vlan_mask[HOST_PVID] =
+ GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, HOST_PVID);
+
+ lan966x->vlan_mask[UNAWARE_PVID] =
+ GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, UNAWARE_PVID);
+
+ lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, UNAWARE_PVID);
+
+ /* Configure the CPU port to be vlan aware */
+ lan_wr(ANA_VLAN_CFG_VLAN_VID_SET(0) |
+ ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+ ANA_VLAN_CFG_VLAN_POP_CNT_SET(1),
+ lan966x, ANA_VLAN_CFG(CPU_PORT));
+
+ /* Set vlan ingress filter mask to all ports */
+ lan_wr(GENMASK(lan966x->num_phys_ports, 0),
+ lan966x, ANA_VLANMASK);
+
+ for (port = 0; port < lan966x->num_phys_ports; port++) {
+ lan_wr(0, lan966x, REW_PORT_VLAN_CFG(port));
+ lan_wr(0, lan966x, REW_TAG_CFG(port));
+ }
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 4625d4fb4cde..16266275dd36 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -292,6 +292,33 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->phylink_config.dev = &spx5_port->ndev->dev;
spx5_port->phylink_config.type = PHYLINK_NETDEV;
spx5_port->phylink_config.pcs_poll = true;
+ spx5_port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_2500FD | MAC_5000FD | MAC_10000FD | MAC_25000FD;
+
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ spx5_port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ spx5_port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ spx5_port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ spx5_port->phylink_config.supported_interfaces);
+
+ if (spx5_port->conf.bandwidth == SPEED_5000 ||
+ spx5_port->conf.bandwidth == SPEED_10000 ||
+ spx5_port->conf.bandwidth == SPEED_25000)
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ spx5_port->phylink_config.supported_interfaces);
+
+ if (spx5_port->conf.bandwidth == SPEED_10000 ||
+ spx5_port->conf.bandwidth == SPEED_25000)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ spx5_port->phylink_config.supported_interfaces);
+
+ if (spx5_port->conf.bandwidth == SPEED_25000)
+ __set_bit(PHY_INTERFACE_MODE_25GBASER,
+ spx5_port->phylink_config.supported_interfaces);
phylink = phylink_create(&spx5_port->phylink_config,
of_fwnode_handle(config->node),
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index fb74752de0ca..8ba33bc1a001 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -26,79 +26,6 @@ static bool port_conf_has_changed(struct sparx5_port_config *a, struct sparx5_po
return false;
}
-static void sparx5_phylink_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct sparx5_port *port = netdev_priv(to_net_dev(config->dev));
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_5GBASER:
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_25GBASER:
- case PHY_INTERFACE_MODE_NA:
- if (port->conf.bandwidth == SPEED_5000)
- phylink_set(mask, 5000baseT_Full);
- if (port->conf.bandwidth == SPEED_10000) {
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
- }
- if (port->conf.bandwidth == SPEED_25000) {
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
- phylink_set(mask, 25000baseCR_Full);
- phylink_set(mask, 25000baseSR_Full);
- }
- if (state->interface != PHY_INTERFACE_MODE_NA)
- break;
- fallthrough;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- if (state->interface != PHY_INTERFACE_MODE_NA)
- break;
- fallthrough;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- if (state->interface != PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
- if (state->interface == PHY_INTERFACE_MODE_2500BASEX ||
- state->interface == PHY_INTERFACE_MODE_NA) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
- break;
- default:
- linkmode_zero(supported);
- return;
- }
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void sparx5_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
@@ -202,7 +129,7 @@ const struct phylink_pcs_ops sparx5_phylink_pcs_ops = {
};
const struct phylink_mac_ops sparx5_phylink_mac_ops = {
- .validate = sparx5_phylink_validate,
+ .validate = phylink_generic_validate,
.mac_config = sparx5_phylink_mac_config,
.mac_link_down = sparx5_phylink_mac_link_down,
.mac_link_up = sparx5_phylink_mac_link_up,
diff --git a/drivers/net/ethernet/microsoft/mana/Makefile b/drivers/net/ethernet/microsoft/mana/Makefile
index 0edd5bb685f3..e16a4221f571 100644
--- a/drivers/net/ethernet/microsoft/mana/Makefile
+++ b/drivers/net/ethernet/microsoft/mana/Makefile
@@ -3,4 +3,4 @@
# Makefile for the Microsoft Azure Network Adapter driver
obj-$(CONFIG_MICROSOFT_MANA) += mana.o
-mana-objs := gdma_main.o shm_channel.o hw_channel.o mana_en.o mana_ethtool.o
+mana-objs := gdma_main.o shm_channel.o hw_channel.o mana_en.o mana_ethtool.o mana_bpf.o
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
index d047ee876f12..9a12607fb511 100644
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -289,6 +289,8 @@ struct mana_rxq {
struct mana_cq rx_cq;
+ struct completion fence_event;
+
struct net_device *ndev;
/* Total number of receive buffers to be allocated */
@@ -298,6 +300,9 @@ struct mana_rxq {
struct mana_stats stats;
+ struct bpf_prog __rcu *bpf_prog;
+ struct xdp_rxq_info xdp_rxq;
+
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
*/
@@ -353,6 +358,8 @@ struct mana_port_context {
/* This points to an array of num_queues of RQ pointers. */
struct mana_rxq **rxqs;
+ struct bpf_prog *bpf_prog;
+
/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
unsigned int max_queues;
unsigned int num_queues;
@@ -367,6 +374,7 @@ struct mana_port_context {
struct mana_ethtool_stats eth_stats;
};
+int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
bool update_hash, bool update_tab);
@@ -377,6 +385,13 @@ int mana_detach(struct net_device *ndev, bool from_close);
int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);
+void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
+u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
+ struct xdp_buff *xdp, void *buf_va, uint pkt_len);
+struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
+void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
+int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
+
extern const struct ethtool_ops mana_ethtool_ops;
struct mana_obj_spec {
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
new file mode 100644
index 000000000000..1d2f948b5c00
--- /dev/null
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2021, Microsoft Corporation. */
+
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mm.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <net/xdp.h>
+
+#include "mana.h"
+
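+/* Transmit an skb generated for XDP_TX on the queue it was received on,
+ * taking the tx queue lock ourselves since this runs outside the normal
+ * stack transmit path.
+ */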
+void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+ u16 txq_idx = skb_get_queue_mapping(skb);
+ struct netdev_queue *ndevtxq;
+ int rc;
+
+ __skb_push(skb, ETH_HLEN);
+
+ ndevtxq = netdev_get_tx_queue(ndev, txq_idx);
+ __netif_tx_lock(ndevtxq, smp_processor_id());
+
+ rc = mana_start_xmit(skb, ndev);
+
+ __netif_tx_unlock(ndevtxq);
+
+ if (dev_xmit_complete(rc))
+ return;
+
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+}
+
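+/* Run the attached XDP program, if any, over a received buffer and return
+ * its verdict; XDP_PASS is returned when no program is attached.
+ */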
+u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
+ struct xdp_buff *xdp, void *buf_va, uint pkt_len)
+{
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ rcu_read_lock();
+ prog = rcu_dereference(rxq->bpf_prog);
+
+ if (!prog)
+ goto out;
+
+ xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
+ xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ case XDP_TX:
+ case XDP_DROP:
+ break;
+
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ }
+
+out:
+ rcu_read_unlock();
+
+ return act;
+}
+
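+/* Worst-case buffer space consumed by a frame of the given length once
+ * alignment and the skb_shared_info tailroom are included.
+ */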
+static unsigned int mana_xdp_fraglen(unsigned int len)
+{
+ return SKB_DATA_ALIGN(len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
+{
+ ASSERT_RTNL();
+
+ return apc->bpf_prog;
+}
+
+static struct bpf_prog *mana_chn_xdp_get(struct mana_port_context *apc)
+{
+ return rtnl_dereference(apc->rxqs[0]->bpf_prog);
+}
+
+/* Set xdp program on channels */
+void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog = mana_chn_xdp_get(apc);
+ unsigned int num_queues = apc->num_queues;
+ int i;
+
+ ASSERT_RTNL();
+
+ if (old_prog == prog)
+ return;
+
+ if (prog)
+ bpf_prog_add(prog, num_queues);
+
+ for (i = 0; i < num_queues; i++)
+ rcu_assign_pointer(apc->rxqs[i]->bpf_prog, prog);
+
+ if (old_prog)
+ for (i = 0; i < num_queues; i++)
+ bpf_prog_put(old_prog);
+}
+
+static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ struct bpf_prog *old_prog;
+ int buf_max;
+
+ old_prog = mana_xdp_get(apc);
+
+ if (!old_prog && !prog)
+ return 0;
+
+ buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN);
+ if (prog && buf_max > PAGE_SIZE) {
+ netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n",
+ ndev->mtu, buf_max);
+ NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
+
+ return -EOPNOTSUPP;
+ }
+
+ /* One refcnt of the prog is already held by the caller, so
+ * don't increase the refcnt for this one.
+ */
+ apc->bpf_prog = prog;
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (apc->port_is_up)
+ mana_chn_setxdp(apc, prog);
+
+ return 0;
+}
+
+int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct netlink_ext_ack *extack = bpf->extack;
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return mana_xdp_set(ndev, bpf->prog, extack);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 72cbf45c42d8..498d0f999275 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */
+#include <uapi/linux/bpf.h>
+
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -125,7 +127,7 @@ frag_err:
return -ENOMEM;
}
-static int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
struct mana_port_context *apc = netdev_priv(ndev);
@@ -378,6 +380,7 @@ static const struct net_device_ops mana_devops = {
.ndo_start_xmit = mana_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = mana_get_stats64,
+ .ndo_bpf = mana_bpf,
};
static void mana_cleanup_port_context(struct mana_port_context *apc)
@@ -749,6 +752,61 @@ out:
return err;
}
+static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
+{
+ struct mana_fence_rq_resp resp = {};
+ struct mana_fence_rq_req req = {};
+ int err;
+
+ init_completion(&rxq->fence_event);
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
+ sizeof(req), sizeof(resp));
+ req.wq_obj_handle = rxq->rxobj;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
+ rxq->rxq_idx, err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
+ rxq->rxq_idx, err, resp.hdr.status);
+ if (!err)
+ err = -EPROTO;
+
+ return err;
+ }
+
+ if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
+ netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
+ rxq->rxq_idx);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void mana_fence_rqs(struct mana_port_context *apc)
+{
+ unsigned int rxq_idx;
+ struct mana_rxq *rxq;
+ int err;
+
+ for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
+ rxq = apc->rxqs[rxq_idx];
+ err = mana_fence_rq(apc, rxq);
+
+ /* In case of any error, fall back to sleeping for 100 ms. */
+ if (err)
+ msleep(100);
+ }
+}
+
static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
{
u32 used_space_old;
@@ -906,6 +964,25 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq)
WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
}
+static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
+ struct xdp_buff *xdp)
+{
+ struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);
+
+ if (!skb)
+ return NULL;
+
+ if (xdp->data_hard_start) {
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ skb_put(skb, xdp->data_end - xdp->data);
+ } else {
+ skb_reserve(skb, XDP_PACKET_HEADROOM);
+ skb_put(skb, pkt_len);
+ }
+
+ return skb;
+}
+
static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
struct mana_rxq *rxq)
{
@@ -914,8 +991,10 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
uint pkt_len = cqe->ppi[0].pkt_len;
u16 rxq_idx = rxq->rxq_idx;
struct napi_struct *napi;
+ struct xdp_buff xdp = {};
struct sk_buff *skb;
u32 hash_value;
+ u32 act;
rxq->rx_cq.work_done++;
napi = &rxq->rx_cq.napi;
@@ -925,15 +1004,16 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
return;
}
- skb = build_skb(buf_va, PAGE_SIZE);
+ act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
- if (!skb) {
- free_page((unsigned long)buf_va);
- ++ndev->stats.rx_dropped;
- return;
- }
+ if (act != XDP_PASS && act != XDP_TX)
+ goto drop;
+
+ skb = mana_build_skb(buf_va, pkt_len, &xdp);
+
+ if (!skb)
+ goto drop;
- skb_put(skb, pkt_len);
skb->dev = napi->dev;
skb->protocol = eth_type_trans(skb, ndev);
@@ -954,12 +1034,24 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
}
+ if (act == XDP_TX) {
+ skb_set_queue_mapping(skb, rxq_idx);
+ mana_xdp_tx(skb, ndev);
+ return;
+ }
+
napi_gro_receive(napi, skb);
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
rx_stats->bytes += pkt_len;
u64_stats_update_end(&rx_stats->syncp);
+ return;
+
+drop:
+ free_page((unsigned long)buf_va);
+ ++ndev->stats.rx_dropped;
+ return;
}
static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
@@ -988,7 +1080,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
return;
case CQE_RX_OBJECT_FENCE:
- netdev_err(ndev, "RX Fencing is unsupported\n");
+ complete(&rxq->fence_event);
return;
default:
@@ -1016,7 +1108,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
new_page = alloc_page(GFP_ATOMIC);
if (new_page) {
- da = dma_map_page(dev, new_page, 0, rxq->datasize,
+ da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, da)) {
@@ -1291,6 +1383,9 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
napi_synchronize(napi);
napi_disable(napi);
+
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
netif_napi_del(napi);
mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
@@ -1342,7 +1437,8 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc,
if (!page)
return -ENOMEM;
- da = dma_map_page(dev, page, 0, rxq->datasize, DMA_FROM_DEVICE);
+ da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(dev, da)) {
__free_page(page);
@@ -1485,6 +1581,12 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
netif_napi_add(ndev, &cq->napi, mana_poll, 1);
+
+ WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
+ cq->napi.napi_id));
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL));
+
napi_enable(&cq->napi);
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
@@ -1572,6 +1674,7 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
bool update_hash, bool update_tab)
{
u32 queue_idx;
+ int err;
int i;
if (update_tab) {
@@ -1581,7 +1684,13 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
}
}
- return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
+ err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
+ if (err)
+ return err;
+
+ mana_fence_rqs(apc);
+
+ return 0;
}
static int mana_init_port(struct net_device *ndev)
@@ -1650,6 +1759,8 @@ int mana_alloc_queues(struct net_device *ndev)
if (err)
goto destroy_vport;
+ mana_chn_setxdp(apc, mana_xdp_get(apc));
+
return 0;
destroy_vport:
@@ -1698,6 +1809,8 @@ static int mana_dealloc_queues(struct net_device *ndev)
if (apc->port_is_up)
return -EINVAL;
+ mana_chn_setxdp(apc, NULL);
+
/* No packet can be transmitted now since apc->port_is_up is false.
* There is still a tiny chance that mana_poll_tx_cq() can re-enable
* a txq because it may not timely see apc->port_is_up being cleared
@@ -1724,9 +1837,6 @@ static int mana_dealloc_queues(struct net_device *ndev)
return err;
}
- /* TODO: Implement RX fencing */
- ssleep(1);
-
mana_destroy_vport(apc);
return 0;
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 722c27694b21..41b34a509308 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -7,9 +7,11 @@ mscc_ocelot_switch_lib-y := \
ocelot_vcap.o \
ocelot_flower.o \
ocelot_ptp.o \
- ocelot_devlink.o
+ ocelot_devlink.o \
+ vsc7514_regs.o
mscc_ocelot_switch_lib-$(CONFIG_BRIDGE_MRP) += ocelot_mrp.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
mscc_ocelot-y := \
+ ocelot_fdma.o \
ocelot_vsc7514.o \
ocelot_net.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 1e4ad953cffb..455293aa6343 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -61,9 +61,9 @@ static void ocelot_mact_select(struct ocelot *ocelot,
}
-int ocelot_mact_learn(struct ocelot *ocelot, int port,
- const unsigned char mac[ETH_ALEN],
- unsigned int vid, enum macaccess_entry_type type)
+static int __ocelot_mact_learn(struct ocelot *ocelot, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
{
u32 cmd = ANA_TABLES_MACACCESS_VALID |
ANA_TABLES_MACACCESS_DEST_IDX(port) |
@@ -83,8 +83,6 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
if (mc_ports & BIT(ocelot->num_phys_ports))
cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
- mutex_lock(&ocelot->mact_lock);
-
ocelot_mact_select(ocelot, mac, vid);
/* Issue a write command */
@@ -92,9 +90,20 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
err = ocelot_mact_wait_for_completion(ocelot);
+ return err;
+}
+
+int ocelot_mact_learn(struct ocelot *ocelot, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
+{
+ int ret;
+
+ mutex_lock(&ocelot->mact_lock);
+ ret = __ocelot_mact_learn(ocelot, port, mac, vid, type);
mutex_unlock(&ocelot->mact_lock);
- return err;
+ return ret;
}
EXPORT_SYMBOL(ocelot_mact_learn);
@@ -120,6 +129,66 @@ int ocelot_mact_forget(struct ocelot *ocelot,
}
EXPORT_SYMBOL(ocelot_mact_forget);
+int ocelot_mact_lookup(struct ocelot *ocelot, int *dst_idx,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type *type)
+{
+ int val;
+
+ mutex_lock(&ocelot->mact_lock);
+
+ ocelot_mact_select(ocelot, mac, vid);
+
+ /* Issue a read command with MACACCESS_VALID=1. */
+ ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
+ ANA_TABLES_MACACCESS);
+
+ if (ocelot_mact_wait_for_completion(ocelot)) {
+ mutex_unlock(&ocelot->mact_lock);
+ return -ETIMEDOUT;
+ }
+
+ /* Read back the entry flags */
+ val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
+
+ mutex_unlock(&ocelot->mact_lock);
+
+ if (!(val & ANA_TABLES_MACACCESS_VALID))
+ return -ENOENT;
+
+ *dst_idx = ANA_TABLES_MACACCESS_DEST_IDX_X(val);
+ *type = ANA_TABLES_MACACCESS_ENTRYTYPE_X(val);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_mact_lookup);
+
+int ocelot_mact_learn_streamdata(struct ocelot *ocelot, int dst_idx,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type,
+ int sfid, int ssid)
+{
+ int ret;
+
+ mutex_lock(&ocelot->mact_lock);
+
+ ocelot_write(ocelot,
+ (sfid < 0 ? 0 : ANA_TABLES_STREAMDATA_SFID_VALID) |
+ ANA_TABLES_STREAMDATA_SFID(sfid) |
+ (ssid < 0 ? 0 : ANA_TABLES_STREAMDATA_SSID_VALID) |
+ ANA_TABLES_STREAMDATA_SSID(ssid),
+ ANA_TABLES_STREAMDATA);
+
+ ret = __ocelot_mact_learn(ocelot, dst_idx, mac, vid, type);
+
+ mutex_unlock(&ocelot->mact_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ocelot_mact_learn_streamdata);
+
static void ocelot_mact_init(struct ocelot *ocelot)
{
/* Configure the learning mode entries attributes:
@@ -594,9 +663,17 @@ void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;
+ ocelot_port->speed = SPEED_UNKNOWN;
+
ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
DEV_MAC_ENA_CFG);
+ if (ocelot->ops->cut_through_fwd) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ ocelot->ops->cut_through_fwd(ocelot);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+ }
+
ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
err = ocelot_port_flush(ocelot, port);
@@ -628,6 +705,8 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
int mac_speed, mode = 0;
u32 mac_fc_cfg;
+ ocelot_port->speed = speed;
+
/* The MAC might be integrated in systems where the MAC speed is fixed
* and it's the PCS who is performing the rate adaptation, so we have
* to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG
@@ -692,7 +771,10 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
- ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause);
+ /* Don't attempt to send PAUSE frames on the NPI port, it's broken */
+ if (port != ocelot->npi)
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA,
+ tx_pause);
/* Undo the effects of ocelot_phylink_mac_link_down:
* enable MAC module
@@ -700,6 +782,15 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
+ /* If the port supports cut-through forwarding, update the masks before
+ * enabling forwarding on the port.
+ */
+ if (ocelot->ops->cut_through_fwd) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ ocelot->ops->cut_through_fwd(ocelot);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+ }
+
/* Core: Enable port for frame transfer */
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
@@ -966,14 +1057,34 @@ static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh)
return 0;
}
-int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
+void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
+ u64 timestamp)
{
struct skb_shared_hwtstamps *shhwtstamps;
u64 tod_in_ns, full_ts_in_ns;
+ struct timespec64 ts;
+
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+
+ tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+ if ((tod_in_ns & 0xffffffff) < timestamp)
+ full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
+ timestamp;
+ else
+ full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
+ timestamp;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+}
+EXPORT_SYMBOL(ocelot_ptp_rx_timestamp);
+
+int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
+{
u64 timestamp, src_port, len;
u32 xfh[OCELOT_TAG_LEN / 4];
struct net_device *dev;
- struct timespec64 ts;
struct sk_buff *skb;
int sz, buf_len;
u32 val, *buf;
@@ -1029,21 +1140,8 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
*buf = val;
}
- if (ocelot->ptp) {
- ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
-
- tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
- if ((tod_in_ns & 0xffffffff) < timestamp)
- full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
- timestamp;
- else
- full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
- timestamp;
-
- shhwtstamps = skb_hwtstamps(skb);
- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
- shhwtstamps->hwtstamp = full_ts_in_ns;
- }
+ if (ocelot->ptp)
+ ocelot_ptp_rx_timestamp(ocelot, skb, timestamp);
/* Everything we see on an interface that is in the HW bridge
* has already been forwarded.
@@ -1076,6 +1174,18 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
}
EXPORT_SYMBOL(ocelot_can_inject);
+void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag)
+{
+ ocelot_ifh_set_bypass(ifh, 1);
+ ocelot_ifh_set_dest(ifh, BIT_ULL(port));
+ ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
+ if (vlan_tag)
+ ocelot_ifh_set_vlan_tci(ifh, vlan_tag);
+ if (rew_op)
+ ocelot_ifh_set_rew_op(ifh, rew_op);
+}
+EXPORT_SYMBOL(ocelot_ifh_port_set);
+
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
u32 rew_op, struct sk_buff *skb)
{
@@ -1085,11 +1195,7 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
- ocelot_ifh_set_bypass(ifh, 1);
- ocelot_ifh_set_dest(ifh, BIT_ULL(port));
- ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
- ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb));
- ocelot_ifh_set_rew_op(ifh, rew_op);
+ ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
@@ -1238,6 +1344,43 @@ static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
return 0;
}
+int ocelot_mact_flush(struct ocelot *ocelot, int port)
+{
+ int err;
+
+ mutex_lock(&ocelot->mact_lock);
+
+ /* Program ageing filter for a single port */
+ ocelot_write(ocelot, ANA_ANAGEFIL_PID_EN | ANA_ANAGEFIL_PID_VAL(port),
+ ANA_ANAGEFIL);
+
+ /* Flushing dynamic FDB entries requires two successive age scans */
+ ocelot_write(ocelot,
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE),
+ ANA_TABLES_MACACCESS);
+
+ err = ocelot_mact_wait_for_completion(ocelot);
+ if (err) {
+ mutex_unlock(&ocelot->mact_lock);
+ return err;
+ }
+
+ /* And second... */
+ ocelot_write(ocelot,
+ ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE),
+ ANA_TABLES_MACACCESS);
+
+ err = ocelot_mact_wait_for_completion(ocelot);
+
+ /* Restore ageing filter */
+ ocelot_write(ocelot, 0, ANA_ANAGEFIL);
+
+ mutex_unlock(&ocelot->mact_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ocelot_mact_flush);
+
int ocelot_fdb_dump(struct ocelot *ocelot, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -1514,10 +1657,6 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- /* reserved for future extensions */
- if (cfg.flags)
- return -EINVAL;
-
/* Tx type sanity check */
switch (cfg.tx_type) {
case HWTSTAMP_TX_ON:
@@ -1688,8 +1827,7 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_get_ts_info);
-static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
- bool only_active_ports)
+static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
{
u32 mask = 0;
int port;
@@ -1700,26 +1838,25 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
if (!ocelot_port)
continue;
- if (ocelot_port->bond == bond) {
- if (only_active_ports && !ocelot_port->lag_tx_active)
- continue;
-
+ if (ocelot_port->bond == bond)
mask |= BIT(port);
- }
}
return mask;
}
-static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
- struct net_device *bridge)
+u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port)
{
struct ocelot_port *ocelot_port = ocelot->ports[src_port];
+ const struct net_device *bridge;
u32 mask = 0;
int port;
- if (!ocelot_port || ocelot_port->bridge != bridge ||
- ocelot_port->stp_state != BR_STATE_FORWARDING)
+ if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING)
+ return 0;
+
+ bridge = ocelot_port->bridge;
+ if (!bridge)
return 0;
for (port = 0; port < ocelot->num_phys_ports; port++) {
@@ -1735,8 +1872,9 @@ static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
return mask;
}
+EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask);
-static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
+u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
{
u32 mask = 0;
int port;
@@ -1753,12 +1891,22 @@ static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
return mask;
}
+EXPORT_SYMBOL_GPL(ocelot_get_dsa_8021q_cpu_mask);
-void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
+void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
{
unsigned long cpu_fwd_mask;
int port;
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ /* If cut-through forwarding is supported, update the masks before a
+ * port joins the forwarding domain, to avoid potential underruns if it
+ * has the highest speed in the new domain.
+ */
+ if (joining && ocelot->ops->cut_through_fwd)
+ ocelot->ops->cut_through_fwd(ocelot);
+
/* If a DSA tag_8021q CPU exists, it needs to be included in the
* regular forwarding path of the front ports regardless of whether
* those are bridged or standalone.
@@ -1786,16 +1934,13 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
mask = GENMASK(ocelot->num_phys_ports - 1, 0);
mask &= ~cpu_fwd_mask;
} else if (ocelot_port->bridge) {
- struct net_device *bridge = ocelot_port->bridge;
struct net_device *bond = ocelot_port->bond;
- mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge);
+ mask = ocelot_get_bridge_fwd_mask(ocelot, port);
mask |= cpu_fwd_mask;
mask &= ~BIT(port);
- if (bond) {
- mask &= ~ocelot_get_bond_mask(ocelot, bond,
- false);
- }
+ if (bond)
+ mask &= ~ocelot_get_bond_mask(ocelot, bond);
} else {
/* Standalone ports forward only to DSA tag_8021q CPU
* ports (if those exist), or to the hardware CPU port
@@ -1806,6 +1951,16 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
}
+
+ /* If cut-through forwarding is supported and a port is leaving, there
+ * is a chance that cut-through was disabled on the other ports due to
+ * the port which is leaving (it has a higher link speed). We need to
+ * update the cut-through masks of the remaining ports no earlier than
+ * after the port has left, to prevent underruns from happening between
+ * the cut-through update and the forwarding domain update.
+ */
+ if (!joining && ocelot->ops->cut_through_fwd)
+ ocelot->ops->cut_through_fwd(ocelot);
}
EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
@@ -1814,6 +1969,8 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 learn_ena = 0;
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot_port->stp_state = state;
if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) &&
@@ -1823,7 +1980,9 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA,
ANA_PORT_PORT_CFG, port);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, state == BR_STATE_FORWARDING);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
@@ -2053,9 +2212,13 @@ void ocelot_port_bridge_join(struct ocelot *ocelot, int port,
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot_port->bridge = bridge;
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_bridge_join);
@@ -2064,11 +2227,15 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot_port->bridge = NULL;
ocelot_port_set_pvid(ocelot, port, NULL);
ocelot_port_manage_port_tag(ocelot, port);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, false);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_bridge_leave);
@@ -2112,13 +2279,17 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
if (!bond || (visited & BIT(lag)))
continue;
- bond_mask = ocelot_get_bond_mask(ocelot, bond, true);
+ bond_mask = ocelot_get_bond_mask(ocelot, bond);
for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
// Destination mask
ocelot_write_rix(ocelot, bond_mask,
ANA_PGID_PGID, port);
- aggr_idx[num_active_ports++] = port;
+
+ if (ocelot_port->lag_tx_active)
+ aggr_idx[num_active_ports++] = port;
}
for_each_aggr_pgid(ocelot, i) {
@@ -2167,8 +2338,7 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
bond = ocelot_port->bond;
if (bond) {
- int lag = __ffs(ocelot_get_bond_mask(ocelot, bond,
- false));
+ int lag = __ffs(ocelot_get_bond_mask(ocelot, bond));
ocelot_rmw_gix(ocelot,
ANA_PORT_PORT_CFG_PORTID_VAL(lag),
@@ -2190,12 +2360,16 @@ int ocelot_port_lag_join(struct ocelot *ocelot, int port,
if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
return -EOPNOTSUPP;
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot->ports[port]->bond = bond;
ocelot_setup_logical_port_ids(ocelot);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
ocelot_set_aggr_pgids(ocelot);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
return 0;
}
EXPORT_SYMBOL(ocelot_port_lag_join);
@@ -2203,11 +2377,15 @@ EXPORT_SYMBOL(ocelot_port_lag_join);
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond)
{
+ mutex_lock(&ocelot->fwd_domain_lock);
+
ocelot->ports[port]->bond = NULL;
ocelot_setup_logical_port_ids(ocelot);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot, false);
ocelot_set_aggr_pgids(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_leave);
@@ -2498,6 +2676,7 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
+ mutex_init(&ocelot->fwd_domain_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
@@ -2521,6 +2700,9 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_vcap_init(ocelot);
ocelot_cpu_port_init(ocelot);
+ if (ocelot->ops->psfp_init)
+ ocelot->ops->psfp_init(ocelot);
+
for (port = 0; port < ocelot->num_phys_ports; port++) {
/* Clear all counters (5 groups) */
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index e43da09b8f91..bf4eff6d7086 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -32,6 +32,8 @@
#define OCELOT_PTP_QUEUE_SZ 128
+#define OCELOT_JUMBO_MTU 9000
+
struct ocelot_port_tc {
bool block_shared;
unsigned long offload_cnt;
@@ -55,19 +57,6 @@ struct ocelot_dump_ctx {
int idx;
};
-/* MAC table entry types.
- * ENTRYTYPE_NORMAL is subject to aging.
- * ENTRYTYPE_LOCKED is not subject to aging.
- * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
- * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
- */
-enum macaccess_entry_type {
- ENTRYTYPE_NORMAL = 0,
- ENTRYTYPE_LOCKED,
- ENTRYTYPE_MACv4,
- ENTRYTYPE_MACv6,
-};
-
/* A (PGID) port mask structure, encoding the 2^ocelot->num_phys_ports
* possibilities of egress port masks for L2 multicast traffic.
* For a switch with 9 user ports, there are 512 possible port masks, but the
diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c
new file mode 100644
index 000000000000..dffa597bffe6
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_fdma.c
@@ -0,0 +1,922 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi SoCs FDMA driver
+ *
+ * Copyright (c) 2021 Microchip
+ *
+ * Page recycling code is mostly taken from the gianfar driver.
+ */
+
+#include <linux/align.h>
+#include <linux/bitops.h>
+#include <linux/dmapool.h>
+#include <linux/dsa/ocelot.h>
+#include <linux/netdevice.h>
+#include <linux/of_platform.h>
+#include <linux/skbuff.h>
+
+#include "ocelot_fdma.h"
+#include "ocelot_qs.h"
+
+DEFINE_STATIC_KEY_FALSE(ocelot_fdma_enabled);
+
+static void ocelot_fdma_writel(struct ocelot *ocelot, u32 reg, u32 data)
+{
+ regmap_write(ocelot->targets[FDMA], reg, data);
+}
+
+static u32 ocelot_fdma_readl(struct ocelot *ocelot, u32 reg)
+{
+ u32 retval;
+
+ regmap_read(ocelot->targets[FDMA], reg, &retval);
+
+ return retval;
+}
+
+static dma_addr_t ocelot_fdma_idx_dma(dma_addr_t base, u16 idx)
+{
+ return base + idx * sizeof(struct ocelot_fdma_dcb);
+}
+
+static u16 ocelot_fdma_dma_idx(dma_addr_t base, dma_addr_t dma)
+{
+ return (dma - base) / sizeof(struct ocelot_fdma_dcb);
+}
+
+static u16 ocelot_fdma_idx_next(u16 idx, u16 ring_sz)
+{
+ return unlikely(idx == ring_sz - 1) ? 0 : idx + 1;
+}
+
+static u16 ocelot_fdma_idx_prev(u16 idx, u16 ring_sz)
+{
+ return unlikely(idx == 0) ? ring_sz - 1 : idx - 1;
+}
+
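+/* Free-slot accounting for the rings. One slot is always kept unused so
+ * that a full ring (next_to_use just behind next_to_clean) can be told
+ * apart from an empty one, hence the trailing "- 1".
+ */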
+static int ocelot_fdma_rx_ring_free(struct ocelot_fdma *fdma)
+{
+ struct ocelot_fdma_rx_ring *rx_ring = &fdma->rx_ring;
+
+ if (rx_ring->next_to_use >= rx_ring->next_to_clean)
+ return OCELOT_FDMA_RX_RING_SIZE -
+ (rx_ring->next_to_use - rx_ring->next_to_clean) - 1;
+ else
+ return rx_ring->next_to_clean - rx_ring->next_to_use - 1;
+}
+
+static int ocelot_fdma_tx_ring_free(struct ocelot_fdma *fdma)
+{
+ struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
+
+ if (tx_ring->next_to_use >= tx_ring->next_to_clean)
+ return OCELOT_FDMA_TX_RING_SIZE -
+ (tx_ring->next_to_use - tx_ring->next_to_clean) - 1;
+ else
+ return tx_ring->next_to_clean - tx_ring->next_to_use - 1;
+}
+
+static bool ocelot_fdma_tx_ring_empty(struct ocelot_fdma *fdma)
+{
+ struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
+
+ return tx_ring->next_to_clean == tx_ring->next_to_use;
+}
+
+static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma,
+ int chan)
+{
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_DCB_LLP(chan), dma);
+ /* Barrier to force memory writes to DCB to be completed before starting
+ * the channel.
+ */
+ wmb();
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan));
+}
+
+static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan)
+{
+ unsigned long timeout;
+ u32 safe;
+
+ timeout = jiffies + usecs_to_jiffies(OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
+ do {
+ safe = ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
+ if (safe & BIT(chan))
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb,
+ dma_addr_t dma_addr,
+ size_t size)
+{
+ u32 offset = dma_addr & 0x3;
+
+ dcb->llp = 0;
+ dcb->datap = ALIGN_DOWN(dma_addr, 4);
+ dcb->datal = ALIGN_DOWN(size, 4);
+ dcb->stat = MSCC_FDMA_DCB_STAT_BLOCKO(offset);
+}
+
+static bool ocelot_fdma_rx_alloc_page(struct ocelot *ocelot,
+ struct ocelot_fdma_rx_buf *rxb)
+{
+ dma_addr_t mapping;
+ struct page *page;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return false;
+
+ mapping = dma_map_page(ocelot->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ocelot->dev, mapping))) {
+ __free_page(page);
+ return false;
+ }
+
+ rxb->page = page;
+ rxb->page_offset = 0;
+ rxb->dma_addr = mapping;
+
+ return true;
+}
+
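+/* Refill up to @alloc_cnt RX DCBs, reusing an already-mapped page half
+ * when a previous pass left one behind and allocating a fresh page
+ * otherwise. Each DCB is chained to its successor through its LLP field.
+ */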
+static int ocelot_fdma_alloc_rx_buffs(struct ocelot *ocelot, u16 alloc_cnt)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_rx_ring *rx_ring;
+ struct ocelot_fdma_rx_buf *rxb;
+ struct ocelot_fdma_dcb *dcb;
+ dma_addr_t dma_addr;
+ int ret = 0;
+ u16 idx;
+
+ rx_ring = &fdma->rx_ring;
+ idx = rx_ring->next_to_use;
+
+ while (alloc_cnt--) {
+ rxb = &rx_ring->bufs[idx];
+ /* Try to reuse the page */
+ if (unlikely(!rxb->page)) {
+ if (unlikely(!ocelot_fdma_rx_alloc_page(ocelot, rxb))) {
+ dev_err_ratelimited(ocelot->dev,
+ "Failed to allocate rx\n");
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ dcb = &rx_ring->dcbs[idx];
+ dma_addr = rxb->dma_addr + rxb->page_offset;
+ ocelot_fdma_dcb_set_data(dcb, dma_addr, OCELOT_FDMA_RXB_SIZE);
+
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
+ /* Chain the DCB to the next one */
+ dcb->llp = ocelot_fdma_idx_dma(rx_ring->dcbs_dma, idx);
+ }
+
+ rx_ring->next_to_use = idx;
+ rx_ring->next_to_alloc = idx;
+
+ return ret;
+}
+
+static bool ocelot_fdma_tx_dcb_set_skb(struct ocelot *ocelot,
+ struct ocelot_fdma_tx_buf *tx_buf,
+ struct ocelot_fdma_dcb *dcb,
+ struct sk_buff *skb)
+{
+ dma_addr_t mapping;
+
+ mapping = dma_map_single(ocelot->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ocelot->dev, mapping)))
+ return false;
+
+ dma_unmap_addr_set(tx_buf, dma_addr, mapping);
+
+ ocelot_fdma_dcb_set_data(dcb, mapping, OCELOT_FDMA_RX_SIZE);
+ tx_buf->skb = skb;
+ dcb->stat |= MSCC_FDMA_DCB_STAT_BLOCKL(skb->len);
+ dcb->stat |= MSCC_FDMA_DCB_STAT_SOF | MSCC_FDMA_DCB_STAT_EOF;
+
+ return true;
+}
+
+static bool ocelot_fdma_check_stop_rx(struct ocelot *ocelot)
+{
+ u32 llp;
+
+ /* Check if the FDMA hits the DCB with LLP == NULL */
+ llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP(MSCC_FDMA_XTR_CHAN));
+ if (unlikely(llp))
+ return false;
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_DISABLE,
+ BIT(MSCC_FDMA_XTR_CHAN));
+
+ return true;
+}
+
+static void ocelot_fdma_rx_set_llp(struct ocelot_fdma_rx_ring *rx_ring)
+{
+ struct ocelot_fdma_dcb *dcb;
+ unsigned int idx;
+
+ idx = ocelot_fdma_idx_prev(rx_ring->next_to_use,
+ OCELOT_FDMA_RX_RING_SIZE);
+ dcb = &rx_ring->dcbs[idx];
+ dcb->llp = 0;
+}
+
+static void ocelot_fdma_rx_restart(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_rx_ring *rx_ring;
+ const u8 chan = MSCC_FDMA_XTR_CHAN;
+ dma_addr_t new_llp, dma_base;
+ unsigned int idx;
+ u32 llp_prev;
+ int ret;
+
+ rx_ring = &fdma->rx_ring;
+ ret = ocelot_fdma_wait_chan_safe(ocelot, chan);
+ if (ret) {
+ dev_err_ratelimited(ocelot->dev,
+ "Unable to stop RX channel\n");
+ return;
+ }
+
+ ocelot_fdma_rx_set_llp(rx_ring);
+
+ /* The FDMA stopped on the last DCB, which contained a NULL LLP.
+ * Since we processed some DCBs in RX, there is now free space, and
+ * we must set DCB_LLP to point to the next DCB.
+ */
+ llp_prev = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP_PREV(chan));
+ dma_base = rx_ring->dcbs_dma;
+
+ /* Get the next DMA addr located after LLP == NULL DCB */
+ idx = ocelot_fdma_dma_idx(dma_base, llp_prev);
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
+ new_llp = ocelot_fdma_idx_dma(dma_base, idx);
+
+ /* Finally reactivate the channel */
+ ocelot_fdma_activate_chan(ocelot, new_llp, chan);
+}
+
+static bool ocelot_fdma_add_rx_frag(struct ocelot_fdma_rx_buf *rxb, u32 stat,
+ struct sk_buff *skb, bool first)
+{
+ int size = MSCC_FDMA_DCB_STAT_BLOCKL(stat);
+ struct page *page = rxb->page;
+
+ if (likely(first)) {
+ skb_put(skb, size);
+ } else {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rxb->page_offset, size, OCELOT_FDMA_RX_SIZE);
+ }
+
+ /* Try to reuse page */
+ if (unlikely(page_ref_count(page) != 1 || page_is_pfmemalloc(page)))
+ return false;
+
+ /* Change offset to the other half */
+ rxb->page_offset ^= OCELOT_FDMA_RX_SIZE;
+
+ page_ref_inc(page);
+
+ return true;
+}
+
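+/* Each RX page is used in two halves: while one half is owned by the
+ * network stack, the other half can be handed back to the hardware, as
+ * long as no one else still holds a reference on the page.
+ */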
+static void ocelot_fdma_reuse_rx_page(struct ocelot *ocelot,
+ struct ocelot_fdma_rx_buf *old_rxb)
+{
+ struct ocelot_fdma_rx_ring *rx_ring = &ocelot->fdma->rx_ring;
+ struct ocelot_fdma_rx_buf *new_rxb;
+
+ new_rxb = &rx_ring->bufs[rx_ring->next_to_alloc];
+ rx_ring->next_to_alloc = ocelot_fdma_idx_next(rx_ring->next_to_alloc,
+ OCELOT_FDMA_RX_RING_SIZE);
+
+ /* Copy page reference */
+ *new_rxb = *old_rxb;
+
+ /* Sync for use by the device */
+ dma_sync_single_range_for_device(ocelot->dev, old_rxb->dma_addr,
+ old_rxb->page_offset,
+ OCELOT_FDMA_RX_SIZE, DMA_FROM_DEVICE);
+}
+
+static struct sk_buff *ocelot_fdma_get_skb(struct ocelot *ocelot, u32 stat,
+ struct ocelot_fdma_rx_buf *rxb,
+ struct sk_buff *skb)
+{
+ bool first = false;
+
+ /* Allocate skb head and data */
+ if (likely(!skb)) {
+ void *buff_addr = page_address(rxb->page) +
+ rxb->page_offset;
+
+ skb = build_skb(buff_addr, OCELOT_FDMA_SKBFRAG_SIZE);
+ if (unlikely(!skb)) {
+ dev_err_ratelimited(ocelot->dev,
+ "build_skb failed !\n");
+ return NULL;
+ }
+ first = true;
+ }
+
+ dma_sync_single_range_for_cpu(ocelot->dev, rxb->dma_addr,
+ rxb->page_offset, OCELOT_FDMA_RX_SIZE,
+ DMA_FROM_DEVICE);
+
+ if (ocelot_fdma_add_rx_frag(rxb, stat, skb, first)) {
+ /* Reuse the free half of the page for the next_to_alloc DCB */
+ ocelot_fdma_reuse_rx_page(ocelot, rxb);
+ } else {
+ /* page cannot be reused, unmap it */
+ dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ }
+
+ /* Clear the RX buffer page reference */
+ rxb->page = NULL;
+
+ return skb;
+}
+
+static bool ocelot_fdma_receive_skb(struct ocelot *ocelot, struct sk_buff *skb)
+{
+ struct net_device *ndev;
+ void *xfh = skb->data;
+ u64 timestamp;
+ u64 src_port;
+
+ skb_pull(skb, OCELOT_TAG_LEN);
+
+ ocelot_xfh_get_src_port(xfh, &src_port);
+ if (unlikely(src_port >= ocelot->num_phys_ports))
+ return false;
+
+ ndev = ocelot_port_to_netdev(ocelot, src_port);
+ if (unlikely(!ndev))
+ return false;
+
+ pskb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ skb->dev = ndev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
+
+ if (ocelot->ptp) {
+ ocelot_xfh_get_rew_val(xfh, &timestamp);
+ ocelot_ptp_rx_timestamp(ocelot, skb, timestamp);
+ }
+
+ if (likely(!skb_defer_rx_timestamp(skb)))
+ netif_receive_skb(skb);
+
+ return true;
+}
+
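+/* NAPI RX processing: walk the completed DCBs, assembling multi-DCB
+ * frames into a single skb, and stop either after @budget buffers or at
+ * the first DCB that the hardware has not written yet.
+ */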
+static int ocelot_fdma_rx_get(struct ocelot *ocelot, int budget)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_rx_ring *rx_ring;
+ struct ocelot_fdma_rx_buf *rxb;
+ struct ocelot_fdma_dcb *dcb;
+ struct sk_buff *skb;
+ int work_done = 0;
+ int cleaned_cnt;
+ u32 stat;
+ u16 idx;
+
+ cleaned_cnt = ocelot_fdma_rx_ring_free(fdma);
+ rx_ring = &fdma->rx_ring;
+ skb = rx_ring->skb;
+
+ while (budget--) {
+ idx = rx_ring->next_to_clean;
+ dcb = &rx_ring->dcbs[idx];
+ stat = dcb->stat;
+ if (MSCC_FDMA_DCB_STAT_BLOCKL(stat) == 0)
+ break;
+
+ /* The new packet is a start of frame but we already have an skb
+ * set: we probably lost an EOF packet, so free the skb.
+ */
+ if (unlikely(skb && (stat & MSCC_FDMA_DCB_STAT_SOF))) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ }
+
+ rxb = &rx_ring->bufs[idx];
+ /* Fetch next to clean buffer from the rx_ring */
+ skb = ocelot_fdma_get_skb(ocelot, stat, rxb, skb);
+ if (unlikely(!skb))
+ break;
+
+ work_done++;
+ cleaned_cnt++;
+
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
+ rx_ring->next_to_clean = idx;
+
+ if (unlikely(stat & MSCC_FDMA_DCB_STAT_ABORT ||
+ stat & MSCC_FDMA_DCB_STAT_PD)) {
+ dev_err_ratelimited(ocelot->dev,
+ "DCB aborted or pruned\n");
+ dev_kfree_skb(skb);
+ skb = NULL;
+ continue;
+ }
+
+ /* We still need to process the other fragment of the packet
+ * before delivering it to the network stack
+ */
+ if (!(stat & MSCC_FDMA_DCB_STAT_EOF))
+ continue;
+
+ if (unlikely(!ocelot_fdma_receive_skb(ocelot, skb)))
+ dev_kfree_skb(skb);
+
+ skb = NULL;
+ }
+
+ rx_ring->skb = skb;
+
+ if (cleaned_cnt)
+ ocelot_fdma_alloc_rx_buffs(ocelot, cleaned_cnt);
+
+ return work_done;
+}
+
+static void ocelot_fdma_wakeup_netdev(struct ocelot *ocelot)
+{
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
+ struct net_device *dev;
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_port = ocelot->ports[port];
+ if (!ocelot_port)
+ continue;
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+ dev = priv->dev;
+
+ if (unlikely(netif_queue_stopped(dev)))
+ netif_wake_queue(dev);
+ }
+}
+
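+/* Reclaim transmitted buffers. If the FDMA stopped on a NULL LLP while
+ * more DCBs were queued behind it, restart the injection channel from the
+ * next DCB to clean once the channel reports itself safe to reconfigure.
+ */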
+static void ocelot_fdma_tx_cleanup(struct ocelot *ocelot, int budget)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_tx_ring *tx_ring;
+ struct ocelot_fdma_tx_buf *buf;
+ unsigned int new_null_llp_idx;
+ struct ocelot_fdma_dcb *dcb;
+ bool end_of_list = false;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ u32 dcb_llp;
+ u16 ntc;
+ int ret;
+
+ tx_ring = &fdma->tx_ring;
+
+ /* Purge the TX packets that have been sent, up to the NULL LLP or
+ * the end of the done list.
+ */
+ while (!ocelot_fdma_tx_ring_empty(fdma)) {
+ ntc = tx_ring->next_to_clean;
+ dcb = &tx_ring->dcbs[ntc];
+ if (!(dcb->stat & MSCC_FDMA_DCB_STAT_PD))
+ break;
+
+ buf = &tx_ring->bufs[ntc];
+ skb = buf->skb;
+ dma_unmap_single(ocelot->dev, dma_unmap_addr(buf, dma_addr),
+ skb->len, DMA_TO_DEVICE);
+ napi_consume_skb(skb, budget);
+ dcb_llp = dcb->llp;
+
+ /* Only update after accessing all dcb fields */
+ tx_ring->next_to_clean = ocelot_fdma_idx_next(ntc,
+ OCELOT_FDMA_TX_RING_SIZE);
+
+ /* If we hit the NULL LLP, stop, we might need to reload FDMA */
+ if (dcb_llp == 0) {
+ end_of_list = true;
+ break;
+ }
+ }
+
+ /* No need to try to wake the queues if no TX buffers were cleaned up. */
+ if (ocelot_fdma_tx_ring_free(fdma))
+ ocelot_fdma_wakeup_netdev(ocelot);
+
+ /* If there are still some DCBs to be processed by the FDMA, or if the
+ * pending list is empty, there is no need to restart the FDMA.
+ */
+ if (!end_of_list || ocelot_fdma_tx_ring_empty(fdma))
+ return;
+
+ ret = ocelot_fdma_wait_chan_safe(ocelot, MSCC_FDMA_INJ_CHAN);
+ if (ret) {
+ dev_warn(ocelot->dev,
+ "Failed to wait for TX channel to stop\n");
+ return;
+ }
+
+ /* Set NULL LLP to be the last DCB used */
+ new_null_llp_idx = ocelot_fdma_idx_prev(tx_ring->next_to_use,
+ OCELOT_FDMA_TX_RING_SIZE);
+ dcb = &tx_ring->dcbs[new_null_llp_idx];
+ dcb->llp = 0;
+
+ dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, tx_ring->next_to_clean);
+ ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
+}
+
+static int ocelot_fdma_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ocelot_fdma *fdma = container_of(napi, struct ocelot_fdma, napi);
+ struct ocelot *ocelot = fdma->ocelot;
+ int work_done = 0;
+ bool rx_stopped;
+
+ ocelot_fdma_tx_cleanup(ocelot, budget);
+
+ rx_stopped = ocelot_fdma_check_stop_rx(ocelot);
+
+ work_done = ocelot_fdma_rx_get(ocelot, budget);
+
+ if (rx_stopped)
+ ocelot_fdma_rx_restart(ocelot);
+
+ if (work_done < budget) {
+ napi_complete_done(&fdma->napi, work_done);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
+ BIT(MSCC_FDMA_INJ_CHAN) |
+ BIT(MSCC_FDMA_XTR_CHAN));
+ }
+
+ return work_done;
+}
+
+static irqreturn_t ocelot_fdma_interrupt(int irq, void *dev_id)
+{
+ u32 ident, llp, frm, err, err_code;
+ struct ocelot *ocelot = dev_id;
+
+ ident = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_IDENT);
+ frm = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_FRM);
+ llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_LLP);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, llp & ident);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, frm & ident);
+ if (frm || llp) {
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
+ napi_schedule(&ocelot->fdma->napi);
+ }
+
+ err = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR);
+ if (unlikely(err)) {
+ err_code = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR_CODE);
+ dev_err_ratelimited(ocelot->dev,
+ "Error ! chans mask: %#x, code: %#x\n",
+ err, err_code);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR, err);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR_CODE, err_code);
+ }
+
+ return IRQ_HANDLED;
+}
+
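+/* Enqueue one skb on the injection ring. If the channel is idle it is
+ * activated directly on this DCB; otherwise the DCB is linked into the
+ * chain and picked up when the channel reaches it or is restarted.
+ */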
+static void ocelot_fdma_send_skb(struct ocelot *ocelot,
+ struct ocelot_fdma *fdma, struct sk_buff *skb)
+{
+ struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
+ struct ocelot_fdma_tx_buf *tx_buf;
+ struct ocelot_fdma_dcb *dcb;
+ dma_addr_t dma;
+ u16 next_idx;
+
+ dcb = &tx_ring->dcbs[tx_ring->next_to_use];
+ tx_buf = &tx_ring->bufs[tx_ring->next_to_use];
+ if (!ocelot_fdma_tx_dcb_set_skb(ocelot, tx_buf, dcb, skb)) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ next_idx = ocelot_fdma_idx_next(tx_ring->next_to_use,
+ OCELOT_FDMA_TX_RING_SIZE);
+ skb_tx_timestamp(skb);
+
+ /* If the FDMA TX chan is empty, then enqueue the DCB directly */
+ if (ocelot_fdma_tx_ring_empty(fdma)) {
+ dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma,
+ tx_ring->next_to_use);
+ ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
+ } else {
+ /* Chain the DCBs */
+ dcb->llp = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, next_idx);
+ }
+
+ tx_ring->next_to_use = next_idx;
+}
+
+static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op,
+ struct sk_buff *skb, struct net_device *dev)
+{
+ int needed_headroom = max_t(int, OCELOT_TAG_LEN - skb_headroom(skb), 0);
+ int needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
+ void *ifh;
+ int err;
+
+ if (unlikely(needed_headroom || needed_tailroom ||
+ skb_header_cloned(skb))) {
+ err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
+ GFP_ATOMIC);
+ if (unlikely(err)) {
+ dev_kfree_skb_any(skb);
+ return 1;
+ }
+ }
+
+ err = skb_linearize(skb);
+ if (err) {
+ net_err_ratelimited("%s: skb_linearize error (%d)!\n",
+ dev->name, err);
+ dev_kfree_skb_any(skb);
+ return 1;
+ }
+
+ ifh = skb_push(skb, OCELOT_TAG_LEN);
+ skb_put(skb, ETH_FCS_LEN);
+ memset(ifh, 0, OCELOT_TAG_LEN);
+ ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
+
+ return 0;
+}
+
+int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op,
+ struct sk_buff *skb, struct net_device *dev)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ int ret = NETDEV_TX_OK;
+
+ spin_lock(&fdma->tx_ring.xmit_lock);
+
+ if (ocelot_fdma_tx_ring_free(fdma) == 0) {
+ netif_stop_queue(dev);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ if (ocelot_fdma_prepare_skb(ocelot, port, rew_op, skb, dev))
+ goto out;
+
+ ocelot_fdma_send_skb(ocelot, fdma, skb);
+
+out:
+ spin_unlock(&fdma->tx_ring.xmit_lock);
+
+ return ret;
+}
+
+static void ocelot_fdma_free_rx_ring(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_rx_ring *rx_ring;
+ struct ocelot_fdma_rx_buf *rxb;
+ u16 idx;
+
+ rx_ring = &fdma->rx_ring;
+ idx = rx_ring->next_to_clean;
+
+ /* Free the pages held in the RX ring */
+ while (idx != rx_ring->next_to_use) {
+ rxb = &rx_ring->bufs[idx];
+ dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __free_page(rxb->page);
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
+ }
+
+ if (fdma->rx_ring.skb)
+ dev_kfree_skb_any(fdma->rx_ring.skb);
+}
+
+static void ocelot_fdma_free_tx_ring(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_tx_ring *tx_ring;
+ struct ocelot_fdma_tx_buf *txb;
+ struct sk_buff *skb;
+ u16 idx;
+
+ tx_ring = &fdma->tx_ring;
+ idx = tx_ring->next_to_clean;
+
+ while (idx != tx_ring->next_to_use) {
+ txb = &tx_ring->bufs[idx];
+ skb = txb->skb;
+ dma_unmap_single(ocelot->dev, dma_unmap_addr(txb, dma_addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_TX_RING_SIZE);
+ }
+}
+
+static int ocelot_fdma_rings_alloc(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+ struct ocelot_fdma_dcb *dcbs;
+ unsigned int adjust;
+ dma_addr_t dcbs_dma;
+ int ret;
+
+ /* Create a pool of consistent memory blocks for hardware descriptors */
+ fdma->dcbs_base = dmam_alloc_coherent(ocelot->dev,
+ OCELOT_DCBS_HW_ALLOC_SIZE,
+ &fdma->dcbs_dma_base, GFP_KERNEL);
+ if (!fdma->dcbs_base)
+ return -ENOMEM;
+
+ /* DCBs must be aligned on a 32-bit boundary */
+ dcbs = fdma->dcbs_base;
+ dcbs_dma = fdma->dcbs_dma_base;
+ if (!IS_ALIGNED(dcbs_dma, 4)) {
+ adjust = dcbs_dma & 0x3;
+ dcbs_dma = ALIGN(dcbs_dma, 4);
+ dcbs = (void *)dcbs + adjust;
+ }
+
+ /* TX queue */
+ fdma->tx_ring.dcbs = dcbs;
+ fdma->tx_ring.dcbs_dma = dcbs_dma;
+ spin_lock_init(&fdma->tx_ring.xmit_lock);
+
+ /* RX queue */
+ fdma->rx_ring.dcbs = dcbs + OCELOT_FDMA_TX_RING_SIZE;
+ fdma->rx_ring.dcbs_dma = dcbs_dma + OCELOT_FDMA_TX_DCB_SIZE;
+ ret = ocelot_fdma_alloc_rx_buffs(ocelot,
+ ocelot_fdma_rx_ring_free(fdma));
+ if (ret) {
+ ocelot_fdma_free_rx_ring(ocelot);
+ return ret;
+ }
+
+ /* Set the last DCB LLP to NULL; this is normally done when restarting
+ * the RX channel, but it is needed here for the first run.
+ */
+ ocelot_fdma_rx_set_llp(&fdma->rx_ring);
+
+ return 0;
+}
+
+void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+
+ dev->needed_headroom = OCELOT_TAG_LEN;
+ dev->needed_tailroom = ETH_FCS_LEN;
+
+ if (fdma->ndev)
+ return;
+
+ fdma->ndev = dev;
+ netif_napi_add(dev, &fdma->napi, ocelot_fdma_napi_poll,
+ OCELOT_FDMA_WEIGHT);
+}
+
+void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, struct net_device *dev)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+
+ if (fdma->ndev == dev) {
+ netif_napi_del(&fdma->napi);
+ fdma->ndev = NULL;
+ }
+}
+
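+/* Probe-time setup: allocate the FDMA context, request the "fdma" IRQ,
+ * allocate the DCB rings, and enable the static key that switches the
+ * ocelot datapath from register-based I/O to DMA-based frame transfer.
+ */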
+void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot)
+{
+ struct device *dev = ocelot->dev;
+ struct ocelot_fdma *fdma;
+ int ret;
+
+ fdma = devm_kzalloc(dev, sizeof(*fdma), GFP_KERNEL);
+ if (!fdma)
+ return;
+
+ ocelot->fdma = fdma;
+ ocelot->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
+
+ fdma->ocelot = ocelot;
+ fdma->irq = platform_get_irq_byname(pdev, "fdma");
+ ret = devm_request_irq(dev, fdma->irq, ocelot_fdma_interrupt, 0,
+ dev_name(dev), ocelot);
+ if (ret)
+ goto err_free_fdma;
+
+ ret = ocelot_fdma_rings_alloc(ocelot);
+ if (ret)
+ goto err_free_irq;
+
+ static_branch_enable(&ocelot_fdma_enabled);
+
+ return;
+
+err_free_irq:
+ devm_free_irq(dev, fdma->irq, ocelot);
+err_free_fdma:
+ devm_kfree(dev, fdma);
+
+ ocelot->fdma = NULL;
+}
+
+void ocelot_fdma_start(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+
+ /* Reconfigure for extraction and injection using DMA */
+ ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_MODE(2), QS_INJ_GRP_CFG, 0);
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(0), QS_INJ_CTRL, 0);
+
+ ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_MODE(2), QS_XTR_GRP_CFG, 0);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, 0xffffffff);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, 0xffffffff);
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP_ENA,
+ BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM_ENA,
+ BIT(MSCC_FDMA_XTR_CHAN));
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
+ BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));
+
+ napi_enable(&fdma->napi);
+
+ ocelot_fdma_activate_chan(ocelot, ocelot->fdma->rx_ring.dcbs_dma,
+ MSCC_FDMA_XTR_CHAN);
+}
+
+void ocelot_fdma_deinit(struct ocelot *ocelot)
+{
+ struct ocelot_fdma *fdma = ocelot->fdma;
+
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
+ BIT(MSCC_FDMA_XTR_CHAN));
+ ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
+ BIT(MSCC_FDMA_INJ_CHAN));
+ napi_synchronize(&fdma->napi);
+ napi_disable(&fdma->napi);
+
+ ocelot_fdma_free_rx_ring(ocelot);
+ ocelot_fdma_free_tx_ring(ocelot);
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.h b/drivers/net/ethernet/mscc/ocelot_fdma.h
new file mode 100644
index 000000000000..2fc8e1dd7230
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_fdma.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi SoCs FDMA driver
+ *
+ * Copyright (c) 2021 Microchip
+ */
+#ifndef _MSCC_OCELOT_FDMA_H_
+#define _MSCC_OCELOT_FDMA_H_
+
+#include "ocelot.h"
+
+#define MSCC_FDMA_DCB_STAT_BLOCKO(x) (((x) << 20) & GENMASK(31, 20))
+#define MSCC_FDMA_DCB_STAT_BLOCKO_M GENMASK(31, 20)
+#define MSCC_FDMA_DCB_STAT_BLOCKO_X(x) (((x) & GENMASK(31, 20)) >> 20)
+#define MSCC_FDMA_DCB_STAT_PD BIT(19)
+#define MSCC_FDMA_DCB_STAT_ABORT BIT(18)
+#define MSCC_FDMA_DCB_STAT_EOF BIT(17)
+#define MSCC_FDMA_DCB_STAT_SOF BIT(16)
+#define MSCC_FDMA_DCB_STAT_BLOCKL_M GENMASK(15, 0)
+#define MSCC_FDMA_DCB_STAT_BLOCKL(x) ((x) & GENMASK(15, 0))
+
+#define MSCC_FDMA_DCB_LLP(x) ((x) * 4 + 0x0)
+#define MSCC_FDMA_DCB_LLP_PREV(x) ((x) * 4 + 0xA0)
+#define MSCC_FDMA_CH_SAFE 0xcc
+#define MSCC_FDMA_CH_ACTIVATE 0xd0
+#define MSCC_FDMA_CH_DISABLE 0xd4
+#define MSCC_FDMA_CH_FORCEDIS 0xd8
+#define MSCC_FDMA_EVT_ERR 0x164
+#define MSCC_FDMA_EVT_ERR_CODE 0x168
+#define MSCC_FDMA_INTR_LLP 0x16c
+#define MSCC_FDMA_INTR_LLP_ENA 0x170
+#define MSCC_FDMA_INTR_FRM 0x174
+#define MSCC_FDMA_INTR_FRM_ENA 0x178
+#define MSCC_FDMA_INTR_ENA 0x184
+#define MSCC_FDMA_INTR_IDENT 0x188
+
+#define MSCC_FDMA_INJ_CHAN 2
+#define MSCC_FDMA_XTR_CHAN 0
+
+#define OCELOT_FDMA_WEIGHT 32
+
+#define OCELOT_FDMA_CH_SAFE_TIMEOUT_US 10
+
+#define OCELOT_FDMA_RX_RING_SIZE 512
+#define OCELOT_FDMA_TX_RING_SIZE 128
+
+#define OCELOT_FDMA_RX_DCB_SIZE (OCELOT_FDMA_RX_RING_SIZE * \
+ sizeof(struct ocelot_fdma_dcb))
+#define OCELOT_FDMA_TX_DCB_SIZE (OCELOT_FDMA_TX_RING_SIZE * \
+ sizeof(struct ocelot_fdma_dcb))
+/* +4 allows for word alignment after allocation */
+#define OCELOT_DCBS_HW_ALLOC_SIZE (OCELOT_FDMA_RX_DCB_SIZE + \
+ OCELOT_FDMA_TX_DCB_SIZE + \
+ 4)
+
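+/* An RX buffer is half a page. OCELOT_FDMA_RXB_SIZE is the part usable
+ * for frame data once room for build_skb()'s skb_shared_info and word
+ * alignment is reserved, so a half page can become an skb in place.
+ */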
+#define OCELOT_FDMA_RX_SIZE (PAGE_SIZE / 2)
+
+#define OCELOT_FDMA_SKBFRAG_OVR (4 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define OCELOT_FDMA_RXB_SIZE ALIGN_DOWN(OCELOT_FDMA_RX_SIZE - OCELOT_FDMA_SKBFRAG_OVR, 4)
+#define OCELOT_FDMA_SKBFRAG_SIZE (OCELOT_FDMA_RXB_SIZE + OCELOT_FDMA_SKBFRAG_OVR)
+
+DECLARE_STATIC_KEY_FALSE(ocelot_fdma_enabled);
+
+struct ocelot_fdma_dcb {
+ u32 llp;
+ u32 datap;
+ u32 datal;
+ u32 stat;
+} __packed;
+
+/**
+ * struct ocelot_fdma_tx_buf - TX buffer structure
+ * @skb: SKB currently used in the corresponding DCB.
+ * @dma_addr: SKB DMA mapped address.
+ */
+struct ocelot_fdma_tx_buf {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+};
+
+/**
+ * struct ocelot_fdma_tx_ring - TX ring description of DCBs
+ *
+ * @dcbs: DCBs allocated for the ring
+ * @dcbs_dma: DMA base address of the DCBs
+ * @bufs: List of TX buffers associated with the DCBs
+ * @xmit_lock: lock for concurrent xmit access
+ * @next_to_clean: Next DCB to be cleaned in tx_cleanup
+ * @next_to_use: Next available DCB to send SKB
+ */
+struct ocelot_fdma_tx_ring {
+ struct ocelot_fdma_dcb *dcbs;
+ dma_addr_t dcbs_dma;
+ struct ocelot_fdma_tx_buf bufs[OCELOT_FDMA_TX_RING_SIZE];
+ /* Protect concurrent xmit calls */
+ spinlock_t xmit_lock;
+ u16 next_to_clean;
+ u16 next_to_use;
+};
+
+/**
+ * struct ocelot_fdma_rx_buf - RX buffer structure
+ * @page: Struct page used in this buffer
+ * @page_offset: Current page offset (either 0 or PAGE_SIZE/2)
+ * @dma_addr: DMA address of the page
+ */
+struct ocelot_fdma_rx_buf {
+ struct page *page;
+ u32 page_offset;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct ocelot_fdma_rx_ring - RX ring description of DCBs
+ *
+ * @dcbs: DCBs allocated for the ring
+ * @dcbs_dma: DMA base address of the DCBs
+ * @bufs: List of RX buffers associated with the DCBs
+ * @skb: SKB currently received by the netdev
+ * @next_to_clean: Next DCB to be cleaned during NAPI polling
+ * @next_to_use: Next available DCB to send SKB
+ * @next_to_alloc: Next buffer that needs to be allocated (page reuse or alloc)
+ */
+struct ocelot_fdma_rx_ring {
+ struct ocelot_fdma_dcb *dcbs;
+ dma_addr_t dcbs_dma;
+ struct ocelot_fdma_rx_buf bufs[OCELOT_FDMA_RX_RING_SIZE];
+ struct sk_buff *skb;
+ u16 next_to_clean;
+ u16 next_to_use;
+ u16 next_to_alloc;
+};
+
+/**
+ * struct ocelot_fdma - FDMA context
+ *
+ * @irq: FDMA interrupt
+ * @ndev: Net device used to initialize NAPI
+ * @dcbs_base: Memory coherent DCBs
+ * @dcbs_dma_base: DMA base address of memory coherent DCBs
+ * @tx_ring: Injection ring
+ * @rx_ring: Extraction ring
+ * @napi: NAPI context
+ * @ocelot: Back-pointer to ocelot struct
+ */
+struct ocelot_fdma {
+ int irq;
+ struct net_device *ndev;
+ struct ocelot_fdma_dcb *dcbs_base;
+ dma_addr_t dcbs_dma_base;
+ struct ocelot_fdma_tx_ring tx_ring;
+ struct ocelot_fdma_rx_ring rx_ring;
+ struct napi_struct napi;
+ struct ocelot *ocelot;
+};
+
+void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot);
+void ocelot_fdma_start(struct ocelot *ocelot);
+void ocelot_fdma_deinit(struct ocelot *ocelot);
+int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op,
+ struct sk_buff *skb, struct net_device *dev);
+void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev);
+void ocelot_fdma_netdev_deinit(struct ocelot *ocelot,
+ struct net_device *dev);
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 769a8159373e..949858891973 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -20,6 +20,9 @@
(1 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP)
#define VCAP_IS2_CHAIN(lookup, pag) \
(2 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP + (pag))
+/* PSFP chain and block ID */
+#define PSFP_BLOCK_ID OCELOT_NUM_VCAP_BLOCKS
+#define OCELOT_PSFP_CHAIN (3 * VCAP_BLOCK)
static int ocelot_chain_to_block(int chain, bool ingress)
{
@@ -46,6 +49,9 @@ static int ocelot_chain_to_block(int chain, bool ingress)
if (chain == VCAP_IS2_CHAIN(lookup, pag))
return VCAP_IS2;
+ if (chain == OCELOT_PSFP_CHAIN)
+ return PSFP_BLOCK_ID;
+
return -EOPNOTSUPP;
}
@@ -84,7 +90,8 @@ static bool ocelot_is_goto_target_valid(int goto_target, int chain,
goto_target == VCAP_IS1_CHAIN(1) ||
goto_target == VCAP_IS1_CHAIN(2) ||
goto_target == VCAP_IS2_CHAIN(0, 0) ||
- goto_target == VCAP_IS2_CHAIN(1, 0));
+ goto_target == VCAP_IS2_CHAIN(1, 0) ||
+ goto_target == OCELOT_PSFP_CHAIN);
if (chain == VCAP_IS1_CHAIN(0))
return (goto_target == VCAP_IS1_CHAIN(1));
@@ -111,7 +118,11 @@ static bool ocelot_is_goto_target_valid(int goto_target, int chain,
if (chain == VCAP_IS2_CHAIN(0, pag))
return (goto_target == VCAP_IS2_CHAIN(1, pag));
- /* VCAP IS2 lookup 1 cannot jump anywhere */
+ /* VCAP IS2 lookup 1 can go to the PSFP block if the hardware supports it */
+ for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
+ if (chain == VCAP_IS2_CHAIN(1, pag))
+ return (goto_target == OCELOT_PSFP_CHAIN);
+
return false;
}
@@ -211,6 +222,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
const struct flow_action_entry *a;
enum ocelot_tag_tpid_sel tpid;
int i, chain, egress_port;
+ u32 pol_ix, pol_max;
u64 rate;
int err;
@@ -269,10 +281,14 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_POLICE:
+ if (filter->block_id == PSFP_BLOCK_ID) {
+ filter->type = OCELOT_PSFP_FILTER_OFFLOAD;
+ break;
+ }
if (filter->block_id != VCAP_IS2 ||
filter->lookup != 0) {
NL_SET_ERR_MSG_MOD(extack,
- "Police action can only be offloaded to VCAP IS2 lookup 0");
+ "Police action can only be offloaded to VCAP IS2 lookup 0 or PSFP");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
@@ -286,6 +302,20 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
return -EOPNOTSUPP;
}
filter->action.police_ena = true;
+
+ pol_ix = a->hw_index + ocelot->vcap_pol.base;
+ pol_max = ocelot->vcap_pol.max;
+
+ if (ocelot->vcap_pol.max2 && pol_ix > pol_max) {
+ pol_ix += ocelot->vcap_pol.base2 - pol_max - 1;
+ pol_max = ocelot->vcap_pol.max2;
+ }
+
+ if (pol_ix >= pol_max)
+ return -EINVAL;
+
+ filter->action.pol_ix = pol_ix;
+
rate = a->police.rate_bytes_ps;
filter->action.pol.rate = div_u64(rate, 1000) * 8;
filter->action.pol.burst = a->police.burst;
@@ -399,6 +429,14 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->action.pcp_a_val = a->vlan.prio;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
+ case FLOW_ACTION_GATE:
+ if (filter->block_id != PSFP_BLOCK_ID) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Gate action can only be offloaded to PSFP chain");
+ return -EOPNOTSUPP;
+ }
+ filter->type = OCELOT_PSFP_FILTER_OFFLOAD;
+ break;
default:
NL_SET_ERR_MSG_MOD(extack, "Cannot offload action");
return -EOPNOTSUPP;
@@ -407,7 +445,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
if (filter->goto_target == -1) {
if ((filter->block_id == VCAP_IS2 && filter->lookup == 1) ||
- chain == 0) {
+ chain == 0 || filter->block_id == PSFP_BLOCK_ID) {
allow_missing_goto_target = true;
} else {
NL_SET_ERR_MSG_MOD(extack, "Missing GOTO action");
@@ -521,13 +559,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
return -EOPNOTSUPP;
}
- if (filter->block_id == VCAP_IS1 &&
- !is_zero_ether_addr(match.mask->dst)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Key type S1_NORMAL cannot match on destination MAC");
- return -EOPNOTSUPP;
- }
-
	/* The hw supports MAC matches only for the MAC_ETYPE key,
	 * therefore if other matches (port, tcp flags, etc) are added
	 * then just bail out
@@ -542,6 +573,14 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
return -EOPNOTSUPP;
flow_rule_match_eth_addrs(rule, &match);
+
+ if (filter->block_id == VCAP_IS1 &&
+ !is_zero_ether_addr(match.mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Key type S1_NORMAL cannot match on destination MAC");
+ return -EOPNOTSUPP;
+ }
+
filter->key_type = OCELOT_VCAP_KEY_ETYPE;
ether_addr_copy(filter->key.etype.dmac.value,
match.key->dst);
@@ -689,6 +728,10 @@ static int ocelot_flower_parse(struct ocelot *ocelot, int port, bool ingress,
if (ret)
return ret;
+ /* PSFP filters need their key parsed by the stream identification function. */
+ if (filter->type == OCELOT_PSFP_FILTER_OFFLOAD)
+ return 0;
+
return ocelot_flower_parse_key(ocelot, port, ingress, f, filter);
}
@@ -763,13 +806,34 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
struct netlink_ext_ack *extack = f->common.extack;
struct ocelot_vcap_filter *filter;
int chain = f->common.chain_index;
- int ret;
+ int block_id, ret;
if (chain && !ocelot_find_vcap_filter_that_points_at(ocelot, chain)) {
NL_SET_ERR_MSG_MOD(extack, "No default GOTO action points to this chain");
return -EOPNOTSUPP;
}
+ block_id = ocelot_chain_to_block(chain, ingress);
+ if (block_id < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload to this chain");
+ return -EOPNOTSUPP;
+ }
+
+ filter = ocelot_vcap_block_find_filter_by_id(&ocelot->block[block_id],
+ f->cookie, true);
+ if (filter) {
+ /* Filter already exists on other ports */
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack, "VCAP ES0 does not support shared filters");
+ return -EOPNOTSUPP;
+ }
+
+ filter->ingress_port_mask |= BIT(port);
+
+ return ocelot_vcap_filter_replace(ocelot, filter);
+ }
+
+ /* Filter didn't exist, create it now */
filter = ocelot_vcap_filter_create(ocelot, port, ingress, f);
if (!filter)
return -ENOMEM;
@@ -792,6 +856,15 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
return ocelot_vcap_dummy_filter_add(ocelot, filter);
+ if (filter->type == OCELOT_PSFP_FILTER_OFFLOAD) {
+ kfree(filter);
+ if (ocelot->ops->psfp_filter_add)
+ return ocelot->ops->psfp_filter_add(ocelot, port, f);
+
+ NL_SET_ERR_MSG_MOD(extack, "PSFP chain is not supported in HW");
+ return -EOPNOTSUPP;
+ }
+
return ocelot_vcap_filter_add(ocelot, filter, f->common.extack);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
@@ -807,6 +880,13 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
if (block_id < 0)
return 0;
+ if (block_id == PSFP_BLOCK_ID) {
+ if (ocelot->ops->psfp_filter_del)
+ return ocelot->ops->psfp_filter_del(ocelot, f);
+
+ return -EOPNOTSUPP;
+ }
+
block = &ocelot->block[block_id];
filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true);
@@ -816,6 +896,12 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
return ocelot_vcap_dummy_filter_del(ocelot, filter);
+ if (ingress) {
+ filter->ingress_port_mask &= ~BIT(port);
+ if (filter->ingress_port_mask)
+ return ocelot_vcap_filter_replace(ocelot, filter);
+ }
+
return ocelot_vcap_filter_del(ocelot, filter);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy);
@@ -825,12 +911,25 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
{
struct ocelot_vcap_filter *filter;
struct ocelot_vcap_block *block;
+ struct flow_stats stats = {0};
int block_id, ret;
block_id = ocelot_chain_to_block(f->common.chain_index, ingress);
if (block_id < 0)
return 0;
+ if (block_id == PSFP_BLOCK_ID) {
+ if (ocelot->ops->psfp_stats_get) {
+ ret = ocelot->ops->psfp_stats_get(ocelot, f, &stats);
+ if (ret)
+ return ret;
+
+ goto stats_update;
+ }
+
+ return -EOPNOTSUPP;
+ }
+
block = &ocelot->block[block_id];
filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true);
@@ -841,7 +940,10 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
if (ret)
return ret;
- flow_stats_update(&f->stats, 0x0, filter->stats.pkts, 0, 0x0,
+ stats.pkts = filter->stats.pkts;
+
+stats_update:
+ flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops, 0x0,
FLOW_ACTION_HW_STATS_IMMEDIATE);
return 0;
}
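The FLOW_ACTION_POLICE hunk above translates the flat tc hw_index into one of up to two hardware policer pools. The same arithmetic, pulled out as a standalone sketch (function name hypothetical):

/* Map a tc police hw_index onto the hardware policer space.
 * base/max bound the first pool; base2/max2 describe an optional
 * second pool. Indices that overflow the first pool are relocated
 * into the second.
 */
static int ocelot_map_police_index(u32 hw_index, u32 base, u32 max,
				   u32 base2, u32 max2)
{
	u32 pol_ix = hw_index + base;

	if (max2 && pol_ix > max) {
		pol_ix += base2 - max - 1;
		max = max2;
	}

	if (pol_ix >= max)
		return -EINVAL;

	return pol_ix;
}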
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index eaeba60b1bba..e271b6225b72 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -15,6 +15,7 @@
#include <net/pkt_cls.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
+#include "ocelot_fdma.h"
#define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP
@@ -457,7 +458,8 @@ static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
int port = priv->chip_port;
u32 rew_op = 0;
- if (!ocelot_can_inject(ocelot, 0))
+ if (!static_branch_unlikely(&ocelot_fdma_enabled) &&
+ !ocelot_can_inject(ocelot, 0))
return NETDEV_TX_BUSY;
/* Check if timestamping is needed */
@@ -475,9 +477,13 @@ static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
rew_op = ocelot_ptp_rew_op(skb);
}
- ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+ if (static_branch_unlikely(&ocelot_fdma_enabled)) {
+ ocelot_fdma_inject_frame(ocelot, port, rew_op, skb, dev);
+ } else {
+ ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
- kfree_skb(skb);
+ consume_skb(skb);
+ }
return NETDEV_TX_OK;
}
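The xmit hunk above also swaps kfree_skb() for consume_skb() on the register-injection path: the frame has been handed to hardware successfully, so this is a normal release rather than a drop. A compact sketch of the resulting dispatch, lifted out of its driver context (names hypothetical):

static netdev_tx_t example_port_xmit(struct sk_buff *skb, bool use_fdma)
{
	if (use_fdma) {
		/* FDMA keeps ownership of the skb until TX completion */
		return NETDEV_TX_OK;
	}

	/* the frame was copied into the injection registers; release
	 * the skb as consumed, not dropped
	 */
	consume_skb(skb);
	return NETDEV_TX_OK;
}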
@@ -764,10 +770,23 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
+static int ocelot_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ ocelot_port_set_maxlen(ocelot, priv->chip_port, new_mtu);
+ WRITE_ONCE(dev->mtu, new_mtu);
+
+ return 0;
+}
+
static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_open = ocelot_port_open,
.ndo_stop = ocelot_port_stop,
.ndo_start_xmit = ocelot_port_xmit,
+ .ndo_change_mtu = ocelot_change_mtu,
.ndo_set_rx_mode = ocelot_set_rx_mode,
.ndo_set_mac_address = ocelot_port_set_mac_address,
.ndo_get_stats64 = ocelot_get_stats64,
@@ -1168,7 +1187,7 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
ocelot_port_bridge_join(ocelot, port, bridge);
err = switchdev_bridge_port_offload(brport_dev, dev, priv,
- &ocelot_netdevice_nb,
+ &ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb,
false, extack);
if (err)
@@ -1182,7 +1201,7 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
err_switchdev_sync:
switchdev_bridge_port_unoffload(brport_dev, priv,
- &ocelot_netdevice_nb,
+ &ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb);
err_switchdev_offload:
ocelot_port_bridge_leave(ocelot, port, bridge);
@@ -1195,7 +1214,7 @@ static void ocelot_netdevice_pre_bridge_leave(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
switchdev_bridge_port_unoffload(brport_dev, priv,
- &ocelot_netdevice_nb,
+ &ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb);
}
@@ -1498,40 +1517,6 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
.notifier_call = ocelot_switchdev_blocking_event,
};
-static void vsc7514_phylink_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct net_device *ndev = to_net_dev(config->dev);
- struct ocelot_port_private *priv = netdev_priv(ndev);
- struct ocelot_port *ocelot_port = &priv->port;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = {};
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != ocelot_port->phy_mode) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void vsc7514_phylink_mac_config(struct phylink_config *config,
unsigned int link_an_mode,
const struct phylink_link_state *state)
@@ -1590,7 +1575,7 @@ static void vsc7514_phylink_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops ocelot_phylink_ops = {
- .validate = vsc7514_phylink_validate,
+ .validate = phylink_generic_validate,
.mac_config = vsc7514_phylink_mac_config,
.mac_link_down = vsc7514_phylink_mac_link_down,
.mac_link_up = vsc7514_phylink_mac_link_up,
@@ -1654,6 +1639,11 @@ static int ocelot_port_phylink_create(struct ocelot *ocelot, int port,
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+
+ __set_bit(ocelot_port->phy_mode,
+ priv->phylink_config.supported_interfaces);
phylink = phylink_create(&priv->phylink_config,
of_fwnode_handle(portnp),
@@ -1699,12 +1689,16 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
dev->netdev_ops = &ocelot_port_netdev_ops;
dev->ethtool_ops = &ocelot_ethtool_ops;
+ dev->max_mtu = OCELOT_JUMBO_MTU;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS |
NETIF_F_HW_TC;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
- eth_hw_addr_gen(dev, ocelot->base_mac, port);
+ err = of_get_ethdev_address(portnp, dev);
+ if (err)
+ eth_hw_addr_gen(dev, ocelot->base_mac, port);
+
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr,
OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
@@ -1714,14 +1708,20 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
if (err)
goto out;
+ if (ocelot->fdma)
+ ocelot_fdma_netdev_init(ocelot, dev);
+
err = register_netdev(dev);
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
- goto out;
+ goto out_fdma_deinit;
}
return 0;
+out_fdma_deinit:
+ if (ocelot->fdma)
+ ocelot_fdma_netdev_deinit(ocelot, dev);
out:
ocelot->ports[port] = NULL;
free_netdev(dev);
@@ -1734,9 +1734,14 @@ void ocelot_release_port(struct ocelot_port *ocelot_port)
struct ocelot_port_private *priv = container_of(ocelot_port,
struct ocelot_port_private,
port);
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ struct ocelot_fdma *fdma = ocelot->fdma;
unregister_netdev(priv->dev);
+ if (fdma)
+ ocelot_fdma_netdev_deinit(ocelot, priv->dev);
+
if (priv->phylink) {
rtnl_lock();
phylink_disconnect_phy(priv->phylink);
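The phylink hunks above retire the hand-written vsc7514_phylink_validate() in favour of phylink_generic_validate(): the driver only declares its MAC capabilities and supported interfaces, and phylink derives the link modes itself. The general shape of that conversion (function name hypothetical):

/* Setup that lets a driver use phylink_generic_validate() instead of
 * open-coding a .validate callback, mirroring the vsc7514 change above.
 */
static void example_phylink_caps_init(struct phylink_config *config,
				      phy_interface_t phy_mode)
{
	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD |
				   MAC_2500FD;
	__set_bit(phy_mode, config->supported_interfaces);
}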
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index 337cd08b1a54..d3544413a8a4 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -887,10 +887,18 @@ static void vcap_entry_set(struct ocelot *ocelot, int ix,
return es0_entry_set(ocelot, ix, filter);
}
-static int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
- struct ocelot_policer *pol)
+struct vcap_policer_entry {
+ struct list_head list;
+ refcount_t refcount;
+ u32 pol_ix;
+};
+
+int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
+ struct ocelot_policer *pol)
{
struct qos_policer_conf pp = { 0 };
+ struct vcap_policer_entry *tmp;
+ int ret;
if (!pol)
return -EINVAL;
@@ -899,57 +907,74 @@ static int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
pp.pir = pol->rate;
pp.pbs = pol->burst;
- return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ list_for_each_entry(tmp, &ocelot->vcap_pol.pol_list, list)
+ if (tmp->pol_ix == pol_ix) {
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ if (ret) {
+ kfree(tmp);
+ return ret;
+ }
+
+ tmp->pol_ix = pol_ix;
+ refcount_set(&tmp->refcount, 1);
+ list_add_tail(&tmp->list, &ocelot->vcap_pol.pol_list);
+
+ return 0;
}
+EXPORT_SYMBOL(ocelot_vcap_policer_add);
-static void ocelot_vcap_policer_del(struct ocelot *ocelot,
- struct ocelot_vcap_block *block,
- u32 pol_ix)
+int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix)
{
- struct ocelot_vcap_filter *filter;
struct qos_policer_conf pp = {0};
- int index = -1;
-
- if (pol_ix < block->pol_lpr)
- return;
-
- list_for_each_entry(filter, &block->rules, list) {
- index++;
- if (filter->block_id == VCAP_IS2 &&
- filter->action.police_ena &&
- filter->action.pol_ix < pol_ix) {
- filter->action.pol_ix += 1;
- ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
- &filter->action.pol);
- is2_entry_set(ocelot, index, filter);
+ struct vcap_policer_entry *tmp, *n;
+ u8 z = 0;
+
+ list_for_each_entry_safe(tmp, n, &ocelot->vcap_pol.pol_list, list)
+ if (tmp->pol_ix == pol_ix) {
+ z = refcount_dec_and_test(&tmp->refcount);
+ if (z) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
}
- }
- pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
- qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ if (z) {
+ pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
+ return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+ }
- block->pol_lpr++;
+ return 0;
}
+EXPORT_SYMBOL(ocelot_vcap_policer_del);
-static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
- struct ocelot_vcap_block *block,
- struct ocelot_vcap_filter *filter)
+static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
+ struct ocelot_vcap_block *block,
+ struct ocelot_vcap_filter *filter)
{
struct ocelot_vcap_filter *tmp;
struct list_head *pos, *n;
+ int ret;
if (filter->block_id == VCAP_IS2 && filter->action.police_ena) {
- block->pol_lpr--;
- filter->action.pol_ix = block->pol_lpr;
- ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
- &filter->action.pol);
+ ret = ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
+ &filter->action.pol);
+ if (ret)
+ return ret;
}
block->count++;
if (list_empty(&block->rules)) {
list_add(&filter->list, &block->rules);
- return;
+ return 0;
}
list_for_each_safe(pos, n, &block->rules) {
@@ -958,6 +983,8 @@ static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
break;
}
list_add(&filter->list, pos->prev);
+
+ return 0;
}
static bool ocelot_vcap_filter_equal(const struct ocelot_vcap_filter *a,
@@ -1132,7 +1159,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
struct netlink_ext_ack *extack)
{
struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
- int i, index;
+ int i, index, ret;
if (!ocelot_exclusive_mac_etype_filter_rules(ocelot, filter)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -1141,7 +1168,9 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
}
/* Add filter to the linked list */
- ocelot_vcap_filter_add_to_block(ocelot, block, filter);
+ ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter);
+ if (ret)
+ return ret;
/* Get the index of the inserted filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
@@ -1174,7 +1203,7 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
if (ocelot_vcap_filter_equal(filter, tmp)) {
if (tmp->block_id == VCAP_IS2 &&
tmp->action.police_ena)
- ocelot_vcap_policer_del(ocelot, block,
+ ocelot_vcap_policer_del(ocelot,
tmp->action.pol_ix);
list_del(pos);
@@ -1366,13 +1395,13 @@ int ocelot_vcap_init(struct ocelot *ocelot)
struct vcap_props *vcap = &ocelot->vcap[i];
INIT_LIST_HEAD(&block->rules);
- block->pol_lpr = OCELOT_POLICER_DISCARD - 1;
ocelot_vcap_detect_constants(ocelot, vcap);
ocelot_vcap_init_one(ocelot, vcap);
}
INIT_LIST_HEAD(&ocelot->dummy_rules);
+ INIT_LIST_HEAD(&ocelot->vcap_pol.pol_list);
return 0;
}
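The policer helpers above are now refcounted and exported: programming the same pol_ix twice only touches the hardware once, and the policer is only disabled when its last user goes away. A usage sketch (hypothetical caller, not part of the patch):

static int example_shared_policer(struct ocelot *ocelot, u32 pol_ix,
				  struct ocelot_policer *pol)
{
	int err;

	err = ocelot_vcap_policer_add(ocelot, pol_ix, pol); /* HW programmed, refcount 1 */
	if (err)
		return err;

	err = ocelot_vcap_policer_add(ocelot, pol_ix, pol); /* refcount 2, HW untouched */
	if (err)
		goto out;

	ocelot_vcap_policer_del(ocelot, pol_ix);	/* refcount 1, HW untouched */
out:
	ocelot_vcap_policer_del(ocelot, pol_ix);	/* refcount 0, policer disabled */
	return err;
}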
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 38103b0255b0..4f4a495a60ad 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -18,313 +18,24 @@
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_hsio.h>
+#include <soc/mscc/vsc7514_regs.h>
+#include "ocelot_fdma.h"
#include "ocelot.h"
-static const u32 ocelot_ana_regmap[] = {
- REG(ANA_ADVLEARN, 0x009000),
- REG(ANA_VLANMASK, 0x009004),
- REG(ANA_PORT_B_DOMAIN, 0x009008),
- REG(ANA_ANAGEFIL, 0x00900c),
- REG(ANA_ANEVENTS, 0x009010),
- REG(ANA_STORMLIMIT_BURST, 0x009014),
- REG(ANA_STORMLIMIT_CFG, 0x009018),
- REG(ANA_ISOLATED_PORTS, 0x009028),
- REG(ANA_COMMUNITY_PORTS, 0x00902c),
- REG(ANA_AUTOAGE, 0x009030),
- REG(ANA_MACTOPTIONS, 0x009034),
- REG(ANA_LEARNDISC, 0x009038),
- REG(ANA_AGENCTRL, 0x00903c),
- REG(ANA_MIRRORPORTS, 0x009040),
- REG(ANA_EMIRRORPORTS, 0x009044),
- REG(ANA_FLOODING, 0x009048),
- REG(ANA_FLOODING_IPMC, 0x00904c),
- REG(ANA_SFLOW_CFG, 0x009050),
- REG(ANA_PORT_MODE, 0x009080),
- REG(ANA_PGID_PGID, 0x008c00),
- REG(ANA_TABLES_ANMOVED, 0x008b30),
- REG(ANA_TABLES_MACHDATA, 0x008b34),
- REG(ANA_TABLES_MACLDATA, 0x008b38),
- REG(ANA_TABLES_MACACCESS, 0x008b3c),
- REG(ANA_TABLES_MACTINDX, 0x008b40),
- REG(ANA_TABLES_VLANACCESS, 0x008b44),
- REG(ANA_TABLES_VLANTIDX, 0x008b48),
- REG(ANA_TABLES_ISDXACCESS, 0x008b4c),
- REG(ANA_TABLES_ISDXTIDX, 0x008b50),
- REG(ANA_TABLES_ENTRYLIM, 0x008b00),
- REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54),
- REG(ANA_TABLES_PTP_ID_LOW, 0x008b58),
- REG(ANA_MSTI_STATE, 0x008e00),
- REG(ANA_PORT_VLAN_CFG, 0x007000),
- REG(ANA_PORT_DROP_CFG, 0x007004),
- REG(ANA_PORT_QOS_CFG, 0x007008),
- REG(ANA_PORT_VCAP_CFG, 0x00700c),
- REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010),
- REG(ANA_PORT_VCAP_S2_CFG, 0x00701c),
- REG(ANA_PORT_PCP_DEI_MAP, 0x007020),
- REG(ANA_PORT_CPU_FWD_CFG, 0x007060),
- REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064),
- REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068),
- REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c),
- REG(ANA_PORT_PORT_CFG, 0x007070),
- REG(ANA_PORT_POL_CFG, 0x007074),
- REG(ANA_PORT_PTP_CFG, 0x007078),
- REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c),
- REG(ANA_OAM_UPM_LM_CNT, 0x007c00),
- REG(ANA_PORT_PTP_DLY2_CFG, 0x007080),
- REG(ANA_PFC_PFC_CFG, 0x008800),
- REG(ANA_PFC_PFC_TIMER, 0x008804),
- REG(ANA_IPT_OAM_MEP_CFG, 0x008000),
- REG(ANA_IPT_IPT, 0x008004),
- REG(ANA_PPT_PPT, 0x008ac0),
- REG(ANA_FID_MAP_FID_MAP, 0x000000),
- REG(ANA_AGGR_CFG, 0x0090b4),
- REG(ANA_CPUQ_CFG, 0x0090b8),
- REG(ANA_CPUQ_CFG2, 0x0090bc),
- REG(ANA_CPUQ_8021_CFG, 0x0090c0),
- REG(ANA_DSCP_CFG, 0x009100),
- REG(ANA_DSCP_REWR_CFG, 0x009200),
- REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240),
- REG(ANA_VCAP_RNG_VAL_CFG, 0x009260),
- REG(ANA_VRAP_CFG, 0x009280),
- REG(ANA_VRAP_HDR_DATA, 0x009284),
- REG(ANA_VRAP_HDR_MASK, 0x009288),
- REG(ANA_DISCARD_CFG, 0x00928c),
- REG(ANA_FID_CFG, 0x009290),
- REG(ANA_POL_PIR_CFG, 0x004000),
- REG(ANA_POL_CIR_CFG, 0x004004),
- REG(ANA_POL_MODE_CFG, 0x004008),
- REG(ANA_POL_PIR_STATE, 0x00400c),
- REG(ANA_POL_CIR_STATE, 0x004010),
- REG(ANA_POL_STATE, 0x004014),
- REG(ANA_POL_FLOWC, 0x008b80),
- REG(ANA_POL_HYST, 0x008bec),
- REG(ANA_POL_MISC_CFG, 0x008bf0),
-};
-
-static const u32 ocelot_qs_regmap[] = {
- REG(QS_XTR_GRP_CFG, 0x000000),
- REG(QS_XTR_RD, 0x000008),
- REG(QS_XTR_FRM_PRUNING, 0x000010),
- REG(QS_XTR_FLUSH, 0x000018),
- REG(QS_XTR_DATA_PRESENT, 0x00001c),
- REG(QS_XTR_CFG, 0x000020),
- REG(QS_INJ_GRP_CFG, 0x000024),
- REG(QS_INJ_WR, 0x00002c),
- REG(QS_INJ_CTRL, 0x000034),
- REG(QS_INJ_STATUS, 0x00003c),
- REG(QS_INJ_ERR, 0x000040),
- REG(QS_INH_DBG, 0x000048),
-};
-
-static const u32 ocelot_qsys_regmap[] = {
- REG(QSYS_PORT_MODE, 0x011200),
- REG(QSYS_SWITCH_PORT_MODE, 0x011234),
- REG(QSYS_STAT_CNT_CFG, 0x011264),
- REG(QSYS_EEE_CFG, 0x011268),
- REG(QSYS_EEE_THRES, 0x011294),
- REG(QSYS_IGR_NO_SHARING, 0x011298),
- REG(QSYS_EGR_NO_SHARING, 0x01129c),
- REG(QSYS_SW_STATUS, 0x0112a0),
- REG(QSYS_EXT_CPU_CFG, 0x0112d0),
- REG(QSYS_PAD_CFG, 0x0112d4),
- REG(QSYS_CPU_GROUP_MAP, 0x0112d8),
- REG(QSYS_QMAP, 0x0112dc),
- REG(QSYS_ISDX_SGRP, 0x011400),
- REG(QSYS_TIMED_FRAME_ENTRY, 0x014000),
- REG(QSYS_TFRM_MISC, 0x011310),
- REG(QSYS_TFRM_PORT_DLY, 0x011314),
- REG(QSYS_TFRM_TIMER_CFG_1, 0x011318),
- REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c),
- REG(QSYS_TFRM_TIMER_CFG_3, 0x011320),
- REG(QSYS_TFRM_TIMER_CFG_4, 0x011324),
- REG(QSYS_TFRM_TIMER_CFG_5, 0x011328),
- REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c),
- REG(QSYS_TFRM_TIMER_CFG_7, 0x011330),
- REG(QSYS_TFRM_TIMER_CFG_8, 0x011334),
- REG(QSYS_RED_PROFILE, 0x011338),
- REG(QSYS_RES_QOS_MODE, 0x011378),
- REG(QSYS_RES_CFG, 0x012000),
- REG(QSYS_RES_STAT, 0x012004),
- REG(QSYS_EGR_DROP_MODE, 0x01137c),
- REG(QSYS_EQ_CTRL, 0x011380),
- REG(QSYS_EVENTS_CORE, 0x011384),
- REG(QSYS_CIR_CFG, 0x000000),
- REG(QSYS_EIR_CFG, 0x000004),
- REG(QSYS_SE_CFG, 0x000008),
- REG(QSYS_SE_DWRR_CFG, 0x00000c),
- REG(QSYS_SE_CONNECT, 0x00003c),
- REG(QSYS_SE_DLB_SENSE, 0x000040),
- REG(QSYS_CIR_STATE, 0x000044),
- REG(QSYS_EIR_STATE, 0x000048),
- REG(QSYS_SE_STATE, 0x00004c),
- REG(QSYS_HSCH_MISC_CFG, 0x011388),
-};
-
-static const u32 ocelot_rew_regmap[] = {
- REG(REW_PORT_VLAN_CFG, 0x000000),
- REG(REW_TAG_CFG, 0x000004),
- REG(REW_PORT_CFG, 0x000008),
- REG(REW_DSCP_CFG, 0x00000c),
- REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
- REG(REW_PTP_CFG, 0x000050),
- REG(REW_PTP_DLY1_CFG, 0x000054),
- REG(REW_DSCP_REMAP_DP1_CFG, 0x000690),
- REG(REW_DSCP_REMAP_CFG, 0x000790),
- REG(REW_STAT_CFG, 0x000890),
- REG(REW_PPT, 0x000680),
-};
-
-static const u32 ocelot_sys_regmap[] = {
- REG(SYS_COUNT_RX_OCTETS, 0x000000),
- REG(SYS_COUNT_RX_UNICAST, 0x000004),
- REG(SYS_COUNT_RX_MULTICAST, 0x000008),
- REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
- REG(SYS_COUNT_RX_SHORTS, 0x000010),
- REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
- REG(SYS_COUNT_RX_JABBERS, 0x000018),
- REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
- REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
- REG(SYS_COUNT_RX_64, 0x000024),
- REG(SYS_COUNT_RX_65_127, 0x000028),
- REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_PAUSE, 0x00003c),
- REG(SYS_COUNT_RX_CONTROL, 0x000040),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
- REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
- REG(SYS_COUNT_TX_OCTETS, 0x000100),
- REG(SYS_COUNT_TX_UNICAST, 0x000104),
- REG(SYS_COUNT_TX_MULTICAST, 0x000108),
- REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
- REG(SYS_COUNT_TX_COLLISION, 0x000110),
- REG(SYS_COUNT_TX_DROPS, 0x000114),
- REG(SYS_COUNT_TX_PAUSE, 0x000118),
- REG(SYS_COUNT_TX_64, 0x00011c),
- REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000170),
- REG(SYS_RESET_CFG, 0x000508),
- REG(SYS_CMID, 0x00050c),
- REG(SYS_VLAN_ETYPE_CFG, 0x000510),
- REG(SYS_PORT_MODE, 0x000514),
- REG(SYS_FRONT_PORT_MODE, 0x000548),
- REG(SYS_FRM_AGING, 0x000574),
- REG(SYS_STAT_CFG, 0x000578),
- REG(SYS_SW_STATUS, 0x00057c),
- REG(SYS_MISC_CFG, 0x0005ac),
- REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0),
- REG(SYS_REW_MAC_LOW_CFG, 0x0005dc),
- REG(SYS_CM_ADDR, 0x000500),
- REG(SYS_CM_DATA, 0x000504),
- REG(SYS_PAUSE_CFG, 0x000608),
- REG(SYS_PAUSE_TOT_CFG, 0x000638),
- REG(SYS_ATOP, 0x00063c),
- REG(SYS_ATOP_TOT_CFG, 0x00066c),
- REG(SYS_MAC_FC_CFG, 0x000670),
- REG(SYS_MMGT, 0x00069c),
- REG(SYS_MMGT_FAST, 0x0006a0),
- REG(SYS_EVENTS_DIF, 0x0006a4),
- REG(SYS_EVENTS_CORE, 0x0006b4),
- REG(SYS_CNT, 0x000000),
- REG(SYS_PTP_STATUS, 0x0006b8),
- REG(SYS_PTP_TXSTAMP, 0x0006bc),
- REG(SYS_PTP_NXT, 0x0006c0),
- REG(SYS_PTP_CFG, 0x0006c4),
-};
-
-static const u32 ocelot_vcap_regmap[] = {
- /* VCAP_CORE_CFG */
- REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
- REG(VCAP_CORE_MV_CFG, 0x000004),
- /* VCAP_CORE_CACHE */
- REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
- REG(VCAP_CACHE_MASK_DAT, 0x000108),
- REG(VCAP_CACHE_ACTION_DAT, 0x000208),
- REG(VCAP_CACHE_CNT_DAT, 0x000308),
- REG(VCAP_CACHE_TG_DAT, 0x000388),
- /* VCAP_CONST */
- REG(VCAP_CONST_VCAP_VER, 0x000398),
- REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
- REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
- REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
- REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
- REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
- REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
- REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
- REG(VCAP_CONST_CORE_CNT, 0x0003b8),
- REG(VCAP_CONST_IF_CNT, 0x0003bc),
-};
-
-static const u32 ocelot_ptp_regmap[] = {
- REG(PTP_PIN_CFG, 0x000000),
- REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
- REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
- REG(PTP_PIN_TOD_NSEC, 0x00000c),
- REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
- REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
- REG(PTP_CFG_MISC, 0x0000a0),
- REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
- REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
-};
-
-static const u32 ocelot_dev_gmii_regmap[] = {
- REG(DEV_CLOCK_CFG, 0x0),
- REG(DEV_PORT_MISC, 0x4),
- REG(DEV_EVENTS, 0x8),
- REG(DEV_EEE_CFG, 0xc),
- REG(DEV_RX_PATH_DELAY, 0x10),
- REG(DEV_TX_PATH_DELAY, 0x14),
- REG(DEV_PTP_PREDICT_CFG, 0x18),
- REG(DEV_MAC_ENA_CFG, 0x1c),
- REG(DEV_MAC_MODE_CFG, 0x20),
- REG(DEV_MAC_MAXLEN_CFG, 0x24),
- REG(DEV_MAC_TAGS_CFG, 0x28),
- REG(DEV_MAC_ADV_CHK_CFG, 0x2c),
- REG(DEV_MAC_IFG_CFG, 0x30),
- REG(DEV_MAC_HDX_CFG, 0x34),
- REG(DEV_MAC_DBG_CFG, 0x38),
- REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
- REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
- REG(DEV_MAC_STICKY, 0x44),
- REG(PCS1G_CFG, 0x48),
- REG(PCS1G_MODE_CFG, 0x4c),
- REG(PCS1G_SD_CFG, 0x50),
- REG(PCS1G_ANEG_CFG, 0x54),
- REG(PCS1G_ANEG_NP_CFG, 0x58),
- REG(PCS1G_LB_CFG, 0x5c),
- REG(PCS1G_DBG_CFG, 0x60),
- REG(PCS1G_CDET_CFG, 0x64),
- REG(PCS1G_ANEG_STATUS, 0x68),
- REG(PCS1G_ANEG_NP_STATUS, 0x6c),
- REG(PCS1G_LINK_STATUS, 0x70),
- REG(PCS1G_LINK_DOWN_CNT, 0x74),
- REG(PCS1G_STICKY, 0x78),
- REG(PCS1G_DEBUG_STATUS, 0x7c),
- REG(PCS1G_LPI_CFG, 0x80),
- REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84),
- REG(PCS1G_LPI_STATUS, 0x88),
- REG(PCS1G_TSTPAT_MODE_CFG, 0x8c),
- REG(PCS1G_TSTPAT_STATUS, 0x90),
- REG(DEV_PCS_FX100_CFG, 0x94),
- REG(DEV_PCS_FX100_STATUS, 0x98),
-};
+#define VSC7514_VCAP_POLICER_BASE 128
+#define VSC7514_VCAP_POLICER_MAX 191
static const u32 *ocelot_regmap[TARGET_MAX] = {
- [ANA] = ocelot_ana_regmap,
- [QS] = ocelot_qs_regmap,
- [QSYS] = ocelot_qsys_regmap,
- [REW] = ocelot_rew_regmap,
- [SYS] = ocelot_sys_regmap,
- [S0] = ocelot_vcap_regmap,
- [S1] = ocelot_vcap_regmap,
- [S2] = ocelot_vcap_regmap,
- [PTP] = ocelot_ptp_regmap,
- [DEV_GMII] = ocelot_dev_gmii_regmap,
+ [ANA] = vsc7514_ana_regmap,
+ [QS] = vsc7514_qs_regmap,
+ [QSYS] = vsc7514_qsys_regmap,
+ [REW] = vsc7514_rew_regmap,
+ [SYS] = vsc7514_sys_regmap,
+ [S0] = vsc7514_vcap_regmap,
+ [S1] = vsc7514_vcap_regmap,
+ [S2] = vsc7514_vcap_regmap,
+ [PTP] = vsc7514_ptp_regmap,
+ [DEV_GMII] = vsc7514_dev_gmii_regmap,
};
static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
@@ -633,211 +344,6 @@ static const struct ocelot_ops ocelot_ops = {
.netdev_to_port = ocelot_netdev_to_port,
};
-static const struct vcap_field vsc7514_vcap_es0_keys[] = {
- [VCAP_ES0_EGR_PORT] = { 0, 4},
- [VCAP_ES0_IGR_PORT] = { 4, 4},
- [VCAP_ES0_RSV] = { 8, 2},
- [VCAP_ES0_L2_MC] = { 10, 1},
- [VCAP_ES0_L2_BC] = { 11, 1},
- [VCAP_ES0_VID] = { 12, 12},
- [VCAP_ES0_DP] = { 24, 1},
- [VCAP_ES0_PCP] = { 25, 3},
-};
-
-static const struct vcap_field vsc7514_vcap_es0_actions[] = {
- [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
- [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
- [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
- [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
- [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
- [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
- [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
- [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
- [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
- [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
- [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
- [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
- [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
- [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
- [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
- [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
- [VCAP_ES0_ACT_RSV] = { 49, 24},
- [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1},
-};
-
-static const struct vcap_field vsc7514_vcap_is1_keys[] = {
- [VCAP_IS1_HK_TYPE] = { 0, 1},
- [VCAP_IS1_HK_LOOKUP] = { 1, 2},
- [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12},
- [VCAP_IS1_HK_RSV] = { 15, 9},
- [VCAP_IS1_HK_OAM_Y1731] = { 24, 1},
- [VCAP_IS1_HK_L2_MC] = { 25, 1},
- [VCAP_IS1_HK_L2_BC] = { 26, 1},
- [VCAP_IS1_HK_IP_MC] = { 27, 1},
- [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1},
- [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1},
- [VCAP_IS1_HK_TPID] = { 30, 1},
- [VCAP_IS1_HK_VID] = { 31, 12},
- [VCAP_IS1_HK_DEI] = { 43, 1},
- [VCAP_IS1_HK_PCP] = { 44, 3},
- /* Specific Fields for IS1 Half Key S1_NORMAL */
- [VCAP_IS1_HK_L2_SMAC] = { 47, 48},
- [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1},
- [VCAP_IS1_HK_ETYPE] = { 96, 16},
- [VCAP_IS1_HK_IP_SNAP] = {112, 1},
- [VCAP_IS1_HK_IP4] = {113, 1},
- /* Layer-3 Information */
- [VCAP_IS1_HK_L3_FRAGMENT] = {114, 1},
- [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {115, 1},
- [VCAP_IS1_HK_L3_OPTIONS] = {116, 1},
- [VCAP_IS1_HK_L3_DSCP] = {117, 6},
- [VCAP_IS1_HK_L3_IP4_SIP] = {123, 32},
- /* Layer-4 Information */
- [VCAP_IS1_HK_TCP_UDP] = {155, 1},
- [VCAP_IS1_HK_TCP] = {156, 1},
- [VCAP_IS1_HK_L4_SPORT] = {157, 16},
- [VCAP_IS1_HK_L4_RNG] = {173, 8},
- /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
- [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1},
- [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12},
- [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1},
- [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3},
- [VCAP_IS1_HK_IP4_IP4] = { 64, 1},
- [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1},
- [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1},
- [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1},
- [VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6},
- [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32},
- [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {106, 32},
- [VCAP_IS1_HK_IP4_L3_PROTO] = {138, 8},
- [VCAP_IS1_HK_IP4_TCP_UDP] = {146, 1},
- [VCAP_IS1_HK_IP4_TCP] = {147, 1},
- [VCAP_IS1_HK_IP4_L4_RNG] = {148, 8},
- [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {156, 32},
-};
-
-static const struct vcap_field vsc7514_vcap_is1_actions[] = {
- [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
- [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
- [VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
- [VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
- [VCAP_IS1_ACT_DP_ENA] = { 11, 1},
- [VCAP_IS1_ACT_DP_VAL] = { 12, 1},
- [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
- [VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
- [VCAP_IS1_ACT_RSV] = { 29, 9},
- /* The fields below are incorrectly shifted by 2 in the manual */
- [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1},
- [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12},
- [VCAP_IS1_ACT_FID_SEL] = { 51, 2},
- [VCAP_IS1_ACT_FID_VAL] = { 53, 13},
- [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1},
- [VCAP_IS1_ACT_PCP_VAL] = { 67, 3},
- [VCAP_IS1_ACT_DEI_VAL] = { 70, 1},
- [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1},
- [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2},
- [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4},
- [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1},
-};
-
-static const struct vcap_field vsc7514_vcap_is2_keys[] = {
- /* Common: 46 bits */
- [VCAP_IS2_TYPE] = { 0, 4},
- [VCAP_IS2_HK_FIRST] = { 4, 1},
- [VCAP_IS2_HK_PAG] = { 5, 8},
- [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12},
- [VCAP_IS2_HK_RSV2] = { 25, 1},
- [VCAP_IS2_HK_HOST_MATCH] = { 26, 1},
- [VCAP_IS2_HK_L2_MC] = { 27, 1},
- [VCAP_IS2_HK_L2_BC] = { 28, 1},
- [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1},
- [VCAP_IS2_HK_VID] = { 30, 12},
- [VCAP_IS2_HK_DEI] = { 42, 1},
- [VCAP_IS2_HK_PCP] = { 43, 3},
- /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
- [VCAP_IS2_HK_L2_DMAC] = { 46, 48},
- [VCAP_IS2_HK_L2_SMAC] = { 94, 48},
- /* MAC_ETYPE (TYPE=000) */
- [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {142, 16},
- [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {158, 16},
- [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {174, 8},
- [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {182, 3},
- /* MAC_LLC (TYPE=001) */
- [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {142, 40},
- /* MAC_SNAP (TYPE=010) */
- [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {142, 40},
- /* MAC_ARP (TYPE=011) */
- [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48},
- [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1},
- [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1},
- [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1},
- [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1},
- [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1},
- [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1},
- [VCAP_IS2_HK_MAC_ARP_OPCODE] = {100, 2},
- [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = {102, 32},
- [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {134, 32},
- [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {166, 1},
- /* IP4_TCP_UDP / IP4_OTHER common */
- [VCAP_IS2_HK_IP4] = { 46, 1},
- [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1},
- [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1},
- [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1},
- [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1},
- [VCAP_IS2_HK_L3_TOS] = { 51, 8},
- [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32},
- [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32},
- [VCAP_IS2_HK_DIP_EQ_SIP] = {123, 1},
- /* IP4_TCP_UDP (TYPE=100) */
- [VCAP_IS2_HK_TCP] = {124, 1},
- [VCAP_IS2_HK_L4_DPORT] = {125, 16},
- [VCAP_IS2_HK_L4_SPORT] = {141, 16},
- [VCAP_IS2_HK_L4_RNG] = {157, 8},
- [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {165, 1},
- [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {166, 1},
- [VCAP_IS2_HK_L4_FIN] = {167, 1},
- [VCAP_IS2_HK_L4_SYN] = {168, 1},
- [VCAP_IS2_HK_L4_RST] = {169, 1},
- [VCAP_IS2_HK_L4_PSH] = {170, 1},
- [VCAP_IS2_HK_L4_ACK] = {171, 1},
- [VCAP_IS2_HK_L4_URG] = {172, 1},
- [VCAP_IS2_HK_L4_1588_DOM] = {173, 8},
- [VCAP_IS2_HK_L4_1588_VER] = {181, 4},
- /* IP4_OTHER (TYPE=101) */
- [VCAP_IS2_HK_IP4_L3_PROTO] = {124, 8},
- [VCAP_IS2_HK_L3_PAYLOAD] = {132, 56},
- /* IP6_STD (TYPE=110) */
- [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1},
- [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128},
- [VCAP_IS2_HK_IP6_L3_PROTO] = {175, 8},
- /* OAM (TYPE=111) */
- [VCAP_IS2_HK_OAM_MEL_FLAGS] = {142, 7},
- [VCAP_IS2_HK_OAM_VER] = {149, 5},
- [VCAP_IS2_HK_OAM_OPCODE] = {154, 8},
- [VCAP_IS2_HK_OAM_FLAGS] = {162, 8},
- [VCAP_IS2_HK_OAM_MEPID] = {170, 16},
- [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = {186, 1},
- [VCAP_IS2_HK_OAM_IS_Y1731] = {187, 1},
-};
-
-static const struct vcap_field vsc7514_vcap_is2_actions[] = {
- [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1},
- [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1},
- [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3},
- [VCAP_IS2_ACT_MASK_MODE] = { 5, 2},
- [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1},
- [VCAP_IS2_ACT_LRN_DIS] = { 8, 1},
- [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1},
- [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9},
- [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1},
- [VCAP_IS2_ACT_PORT_MASK] = { 20, 11},
- [VCAP_IS2_ACT_REW_OP] = { 31, 9},
- [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1},
- [VCAP_IS2_ACT_RSV] = { 41, 2},
- [VCAP_IS2_ACT_ACL_ID] = { 43, 6},
- [VCAP_IS2_ACT_HIT_CNT] = { 49, 32},
-};
-
static struct vcap_props vsc7514_vcap_props[] = {
[VCAP_ES0] = {
.action_type_width = 0,
@@ -1045,6 +551,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
{ S1, "s1" },
{ S2, "s2" },
{ PTP, "ptp", 1 },
+ { FDMA, "fdma", 1 },
};
if (!np && !pdev->dev.platform_data)
@@ -1080,6 +587,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[io_target[i].id] = target;
}
+ if (ocelot->targets[FDMA])
+ ocelot_fdma_init(pdev, ocelot);
+
hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
if (IS_ERR(hsio)) {
dev_err(&pdev->dev, "missing hsio syscon\n");
@@ -1129,6 +639,10 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->num_flooding_pgids = 1;
ocelot->vcap = vsc7514_vcap_props;
+
+ ocelot->vcap_pol.base = VSC7514_VCAP_POLICER_BASE;
+ ocelot->vcap_pol.max = VSC7514_VCAP_POLICER_MAX;
+
ocelot->npi = -1;
err = ocelot_init(ocelot);
@@ -1139,6 +653,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
goto out_ocelot_devlink_unregister;
+ if (ocelot->fdma)
+ ocelot_fdma_start(ocelot);
+
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_ocelot_release_ports;
@@ -1179,6 +696,8 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
+ if (ocelot->fdma)
+ ocelot_fdma_deinit(ocelot);
devlink_unregister(ocelot->devlink);
ocelot_deinit_timestamp(ocelot);
ocelot_devlink_sb_unregister(ocelot);
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
new file mode 100644
index 000000000000..c2af4eb8ca5d
--- /dev/null
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ * Copyright (c) 2021 Innovative Advantage
+ */
+#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/vsc7514_regs.h>
+#include "ocelot.h"
+
+const u32 vsc7514_ana_regmap[] = {
+ REG(ANA_ADVLEARN, 0x009000),
+ REG(ANA_VLANMASK, 0x009004),
+ REG(ANA_PORT_B_DOMAIN, 0x009008),
+ REG(ANA_ANAGEFIL, 0x00900c),
+ REG(ANA_ANEVENTS, 0x009010),
+ REG(ANA_STORMLIMIT_BURST, 0x009014),
+ REG(ANA_STORMLIMIT_CFG, 0x009018),
+ REG(ANA_ISOLATED_PORTS, 0x009028),
+ REG(ANA_COMMUNITY_PORTS, 0x00902c),
+ REG(ANA_AUTOAGE, 0x009030),
+ REG(ANA_MACTOPTIONS, 0x009034),
+ REG(ANA_LEARNDISC, 0x009038),
+ REG(ANA_AGENCTRL, 0x00903c),
+ REG(ANA_MIRRORPORTS, 0x009040),
+ REG(ANA_EMIRRORPORTS, 0x009044),
+ REG(ANA_FLOODING, 0x009048),
+ REG(ANA_FLOODING_IPMC, 0x00904c),
+ REG(ANA_SFLOW_CFG, 0x009050),
+ REG(ANA_PORT_MODE, 0x009080),
+ REG(ANA_PGID_PGID, 0x008c00),
+ REG(ANA_TABLES_ANMOVED, 0x008b30),
+ REG(ANA_TABLES_MACHDATA, 0x008b34),
+ REG(ANA_TABLES_MACLDATA, 0x008b38),
+ REG(ANA_TABLES_MACACCESS, 0x008b3c),
+ REG(ANA_TABLES_MACTINDX, 0x008b40),
+ REG(ANA_TABLES_VLANACCESS, 0x008b44),
+ REG(ANA_TABLES_VLANTIDX, 0x008b48),
+ REG(ANA_TABLES_ISDXACCESS, 0x008b4c),
+ REG(ANA_TABLES_ISDXTIDX, 0x008b50),
+ REG(ANA_TABLES_ENTRYLIM, 0x008b00),
+ REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54),
+ REG(ANA_TABLES_PTP_ID_LOW, 0x008b58),
+ REG(ANA_MSTI_STATE, 0x008e00),
+ REG(ANA_PORT_VLAN_CFG, 0x007000),
+ REG(ANA_PORT_DROP_CFG, 0x007004),
+ REG(ANA_PORT_QOS_CFG, 0x007008),
+ REG(ANA_PORT_VCAP_CFG, 0x00700c),
+ REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010),
+ REG(ANA_PORT_VCAP_S2_CFG, 0x00701c),
+ REG(ANA_PORT_PCP_DEI_MAP, 0x007020),
+ REG(ANA_PORT_CPU_FWD_CFG, 0x007060),
+ REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064),
+ REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068),
+ REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c),
+ REG(ANA_PORT_PORT_CFG, 0x007070),
+ REG(ANA_PORT_POL_CFG, 0x007074),
+ REG(ANA_PORT_PTP_CFG, 0x007078),
+ REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c),
+ REG(ANA_OAM_UPM_LM_CNT, 0x007c00),
+ REG(ANA_PORT_PTP_DLY2_CFG, 0x007080),
+ REG(ANA_PFC_PFC_CFG, 0x008800),
+ REG(ANA_PFC_PFC_TIMER, 0x008804),
+ REG(ANA_IPT_OAM_MEP_CFG, 0x008000),
+ REG(ANA_IPT_IPT, 0x008004),
+ REG(ANA_PPT_PPT, 0x008ac0),
+ REG(ANA_FID_MAP_FID_MAP, 0x000000),
+ REG(ANA_AGGR_CFG, 0x0090b4),
+ REG(ANA_CPUQ_CFG, 0x0090b8),
+ REG(ANA_CPUQ_CFG2, 0x0090bc),
+ REG(ANA_CPUQ_8021_CFG, 0x0090c0),
+ REG(ANA_DSCP_CFG, 0x009100),
+ REG(ANA_DSCP_REWR_CFG, 0x009200),
+ REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240),
+ REG(ANA_VCAP_RNG_VAL_CFG, 0x009260),
+ REG(ANA_VRAP_CFG, 0x009280),
+ REG(ANA_VRAP_HDR_DATA, 0x009284),
+ REG(ANA_VRAP_HDR_MASK, 0x009288),
+ REG(ANA_DISCARD_CFG, 0x00928c),
+ REG(ANA_FID_CFG, 0x009290),
+ REG(ANA_POL_PIR_CFG, 0x004000),
+ REG(ANA_POL_CIR_CFG, 0x004004),
+ REG(ANA_POL_MODE_CFG, 0x004008),
+ REG(ANA_POL_PIR_STATE, 0x00400c),
+ REG(ANA_POL_CIR_STATE, 0x004010),
+ REG(ANA_POL_STATE, 0x004014),
+ REG(ANA_POL_FLOWC, 0x008b80),
+ REG(ANA_POL_HYST, 0x008bec),
+ REG(ANA_POL_MISC_CFG, 0x008bf0),
+};
+EXPORT_SYMBOL(vsc7514_ana_regmap);
+
+const u32 vsc7514_qs_regmap[] = {
+ REG(QS_XTR_GRP_CFG, 0x000000),
+ REG(QS_XTR_RD, 0x000008),
+ REG(QS_XTR_FRM_PRUNING, 0x000010),
+ REG(QS_XTR_FLUSH, 0x000018),
+ REG(QS_XTR_DATA_PRESENT, 0x00001c),
+ REG(QS_XTR_CFG, 0x000020),
+ REG(QS_INJ_GRP_CFG, 0x000024),
+ REG(QS_INJ_WR, 0x00002c),
+ REG(QS_INJ_CTRL, 0x000034),
+ REG(QS_INJ_STATUS, 0x00003c),
+ REG(QS_INJ_ERR, 0x000040),
+ REG(QS_INH_DBG, 0x000048),
+};
+EXPORT_SYMBOL(vsc7514_qs_regmap);
+
+const u32 vsc7514_qsys_regmap[] = {
+ REG(QSYS_PORT_MODE, 0x011200),
+ REG(QSYS_SWITCH_PORT_MODE, 0x011234),
+ REG(QSYS_STAT_CNT_CFG, 0x011264),
+ REG(QSYS_EEE_CFG, 0x011268),
+ REG(QSYS_EEE_THRES, 0x011294),
+ REG(QSYS_IGR_NO_SHARING, 0x011298),
+ REG(QSYS_EGR_NO_SHARING, 0x01129c),
+ REG(QSYS_SW_STATUS, 0x0112a0),
+ REG(QSYS_EXT_CPU_CFG, 0x0112d0),
+ REG(QSYS_PAD_CFG, 0x0112d4),
+ REG(QSYS_CPU_GROUP_MAP, 0x0112d8),
+ REG(QSYS_QMAP, 0x0112dc),
+ REG(QSYS_ISDX_SGRP, 0x011400),
+ REG(QSYS_TIMED_FRAME_ENTRY, 0x014000),
+ REG(QSYS_TFRM_MISC, 0x011310),
+ REG(QSYS_TFRM_PORT_DLY, 0x011314),
+ REG(QSYS_TFRM_TIMER_CFG_1, 0x011318),
+ REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c),
+ REG(QSYS_TFRM_TIMER_CFG_3, 0x011320),
+ REG(QSYS_TFRM_TIMER_CFG_4, 0x011324),
+ REG(QSYS_TFRM_TIMER_CFG_5, 0x011328),
+ REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c),
+ REG(QSYS_TFRM_TIMER_CFG_7, 0x011330),
+ REG(QSYS_TFRM_TIMER_CFG_8, 0x011334),
+ REG(QSYS_RED_PROFILE, 0x011338),
+ REG(QSYS_RES_QOS_MODE, 0x011378),
+ REG(QSYS_RES_CFG, 0x012000),
+ REG(QSYS_RES_STAT, 0x012004),
+ REG(QSYS_EGR_DROP_MODE, 0x01137c),
+ REG(QSYS_EQ_CTRL, 0x011380),
+ REG(QSYS_EVENTS_CORE, 0x011384),
+ REG(QSYS_CIR_CFG, 0x000000),
+ REG(QSYS_EIR_CFG, 0x000004),
+ REG(QSYS_SE_CFG, 0x000008),
+ REG(QSYS_SE_DWRR_CFG, 0x00000c),
+ REG(QSYS_SE_CONNECT, 0x00003c),
+ REG(QSYS_SE_DLB_SENSE, 0x000040),
+ REG(QSYS_CIR_STATE, 0x000044),
+ REG(QSYS_EIR_STATE, 0x000048),
+ REG(QSYS_SE_STATE, 0x00004c),
+ REG(QSYS_HSCH_MISC_CFG, 0x011388),
+};
+EXPORT_SYMBOL(vsc7514_qsys_regmap);
+
+const u32 vsc7514_rew_regmap[] = {
+ REG(REW_PORT_VLAN_CFG, 0x000000),
+ REG(REW_TAG_CFG, 0x000004),
+ REG(REW_PORT_CFG, 0x000008),
+ REG(REW_DSCP_CFG, 0x00000c),
+ REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
+ REG(REW_PTP_CFG, 0x000050),
+ REG(REW_PTP_DLY1_CFG, 0x000054),
+ REG(REW_DSCP_REMAP_DP1_CFG, 0x000690),
+ REG(REW_DSCP_REMAP_CFG, 0x000790),
+ REG(REW_STAT_CFG, 0x000890),
+ REG(REW_PPT, 0x000680),
+};
+EXPORT_SYMBOL(vsc7514_rew_regmap);
+
+const u32 vsc7514_sys_regmap[] = {
+ REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
+ REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
+ REG(SYS_COUNT_RX_SHORTS, 0x000010),
+ REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
+ REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
+ REG(SYS_COUNT_RX_64, 0x000024),
+ REG(SYS_COUNT_RX_65_127, 0x000028),
+ REG(SYS_COUNT_RX_128_255, 0x00002c),
+ REG(SYS_COUNT_RX_256_1023, 0x000030),
+ REG(SYS_COUNT_RX_1024_1526, 0x000034),
+ REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_PAUSE, 0x00003c),
+ REG(SYS_COUNT_RX_CONTROL, 0x000040),
+ REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
+ REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
+ REG(SYS_COUNT_TX_COLLISION, 0x000110),
+ REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
+ REG(SYS_COUNT_TX_64, 0x00011c),
+ REG(SYS_COUNT_TX_65_127, 0x000120),
+ REG(SYS_COUNT_TX_128_511, 0x000124),
+ REG(SYS_COUNT_TX_512_1023, 0x000128),
+ REG(SYS_COUNT_TX_1024_1526, 0x00012c),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000130),
+ REG(SYS_COUNT_TX_AGING, 0x000170),
+ REG(SYS_RESET_CFG, 0x000508),
+ REG(SYS_CMID, 0x00050c),
+ REG(SYS_VLAN_ETYPE_CFG, 0x000510),
+ REG(SYS_PORT_MODE, 0x000514),
+ REG(SYS_FRONT_PORT_MODE, 0x000548),
+ REG(SYS_FRM_AGING, 0x000574),
+ REG(SYS_STAT_CFG, 0x000578),
+ REG(SYS_SW_STATUS, 0x00057c),
+ REG(SYS_MISC_CFG, 0x0005ac),
+ REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0),
+ REG(SYS_REW_MAC_LOW_CFG, 0x0005dc),
+ REG(SYS_CM_ADDR, 0x000500),
+ REG(SYS_CM_DATA, 0x000504),
+ REG(SYS_PAUSE_CFG, 0x000608),
+ REG(SYS_PAUSE_TOT_CFG, 0x000638),
+ REG(SYS_ATOP, 0x00063c),
+ REG(SYS_ATOP_TOT_CFG, 0x00066c),
+ REG(SYS_MAC_FC_CFG, 0x000670),
+ REG(SYS_MMGT, 0x00069c),
+ REG(SYS_MMGT_FAST, 0x0006a0),
+ REG(SYS_EVENTS_DIF, 0x0006a4),
+ REG(SYS_EVENTS_CORE, 0x0006b4),
+ REG(SYS_CNT, 0x000000),
+ REG(SYS_PTP_STATUS, 0x0006b8),
+ REG(SYS_PTP_TXSTAMP, 0x0006bc),
+ REG(SYS_PTP_NXT, 0x0006c0),
+ REG(SYS_PTP_CFG, 0x0006c4),
+};
+EXPORT_SYMBOL(vsc7514_sys_regmap);
+
+const u32 vsc7514_vcap_regmap[] = {
+ /* VCAP_CORE_CFG */
+ REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
+ REG(VCAP_CORE_MV_CFG, 0x000004),
+ /* VCAP_CORE_CACHE */
+ REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
+ REG(VCAP_CACHE_MASK_DAT, 0x000108),
+ REG(VCAP_CACHE_ACTION_DAT, 0x000208),
+ REG(VCAP_CACHE_CNT_DAT, 0x000308),
+ REG(VCAP_CACHE_TG_DAT, 0x000388),
+ /* VCAP_CONST */
+ REG(VCAP_CONST_VCAP_VER, 0x000398),
+ REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
+ REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
+ REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
+ REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
+ REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
+ REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
+ REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
+ REG(VCAP_CONST_CORE_CNT, 0x0003b8),
+ REG(VCAP_CONST_IF_CNT, 0x0003bc),
+};
+EXPORT_SYMBOL(vsc7514_vcap_regmap);
+
+const u32 vsc7514_ptp_regmap[] = {
+ REG(PTP_PIN_CFG, 0x000000),
+ REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
+ REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
+ REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
+ REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
+ REG(PTP_CFG_MISC, 0x0000a0),
+ REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
+ REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
+};
+EXPORT_SYMBOL(vsc7514_ptp_regmap);
+
+const u32 vsc7514_dev_gmii_regmap[] = {
+ REG(DEV_CLOCK_CFG, 0x0),
+ REG(DEV_PORT_MISC, 0x4),
+ REG(DEV_EVENTS, 0x8),
+ REG(DEV_EEE_CFG, 0xc),
+ REG(DEV_RX_PATH_DELAY, 0x10),
+ REG(DEV_TX_PATH_DELAY, 0x14),
+ REG(DEV_PTP_PREDICT_CFG, 0x18),
+ REG(DEV_MAC_ENA_CFG, 0x1c),
+ REG(DEV_MAC_MODE_CFG, 0x20),
+ REG(DEV_MAC_MAXLEN_CFG, 0x24),
+ REG(DEV_MAC_TAGS_CFG, 0x28),
+ REG(DEV_MAC_ADV_CHK_CFG, 0x2c),
+ REG(DEV_MAC_IFG_CFG, 0x30),
+ REG(DEV_MAC_HDX_CFG, 0x34),
+ REG(DEV_MAC_DBG_CFG, 0x38),
+ REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
+ REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
+ REG(DEV_MAC_STICKY, 0x44),
+ REG(PCS1G_CFG, 0x48),
+ REG(PCS1G_MODE_CFG, 0x4c),
+ REG(PCS1G_SD_CFG, 0x50),
+ REG(PCS1G_ANEG_CFG, 0x54),
+ REG(PCS1G_ANEG_NP_CFG, 0x58),
+ REG(PCS1G_LB_CFG, 0x5c),
+ REG(PCS1G_DBG_CFG, 0x60),
+ REG(PCS1G_CDET_CFG, 0x64),
+ REG(PCS1G_ANEG_STATUS, 0x68),
+ REG(PCS1G_ANEG_NP_STATUS, 0x6c),
+ REG(PCS1G_LINK_STATUS, 0x70),
+ REG(PCS1G_LINK_DOWN_CNT, 0x74),
+ REG(PCS1G_STICKY, 0x78),
+ REG(PCS1G_DEBUG_STATUS, 0x7c),
+ REG(PCS1G_LPI_CFG, 0x80),
+ REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84),
+ REG(PCS1G_LPI_STATUS, 0x88),
+ REG(PCS1G_TSTPAT_MODE_CFG, 0x8c),
+ REG(PCS1G_TSTPAT_STATUS, 0x90),
+ REG(DEV_PCS_FX100_CFG, 0x94),
+ REG(DEV_PCS_FX100_STATUS, 0x98),
+};
+EXPORT_SYMBOL(vsc7514_dev_gmii_regmap);
+
+const struct vcap_field vsc7514_vcap_es0_keys[] = {
+ [VCAP_ES0_EGR_PORT] = { 0, 4 },
+ [VCAP_ES0_IGR_PORT] = { 4, 4 },
+ [VCAP_ES0_RSV] = { 8, 2 },
+ [VCAP_ES0_L2_MC] = { 10, 1 },
+ [VCAP_ES0_L2_BC] = { 11, 1 },
+ [VCAP_ES0_VID] = { 12, 12 },
+ [VCAP_ES0_DP] = { 24, 1 },
+ [VCAP_ES0_PCP] = { 25, 3 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_es0_keys);
+
+const struct vcap_field vsc7514_vcap_es0_actions[] = {
+ [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2 },
+ [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1 },
+ [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2 },
+ [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1 },
+ [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2 },
+ [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2 },
+ [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2 },
+ [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1 },
+ [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2 },
+ [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2 },
+ [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12 },
+ [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3 },
+ [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1 },
+ [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12 },
+ [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3 },
+ [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1 },
+ [VCAP_ES0_ACT_RSV] = { 49, 24 },
+ [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_es0_actions);
+
+const struct vcap_field vsc7514_vcap_is1_keys[] = {
+ [VCAP_IS1_HK_TYPE] = { 0, 1 },
+ [VCAP_IS1_HK_LOOKUP] = { 1, 2 },
+ [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12 },
+ [VCAP_IS1_HK_RSV] = { 15, 9 },
+ [VCAP_IS1_HK_OAM_Y1731] = { 24, 1 },
+ [VCAP_IS1_HK_L2_MC] = { 25, 1 },
+ [VCAP_IS1_HK_L2_BC] = { 26, 1 },
+ [VCAP_IS1_HK_IP_MC] = { 27, 1 },
+ [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1 },
+ [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1 },
+ [VCAP_IS1_HK_TPID] = { 30, 1 },
+ [VCAP_IS1_HK_VID] = { 31, 12 },
+ [VCAP_IS1_HK_DEI] = { 43, 1 },
+ [VCAP_IS1_HK_PCP] = { 44, 3 },
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ [VCAP_IS1_HK_L2_SMAC] = { 47, 48 },
+ [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1 },
+ [VCAP_IS1_HK_ETYPE] = { 96, 16 },
+ [VCAP_IS1_HK_IP_SNAP] = { 112, 1 },
+ [VCAP_IS1_HK_IP4] = { 113, 1 },
+ /* Layer-3 Information */
+ [VCAP_IS1_HK_L3_FRAGMENT] = { 114, 1 },
+ [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = { 115, 1 },
+ [VCAP_IS1_HK_L3_OPTIONS] = { 116, 1 },
+ [VCAP_IS1_HK_L3_DSCP] = { 117, 6 },
+ [VCAP_IS1_HK_L3_IP4_SIP] = { 123, 32 },
+ /* Layer-4 Information */
+ [VCAP_IS1_HK_TCP_UDP] = { 155, 1 },
+ [VCAP_IS1_HK_TCP] = { 156, 1 },
+ [VCAP_IS1_HK_L4_SPORT] = { 157, 16 },
+ [VCAP_IS1_HK_L4_RNG] = { 173, 8 },
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1 },
+ [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12 },
+ [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1 },
+ [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3 },
+ [VCAP_IS1_HK_IP4_IP4] = { 64, 1 },
+ [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1 },
+ [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1 },
+ [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1 },
+ [VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6 },
+ [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32 },
+ [VCAP_IS1_HK_IP4_L3_IP4_SIP] = { 106, 32 },
+ [VCAP_IS1_HK_IP4_L3_PROTO] = { 138, 8 },
+ [VCAP_IS1_HK_IP4_TCP_UDP] = { 146, 1 },
+ [VCAP_IS1_HK_IP4_TCP] = { 147, 1 },
+ [VCAP_IS1_HK_IP4_L4_RNG] = { 148, 8 },
+ [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = { 156, 32 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_is1_keys);
+
+const struct vcap_field vsc7514_vcap_is1_actions[] = {
+ [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1 },
+ [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6 },
+ [VCAP_IS1_ACT_QOS_ENA] = { 7, 1 },
+ [VCAP_IS1_ACT_QOS_VAL] = { 8, 3 },
+ [VCAP_IS1_ACT_DP_ENA] = { 11, 1 },
+ [VCAP_IS1_ACT_DP_VAL] = { 12, 1 },
+ [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8 },
+ [VCAP_IS1_ACT_PAG_VAL] = { 21, 8 },
+ [VCAP_IS1_ACT_RSV] = { 29, 9 },
+ /* The fields below are incorrectly shifted by 2 in the manual */
+ [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1 },
+ [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12 },
+ [VCAP_IS1_ACT_FID_SEL] = { 51, 2 },
+ [VCAP_IS1_ACT_FID_VAL] = { 53, 13 },
+ [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1 },
+ [VCAP_IS1_ACT_PCP_VAL] = { 67, 3 },
+ [VCAP_IS1_ACT_DEI_VAL] = { 70, 1 },
+ [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1 },
+ [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2 },
+ [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4 },
+ [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_is1_actions);
+
+const struct vcap_field vsc7514_vcap_is2_keys[] = {
+ /* Common: 46 bits */
+ [VCAP_IS2_TYPE] = { 0, 4 },
+ [VCAP_IS2_HK_FIRST] = { 4, 1 },
+ [VCAP_IS2_HK_PAG] = { 5, 8 },
+ [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12 },
+ [VCAP_IS2_HK_RSV2] = { 25, 1 },
+ [VCAP_IS2_HK_HOST_MATCH] = { 26, 1 },
+ [VCAP_IS2_HK_L2_MC] = { 27, 1 },
+ [VCAP_IS2_HK_L2_BC] = { 28, 1 },
+ [VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1 },
+ [VCAP_IS2_HK_VID] = { 30, 12 },
+ [VCAP_IS2_HK_DEI] = { 42, 1 },
+ [VCAP_IS2_HK_PCP] = { 43, 3 },
+ /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+ [VCAP_IS2_HK_L2_DMAC] = { 46, 48 },
+ [VCAP_IS2_HK_L2_SMAC] = { 94, 48 },
+ /* MAC_ETYPE (TYPE=000) */
+ [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = { 142, 16 },
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = { 158, 16 },
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = { 174, 8 },
+ [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = { 182, 3 },
+ /* MAC_LLC (TYPE=001) */
+ [VCAP_IS2_HK_MAC_LLC_L2_LLC] = { 142, 40 },
+ /* MAC_SNAP (TYPE=010) */
+ [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = { 142, 40 },
+ /* MAC_ARP (TYPE=011) */
+ [VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48 },
+ [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1 },
+ [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1 },
+ [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1 },
+ [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1 },
+ [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1 },
+ [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1 },
+ [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 100, 2 },
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = { 102, 32 },
+ [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = { 134, 32 },
+ [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = { 166, 1 },
+ /* IP4_TCP_UDP / IP4_OTHER common */
+ [VCAP_IS2_HK_IP4] = { 46, 1 },
+ [VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1 },
+ [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1 },
+ [VCAP_IS2_HK_L3_OPTIONS] = { 49, 1 },
+ [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1 },
+ [VCAP_IS2_HK_L3_TOS] = { 51, 8 },
+ [VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32 },
+ [VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32 },
+ [VCAP_IS2_HK_DIP_EQ_SIP] = { 123, 1 },
+ /* IP4_TCP_UDP (TYPE=100) */
+ [VCAP_IS2_HK_TCP] = { 124, 1 },
+ [VCAP_IS2_HK_L4_DPORT] = { 125, 16 },
+ [VCAP_IS2_HK_L4_SPORT] = { 141, 16 },
+ [VCAP_IS2_HK_L4_RNG] = { 157, 8 },
+ [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = { 165, 1 },
+ [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = { 166, 1 },
+ [VCAP_IS2_HK_L4_FIN] = { 167, 1 },
+ [VCAP_IS2_HK_L4_SYN] = { 168, 1 },
+ [VCAP_IS2_HK_L4_RST] = { 169, 1 },
+ [VCAP_IS2_HK_L4_PSH] = { 170, 1 },
+ [VCAP_IS2_HK_L4_ACK] = { 171, 1 },
+ [VCAP_IS2_HK_L4_URG] = { 172, 1 },
+ [VCAP_IS2_HK_L4_1588_DOM] = { 173, 8 },
+ [VCAP_IS2_HK_L4_1588_VER] = { 181, 4 },
+ /* IP4_OTHER (TYPE=101) */
+ [VCAP_IS2_HK_IP4_L3_PROTO] = { 124, 8 },
+ [VCAP_IS2_HK_L3_PAYLOAD] = { 132, 56 },
+ /* IP6_STD (TYPE=110) */
+ [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1 },
+ [VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128 },
+ [VCAP_IS2_HK_IP6_L3_PROTO] = { 175, 8 },
+ /* OAM (TYPE=111) */
+ [VCAP_IS2_HK_OAM_MEL_FLAGS] = { 142, 7 },
+ [VCAP_IS2_HK_OAM_VER] = { 149, 5 },
+ [VCAP_IS2_HK_OAM_OPCODE] = { 154, 8 },
+ [VCAP_IS2_HK_OAM_FLAGS] = { 162, 8 },
+ [VCAP_IS2_HK_OAM_MEPID] = { 170, 16 },
+ [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = { 186, 1 },
+ [VCAP_IS2_HK_OAM_IS_Y1731] = { 187, 1 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_is2_keys);
+
+const struct vcap_field vsc7514_vcap_is2_actions[] = {
+ [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1 },
+ [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1 },
+ [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3 },
+ [VCAP_IS2_ACT_MASK_MODE] = { 5, 2 },
+ [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1 },
+ [VCAP_IS2_ACT_LRN_DIS] = { 8, 1 },
+ [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1 },
+ [VCAP_IS2_ACT_POLICE_IDX] = { 10, 9 },
+ [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1 },
+ [VCAP_IS2_ACT_PORT_MASK] = { 20, 11 },
+ [VCAP_IS2_ACT_REW_OP] = { 31, 9 },
+ [VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1 },
+ [VCAP_IS2_ACT_RSV] = { 41, 2 },
+ [VCAP_IS2_ACT_ACL_ID] = { 43, 6 },
+ [VCAP_IS2_ACT_HIT_CNT] = { 49, 32 },
+};
+EXPORT_SYMBOL(vsc7514_vcap_is2_actions);
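
The key and action tables above are plain { offset, length } bit descriptors
into the VCAP IS2 entry stream. A minimal user-space sketch of how such a
descriptor can be applied follows; the struct layout, helper, and LSB-first
bit order are illustrative assumptions, not the ocelot driver's API:

    #include <stdint.h>
    #include <stdio.h>

    struct vcap_field {        /* mirrors the { offset, length } pairs above */
            uint16_t offset;   /* bit offset into the key stream */
            uint16_t length;   /* field width in bits */
    };

    /* Extract up to 32 bits of one field from a byte stream, LSB first.
     * Hypothetical helper; the real driver works on register words. */
    static uint32_t vcap_get_field(const uint8_t *key, struct vcap_field f)
    {
            uint32_t val = 0;

            for (uint16_t i = 0; i < f.length && i < 32; i++) {
                    uint16_t bit = f.offset + i;

                    val |= (uint32_t)((key[bit / 8] >> (bit % 8)) & 1) << i;
            }
            return val;
    }

    int main(void)
    {
            uint8_t key[32] = { 0 };
            struct vcap_field pcp = { 43, 3 };  /* VCAP_IS2_HK_PCP above */

            key[5] |= 0x5 << 3;                 /* plant PCP = 5 at bit 43 */
            printf("PCP = %u\n", vcap_get_field(key, pcp));
            return 0;
    }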
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 5736fcdafd7a..50ac3ee2577a 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1704,7 +1704,9 @@ myri10ge_set_pauseparam(struct net_device *netdev,
static void
myri10ge_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
@@ -3740,7 +3742,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct myri10ge_priv *mgp;
struct device *dev = &pdev->dev;
int status = -ENXIO;
- int dac_enabled;
unsigned hdr_offset, ss_offset;
static int board_number;
@@ -3780,15 +3781,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
myri10ge_mask_surprise_down(pdev);
pci_set_master(pdev);
- dac_enabled = 1;
status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (status != 0) {
- dac_enabled = 0;
- dev_err(&pdev->dev,
- "64-bit pci address mask was refused, trying 32-bit\n");
- status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- }
- if (status != 0) {
dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
goto abort_with_enabled;
}
@@ -3872,10 +3866,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- netdev->features = netdev->hw_features;
-
- if (dac_enabled)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features |= mgp->features;
if (mgp->fw_ver_tiny < 37)
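
This hunk follows the tree-wide DMA cleanup: dma_set_mask_and_coherent() with
a 64-bit mask in practice fails only on machines where DMA is unusable
altogether, so the 32-bit retry and the dac_enabled bookkeeping carried no
information, and NETIF_F_HIGHDMA can be advertised unconditionally. A
condensed sketch of the resulting probe idiom (kernel-style fragment, not
standalone code; identifiers as in the hunk above):

    /* One call, one error path: no 32-bit fallback needed. */
    status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (status) {
            dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
            goto abort_with_enabled;
    }

    /* ...and at netdev setup, HIGHDMA is always safe to claim: */
    netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;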
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index d74a80f010c5..3f371faeb6d0 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -114,6 +114,7 @@ static int sonic_probe1(struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
int err = -ENODEV;
int i;
+ unsigned char addr[ETH_ALEN];
if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string))
return -EBUSY;
@@ -143,9 +144,10 @@ static int sonic_probe1(struct net_device *dev)
SONIC_WRITE(SONIC_CEP,0);
for (i=0; i<3; i++) {
val = SONIC_READ(SONIC_CAP0-i);
- dev->dev_addr[i*2] = val;
- dev->dev_addr[i*2+1] = val >> 8;
+ addr[i*2] = val;
+ addr[i*2+1] = val >> 8;
}
+ eth_hw_addr_set(dev, addr);
lp->dma_bitmode = SONIC_BITMODE32;
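
All three natsemi sonic conversions in this diff follow the same rule: with
dev->dev_addr now const, the address is staged in a local buffer, mutated
there if need be, and installed through the one sanctioned writer. A short
sketch of the pattern; read_mac_from_prom() and needs_bit_reversal are
hypothetical stand-ins, while eth_hw_addr_set() and ETH_ALEN are the real
<linux/etherdevice.h> interface:

    u8 addr[ETH_ALEN];

    read_mac_from_prom(addr);          /* hypothetical hardware read */
    if (needs_bit_reversal)            /* e.g. the macsonic case below */
            bit_reverse_addr(addr);    /* mutate the copy, never dev->dev_addr */
    eth_hw_addr_set(dev, addr);        /* single writer for the const address */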
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 8709d700e15a..b16f7c830f9b 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -203,6 +203,7 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
const int prom_addr = ONBOARD_SONIC_PROM_BASE;
unsigned short val;
+ u8 addr[ETH_ALEN];
/*
* On NuBus boards we can sometimes look in the ROM resources.
@@ -213,7 +214,8 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
int i;
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = SONIC_READ_PROM(i);
+ addr[i] = SONIC_READ_PROM(i);
+ eth_hw_addr_set(dev, addr);
if (!INVALID_MAC(dev->dev_addr))
return;
@@ -222,7 +224,8 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
* source has a rather long and detailed historical account of
* why this is so.
*/
- bit_reverse_addr(dev->dev_addr);
+ bit_reverse_addr(addr);
+ eth_hw_addr_set(dev, addr);
if (!INVALID_MAC(dev->dev_addr))
return;
@@ -243,14 +246,15 @@ static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
SONIC_WRITE(SONIC_CEP, 15);
val = SONIC_READ(SONIC_CAP2);
- dev->dev_addr[5] = val >> 8;
- dev->dev_addr[4] = val & 0xff;
+ addr[5] = val >> 8;
+ addr[4] = val & 0xff;
val = SONIC_READ(SONIC_CAP1);
- dev->dev_addr[3] = val >> 8;
- dev->dev_addr[2] = val & 0xff;
+ addr[3] = val >> 8;
+ addr[2] = val & 0xff;
val = SONIC_READ(SONIC_CAP0);
- dev->dev_addr[1] = val >> 8;
- dev->dev_addr[0] = val & 0xff;
+ addr[1] = val >> 8;
+ addr[0] = val & 0xff;
+ eth_hw_addr_set(dev, addr);
if (!INVALID_MAC(dev->dev_addr))
return;
@@ -355,13 +359,16 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
static int mac_sonic_nubus_ethernet_addr(struct net_device *dev,
unsigned long prom_addr, int id)
{
+ u8 addr[ETH_ALEN];
int i;
+
for(i = 0; i < 6; i++)
- dev->dev_addr[i] = SONIC_READ_PROM(i);
+ addr[i] = SONIC_READ_PROM(i);
/* Some of the addresses are bit-reversed */
if (id != MACSONIC_DAYNA)
- bit_reverse_addr(dev->dev_addr);
+ bit_reverse_addr(addr);
+ eth_hw_addr_set(dev, addr);
return 0;
}
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 0a02d8bd0a3e..52fef34d43f9 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -127,6 +127,7 @@ static int sonic_probe1(struct net_device *dev)
unsigned int base_addr = dev->base_addr;
int i;
int err = 0;
+ unsigned char addr[ETH_ALEN];
if (!request_mem_region(base_addr, 0x100, xtsonic_string))
return -EBUSY;
@@ -163,9 +164,10 @@ static int sonic_probe1(struct net_device *dev)
for (i=0; i<3; i++) {
unsigned int val = SONIC_READ(SONIC_CAP0-i);
- dev->dev_addr[i*2] = val;
- dev->dev_addr[i*2+1] = val >> 8;
+ addr[i*2] = val;
+ addr[i*2+1] = val >> 8;
}
+ eth_hw_addr_set(dev, addr);
lp->dma_bitmode = SONIC_BITMODE32;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index d1c32c65db05..6dd451adc331 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5461,8 +5461,11 @@ static int s2io_ethtool_set_led(struct net_device *dev,
return 0;
}
-static void s2io_ethtool_gringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static void
+s2io_ethtool_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct s2io_nic *sp = netdev_priv(dev);
int i, tx_desc_count = 0, rx_desc_count = 0;
@@ -7655,7 +7658,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
struct s2io_nic *sp;
struct net_device *dev;
int i, j, ret;
- int dma_flag = false;
u32 mac_up, mac_down;
u64 val64 = 0, tmp64 = 0;
struct XENA_dev_config __iomem *bar0 = NULL;
@@ -7677,17 +7679,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
return ret;
}
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
- dma_flag = true;
- if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- DBG_PRINT(ERR_DBG,
- "Unable to obtain 64bit DMA for coherent allocations\n");
- pci_disable_device(pdev);
- return -ENOMEM;
- }
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
} else {
pci_disable_device(pdev);
return -ENOMEM;
@@ -7717,7 +7710,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
sp = netdev_priv(dev);
sp->dev = dev;
sp->pdev = pdev;
- sp->high_dma_flag = dma_flag;
sp->device_enabled_once = false;
if (rx_ring_mode == 1)
sp->rxd_mode = RXD_MODE_1;
@@ -7865,9 +7857,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO;
dev->features |= dev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
- if (sp->high_dma_flag == true)
- dev->features |= NETIF_F_HIGHDMA;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HIGHDMA;
dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
INIT_WORK(&sp->set_link_task, s2io_set_link);
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index a4266d1544ab..cb7080eb5912 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -873,7 +873,6 @@ struct s2io_nic {
struct mac_addr def_mac_addr[256];
struct net_device_stats stats;
- int high_dma_flag;
int device_enabled_once;
char name[60];
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1969009a91e7..aa7c093f1f91 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3159,10 +3159,6 @@ static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
if (copy_from_user(&config, data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
/* Transmit HW Timestamp not supported */
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
@@ -3353,7 +3349,7 @@ static const struct net_device_ops vxge_netdev_ops = {
};
static int vxge_device_register(struct __vxge_hw_device *hldev,
- struct vxge_config *config, int high_dma,
+ struct vxge_config *config,
int no_of_vpath, struct vxgedev **vdev_out)
{
struct net_device *ndev;
@@ -3425,11 +3421,7 @@ static int vxge_device_register(struct __vxge_hw_device *hldev,
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
"%s : checksumming enabled", __func__);
- if (high_dma) {
- ndev->features |= NETIF_F_HIGHDMA;
- vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
- "%s : using High DMA", __func__);
- }
+ ndev->features |= NETIF_F_HIGHDMA;
/* MTU range: 68 - 9600 */
ndev->min_mtu = VXGE_HW_MIN_MTU;
@@ -4286,7 +4278,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
struct __vxge_hw_device *hldev;
enum vxge_hw_status status;
int ret;
- int high_dma = 0;
u64 vpath_mask = 0;
struct vxgedev *vdev;
struct vxge_config *ll_config = NULL;
@@ -4376,22 +4367,9 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit0;
}
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
vxge_debug_ll_config(VXGE_TRACE,
"%s : using 64bit DMA", __func__);
-
- high_dma = 1;
-
- if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- vxge_debug_init(VXGE_ERR,
- "%s : unable to obtain 64bit DMA for "
- "consistent allocations", __func__);
- ret = -ENOMEM;
- goto _exit1;
- }
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- vxge_debug_ll_config(VXGE_TRACE,
- "%s : using 32bit DMA", __func__);
} else {
ret = -ENOMEM;
goto _exit1;
@@ -4559,8 +4537,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
- ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
- &vdev);
+ ret = vxge_device_register(hldev, ll_config, no_of_vpath, &vdev);
if (ret) {
ret = -EINVAL;
goto _exit4;
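
The dropped config.flags check in vxge_hwtstamp_set() above is not a behavior
change for valid requests: since the hwtstamp rework that introduced
HWTSTAMP_FLAG_BONDED_PHC_INDEX, the core ioctl path validates the flags field
before the driver ever sees it, so per-driver "reserved for future extensions"
rejections would only break new core-defined flags. The same deletion appears
in pch_gbe and qede_ptp later in this diff; afterwards a driver handler
reduces to this shape (kernel-style fragment, illustrative error codes):

    if (copy_from_user(&config, data, sizeof(config)))
            return -EFAULT;

    /* Act on tx_type/rx_filter only; the core already vetted config.flags. */
    switch (config.tx_type) {
    case HWTSTAMP_TX_OFF:
            break;
    default:
            return -ERANGE;    /* no HW TX timestamping on this device */
    }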
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 2af9faee96c5..f448c5682594 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -43,15 +43,14 @@ static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
struct circ_buf *ring;
ring = &priv->stats_ids.free_list;
- /* Check if buffer is full. */
- if (!CIRC_SPACE(ring->head, ring->tail,
- priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
- NFP_FL_STATS_ELEM_RS + 1))
+ /* Check if the buffer is full; stats_ring_size must be a power of 2 */
+ if (!CIRC_SPACE(ring->head, ring->tail, priv->stats_ring_size))
return -ENOBUFS;
- memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
- ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
- (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
+ /* Each increment of head advances one NFP_FL_STATS_ELEM_RS-sized element */
+ memcpy(&ring->buf[ring->head * NFP_FL_STATS_ELEM_RS],
+ &stats_context_id, NFP_FL_STATS_ELEM_RS);
+ ring->head = (ring->head + 1) & (priv->stats_ring_size - 1);
return 0;
}
@@ -86,11 +85,14 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
return -ENOENT;
}
- memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
+ /* Each increment of tail advances one NFP_FL_STATS_ELEM_RS-sized element */
+ memcpy(&temp_stats_id, &ring->buf[ring->tail * NFP_FL_STATS_ELEM_RS],
+ NFP_FL_STATS_ELEM_RS);
*stats_context_id = temp_stats_id;
- memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
- ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
- (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);
+ memcpy(&ring->buf[ring->tail * NFP_FL_STATS_ELEM_RS], &freed_stats_id,
+ NFP_FL_STATS_ELEM_RS);
+ /* stats_ring_size must be a power of 2 */
+ ring->tail = (ring->tail + 1) & (priv->stats_ring_size - 1);
return 0;
}
@@ -138,13 +140,18 @@ static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
struct circ_buf *ring;
ring = &priv->mask_ids.mask_id_free_list;
- /* Checking if buffer is full. */
+ /* Check if the buffer is full;
+ * NFP_FLOWER_MASK_ENTRY_RS must be a power of 2
+ */
if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
return -ENOBUFS;
- memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
- ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
- (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);
+ /* Each increment of head advances one element of
+ * NFP_FLOWER_MASK_ELEMENT_RS bytes
+ */
+ memcpy(&ring->buf[ring->head * NFP_FLOWER_MASK_ELEMENT_RS], &mask_id,
+ NFP_FLOWER_MASK_ELEMENT_RS);
+ ring->head = (ring->head + 1) & (NFP_FLOWER_MASK_ENTRY_RS - 1);
priv->mask_ids.last_used[mask_id] = ktime_get();
@@ -171,7 +178,11 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
if (ring->head == ring->tail)
goto err_not_found;
- memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
+ /* Each increment of tail advances one element of
+ * NFP_FLOWER_MASK_ELEMENT_RS bytes
+ */
+ memcpy(&temp_id, &ring->buf[ring->tail * NFP_FLOWER_MASK_ELEMENT_RS],
+ NFP_FLOWER_MASK_ELEMENT_RS);
*mask_id = temp_id;
reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
@@ -180,9 +191,10 @@ static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
if (ktime_before(ktime_get(), reuse_timeout))
goto err_not_found;
- memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
- ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
- (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);
+ memcpy(&ring->buf[ring->tail * NFP_FLOWER_MASK_ELEMENT_RS], &freed_id,
+ NFP_FLOWER_MASK_ELEMENT_RS);
+ /* NFP_FLOWER_MASK_ENTRY_RS must be a power of 2 */
+ ring->tail = (ring->tail + 1) & (NFP_FLOWER_MASK_ENTRY_RS - 1);
return 0;
@@ -338,11 +350,6 @@ int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
nfp_flow->meta.mask_len,
&nfp_flow->meta.flags, &new_mask_id)) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
- if (nfp_release_stats_entry(app, stats_cxt)) {
- NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
- err = -EINVAL;
- goto err_remove_rhash;
- }
err = -ENOENT;
goto err_remove_rhash;
}
@@ -359,21 +366,6 @@ int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
check_entry = nfp_flower_search_fl_table(app, cookie, netdev);
if (check_entry) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
- if (nfp_release_stats_entry(app, stats_cxt)) {
- NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
- err = -EINVAL;
- goto err_remove_mask;
- }
-
- if (!nfp_flow->pre_tun_rule.dev &&
- !nfp_check_mask_remove(app, nfp_flow->mask_data,
- nfp_flow->meta.mask_len,
- NULL, &new_mask_id)) {
- NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
- err = -EINVAL;
- goto err_remove_mask;
- }
-
err = -EEXIST;
goto err_remove_mask;
}
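
The rework above changes head/tail from byte offsets into element counters, so
wraparound becomes a single mask, which is why the ring sizes must now be
powers of 2. A self-contained model of the new indexing; the names and the
simplified full/empty test are illustrative, not the nfp API:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ELEM_SZ 4u   /* bytes per entry, like NFP_FL_STATS_ELEM_RS */
    #define RING_SZ 8u   /* entries; must be a power of 2 */

    static uint8_t buf[RING_SZ * ELEM_SZ];
    static unsigned int head, tail;   /* element counters, not byte offsets */

    static int ring_push(uint32_t id)
    {
            if (((head + 1) & (RING_SZ - 1)) == tail)
                    return -1;                        /* full */
            memcpy(&buf[head * ELEM_SZ], &id, ELEM_SZ);
            head = (head + 1) & (RING_SZ - 1);        /* mask, not %-of-bytes */
            return 0;
    }

    static int ring_pop(uint32_t *id)
    {
            if (head == tail)
                    return -1;                        /* empty */
            memcpy(id, &buf[tail * ELEM_SZ], ELEM_SZ);
            tail = (tail + 1) & (RING_SZ - 1);
            return 0;
    }

    int main(void)
    {
            uint32_t id;

            ring_push(42);
            if (!ring_pop(&id))
                    printf("popped %u\n", id);
            return 0;
    }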
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 224089d04d98..f97eff5afd12 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1867,6 +1867,9 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
+ if (!netdev)
+ return -EOPNOTSUPP;
+
if (!nfp_fl_is_netdev_to_offload(netdev))
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 850bfdf83d0a..79257ec41987 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1944,7 +1944,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
xdp_prog, act);
continue;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(dp->netdev, xdp_prog, act);
@@ -4097,7 +4097,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = nn->max_mtu;
- netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
+ netif_set_gso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netif_carrier_off(netdev);
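
bpf_warn_invalid_xdp_action() gained net_device and bpf_prog arguments in this
cycle so the warning can name the device and program that returned a bogus
verdict; every XDP driver's default: arm was updated mechanically, as in the
hunk above. The verdict switch keeps its usual shape (kernel-style fragment,
identifiers from the nfp hunk):

    switch (act) {
    case XDP_PASS:
    case XDP_TX:
    case XDP_REDIRECT:
            break;             /* normal verdicts handled by the fast path */
    default:
            bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
            fallthrough;
    case XDP_ABORTED:
            trace_xdp_exception(dp->netdev, xdp_prog, act);
            fallthrough;
    case XDP_DROP:
            break;             /* recycle the buffer */
    }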
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index cf7882933993..e0c27471bcdb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -381,7 +381,9 @@ err_bad_set:
}
static void nfp_net_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nfp_net *nn = netdev_priv(netdev);
@@ -406,7 +408,9 @@ static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
}
static int nfp_net_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nfp_net *nn = netdev_priv(netdev);
u32 rxd_cnt, txd_cnt;
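
All the get/set_ringparam churn in this diff stems from one ethtool core
change: the ops now take a struct kernel_ethtool_ringparam, which carries
fields that never had slots in the UAPI struct (rx_buf_len being the first),
plus a netlink extack for precise error reporting. Converted drivers merely
grow the two parameters; a sketch of a minimal implementation, with the foo_*
names as hypothetical placeholders:

    static void foo_get_ringparam(struct net_device *netdev,
                                  struct ethtool_ringparam *ring,
                                  struct kernel_ethtool_ringparam *kernel_ring,
                                  struct netlink_ext_ack *extack)
    {
            struct foo_priv *priv = netdev_priv(netdev);  /* hypothetical */

            ring->rx_max_pending = FOO_MAX_RXD;           /* hypothetical */
            ring->tx_max_pending = FOO_MAX_TXD;
            ring->rx_pending = priv->rx_ring_size;
            ring->tx_pending = priv->tx_ring_size;
            /* kernel_ring and extack may stay unused until needed */
    }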
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 369f6ae700c7..181ac8e789a3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -286,8 +286,8 @@ nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower)
if (repr->dst->u.port_info.lower_dev != lower)
return;
- netdev->gso_max_size = lower->gso_max_size;
- netdev->gso_max_segs = lower->gso_max_segs;
+ netif_set_gso_max_size(netdev, lower->gso_max_size);
+ netif_set_gso_max_segs(netdev, lower->gso_max_segs);
netdev_update_features(netdev);
}
@@ -381,7 +381,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
/* Advertise but disable TSO by default. */
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
- netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
+ netif_set_gso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);
netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
netdev->features |= NETIF_F_LLTX;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 9b530d7509a4..660013f716d4 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -4651,7 +4651,10 @@ static int nv_nway_reset(struct net_device *dev)
return ret;
}
-static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+static void nv_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fe_priv *np = netdev_priv(dev);
@@ -4662,7 +4665,10 @@ static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* r
ring->tx_pending = np->tx_ring_size;
}
-static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+static int nv_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 660b07cb5b92..84cc79e928c8 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -270,9 +270,13 @@ static int pch_gbe_nway_reset(struct net_device *netdev)
* pch_gbe_get_ringparam - Report ring sizes
* @netdev: Network interface device structure
* @ring: Ring param structure
+ * @kernel_ring: Ring external param structure
+ * @extack: netlink handle
*/
static void pch_gbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_tx_ring *txdr = adapter->tx_ring;
@@ -288,12 +292,16 @@ static void pch_gbe_get_ringparam(struct net_device *netdev,
* pch_gbe_set_ringparam - Set ring sizes
* @netdev: Network interface device structure
* @ring: Ring param structure
+ * @kernel_ring: Ring external param structure
+ * @extack: netlink handle
* Returns
* 0: Successful.
* Negative value: Failed.
*/
static int pch_gbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_tx_ring *txdr, *tx_old;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 71d234291fc5..1dc40c537281 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -210,9 +210,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- if (cfg.flags) /* reserved for future extensions */
- return -EINVAL;
-
/* Get ieee1588's dev information */
pdev = adapter->ptp_pdev;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
index e1a304886a3c..4c7e0c991105 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
@@ -69,7 +69,9 @@ pasemi_mac_ethtool_set_msglevel(struct net_device *netdev,
static void
pasemi_mac_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct pasemi_mac *mac = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index c54d735b9e2e..386a5cf1e224 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -512,7 +512,9 @@ static int ionic_set_coalesce(struct net_device *netdev,
}
static void ionic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -523,7 +525,9 @@ static void ionic_get_ringparam(struct net_device *netdev,
}
static int ionic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_queue_params qparam;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 63f8a8163b5f..2ff7be17e5af 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3135,7 +3135,7 @@ int ionic_lif_init(struct ionic_lif *lif)
return -EINVAL;
}
- lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
+ lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
if (!lif->dbid_inuse) {
dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index a075643f5826..3c4a84ea6321 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -392,7 +392,9 @@ netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
static void
netxen_nic_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct netxen_adapter *adapter = netdev_priv(dev);
@@ -430,7 +432,9 @@ netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
static int
netxen_nic_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct netxen_adapter *adapter = netdev_priv(dev);
u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 452494f8c298..65e20693c549 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1036,12 +1036,12 @@ static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
- kfree(p_mngr->acquired[type].cid_map);
+ bitmap_free(p_mngr->acquired[type].cid_map);
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
for (vf = 0; vf < MAX_NUM_VFS; vf++) {
- kfree(p_mngr->acquired_vf[type][vf].cid_map);
+ bitmap_free(p_mngr->acquired_vf[type][vf].cid_map);
p_mngr->acquired_vf[type][vf].max_count = 0;
p_mngr->acquired_vf[type][vf].start_cid = 0;
}
@@ -1054,15 +1054,10 @@ qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
u32 cid_start,
u32 cid_count, struct qed_cid_acquired_map *p_map)
{
- u32 size;
-
if (!cid_count)
return 0;
- size = DIV_ROUND_UP(cid_count,
- sizeof(unsigned long) * BITS_PER_BYTE) *
- sizeof(unsigned long);
- p_map->cid_map = kzalloc(size, GFP_KERNEL);
+ p_map->cid_map = bitmap_zalloc(cid_count, GFP_KERNEL);
if (!p_map->cid_map)
return -ENOMEM;
@@ -1216,7 +1211,6 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
struct qed_cid_acquired_map *p_map;
struct qed_conn_type_cfg *p_cfg;
int type;
- u32 len;
/* Reset acquired cids */
for (type = 0; type < MAX_CONN_TYPES; type++) {
@@ -1225,11 +1219,7 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
p_cfg = &p_mngr->conn_cfg[type];
if (p_cfg->cid_count) {
p_map = &p_mngr->acquired[type];
- len = DIV_ROUND_UP(p_map->max_count,
- sizeof(unsigned long) *
- BITS_PER_BYTE) *
- sizeof(unsigned long);
- memset(p_map->cid_map, 0, len);
+ bitmap_zero(p_map->cid_map, p_map->max_count);
}
if (!p_cfg->cids_per_vf)
@@ -1237,11 +1227,7 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
for (vf = 0; vf < MAX_NUM_VFS; vf++) {
p_map = &p_mngr->acquired_vf[type][vf];
- len = DIV_ROUND_UP(p_map->max_count,
- sizeof(unsigned long) *
- BITS_PER_BYTE) *
- sizeof(unsigned long);
- memset(p_map->cid_map, 0, len);
+ bitmap_zero(p_map->cid_map, p_map->max_count);
}
}
}
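
The deleted sizing arithmetic, DIV_ROUND_UP of bits into unsigned longs times
sizeof(unsigned long), is exactly what bitmap_zalloc() computes internally,
and bitmap_free()/bitmap_zero() pair with it so the bit count appears only
once. A self-contained model of that arithmetic, with user-space stand-ins for
the kernel helpers:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* What bitmap_zalloc(nbits, GFP_KERNEL) allocates under the hood. */
    static unsigned long *bitmap_zalloc_model(unsigned int nbits)
    {
            return calloc(DIV_ROUND_UP(nbits, BITS_PER_LONG),
                          sizeof(unsigned long));
    }

    int main(void)
    {
            unsigned int nbits = 1000;   /* e.g. a 1000-entry CID map */
            unsigned long *map = bitmap_zalloc_model(nbits);

            if (!map)
                    return 1;
            printf("%u bits -> %zu bytes\n", nbits,
                   DIV_ROUND_UP(nbits, BITS_PER_LONG) * sizeof(unsigned long));
            free(map);   /* kernel: bitmap_free(map) */
            return 0;
    }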
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index f2cedbd9489c..ed1a84542ad2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -2721,6 +2721,25 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
#define NUM_STORMS 6
/**
+ * qed_get_protocol_type_str(): Get a string for Protocol type.
+ *
+ * @protocol_type: Protocol type (using enum protocol_type).
+ *
+ * Return: String.
+ */
+const char *qed_get_protocol_type_str(u32 protocol_type);
+
+/**
+ * qed_get_ramrod_cmd_id_str(): Get a string for Ramrod command ID.
+ *
+ * @protocol_type: Protocol type (using enum protocol_type).
+ * @ramrod_cmd_id: Ramrod command ID (using per-protocol enum <protocol>_ramrod_cmd_id).
+ *
+ * Return: String.
+ */
+const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id);
+
+/**
* qed_set_rdma_error_level(): Sets the RDMA assert level.
* If the severity of the error will be
* above the level, the FW will assert.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 321c43408153..0ce37f2460a4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -210,6 +210,82 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
XSTORM_PQ_INFO_OFFSET(pq_id))
+static const char * const s_protocol_types[] = {
+ "PROTOCOLID_ISCSI", "PROTOCOLID_FCOE", "PROTOCOLID_ROCE",
+ "PROTOCOLID_CORE", "PROTOCOLID_ETH", "PROTOCOLID_IWARP",
+ "PROTOCOLID_TOE", "PROTOCOLID_PREROCE", "PROTOCOLID_COMMON",
+ "PROTOCOLID_TCP", "PROTOCOLID_RDMA", "PROTOCOLID_SCSI",
+};
+
+static const char *s_ramrod_cmd_ids[][28] = {
+ {
+ "ISCSI_RAMROD_CMD_ID_UNUSED", "ISCSI_RAMROD_CMD_ID_INIT_FUNC",
+ "ISCSI_RAMROD_CMD_ID_DESTROY_FUNC",
+ "ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN",
+ "ISCSI_RAMROD_CMD_ID_UPDATE_CONN",
+ "ISCSI_RAMROD_CMD_ID_TERMINATION_CONN",
+ "ISCSI_RAMROD_CMD_ID_CLEAR_SQ", "ISCSI_RAMROD_CMD_ID_MAC_UPDATE",
+ "ISCSI_RAMROD_CMD_ID_CONN_STATS", },
+ { "FCOE_RAMROD_CMD_ID_INIT_FUNC", "FCOE_RAMROD_CMD_ID_DESTROY_FUNC",
+ "FCOE_RAMROD_CMD_ID_STAT_FUNC",
+ "FCOE_RAMROD_CMD_ID_OFFLOAD_CONN",
+ "FCOE_RAMROD_CMD_ID_TERMINATE_CONN", },
+ { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
+ "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
+ "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
+ "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
+ "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
+ "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
+ "RDMA_RAMROD_STOP_NS_TRACKING", "ROCE_RAMROD_CREATE_QP",
+ "ROCE_RAMROD_MODIFY_QP", "ROCE_RAMROD_QUERY_QP",
+ "ROCE_RAMROD_DESTROY_QP", "ROCE_RAMROD_CREATE_UD_QP",
+ "ROCE_RAMROD_DESTROY_UD_QP", "ROCE_RAMROD_FUNC_UPDATE",
+ "ROCE_RAMROD_SUSPEND_QP", "ROCE_RAMROD_QUERY_SUSPENDED_QP",
+ "ROCE_RAMROD_CREATE_SUSPENDED_QP", "ROCE_RAMROD_RESUME_QP",
+ "ROCE_RAMROD_SUSPEND_UD_QP", "ROCE_RAMROD_RESUME_UD_QP",
+ "ROCE_RAMROD_CREATE_SUSPENDED_UD_QP", "ROCE_RAMROD_FLUSH_DPT_QP", },
+ { "CORE_RAMROD_UNUSED", "CORE_RAMROD_RX_QUEUE_START",
+ "CORE_RAMROD_TX_QUEUE_START", "CORE_RAMROD_RX_QUEUE_STOP",
+ "CORE_RAMROD_TX_QUEUE_STOP",
+ "CORE_RAMROD_RX_QUEUE_FLUSH",
+ "CORE_RAMROD_TX_QUEUE_UPDATE", "CORE_RAMROD_QUEUE_STATS_QUERY", },
+ { "ETH_RAMROD_UNUSED", "ETH_RAMROD_VPORT_START",
+ "ETH_RAMROD_VPORT_UPDATE", "ETH_RAMROD_VPORT_STOP",
+ "ETH_RAMROD_RX_QUEUE_START", "ETH_RAMROD_RX_QUEUE_STOP",
+ "ETH_RAMROD_TX_QUEUE_START", "ETH_RAMROD_TX_QUEUE_STOP",
+ "ETH_RAMROD_FILTERS_UPDATE", "ETH_RAMROD_RX_QUEUE_UPDATE",
+ "ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION",
+ "ETH_RAMROD_RX_ADD_OPENFLOW_FILTER",
+ "ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER",
+ "ETH_RAMROD_RX_ADD_UDP_FILTER",
+ "ETH_RAMROD_RX_DELETE_UDP_FILTER",
+ "ETH_RAMROD_RX_CREATE_GFT_ACTION",
+ "ETH_RAMROD_RX_UPDATE_GFT_FILTER", "ETH_RAMROD_TX_QUEUE_UPDATE",
+ "ETH_RAMROD_RGFS_FILTER_ADD", "ETH_RAMROD_RGFS_FILTER_DEL",
+ "ETH_RAMROD_TGFS_FILTER_ADD", "ETH_RAMROD_TGFS_FILTER_DEL",
+ "ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST", },
+ { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
+ "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
+ "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
+ "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
+ "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
+ "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
+ "RDMA_RAMROD_STOP_NS_TRACKING",
+ "IWARP_RAMROD_CMD_ID_TCP_OFFLOAD",
+ "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD",
+ "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR",
+ "IWARP_RAMROD_CMD_ID_CREATE_QP", "IWARP_RAMROD_CMD_ID_QUERY_QP",
+ "IWARP_RAMROD_CMD_ID_MODIFY_QP",
+ "IWARP_RAMROD_CMD_ID_DESTROY_QP",
+ "IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD", },
+ { NULL }, /* TOE */
+ { NULL }, /* PREROCE */
+ { "COMMON_RAMROD_UNUSED", "COMMON_RAMROD_PF_START",
+ "COMMON_RAMROD_PF_STOP", "COMMON_RAMROD_VF_START",
+ "COMMON_RAMROD_VF_STOP", "COMMON_RAMROD_PF_UPDATE",
+ "COMMON_RAMROD_RL_UPDATE", "COMMON_RAMROD_EMPTY", }
+};
+
/******************** INTERNAL IMPLEMENTATION *********************/
/* Returns the external VOQ number */
@@ -1647,6 +1723,32 @@ void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
+const char *qed_get_protocol_type_str(u32 protocol_type)
+{
+ if (protocol_type >= ARRAY_SIZE(s_protocol_types))
+ return "Invalid protocol type";
+
+ return s_protocol_types[protocol_type];
+}
+
+const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id)
+{
+ const char *ramrod_cmd_id_str;
+
+ if (protocol_type >= ARRAY_SIZE(s_ramrod_cmd_ids))
+ return "Invalid protocol type";
+
+ if (ramrod_cmd_id >= ARRAY_SIZE(s_ramrod_cmd_ids[0]))
+ return "Invalid Ramrod command ID";
+
+ ramrod_cmd_id_str = s_ramrod_cmd_ids[protocol_type][ramrod_cmd_id];
+
+ if (!ramrod_cmd_id_str)
+ return "Invalid Ramrod command ID";
+
+ return ramrod_cmd_id_str;
+}
+
static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
switch (storm_id) {
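
The two lookup helpers added above guard both array dimensions and the sparse
NULL rows ({ NULL } for TOE and PREROCE), so a bad protocol/ramrod pair
degrades into a diagnostic string rather than an out-of-bounds read. The same
defensive pattern in miniature (runnable, with illustrative names):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *const names[][3] = {
            { "A_FIRST", "A_SECOND" },
            { NULL },                  /* sparse row, like TOE/PREROCE */
    };

    static const char *name_str(unsigned int row, unsigned int col)
    {
            if (row >= ARRAY_SIZE(names) || col >= ARRAY_SIZE(names[0]))
                    return "Invalid index";
            return names[row][col] ? names[row][col] : "Invalid index";
    }

    int main(void)
    {
            printf("%s / %s / %s\n",
                   name_str(0, 1), name_str(1, 0), name_str(9, 9));
            return 0;
    }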
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 6958adeca86d..82e74f62b677 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2399,3 +2399,25 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
+
+int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info)
+{
+ u16 sbid = p_sb->igu_sb_id;
+ u32 i;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ if (sbid >= NUM_OF_SBS(p_hwfn->cdev))
+ return -EINVAL;
+
+ p_info->igu_prod = qed_rd(p_hwfn, p_ptt, IGU_REG_PRODUCER_MEMORY + sbid * 4);
+ p_info->igu_cons = qed_rd(p_hwfn, p_ptt, IGU_REG_CONSUMER_MEM + sbid * 4);
+
+ for (i = 0; i < PIS_PER_SB; i++)
+ p_info->pi[i] = (u16)qed_rd(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + sbid * 4 * PIS_PER_SB + i * 4);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 84c17e97f569..7e5127f61744 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -186,6 +186,19 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable);
/**
+ * qed_int_get_sb_dbg(): Read debug information regarding a given SB.
+ *
+ * @p_hwfn: hw function pointer
+ * @p_ptt: ptt resource
+ * @p_sb: pointer to status block for which we want to get info
+ * @p_info: pointer to struct to fill with information regarding SB
+ *
+ * Return: 0 on success, with the status block info filled; error code otherwise.
+ */
+int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info);
+
+/**
* qed_db_rec_handler(): Doorbell Recovery handler.
* Run doorbell recovery in case of PF overflow (and flush DORQ if
* needed).
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 7673b3e07736..c5003fa1a25e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -255,27 +255,6 @@ static void __exit qed_exit(void)
}
module_exit(qed_exit);
-/* Check if the DMA controller on the machine can properly handle the DMA
- * addressing required by the device.
- */
-static int qed_set_coherency_mask(struct qed_dev *cdev)
-{
- struct device *dev = &cdev->pdev->dev;
-
- if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
- if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
- DP_NOTICE(cdev,
- "Can't request 64-bit consistent allocations\n");
- return -EIO;
- }
- } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
- DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
- return -EIO;
- }
-
- return 0;
-}
-
static void qed_free_pci(struct qed_dev *cdev)
{
struct pci_dev *pdev = cdev->pdev;
@@ -351,9 +330,12 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
DP_NOTICE(cdev, "Cannot find power management capability\n");
- rc = qed_set_coherency_mask(cdev);
- if (rc)
+ rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ DP_NOTICE(cdev, "Can't request DMA addresses\n");
+ rc = -EIO;
goto err2;
+ }
cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
@@ -447,7 +429,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->wol_support = true;
dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);
-
+ dev_info->esl = qed_mcp_is_esl_supported(p_hwfn);
dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
} else {
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
@@ -2936,6 +2918,30 @@ out:
return status;
}
+static int
+qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb,
+ u16 qid, struct qed_sb_info_dbg *sb_dbg)
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns];
+ struct qed_ptt *ptt;
+ int rc;
+
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Can't acquire PTT\n");
+ return -EAGAIN;
+ }
+
+ memset(sb_dbg, 0, sizeof(*sb_dbg));
+ rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);
+
+ qed_ptt_release(hwfn, ptt);
+ return rc;
+}
+
static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
u8 dev_addr, u32 offset, u32 len)
{
@@ -2978,11 +2984,54 @@ static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
return rc;
}
+static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...)
+{
+ char buf[QED_MFW_REPORT_STR_SIZE];
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ va_list vl;
+
+ va_start(vl, fmt);
+ vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl);
+ va_end(vl);
+
+ if (IS_PF(cdev)) {
+ p_hwfn = QED_LEADING_HWFN(cdev);
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (p_ptt) {
+ qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf));
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+ }
+}
+
static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
return QED_AFFIN_HWFN_IDX(cdev);
}
+static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ *esl_active = false;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
@@ -3038,6 +3087,9 @@ const struct qed_common_ops qed_common_ops_pass = {
.read_nvm_cfg = &qed_nvm_flash_cfg_read,
.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
.set_grc_config = &qed_set_grc_config,
+ .mfw_report = &qed_mfw_report,
+ .get_sb_info = &qed_get_sb_info,
+ .get_esl_status = &qed_get_esl_status,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
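
qed_mfw_report() above is the classic printf-style forwarding wrapper:
__printf(2, 3) lets the compiler type-check callers' format strings, and
vsnprintf() bounds the expansion before the buffer is shipped to the
management firmware. The skeleton with the qed plumbing stripped out
(runnable user-space stand-in; fputs() stands in for
qed_mcp_send_raw_debug_data()):

    #include <stdarg.h>
    #include <stdio.h>

    #define REPORT_STR_SIZE 256   /* like QED_MFW_REPORT_STR_SIZE */

    /* The kernel spells this attribute __printf(1, 2). */
    __attribute__((format(printf, 1, 2)))
    static void report(const char *fmt, ...)
    {
            char buf[REPORT_STR_SIZE];
            va_list vl;

            va_start(vl, fmt);
            vsnprintf(buf, sizeof(buf), fmt, vl);  /* always NUL-terminated */
            va_end(vl);

            fputs(buf, stdout);
    }

    int main(void)
    {
            report("Txq[%d]: FW cons [host] %04x\n", 3, 0x1a2b);
            return 0;
    }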
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 64678a256f3b..da1eadabcb41 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -4158,3 +4158,25 @@ qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
return qed_mcp_send_debug_data(p_hwfn, p_ptt,
QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
}
+
+bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn)
+{
+ return !!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK);
+}
+
+int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MANAGEMENT_STATUS, 0, &resp, &param);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to send ESL command, rc = %d\n", rc);
+ return rc;
+ }
+
+ *active = !!(param & FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 564723800d15..369e1892450a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -15,6 +15,8 @@
#include "qed_hsi.h"
#include "qed_dev_api.h"
+#define QED_MFW_REPORT_STR_SIZE 256
+
struct qed_mcp_link_speed_params {
bool autoneg;
@@ -1337,4 +1339,24 @@ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
u32 len);
+
+/**
+ * qed_mcp_is_esl_supported(): Return whether the management firmware supports ESL.
+ *
+ * @p_hwfn: hw function pointer
+ *
+ * Return: true if ESL is supported, false otherwise.
+ */
+bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_mcp_get_esl_status(): Get enhanced system lockdown status
+ *
+ * @p_hwfn: hw function pointer
+ * @p_ptt: ptt resource pointer
+ * @active: ESL active status data pointer
+ *
+ * Return: 0 on success, with the ESL status in @active; error code otherwise.
+ */
+int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
index 8a0e3c5d4bda..b70ee8200e15 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
@@ -1191,6 +1191,7 @@ enum drv_msg_code_enum {
DRV_MSG_CODE_CFG_VF_MSIX = DRV_MSG_CODE(0xc001),
DRV_MSG_CODE_CFG_PF_VFS_MSIX = DRV_MSG_CODE(0xc002),
DRV_MSG_CODE_DEBUG_DATA_SEND = DRV_MSG_CODE(0xc004),
+ DRV_MSG_CODE_GET_MANAGEMENT_STATUS = DRV_MSG_CODE(0xc007),
};
#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 6f1a52e6beb2..b5e35f433a20 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -550,6 +550,8 @@
0x1 << 1)
#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
0x1 << 0)
+#define IGU_REG_PRODUCER_MEMORY 0x182000UL
+#define IGU_REG_CONSUMER_MEM 0x183000UL
#define IGU_REG_MAPPING_MEMORY \
0x184000UL
#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 648176dfb871..3b54da963554 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -85,10 +85,12 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
goto err;
}
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
- opaque_cid, cmd, protocol,
- (unsigned long)&p_ent->ramrod,
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Initialized: CID %08x %s:[%02x] %s:%02x data_addr %llx comp_mode [%s]\n",
+ opaque_cid, qed_get_ramrod_cmd_id_str(protocol, cmd),
+ cmd, qed_get_protocol_type_str(protocol), protocol,
+ (unsigned long long)(uintptr_t)&p_ent->ramrod,
D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index e0473729b161..d01b9245f811 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -139,10 +139,13 @@ err:
if (!p_ptt)
return -EBUSY;
qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
- "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
+ "Ramrod is stuck [CID %08x %s:%02x %s:%02x echo %04x]\n",
le32_to_cpu(p_ent->elem.hdr.cid),
+ qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
+ p_ent->elem.hdr.cmd_id),
p_ent->elem.hdr.cmd_id,
- p_ent->elem.hdr.protocol_id,
+ qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
+ p_ent->elem.hdr.protocol_id,
le16_to_cpu(p_ent->elem.hdr.echo));
qed_ptt_release(p_hwfn, p_ptt);
@@ -170,13 +173,16 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Ramrod hdr: [CID 0x%08x %s:0x%02x %s:0x%02x] Data ptr: [%08x:%08x] Cmpltion Mode: %s\n",
p_ent->elem.hdr.cid,
+ qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
+ p_ent->elem.hdr.cmd_id),
p_ent->elem.hdr.cmd_id,
- p_ent->elem.hdr.protocol_id,
- p_ent->elem.data_ptr.hi,
- p_ent->elem.data_ptr.lo,
+ qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
+ p_ent->elem.hdr.protocol_id,
+ p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
@@ -271,17 +277,27 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
{
qed_spq_async_comp_cb cb;
- if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+ if (!p_hwfn->p_spq)
return -EINVAL;
+ if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
+ DP_ERR(p_hwfn, "Wrong protocol: %s:%d\n",
+ qed_get_protocol_type_str(p_eqe->protocol_id),
+ p_eqe->protocol_id);
+
+ return -EINVAL;
+ }
+
cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
if (cb) {
return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
&p_eqe->data, p_eqe->fw_return_code);
} else {
DP_NOTICE(p_hwfn,
- "Unknown Async completion for protocol: %d\n",
+ "Unknown Async completion for %s:%d\n",
+ qed_get_protocol_type_str(p_eqe->protocol_id),
p_eqe->protocol_id);
+
return -EINVAL;
}
}
@@ -830,8 +846,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
if (p_hwfn->cdev->recov_in_prog) {
DP_VERBOSE(p_hwfn,
QED_MSG_SPQ,
- "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
- p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
+ "Recovery is in progress. Skip spq post [%s:%02x %s:%02x]\n",
+ qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
+ p_ent->elem.hdr.cmd_id),
+ p_ent->elem.hdr.cmd_id,
+ qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
+ p_ent->elem.hdr.protocol_id);
/* Let the flow complete w/o any error handling */
qed_spq_recov_set_ret_code(p_ent, fw_return_code);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 8284c4c1528f..97a7ab0826ed 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -168,6 +168,8 @@ enum {
QEDE_PRI_FLAG_CMT,
QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */
QEDE_PRI_FLAG_RECOVER_ON_ERROR,
+ QEDE_PRI_FLAG_ESL_SUPPORT, /* MFW supports Enhanced System Lockdown */
+ QEDE_PRI_FLAG_ESL_ACTIVE, /* Enhanced System Lockdown Active status */
QEDE_PRI_FLAG_LEN,
};
@@ -175,6 +177,8 @@ static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Coupled-Function",
"SmartAN capable",
"Recover on error",
+ "ESL capable",
+ "ESL active",
};
enum qede_ethtool_tests {
@@ -478,6 +482,7 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
static u32 qede_get_priv_flags(struct net_device *dev)
{
struct qede_dev *edev = netdev_priv(dev);
+ bool esl_active;
u32 flags = 0;
if (edev->dev_info.common.num_hwfns > 1)
@@ -489,6 +494,14 @@ static u32 qede_get_priv_flags(struct net_device *dev)
if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE))
flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR);
+ if (edev->dev_info.common.esl)
+ flags |= BIT(QEDE_PRI_FLAG_ESL_SUPPORT);
+
+ edev->ops->common->get_esl_status(edev->cdev, &esl_active);
+
+ if (esl_active)
+ flags |= BIT(QEDE_PRI_FLAG_ESL_ACTIVE);
+
return flags;
}
@@ -888,7 +901,9 @@ int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
}
static void qede_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -899,7 +914,9 @@ static void qede_get_ringparam(struct net_device *dev,
}
static int qede_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct qede_dev *edev = netdev_priv(dev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 999abcfe3310..b242000a77fd 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -10,6 +10,7 @@
#include <linux/bpf_trace.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
+#include <net/gro.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
@@ -1152,7 +1153,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
qede_rx_bd_ring_consume(rxq);
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(edev->ndev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(edev->ndev, prog, act);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 06c6a5813606..b4e5a15e308b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -509,34 +509,95 @@ static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
-static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)
+static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
{
+ char *p_sb = (char *)fp->sb_info->sb_virt;
+ u32 sb_size, i;
+
+ sb_size = sizeof(struct status_block);
+
+ for (i = 0; i < sb_size; i += 8)
+ DP_NOTICE(edev,
+ "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n",
+ p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
+ p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
+}
+
+static void
+qede_txq_fp_log_metadata(struct qede_dev *edev,
+ struct qede_fastpath *fp, struct qede_tx_queue *txq)
+{
+ struct qed_chain *p_chain = &txq->tx_pbl;
+
+ /* Dump txq/fp/sb ids etc. other metadata */
DP_NOTICE(edev,
- "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
- txq->index, le16_to_cpu(*txq->hw_cons_ptr),
- qed_chain_get_cons_idx(&txq->tx_pbl),
- qed_chain_get_prod_idx(&txq->tx_pbl),
- jiffies);
+ "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
+ fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
+ p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);
+
+ /* Dump all the relevant prod/cons indexes */
+ DP_NOTICE(edev,
+ "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
+ le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
+ qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
+}
+
+static void
+qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
+{
+ struct qed_sb_info_dbg sb_dbg;
+ int rc;
+
+ /* sb info */
+ qede_fp_sb_dump(edev, fp);
+
+ memset(&sb_dbg, 0, sizeof(sb_dbg));
+ rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);
+
+ DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
+ sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);
+
+ /* report to mfw */
+ edev->ops->common->mfw_report(edev->cdev,
+ "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
+ txq->index, le16_to_cpu(*txq->hw_cons_ptr),
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
+ if (!rc)
+ edev->ops->common->mfw_report(edev->cdev,
+ "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
+ txq->index, fp->sb_info->igu_sb_id,
+ sb_dbg.igu_prod, sb_dbg.igu_cons,
+ sb_dbg.pi[TX_PI(txq->cos)]);
}
static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct qede_dev *edev = netdev_priv(dev);
- struct qede_tx_queue *txq;
- int cos;
+ int i;
netif_carrier_off(dev);
DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
- if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX))
- return;
+ for_each_queue(i) {
+ struct qede_tx_queue *txq;
+ struct qede_fastpath *fp;
+ int cos;
- for_each_cos_in_txq(edev, cos) {
- txq = &edev->fp_array[txqueue].txq[cos];
+ fp = &edev->fp_array[i];
+ if (!(fp->type & QEDE_FASTPATH_TX))
+ continue;
- if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
- qed_chain_get_prod_idx(&txq->tx_pbl))
- qede_tx_log_print(edev, txq);
+ for_each_cos_in_txq(edev, cos) {
+ txq = &fp->txq[cos];
+
+ /* Dump basic metadata for all queues */
+ qede_txq_fp_log_metadata(edev, fp, txq);
+
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl))
+ qede_tx_log_print(edev, fp, txq);
+ }
}
if (IS_VF(edev))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 8c28fabb0ff6..39176e765767 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -304,11 +304,6 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
"HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
config.tx_type, config.rx_filter);
- if (config.flags) {
- DP_ERR(edev, "config.flags is reserved for future use\n");
- return -EINVAL;
- }
-
ptp->hw_ts_ioctl_called = 1;
ptp->tx_type = config.tx_type;
ptp->rx_filter = config.rx_filter;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 71523d747e93..b30589a135c2 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3750,7 +3750,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
struct net_device *ndev = NULL;
struct ql3_adapter *qdev = NULL;
static int cards_found;
- int pci_using_dac, err;
+ int err;
err = pci_enable_device(pdev);
if (err) {
@@ -3766,11 +3766,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
- pci_using_dac = 1;
- else if (!(err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))))
- pci_using_dac = 0;
-
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
pr_err("%s no usable DMA configuration\n", pci_name(pdev));
goto err_out_free_regions;
@@ -3797,8 +3793,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
qdev->msg_enable = netif_msg_init(debug, default_msg);
- if (pci_using_dac)
- ndev->features |= NETIF_F_HIGHDMA;
+ ndev->features |= NETIF_F_HIGHDMA;
if (qdev->device_id == QL3032_DEVICE_ID)
ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index be7abee160e7..b25102fded7b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1698,7 +1698,7 @@ int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter, u16 port);
int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, u16 port);
int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
int qlcnic_read_mac_addr(struct qlcnic_adapter *);
-int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
+int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *);
void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
void qlcnic_sriov_vf_set_multi(struct net_device *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 6f1d9c1fd1b0..23cd47d588e5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -609,7 +609,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
u32, u8 *, int);
-int qlcnic_83xx_init(struct qlcnic_adapter *, int);
+int qlcnic_83xx_init(struct qlcnic_adapter *);
int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 27dffa299ca6..dbb800769cb6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2432,7 +2432,7 @@ static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
qlcnic_set_sds_ring_count(adapter, rx_cnt);
}
-int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
int err = 0;
@@ -2466,7 +2466,7 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
goto exit;
if (qlcnic_sriov_vf_check(adapter)) {
- err = qlcnic_sriov_vf_init(adapter, pci_using_dac);
+ err = qlcnic_sriov_vf_init(adapter);
if (err)
goto detach_mbx;
else
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index fc364b4ab6eb..e10fe071a40f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -632,7 +632,9 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
static void
qlcnic_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
@@ -663,7 +665,9 @@ qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
static int
qlcnic_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u16 num_rxd, num_jumbo_rxd, num_txd;
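The two-argument growth seen here repeats across every ethtool ringparam implementation in this merge (emac, qca_debug, 8139cp, r8169, ravb, sh_eth, sfc, stmmac, ...): the 5.17 ethtool core passes a kernel-only parameter struct plus a netlink extack to .get_ringparam()/.set_ringparam(). Drivers that need neither simply accept and ignore them; a minimal sketch under the new prototype (the foo_*/FOO_* names are hypothetical):

    static void foo_get_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ring,
                                  struct kernel_ethtool_ringparam *kernel_ring,
                                  struct netlink_ext_ack *extack)
    {
            ring->rx_max_pending = FOO_RX_RING_MAX;   /* hypothetical limit */
            ring->rx_pending = FOO_RX_RING_DEFAULT;   /* current setting */
    }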
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index ed84f0f97623..d320567b2cca 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2258,8 +2258,7 @@ static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
}
int
-qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
- int pci_using_dac)
+qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int err;
struct pci_dev *pdev = adapter->pdev;
@@ -2278,20 +2277,15 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO |
- NETIF_F_HW_VLAN_CTAG_RX);
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA);
netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM);
+ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA);
if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
}
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
-
if (qlcnic_vlan_tx_check(adapter))
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX);
@@ -2341,20 +2335,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
return 0;
}
-static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac)
-{
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
- *pci_using_dac = 1;
- else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
- *pci_using_dac = 0;
- else {
- dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
- return -EIO;
- }
-
- return 0;
-}
-
void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
{
int ring;
@@ -2441,8 +2421,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev = NULL;
struct qlcnic_adapter *adapter = NULL;
struct qlcnic_hardware_context *ahw;
- int err, pci_using_dac = -1;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
+ int err;
err = pci_enable_device(pdev);
if (err)
@@ -2453,9 +2433,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_pdev;
}
- err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
- if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
goto err_out_disable_pdev;
+ }
err = pci_request_regions(pdev, qlcnic_driver_name);
if (err)
@@ -2569,7 +2551,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
- err = qlcnic_83xx_init(adapter, pci_using_dac);
+ err = qlcnic_83xx_init(adapter);
if (err) {
switch (err) {
case -ENOTRECOVERABLE:
@@ -2633,7 +2615,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (adapter->portnum == 0)
qlcnic_set_drv_version(adapter);
- err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
+ err = qlcnic_setup_netdev(adapter, netdev);
if (err)
goto err_out_disable_mbx_intr;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 7160b42f51dd..c42b99cd58bd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -188,7 +188,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *, int);
void qlcnic_sriov_cleanup(struct qlcnic_adapter *);
void __qlcnic_sriov_cleanup(struct qlcnic_adapter *);
void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
-int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int);
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *);
void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
@@ -201,7 +201,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
struct qlcnic_info *, u16);
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
struct qlcnic_vf_info *, u16);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index dd03be3fc82a..9282321c2e7f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -432,7 +432,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
- int i, num_vlans;
+ int i, num_vlans, ret;
u16 *vlans;
if (sriov->allowed_vlans)
@@ -443,7 +443,9 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
sriov->num_allowed_vlans);
- qlcnic_sriov_alloc_vlans(adapter);
+ ret = qlcnic_sriov_alloc_vlans(adapter);
+ if (ret)
+ return ret;
if (!sriov->any_vlan)
return 0;
@@ -523,8 +525,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
- int pci_using_dac)
+static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter)
{
int err;
@@ -569,7 +570,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
if (err)
goto err_out_send_channel_term;
- err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
+ err = qlcnic_setup_netdev(adapter, adapter->netdev);
if (err)
goto err_out_send_channel_term;
@@ -612,7 +613,7 @@ static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
int err;
@@ -629,7 +630,7 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
if (err)
return err;
- err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
+ err = qlcnic_sriov_setup_vf(adapter);
if (err)
return err;
@@ -2154,7 +2155,7 @@ static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
return err;
}
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_vf_info *vf;
@@ -2164,7 +2165,11 @@ void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
vf = &sriov->vf_info[i];
vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
sizeof(*vf->sriov_vlans), GFP_KERNEL);
+ if (!vf->sriov_vlans)
+ return -ENOMEM;
}
+
+ return 0;
}
void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 447720b93e5a..e90fa97c0ae6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -597,7 +597,9 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
if (err)
goto del_flr_queue;
- qlcnic_sriov_alloc_vlans(adapter);
+ err = qlcnic_sriov_alloc_vlans(adapter);
+ if (err)
+ goto del_flr_queue;
return err;
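The qlcnic_sriov_alloc_vlans() rework above closes a NULL-dereference window: kcalloc() failures used to be ignored, leaving vf->sriov_vlans NULL for later users. The function now reports -ENOMEM, and both call sites (the VF guest-VLAN setup and __qlcnic_pci_sriov_enable()) unwind through their existing error paths. The shape of the fix:

    /* Sketch: turn a void allocator into one that propagates failure */
    for (i = 0; i < sriov->num_vfs; i++) {
            vf = &sriov->vf_info[i];
            vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
                                      sizeof(*vf->sriov_vlans), GFP_KERNEL);
            if (!vf->sriov_vlans)
                    return -ENOMEM;   /* callers unwind via their error labels */
    }
    return 0;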
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
index f72e13b83869..f502db9cdea9 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
@@ -133,7 +133,9 @@ static int emac_nway_reset(struct net_device *netdev)
}
static void emac_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct emac_adapter *adpt = netdev_priv(netdev);
@@ -144,7 +146,9 @@ static void emac_get_ringparam(struct net_device *netdev,
}
static int emac_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct emac_adapter *adpt = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index d59fff2fbcc6..792ce9a323cd 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -246,7 +246,9 @@ qcaspi_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
}
static void
-qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
+qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct qcaspi *qca = netdev_priv(dev);
@@ -257,7 +259,9 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
}
static int
-qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
+qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
struct qcaspi *qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 4f39f843bb3a..ad7b9e9d7f95 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1388,7 +1388,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info
}
static void cp_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
ring->rx_max_pending = CP_RX_RING_SIZE;
ring->tx_max_pending = CP_TX_RING_SIZE;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 86c44bc5f73f..19e2621e0645 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -615,6 +615,7 @@ struct rtl8169_private {
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
u16 cp_cmd;
u32 irq_mask;
+ int irq;
struct clk *clk;
struct {
@@ -1873,7 +1874,9 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
}
static void rtl8169_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *data)
+ struct ethtool_ringparam *data,
+ struct kernel_ethtool_ringparam *kernel_data,
+ struct netlink_ext_ack *extack)
{
data->rx_max_pending = NUM_RX_DESC;
data->rx_pending = NUM_RX_DESC;
@@ -1969,8 +1972,11 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
/* 8125A family. */
- { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
- { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
+ { 0x7cf, 0x609, RTL_GIGA_MAC_VER_61 },
+ /* It seems only XID 609 made it to the mass market.
+ * { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
+ * { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
+ */
/* RTL8117 */
{ 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 },
@@ -1978,17 +1984,26 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168EP family. */
{ 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
- { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
- { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
+ /* It seems this chip version never made it to
+ * the wild. Let's disable detection.
+ * { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
+ * { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
+ */
/* 8168H family. */
{ 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 },
- { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
+ /* It seems this chip version never made it to
+ * the wild. Let's disable detection.
+ * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
+ */
/* 8168G family. */
{ 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
{ 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 },
- { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
+ /* It seems this chip version never made it to
+ * the wild. Let's disable detection.
+ * { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
+ */
{ 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 },
/* 8168F family. */
@@ -4698,7 +4713,7 @@ static int rtl8169_close(struct net_device *dev)
cancel_work_sync(&tp->wk.work);
- free_irq(pci_irq_vector(pdev, 0), tp);
+ free_irq(tp->irq, tp);
phy_disconnect(tp->phydev);
@@ -4719,7 +4734,7 @@ static void rtl8169_netpoll(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
+ rtl8169_interrupt(tp->irq, tp);
}
#endif
@@ -4753,8 +4768,7 @@ static int rtl_open(struct net_device *dev)
rtl_request_firmware(tp);
irqflags = pci_dev_msi_enabled(pdev) ? IRQF_NO_THREAD : IRQF_SHARED;
- retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
- irqflags, dev->name, tp);
+ retval = request_irq(tp->irq, rtl8169_interrupt, irqflags, dev->name, tp);
if (retval < 0)
goto err_release_fw_2;
@@ -4771,7 +4785,7 @@ out:
return retval;
err_free_irq:
- free_irq(pci_irq_vector(pdev, 0), tp);
+ free_irq(tp->irq, tp);
err_release_fw_2:
rtl_release_firmware(tp);
rtl8169_rx_clear(tp);
@@ -5272,12 +5286,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* Disable ASPM L1 as that cause random device stop working
- * problems as well as full system hangs for some PCIe devices users.
- */
- rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
- tp->aspm_manageable = !rc;
-
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {
@@ -5320,6 +5328,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mac_version = chipset;
+	/* Disable ASPM L1, as it causes random devices to stop working and
+	 * full system hangs for some PCIe users. Chips from RTL8168h onwards
+	 * partially have issues with L1.2, but seem to work fine with L1 and
+	 * L1.1.
+ */
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_45)
+ rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+ else
+ rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ tp->aspm_manageable = !rc;
+
tp->dash_type = rtl_check_dash(tp);
tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
@@ -5341,6 +5360,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "Can't allocate interrupt\n");
return rc;
}
+ tp->irq = pci_irq_vector(pdev, 0);
INIT_WORK(&tp->wk.work, rtl_task);
@@ -5375,12 +5395,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (rtl_chip_supports_csum_v2(tp)) {
dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
- dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
- dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
+ netif_set_gso_max_size(dev, RTL_GSO_MAX_SIZE_V2);
+ netif_set_gso_max_segs(dev, RTL_GSO_MAX_SEGS_V2);
} else {
dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
- dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
- dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
+ netif_set_gso_max_size(dev, RTL_GSO_MAX_SIZE_V1);
+ netif_set_gso_max_segs(dev, RTL_GSO_MAX_SEGS_V1);
}
dev->hw_features |= NETIF_F_RXALL;
@@ -5416,8 +5436,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
- rtl_chip_infos[chipset].name, dev->dev_addr, xid,
- pci_irq_vector(pdev, 0));
+ rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
if (jumbo_max)
netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
@@ -5441,7 +5460,9 @@ static struct pci_driver rtl8169_pci_driver = {
.probe = rtl_init_one,
.remove = rtl_remove_one,
.shutdown = rtl_shutdown,
- .driver.pm = pm_ptr(&rtl8169_pm_ops),
+#ifdef CONFIG_PM
+ .driver.pm = &rtl8169_pm_ops,
+#endif
};
module_pci_driver(rtl8169_pci_driver);
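Among the smaller r8169 changes above, the IRQ one is purely mechanical: pci_irq_vector(pdev, 0) is resolved once after vector allocation and cached in the private struct, so the open/close/netpoll paths reuse the stored number. Sketch of the three touch points:

    tp->irq = pci_irq_vector(pdev, 0);                     /* probe: resolve once */
    retval = request_irq(tp->irq, rtl8169_interrupt,
                         irqflags, dev->name, tp);         /* open */
    free_irq(tp->irq, tp);                                 /* close / error path */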
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index b4c597f4040c..b215cde68e10 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -30,8 +30,7 @@
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/reset.h>
-
-#include <asm/div64.h>
+#include <linux/math64.h>
#include "ravb.h"
@@ -1605,7 +1604,9 @@ static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
}
static void ravb_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -1616,7 +1617,9 @@ static void ravb_get_ringparam(struct net_device *ndev,
}
static int ravb_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
@@ -2218,10 +2221,6 @@ static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
if (copy_from_user(&config, req->ifr_data, sizeof(config)))
return -EFAULT;
- /* Reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
tstamp_tx_ctrl = 0;
@@ -2488,8 +2487,7 @@ static int ravb_set_gti(struct net_device *ndev)
if (!rate)
return -EINVAL;
- inc = 1000000000ULL << 20;
- do_div(inc, rate);
+ inc = div64_ul(1000000000ULL << 20, rate);
if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a3fbb2221c9a..d947a628e166 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2296,7 +2296,9 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
}
static void sh_eth_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2307,7 +2309,9 @@ static void sh_eth_get_ringparam(struct net_device *ndev,
}
static int sh_eth_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int ret;
@@ -3364,8 +3368,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* MDIO bus init */
ret = sh_mdio_init(mdp, pd);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "MDIO init failed: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "MDIO init failed\n");
goto out_release;
}
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index ba4062881eed..3fcea211716c 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1995,17 +1995,6 @@ static int rocker_port_get_phys_port_name(struct net_device *dev,
return err ? -EOPNOTSUPP : 0;
}
-static int rocker_port_change_proto_down(struct net_device *dev,
- bool proto_down)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
-
- if (rocker_port->dev->flags & IFF_UP)
- rocker_port_set_enable(rocker_port, !proto_down);
- rocker_port->dev->proto_down = proto_down;
- return 0;
-}
-
static void rocker_port_neigh_destroy(struct net_device *dev,
struct neighbour *n)
{
@@ -2037,7 +2026,6 @@ static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_set_mac_address = rocker_port_set_mac_address,
.ndo_change_mtu = rocker_port_change_mtu,
.ndo_get_phys_port_name = rocker_port_get_phys_port_name,
- .ndo_change_proto_down = rocker_port_change_proto_down,
.ndo_neigh_destroy = rocker_port_neigh_destroy,
.ndo_get_port_parent_id = rocker_port_get_port_parent_id,
};
@@ -2882,19 +2870,10 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_pci_request_regions;
}
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
- goto err_pci_set_dma_mask;
- }
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "dma_set_mask failed\n");
- goto err_pci_set_dma_mask;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
+ goto err_pci_set_dma_mask;
}
if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 3e1ca7a8d029..bc70c6abd6a5 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2783,7 +2783,8 @@ static void ofdpa_fib4_abort(struct rocker *rocker)
if (!ofdpa_port)
continue;
nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
- ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
+ ofdpa_flow_tbl_del(ofdpa_port,
+ OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT,
flow_entry);
}
spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 16a4cbae9326..c672f92d65e9 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -749,6 +749,7 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
const struct ether3_data *data = id->data;
struct net_device *dev;
int bus_type, ret;
+ u8 addr[ETH_ALEN];
ether3_banner();
@@ -776,7 +777,8 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
priv(dev)->seeq = priv(dev)->base + data->base_offset;
dev->irq = ec->irq;
- ether3_addr(dev->dev_addr, ec);
+ ether3_addr(addr, ec);
+ eth_hw_addr_set(dev, addr);
priv(dev)->dev = dev;
timer_setup(&priv(dev)->timer, ether3_ledoff, 0);
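The ether3 hunk above (and the smc9194 one below) adapts to the netdev address constification: drivers may no longer write dev->dev_addr in place, and instead assemble the MAC in a local buffer and install it with eth_hw_addr_set(). Sketch of the pattern (read_mac() stands in for the bus-specific read, ether3_addr() here):

    u8 addr[ETH_ALEN];

    read_mac(addr);               /* hypothetical bus-specific read */
    eth_hw_addr_set(dev, addr);   /* copies ETH_ALEN bytes into dev->dev_addr */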
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 835c838b7dfa..5dba4125d953 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -20,8 +20,11 @@
/* This is the maximum number of descriptor rings supported by the QDMA */
#define EFX_EF100_MAX_DMAQ_SIZE 16384UL
-static void ef100_ethtool_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static void
+ef100_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 6aa81229b68a..f79b14a119ae 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -609,6 +609,9 @@ static size_t ef100_update_stats(struct efx_nic *efx,
ef100_common_stat_mask(mask);
ef100_ethtool_stat_mask(mask);
+ if (!mc_stats)
+ return 0;
+
efx_nic_copy_stats(efx, mc_stats);
efx_nic_update_stats(ef100_stat_desc, EF100_STAT_COUNT, mask,
stats, mc_stats, false);
@@ -993,11 +996,11 @@ static int ef100_process_design_param(struct efx_nic *efx,
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE);
- efx->net_dev->gso_max_size = nic_data->tso_max_payload_len;
+ netif_set_gso_max_size(efx->net_dev, nic_data->tso_max_payload_len);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS:
nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff);
- efx->net_dev->gso_max_segs = nic_data->tso_max_payload_num_segs;
+ netif_set_gso_max_segs(efx->net_dev, nic_data->tso_max_payload_num_segs);
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES:
nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff);
@@ -1122,7 +1125,7 @@ static int ef100_probe_main(struct efx_nic *efx)
nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;
nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
- net_dev->gso_max_segs = ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT;
+ netif_set_gso_max_segs(net_dev, ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
/* Read design parameters */
rc = ef100_check_design_params(efx);
if (rc) {
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 6960a2fe2b53..302dc835ac3d 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -5,6 +5,7 @@
* Copyright 2005-2013 Solarflare Communications Inc.
*/
+#include <linux/filter.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -709,7 +710,7 @@ static int efx_register_netdev(struct efx_nic *efx)
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT;
net_dev->ethtool_ops = &efx_ethtool_ops;
- net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
+ netif_set_gso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
net_dev->min_mtu = EFX_MIN_MTU;
net_dev->max_mtu = EFX_MAX_MTU;
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 3dbea028b325..ead550ae2709 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -10,6 +10,7 @@
#include "net_driver.h"
#include <linux/module.h>
+#include <linux/filter.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
@@ -818,11 +819,8 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
old_txq_entries = efx->txq_entries;
efx->rxq_entries = rxq_entries;
efx->txq_entries = txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
+ for (i = 0; i < efx->n_channels; i++)
+ swap(efx->channel[i], other_channel[i]);
/* Restart buffer table allocation */
efx->next_buffer_table = next_buffer_table;
@@ -864,11 +862,8 @@ rollback:
/* Swap back */
efx->rxq_entries = old_rxq_entries;
efx->txq_entries = old_txq_entries;
- for (i = 0; i < efx->n_channels; i++) {
- channel = efx->channel[i];
- efx->channel[i] = other_channel[i];
- other_channel[i] = channel;
- }
+ for (i = 0; i < efx->n_channels; i++)
+ swap(efx->channel[i], other_channel[i]);
goto out;
}
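Both efx_channels hunks above replace the open-coded three-statement exchange with the kernel's swap() macro, dropping the channel temporary. For reference, swap() expands to roughly:

    #define swap(a, b) \
            do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)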
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index f187631b2c5c..af37c990217e 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -9,6 +9,7 @@
*/
#include "net_driver.h"
+#include <linux/filter.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/gre.h>
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index e002ce21788d..48506373721a 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -157,8 +157,11 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
return 0;
}
-static void efx_ethtool_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static void
+efx_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -168,8 +171,11 @@ static void efx_ethtool_get_ringparam(struct net_device *net_dev,
ring->tx_pending = efx->txq_entries;
}
-static int efx_ethtool_set_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static int
+efx_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = netdev_priv(net_dev);
u32 txq_entries;
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 314c9c69eb0e..60c595ef7589 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2267,7 +2267,7 @@ static int ef4_register_netdev(struct ef4_nic *efx)
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &ef4_netdev_ops;
net_dev->ethtool_ops = &ef4_ethtool_ops;
- net_dev->gso_max_segs = EF4_TSO_MAX_SEGS;
+ netif_set_gso_max_segs(net_dev, EF4_TSO_MAX_SEGS);
net_dev->min_mtu = EF4_MIN_MTU;
net_dev->max_mtu = EF4_MAX_MTU;
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 137e8a7aeaa1..907254b36663 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -637,8 +637,11 @@ static int ef4_ethtool_set_coalesce(struct net_device *net_dev,
return 0;
}
-static void ef4_ethtool_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static void
+ef4_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ef4_nic *efx = netdev_priv(net_dev);
@@ -648,8 +651,11 @@ static void ef4_ethtool_get_ringparam(struct net_device *net_dev,
ring->tx_pending = efx->txq_entries;
}
-static int ef4_ethtool_set_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *ring)
+static int
+ef4_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ef4_nic *efx = netdev_priv(net_dev);
u32 txq_entries;
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 966f13e7475d..0c6cc2191369 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -110,6 +110,8 @@ static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
struct ef4_rx_page_state *state;
unsigned index;
+ if (unlikely(!rx_queue->page_ring))
+ return NULL;
index = rx_queue->page_remove & rx_queue->page_ptr_mask;
page = rx_queue->page_ring[index];
if (page == NULL)
@@ -293,6 +295,9 @@ static void ef4_recycle_rx_pages(struct ef4_channel *channel,
{
struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
+ if (unlikely(!rx_queue->page_ring))
+ return;
+
do {
ef4_recycle_rx_page(channel, rx_buf);
rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
@@ -728,7 +733,10 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
sizeof(*rx_queue->page_ring), GFP_KERNEL);
- rx_queue->page_ptr_mask = page_ring_size - 1;
+ if (!rx_queue->page_ring)
+ rx_queue->page_ptr_mask = 0;
+ else
+ rx_queue->page_ptr_mask = page_ring_size - 1;
}
void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index c4fe3c48ac46..899cc1671004 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -71,7 +71,6 @@ int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
u32 flags, u32 loopback_mode, u32 loopback_speed)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
- int rc;
BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
@@ -80,9 +79,8 @@ int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
- rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
+ return efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
NULL, 0, NULL);
- return rc;
}
int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 797e51802ccb..f0ef515e2ade 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1765,9 +1765,6 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
{
int rc;
- if (init->flags)
- return -EINVAL;
-
if ((init->tx_type != HWTSTAMP_TX_OFF) &&
(init->tx_type != HWTSTAMP_TX_ON))
return -ERANGE;
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 606750938b89..2375cef577e4 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -338,7 +338,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
break;
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act);
efx_free_rx_buffers(rx_queue, rx_buf, 1);
channel->n_rx_xdp_bad_drops++;
trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
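bpf_warn_invalid_xdp_action() grew two arguments in this cycle so the one-line warning can identify the device and BPF program, not just the action code; netsec below is converted the same way. The prototype, as reflected by the call sites:

    void bpf_warn_invalid_xdp_action(struct net_device *dev,
                                     struct bpf_prog *prog, u32 act);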
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 68fc7d317693..633ca77a26fd 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -45,6 +45,8 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
unsigned int index;
struct page *page;
+ if (unlikely(!rx_queue->page_ring))
+ return NULL;
index = rx_queue->page_remove & rx_queue->page_ptr_mask;
page = rx_queue->page_ring[index];
if (page == NULL)
@@ -114,6 +116,9 @@ void efx_recycle_rx_pages(struct efx_channel *channel,
{
struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ if (unlikely(!rx_queue->page_ring))
+ return;
+
do {
efx_recycle_rx_page(channel, rx_buf);
rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
@@ -150,7 +155,10 @@ static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
sizeof(*rx_queue->page_ring), GFP_KERNEL);
- rx_queue->page_ptr_mask = page_ring_size - 1;
+ if (!rx_queue->page_ring)
+ rx_queue->page_ptr_mask = 0;
+ else
+ rx_queue->page_ptr_mask = page_ring_size - 1;
}
static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
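The rx_common changes (mirrored in falcon/rx.c above) make recycle-ring allocation failure non-fatal: when kcalloc() returns NULL, page_ptr_mask stays 0 and every fast-path consumer bails out early, so RX degrades to allocating fresh pages instead of dereferencing a NULL ring. The guard each fast-path user now carries:

    /* Sketch: no recycle ring present, fall back to fresh allocation */
    if (unlikely(!rx_queue->page_ring))
            return NULL;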
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 89381f796985..dd6f69ced4ee 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2072,6 +2072,11 @@ static int smc911x_drv_probe(struct platform_device *pdev)
ndev->dma = (unsigned char)-1;
ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ ret = ndev->irq;
+ goto release_both;
+ }
+
lp = netdev_priv(ndev);
lp->netdev = ndev;
#ifdef SMC_DYNAMIC_BUS_CONFIG
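smc911x gains the now-standard IRQ lookup check: platform_get_irq() returns a negative errno on failure (possibly -EPROBE_DEFER) and on current kernels should never return 0, so the value must be tested before being stored; netsec further down converts from the deprecated IORESOURCE_IRQ resource lookup to the same call. Canonical form:

    int irq = platform_get_irq(pdev, 0);

    if (irq < 0)
            return irq;   /* error already logged by the core */
    ndev->irq = irq;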
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 0ce403fa5f1a..af661c65ffe2 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -856,6 +856,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
word configuration_register;
word memory_info_register;
word memory_cfg_register;
+ u8 addr[ETH_ALEN];
/* Grab the region so that no one else tries to probe our ioports. */
if (!request_region(ioaddr, SMC_IO_EXTENT, DRV_NAME))
@@ -924,9 +925,10 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
word address;
address = inw( ioaddr + ADDR0 + i );
- dev->dev_addr[ i + 1] = address >> 8;
- dev->dev_addr[ i ] = address & 0xFF;
+ addr[i + 1] = address >> 8;
+ addr[i] = address & 0xFF;
}
+ eth_hw_addr_set(dev, addr);
/* get the memory information */
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index de7d8bf2c226..556bd353dd42 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -933,7 +933,7 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
}
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(priv->ndev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->ndev, prog, act);
@@ -1977,11 +1977,12 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
static int netsec_probe(struct platform_device *pdev)
{
- struct resource *mmio_res, *eeprom_res, *irq_res;
+ struct resource *mmio_res, *eeprom_res;
struct netsec_priv *priv;
u32 hw_ver, phy_addr = 0;
struct net_device *ndev;
int ret;
+ int irq;
mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mmio_res) {
@@ -1995,11 +1996,9 @@ static int netsec_probe(struct platform_device *pdev)
return -ENODEV;
}
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq_res) {
- dev_err(&pdev->dev, "No IRQ resource found.\n");
- return -ENODEV;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
ndev = alloc_etherdev(sizeof(*priv));
if (!ndev)
@@ -2010,7 +2009,7 @@ static int netsec_probe(struct platform_device *pdev)
spin_lock_init(&priv->reglock);
SET_NETDEV_DEV(ndev, &pdev->dev);
platform_set_drvdata(pdev, priv);
- ndev->irq = irq_res->start;
+ ndev->irq = irq;
priv->dev = &pdev->dev;
priv->ndev = ndev;
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 9160f9ed363a..6b5d96bced47 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -317,6 +317,7 @@ enum tx_frame_status {
tx_not_ls = 0x1,
tx_err = 0x2,
tx_dma_own = 0x4,
+ tx_err_bump_tc = 0x8,
};
enum dma_irq_status {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
index adfeb8d3293d..62a69a91ab22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
@@ -48,46 +49,60 @@
#define DWMAC_RX_VARDELAY(d) ((d) << DWMAC_RX_VARDELAY_SHIFT)
#define DWMAC_RXN_VARDELAY(d) ((d) << DWMAC_RXN_VARDELAY_SHIFT)
+struct oxnas_dwmac;
+
+struct oxnas_dwmac_data {
+ int (*setup)(struct oxnas_dwmac *dwmac);
+};
+
struct oxnas_dwmac {
struct device *dev;
struct clk *clk;
struct regmap *regmap;
+ const struct oxnas_dwmac_data *data;
};
-static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
+static int oxnas_dwmac_setup_ox810se(struct oxnas_dwmac *dwmac)
{
- struct oxnas_dwmac *dwmac = priv;
unsigned int value;
int ret;
- /* Reset HW here before changing the glue configuration */
- ret = device_reset(dwmac->dev);
- if (ret)
+ ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
+ if (ret < 0)
return ret;
- ret = clk_prepare_enable(dwmac->clk);
- if (ret)
- return ret;
+ /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
+ value |= BIT(DWMAC_CKEN_GTX) |
+ /* Use simple mux for 25/125 MHz clock switching */
+ BIT(DWMAC_SIMPLE_MUX);
+
+ regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
+
+ return 0;
+}
+
+static int oxnas_dwmac_setup_ox820(struct oxnas_dwmac *dwmac)
+{
+ unsigned int value;
+ int ret;
ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
- if (ret < 0) {
- clk_disable_unprepare(dwmac->clk);
+ if (ret < 0)
return ret;
- }
/* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
value |= BIT(DWMAC_CKEN_GTX) |
/* Use simple mux for 25/125 MHz clock switching */
- BIT(DWMAC_SIMPLE_MUX) |
- /* set auto switch tx clock source */
- BIT(DWMAC_AUTO_TX_SOURCE) |
- /* enable tx & rx vardelay */
- BIT(DWMAC_CKEN_TX_OUT) |
- BIT(DWMAC_CKEN_TXN_OUT) |
- BIT(DWMAC_CKEN_TX_IN) |
- BIT(DWMAC_CKEN_RX_OUT) |
- BIT(DWMAC_CKEN_RXN_OUT) |
- BIT(DWMAC_CKEN_RX_IN);
+ BIT(DWMAC_SIMPLE_MUX) |
+ /* set auto switch tx clock source */
+ BIT(DWMAC_AUTO_TX_SOURCE) |
+ /* enable tx & rx vardelay */
+ BIT(DWMAC_CKEN_TX_OUT) |
+ BIT(DWMAC_CKEN_TXN_OUT) |
+ BIT(DWMAC_CKEN_TX_IN) |
+ BIT(DWMAC_CKEN_RX_OUT) |
+ BIT(DWMAC_CKEN_RXN_OUT) |
+ BIT(DWMAC_CKEN_RX_IN);
regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
/* set tx & rx vardelay */
@@ -100,6 +115,27 @@ static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
return 0;
}
+static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct oxnas_dwmac *dwmac = priv;
+ int ret;
+
+ /* Reset HW here before changing the glue configuration */
+ ret = device_reset(dwmac->dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dwmac->clk);
+ if (ret)
+ return ret;
+
+ ret = dwmac->data->setup(dwmac);
+ if (ret)
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct oxnas_dwmac *dwmac = priv;
@@ -128,6 +164,12 @@ static int oxnas_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ dwmac->data = (const struct oxnas_dwmac_data *)of_device_get_match_data(&pdev->dev);
+ if (!dwmac->data) {
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
plat_dat->init = oxnas_dwmac_init;
@@ -166,8 +208,23 @@ err_remove_config_dt:
return ret;
}
+static const struct oxnas_dwmac_data ox810se_dwmac_data = {
+ .setup = oxnas_dwmac_setup_ox810se,
+};
+
+static const struct oxnas_dwmac_data ox820_dwmac_data = {
+ .setup = oxnas_dwmac_setup_ox820,
+};
+
static const struct of_device_id oxnas_dwmac_match[] = {
- { .compatible = "oxsemi,ox820-dwmac" },
+ {
+ .compatible = "oxsemi,ox810se-dwmac",
+ .data = &ox810se_dwmac_data,
+ },
+ {
+ .compatible = "oxsemi,ox820-dwmac",
+ .data = &ox820_dwmac_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
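The oxnas glue layer above moves from one hard-wired init sequence to per-compatible setup hooks carried in the OF match table; probe fetches them once via of_device_get_match_data(). The retrieval pattern:

    /* Sketch: pick up the per-SoC ops attached to the matched entry */
    dwmac->data = of_device_get_match_data(&pdev->dev);
    if (!dwmac->data)
            return -EINVAL;   /* bound without a recognised compatible */

    ret = dwmac->data->setup(dwmac);   /* ox810se or ox820 variant */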
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 5c74b6279d69..2ffa0a11eea5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -113,8 +113,10 @@ static void rgmii_updatel(struct qcom_ethqos *ethqos,
rgmii_writel(ethqos, temp, offset);
}
-static void rgmii_dump(struct qcom_ethqos *ethqos)
+static void rgmii_dump(void *priv)
{
+ struct qcom_ethqos *ethqos = priv;
+
dev_dbg(&ethqos->pdev->dev, "Rgmii register dump\n");
dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_CONFIG: %x\n",
rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG));
@@ -447,6 +449,24 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed)
ethqos_configure(ethqos);
}
+static int ethqos_clks_config(void *priv, bool enabled)
+{
+ struct qcom_ethqos *ethqos = priv;
+ int ret = 0;
+
+ if (enabled) {
+ ret = clk_prepare_enable(ethqos->rgmii_clk);
+ if (ret) {
+ dev_err(&ethqos->pdev->dev, "rgmii_clk enable failed\n");
+ return ret;
+ }
+ } else {
+ clk_disable_unprepare(ethqos->rgmii_clk);
+ }
+
+ return ret;
+}
+
static int qcom_ethqos_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -466,6 +486,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
}
+ plat_dat->clks_config = ethqos_clks_config;
+
ethqos = devm_kzalloc(&pdev->dev, sizeof(*ethqos), GFP_KERNEL);
if (!ethqos) {
ret = -ENOMEM;
@@ -489,7 +511,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
goto err_mem;
}
- ret = clk_prepare_enable(ethqos->rgmii_clk);
+ ret = ethqos_clks_config(ethqos, true);
if (ret)
goto err_mem;
@@ -499,6 +521,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->bsp_priv = ethqos;
plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
+ plat_dat->dump_debug_regs = rgmii_dump;
plat_dat->has_gmac4 = 1;
plat_dat->pmt = 1;
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
@@ -507,12 +530,10 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
if (ret)
goto err_clk;
- rgmii_dump(ethqos);
-
return ret;
err_clk:
- clk_disable_unprepare(ethqos->rgmii_clk);
+ ethqos_clks_config(ethqos, false);
err_mem:
stmmac_remove_config_dt(pdev, plat_dat);
@@ -530,7 +551,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
return -ENODEV;
ret = stmmac_pltfr_remove(pdev);
- clk_disable_unprepare(ethqos->rgmii_clk);
+ ethqos_clks_config(ethqos, false);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 6924a6aacbd5..c469abc91fa1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -33,6 +33,7 @@ struct rk_gmac_ops {
void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+ bool regs_valid;
u32 regs[];
};
@@ -1092,6 +1093,7 @@ static const struct rk_gmac_ops rk3568_ops = {
.set_to_rmii = rk3568_set_to_rmii,
.set_rgmii_speed = rk3568_set_gmac_speed,
.set_rmii_speed = rk3568_set_gmac_speed,
+ .regs_valid = true,
.regs = {
0xfe2a0000, /* gmac0 */
0xfe010000, /* gmac1 */
@@ -1383,7 +1385,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
* to be distinguished.
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res) {
+ if (res && ops->regs_valid) {
int i = 0;
while (ops->regs[i]) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 617d0e4c6495..09644ab0d87a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -756,7 +756,7 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv)
if (err) {
dev_err(priv->device, "EMAC reset timeout\n");
- return -EFAULT;
+ return err;
}
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index 66fc8be34bb7..dde5b772a5af 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -22,21 +22,21 @@
#define ETHER_CLK_SEL_RMII_CLK_EN BIT(2)
#define ETHER_CLK_SEL_RMII_CLK_RST BIT(3)
#define ETHER_CLK_SEL_DIV_SEL_2 BIT(4)
-#define ETHER_CLK_SEL_DIV_SEL_20 BIT(0)
+#define ETHER_CLK_SEL_DIV_SEL_20 0
#define ETHER_CLK_SEL_FREQ_SEL_125M (BIT(9) | BIT(8))
#define ETHER_CLK_SEL_FREQ_SEL_50M BIT(9)
#define ETHER_CLK_SEL_FREQ_SEL_25M BIT(8)
-#define ETHER_CLK_SEL_FREQ_SEL_2P5M BIT(0)
-#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_FREQ_SEL_2P5M 0
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN 0
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
-#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN 0
#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC BIT(12)
#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV BIT(13)
-#define ETHER_CLK_SEL_TX_CLK_O_TX_I BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_O_TX_I 0
#define ETHER_CLK_SEL_TX_CLK_O_RMII_I BIT(14)
#define ETHER_CLK_SEL_TX_O_E_N_IN BIT(15)
-#define ETHER_CLK_SEL_RMII_CLK_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RMII_CLK_SEL_IN 0
#define ETHER_CLK_SEL_RMII_CLK_SEL_RX_C BIT(16)
#define ETHER_CLK_SEL_RX_TX_CLK_EN (ETHER_CLK_SEL_RX_CLK_EN | ETHER_CLK_SEL_TX_CLK_EN)
@@ -96,31 +96,41 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
val |= ETHER_CLK_SEL_TX_O_E_N_IN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ /* Set clock mux, start clock, set TX_O direction */
switch (dwmac->phy_intf_sel) {
case ETHER_CONFIG_INTF_RGMII:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
case ETHER_CONFIG_INTF_RMII:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RMII_CLK_RST;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
case ETHER_CONFIG_INTF_MII:
default:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
- ETHER_CLK_SEL_RMII_CLK_EN;
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
}
- /* Start clock */
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
-
- val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
-
spin_unlock_irqrestore(&dwmac->lock, flags);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index cbf4429fb1d2..d3b4765c1a5b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -32,6 +32,8 @@ static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
return tx_not_ls;
if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
+ ret = tx_err;
+
if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
x->tx_jabber++;
if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
@@ -53,16 +55,16 @@ static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
x->tx_deferred++;
- if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR))
+ if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) {
x->tx_underflow++;
+ ret |= tx_err_bump_tc;
+ }
if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
x->tx_ip_header_error++;
if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
x->tx_payload_error++;
-
- ret = tx_err;
}
if (unlikely(tdes3 & TDES3_DEFERRED))
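The dwmac4 descriptor change works because tx_frame_status is a bit mask (see the common.h hunk above): an error descriptor now reports tx_err immediately and ORs in tx_err_bump_tc on FIFO underflow, letting the core react, for example by raising the TX threshold. Shape of the return path:

    /* Sketch: accumulate status bits instead of assigning once at the end */
    int ret = tx_done;

    if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
            ret = tx_err;
            if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR))
                    ret |= tx_err_bump_tc;   /* hint: bump the TX threshold */
    }
    return ret;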
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 5f129733aabd..5b195d5051d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -10,7 +10,6 @@
#define __STMMAC_H__
#define STMMAC_RESOURCE_NAME "stmmaceth"
-#define DRV_MODULE_VERSION "Jan_2016"
#include <linux/clk.h>
#include <linux/hrtimer.h>
@@ -23,6 +22,7 @@
#include <linux/net_tstamp.h>
#include <linux/reset.h>
#include <net/page_pool.h>
+#include <uapi/linux/bpf.h>
struct stmmac_resources {
void __iomem *addr;
@@ -172,13 +172,28 @@ struct stmmac_flow_entry {
int is_l4;
};
+/* Rx Frame Steering */
+enum stmmac_rfs_type {
+ STMMAC_RFS_T_VLAN,
+ STMMAC_RFS_T_LLDP,
+ STMMAC_RFS_T_1588,
+ STMMAC_RFS_T_MAX,
+};
+
+struct stmmac_rfs_entry {
+ unsigned long cookie;
+ u16 etype;
+ int in_use;
+ int type;
+ int tc;
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
- int tx_coalesce;
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
@@ -213,7 +228,6 @@ struct stmmac_priv {
unsigned int flow_ctrl;
unsigned int pause;
struct mii_bus *mii;
- int mii_irq[PHY_MAX_ADDR];
struct phylink_config phylink_config;
struct phylink *phylink;
@@ -289,6 +303,10 @@ struct stmmac_priv {
struct stmmac_tc_entry *tc_entries;
unsigned int flow_entries_max;
struct stmmac_flow_entry *flow_entries;
+ unsigned int rfs_entries_max[STMMAC_RFS_T_MAX];
+ unsigned int rfs_entries_cnt[STMMAC_RFS_T_MAX];
+ unsigned int rfs_entries_total;
+ struct stmmac_rfs_entry *rfs_entries;
/* Pulse Per Second output */
struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
@@ -317,8 +335,8 @@ void stmmac_set_ethtool_ops(struct net_device *netdev);
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
-int stmmac_open(struct net_device *dev);
-int stmmac_release(struct net_device *dev);
+int stmmac_xdp_open(struct net_device *dev);
+void stmmac_xdp_release(struct net_device *dev);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
int stmmac_dvr_remove(struct device *dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d89455803bed..164dff5ec32e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -290,7 +290,6 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
strlcpy(info->bus_info, pci_name(priv->plat->pdev),
sizeof(info->bus_info));
}
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
@@ -463,7 +462,9 @@ static int stmmac_nway_reset(struct net_device *dev)
}
static void stmmac_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct stmmac_priv *priv = netdev_priv(netdev);
@@ -474,7 +475,9 @@ static void stmmac_get_ringparam(struct net_device *netdev,
}
static int stmmac_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending < DMA_MIN_RX_SIZE ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index da8306f60730..639a753266e6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -132,6 +132,8 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan);
#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
@@ -400,7 +402,7 @@ static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
* Description: this function is to verify and enter in LPI mode in case of
* EEE.
*/
-static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
@@ -410,13 +412,14 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
- return; /* still unfinished work */
+ return -EBUSY; /* still unfinished work */
}
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
stmmac_set_eee_mode(priv, priv->hw,
priv->plat->en_tx_lpi_clockgating);
+ return 0;
}
/**
@@ -448,8 +451,8 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
- stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
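
The EEE rework above changes the timer policy: stmmac_enable_eee_mode() now reports -EBUSY while any TX queue still has in-flight descriptors, and the callback re-arms only in that case, so the timer stops firing once LPI has been entered. A standalone toy model of that decision (enable_eee_mode() here is a stand-in, not the driver function):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* stands in for stmmac_enable_eee_mode(): -EBUSY while TX is pending */
	static int enable_eee_mode(bool tx_pending)
	{
		return tx_pending ? -EBUSY : 0;
	}

	int main(void)
	{
		for (int busy = 1; busy >= 0; busy--) {
			if (enable_eee_mode(busy))
				printf("queues busy: re-arm eee_ctrl_timer\n");
			else
				printf("entered LPI: leave the timer idle\n");
		}
		return 0;
	}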
/**
@@ -518,14 +521,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
return true;
}
-static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv)
-{
- /* Correct the clk domain crossing(CDC) error */
- if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
- return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
- return 0;
-}
-
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
* @priv: driver private structure
* @p : descriptor pointer
@@ -557,7 +552,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
}
if (found) {
- ns -= stmmac_cdc_adjust(priv);
+ ns -= priv->plat->cdc_error_adj;
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -594,7 +589,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
- ns -= stmmac_cdc_adjust(priv);
+ ns -= priv->plat->cdc_error_adj;
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);
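
The two hunks above replace a division executed on every TX/RX timestamp with a value precomputed once at PTP registration time (see the stmmac_ptp.c hunk later in this diff). The correction is two periods of the PTP reference clock. A runnable example of the arithmetic, assuming a hypothetical 250 MHz clk_ptp_rate:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		uint64_t clk_ptp_rate = 250000000;	/* Hz, assumed */
		uint64_t cdc_error_adj = (2 * NSEC_PER_SEC) / clk_ptp_rate;

		/* two 4 ns clock periods: 8 ns subtracted per timestamp */
		printf("cdc_error_adj = %llu ns\n",
		       (unsigned long long)cdc_error_adj);
		return 0;
	}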
@@ -644,10 +639,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
__func__, config.flags, config.tx_type, config.rx_filter);
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
if (config.tx_type != HWTSTAMP_TX_OFF &&
config.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
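
The deleted config.flags check is not lost: with the introduction of HWTSTAMP_FLAG_BONDED_PHC_INDEX the ioctl core now validates the flags field before calling into the driver, so per-driver rejection of non-zero flags became redundant (the same deletion recurs in the TI drivers later in this diff). For reference, a hedged userspace sketch of the ioctl these paths serve; fd is assumed to be any AF_INET datagram socket:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int enable_tx_timestamps(int fd, const char *ifname)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&cfg, 0, sizeof(cfg));		/* flags left at 0 */
		cfg.tx_type = HWTSTAMP_TX_ON;
		cfg.rx_filter = HWTSTAMP_FILTER_NONE;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
	}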
@@ -899,6 +890,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
int ret;
+ if (priv->plat->ptp_clk_freq_config)
+ priv->plat->ptp_clk_freq_config(priv);
+
ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
if (ret)
return ret;
@@ -921,8 +915,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
- stmmac_ptp_register(priv);
-
return 0;
}
@@ -1461,16 +1453,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
if (!buf->page) {
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->page)
return -ENOMEM;
buf->page_offset = stmmac_rx_offset(priv);
}
if (priv->sph && !buf->sec_page) {
- buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
return -ENOMEM;
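
The gfp changes above tie RX page allocation to the DMA engine's reach: dma_cap.addr64 <= 32 means the controller drives at most 32 address bits, so buffers must come from the low 4 GiB. A sketch of the pattern as used here (stmmac context assumed):

	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;

	/* 32-bit DMA engine: restrict the page pool to the low 4 GiB */
	if (priv->dma_cap.addr64 <= 32)
		gfp |= GFP_DMA32;

	buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);

page_pool_dev_alloc_pages() hardcodes GFP_ATOMIC, which is why the call sites switch to the variant taking an explicit mask.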
@@ -2390,7 +2386,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
bool work_done = true;
/* Avoids TX time-out as we are sharing with slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
budget = min(budget, stmmac_tx_avail(priv, queue));
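
The trans_start writes in this and the two later XDP TX hunks move to txq_trans_cond_update(), which pairs with the READ_ONCE() added in am65-cpsw further down: the stamp is accessed locklessly from several contexts, so both sides need the annotations, and the conditional write also avoids dirtying the cacheline when jiffies has not advanced. The 5.17 helper is approximately:

	static inline void txq_trans_cond_update(struct netdev_queue *txq)
	{
		unsigned long now = jiffies;

		if (READ_ONCE(txq->trans_start) != now)
			WRITE_ONCE(txq->trans_start, now);
	}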
@@ -2474,6 +2470,21 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
return !!budget && work_done;
}
+static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
+{
+ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
+ tc += 64;
+
+ if (priv->plat->force_thresh_dma_mode)
+ stmmac_set_dma_operation_mode(priv, tc, tc, chan);
+ else
+ stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
+ chan);
+
+ priv->xstats.threshold = tc;
+ }
+}
+
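stmmac_bump_dma_threshold() consolidates two identical open-coded blocks deleted later in this file. tc here is the driver's module-scope TX threshold: on an underflow it grows in 64-byte steps until it passes 256, unless the stats already record store-and-forward mode. A standalone toy model of the saturation, with SF_DMA_MODE as a placeholder sentinel:

	#include <stdio.h>

	#define SF_DMA_MODE -1	/* placeholder for the store-and-forward sentinel */

	int main(void)
	{
		int tc = 64;		/* boot-time TX threshold */
		int threshold = tc;	/* mirrors priv->xstats.threshold */

		for (int i = 1; i <= 5; i++) {
			if (threshold != SF_DMA_MODE && tc <= 256) {
				tc += 64;
				threshold = tc;
			}
			printf("underflow %d: threshold=%d\n", i, threshold);
		}
		return 0;	/* 128, 192, 256, 320, then stays at 320 */
	}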
/**
* stmmac_tx_clean - to manage the transmission completion
* @priv: driver private structure
@@ -2539,6 +2550,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
/* ... verify the status error condition */
if (unlikely(status & tx_err)) {
priv->dev->stats.tx_errors++;
+ if (unlikely(status & tx_err_bump_tc))
+ stmmac_bump_dma_threshold(priv, queue);
} else {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
@@ -2636,8 +2649,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
priv->eee_sw_timer_en) {
- stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/* We still have pending packets, let's call for a new scheduling */
@@ -2789,21 +2802,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
for (chan = 0; chan < tx_channel_count; chan++) {
if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
- if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
- (tc <= 256)) {
- tc += 64;
- if (priv->plat->force_thresh_dma_mode)
- stmmac_set_dma_operation_mode(priv,
- tc,
- tc,
- chan);
- else
- stmmac_set_dma_operation_mode(priv,
- tc,
- SF_DMA_MODE,
- chan);
- priv->xstats.threshold = tc;
- }
+ stmmac_bump_dma_threshold(priv, chan);
} else if (unlikely(status[chan] == tx_hard_error)) {
stmmac_tx_err(priv, chan);
}
@@ -3241,7 +3240,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
- * @init_ptp: initialize PTP if set
+ * @ptp_register: register PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -3251,7 +3250,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -3308,13 +3307,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_mmc_setup(priv);
- if (init_ptp) {
- ret = stmmac_init_ptp(priv);
- if (ret == -EOPNOTSUPP)
- netdev_warn(priv->dev, "PTP not supported by HW\n");
- else if (ret)
- netdev_warn(priv->dev, "PTP init failed\n");
- }
+ ret = stmmac_init_ptp(priv);
+ if (ret == -EOPNOTSUPP)
+ netdev_warn(priv->dev, "PTP not supported by HW\n");
+ else if (ret)
+ netdev_warn(priv->dev, "PTP init failed\n");
+ else if (ptp_register)
+ stmmac_ptp_register(priv);
priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
@@ -3673,7 +3672,7 @@ static int stmmac_request_irq(struct net_device *dev)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-int stmmac_open(struct net_device *dev)
+static int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
int mode = priv->plat->phy_interface;
@@ -3797,7 +3796,7 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
* Description:
* This is the stop entry point of the driver.
*/
-int stmmac_release(struct net_device *dev)
+static int stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
@@ -4482,6 +4481,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
while (dirty-- > 0) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
@@ -4494,13 +4497,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
p = rx_q->dma_rx + entry;
if (!buf->page) {
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->page)
break;
}
if (priv->sph && !buf->sec_page) {
- buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
break;
@@ -4689,7 +4692,7 @@ static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
__netif_tx_lock(nq, cpu);
/* Avoids TX time-out as we are sharing with slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
if (res == STMMAC_XDP_TX)
@@ -4722,7 +4725,7 @@ static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
res = STMMAC_XDP_REDIRECT;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(priv->dev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->dev, prog, act);
@@ -5753,21 +5756,7 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
if (unlikely(status & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
- if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
- tc <= 256) {
- tc += 64;
- if (priv->plat->force_thresh_dma_mode)
- stmmac_set_dma_operation_mode(priv,
- tc,
- tc,
- chan);
- else
- stmmac_set_dma_operation_mode(priv,
- tc,
- SF_DMA_MODE,
- chan);
- priv->xstats.threshold = tc;
- }
+ stmmac_bump_dma_threshold(priv, chan);
} else if (unlikely(status == tx_hard_error)) {
stmmac_tx_err(priv, chan);
}
@@ -6327,7 +6316,7 @@ static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
__netif_tx_lock(nq, cpu);
/* Avoids TX time-out as we are sharing with slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
for (i = 0; i < num_frames; i++) {
int res;
@@ -6463,6 +6452,140 @@ void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags);
}
+void stmmac_xdp_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
+
+ /* Disable NAPI process */
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ /* Free the IRQ lines */
+ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
+
+ /* Stop TX/RX DMA channels */
+ stmmac_stop_all_dma(priv);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv);
+
+ /* Disable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, false);
+
+ /* set trans_start so we don't get spurious
+ * watchdogs during reset
+ */
+ netif_trans_update(dev);
+ netif_carrier_off(dev);
+}
+
+int stmmac_xdp_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_cnt, tx_cnt);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ u32 buf_size;
+ bool sph_en;
+ u32 chan;
+ int ret;
+
+ ret = alloc_dma_desc_resources(priv);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto dma_desc_error;
+ }
+
+ ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++)
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+ /* Adjust Split header */
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_cnt; chan++) {
+ rx_q = &priv->rx_queue[chan];
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+
+ if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ rx_q->queue_index);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_buf_sz,
+ rx_q->queue_index);
+ }
+
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ }
+
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_cnt; chan++) {
+ tx_q = &priv->tx_queue[chan];
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
+
+ hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx_q->txtimer.function = stmmac_tx_timer;
+ }
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, true);
+
+ /* Start Rx & Tx DMA Channels */
+ stmmac_start_all_dma(priv);
+
+ ret = stmmac_request_irq(dev);
+ if (ret)
+ goto irq_error;
+
+ /* Enable NAPI process */
+ stmmac_enable_all_queues(priv);
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+
+ return 0;
+
+irq_error:
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ stmmac_hw_teardown(dev);
+init_error:
+ free_dma_desc_resources(priv);
+dma_desc_error:
+ return ret;
+}
+
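stmmac_xdp_open()/stmmac_xdp_release() are trimmed variants of the (now static) ndo open/stop paths: they rebuild the descriptor rings and restart DMA but leave phylink and PTP alone, which is all that swapping a bpf program on a running interface requires. Usage as seen in the stmmac_xdp.c hunk later in this diff:

	/* dev is running and the prog pointer actually changes */
	if (if_running && need_update)
		stmmac_xdp_release(dev);	/* quiesce DMA, keep the link up */

	old_prog = xchg(&priv->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (if_running && need_update)
		stmmac_xdp_open(dev);		/* rebuild rings, restart DMA */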
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -7038,7 +7161,8 @@ int stmmac_dvr_probe(struct device *device,
pm_runtime_get_noresume(device);
pm_runtime_set_active(device);
- pm_runtime_enable(device);
+ if (!pm_runtime_enabled(device))
+ pm_runtime_enable(device);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -7086,6 +7210,9 @@ int stmmac_dvr_probe(struct device *device,
stmmac_init_fs(ndev);
#endif
+ if (priv->plat->dump_debug_regs)
+ priv->plat->dump_debug_regs(priv->plat->bsp_priv);
+
/* Let pm_runtime_put() disable the clocks.
* If CONFIG_PM is not enabled, the clocks will stay powered.
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 580cc035536b..1c9f02f9c317 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -102,7 +102,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
time.tv_nsec = priv->plat->est->btr_reserve[0];
time.tv_sec = priv->plat->est->btr_reserve[1];
basetime = timespec64_to_ktime(time);
- cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
+ cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC +
priv->plat->est->ctr[0];
time = stmmac_calc_tas_basetime(basetime,
current_time_ns,
@@ -297,9 +297,6 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
{
int i;
- if (priv->plat->ptp_clk_freq_config)
- priv->plat->ptp_clk_freq_config(priv);
-
for (i = 0; i < priv->dma_cap.pps_out_num; i++) {
if (i >= STMMAC_PPS_MAX)
break;
@@ -309,6 +306,11 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
if (priv->plat->ptp_max_adj)
stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
+ /* Calculate the clock domain crossing (CDC) error if necessary */
+ priv->plat->cdc_error_adj = 0;
+ if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
+ priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
+
stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 1c4ea0b1b845..d61766eeac6d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -232,11 +232,35 @@ static int tc_setup_cls_u32(struct stmmac_priv *priv,
}
}
+static int tc_rfs_init(struct stmmac_priv *priv)
+{
+ int i;
+
+ priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
+ priv->rfs_entries_max[STMMAC_RFS_T_LLDP] = 1;
+ priv->rfs_entries_max[STMMAC_RFS_T_1588] = 1;
+
+ for (i = 0; i < STMMAC_RFS_T_MAX; i++)
+ priv->rfs_entries_total += priv->rfs_entries_max[i];
+
+ priv->rfs_entries = devm_kcalloc(priv->device,
+ priv->rfs_entries_total,
+ sizeof(*priv->rfs_entries),
+ GFP_KERNEL);
+ if (!priv->rfs_entries)
+ return -ENOMEM;
+
+ dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n",
+ priv->rfs_entries_total);
+
+ return 0;
+}
+
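tc_rfs_init() sizes one flat table for all cookie-tracked filter types: eight VLAN-priority rules plus one rule each for the LLDP and PTP EtherTypes, ten entries total, allocated once with devm_kcalloc(). A standalone model of the sizing:

	#include <stdio.h>

	enum { RFS_T_VLAN, RFS_T_LLDP, RFS_T_1588, RFS_T_MAX };

	int main(void)
	{
		unsigned int max[RFS_T_MAX] = {
			[RFS_T_VLAN] = 8,	/* VLAN-priority steering rules */
			[RFS_T_LLDP] = 1,	/* one LLDP EtherType rule */
			[RFS_T_1588] = 1,	/* one PTP EtherType rule */
		};
		unsigned int total = 0;

		for (int i = 0; i < RFS_T_MAX; i++)
			total += max[i];

		printf("rfs_entries_total = %u\n", total);	/* 10 */
		return 0;
	}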
static int tc_init(struct stmmac_priv *priv)
{
struct dma_features *dma_cap = &priv->dma_cap;
unsigned int count;
- int i;
+ int ret, i;
if (dma_cap->l3l4fnum) {
priv->flow_entries_max = dma_cap->l3l4fnum;
@@ -250,10 +274,14 @@ static int tc_init(struct stmmac_priv *priv)
for (i = 0; i < priv->flow_entries_max; i++)
priv->flow_entries[i].idx = i;
- dev_info(priv->device, "Enabled Flow TC (entries=%d)\n",
+ dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n",
priv->flow_entries_max);
}
+ ret = tc_rfs_init(priv);
+ if (ret)
+ return -ENOMEM;
+
if (!priv->plat->fpe_cfg) {
priv->plat->fpe_cfg = devm_kzalloc(priv->device,
sizeof(*priv->plat->fpe_cfg),
@@ -425,6 +453,8 @@ static int tc_parse_flow_actions(struct stmmac_priv *priv,
return 0;
}
+#define ETHER_TYPE_FULL_MASK cpu_to_be16(~0)
+
static int tc_add_basic_flow(struct stmmac_priv *priv,
struct flow_cls_offload *cls,
struct stmmac_flow_entry *entry)
@@ -438,6 +468,7 @@ static int tc_add_basic_flow(struct stmmac_priv *priv,
return -EINVAL;
flow_rule_match_basic(rule, &match);
+
entry->ip_proto = match.key->ip_proto;
return 0;
}
@@ -607,16 +638,45 @@ static int tc_del_flow(struct stmmac_priv *priv,
return ret;
}
+static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls,
+ bool get_free)
+{
+ int i;
+
+ for (i = 0; i < priv->rfs_entries_total; i++) {
+ struct stmmac_rfs_entry *entry = &priv->rfs_entries[i];
+
+ if (entry->cookie == cls->cookie)
+ return entry;
+ if (get_free && entry->in_use == false)
+ return entry;
+ }
+
+ return NULL;
+}
+
#define VLAN_PRIO_FULL_MASK (0x07)
static int tc_add_vlan_flow(struct stmmac_priv *priv,
struct flow_cls_offload *cls)
{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
struct flow_match_vlan match;
+ if (!entry) {
+ entry = tc_find_rfs(priv, cls, true);
+ if (!entry)
+ return -ENOENT;
+ }
+
+ if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >=
+ priv->rfs_entries_max[STMMAC_RFS_T_VLAN])
+ return -ENOENT;
+
/* Nothing to do here */
if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
return -EINVAL;
@@ -638,6 +698,12 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv,
prio = BIT(match.key->vlan_priority);
stmmac_rx_queue_prio(priv, priv->hw, prio, tc);
+
+ entry->in_use = true;
+ entry->cookie = cls->cookie;
+ entry->tc = tc;
+ entry->type = STMMAC_RFS_T_VLAN;
+ priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++;
}
return 0;
@@ -646,12 +712,40 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv,
static int tc_del_vlan_flow(struct stmmac_priv *priv,
struct flow_cls_offload *cls)
{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
+
+ if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN)
+ return -ENOENT;
+
+ stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc);
+
+ entry->in_use = false;
+ entry->cookie = 0;
+ entry->tc = 0;
+ entry->type = 0;
+
+ priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--;
+
+ return 0;
+}
+
+static int tc_add_ethtype_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+ struct flow_match_basic match;
+
+ if (!entry) {
+ entry = tc_find_rfs(priv, cls, true);
+ if (!entry)
+ return -ENOENT;
+ }
/* Nothing to do here */
- if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
+ if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
return -EINVAL;
if (tc < 0) {
@@ -659,7 +753,86 @@ static int tc_del_vlan_flow(struct stmmac_priv *priv,
return -EINVAL;
}
- stmmac_rx_queue_prio(priv, priv->hw, 0, tc);
+ flow_rule_match_basic(rule, &match);
+
+ if (match.mask->n_proto) {
+ u16 etype = ntohs(match.key->n_proto);
+
+ if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
+ netdev_err(priv->dev, "Only full mask is supported for EthType filter");
+ return -EINVAL;
+ }
+ switch (etype) {
+ case ETH_P_LLDP:
+ if (priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP] >=
+ priv->rfs_entries_max[STMMAC_RFS_T_LLDP])
+ return -ENOENT;
+
+ entry->type = STMMAC_RFS_T_LLDP;
+ priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]++;
+
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_DCBCPQ, tc);
+ break;
+ case ETH_P_1588:
+ if (priv->rfs_entries_cnt[STMMAC_RFS_T_1588] >=
+ priv->rfs_entries_max[STMMAC_RFS_T_1588])
+ return -ENOENT;
+
+ entry->type = STMMAC_RFS_T_1588;
+ priv->rfs_entries_cnt[STMMAC_RFS_T_1588]++;
+
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_PTPQ, tc);
+ break;
+ default:
+ netdev_err(priv->dev, "EthType(0x%x) is not supported", etype);
+ return -EINVAL;
+ }
+
+ entry->in_use = true;
+ entry->cookie = cls->cookie;
+ entry->tc = tc;
+ entry->etype = etype;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int tc_del_ethtype_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
+
+ if (!entry || !entry->in_use ||
+ entry->type < STMMAC_RFS_T_LLDP ||
+ entry->type > STMMAC_RFS_T_1588)
+ return -ENOENT;
+
+ switch (entry->etype) {
+ case ETH_P_LLDP:
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_DCBCPQ, 0);
+ priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]--;
+ break;
+ case ETH_P_1588:
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_PTPQ, 0);
+ priv->rfs_entries_cnt[STMMAC_RFS_T_1588]--;
+ break;
+ default:
+ netdev_err(priv->dev, "EthType(0x%x) is not supported",
+ entry->etype);
+ return -EINVAL;
+ }
+
+ entry->in_use = false;
+ entry->cookie = 0;
+ entry->tc = 0;
+ entry->etype = 0;
+ entry->type = 0;
return 0;
}
@@ -673,6 +846,10 @@ static int tc_add_flow_cls(struct stmmac_priv *priv,
if (!ret)
return ret;
+ ret = tc_add_ethtype_flow(priv, cls);
+ if (!ret)
+ return ret;
+
return tc_add_vlan_flow(priv, cls);
}
@@ -685,6 +862,10 @@ static int tc_del_flow_cls(struct stmmac_priv *priv,
if (!ret)
return ret;
+ ret = tc_del_ethtype_flow(priv, cls);
+ if (!ret)
+ return ret;
+
return tc_del_vlan_flow(priv, cls);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
index 2a616c6f7cd0..9d4d8c3dad0a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
@@ -119,7 +119,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
need_update = !!priv->xdp_prog != !!prog;
if (if_running && need_update)
- stmmac_release(dev);
+ stmmac_xdp_release(dev);
old_prog = xchg(&priv->xdp_prog, prog);
if (old_prog)
@@ -129,7 +129,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv);
if (if_running && need_update)
- stmmac_open(dev);
+ stmmac_xdp_open(dev);
return 0;
}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index d2d4f47c7e28..dba9f12efa1c 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4893,8 +4893,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned long casreg_len;
struct net_device *dev;
struct cas *cp;
- int i, err, pci_using_dac;
u16 pci_cmd;
+ int i, err;
u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
if (cas_version_printed++ == 0)
@@ -4965,23 +4965,10 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Configure DMA attributes. */
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (err < 0) {
- dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
- "for consistent allocations\n");
- goto err_out_free_res;
- }
-
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA configuration, "
- "aborting\n");
- goto err_out_free_res;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+ goto err_out_free_res;
}
casreg_len = pci_resource_len(pdev, 0);
@@ -5087,8 +5074,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
- if (pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
+ dev->features |= NETIF_F_HIGHDMA;
/* MTU range: 60 - varies or 9000 */
dev->min_mtu = CAS_MIN_MTU;
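
The cassini conversion above (and the tehuti one below) replaces the try-64-bit-else-32-bit ladder with dma_set_mask_and_coherent(): on current kernels a 64-bit mask request does not fail on any platform where a smaller mask would have succeeded, so the 32-bit fallback was dead code and NETIF_F_HIGHDMA can be set unconditionally. The resulting idiom:

	/* one call sets both the streaming and the coherent mask */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto err_out_free_res;
	}

	/* highmem pages are always addressable under a 64-bit mask */
	dev->features |= NETIF_F_HIGHDMA;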
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 0775a5542f2f..985073eba3bd 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1884,10 +1884,10 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *ndev;
struct bdx_priv *priv;
- int err, pci_using_dac, port;
unsigned long pciaddr;
u32 regionSize;
struct pci_nic *nic;
+ int err, port;
ENTER;
@@ -1900,16 +1900,10 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) /* it triggers interrupt, dunno why. */
goto err_pci; /* it's not a problem though */
- if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) &&
- !(err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))) {
- pci_using_dac = 1;
- } else {
- if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) ||
- (err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
- pr_err("No usable DMA configuration, aborting\n");
- goto err_dma;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_regions(pdev, BDX_DRV_NAME);
@@ -1982,16 +1976,14 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* these fields are used for info purposes only
* so we can have them same for all ports of the board */
ndev->if_port = port;
- ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
- | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
- ;
+ ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM |
+ NETIF_F_HIGHDMA;
+
ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
- if (pci_using_dac)
- ndev->features |= NETIF_F_HIGHDMA;
-
/************** priv ****************/
priv = nic->priv[port] = netdev_priv(ndev);
@@ -2245,9 +2237,13 @@ static inline int bdx_tx_fifo_size_to_packets(int tx_size)
* bdx_get_ringparam - report ring sizes
* @netdev
* @ring
+ * @kernel_ring
+ * @extack
*/
static void
-bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct bdx_priv *priv = netdev_priv(netdev);
@@ -2262,9 +2258,13 @@ bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
* bdx_set_ringparam - set ring sizes
* @netdev
* @ring
+ * @kernel_ring
+ * @extack
*/
static int
-bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct bdx_priv *priv = netdev_priv(netdev);
int rx_size = 0;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index b05de9b61ad6..d45b6bb86f0b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -453,8 +453,11 @@ static int am65_cpsw_set_channels(struct net_device *ndev,
return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count);
}
-static void am65_cpsw_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static void
+am65_cpsw_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index c092cb61416a..8251d7eb001b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -345,7 +345,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
netif_txq = netdev_get_tx_queue(ndev, txqueue);
tx_chn = &common->tx_chns[txqueue];
- trans_start = netif_txq->trans_start;
+ trans_start = READ_ONCE(netif_txq->trans_start);
netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
txqueue,
@@ -1844,13 +1844,14 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
if (ret < 0) {
dev_err(dev, "%pOF error reading port_id %d\n",
port_np, ret);
- return ret;
+ goto of_node_put;
}
if (!port_id || port_id > common->port_num) {
dev_err(dev, "%pOF has invalid port_id %u %s\n",
port_np, port_id, port_np->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto of_node_put;
}
port = am65_common_get_port(common, port_id);
@@ -1866,8 +1867,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
(AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
- if (IS_ERR(port->slave.mac_sl))
- return PTR_ERR(port->slave.mac_sl);
+ if (IS_ERR(port->slave.mac_sl)) {
+ ret = PTR_ERR(port->slave.mac_sl);
+ goto of_node_put;
+ }
port->disabled = !of_device_is_available(port_np);
if (port->disabled) {
@@ -1880,7 +1883,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
ret = PTR_ERR(port->slave.ifphy);
dev_err(dev, "%pOF error retrieving port phy: %d\n",
port_np, ret);
- return ret;
+ goto of_node_put;
}
port->slave.mac_only =
@@ -1889,10 +1892,12 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
/* get phy/link info */
if (of_phy_is_fixed_link(port_np)) {
ret = of_phy_register_fixed_link(port_np);
- if (ret)
- return dev_err_probe(dev, ret,
+ if (ret) {
+ ret = dev_err_probe(dev, ret,
"failed to register fixed-link phy %pOF\n",
port_np);
+ goto of_node_put;
+ }
port->slave.phy_node = of_node_get(port_np);
} else {
port->slave.phy_node =
@@ -1902,14 +1907,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
if (!port->slave.phy_node) {
dev_err(dev,
"slave[%d] no phy found\n", port_id);
- return -ENODEV;
+ ret = -ENODEV;
+ goto of_node_put;
}
ret = of_get_phy_mode(port_np, &port->slave.phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
port_np, ret);
- return ret;
+ goto of_node_put;
}
ret = of_get_mac_address(port_np, port->slave.mac_addr);
@@ -1932,6 +1938,11 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
}
return 0;
+
+of_node_put:
+ of_node_put(port_np);
+ of_node_put(node);
+ return ret;
}
static void am65_cpsw_pcpu_stats_free(void *data)
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 7449436fc87c..bef5e68dac31 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -817,7 +817,9 @@ static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
}
static void cpmac_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct cpmac_priv *priv = netdev_priv(dev);
@@ -833,7 +835,9 @@ static void cpmac_get_ringparam(struct net_device *dev,
}
static int cpmac_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct cpmac_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 33142d505fc8..03575c017500 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -349,7 +349,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct cpsw_common *cpsw = ndev_to_cpsw(xmeta->ndev);
int pkt_size = cpsw->rx_packet_max;
int ret = 0, port, ch = xmeta->ch;
- int headroom = CPSW_HEADROOM;
+ int headroom = CPSW_HEADROOM_NA;
struct net_device *ndev = xmeta->ndev;
struct cpsw_priv *priv;
struct page_pool *pool;
@@ -392,7 +392,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
- int headroom = CPSW_HEADROOM, size = len;
+ int size = len;
xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
@@ -442,7 +442,7 @@ requeue:
xmeta->ndev = ndev;
xmeta->ch = ch;
- dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
pkt_size, 0);
if (ret < 0) {
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index 158c8d3793f4..aa42141be3c0 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -658,7 +658,9 @@ err:
}
void cpsw_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
@@ -671,7 +673,9 @@ void cpsw_get_ringparam(struct net_device *ndev,
}
int cpsw_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
int descs_num, ret;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 279e261e4720..bd4b1528cf99 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -283,7 +283,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
{
struct page *new_page, *page = token;
void *pa = page_address(page);
- int headroom = CPSW_HEADROOM;
+ int headroom = CPSW_HEADROOM_NA;
struct cpsw_meta_xdp *xmeta;
struct cpsw_common *cpsw;
struct net_device *ndev;
@@ -336,7 +336,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
- int headroom = CPSW_HEADROOM, size = len;
+ int size = len;
xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
@@ -386,7 +386,7 @@ requeue:
xmeta->ndev = ndev;
xmeta->ch = ch;
- dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
pkt_size, 0);
if (ret < 0) {
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index ecc2a6b7e28f..8f6817f346ba 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -626,10 +626,6 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- /* reserved for future extensions */
- if (cfg.flags)
- return -EINVAL;
-
if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
@@ -710,20 +706,26 @@ int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
struct cpsw_priv *priv = netdev_priv(dev);
struct cpsw_common *cpsw = priv->cpsw;
int slave_no = cpsw_slave_index(cpsw, priv);
+ struct phy_device *phy;
if (!netif_running(dev))
return -EINVAL;
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return cpsw_hwtstamp_set(dev, req);
- case SIOCGHWTSTAMP:
- return cpsw_hwtstamp_get(dev, req);
+ phy = cpsw->slaves[slave_no].phy;
+
+ if (!phy_has_hwtstamp(phy)) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return cpsw_hwtstamp_set(dev, req);
+ case SIOCGHWTSTAMP:
+ return cpsw_hwtstamp_get(dev, req);
+ }
}
- if (!cpsw->slaves[slave_no].phy)
- return -EOPNOTSUPP;
- return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
+ if (phy)
+ return phy_mii_ioctl(phy, req, cmd);
+
+ return -EOPNOTSUPP;
}
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
@@ -1120,7 +1122,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv)
xmeta->ndev = priv->ndev;
xmeta->ch = ch;
- dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
+ dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM_NA;
ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
page, dma,
cpsw->rx_packet_max,
@@ -1144,7 +1146,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv)
static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
int size)
{
- struct page_pool_params pp_params;
+ struct page_pool_params pp_params = {};
struct page_pool *pool;
pp_params.order = 0;
@@ -1360,7 +1362,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
xdp_do_flush_map();
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(ndev, prog, act);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 435668ee542d..74555970730c 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -6,6 +6,8 @@
#ifndef DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_
#define DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_
+#include <uapi/linux/bpf.h>
+
#include "davinci_cpdma.h"
#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
@@ -491,9 +493,13 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata);
int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata);
int cpsw_nway_reset(struct net_device *ndev);
void cpsw_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering);
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack);
int cpsw_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering);
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack);
int cpsw_set_channels_common(struct net_device *ndev,
struct ethtool_channels *chs,
cpdma_handler_fn rx_handler);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index d55f06120ce7..31df3267a01a 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1454,23 +1454,33 @@ static int emac_dev_open(struct net_device *ndev)
}
/* Request IRQ */
- while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
- res_num))) {
- for (irq_num = res->start; irq_num <= res->end; irq_num++) {
- if (request_irq(irq_num, emac_irq, 0, ndev->name,
- ndev)) {
- dev_err(emac_dev,
- "DaVinci EMAC: request_irq() failed\n");
- ret = -EBUSY;
+ if (dev_of_node(&priv->pdev->dev)) {
+ while ((ret = platform_get_irq_optional(priv->pdev, res_num)) != -ENXIO) {
+ if (ret < 0)
+ goto rollback;
+ ret = request_irq(ret, emac_irq, 0, ndev->name, ndev);
+ if (ret) {
+ dev_err(emac_dev, "DaVinci EMAC: request_irq() failed\n");
goto rollback;
}
+ res_num++;
}
- res_num++;
+ } else {
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, res_num))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++) {
+ ret = request_irq(irq_num, emac_irq, 0, ndev->name, ndev);
+ if (ret) {
+ dev_err(emac_dev, "DaVinci EMAC: request_irq() failed\n");
+ goto rollback;
+ }
+ }
+ res_num++;
+ }
+ /* prepare counters for rollback in case of an error */
+ res_num--;
+ irq_num--;
}
- /* prepare counters for rollback in case of an error */
- res_num--;
- irq_num--;
/* Start/Enable EMAC hardware */
emac_hw_enable(priv);
@@ -1554,16 +1564,24 @@ err:
napi_disable(&priv->napi);
rollback:
- for (q = res_num; q >= 0; q--) {
- res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
- /* at the first iteration, irq_num is already set to the
- * right value
- */
- if (q != res_num)
- irq_num = res->end;
+ if (dev_of_node(&priv->pdev->dev)) {
+ for (q = res_num - 1; q >= 0; q--) {
+ irq_num = platform_get_irq(priv->pdev, q);
+ if (irq_num > 0)
+ free_irq(irq_num, ndev);
+ }
+ } else {
+ for (q = res_num; q >= 0; q--) {
+ res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
+ /* at the first iteration, irq_num is already set to the
+ * right value
+ */
+ if (q != res_num)
+ irq_num = res->end;
- for (m = irq_num; m >= res->start; m--)
- free_irq(m, ndev);
+ for (m = irq_num; m >= res->start; m--)
+ free_irq(m, ndev);
+ }
}
cpdma_ctlr_stop(priv->dma);
pm_runtime_put(&priv->pdev->dev);
@@ -1899,13 +1917,10 @@ static int davinci_emac_probe(struct platform_device *pdev)
goto err_free_txchan;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "error getting irq res\n");
- rc = -ENOENT;
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0)
goto err_free_rxchan;
- }
- ndev->irq = res->start;
+ ndev->irq = rc;
rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
if (!rc)
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 33c1592d5381..751fb0bc65c5 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2654,10 +2654,6 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- /* reserved for future extensions */
- if (cfg.flags)
- return -EINVAL;
-
switch (cfg.tx_type) {
case HWTSTAMP_TX_OFF:
gbe_dev->tx_ts_enabled = 0;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index f50f9a43d3ea..f47b8358669d 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -595,24 +595,24 @@ spider_net_set_multi(struct net_device *netdev)
int i;
u32 reg;
struct spider_net_card *card = netdev_priv(netdev);
- DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {};
+ DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES);
spider_net_set_promisc(card);
if (netdev->flags & IFF_ALLMULTI) {
- for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
- set_bit(i, bitmask);
- }
+ bitmap_fill(bitmask, SPIDER_NET_MULTICAST_HASHES);
goto write_hash;
}
+ bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES);
+
/* well, we know, what the broadcast hash value is: it's xfd
hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
- set_bit(0xfd, bitmask);
+ __set_bit(0xfd, bitmask);
netdev_for_each_mc_addr(ha, netdev) {
hash = spider_net_get_multicast_hash(netdev, ha->addr);
- set_bit(hash, bitmask);
+ __set_bit(hash, bitmask);
}
write_hash:
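
The spider_net hunk drops the zeroing initializer so the ALLMULTI path can bitmap_fill() without redundant clearing, and switches to __set_bit(): the non-atomic variant is sufficient because the bitmap lives on the stack and is invisible to any other context. The shape of the pattern:

	/* stack-local, so non-atomic bitmap ops are safe */
	DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES);

	bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES);
	__set_bit(0xfd, bitmask);	/* broadcast hash value */
	/* ... one __set_bit() per multicast address, then program the hw */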
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index 54f655a68148..93110dba0bfa 100644
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -110,7 +110,9 @@ spider_net_ethtool_nway_reset(struct net_device *netdev)
static void
spider_net_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct spider_net_card *card = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index cf0917b29e30..5251fc324221 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1091,20 +1091,22 @@ static int tsi108_get_mac(struct net_device *dev)
struct tsi108_prv_data *data = netdev_priv(dev);
u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
+ u8 addr[ETH_ALEN];
/* Note that the octets are reversed from what the manual says,
* producing an even weirder ordering...
*/
if (word2 == 0 && word1 == 0) {
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x06;
- dev->dev_addr[2] = 0xd2;
- dev->dev_addr[3] = 0x00;
- dev->dev_addr[4] = 0x00;
+ addr[0] = 0x00;
+ addr[1] = 0x06;
+ addr[2] = 0xd2;
+ addr[3] = 0x00;
+ addr[4] = 0x00;
if (0x8 == data->phy)
- dev->dev_addr[5] = 0x01;
+ addr[5] = 0x01;
else
- dev->dev_addr[5] = 0x02;
+ addr[5] = 0x02;
+ eth_hw_addr_set(dev, addr);
word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
@@ -1114,12 +1116,13 @@ static int tsi108_get_mac(struct net_device *dev)
TSI_WRITE(TSI108_MAC_ADDR1, word1);
TSI_WRITE(TSI108_MAC_ADDR2, word2);
} else {
- dev->dev_addr[0] = (word2 >> 16) & 0xff;
- dev->dev_addr[1] = (word2 >> 24) & 0xff;
- dev->dev_addr[2] = (word1 >> 0) & 0xff;
- dev->dev_addr[3] = (word1 >> 8) & 0xff;
- dev->dev_addr[4] = (word1 >> 16) & 0xff;
- dev->dev_addr[5] = (word1 >> 24) & 0xff;
+ addr[0] = (word2 >> 16) & 0xff;
+ addr[1] = (word2 >> 24) & 0xff;
+ addr[2] = (word1 >> 0) & 0xff;
+ addr[3] = (word1 >> 8) & 0xff;
+ addr[4] = (word1 >> 16) & 0xff;
+ addr[5] = (word1 >> 24) & 0xff;
+ eth_hw_addr_set(dev, addr);
}
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -1136,14 +1139,12 @@ static int tsi108_set_mac(struct net_device *dev, void *addr)
{
struct tsi108_prv_data *data = netdev_priv(dev);
u32 word1, word2;
- int i;
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
- for (i = 0; i < 6; i++)
- /* +2 is for the offset of the HW addr type */
- dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
+ /* +2 is for the offset of the HW addr type */
+ eth_hw_addr_set(dev, ((unsigned char *)addr) + 2);
word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
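
The tsi108 hunks adopt eth_hw_addr_set(): netdev->dev_addr is const as of this cycle, so drivers assemble the MAC address in a local buffer and let the core copy it in, rather than writing the bytes in place. The pattern, in sketch form:

	u8 addr[ETH_ALEN];

	addr[0] = (word2 >> 16) & 0xff;
	/* ... fill the remaining five octets from the registers ... */
	eth_hw_addr_set(dev, addr);	/* the core performs the copy */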
diff --git a/drivers/net/ethernet/vertexcom/Kconfig b/drivers/net/ethernet/vertexcom/Kconfig
new file mode 100644
index 000000000000..4184a635fe01
--- /dev/null
+++ b/drivers/net/ethernet/vertexcom/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Vertexcom network device configuration
+#
+
+config NET_VENDOR_VERTEXCOM
+ bool "Vertexcom devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Vertexcom cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_VERTEXCOM
+
+config MSE102X
+ tristate "Vertexcom MSE102x SPI"
+ depends on SPI
+ help
+ SPI driver for Vertexcom MSE102x SPI attached network chip.
+
+endif # NET_VENDOR_VERTEXCOM
diff --git a/drivers/net/ethernet/vertexcom/Makefile b/drivers/net/ethernet/vertexcom/Makefile
new file mode 100644
index 000000000000..f8b12e312637
--- /dev/null
+++ b/drivers/net/ethernet/vertexcom/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Vertexcom network device drivers.
+#
+
+obj-$(CONFIG_MSE102X) += mse102x.o
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
new file mode 100644
index 000000000000..89a31783fbb4
--- /dev/null
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 in-tech smart charging GmbH
+ *
+ * The driver is based on micrel/ks8851_spi.c
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/cache.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <linux/spi/spi.h>
+#include <linux/of_net.h>
+
+#define MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER)
+
+#define DRV_NAME "mse102x"
+
+#define DET_CMD 0x0001
+#define DET_SOF 0x0002
+#define DET_DFT 0x55AA
+
+#define CMD_SHIFT 12
+#define CMD_RTS (0x1 << CMD_SHIFT)
+#define CMD_CTR (0x2 << CMD_SHIFT)
+
+#define CMD_MASK GENMASK(15, CMD_SHIFT)
+#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0)
+
+#define DET_CMD_LEN 4
+#define DET_SOF_LEN 2
+#define DET_DFT_LEN 2
+
+#define MIN_FREQ_HZ 6000000
+#define MAX_FREQ_HZ 7142857
+
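The MSE102x framing uses a 16-bit command word: the top four bits select the command (CMD_MASK is GENMASK(15, 12)) and the low twelve bits carry a frame length (LEN_MASK), so CMD_RTS announces a pending frame together with its size. A runnable illustration of the encoding:

	#include <stdint.h>
	#include <stdio.h>

	#define CMD_SHIFT 12
	#define CMD_RTS   (0x1u << CMD_SHIFT)
	#define CMD_MASK  (0xFu << CMD_SHIFT)		/* GENMASK(15, 12) */
	#define LEN_MASK  ((1u << CMD_SHIFT) - 1)	/* GENMASK(11, 0) */

	int main(void)
	{
		uint16_t word = CMD_RTS | 60;	/* request-to-send, 60-byte frame */

		printf("cmd=0x%x len=%u\n",
		       (word & CMD_MASK) >> CMD_SHIFT, word & LEN_MASK);
		return 0;	/* prints: cmd=0x1 len=60 */
	}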
+struct mse102x_stats {
+ u64 xfer_err;
+ u64 invalid_cmd;
+ u64 invalid_ctr;
+ u64 invalid_dft;
+ u64 invalid_len;
+ u64 invalid_rts;
+ u64 invalid_sof;
+ u64 tx_timeout;
+};
+
+static const char mse102x_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "SPI transfer errors",
+ "Invalid command",
+ "Invalid CTR",
+ "Invalid DFT",
+ "Invalid frame length",
+ "Invalid RTS",
+ "Invalid SOF",
+ "TX timeout",
+};
+
+struct mse102x_net {
+ struct net_device *ndev;
+
+ u8 rxd[8];
+ u8 txd[8];
+
+ u32 msg_enable ____cacheline_aligned;
+
+ struct sk_buff_head txq;
+ struct mse102x_stats stats;
+};
+
+struct mse102x_net_spi {
+ struct mse102x_net mse102x;
+ struct mutex lock; /* Protect SPI frame transfer */
+ struct work_struct tx_work;
+ struct spi_device *spidev;
+ struct spi_message spi_msg;
+ struct spi_transfer spi_xfer;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *device_root;
+#endif
+};
+
+#define to_mse102x_spi(mse) container_of((mse), struct mse102x_net_spi, mse102x)
+
+#ifdef CONFIG_DEBUG_FS
+
+static int mse102x_info_show(struct seq_file *s, void *what)
+{
+ struct mse102x_net_spi *mses = s->private;
+
+ seq_printf(s, "TX ring size : %u\n",
+ skb_queue_len(&mses->mse102x.txq));
+
+ seq_printf(s, "IRQ : %d\n",
+ mses->spidev->irq);
+
+ seq_printf(s, "SPI effective speed : %lu\n",
+ (unsigned long)mses->spi_xfer.effective_speed_hz);
+ seq_printf(s, "SPI mode : %x\n",
+ mses->spidev->mode);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(mse102x_info);
+
+static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses)
+{
+ mses->device_root = debugfs_create_dir(dev_name(&mses->mse102x.ndev->dev),
+ NULL);
+
+ debugfs_create_file("info", S_IFREG | 0444, mses->device_root, mses,
+ &mse102x_info_fops);
+}
+
+static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses)
+{
+ debugfs_remove_recursive(mses->device_root);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses)
+{
+}
+
+static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses)
+{
+}
+
+#endif
+
+/* SPI register read/write calls.
+ *
+ * All these calls issue SPI transactions to access the chip's registers. They
+ * all require that the necessary lock is held to prevent accesses when the
+ * chip is busy transferring packet data.
+ */
+
+static void mse102x_tx_cmd_spi(struct mse102x_net *mse, u16 cmd)
+{
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ struct spi_transfer *xfer = &mses->spi_xfer;
+ struct spi_message *msg = &mses->spi_msg;
+ __be16 txb[2];
+ int ret;
+
+ txb[0] = cpu_to_be16(DET_CMD);
+ txb[1] = cpu_to_be16(cmd);
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = NULL;
+ xfer->len = DET_CMD_LEN;
+
+ ret = spi_sync(mses->spidev, msg);
+ if (ret < 0) {
+ netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ __func__, ret);
+ mse->stats.xfer_err++;
+ }
+}
+
+static int mse102x_rx_cmd_spi(struct mse102x_net *mse, u8 *rxb)
+{
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ struct spi_transfer *xfer = &mses->spi_xfer;
+ struct spi_message *msg = &mses->spi_msg;
+ __be16 *txb = (__be16 *)mse->txd;
+ __be16 *cmd = (__be16 *)mse->rxd;
+ u8 *trx = mse->rxd;
+ int ret;
+
+ txb[0] = 0;
+ txb[1] = 0;
+
+ xfer->tx_buf = txb;
+ xfer->rx_buf = trx;
+ xfer->len = DET_CMD_LEN;
+
+ ret = spi_sync(mses->spidev, msg);
+ if (ret < 0) {
+ netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ __func__, ret);
+ mse->stats.xfer_err++;
+ } else if (*cmd != cpu_to_be16(DET_CMD)) {
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, *cmd);
+ mse->stats.invalid_cmd++;
+ ret = -EIO;
+ } else {
+ memcpy(rxb, trx + 2, 2);
+ }
+
+ return ret;
+}
+
+static inline void mse102x_push_header(struct sk_buff *skb)
+{
+ __be16 *header = skb_push(skb, DET_SOF_LEN);
+
+ *header = cpu_to_be16(DET_SOF);
+}
+
+static inline void mse102x_put_footer(struct sk_buff *skb)
+{
+ __be16 *footer = skb_put(skb, DET_DFT_LEN);
+
+ *footer = cpu_to_be16(DET_DFT);
+}
+
+static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
+ unsigned int pad)
+{
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ struct spi_transfer *xfer = &mses->spi_xfer;
+ struct spi_message *msg = &mses->spi_msg;
+ struct sk_buff *tskb;
+ int ret;
+
+ netif_dbg(mse, tx_queued, mse->ndev, "%s: skb %p, %d@%p\n",
+ __func__, txp, txp->len, txp->data);
+
+ if ((skb_headroom(txp) < DET_SOF_LEN) ||
+ (skb_tailroom(txp) < DET_DFT_LEN + pad)) {
+ tskb = skb_copy_expand(txp, DET_SOF_LEN, DET_DFT_LEN + pad,
+ GFP_KERNEL);
+ if (!tskb)
+ return -ENOMEM;
+
+ dev_kfree_skb(txp);
+ txp = tskb;
+ }
+
+ mse102x_push_header(txp);
+
+ if (pad)
+ skb_put_zero(txp, pad);
+
+ mse102x_put_footer(txp);
+
+ xfer->tx_buf = txp->data;
+ xfer->rx_buf = NULL;
+ xfer->len = txp->len;
+
+ ret = spi_sync(mses->spidev, msg);
+ if (ret < 0) {
+ netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ __func__, ret);
+ mse->stats.xfer_err++;
+ }
+
+ return ret;
+}
+
+static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
+ unsigned int frame_len)
+{
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+ struct spi_transfer *xfer = &mses->spi_xfer;
+ struct spi_message *msg = &mses->spi_msg;
+ __be16 *sof = (__be16 *)buff;
+ __be16 *dft = (__be16 *)(buff + DET_SOF_LEN + frame_len);
+ int ret;
+
+ xfer->rx_buf = buff;
+ xfer->tx_buf = NULL;
+ xfer->len = DET_SOF_LEN + frame_len + DET_DFT_LEN;
+
+ ret = spi_sync(mses->spidev, msg);
+ if (ret < 0) {
+ netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
+ __func__, ret);
+ mse->stats.xfer_err++;
+ } else if (*sof != cpu_to_be16(DET_SOF)) {
+ netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
+ __func__, *sof);
+ mse->stats.invalid_sof++;
+ ret = -EIO;
+ } else if (*dft != cpu_to_be16(DET_DFT)) {
+ netdev_dbg(mse->ndev, "%s: SPI frame tail is invalid (0x%04x)\n",
+ __func__, *dft);
+ mse->stats.invalid_dft++;
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static void mse102x_dump_packet(const char *msg, int len, const char *data)
+{
+ printk(KERN_DEBUG ": %s - packet len:%d\n", msg, len);
+ print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1,
+ data, len, true);
+}
+
+static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
+{
+ struct sk_buff *skb;
+ unsigned int rxalign;
+ unsigned int rxlen;
+ __be16 rx = 0;
+ u16 cmd_resp;
+ u8 *rxpkt;
+ int ret;
+
+ mse102x_tx_cmd_spi(mse, CMD_CTR);
+ ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
+ cmd_resp = be16_to_cpu(rx);
+
+ if (ret || ((cmd_resp & CMD_MASK) != CMD_RTS)) {
+ usleep_range(50, 100);
+
+ mse102x_tx_cmd_spi(mse, CMD_CTR);
+ ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
+ if (ret)
+ return;
+
+ cmd_resp = be16_to_cpu(rx);
+ if ((cmd_resp & CMD_MASK) != CMD_RTS) {
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, cmd_resp);
+ mse->stats.invalid_rts++;
+ return;
+ }
+
+ net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
+ __func__);
+ }
+
+ rxlen = cmd_resp & LEN_MASK;
+ if (!rxlen) {
+ net_dbg_ratelimited("%s: No frame length defined\n", __func__);
+ mse->stats.invalid_len++;
+ return;
+ }
+
+ rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
+ skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
+ if (!skb)
+ return;
+
+ /* 2 bytes Start of frame (before the Ethernet header)
+ * 2 bytes Data frame tail (after the Ethernet frame)
+ * Both are copied into the buffer but otherwise ignored.
+ */
+ rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
+ if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
+ mse->ndev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ if (netif_msg_pktdata(mse))
+ mse102x_dump_packet(__func__, skb->len, skb->data);
+
+ skb->protocol = eth_type_trans(skb, mse->ndev);
+ netif_rx_ni(skb);
+
+ mse->ndev->stats.rx_packets++;
+ mse->ndev->stats.rx_bytes += rxlen;
+}
+
+static int mse102x_tx_pkt_spi(struct mse102x_net *mse, struct sk_buff *txb,
+ unsigned long work_timeout)
+{
+ unsigned int pad = 0;
+ __be16 rx = 0;
+ u16 cmd_resp;
+ int ret;
+ bool first = true;
+
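+ /* Pad short frames to the minimum Ethernet frame size
+ * (60 bytes, i.e. without FCS)
+ */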
+ if (txb->len < 60)
+ pad = 60 - txb->len;
+
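+ /* Announce the frame and its length with CMD_RTS, then wait for the
+ * device to answer with CMD_CTR before pushing the data out.
+ */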
+ while (1) {
+ mse102x_tx_cmd_spi(mse, CMD_RTS | (txb->len + pad));
+ ret = mse102x_rx_cmd_spi(mse, (u8 *)&rx);
+ cmd_resp = be16_to_cpu(rx);
+
+ if (!ret) {
+ /* ready to send frame? */
+ if (cmd_resp == CMD_CTR)
+ break;
+
+ net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
+ __func__, cmd_resp);
+ mse->stats.invalid_ctr++;
+ }
+
+ /* It is not predictable how many retries (and therefore how much
+ * time) sending a single packet may take, so TX timeouts are always
+ * possible. That is why the netdev watchdog is not used here.
+ */
+ if (time_after(jiffies, work_timeout))
+ return -ETIMEDOUT;
+
+ if (first) {
+ /* stop the queue on the first busy response to throttle TX */
+ netif_stop_queue(mse->ndev);
+ /* fast retry */
+ usleep_range(50, 100);
+ first = false;
+ } else {
+ msleep(20);
+ }
+ }
+
+ ret = mse102x_tx_frame_spi(mse, txb, pad);
+ if (ret)
+ net_dbg_ratelimited("%s: Failed to send (%d), drop frame\n",
+ __func__, ret);
+
+ return ret;
+}
+
+#define TX_QUEUE_MAX 10
+
+static void mse102x_tx_work(struct work_struct *work)
+{
+ /* Make sure timeout is sufficient to transfer TX_QUEUE_MAX frames */
+ unsigned long work_timeout = jiffies + msecs_to_jiffies(1000);
+ struct mse102x_net_spi *mses;
+ struct mse102x_net *mse;
+ struct sk_buff *txb;
+ int ret = 0;
+
+ mses = container_of(work, struct mse102x_net_spi, tx_work);
+ mse = &mses->mse102x;
+
+ while ((txb = skb_dequeue(&mse->txq))) {
+ mutex_lock(&mses->lock);
+ ret = mse102x_tx_pkt_spi(mse, txb, work_timeout);
+ mutex_unlock(&mses->lock);
+ if (ret) {
+ mse->ndev->stats.tx_dropped++;
+ } else {
+ mse->ndev->stats.tx_bytes += txb->len;
+ mse->ndev->stats.tx_packets++;
+ }
+
+ dev_kfree_skb(txb);
+ }
+
+ if (ret == -ETIMEDOUT) {
+ if (netif_msg_timer(mse))
+ netdev_err(mse->ndev, "tx work timeout\n");
+
+ mse->stats.tx_timeout++;
+ }
+
+ netif_wake_queue(mse->ndev);
+}
+
+static netdev_tx_t mse102x_start_xmit_spi(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+
+ netif_dbg(mse, tx_queued, ndev,
+ "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
+
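+ /* spi_sync() may sleep, so hand the frame to the TX worker instead
+ * of transferring it here in softirq context.
+ */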
+ skb_queue_tail(&mse->txq, skb);
+
+ if (skb_queue_len(&mse->txq) >= TX_QUEUE_MAX)
+ netif_stop_queue(ndev);
+
+ schedule_work(&mses->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+static void mse102x_init_mac(struct mse102x_net *mse, struct device_node *np)
+{
+ struct net_device *ndev = mse->ndev;
+ int ret = of_get_ethdev_address(np, ndev);
+
+ if (ret) {
+ eth_hw_addr_random(ndev);
+ netdev_err(ndev, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
+ }
+}
+
+/* Assumption: this is called for every incoming packet */
+static irqreturn_t mse102x_irq(int irq, void *_mse)
+{
+ struct mse102x_net *mse = _mse;
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+
+ mutex_lock(&mses->lock);
+ mse102x_rx_pkt_spi(mse);
+ mutex_unlock(&mses->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int mse102x_net_open(struct net_device *ndev)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+ int ret;
+
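+ /* The IRQ handler performs sleeping SPI transfers, so it has to run
+ * in thread context.
+ */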
+ ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
+ ndev->name, mse);
+ if (ret < 0) {
+ netdev_err(ndev, "Failed to get irq: %d\n", ret);
+ return ret;
+ }
+
+ netif_dbg(mse, ifup, ndev, "opening\n");
+
+ netif_start_queue(ndev);
+
+ netif_carrier_on(ndev);
+
+ netif_dbg(mse, ifup, ndev, "network device up\n");
+
+ return 0;
+}
+
+static int mse102x_net_stop(struct net_device *ndev)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+
+ netif_info(mse, ifdown, ndev, "shutting down\n");
+
+ netif_carrier_off(mse->ndev);
+
+ /* stop any outstanding work */
+ flush_work(&mses->tx_work);
+
+ netif_stop_queue(ndev);
+
+ skb_queue_purge(&mse->txq);
+
+ free_irq(ndev->irq, mse);
+
+ return 0;
+}
+
+static const struct net_device_ops mse102x_netdev_ops = {
+ .ndo_open = mse102x_net_open,
+ .ndo_stop = mse102x_net_stop,
+ .ndo_start_xmit = mse102x_start_xmit_spi,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* ethtool support */
+
+static void mse102x_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *di)
+{
+ strscpy(di->driver, DRV_NAME, sizeof(di->driver));
+ strscpy(di->bus_info, dev_name(ndev->dev.parent), sizeof(di->bus_info));
+}
+
+static u32 mse102x_get_msglevel(struct net_device *ndev)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+
+ return mse->msg_enable;
+}
+
+static void mse102x_set_msglevel(struct net_device *ndev, u32 to)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+
+ mse->msg_enable = to;
+}
+
+static void mse102x_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *estats, u64 *data)
+{
+ struct mse102x_net *mse = netdev_priv(ndev);
+ struct mse102x_stats *st = &mse->stats;
+
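+ /* struct mse102x_stats is a plain array of u64 counters that mirrors
+ * the order of mse102x_gstrings_stats
+ */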
+ memcpy(data, st, ARRAY_SIZE(mse102x_gstrings_stats) * sizeof(u64));
+}
+
+static void mse102x_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &mse102x_gstrings_stats,
+ sizeof(mse102x_gstrings_stats));
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static int mse102x_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(mse102x_gstrings_stats);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct ethtool_ops mse102x_ethtool_ops = {
+ .get_drvinfo = mse102x_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = mse102x_get_msglevel,
+ .set_msglevel = mse102x_set_msglevel,
+ .get_ethtool_stats = mse102x_get_ethtool_stats,
+ .get_strings = mse102x_get_strings,
+ .get_sset_count = mse102x_get_sset_count,
+};
+
+/* driver bus management functions */
+
+#ifdef CONFIG_PM_SLEEP
+
+static int mse102x_suspend(struct device *dev)
+{
+ struct mse102x_net *mse = dev_get_drvdata(dev);
+ struct net_device *ndev = mse->ndev;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ mse102x_net_stop(ndev);
+ }
+
+ return 0;
+}
+
+static int mse102x_resume(struct device *dev)
+{
+ struct mse102x_net *mse = dev_get_drvdata(dev);
+ struct net_device *ndev = mse->ndev;
+
+ if (netif_running(ndev)) {
+ mse102x_net_open(ndev);
+ netif_device_attach(ndev);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mse102x_pm_ops, mse102x_suspend, mse102x_resume);
+
+static int mse102x_probe_spi(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mse102x_net_spi *mses;
+ struct net_device *ndev;
+ struct mse102x_net *mse;
+ int ret;
+
+ spi->bits_per_word = 8;
+ spi->mode |= SPI_MODE_3;
+ /* enforce minimum speed to ensure device functionality */
+ spi->master->min_speed_hz = MIN_FREQ_HZ;
+
+ if (!spi->max_speed_hz)
+ spi->max_speed_hz = MAX_FREQ_HZ;
+
+ if (spi->max_speed_hz < MIN_FREQ_HZ ||
+ spi->max_speed_hz > MAX_FREQ_HZ) {
+ dev_err(&spi->dev, "SPI max frequency out of range (min: %u, max: %u)\n",
+ MIN_FREQ_HZ, MAX_FREQ_HZ);
+ return -EINVAL;
+ }
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Unable to setup SPI device: %d\n", ret);
+ return ret;
+ }
+
+ ndev = devm_alloc_etherdev(dev, sizeof(struct mse102x_net_spi));
+ if (!ndev)
+ return -ENOMEM;
+
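+ /* Reserve room for the SOF/DFT markers so the TX path can usually
+ * avoid skb_copy_expand()
+ */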
+ ndev->needed_tailroom += ALIGN(DET_DFT_LEN, 4);
+ ndev->needed_headroom += ALIGN(DET_SOF_LEN, 4);
+ ndev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ ndev->tx_queue_len = 100;
+
+ mse = netdev_priv(ndev);
+ mses = to_mse102x_spi(mse);
+
+ mses->spidev = spi;
+ mutex_init(&mses->lock);
+ INIT_WORK(&mses->tx_work, mse102x_tx_work);
+
+ /* initialise pre-made spi transfer messages */
+ spi_message_init(&mses->spi_msg);
+ spi_message_add_tail(&mses->spi_xfer, &mses->spi_msg);
+
+ ndev->irq = spi->irq;
+ mse->ndev = ndev;
+
+ /* set the default message enable */
+ mse->msg_enable = netif_msg_init(-1, MSG_DEFAULT);
+
+ skb_queue_head_init(&mse->txq);
+
+ SET_NETDEV_DEV(ndev, dev);
+
+ dev_set_drvdata(dev, mse);
+
+ netif_carrier_off(mse->ndev);
+ ndev->netdev_ops = &mse102x_netdev_ops;
+ ndev->ethtool_ops = &mse102x_ethtool_ops;
+
+ mse102x_init_mac(mse, dev->of_node);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(dev, "failed to register network device: %d\n", ret);
+ return ret;
+ }
+
+ mse102x_init_device_debugfs(mses);
+
+ return 0;
+}
+
+static int mse102x_remove_spi(struct spi_device *spi)
+{
+ struct mse102x_net *mse = dev_get_drvdata(&spi->dev);
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
+
+ if (netif_msg_drv(mse))
+ dev_info(&spi->dev, "remove\n");
+
+ mse102x_remove_device_debugfs(mses);
+ unregister_netdev(mse->ndev);
+
+ return 0;
+}
+
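+ /* A rough device tree sketch for binding this driver (property
+ * details depend on the board and the vertexcom-mse102x binding):
+ *
+ *   ethernet@0 {
+ *       compatible = "vertexcom,mse1021";
+ *       reg = <0>;
+ *       interrupt-parent = <&gpio>;
+ *       interrupts = <23 IRQ_TYPE_EDGE_RISING>;
+ *       spi-max-frequency = <7142857>;
+ *   };
+ */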
+static const struct of_device_id mse102x_match_table[] = {
+ { .compatible = "vertexcom,mse1021" },
+ { .compatible = "vertexcom,mse1022" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mse102x_match_table);
+
+static struct spi_driver mse102x_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = mse102x_match_table,
+ .pm = &mse102x_pm_ops,
+ },
+ .probe = mse102x_probe_spi,
+ .remove = mse102x_remove_spi,
+};
+module_spi_driver(mse102x_driver);
+
+MODULE_DESCRIPTION("MSE102x Network driver");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@in-tech.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index e7065c9a8e38..b900ab5aef2a 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1276,8 +1276,11 @@ static const struct attribute_group temac_attr_group = {
* ethtool support
*/
-static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static void
+ll_temac_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct temac_local *lp = netdev_priv(ndev);
@@ -1291,8 +1294,11 @@ static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
ering->tx_pending = lp->tx_bd_num;
}
-static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static int
+ll_temac_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct temac_local *lp = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9b068b81ae09..377c94ec2486 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -41,8 +41,9 @@
#include "xilinx_axienet.h"
/* Descriptors defines for Tx and Rx DMA */
-#define TX_BD_NUM_DEFAULT 64
+#define TX_BD_NUM_DEFAULT 128
#define RX_BD_NUM_DEFAULT 1024
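+/* One BD for the linear part of an skb plus one per fragment */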
+#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX 4096
#define RX_BD_NUM_MAX 4096
@@ -496,7 +497,8 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
static int __axienet_device_reset(struct axienet_local *lp)
{
- u32 timeout;
+ u32 value;
+ int ret;
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
@@ -506,15 +508,23 @@ static int __axienet_device_reset(struct axienet_local *lp)
* they both reset the entire DMA core, so only one needs to be used.
*/
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
- timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
- XAXIDMA_CR_RESET_MASK) {
- udelay(1);
- if (--timeout == 0) {
- netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
- __func__);
- return -ETIMEDOUT;
- }
+ ret = read_poll_timeout(axienet_dma_in32, value,
+ !(value & XAXIDMA_CR_RESET_MASK),
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAXIDMA_TX_CR_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
+ return ret;
+ }
+
+ /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
+ ret = read_poll_timeout(axienet_ior, value,
+ value & XAE_INT_PHYRSTCMPLT_MASK,
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAE_IS_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
+ return ret;
}
return 0;
@@ -623,6 +633,8 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
break;
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
phys = desc_get_phys_addr(lp, cur_p);
dma_unmap_single(ndev->dev.parent, phys,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
@@ -631,13 +643,15 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
dev_consume_skb_irq(cur_p->skb);
- cur_p->cntrl = 0;
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app4 = 0;
- cur_p->status = 0;
cur_p->skb = NULL;
+ /* ensure our transmit path and device don't prematurely see status cleared */
+ wmb();
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
if (sizep)
*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
@@ -647,6 +661,32 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
}
/**
+ * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
+ * @lp: Pointer to the axienet_local structure
+ * @num_frag: The number of BDs to check for
+ *
+ * Return: 0, on success
+ * NETDEV_TX_BUSY, if any of the descriptors are not free
+ *
+ * This function is invoked before BDs are allocated and transmission starts.
+ * This function returns 0 if a BD or group of BDs can be allocated for
+ * transmission. If the BD or any of the BDs are not free the function
+ * returns a busy status. This is invoked from axienet_start_xmit.
+ */
+static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
+ int num_frag)
+{
+ struct axidma_bd *cur_p;
+
+ /* Ensure we see all descriptor updates from device or TX IRQ path */
+ rmb();
+ cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
+ if (cur_p->cntrl)
+ return NETDEV_TX_BUSY;
+ return 0;
+}
+
+/**
* axienet_start_xmit_done - Invoked once a transmit is completed by the
* Axi DMA Tx channel.
* @ndev: Pointer to the net_device structure
@@ -675,30 +715,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
/* Matches barrier in axienet_start_xmit */
smp_mb();
- netif_wake_queue(ndev);
-}
-
-/**
- * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
- * @lp: Pointer to the axienet_local structure
- * @num_frag: The number of BDs to check for
- *
- * Return: 0, on success
- * NETDEV_TX_BUSY, if any of the descriptors are not free
- *
- * This function is invoked before BDs are allocated and transmission starts.
- * This function returns 0 if a BD or group of BDs can be allocated for
- * transmission. If the BD or any of the BDs are not free the function
- * returns a busy status. This is invoked from axienet_start_xmit.
- */
-static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
- int num_frag)
-{
- struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
- if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
- return NETDEV_TX_BUSY;
- return 0;
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
}
/**
@@ -730,20 +748,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
num_frag = skb_shinfo(skb)->nr_frags;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
- if (axienet_check_tx_bd_space(lp, num_frag)) {
- if (netif_queue_stopped(ndev))
- return NETDEV_TX_BUSY;
-
+ if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
+ /* Should not happen: the previous start_xmit call checked for
+ * sufficient space, and the queue is only woken once enough space
+ * is available again.
+ */
netif_stop_queue(ndev);
-
- /* Matches barrier in axienet_start_xmit_done */
- smp_mb();
-
- /* Space might have just been freed - check again */
- if (axienet_check_tx_bd_space(lp, num_frag))
- return NETDEV_TX_BUSY;
-
- netif_wake_queue(ndev);
+ if (net_ratelimit())
+ netdev_warn(ndev, "TX ring unexpectedly full\n");
+ return NETDEV_TX_BUSY;
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -804,6 +817,18 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
+ /* Stop queue if next transmit may not have space */
+ if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in axienet_start_xmit_done */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
+ }
+
return NETDEV_TX_OK;
}
@@ -834,6 +859,8 @@ static void axienet_recv(struct net_device *ndev)
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
phys = desc_get_phys_addr(lp, cur_p);
dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
DMA_FROM_DEVICE);
@@ -1323,8 +1350,11 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}
-static void axienet_ethtools_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static void
+axienet_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
@@ -1338,15 +1368,19 @@ static void axienet_ethtools_get_ringparam(struct net_device *ndev,
ering->tx_pending = lp->tx_bd_num;
}
-static int axienet_ethtools_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static int
+axienet_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
if (ering->rx_pending > RX_BD_NUM_MAX ||
ering->rx_mini_pending ||
ering->rx_jumbo_pending ||
- ering->rx_pending > TX_BD_NUM_MAX)
+ ering->tx_pending < TX_BD_NUM_MIN ||
+ ering->tx_pending > TX_BD_NUM_MAX)
return -EINVAL;
if (netif_running(ndev))
@@ -1503,65 +1537,6 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.nway_reset = axienet_ethtools_nway_reset,
};
-static void axienet_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- /* Only support the mode we are configured for */
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- break;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_SGMII:
- if (lp->switch_x_sgmii)
- break;
- fallthrough;
- default:
- if (state->interface != lp->phy_mode) {
- netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
- phy_modes(state->interface),
- phy_modes(lp->phy_mode));
- linkmode_zero(supported);
- return;
- }
- }
-
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
-
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, Pause);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 1000baseT_Full);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- break;
- fallthrough;
- case PHY_INTERFACE_MODE_MII:
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 10baseT_Full);
- fallthrough;
- default:
- break;
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void axienet_mac_pcs_get_state(struct phylink_config *config,
struct phylink_link_state *state)
{
@@ -1687,7 +1662,7 @@ static void axienet_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops axienet_phylink_ops = {
- .validate = axienet_validate,
+ .validate = phylink_generic_validate,
.mac_pcs_get_state = axienet_mac_pcs_get_state,
.mac_an_restart = axienet_mac_an_restart,
.mac_prepare = axienet_mac_prepare,
@@ -2080,6 +2055,11 @@ static int axienet_probe(struct platform_device *pdev)
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ /* Reset core now that clocks are enabled, prior to accessing MDIO */
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ goto cleanup_clk;
+
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (lp->phy_node) {
ret = axienet_mdio_setup(lp);
@@ -2104,6 +2084,17 @@ static int axienet_probe(struct platform_device *pdev)
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.legacy_pre_march2020 = true;
+ lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD;
+
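+ /* phylink_generic_validate() derives the supported link modes from
+ * mac_capabilities and supported_interfaces, so both must be filled in.
+ */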
+ __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
+ if (lp->switch_x_sgmii) {
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ lp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ lp->phylink_config.supported_interfaces);
+ }
lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
lp->phy_mode,
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0815de581c7f..519599480b15 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1133,14 +1133,11 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->ndev = ndev;
/* Get IRQ for the device */
- res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "no IRQ found\n");
- rc = -ENXIO;
+ rc = platform_get_irq(ofdev, 0);
+ if (rc < 0)
goto error;
- }
- ndev->irq = res->start;
+ ndev->irq = rc;
res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 65fdad1107fc..df77a22d1b81 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -382,9 +382,6 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- if (cfg.flags) /* reserved for future extensions */
- return -EINVAL;
-
ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index);
if (ret)
return ret;